├── .gitignore ├── .gitlab-ci.yml ├── AddOns ├── Prometheus │ ├── configmap.yaml │ ├── deployment.yaml │ ├── node-exporter.yaml │ └── pushgateway.yaml ├── dashboard.yaml ├── dns.yaml └── grafana.yaml ├── ClusterStart.yaml ├── Demo.yaml ├── Demo ├── ConfigMaps │ └── countly.yaml └── Manifests │ ├── countly.yaml │ ├── distcc-daemon.yaml │ └── mongo.yaml ├── Docker ├── Countly │ ├── Dockerfile │ └── runit │ │ ├── countly-api.sh │ │ └── countly-dashboard.sh ├── Demo │ └── Dockerfile ├── Kernel │ └── Dockerfile ├── KubeAddOns │ └── Dockerfile ├── Wrk │ ├── Dockerfile │ ├── README.md │ ├── runner.sh │ └── send_summary.lua ├── azure-cli │ ├── Dockerfile │ ├── Readme.org │ └── entrypoint.sh ├── boinc │ ├── Dockerfile │ ├── README.md │ ├── attach.sh │ ├── boinc_rpc.sh │ └── runner.sh ├── cloudbuild.yaml ├── distcc-daemon │ ├── Dockerfile │ ├── config │ └── runner.sh ├── distcc-master │ ├── Dockerfile │ ├── config │ └── runner.sh ├── distcc │ ├── Dockerfile │ ├── config │ └── runner.sh ├── echo │ ├── Dockerfile │ └── echo.py ├── falcon │ ├── Dockerfile │ └── app.py ├── fluentd-kubectl │ ├── Dockerfile │ └── fluent.conf ├── fluentd-reportstats │ ├── Dockerfile │ └── fluent.conf ├── grafana │ ├── Dockerfile │ ├── README.md │ ├── dashboards │ │ └── cncfdemo.json │ └── run.sh ├── gunicorn │ ├── Dockerfile │ └── gunicorn_conf.py └── kubectl │ └── Dockerfile ├── Images ├── base │ ├── README.md │ ├── ansible.cfg │ ├── base.sh │ ├── disable_tty.sh │ ├── packer.json │ └── playbook.yml └── golden │ ├── README.md │ ├── ansible.cfg │ ├── disable_tty.sh │ ├── packer.json │ └── playbook.yml ├── LICENSE ├── README.md ├── Web ├── api │ ├── _src │ │ ├── Schemas │ │ │ └── new.json │ │ ├── requirements.txt │ │ ├── summary.py │ │ └── trace.py │ ├── _tests │ │ ├── gen_mock_run.py │ │ ├── gen_mock_summary.py │ │ ├── mock.out │ │ └── new │ │ │ └── event.json │ ├── setup.cfg │ ├── summary.yml │ └── trace.yml └── results │ ├── 404.html │ ├── alpha.sh │ ├── aws-iot-sdk-browser-bundle.js │ ├── bundle.js │ ├── chart.js │ ├── favicon.ico │ ├── img │ ├── aws.png │ └── logo_cncf.png │ ├── js │ ├── fetch.js │ └── url-search-params.js │ ├── main.css │ ├── main2.css │ ├── release.sh │ ├── sample.json │ ├── sample2.json │ ├── search │ ├── search.html │ ├── summary.html │ ├── summary.json │ └── summary.json.bak ├── cncfdemo-cli ├── README.md ├── cncfdemo │ ├── Deployment │ │ ├── Countly │ │ │ ├── configMaps │ │ │ │ └── countly │ │ │ │ │ ├── api.js │ │ │ │ │ └── frontend.js │ │ │ └── countly.yaml.j2 │ │ ├── Mongo │ │ │ └── mongo.yaml.j2 │ │ ├── distcc │ │ │ ├── README.md │ │ │ ├── distcc-ds.yaml │ │ │ └── distcc-svc.yaml │ │ ├── echo │ │ │ ├── echo-rc.yaml │ │ │ └── echo-svc.yaml │ │ └── runner.sh │ ├── __init__.py │ ├── bootstrap │ │ ├── DO │ │ │ ├── simple.py │ │ │ └── simple2.py │ │ ├── __init__.py │ │ ├── aws │ │ │ ├── Policies │ │ │ │ └── lambda-policy.json │ │ │ ├── __init__.py │ │ │ ├── cli.py │ │ │ ├── execution_plans │ │ │ │ ├── __init__.py │ │ │ │ ├── asg │ │ │ │ └── vpc │ │ │ └── utils.py │ │ ├── bootstrap.py │ │ └── main.py │ ├── cncf.py │ ├── kubectl │ │ ├── __init__.py │ │ ├── cmd_create.py │ │ ├── configmap.py │ │ └── utils.py │ └── utils │ │ └── utils.py └── setup.py ├── discovery ├── README.md └── lambda.py └── provisioning ├── Dockerfile ├── aws ├── Readme.mkd ├── aws.tf ├── cert.tf ├── cleanup.tf ├── init-cfssl ├── input.tf ├── keypair.tf ├── modules.tf ├── modules │ ├── bastion │ │ ├── ec2.tf │ │ ├── input.tf │ │ ├── output.tf │ │ └── user-data.yml │ ├── dns │ │ ├── dns.tf │ │ ├── input.tf │ │ └── output.tf │ ├── 
etcd │ │ ├── cloud-config.tf │ │ ├── cloud-config.yml │ │ ├── ec2.tf │ │ ├── elb.tf │ │ ├── input.tf │ │ ├── kube-apiserver.yml │ │ └── output.tf │ ├── iam │ │ ├── etcd.tf │ │ ├── io.tf │ │ └── worker.tf │ ├── security │ │ ├── io.tf │ │ └── security.tf │ ├── vpc │ │ ├── input.tf │ │ ├── output.tf │ │ ├── private.tf │ │ ├── public.tf │ │ └── vpc.tf │ └── worker │ │ ├── cloud-config.tf │ │ ├── cloud-config.yml │ │ ├── ec2.tf │ │ ├── input.tf │ │ └── output.tf ├── output.tf └── wait-for-cluster ├── azure ├── azure.tf ├── docs │ ├── azure_app_endpoints.png │ ├── azure_app_registration.png │ ├── guid_from_oauth_endpoint.png │ ├── key_generation_copy_me.png │ ├── research.md │ ├── research.org │ └── web_api_application_type.png ├── init-cfssl ├── input.tf ├── modules.tf ├── modules │ ├── bastion │ │ ├── bastion-node.tf │ │ ├── bastion-user-data.yml │ │ ├── input.tf │ │ └── output.tf │ ├── dns │ │ ├── dns.tf │ │ ├── input.tf │ │ └── output.tf │ ├── etcd │ │ ├── etcd-cloud-config.tf │ │ ├── etcd-cloud-config.yml │ │ ├── etcd-load-balancer.tf │ │ ├── etcd-nodes.tf │ │ ├── input.tf │ │ ├── kube-apiserver.yml │ │ └── output.tf │ ├── network │ │ ├── input.tf │ │ ├── output.tf │ │ └── virtual_network.tf │ └── worker │ │ ├── input.tf │ │ ├── worker-cloud-config.tf │ │ ├── worker-cloud-config.yml │ │ └── worker-nodes.tf ├── output.tf ├── readme.org ├── runme ├── servicePrincipalProfile.json ├── ssl-ssh-cloud.tf └── wait-for-cluster ├── cross-cloud ├── cloud.tf ├── input.tf └── output.tf ├── gce ├── cert.tf ├── cloud-config.tf ├── gce.tf ├── init-cfssl ├── input.tf ├── keypair.tf ├── modules.tf ├── modules │ ├── bastion │ │ ├── input.tf │ │ ├── node.tf │ │ ├── output.tf │ │ └── user-data.yml │ ├── dns │ │ ├── dns.tf │ │ ├── input.tf │ │ └── output.tf │ ├── etcd │ │ ├── cloud-config.tf │ │ ├── cloud-config.yml │ │ ├── discovery.tf │ │ ├── external_lb.tf │ │ ├── input.tf │ │ ├── internal_lb.tf │ │ ├── load-balancer.tf │ │ ├── nodes.tf │ │ └── output.tf │ ├── security │ │ ├── io.tf │ │ └── security.tf │ ├── vpc │ │ ├── azure-security.tf │ │ ├── gce-subnet.tf │ │ ├── io.tf │ │ ├── output.tf │ │ ├── private.tf │ │ ├── public.tf │ │ └── vpc.tf │ └── worker │ │ ├── cloud-config.tf │ │ ├── cloud-config.yml │ │ ├── input.tf │ │ └── nodes.tf ├── output.tf ├── runme └── wait-for-cluster ├── gke ├── gke.tf ├── input.tf ├── modules.tf ├── modules │ ├── cluster │ │ ├── cluster.tf │ │ ├── input.tf │ │ ├── node-pool.tf │ │ └── output.tf │ └── vpc │ │ ├── gce-subnet.tf │ │ ├── input.tf │ │ ├── output.tf │ │ └── vpc.tf └── output.tf ├── kubeconfig ├── input.tf ├── kubeconfig.tf └── output.tf ├── packet ├── init-cfssl ├── input.tf ├── modules.tf ├── modules │ ├── dns │ │ ├── dns.tf │ │ ├── input.tf │ │ └── output.tf │ ├── etcd │ │ ├── discovery.tf │ │ ├── etcd-cloud-config.tf │ │ ├── etcd-cloud-config.yml │ │ ├── etcd-nodes.tf │ │ ├── input.tf │ │ ├── kube-apiserver.yml │ │ ├── kube-controller-manager.yml │ │ ├── kube-proxy.yml │ │ ├── kube-scheduler.yml │ │ └── output.tf │ └── worker │ │ ├── input.tf │ │ ├── kube-proxy.yml │ │ ├── output.tf │ │ ├── worker-cloud-config.tf │ │ ├── worker-cloud-config.yml │ │ └── worker-nodes.tf ├── output.tf ├── packet.tf └── ssl-ssh-cloud.tf └── provision.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *~ 3 | *.swp 4 | *.swo 5 | *.egg-info/ 6 | *.out 7 | *.zip 8 | *.pyc 9 | *.dist-info/ 10 | data/ 11 | terraform.tfstate 12 | terraform.tfstate.backup 13 | tmp/ 14 | *.env 15 | .terraform/ 16 | 
-------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # This file is a template, and might need editing before it works on your project. 2 | # Official docker image. 3 | 4 | image: docker:latest 5 | variables: 6 | DOCKER_HOST: 127.0.0.1:2375 7 | privileged: 'true' 8 | services: 9 | - docker:dind 10 | 11 | stages: 12 | - build-provisioning 13 | - deploy 14 | - destroy 15 | 16 | build-provisioning: 17 | stage: build-provisioning 18 | only: 19 | - master 20 | script: 21 | - docker login -u "gitlab-ci-token" -p "$CI_JOB_TOKEN" $CI_REGISTRY 22 | - docker build --pull -t "$CI_REGISTRY_IMAGE/provisioning:latest" ./provisioning 23 | - docker push "$CI_REGISTRY_IMAGE/provisioning:latest" 24 | 25 | deploy_cloud: 26 | image: registry.gitlab.com/cncf/demo/provisioning:latest 27 | stage: deploy 28 | only: 29 | - aws 30 | - azure 31 | - gce 32 | - gke 33 | - packet 34 | environment: 35 | name: $CI_COMMIT_REF_NAME 36 | url: https://$CI_ENVIRONMENT_SLUG.cncf.ci/ 37 | on_stop: destroy_cloud 38 | artifacts: 39 | when: always 40 | expire_in: 4 weeks 41 | name: "${CI_ENVIRONMENT_SLUG}" 42 | paths: 43 | - ./provisioning/data/ 44 | script: 45 | - ./provisioning/provision.sh ${CI_COMMIT_REF_NAME}-deploy ${CI_ENVIRONMENT_SLUG} 46 | 47 | destroy_cloud: 48 | image: registry.gitlab.com/cncf/demo/provisioning:latest 49 | stage: destroy 50 | when: manual 51 | environment: 52 | name: $CI_COMMIT_REF_NAME 53 | action: stop 54 | artifacts: 55 | when: always 56 | expire_in: 4 weeks 57 | name: "${CI_ENVIRONMENT_SLUG}" 58 | paths: 59 | - ./provisioning/data/ 60 | script: 61 | - ./provisioning/provision.sh ${CI_COMMIT_REF_NAME}-destroy ${CI_ENVIRONMENT_SLUG} 62 | -------------------------------------------------------------------------------- /AddOns/Prometheus/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | labels: 9 | name: prometheus 10 | spec: 11 | selector: 12 | app: prometheus 13 | ports: 14 | - name: prometheus 15 | protocol: TCP 16 | port: 9090 17 | nodePort: 30900 18 | type: NodePort 19 | 20 | --- 21 | 22 | apiVersion: extensions/v1beta1 23 | kind: Deployment 24 | metadata: 25 | name: prometheus 26 | namespace: monitoring 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: prometheus 32 | template: 33 | metadata: 34 | name: prometheus 35 | labels: 36 | app: prometheus 37 | spec: 38 | containers: 39 | - name: prometheus 40 | image: prom/prometheus:v1.3.0 41 | args: 42 | - '-storage.local.retention=6h' 43 | - '-config.file=/etc/prometheus/prometheus.yml' 44 | ports: 45 | - name: web 46 | containerPort: 9090 47 | volumeMounts: 48 | - name: config-volume 49 | mountPath: /etc/prometheus 50 | volumes: 51 | - name: config-volume 52 | configMap: 53 | name: prometheus 54 | -------------------------------------------------------------------------------- /AddOns/Prometheus/node-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | labels: 9 | app: node-exporter 10 | name: node-exporter 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: scrape 15 | port: 9100 16 | protocol: TCP 17 | 
selector: 18 | app: node-exporter 19 | type: ClusterIP 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: DaemonSet 23 | metadata: 24 | name: node-exporter 25 | namespace: monitoring 26 | spec: 27 | template: 28 | metadata: 29 | labels: 30 | app: node-exporter 31 | name: node-exporter 32 | spec: 33 | containers: 34 | - image: prom/node-exporter 35 | name: node-exporter 36 | ports: 37 | - containerPort: 9100 38 | hostPort: 9100 39 | name: scrape 40 | hostNetwork: true 41 | hostPID: true 42 | -------------------------------------------------------------------------------- /AddOns/Prometheus/pushgateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pushgateway 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | labels: 9 | name: pushgateway 10 | spec: 11 | selector: 12 | app: pushgateway 13 | type: NodePort 14 | ports: 15 | - name: pushgateway 16 | protocol: TCP 17 | port: 9091 18 | nodePort: 30901 19 | 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: Deployment 23 | metadata: 24 | name: pushgateway 25 | namespace: monitoring 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: pushgateway 31 | template: 32 | metadata: 33 | name: pushgateway 34 | labels: 35 | app: pushgateway 36 | spec: 37 | containers: 38 | - name: pushgateway 39 | image: prom/pushgateway:latest 40 | ports: 41 | - name: web 42 | containerPort: 9091 43 | -------------------------------------------------------------------------------- /AddOns/dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubernetes-dashboard 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | kubernetes.io/cluster-service: "true" 9 | spec: 10 | selector: 11 | k8s-app: kubernetes-dashboard 12 | ports: 13 | - port: 80 14 | targetPort: 9090 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: ReplicationController 20 | metadata: 21 | name: kubernetes-dashboard-v1.4.2 22 | namespace: kube-system 23 | labels: 24 | k8s-app: kubernetes-dashboard 25 | version: v1.4.2 26 | kubernetes.io/cluster-service: "true" 27 | spec: 28 | replicas: 1 29 | selector: 30 | k8s-app: kubernetes-dashboard 31 | template: 32 | metadata: 33 | labels: 34 | k8s-app: kubernetes-dashboard 35 | version: v1.4.2 36 | kubernetes.io/cluster-service: "true" 37 | annotations: 38 | scheduler.alpha.kubernetes.io/critical-pod: '' 39 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 40 | spec: 41 | containers: 42 | - name: kubernetes-dashboard 43 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.2 44 | resources: 45 | limits: 46 | cpu: 100m 47 | memory: 50Mi 48 | requests: 49 | cpu: 100m 50 | memory: 50Mi 51 | ports: 52 | - containerPort: 9090 53 | livenessProbe: 54 | httpGet: 55 | path: / 56 | port: 9090 57 | initialDelaySeconds: 30 58 | timeoutSeconds: 30 59 | 60 | -------------------------------------------------------------------------------- /AddOns/grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | app: grafana 9 | type: NodePort 10 | ports: 11 | - name: grafana 12 | port: 3000 13 | targetPort: 3000 14 | nodePort: 30000 15 | --- 16 | apiVersion: extensions/v1beta1 17 | kind: 
Deployment 18 | metadata: 19 | name: grafana 20 | namespace: monitoring 21 | labels: 22 | app: grafana 23 | spec: 24 | replicas: 1 25 | revisionHistoryLimit: 0 26 | template: 27 | metadata: 28 | labels: 29 | app: grafana 30 | spec: 31 | containers: 32 | - image: zilman/kube-grafana 33 | name: grafana 34 | imagePullPolicy: Always 35 | ports: 36 | - containerPort: 3000 37 | env: 38 | - name: GF_AUTH_BASIC_ENABLED 39 | value: "false" 40 | - name: GF_AUTH_ANONYMOUS_ENABLED 41 | value: "true" 42 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 43 | value: Admin 44 | #- name: GF_SERVER_ROOT_URL 45 | #value: /api/v1/proxy/namespaces/default/services/grafana/ 46 | -------------------------------------------------------------------------------- /ClusterStart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: clusterstart 5 | spec: 6 | activeDeadlineSeconds: 180 7 | template: 8 | metadata: 9 | name: clusterstart 10 | spec: 11 | containers: 12 | - name: clusterstart 13 | image: zilman/kube-addons 14 | command: ["kubectl"] 15 | args: ["create", "-f", "AddOns", "--recursive"] 16 | restartPolicy: Never 17 | -------------------------------------------------------------------------------- /Demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: clusterstart 5 | spec: 6 | activeDeadlineSeconds: 180 7 | template: 8 | metadata: 9 | name: clusterstart 10 | spec: 11 | containers: 12 | - name: clusterstart 13 | image: zilman/cncf-demo 14 | command: ["kubectl"] 15 | args: ["create", "-f", "Demo", "--recursive"] 16 | restartPolicy: Never 17 | -------------------------------------------------------------------------------- /Demo/ConfigMaps/countly.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: countly 6 | data: 7 | api.js: | 8 | var countlyConfig = { 9 | 10 | mongodb: { 11 | host: "mongos.default", 12 | db: "countly", 13 | port: 27017, 14 | max_pool_size: 500, 15 | }, 16 | 17 | api: { 18 | port: 3001, 19 | host: "localhost", 20 | max_sockets: 1024 21 | }, 22 | 23 | path: "", 24 | logging: { 25 | info: ["jobs", "push"], 26 | default: "warn" 27 | } 28 | 29 | }; 30 | 31 | module.exports = countlyConfig; 32 | frontend.js: | 33 | var countlyConfig = { 34 | 35 | mongodb: { 36 | host: "mongos.default", 37 | db: "countly", 38 | port: 27017, 39 | max_pool_size: 10, 40 | }, 41 | 42 | web: { 43 | port: 6001, 44 | host: "localhost", 45 | use_intercom: true 46 | }, 47 | 48 | path: "", 49 | cdn: "" 50 | 51 | }; 52 | 53 | module.exports = countlyConfig; 54 | 55 | -------------------------------------------------------------------------------- /Demo/Manifests/countly.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: countly 6 | name: countly 7 | spec: 8 | selector: 9 | app: countly 10 | type: NodePort 11 | ports: 12 | - name: countly 13 | protocol: TCP 14 | port: 80 15 | nodePort: 32080 16 | type: LoadBalancer 17 | 18 | --- 19 | 20 | apiVersion: extensions/v1beta1 21 | kind: Deployment 22 | metadata: 23 | name: countly 24 | spec: 25 | replicas: 1 26 | selector: 27 | matchLabels: 28 | app: countly 29 | template: 30 | metadata: 31 | name: countly 32 | labels: 33 | app: countly 34 | annotations: 35 | 
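        # Note: the pod.alpha.kubernetes.io/init-containers annotation below is the alpha,
        # pre-1.6 form of what later became spec.initContainers. It runs three containers
        # before Countly starts: one verifies that mongos.default resolves in cluster DNS,
        # one simply sleeps to give the shards time to register, and one connects with the
        # mongo client and runs listShards against the admin database.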
pod.alpha.kubernetes.io/init-containers: "[{\"name\": \"ns\", \"image\": \"busybox\", \"command\": [\"/bin/sh\", \"-c\", \"sleep 5; [[ $(nslookup mongos.default | tail -n +5 | wc -l) -ge 1 ]]\"]},{\"name\": \"wait\", \"image\": \"busybox\", \"command\": [\"/bin/sh\", \"-c\", \"sleep 10\"]},{\"name\": \"connect\", \"image\": \"mongo:3.2\", \"command\": [\"mongo\", \"--host\", \"mongos.default\", \"--eval\", \"db.getSiblingDB('admin').runCommand({listShards:1})\"]}]" 36 | spec: 37 | containers: 38 | - name: countly 39 | image: countly/countly-server:16.06 40 | ports: 41 | - name: countly 42 | containerPort: 80 -------------------------------------------------------------------------------- /Demo/Manifests/distcc-daemon.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: distcc 6 | name: distcc 7 | spec: 8 | selector: 9 | app: distcc 10 | clusterIP: None 11 | ports: 12 | - port: 3632 13 | 14 | --- 15 | 16 | apiVersion: extensions/v1beta1 17 | kind: DaemonSet 18 | metadata: 19 | name: distcc 20 | spec: 21 | template: 22 | metadata: 23 | labels: 24 | app: distcc 25 | name: distcc 26 | spec: 27 | containers: 28 | - image: zilman/distcc-daemon 29 | name: distcc 30 | ports: 31 | - containerPort: 3632 32 | hostPort: 3632 33 | -------------------------------------------------------------------------------- /Docker/Countly/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM countly/countly-server:16.06 2 | 3 | MAINTAINER Eugene Zilman 4 | 5 | # Add custom Countly configs - these in turn come from k8s volume 6 | ADD ./runit/countly-api.sh /etc/service/countly-api/run 7 | ADD ./runit/countly-dashboard.sh /etc/service/countly-dashboard/run 8 | 9 | -------------------------------------------------------------------------------- /Docker/Countly/runit/countly-api.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cp /etc/config/api.js /opt/countly/api/config.js 4 | chown countly:countly /opt/countly/api/config.js 5 | 6 | exec /sbin/setuser countly /usr/bin/nodejs /opt/countly/api/api.js 7 | -------------------------------------------------------------------------------- /Docker/Countly/runit/countly-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cp /etc/config/frontend.js /opt/countly/frontend/express/config.js 4 | chown -R countly:countly /opt/countly/frontend/express/config.js 5 | 6 | exec /sbin/setuser countly /usr/bin/nodejs /opt/countly/frontend/express/app.js 7 | -------------------------------------------------------------------------------- /Docker/Demo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/kubectl 2 | 3 | ADD Demo Demo 4 | -------------------------------------------------------------------------------- /Docker/Kernel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:wheezy 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | RUN apt-get -y update && apt-get -y install openssh-client coreutils fakeroot build-essential kernel-package wget xz-utils gnupg bc devscripts apt-utils initramfs-tools aria2 curl && apt-get clean 6 | -------------------------------------------------------------------------------- /Docker/KubeAddOns/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM zilman/kubectl 2 | 3 | ADD AddOns AddOns 4 | -------------------------------------------------------------------------------- /Docker/Wrk/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM williamyeh/wrk:4.0.2 2 | 3 | MAINTAINER Eugene Zilman 4 | 5 | RUN apk add --update curl --no-cache 6 | 7 | ADD runner.sh /wrk/ 8 | ADD send_summary.lua /wrk/ 9 | 10 | ENTRYPOINT ["/wrk/runner.sh"] 11 | -------------------------------------------------------------------------------- /Docker/Wrk/README.md: -------------------------------------------------------------------------------- 1 | ## Summary 2 | 3 | This is an Alpine based image with curl and [wrk](https://github.com/wg/wrk) (HTTP benchmarking tool). 4 | 5 | ## Usage 6 | 7 | To try it out, simply pass a URL: 8 | 9 | docker run -e URL="http://google.com" zilman/wrk 10 | 11 | runner.sh is a simple wrapper around the wrk command. 12 | 13 | Pass enviorment variables to override any of the defaults: 14 | 15 | - DURATION 16 | - CONNECTIONS 17 | - THREADS 18 | - TIMEOUT 19 | 20 | Summary results are curl'd to an endpoint as defined in the included lua script. 21 | Set your own location by exporting PUSHGATEWAY. 22 | 23 | #### Tip 24 | 25 | You can poke around by doing: 26 | docker run --entrypoint /bin/sh --rm -ti zilman/wrk 27 | -------------------------------------------------------------------------------- /Docker/Wrk/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | if [ -z "$URL" ]; then echo "URL Required" && exit 1; fi 6 | 7 | # Wrk Defaults 8 | SCRIPT=${SCRIPT-/wrk/send_summary.lua} 9 | DURATION=${DURATION-5} 10 | CONNECTIONS=${CONNECTIONS-5} 11 | THREADS=${THREADS-2} 12 | TIMEOUT=${TIMEOUT-3} 13 | 14 | # Global Defaults 15 | export hostIP=$(curl -m1 -s http://169.254.169.254/latest/meta-data/local-ipv4) 16 | export podID=$HOSTNAME 17 | 18 | hostIP=${hostIP:=127.0.0.1} 19 | 20 | export PUSHGATEWAY_SERVICE_PORT=${PUSHGATEWAY_SERVICE_PORT:=9091} 21 | export PUSHGATEWAY=${PUSHGATEWAY-pushgateway} 22 | 23 | wrk -s $SCRIPT -d$DURATION -c$CONNECTIONS -t$THREADS --timeout $TIMEOUT $URL 24 | -------------------------------------------------------------------------------- /Docker/Wrk/send_summary.lua: -------------------------------------------------------------------------------- 1 | local random = math.random 2 | 3 | local function uuid() 4 | local template ='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' 5 | return string.gsub(template, '[xy]', function (c) 6 | local v = (c == 'x') and random(0, 0xf) or random(8, 0xb) 7 | return string.format('%x', v) 8 | end) 9 | end 10 | 11 | function init(args) 12 | 13 | -- print(args[0]) 14 | 15 | wrk.path = wrk.path .. '&device_id=' .. 
uuid() 16 | -- TODO: write a proper add_query_parm function instead of appending at the end 17 | 18 | end 19 | 20 | -- function response(status, headers, body) 21 | -- print(status) 22 | -- todo: keep seperate counts per status code 23 | -- end 24 | 25 | function done(summary, latency, requests) 26 | 27 | local msg = "echo 'metric_name %d' | curl -s -m3 --data-binary @- http://$PUSHGATEWAY:$PUSHGATEWAY_SERVICE_PORT/metrics/job/wrk/name/$podID/instance/$hostIP" 28 | local t = os.execute(msg:format(summary.requests)) 29 | 30 | end 31 | -------------------------------------------------------------------------------- /Docker/azure-cli/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM azuresdk/azure-cli-python 2 | MAINTAINER "Hippie Hacker " 3 | COPY entrypoint.sh / 4 | ENTRYPOINT ["/entrypoint.sh"] 5 | CMD ["azure"] -------------------------------------------------------------------------------- /Docker/azure-cli/Readme.org: -------------------------------------------------------------------------------- 1 | purpose: to generate an azure.env for use with terraform. 2 | 3 | This is published on docker hub as [[https://hub.docker.com/r/generate/creds/][generate/creds:azure]] 4 | 5 | #+BEGIN_SRC shell 6 | $ docker run -v $(pwd)/data:/data -ti generate/creds:azure 7 | To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code GY7W7BMRZ to authenticate. 8 | Name CloudName SubscriptionId State IsDefault 9 | ------------- ----------- ------------------------------------ -------- ----------- 10 | Free Trial AzureCloud 5358e673-95e7-4cd8-9791-ca28dd5e3cbb Disabled True 11 | Pay-As-You-Go AzureCloud 70693672-7c0d-485f-ac08-06d458c80f0e Enabled 12 | 13 | Please enter the Name of the account you wish to use. If you do not see 14 | a valid account in the list press Ctrl+C to abort and create one. 15 | If you leave this blank we will use the Current account. 16 | > Pay-As-You-Go 17 | Using subscription_id: 70693672-7c0d-485f-ac08-06d458c80f0e 18 | Using tenant_id: 9996322a-93ac-43ae-80be-887a3e8194a1 19 | ==> Creating service principal 20 | Retrying role assignment creation: 1/36 21 | Retrying role assignment creation: 2/36 22 | ./data/azure.env created 23 | $ cat ./data/azure.env 24 | export ARM_SUBSCRIPTION_ID=70693672-XXXX-4858-ac08-06888888880e 25 | export ARM_TENANT_ID=9896828a-93ac-43ae-YYYY-887a3e8898a1 26 | export ARM_CLIENT_ID=968448ae-f9f9-ZZZZ-bf43-5c081da88975 27 | export ARM_CLIENT_SECRET=BBBBBBBB-8eaa-AAAA-aafe-75b02ad4ceba 28 | #+END_SRC 29 | -------------------------------------------------------------------------------- /Docker/azure-cli/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | azure_subscription_id= # Derived from the account after login 5 | 6 | askSubscription() { 7 | az account list -o table 8 | echo "" 9 | echo "Please enter the Name of the account you wish to use. If you do not see" 10 | echo "a valid account in the list press Ctrl+C to abort and create one." 11 | echo "If you leave this blank we will use the Current account." 
12 | echo -n "> " 13 | read azure_subscription_id 14 | if [ "$azure_subscription_id" != "" ]; then 15 | az account set --subscription $azure_subscription_id 16 | azure_subscription_id=$(az account show | jq -r .id) 17 | else 18 | azure_subscription_id=$(az account show | jq -r .id) 19 | fi 20 | ARM_SUBSCRIPTION_ID=$azure_subscription_id 21 | ARM_TENANT_ID=$(az account show | jq -r .tenantId) 22 | echo "Using subscription_id: $ARM_SUBSCRIPTION_ID" 23 | echo "Using tenant_id: $ARM_TENANT_ID" 24 | } 25 | 26 | createServicePrincipal() { 27 | echo "==> Creating service principal" 28 | CREDS_JSON=$( az ad sp create-for-rbac) 29 | ARM_TENANT_ID=$( echo ${CREDS_JSON} | jq -r .tenant ) 30 | ARM_CLIENT_ID=$( echo ${CREDS_JSON} | jq -r .appId ) 31 | ARM_CLIENT_SECRET=$( echo ${CREDS_JSON} | jq -r .password ) 32 | if [ $? -ne 0 ]; then 33 | echo "Error creating service principal: $azure_client_id" 34 | exit 1 35 | fi 36 | } 37 | 38 | showConfigs() { 39 | echo ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID 40 | echo ARM_TENANT_ID=$ARM_TENANT_ID 41 | echo ARM_CLIENT_ID=$ARM_CLIENT_ID 42 | echo ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET 43 | } 44 | 45 | az login > /dev/null 46 | askSubscription 47 | createServicePrincipal 48 | showConfigs > /data/azure.env 49 | echo "./data/azure.env created" 50 | echo 'sudo chown -R $(whoami):$(whoami) ./data' 51 | -------------------------------------------------------------------------------- /Docker/boinc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM phusion/baseimage:0.9.19 2 | 3 | MAINTAINER Eugene Zilman 4 | 5 | RUN apt update -y && \ 6 | apt install -y boinc-client && \ 7 | apt-get clean && \ 8 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 9 | 10 | RUN mkdir -p /var/lib/boinc-client/projects/www.worldcommunitygrid.org && \ 11 | mkdir -p /var/lib/boinc-client/slots && \ 12 | chown -R boinc:boinc /var/lib/boinc-client 13 | 14 | ADD runner.sh /var/lib/boinc-client 15 | ADD attach.sh /var/lib/boinc-client 16 | 17 | WORKDIR /var/lib/boinc-client 18 | 19 | ENTRYPOINT ["/var/lib/boinc-client/runner.sh"] 20 | -------------------------------------------------------------------------------- /Docker/boinc/README.md: -------------------------------------------------------------------------------- 1 | 2 | boinccmd --lookup_account http://www.worldcommunitygrid.org zilman zombocom 3 | status: Success 4 | poll status: operation in progress 5 | account key: d2804d9d05efdad427b69bc020d5492f 6 | 7 | pkill boinc 8 | 9 | /var/lib/boinc-client 10 | boinc & 11 | boinccmd --project_attach http://www.worldcommunitygrid.org d2804d9d05efdad427b69bc020d5492f 12 | 13 | weak account key is "better": 14 | 15 | boinccmd --project_attach http://www.worldcommunitygrid.org 1013367_21303863232c651457665d59cf936248 16 | 17 | /usr/bin/boinc --skip_cpu_benchmarks --exit_after_finish --fetch_minimal_work --exit_when_idle --abort_jobs_on_exit --no_gpus 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /Docker/boinc/attach.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while true; do 4 | 5 | if grep -q "Initialization complete" /var/lib/boinc-client/log; then 6 | exec boinccmd --project_attach http://www.worldcommunitygrid.org 1013367_21303863232c651457665d59cf936248 & 7 | break 8 | else 9 | sleep 2 10 | fi 11 | 12 | done 13 | -------------------------------------------------------------------------------- /Docker/boinc/boinc_rpc.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # inspired by: https://github.com/BOINC/boinc/blob/master/lib/gui_rpc_client.cpp 4 | 5 | NONCE=$(printf "\n\n\n\003" | nc localhost 31416) 6 | AUTH=$(cat gui_rpc_auth.cfg) 7 | HASH=$(echo "$NONCE$AUTH" | md5sum | awk '{print $1}') 8 | 9 | printf "\n\n$HASH\n\n\n\003" | nc localhost 31416 10 | 11 | : ' 12 | Oh my word, so sometimes it responds a bit and if one insert a long enough delay here then it is possible to send to the rpc port: 13 | 14 | 15 | 16 | 0 17 | 18 | 19 | 20 | IF and only IF all of this was part of the same tcp session. Ouch. 21 | ' 22 | -------------------------------------------------------------------------------- /Docker/boinc/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | /sbin/setuser boinc /var/lib/boinc-client/attach.sh & 5 | 6 | exec /sbin/setuser boinc /usr/bin/boinc --exit_after_finish --fetch_minimal_work --exit_when_idle --abort_jobs_on_exit --no_gpus >>/var/lib/boinc-client/log 2>&1 7 | -------------------------------------------------------------------------------- /Docker/cloudbuild.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: 'gcr.io/cloud-builders/docker' 3 | args: ['build', '-t', 'gcr.io/$PROJECT_ID/builder-test', 'kubectl/Dockerfile'] 4 | -------------------------------------------------------------------------------- /Docker/distcc-daemon/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/kernel 2 | MAINTAINER Eugene Zilman 3 | 4 | RUN apt-get install -y distcc distcc-pump 5 | 6 | COPY config /etc/default/distcc 7 | COPY runner.sh /runner.sh 8 | 9 | ENTRYPOINT ["/runner.sh"] 10 | 11 | -------------------------------------------------------------------------------- /Docker/distcc-daemon/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /Docker/distcc-daemon/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | distccd --daemon --allow 10.0.0.0/8 4 | sleep infinity 5 | -------------------------------------------------------------------------------- /Docker/distcc-master/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/kernel 2 | MAINTAINER Eugene Zilman 3 | 4 | RUN apt-get install -y distcc distcc-pump 5 | 6 | COPY config /etc/default/distcc 7 | COPY runner.sh /runner.sh 8 | 9 | ENTRYPOINT ["/runner.sh"] 10 | 11 | -------------------------------------------------------------------------------- /Docker/distcc-master/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /Docker/distcc-master/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | /etc/init.d/distcc start 6 | 7 | git clone --depth 1 git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git 8 | cd linux-stable && make defconfig 9 | 10 | export 
DISTCC_HOSTS="$(getent hosts distcc | awk '{ printf "%s,cpp,lzo ", $1 }')" 11 | export N_JOBS="$(echo $(getent hosts distcc | wc -l)+2 | bc)" 12 | 13 | distcc --show-hosts 14 | 15 | eval $(distcc-pump --startup) 16 | export PATH=/usr/lib/distcc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 17 | 18 | DISTCC_VERBOSE=1 make -j$N_JOBS 2>&1 | tee build.log 19 | 20 | sleep infinity 21 | -------------------------------------------------------------------------------- /Docker/distcc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stable 2 | MAINTAINER Eugene Zilman 3 | 4 | RUN apt-get clean && apt update && apt install -y 5 | 6 | RUN apt install -y kernel-package 7 | RUN apt install -y git build-essential 8 | RUN apt install -y distcc distcc-pump 9 | 10 | COPY config /etc/default/distcc 11 | COPY runner.sh /runner.sh 12 | 13 | ENTRYPOINT ["/runner.sh"] 14 | 15 | -------------------------------------------------------------------------------- /Docker/distcc/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /Docker/distcc/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /etc/init.d/distcc start 4 | sleep infinity 5 | -------------------------------------------------------------------------------- /Docker/echo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/falcon 2 | MAINTAINER Eugene Zilman 3 | 4 | COPY echo.py /app.py 5 | 6 | EXPOSE 8000 7 | CMD ["app:app"] 8 | -------------------------------------------------------------------------------- /Docker/echo/echo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import random 4 | import json 5 | 6 | import falcon 7 | 8 | 9 | class JSONResource(object): 10 | def on_get(self, request, response): 11 | json_data = {'message': "Hello, world!"} 12 | response.body = json.dumps(json_data) 13 | 14 | 15 | class PlaintextResource(object): 16 | def on_get(self, request, response): 17 | response.set_header('Content-Type', 'text/plain') 18 | response.body = b'OK' 19 | 20 | 21 | def append_headers(request, response, resource, params): 22 | for pair in request.get_param_as_list('append_header') or []: 23 | try: 24 | name, value = pair.split(',', 1) 25 | except: 26 | name, value = pair.split(',', 1), None 27 | response.append_header(name, value) 28 | 29 | 30 | def timeout(request, response, resource, params): 31 | if random.randrange(100) < sorted((0, request.get_param_as_int('timeout_probability') or 0, 100))[1]: 32 | secs = request.get_param_as_int('timeout_seconds') or 1 33 | raise falcon.HTTPServiceUnavailable('Temporarily Unavailable', 'Timed out, wait {} second'.format(secs), secs) 34 | 35 | 36 | def error(request, response, resource, params): 37 | if random.randrange(100) < sorted((0, request.get_param_as_int('error_probability') or 0, 100))[1]: 38 | raise falcon.HTTPInternalServerError('INTERNAL SERVER ERROR', 'The server encountered an unexpected condition that prevented it from fulfilling the request.') 39 | 40 | 41 | @falcon.before(timeout) 42 | @falcon.before(error) 43 | @falcon.before(append_headers) 44 | class EchoResource(object): 45 | def on_get(self, request, 
response): 46 | response.set_header('Content-Type', request.get_param('Content-Type') or 'text/plain') 47 | response.status = request.get_param('status') or '200 OK' 48 | response.data = request.get_param('body') or 'OK' 49 | 50 | 51 | app = falcon.API() 52 | app.add_route("/json", JSONResource()) 53 | app.add_route("/plaintext", PlaintextResource()) 54 | app.add_route("/echo", EchoResource()) 55 | 56 | 57 | if __name__ == "__main__": 58 | from wsgiref import simple_server 59 | 60 | httpd = simple_server.make_server('localhost', 8080, app) 61 | httpd.serve_forever() 62 | -------------------------------------------------------------------------------- /Docker/falcon/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/gunicorn 2 | MAINTAINER Eugene Zilman 3 | 4 | RUN pip install meinheld falcon 5 | 6 | COPY app.py / 7 | 8 | EXPOSE 8000 9 | CMD ["app:app"] 10 | -------------------------------------------------------------------------------- /Docker/falcon/app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | import falcon 5 | 6 | 7 | class Hello(object): 8 | def on_get(self, request, response): 9 | response.set_header('Content-Type', 'text/plain') 10 | response.body = b'Hello, world!' 11 | 12 | 13 | app = falcon.API() 14 | app.add_route("/", Hello()) 15 | 16 | 17 | if __name__ == "__main__": 18 | from wsgiref import simple_server 19 | 20 | httpd = simple_server.make_server('localhost', 8080, app) 21 | httpd.serve_forever() 22 | -------------------------------------------------------------------------------- /Docker/fluentd-kubectl/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fluent/fluentd:latest-onbuild 2 | MAINTAINER Eugene Zilman 3 | WORKDIR /home/fluent 4 | ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH 5 | ENV KUBE_LATEST_VERSION="v1.4.4" 6 | 7 | USER root 8 | 9 | RUN apk add --update ca-certificates \ 10 | && apk add --update -t deps curl \ 11 | && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \ 12 | && chmod +x /usr/local/bin/kubectl \ 13 | && apk del --purge deps \ 14 | && rm /var/cache/apk/* 15 | 16 | RUN apk --no-cache add sudo build-base ruby-dev && \ 17 | sudo -u fluent gem install fluent-plugin-secure-forward fluent-plugin-s3 && \ 18 | rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && sudo -u fluent gem sources -c && \ 19 | apk del sudo build-base ruby-dev 20 | 21 | USER fluent 22 | CMD exec fluentd -c /fluentd/etc/$FLUENTD_CONF -p /fluentd/plugins $FLUENTD_OPT 23 | 24 | -------------------------------------------------------------------------------- /Docker/fluentd-kubectl/fluent.conf: -------------------------------------------------------------------------------- 1 | 2 | @type exec 3 | tag joblog 4 | command sh -c "kubectl logs $(kubectl get pod -l job-name=${"job_name"} -a --output=jsonpath={.items..metadata.name})" 5 | keys message 6 | run_interval 5s 7 | 8 | 9 | 10 | @type s3 11 | 12 | s3_bucket stats.cncfdemo.io 13 | s3_region us-west-2 14 | 15 | path "fluentd/#{ENV["uuid"]}/" 16 | buffer_path /fluentd/log/s3_buffer 17 | 18 | time_slice_format %Y%m%d%H%M%S 19 | time_slice_wait 1s 20 | utc 21 | 22 | s3_object_key_format "%{path}#{ENV["step"]}.%{file_extension}" 23 | format json 24 | store_as json 25 | include_time_key true 26 | time_format %s 27 | overwrite true 28 | 29 | 30 | 
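Looking back at Docker/echo/echo.py above, the `/echo` route takes query parameters for fault injection (`error_probability`, `timeout_probability`, `timeout_seconds`) and for shaping the response (`status`, `body`, and repeated `append_header` name,value pairs). A minimal sketch of exercising it follows; the localhost:8080 address (gunicorn_conf.py binds 0.0.0.0:8080) and the use of the `requests` package are assumptions, not something the repo itself provides.

```python
# Sketch only: exercise the echo service's fault-injection parameters.
# Assumes the echo container is reachable at localhost:8080 and that
# the `requests` package is installed (neither is part of this repo).
import requests

resp = requests.get(
    "http://localhost:8080/echo",
    params={
        "error_probability": 25,         # ~25% of requests get an HTTP 500
        "timeout_probability": 10,       # ~10% get an HTTP 503
        "timeout_seconds": 2,            # Retry-After value used for the 503s
        "status": "200 OK",              # status echoed back on success
        "body": "hello",                 # body echoed back on success
        "append_header": "X-Demo,true",  # extra "name,value" response header
    },
)
print(resp.status_code, resp.headers.get("X-Demo"), resp.text)
```

Calling it repeatedly makes the probabilistic 500 and 503 responses visible.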
-------------------------------------------------------------------------------- /Docker/fluentd-reportstats/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fluent/fluentd:latest-onbuild 2 | MAINTAINER Eugene Zilman 3 | WORKDIR /home/fluent 4 | ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH 5 | 6 | USER root 7 | RUN apk --no-cache add sudo build-base ruby-dev && \ 8 | sudo -u fluent gem install fluent-plugin-secure-forward fluent-plugin-s3 && \ 9 | rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && sudo -u fluent gem sources -c && \ 10 | apk del sudo build-base ruby-dev 11 | 12 | EXPOSE 24284 13 | 14 | USER fluent 15 | CMD exec fluentd -c /fluentd/etc/$FLUENTD_CONF -p /fluentd/plugins $FLUENTD_OPT 16 | -------------------------------------------------------------------------------- /Docker/fluentd-reportstats/fluent.conf: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | path /fluentd/log/test.log 4 | pos_file /fluentd/log/test.log.pos 5 | tag batman 6 | read_from_head true 7 | format none 8 | 9 | 10 | 11 | @type file 12 | path /fluentd/log/test2.out 13 | 14 | 15 | 16 | @type s3 17 | 18 | s3_bucket stats.cncfdemo.io 19 | s3_region us-west-2 20 | path "fluentd/#{ENV["uuid"]}/" 21 | 22 | buffer_path /fluentd/log/s3_buffer 23 | 24 | time_slice_format %Y%m%d%H%M%S 25 | time_slice_wait 10s 26 | utc 27 | 28 | s3_object_key_format "%{path}#{ENV["step"]}.%{file_extension}.%{index}" 29 | format json 30 | store_as json 31 | include_time_key true 32 | time_format %s 33 | 34 | 35 | -------------------------------------------------------------------------------- /Docker/grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:3.1.1 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y curl 5 | 6 | COPY dashboards /dashboards 7 | COPY run.sh /run.sh 8 | 9 | EXPOSE 3000 10 | ENTRYPOINT /run.sh 11 | -------------------------------------------------------------------------------- /Docker/grafana/README.md: -------------------------------------------------------------------------------- 1 | # Roll your own Grafana Dashboards / Influx sink 2 | 3 | Use the webUI to build your dashboards, save the result json files in the directory here and bake the image. 4 | 5 | Note: this is actively worked on, ymmv. 6 | -------------------------------------------------------------------------------- /Docker/grafana/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HEADER_CONTENT_TYPE="Content-Type: application/json" 4 | HEADER_ACCEPT="Accept: application/json" 5 | 6 | GRAFANA_SERVICE_PORT=${GRAFANA_SERVICE_PORT:-3000} 7 | DASHBOARD_LOCATION=${DASHBOARD_LOCATION:-"/dashboards"} 8 | 9 | # Allow access to dashboards without having to log in 10 | export GF_AUTH_ANONYMOUS_ENABLED=${GF_AUTH_ANONYMOUS_ENABLED:-true} 11 | export GF_SERVER_HTTP_PORT=${GRAFANA_SERVICE_PORT} 12 | 13 | set -m 14 | echo "Starting Grafana in the background" 15 | exec /usr/sbin/grafana-server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana & 16 | 17 | echo "Waiting for Grafana to come up..." 18 | until $(curl -k --fail --output /dev/null --silent localhost:${GRAFANA_SERVICE_PORT}/api/org); do 19 | printf "." 20 | sleep 2 21 | done 22 | echo "Grafana is up and running." 23 | echo "Creating default datasource..." 
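# The AddDataSource function below registers the in-cluster Prometheus service
# (http://prometheus.monitoring:9090) as Grafana's default datasource through the
# Grafana HTTP API; the until loop keeps retrying until the API accepts the call.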
24 | 25 | AddDataSource() { 26 | curl 'http://localhost:3000/api/datasources' \ 27 | -X POST \ 28 | -H 'Content-Type: application/json;charset=UTF-8' \ 29 | --data-binary \ 30 | '{"name":"Prometheus","type":"prometheus","url":"http://prometheus.monitoring:9090","access":"proxy","isDefault":true}' 31 | } 32 | 33 | 34 | until AddDataSource; do 35 | echo 'Configuring Grafana...' 36 | sleep 1 37 | done 38 | echo 'Done!' 39 | 40 | 41 | echo "" 42 | echo "Importing default dashboards..." 43 | for filename in ${DASHBOARD_LOCATION}/*.json; do 44 | echo "Importing ${filename} ..." 45 | curl -k -i -XPOST --data "@${filename}" -H "${HEADER_ACCEPT}" -H "${HEADER_CONTENT_TYPE}" "localhost:${GRAFANA_SERVICE_PORT}/api/dashboards/import" 46 | echo "" 47 | echo "Done importing ${filename}" 48 | done 49 | echo "" 50 | echo "Bringing Grafana back to the foreground" 51 | fg 52 | 53 | -------------------------------------------------------------------------------- /Docker/gunicorn/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7 2 | MAINTAINER Eugene Zilman 3 | 4 | RUN pip install gunicorn 5 | 6 | COPY gunicorn_conf.py / 7 | 8 | ENTRYPOINT ["/usr/local/bin/gunicorn", "--config", "/gunicorn_conf.py"] 9 | 10 | 11 | -------------------------------------------------------------------------------- /Docker/gunicorn/gunicorn_conf.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import os 3 | import sys 4 | 5 | # Sane Defaults 6 | 7 | workers = multiprocessing.cpu_count() 8 | bind = '0.0.0.0:8080' 9 | keepalive = 120 10 | errorlog = '-' 11 | pidfile = 'gunicorn.pid' 12 | worker_class = "meinheld.gmeinheld.MeinheldWorker" 13 | 14 | def post_fork(server, worker): 15 | # Disable access log 16 | import meinheld.server 17 | meinheld.server.set_access_logger(None) 18 | 19 | # Override from ENV 20 | for k,v in os.environ.items(): 21 | if k.startswith("GUNICORN_"): 22 | key = k.split('_', 1)[1].lower() 23 | locals()[key] = v 24 | 25 | -------------------------------------------------------------------------------- /Docker/kubectl/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | ENV KUBE_LATEST_VERSION="v1.4.4" 4 | 5 | RUN apk add --update ca-certificates \ 6 | && apk add --update -t deps curl \ 7 | && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \ 8 | && chmod +x /usr/local/bin/kubectl \ 9 | && apk del --purge deps \ 10 | && rm /var/cache/apk/* 11 | 12 | ENTRYPOINT /usr/bin/tail -f /dev/null 13 | -------------------------------------------------------------------------------- /Images/base/README.md: -------------------------------------------------------------------------------- 1 | ## Centos7 base image 2 | 3 | This image is a minimally modified Centos7 configured with sensible defaults for hosting a Kubernetes cluster. 4 | 5 | ## Quickstart 6 | 7 | Simply install and configure [packer](https://www.packer.io/) and fork this repo to customize.
8 | 9 | > packer build packer.json 10 | 11 | ## Dependencies 12 | 13 | - Packer 0.11+ 14 | - Ansible 2.1+ installed ([installation instructions](http://docs.ansible.com/ansible/intro_installation.html)) 15 | -------------------------------------------------------------------------------- /Images/base/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ./roles 3 | host_key_checking = False 4 | 5 | remote_user = root 6 | private_key_file = ~/.ssh/your.key 7 | -------------------------------------------------------------------------------- /Images/base/base.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | region='us-west-2' 4 | productCode='aw0evgkw8e5c1q413zgy5pjce' 5 | 6 | AMI=$(aws --region $region ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=$productCode --query 'Images | [-1] | ImageId' --out text) 7 | 8 | echo $AMI 9 | 10 | # This is a convenience script to grab the latest CentOS7 AMI id. 11 | # A soon to be released version of Packer has a 'dynamic source AMI' feature 12 | # so one can specify the latest image right in the packer template. 13 | 14 | # Otherwise the output of this script would have to be injected into the packer template. 15 | -------------------------------------------------------------------------------- /Images/base/disable_tty.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sed -i.bak -e '/Defaults.*requiretty/s/^/#/' /etc/sudoers 3 | -------------------------------------------------------------------------------- /Images/base/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "builders": [{ 4 | "type": "amazon-ebs", 5 | "region": "us-west-2", 6 | "source_ami_filter": { 7 | "filters": { 8 | "virtualization-type": "hvm", 9 | "name": "*CentOS Linux 7 x86_64 HVM EBS*", 10 | "root-device-type": "ebs" }, 11 | "most_recent": true 12 | }, 13 | "instance_type": "c4.2xlarge", 14 | "ssh_username": "centos", 15 | "ssh_pty" : false, 16 | "ami_name": "cncfbase{{timestamp}}", 17 | "user_data_file": "disable_tty.sh" 18 | }], 19 | 20 | "provisioners": [{ 21 | "type": "ansible", 22 | "playbook_file": "playbook.yml", 23 | "user": "centos", 24 | "sftp_command": "/usr/libexec/openssh/sftp-server", 25 | "extra_arguments": [ "-vvv", "--extra-vars", "packer=yes" ] 26 | }] 27 | 28 | } 29 | -------------------------------------------------------------------------------- /Images/base/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: yes 4 | 5 | tasks: 6 | - selinux: state=disabled 7 | - copy: content="overlay" dest=/etc/modules-load.d/overlay.conf 8 | - copy: 9 | content: | 10 | net.bridge.bridge-nf-call-ip6tables = 1 11 | net.bridge.bridge-nf-call-iptables = 1 12 | dest: /usr/lib/sysctl.d/90-system.conf 13 | 14 | - yum: name=* state=latest 15 | -------------------------------------------------------------------------------- /Images/golden/README.md: -------------------------------------------------------------------------------- 1 | ## Run on specific host 2 | Provide a hosts inventory to the -i flag; note the trailing comma 3 | 4 | ```ansible-playbook playbook.yml -i 42.867.53.09, --step``` 5 | 6 | ## Useful ansible tips 7 | 8 | ansible-playbook playbook.yml --list-tasks 9 | --step, --start-at-task="foo" 10 | 11 | ## Centos7 
based golden Kubernetes image 12 | 13 | This image builds on the [base image](https://github.com/cncf/demo/tree/master/Images/base), 14 | a minimally modified Centos7 configured with sensible defaults for hosting a Kubernetes cluster. 15 | 16 | It bakes in everything needed to run Kubernetes master and/or minion nodes into one AMI. It is suggested to configure an instance to bootstrap as a minion or master via userdata. 17 | 18 | ## Configuration via Userdata 19 | 20 | 21 | Simply write a file named `kubernetes-master` _or_ `kubernetes-minion` and specify a `cluster_name` environment variable. That's it. 22 | 23 | 24 | ``` 25 | 26 | #!/bin/bash 27 | 28 | set -ex 29 | 30 | HOSTNAME_OVERRIDE=$(curl -s http://169.254.169.254/2007-01-19/meta-data/local-hostname | cut -d" " -f1) 31 | 32 | cat << EOF > /etc/sysconfig/kubernetes-{master,minion} 33 | 34 | CLUSTER_NAME={cncfdemo} 35 | KUBELET_HOSTNAME=--hostname-override=$HOSTNAME_OVERRIDE 36 | 37 | EOF 38 | 39 | ``` 40 | 41 | Note: The hostname override is an example specific to AWS. Adjust if needed. 42 | 43 | ## Customization Quickstart 44 | 45 | Simply install and configure [packer](https://www.packer.io/) and fork this repo to customize. 46 | 47 | > packer build packer.json 48 | 49 | ## Dependencies 50 | 51 | - Packer 0.11+ 52 | - Ansible 2.1+ installed ([installation instructions] (http://docs.ansible.com/ansible/intro_installation.html)) 53 | -------------------------------------------------------------------------------- /Images/golden/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ./roles 3 | host_key_checking = False 4 | 5 | remote_user = root 6 | private_key_file = ~/.ssh/cncf-aws.pem 7 | -------------------------------------------------------------------------------- /Images/golden/disable_tty.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sed -i.bak -e '/Defaults.*requiretty/s/^/#/' /etc/sudoers 3 | -------------------------------------------------------------------------------- /Images/golden/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "builders": [{ 4 | "type": "amazon-ebs", 5 | "region": "us-west-2", 6 | "source_ami_filter": { 7 | "filters": { 8 | "virtualization-type": "hvm", 9 | "name": "cncfbase*", 10 | "root-device-type": "ebs" 11 | }, 12 | "owners": ["750548967590"], 13 | "most_recent": true 14 | }, 15 | "instance_type": "c4.2xlarge", 16 | "ssh_username": "centos", 17 | "ssh_pty" : false, 18 | "ami_name": "cncfgolden{{timestamp}}", 19 | "user_data_file": "disable_tty.sh" 20 | }], 21 | 22 | "provisioners": [{ 23 | "type": "ansible", 24 | "playbook_file": "playbook.yml", 25 | "user": "centos", 26 | "sftp_command": "/usr/libexec/openssh/sftp-server", 27 | "extra_arguments": [ "--extra-vars", "packer=yes" ] 28 | }] 29 | 30 | } 31 | -------------------------------------------------------------------------------- /Images/golden/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: False 4 | become: yes 5 | 6 | tasks: 7 | 8 | - yum_repository: 9 | name: Kubernetes 10 | description: Kubernetes Repository 11 | baseurl: http://yum.kubernetes.io/repos/kubernetes-el7-x86_64 12 | gpgcheck: no 13 | 14 | - yum: name={{ item }} state=latest 15 | with_items: 16 | - docker 17 | - kubernetes-cni 18 | - kubectl 19 | - kubelet 20 | - kubeadm 21 | 22 | - lineinfile: 23 
| dest: /etc/sysconfig/docker-storage 24 | regexp: '^DOCKER_STORAGE_OPTIONS=' 25 | line: 'DOCKER_STORAGE_OPTIONS="--storage-driver=overlay"' 26 | 27 | - name: Temp remove of extra args from drop-in - upstream rpm sets incorrectly 28 | lineinfile: 29 | dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 30 | regexp: '^Environment="KUBELET_EXTRA_ARGS' 31 | state: absent 32 | 33 | - lineinfile: 34 | dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 35 | insertafter: '^Environment=\"KUBELET_AUTHZ_ARGS' 36 | line: 'Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --cloud-provider=aws"' 37 | 38 | - copy: 39 | content: | 40 | kind: MasterConfiguration 41 | apiVersion: kubeadm.k8s.io/v1alpha1 42 | cloudProvider: aws 43 | dest: /etc/kubernetes/kubeadm.conf 44 | 45 | - file: path=/tmp/helm state=directory 46 | - unarchive: 47 | src: "https://storage.googleapis.com/kubernetes-helm/helm-v2.4.2-linux-amd64.tar.gz" 48 | dest: /tmp/helm 49 | remote_src: True 50 | - copy: 51 | src: "/tmp/helm/linux-amd64/helm" 52 | dest: /usr/local/bin/helm 53 | owner: root 54 | group: root 55 | mode: 0700 56 | remote_src: True 57 | 58 | - copy: src=services/ dest=/etc/systemd/system/ mode=0644 59 | - service: name={{ item }} enabled=true 60 | with_items: 61 | - docker 62 | - kubelet 63 | - setup-network-environment 64 | - setup-kubernetes-masters 65 | - setup-kubernetes-minions 66 | 67 | -------------------------------------------------------------------------------- /Web/api/_src/Schemas/new.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-04/schema#", 3 | "title": "NewDemoModel", 4 | "required": ["Metadata"], 5 | "type": "object", 6 | "properties": { 7 | "Metadata": { 8 | "required": ["Masters", "Minions", "Provider", "RAM", "vcpu", "Storage"], 9 | "properties": { 10 | "Masters": { 11 | "properties": { 12 | "Instance": { 13 | "type": "string" 14 | }, 15 | "size": { 16 | "type": "integer" 17 | } 18 | }, 19 | "type": "object" 20 | }, 21 | "Minions": { 22 | "properties": { 23 | "Instance": { 24 | "type": "string" 25 | }, 26 | "size": { 27 | "type": "integer" 28 | } 29 | }, 30 | "type": "object" 31 | }, 32 | "Provider": { 33 | "type": "string" 34 | }, 35 | "RAM": { 36 | "type": "string" 37 | }, 38 | "Storage": { 39 | "type": "string" 40 | }, 41 | "vcpu": { 42 | "type": "integer" 43 | } 44 | }, 45 | "type": "object" 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /Web/api/_src/requirements.txt: -------------------------------------------------------------------------------- 1 | hashids 2 | jsonschema 3 | -------------------------------------------------------------------------------- /Web/api/_src/summary.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import json 4 | 5 | import botocore 6 | import boto3 7 | 8 | 9 | def respond(body=None, err=None): 10 | return { 11 | 'statusCode': '400' if err else '200', 12 | 'body': json.dumps(err.message) if err else json.dumps(body), 13 | 'headers': { 14 | 'Content-Type': 'application/json', 15 | }, 16 | } 17 | 18 | 19 | def handler(event, context): 20 | 21 | #print("Received event: " + json.dumps(event, indent=2)) # logs to CloudWatch 22 | bucket = boto3.resource('s3').Bucket('stats.cncfdemo.io') 23 | key = event['Records'][0]['s3']['object']['key'] 24 | 25 | blob = bucket.Object(key).get() 26 | finished = 
json.loads(blob['Body'].read()) 27 | 28 | try: 29 | blob2 = bucket.Object('summary/summary.json').get() 30 | summary = json.loads(blob2['Body'].read()) 31 | sorted_summary = sorted(summary['Results'], key = lambda k: k['timestart']) 32 | except: 33 | sorted_summary = [] 34 | 35 | another = finished['results'] 36 | 37 | another['id'] = finished['Metadata']['id'] 38 | another['timestart'] = finished['Metadata']['timestart'] 39 | another['timeend'] = finished['Metadata']['timeend'] 40 | 41 | sorted_summary.append(another) 42 | bucket.put_object(Key='summary/summary.json', Body=json.dumps({"Results" : sorted_summary}), ContentType='application/json', ACL='public-read') 43 | 44 | print("summaries: ", len(sorted_summary)) 45 | -------------------------------------------------------------------------------- /Web/api/_tests/gen_mock_summary.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import sys 6 | 7 | import time, datetime 8 | import json 9 | import random 10 | 11 | from hashids import Hashids 12 | 13 | hashids = Hashids(salt='grabfromenv') 14 | now = int(time.time()) 15 | 16 | N = int(sys.argv[1]) if 1 < len(sys.argv) else 2 17 | 18 | 19 | data = [] 20 | for _ in range (0,N): 21 | now += 1 22 | data.append({ 23 | 'id': hashids.encode(now), 24 | 'timestart': now, 25 | 'timeend': now + random.randint(850,1100), 26 | 'Boinc_Jobs': 0, 27 | 'CPU': random.randint(3,12), 28 | 'DistCC': random.randint(900,1424), 29 | 'HTTP_Requests': random.randint(2000000, 2542424), 30 | 'Memory': random.randint(16,32), 31 | 'Provider': 'AWS'}) 32 | 33 | print(json.dumps({"Results" : data})) 34 | -------------------------------------------------------------------------------- /Web/api/_tests/new/event.json: -------------------------------------------------------------------------------- 1 | { 2 | "body": { 3 | "Metadata": { 4 | "Masters": { 5 | "size": 1, 6 | "type": "m3.medium" 7 | }, 8 | "Minions": { 9 | "size": 3, 10 | "type": "m4.large" 11 | }, 12 | "Provider": "AWS", 13 | "RAM": "24GiB", 14 | "Storage": "250GB", 15 | "vcpu": 6 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /Web/api/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /Web/api/summary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: cncfdemo-summary 3 | environments: 4 | dev: 5 | profile: default 6 | region: us-west-2 7 | policy: 8 | resources: 9 | - arn: arn:aws:s3:::* 10 | actions: 11 | - "*" 12 | - arn: arn:aws:logs:*:*:* 13 | actions: 14 | - "*" 15 | 16 | lambda: 17 | description: cncfdemo summary view 18 | handler: summary.handler 19 | runtime: python2.7 20 | memory_size: 256 21 | timeout: 3 22 | 23 | -------------------------------------------------------------------------------- /Web/api/trace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: cncfdemo-start 3 | environments: 4 | dev: 5 | profile: default 6 | region: us-west-2 7 | policy: 8 | resources: 9 | - arn: arn:aws:s3:::* 10 | actions: 11 | - "*" 12 | - arn: arn:aws:logs:*:*:* 13 | actions: 14 | - "*" 15 | 16 | lambda: 17 | description: cncfdemo traces 18 | handler: trace.handler 19 | runtime: python2.7 20 | memory_size: 256 21 | timeout: 3 22 | 23 | 
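The `handler` in summary.py above only reads `Records[0].s3.object.key` from its triggering event, so it can be smoke-tested outside Lambda with a hand-built event. A minimal sketch, assuming it is run from `Web/api/_src` with AWS credentials that can reach the `stats.cncfdemo.io` bucket, and that the (made-up) key below already points at a finished-run JSON containing `results` and `Metadata`:

```python
# Hypothetical local smoke test; not part of the deployed Lambda.
from summary import handler

# Mirrors the S3 notification event shape; only this one key is read.
mock_event = {
    'Records': [
        {'s3': {'object': {'key': 'runs/example-run.json'}}}
    ]
}

# The handler never touches the context argument, so None is fine here.
handler(mock_event, None)
```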
-------------------------------------------------------------------------------- /Web/results/404.html: -------------------------------------------------------------------------------- 1 | 404 2 | -------------------------------------------------------------------------------- /Web/results/alpha.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws s3 sync . s3://alpha.cncfdemo.io --region us-west-2 --delete --exclude "search*" --exclude ".*" --exclude "*.sh" && \ 4 | aws s3 sync . s3://alpha.cncfdemo.io --region us-west-2 --delete --exclude "*" --include "search" --no-guess-mime-type --content-type text/html 5 | -------------------------------------------------------------------------------- /Web/results/chart.js: -------------------------------------------------------------------------------- 1 | 2 | var colors = d3.scale.category20(); 3 | var chart; 4 | nv.addGraph(function() { 5 | chart = nv.models.stackedAreaChart() 6 | .useInteractiveGuideline(true) 7 | .x(function(d) { return d[0] }) 8 | .y(function(d) { return d[1] }) 9 | .duration(300); 10 | 11 | chart.showControls(false) 12 | chart.style("expand"); 13 | 14 | chart.xAxis.tickFormat(function(d) { return d3.time.format('%H:%M')(new Date(d)) }); 15 | chart.yAxis.tickFormat(d3.format(',.4f')); 16 | chart.legend.vers('furious'); 17 | d3.select('#chart1') 18 | .datum(histcatexplong) 19 | .transition().duration(1000) 20 | .call(chart) 21 | .each('start', function() { 22 | setTimeout(function() { 23 | d3.selectAll('#chart1 *').each(function() { 24 | if(this.__transition__) 25 | this.__transition__.duration = 1; 26 | }) 27 | }, 0) 28 | }); 29 | nv.utils.windowResize(chart.update); 30 | return chart; 31 | }); 32 | 33 | 34 | function volatileChart(startPrice, volatility, numPoints) { 35 | var rval = []; 36 | var now =+new Date(); 37 | numPoints = numPoints || 100; 38 | for(var i = 1; i < numPoints; i++) { 39 | rval.push({x: now + i * 1000 * 60 * 60 * 24, y: startPrice}); 40 | var rnd = Math.random(); 41 | var changePct = 2 * volatility * rnd; 42 | if ( changePct > volatility) { 43 | changePct -= (2*volatility); 44 | } 45 | startPrice = startPrice + startPrice * changePct; 46 | } 47 | return rval; 48 | } 49 | 50 | wrk = volatileChart(25.0, 0.09,30); 51 | 52 | nv.addGraph(function() { 53 | var chart = nv.models.sparklinePlus(); 54 | chart.margin({left:70}) 55 | .x(function(d,i) { return i }) 56 | .showLastValue(true) 57 | .xTickFormat(function(d) { 58 | return d3.time.format('%M:%S')(new Date(wrk[d].x)) 59 | }); 60 | d3.select('#spark1') 61 | .datum(wrk) 62 | .call(chart); 63 | 64 | chart.alignValue(false); 65 | chart.showLastValue(false); 66 | chart.animate(false); 67 | return chart; 68 | }); 69 | -------------------------------------------------------------------------------- /Web/results/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/Web/results/favicon.ico -------------------------------------------------------------------------------- /Web/results/img/aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/Web/results/img/aws.png -------------------------------------------------------------------------------- /Web/results/img/logo_cncf.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/Web/results/img/logo_cncf.png -------------------------------------------------------------------------------- /Web/results/main2.css: -------------------------------------------------------------------------------- 1 | html, body { 2 | width: 100vw; 3 | min-height: 100vh; 4 | box-sizing: border-box; 5 | margin: 0; 6 | padding: 0; 7 | color: #212121; 8 | font-family: "Helvetica Neue", "Calibri Light", Roboto, sans-serif; 9 | -webkit-font-smoothing: antialiased; 10 | -moz-osx-font-smoothing: grayscale; 11 | letter-spacing: 0.02em; 12 | } 13 | 14 | body { display: flex; flex-flow: column nowrap; color: #7f7f7f; } 15 | 16 | *, *:before, *:after { box-sizing: inherit; } 17 | a { color: #039be5; text-decoration: none; background-color: transparent; -webkit-tap-highlight-color: transparent; } 18 | 19 | .flex-row { display: flex; flex-flow: row wrap; } 20 | .flex-row > * { align-self: center; } 21 | .space-between { justify-content: space-between; } 22 | .space-around { justify-content: space-around; } 23 | 24 | header { background-color: #ffffff; display: flex; flex-flow: column nowrap;} 25 | main { background-color: #f9f9f9; flex: 1; } 26 | footer { background-color: #333333; } 27 | 28 | .metadata { color: #333; font-size: 2rem; } 29 | 30 | 31 | #r1, #r2 { padding: 2rem; } 32 | #r2 { font-size: 0.625em; font-weight: 100; } 33 | #r2 > div > ul {list-style: none; font-size: 0.8em; padding: 0 0 0 0.2em; } 34 | 35 | #aws:before { 36 | background-image: url(img/aws.png); 37 | background-size: 50px 40px; 38 | content: ''; 39 | position: absolute; 40 | margin-left: -50px; 41 | width: 50px; 42 | height: 50px; 43 | background-position: left center; 44 | margin-top: -0.5rem; 45 | } 46 | 47 | #command { flex: 0 0 20rem; line-height: 1.5rem; font-size: 1rem; color: #7f7f7f; } 48 | #command span { display: inline-block; height: 32px; font-size: 13px; font-weight: 500; color: rgba(0,0,0,0.6); line-height: 32px; padding: 0 12px; border-radius: 16px; background-color: #e4e4e4; } 49 | 50 | footer > p { padding: 0 1rem; color: #ccc; } 51 | -------------------------------------------------------------------------------- /Web/results/release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws s3 sync . s3://beta.cncfdemo.io --region us-west-2 --delete --exclude "search*" --exclude ".*" --exclude "*.sh" && \ 4 | aws s3 sync . 
s3://beta.cncfdemo.io --region us-west-2 --delete --exclude "*" --include "search" --no-guess-mime-type --content-type text/html 5 | -------------------------------------------------------------------------------- /Web/results/search: -------------------------------------------------------------------------------- 1 | search.html -------------------------------------------------------------------------------- /Web/results/summary.json.bak: -------------------------------------------------------------------------------- 1 | { 2 | "Results": [{ 3 | "id": "joOKmJg", 4 | "timestart": 323232, 5 | "timeend": 323232, 6 | "Provider": "AWS", 7 | "CPU": 7, 8 | "Memory": 24, 9 | "DistCC": 1061, 10 | "HTTP_Requests": "2,432,234", 11 | "Boinc_Jobs": 0 12 | }, 13 | { 14 | "Provider": "AWS", 15 | "CPU": 6, 16 | "Memory": 24, 17 | "DistCC": 1299, 18 | "HTTP_Requests": "2,129,533", 19 | "Boinc_Jobs": 0 20 | 21 | }] 22 | 23 | 24 | } 25 | -------------------------------------------------------------------------------- /cncfdemo-cli/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Local install for now 3 | 4 | mkvirtualenv cncf 5 | pip install --editable . 6 | 7 | And that's it, `cncfdemo` should be created and added to your path. 8 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/Countly/configMaps/countly/api.js: -------------------------------------------------------------------------------- 1 | var countlyConfig = { 2 | 3 | mongodb: { 4 | host: "mongos.default", 5 | db: "countly", 6 | port: 27017, 7 | max_pool_size: 500, 8 | }, 9 | 10 | api: { 11 | port: 3001, 12 | host: "localhost", 13 | max_sockets: 1024 14 | }, 15 | 16 | path: "", 17 | logging: { 18 | info: ["jobs", "push"], 19 | default: "warn" 20 | } 21 | 22 | }; 23 | 24 | module.exports = countlyConfig; 25 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/Countly/configMaps/countly/frontend.js: -------------------------------------------------------------------------------- 1 | var countlyConfig = { 2 | 3 | mongodb: { 4 | host: "mongos.default", 5 | db: "countly", 6 | port: 27017, 7 | max_pool_size: 10, 8 | }, 9 | 10 | web: { 11 | port: 6001, 12 | host: "localhost", 13 | use_intercom: true 14 | }, 15 | 16 | path: "", 17 | cdn: "" 18 | 19 | }; 20 | 21 | module.exports = countlyConfig; 22 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/Countly/countly.yaml.j2: -------------------------------------------------------------------------------- 1 | {%- macro initContainers() -%} 2 | 3 | {"name": "ns", "image": "busybox", "command": ["/bin/sh", "-c", "sleep 5; [[ $(nslookup mongos.default | tail -n +5 | wc -l) -ge 1 ]]"]}, 4 | {"name": "wait", "image": "busybox", "command": ["/bin/sh", "-c", "sleep 10"]}, 5 | {"name": "connect", "image": "mongo:3.2", "command": ["mongo", "--host", "mongos.default", "--eval", "db.getSiblingDB('admin').runCommand({listShards:1})"]} 6 | 7 | {%- endmacro -%} 8 | 9 | apiVersion: v1 10 | kind: Service 11 | metadata: 12 | labels: 13 | name: countly 14 | name: countly 15 | spec: 16 | selector: 17 | app: countly 18 | type: NodePort 19 | ports: 20 | - name: countly 21 | protocol: TCP 22 | port: 80 23 | nodePort: 32080 24 | type: LoadBalancer 25 | 26 | --- 27 | 28 | apiVersion: extensions/v1beta1 29 | kind: Deployment 30 | metadata: 31 | name: countly 32 | spec: 33 | replicas: 1 34 
| selector: 35 | matchLabels: 36 | app: countly 37 | template: 38 | metadata: 39 | name: countly 40 | labels: 41 | app: countly 42 | annotations: 43 | pod.alpha.kubernetes.io/init-containers: "[{{ initContainers()|json_dump|replace('\\n','') }}]" 44 | spec: 45 | containers: 46 | - name: countly 47 | image: countly/countly-server:16.06 48 | ports: 49 | - name: countly 50 | containerPort: 80 51 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/distcc/README.md: -------------------------------------------------------------------------------- 1 | ### Usage 2 | 3 | runner.sh should self configure as master or slave and do approximately: 4 | 5 | ``` 6 | git clone --depth 1 git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git 7 | cd linux-stable && make defconfig 8 | 9 | export DISTCC_HOSTS=$(getent hosts distcc | awk '{ printf "%s,cpp,lzo ", $1 }') 10 | #distcc --show-hosts 11 | eval $(distcc-pump --startup) 12 | export PATH=/usr/lib/distcc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 13 | 14 | fakeroot make-kpkg --initrd --append-to-version=testbuild --revision=0.1 kernel_image 15 | ``` 16 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/distcc/distcc-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: distcc 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | app: distcc 10 | name: distcc 11 | spec: 12 | containers: 13 | - image: zilman/distcc-daemon 14 | name: distcc 15 | ports: 16 | - containerPort: 3632 17 | hostPort: 3632 18 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/distcc/distcc-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: distcc 6 | name: distcc 7 | spec: 8 | selector: 9 | app: distcc 10 | clusterIP: None 11 | ports: 12 | - port: 3632 13 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/echo/echo-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: echo-v1 5 | labels: 6 | k8s-app: echo 7 | spec: 8 | replicas: 2 9 | selector: 10 | k8s-app: echo 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: echo 15 | spec: 16 | nodeSelector: 17 | echo: "yes" 18 | containers: 19 | - name: echo 20 | image: zilman/echo:latest 21 | env: 22 | - name: GUNICORN_BIND 23 | value: 0.0.0.0:8000 24 | ports: 25 | - containerPort: 8000 26 | name: gunicorn-local 27 | protocol: TCP 28 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/echo/echo-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: echo 5 | namespace: default 6 | labels: 7 | k8s-app: echo 8 | spec: 9 | selector: 10 | k8s-app: echo 11 | ports: 12 | - name: dns 13 | port: 8000 14 | protocol: TCP 15 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cncfdemo 
create configmap demo --from-file . --recursive 4 | cncfdemo create -f . --recursive 5 | 6 | # kubectl get pods 7 | # kubectl logs -f mongors1-setup-k8cxn 8 | # kubectl logs -f mongors2-setup-wje6o 9 | # kubectl logs -f mongocfg-setup-tewjt 10 | # these can really only be scripted by listening for events from the api on a background thread 11 | 12 | # Optional step: python Utils/AWS/route53.py -elb $(./Utils/get_ingress.sh countly) -domain countly.cncfdemo.io 13 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/cncfdemo-cli/cncfdemo/__init__.py -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/DO/simple.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import time 4 | import requests 5 | 6 | # TODO: paramatize constants, intergrate as subcomand of cncfdemo-cli, add flag for master 7 | 8 | DESIRED = 3 9 | image = 22597876 10 | token = '' 11 | 12 | API_URL = 'https://api.digitalocean.com/v2/droplets' 13 | headers = {'Content-Type': 'application/json', 14 | 'Authorization': 'Bearer {}'.format(token)} 15 | 16 | names = ['Minion{}'.format(num) for num in range(1, DESIRED + 1)] 17 | data = {'image': image, 'names': names, 'region': 'nyc3', 'size': '512mb', 'private_networking': True} 18 | 19 | r = requests.post(API_URL, headers=headers, json=data) 20 | 21 | droplets = [(d['id'], d['name'],d['networks']['v4']) for d in r.json().get('droplets')] 22 | active = [] 23 | 24 | for droplet in droplets: 25 | droplet_id, name, private_ip = droplet 26 | while not private_ip: 27 | resp = requests.get(API_URL + '/' + str(droplet_id), headers=headers) 28 | try: 29 | private_ip = resp.json()['droplet']['networks']['v4'][0]['ip_address'] 30 | active.append((droplet_id, name, private_ip)) 31 | except: 32 | time.sleep(3) 33 | 34 | 35 | print active 36 | # TODO: droplet hostname/private_ip pairs need to be registered with external endpoint so DNS discovery works and cluster can boot. 
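# One possible shape for that registration step, sketched as a comment only;
# the endpoint below is a hypothetical placeholder, not an existing service:
#
#   for droplet_id, name, private_ip in active:
#       requests.post('https://registry.example.invalid/nodes',
#                     json={'hostname': name, 'private_ip': private_ip})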
37 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/DO/simple2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import time 4 | i 5 | iidsfsmport requests 6 | 7 | # TODO: paramatize constants, intergrate as subcomand of cncfdemo-cli, add flag for master 8 | 9 | DESIRED = 3 10 | image = 22597876 11 | token = '416337c44b27714dc13e1aecf4c1918a73f38cfde85a2b66f61d281aa4f821bc' 12 | 13 | API_URL = 'https://api.digitalocean.com/v2/droplets' 14 | headers = {'Content-Type': 'application/json', 15 | 'Authorization': 'Bearer {}'.format(token)} 16 | 17 | names = ['Minion{}'.format(num) for num in range(1, DESIRED + 1)] 18 | data = {'image': image, 'names': names, 'region': 'nyc3', 'size': '512mb', 'private_networking': True} 19 | 20 | r = requests.post(API_URL, headers=headers, json=data) 21 | 22 | droplets = [(d['id'], d['name'],d['networks']['v4']) for d in r.json().get('droplets')] 23 | active = [] 24 | 25 | for droplet in droplets: 26 | droplet_id, name, private_ip = droplet 27 | while not private_ip: 28 | resp = requests.get(API_URL + '/' + str(droplet_id), headers=headers) 29 | try: 30 | private_ip = resp.json()['droplet']['networks']['v4'][0]['ip_address'] 31 | active.append((droplet_id, name, private_ip)) 32 | except: 33 | time.sleep(3) 34 | 35 | 36 | print active 37 | # TODO: droplet hostname/private_ip pairs need to be registered with external endpoint so DNS discovery works and cluster can boot. 38 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/cncfdemo-cli/cncfdemo/bootstrap/__init__.py -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/Policies/lambda-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "ec2:Describe*", 7 | "Resource": "*" 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": [ 12 | "logs:CreateLogGroup", 13 | "logs:CreateLogStream", 14 | "logs:PutLogEvents" 15 | ], 16 | "Resource": "arn:aws:logs:*:*:*" 17 | }, 18 | { 19 | "Effect": "Allow", 20 | "Action": [ 21 | "route53:*" 22 | ], 23 | "Resource": [ 24 | "*" 25 | ] 26 | }, 27 | { 28 | "Effect": "Allow", 29 | "Action": [ 30 | "autoscaling:Describe*" 31 | ], 32 | "Resource": [ 33 | "*" 34 | ] 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/cncfdemo-cli/cncfdemo/bootstrap/aws/__init__.py -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/execution_plans/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/cncfdemo-cli/cncfdemo/bootstrap/aws/execution_plans/__init__.py -------------------------------------------------------------------------------- 
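The `lambda-policy.json` above appears intended for a Lambda execution role (EC2 describe, CloudWatch Logs, Route53, and autoscaling describe). A minimal sketch of wiring it up with boto3; the role name, policy name, and relative file path are assumptions for illustration:

```python
import json

import boto3

iam = boto3.client('iam')

# Standard trust policy allowing Lambda to assume the role.
assume_role = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "lambda.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }],
}

iam.create_role(RoleName='cncfdemo-lambda',
                AssumeRolePolicyDocument=json.dumps(assume_role))

# Attach the policy document above as an inline role policy.
with open('Policies/lambda-policy.json') as f:
    iam.put_role_policy(RoleName='cncfdemo-lambda',
                        PolicyName='cncfdemo-lambda-policy',
                        PolicyDocument=f.read())
```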
/cncfdemo-cli/cncfdemo/bootstrap/aws/utils.py: -------------------------------------------------------------------------------- 1 | from functools import partial, reduce 2 | import collections 3 | 4 | import sys 5 | import botocore 6 | import click 7 | import time 8 | 9 | 10 | class Action(collections.namedtuple('Action', [ "resource", "method", "arguments", "saveas" ])): 11 | def __new__(cls, resource, method, arguments, saveas=""): 12 | return super(Action, cls).__new__(cls, resource, method, arguments, saveas) 13 | 14 | 15 | def pluck(source, selector): 16 | return reduce(lambda d,k: d.get(k, {}), selector.split('.'), source) 17 | 18 | 19 | def unroll(pair): 20 | get, selector = pair 21 | selector = selector.split('.') 22 | item = selector.pop(0) 23 | return getattr(get(item), '.'.join(selector)) 24 | 25 | 26 | def walk(adict): 27 | for key, value in adict.iteritems(): 28 | if isinstance(value, dict): 29 | walk(value) 30 | elif isinstance(value, tuple) and isinstance(value[0], partial): 31 | adict[key] = unroll(value) 32 | elif isinstance(value, collections.Sequence): 33 | for item in value: 34 | if isinstance(item, dict): 35 | walk(item) 36 | return adict 37 | 38 | 39 | def execute2(context, actions): 40 | 41 | for a in map(lambda action: Action(*action), actions): 42 | 43 | try: 44 | 45 | if a.method == 'create_launch_configuration': 46 | click.echo('waiting some more..') 47 | time.sleep(10) # AWS API bug, remove in future 48 | 49 | resource = context[a.resource] 50 | arguments = walk(a.arguments) 51 | result = getattr(resource, a.method)(**arguments) 52 | click.echo("{}... OK".format(a.method)) 53 | if a.saveas: 54 | context[a.saveas] = result 55 | 56 | 57 | except botocore.exceptions.ClientError as e: 58 | 59 | Errors = ['InvalidKeyPair.Duplicate','InvalidGroup.Duplicate','InvalidPermission.Duplicate','EntityAlreadyExists','AlreadyExists', \ 60 | 'InvalidGroup.NotFound','NoSuchEntity','ValidationError','LimitExceeded','DependencyViolation', 'DryRunOperation'] 61 | 62 | if e.response['Error']['Code'] in Errors: 63 | click.echo(e.response['Error']['Message']) 64 | else: 65 | click.echo("Unexpected error: {}".format(e)) 66 | sys.exit("Aborting..") 67 | 68 | return context 69 | 70 | 71 | def DhcpConfigurations(region): 72 | domain_name = 'ec2.internal' if region == 'us-east-1' else '{}.compute.internal'.format(region) 73 | return [{'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']}, {'Key': 'domain-name', 'Values': ['{} k8s'.format(domain_name)]}] 74 | 75 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/bootstrap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import click 4 | from aws.cli import aws 5 | 6 | @click.group() 7 | def cli(): 8 | pass 9 | 10 | 11 | cli.add_command(aws) 12 | 13 | 14 | if __name__ == '__main__': 15 | cli() 16 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import click 4 | from aws.cli import aws 5 | 6 | 7 | @click.group() 8 | def cli(): 9 | pass 10 | 11 | 12 | @click.group() 13 | def bootstrap(): 14 | pass 15 | 16 | 17 | cli.add_command(bootstrap) 18 | bootstrap.add_command(aws) 19 | 20 | 21 | if __name__ == '__main__': 22 | cli() 23 | -------------------------------------------------------------------------------- 
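`execute2` in utils.py above resolves each `Action`'s `resource` name against the supplied context, calls the named boto3 method with the given arguments, and stores the result back into the context when `saveas` is set. A minimal sketch of driving it, assuming the module is importable as `cncfdemo.bootstrap.aws.utils` and AWS credentials are configured; the single VPC step is purely illustrative:

```python
import boto3

from cncfdemo.bootstrap.aws.utils import execute2

# Context maps the resource names referenced by the plan to boto3 objects.
context = {'ec2': boto3.resource('ec2')}

# Each action is a (resource, method, arguments, saveas) tuple.
plan = [
    ('ec2', 'create_vpc', {'CidrBlock': '10.0.0.0/16'}, 'vpc'),
]

context = execute2(context, plan)
print(context['vpc'].id)  # the created Vpc object was saved under 'vpc'
```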
/cncfdemo-cli/cncfdemo/cncf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import click 4 | 5 | from cncfdemo.bootstrap.main import bootstrap 6 | from cncfdemo.kubectl.cmd_create import create 7 | 8 | @click.group() 9 | def cli(): 10 | """Welcome to the Cloud Native Computing Foundation Demo""" 11 | pass 12 | 13 | 14 | cli.add_command(bootstrap) 15 | cli.add_command(create) 16 | 17 | 18 | if __name__ == '__main__': 19 | cli() 20 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/kubectl/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/cncfdemo-cli/cncfdemo/kubectl/__init__.py -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/kubectl/cmd_create.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | 6 | import yaml, json 7 | 8 | import glob2 9 | 10 | import click 11 | import jinja2 12 | 13 | from cncfdemo.kubectl.configmap import configmap 14 | from cncfdemo.kubectl.utils import create as kreate, json_dump 15 | 16 | 17 | @click.group() 18 | def cli(): 19 | pass 20 | 21 | 22 | @click.group(invoke_without_command=True) 23 | #@click.group() 24 | @click.option('-f', '--filename', type=click.Path(exists=True), help='Filename or directory to use to create the resource', required=False) 25 | @click.option('-R', '--recursive', is_flag=True, help='Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.') 26 | @click.option('--dry-run', is_flag=True, help='Do not submit to kubernetes apiserver') 27 | @click.option('--debug', is_flag=True, help='Print output to stdout') 28 | @click.pass_context 29 | def create(ctx, filename, recursive, dry_run, debug): 30 | """Either '-f' option or subcommand required.""" 31 | 32 | if ctx.invoked_subcommand: 33 | return 'defer to subcommand' 34 | 35 | if not filename: 36 | #click.echo('error: Missing option "-f".') 37 | click.echo(create.get_help(ctx)) 38 | sys.exit(0) 39 | 40 | realpath = os.path.realpath(filename) 41 | manifests = [] 42 | 43 | if os.path.isfile(filename): 44 | manifests.extend([realpath]) 45 | 46 | if os.path.isdir(filename): 47 | if recursive: 48 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.j2')]) 49 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.yml')]) 50 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.yaml')]) 51 | manifests = [f for f in manifests if os.path.isfile(f)] 52 | else: 53 | manifests.extend([realpath+'/'+f for f in os.listdir(realpath) if os.path.isfile(realpath+'/'+f) and f.endswith(('.j2','.yaml','.yml'))]) 54 | 55 | 56 | if not manifests: 57 | click.echo('no manifest files found') 58 | sys.exit(0) 59 | 60 | if debug: 61 | click.echo(manifests) 62 | 63 | for manifest in manifests: 64 | definitions = None 65 | 66 | t = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(os.path.realpath(manifest)))) 67 | t.filters['json_dump'] = json_dump 68 | definitions = t.get_template(os.path.basename(manifest)).render() 69 | 70 | if debug: 71 | print definitions if definitions else '' 72 | 73 | for definition in yaml.load_all(definitions): 74 | # import ipdb; ipdb.set_trace() 75 | if not dry_run: 76 | resp, 
status = kreate(definition) 77 | 78 | 79 | cli.add_command(create) 80 | create.add_command(configmap) 81 | 82 | 83 | if __name__ == '__main__': 84 | create() 85 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/kubectl/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import jinja2 4 | import json 5 | 6 | import click 7 | import requests 8 | 9 | 10 | def json_dump(foo): 11 | return json.dumps(foo)[1:-1] 12 | 13 | 14 | def create(definition, overrides={}): 15 | 16 | defaults = { 'scheme': 'http', 17 | 'host': 'localhost', 18 | 'port': '8001', 19 | 'path': 'api', 20 | 'apiVersion': 'v1', 21 | 'namespace': 'default' 22 | } 23 | 24 | endpoint = defaults.copy() 25 | endpoint.update(overrides) 26 | endpoint.update(definition) 27 | 28 | endpoint['path'] += 's' if not endpoint['apiVersion'] == 'v1' else '' 29 | endpoint['kind'] += 's' if not endpoint.get('kind','').endswith('s') else '' 30 | endpoint['kind'] = endpoint['kind'].lower() 31 | 32 | url = '{scheme}://{host}:{port}/{path}/{apiVersion}/namespaces/{namespace}/{kind}'.format(**endpoint) 33 | 34 | #import ipdb; ipdb.set_trace() 35 | r = requests.post(url, json=definition) 36 | response = json.loads(r.content) 37 | 38 | if r.ok: 39 | click.echo('{} "{}" created'.format(response['kind'], response['metadata']['name'])) 40 | else: 41 | click.echo('Error from server: error when creating "{}": {}'.format(response['details']['name'], response['message'])) 42 | 43 | return r.content, r.ok 44 | 45 | 46 | if __name__ == "__main__": 47 | pass 48 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/utils/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import datetime 5 | 6 | import requests 7 | import imghdr 8 | 9 | 10 | def getimg(url): 11 | r = requests.get(url) 12 | return r.content if r.ok else '' 13 | 14 | 15 | def saveimg(img, path, ext, name='image'): 16 | with open(path+'/'+name+'.'+ext, 'wb') as f: 17 | f.write(img) 18 | 19 | 20 | def grabimg(url, path='.'): 21 | img = getimg(URL) 22 | imgtype = imghdr.what(None, img) 23 | return False if not imgtype else saveimg(img, path, imgtype) 24 | 25 | 26 | def rfc3339(datetime_obj=datetime.datetime.now()): 27 | return datetime_obj.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z' 28 | 29 | 30 | def makedir3339(path='.'): 31 | dir = '/'.join((path, rfc3339())) 32 | os.makedirs('/'.join((path, rfc3339()))) 33 | return dir 34 | 35 | 36 | if __name__ == "__main__": 37 | 38 | # Just a check, will remove this 39 | 40 | URL = 'https://upload.wikimedia.org/wikipedia/commons/thumb/b/b9/Caspar_David_Friedrich_-_Wanderer_above_the_sea_of_fog.jpg/600px-Caspar_David_Friedrich_-_Wanderer_above_the_sea_of_fog.jpg' 41 | 42 | grabimg(URL, path=makedir3339()) 43 | -------------------------------------------------------------------------------- /cncfdemo-cli/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='cncf', 5 | version='0.1', 6 | packages=['cncfdemo'], 7 | include_package_data=True, 8 | install_requires=[ 9 | 'click', 10 | 'requests', 11 | 'glob2', 12 | 'pyyaml', 13 | 'jinja2', 14 | 'boto3', 15 | ], 16 | entry_points=''' 17 | [console_scripts] 18 | cncfdemo=cncfdemo.cncf:cli 19 | ''', 20 | ) 21 | 
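The `create()` helper in kubectl/utils.py above assembles the apiserver path from its defaults merged with the manifest itself, then POSTs the manifest. A minimal usage sketch, assuming `kubectl proxy` is listening on the default `localhost:8001`; the ConfigMap contents are illustrative only:

```python
from cncfdemo.kubectl.utils import create

definition = {
    'apiVersion': 'v1',
    'kind': 'ConfigMap',
    'metadata': {'name': 'demo-settings'},
    'data': {'cluster_name': 'cncfdemo'},
}

# POSTs to http://localhost:8001/api/v1/namespaces/default/configmaps and
# echoes 'ConfigMap "demo-settings" created' on success.
body, ok = create(definition)
```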
-------------------------------------------------------------------------------- /discovery/README.md: -------------------------------------------------------------------------------- 1 | # Hosted discovery service for Kubeadm cluster bootstrap 2 | 3 | This is fashioned after the ideas described in [running your own etc discovery service](https://coreos.com/os/docs/latest/cluster-discovery.html#running-your-own-discovery-service). 4 | You may be familiar with the `discovery.etcd.io` endpoint for bootstraping generic etcd clusters, this is the same idea but specifically for Kubernetes clusters with Kubeadm. 5 | 6 | 7 | ### Before starting the cluster - get a new token 8 | 9 | ``` 10 | $ token=$(curl -s https://discovery.cncfdemo.io/new) 11 | $ echo $token 12 | cncfci.J7eoGQxsQsAWaE4V 13 | ``` 14 | 15 | ### On the master - register master ip to to the token 16 | 17 | ``` 18 | $ token=cncfci.J7eoGQxsQsAWaE4V 19 | $ master_ip=$(hostname -I | cut -d" " -f 1) 20 | $ echo $master_ip 21 | 172.42.42.42 22 | $ curl -s https://discovery.cncfdemo.io/$token?ip=$master_ip 23 | 172.42.42.42:6443 24 | $ kubeadm init --token $token 25 | ``` 26 | 27 | ### On the nodes - discover master ip from the token 28 | 29 | ``` 30 | $ token=cncfci.J7eoGQxsQsAWaE4V 31 | $ master_ip=$(curl -s https://discovery.cncfdemo.io/$token) 32 | $ echo $master_ip 33 | 172.42.42.42:6443 34 | $ kubeadm join --token $token $master_ip 35 | ``` 36 | 37 | The `discovery.cncfdemo.io` endpoint is hosted with AWS API Gateway + Lambda and stores the token and ip pairs in dynamodb for a short amount of time. Instructions on how to roll your own (with or without lambda) coming soon. 38 | -------------------------------------------------------------------------------- /provisioning/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | MAINTAINER "Denver Williams " 3 | ENV KUBECTL_VERSION=v1.5.2 4 | ENV HELM_VERSION=v2.4.1 5 | ENV GCLOUD_VERSION=150.0.0 6 | ENV AWSCLI_VERSION=1.11.75 7 | ENV AZURECLI_VERSION=2.0.2 8 | ENV PACKETCLI_VERSION=1.33 9 | ENV TERRAFORM_VERSION=0.9.4 10 | ENV ARC=amd64 11 | 12 | # Install AWS / AZURE CLI Deps 13 | RUN apk update 14 | RUN apk add --update git bash util-linux wget tar curl build-base jq \ 15 | py-pip groff less openssh bind-tools python python-dev libffi-dev openssl-dev 16 | 17 | # no way to pin this packet-cli at the moment 18 | RUN go get -u github.com/ebsarr/packet 19 | RUN pip install packet-python==${PACKETCLI_VERSION} argh tabulate 20 | RUN pip install azure-cli==${AZURECLI_VERSION} 21 | RUN pip install awscli==${AWSCLI_VERSION} 22 | 23 | RUN apk --purge -v del py-pip && \ 24 | rm /var/cache/apk/* 25 | 26 | # Install Google Cloud SDK 27 | RUN wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GCLOUD_VERSION}-linux-x86.tar.gz && \ 28 | tar xvfz google-cloud-sdk-${GCLOUD_VERSION}-linux-x86.tar.gz && \ 29 | ./google-cloud-sdk/install.sh -q 30 | 31 | 32 | #Install Kubectl 33 | RUN wget -O /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/$ARC/kubectl && \ 34 | chmod +x /usr/local/bin/kubectl 35 | 36 | #Install helm 37 | RUN wget https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz && \ 38 | tar xvzf helm-${HELM_VERSION}-linux-amd64.tar.gz && \ 39 | mv linux-amd64/helm /usr/local/bin && \ 40 | rm -rf helm-*gz linux-amd64 41 | 42 | # Install Terraform 43 | RUN wget 
https://releases.hashicorp.com/terraform/$TERRAFORM_VERSION/terraform_"${TERRAFORM_VERSION}"_linux_$ARC.zip 44 | RUN unzip terraform*.zip -d /usr/bin 45 | 46 | # Install CFSSL 47 | RUN go get -u github.com/cloudflare/cfssl/cmd/cfssl && \ 48 | go get -u github.com/cloudflare/cfssl/cmd/... 49 | 50 | # Install Gzip+base64 Provider 51 | RUN go get -u github.com/jakexks/terraform-provider-gzip && \ 52 | echo providers { >> ~/.terraformrc && \ 53 | echo ' gzip = "terraform-provider-gzip"' >> ~/.terraformrc && \ 54 | echo } >> ~/.terraformrc 55 | 56 | #Add Terraform Modules 57 | 58 | COPY provision.sh /cncf/ 59 | RUN chmod +x /cncf/provision.sh 60 | #ENTRYPOINT ["/cncf/provision.sh"] 61 | WORKDIR /cncf/ 62 | #CMD ["aws-deploy"] 63 | -------------------------------------------------------------------------------- /provisioning/aws/Readme.mkd: -------------------------------------------------------------------------------- 1 | ## Prerequisites 2 | * [docker](https://docker.io/) 3 | 4 | * AWS User with following Permissions: 5 | - AmazonEC2FullAccess 6 | - AmazonS3FullAccess 7 | - AWSCodeDeployFullAccess 8 | - AmazonRoute53DomainsFullAccess 9 | - AmazonRoute53FullAccess 10 | - IAMFullAccess 11 | - IAMUserChangePassword 12 | 13 | * Must use a config config from the repo data/terraform.tfvars 14 | 15 | ## export AWS Authentication 16 | 17 | ``` 18 | export AWS_ACCESS_KEY_ID="YOUR_AWS_KEY_ID" 19 | export AWS_SECRET_ACCESS_KEY="YOUR_AWS_SECRET_KEY" 20 | ``` 21 | 22 | ## create AWS Kubernetes Endpoint 23 | 24 | ``` 25 | docker run -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ 26 | -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ 27 | -v $(pwd)/data:/cncf/data create/aws 28 | ``` 29 | 30 | ## configure kubectl on local system 31 | 32 | ``` 33 | sudo chown -R $(whoami):$(whoami) data/ 34 | export KUBECONFIG=$(pwd)/data/kubeconfig 35 | $ kubectl get nodes 36 | NAME STATUS AGE 37 | ip-10-0-10-10.ap-southeast-2.compute.internal Ready,SchedulingDisabled 7m 38 | ip-10-0-10-11.ap-southeast-2.compute.internal Ready,SchedulingDisabled 6m 39 | ip-10-0-10-12.ap-southeast-2.compute.internal Ready,SchedulingDisabled 7m 40 | ip-10-0-10-51.ap-southeast-2.compute.internal Ready 6m 41 | ip-10-0-11-7.ap-southeast-2.compute.internal Ready 6m 42 | ip-10-0-12-68.ap-southeast-2.compute.internal Ready 6m 43 | ``` 44 | 45 | ## data folder contains certs + kubeconfig 46 | 47 | It also contains a json file containing details on current cluster state. 
48 | 49 | ``` 50 | $ sudo cat ./data/kubeconfig 51 | apiVersion: v1 52 | clusters: 53 | - cluster: 54 | certificate-authority: .cfssl/ca.pem 55 | server: https://kz8s-apiserver-test-453655923.ap-southeast-2.elb.amazonaws.com 56 | name: cluster-test 57 | contexts: 58 | - context: 59 | cluster: cluster-test 60 | user: admin-test 61 | name: test 62 | current-context: test 63 | kind: Config 64 | preferences: {} 65 | users: 66 | - name: admin-test 67 | user: 68 | client-certificate: .cfssl/k8s-admin.pem 69 | client-key: .cfssl/k8s-admin-key.pem 70 | ``` 71 | 72 | 73 | ## destroy AWS Kubernetes Endpoint 74 | 75 | ``` 76 | docker run -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ 77 | -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ 78 | -v $(pwd)/data:/cncf/data terminate/aws 79 | ``` 80 | -------------------------------------------------------------------------------- /provisioning/aws/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { } 2 | provider "gzip" {compressionlevel = "BestCompression"} 3 | 4 | # configured via: 5 | # $ export AWS_ACCESS_KEY_ID="anaccesskey" 6 | # $ export AWS_SECRET_ACCESS_KEY="asecretkey" 7 | # $ export AWS_DEFAULT_REGION="us-west-2" 8 | # https://www.terraform.io/docs/providers/aws/#environment-variables 9 | -------------------------------------------------------------------------------- /provisioning/aws/cert.tf: -------------------------------------------------------------------------------- 1 | # Gen Certs 2 | resource "null_resource" "ssl_gen" { 3 | 4 | provisioner "local-exec" { 5 | command = < ${ var.data_dir }/${ var.aws_key_name }.pem 10 | chmod 400 ${ var.data_dir }/${ var.aws_key_name }.pem 11 | EOF 12 | } 13 | } 14 | 15 | resource "null_resource" "dummy_dependency2" { 16 | depends_on = [ "null_resource.aws_keypair" ] 17 | } 18 | -------------------------------------------------------------------------------- /provisioning/aws/modules/bastion/ec2.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "bastion" { 2 | ami = "${ var.ami_id }" 3 | associate_public_ip_address = true 4 | instance_type = "${ var.instance_type }" 5 | key_name = "${ var.key_name }" 6 | 7 | # TODO: force private_ip to prevent collision with etcd machines 8 | 9 | source_dest_check = false 10 | subnet_id = "${ element( split(",", var.subnet_ids), 0 ) }" 11 | 12 | tags { 13 | builtWith = "terraform" 14 | kz8s = "${ var.name }" 15 | Name = "kz8s-bastion" 16 | role = "bastion" 17 | } 18 | 19 | user_data = "${ data.template_file.user-data.rendered }" 20 | 21 | vpc_security_group_ids = [ 22 | "${ var.security_group_id }", 23 | ] 24 | } 25 | 26 | data "template_file" "user-data" { 27 | template = "${ file( "${ path.module }/user-data.yml" )}" 28 | 29 | vars { 30 | internal_tld = "${ var.internal_tld }" 31 | } 32 | } 33 | 34 | resource "null_resource" "dummy_dependency" { 35 | depends_on = [ "aws_instance.bastion" ] 36 | } 37 | -------------------------------------------------------------------------------- /provisioning/aws/modules/bastion/input.tf: -------------------------------------------------------------------------------- 1 | variable "ami_id" {} 2 | variable "instance_type" {} 3 | variable "internal_tld" {} 4 | variable "key_name" {} 5 | variable "name" {} 6 | variable "security_group_id" {} 7 | variable "subnet_ids" {} 8 | variable "vpc_id" {} 9 | -------------------------------------------------------------------------------- /provisioning/aws/modules/bastion/output.tf: 
-------------------------------------------------------------------------------- 1 | output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "ip" { value = "${ aws_instance.bastion.public_ip }" } 3 | -------------------------------------------------------------------------------- /provisioning/aws/modules/bastion/user-data.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | --- 4 | coreos: 5 | update: 6 | reboot-strategy: etcd-lock 7 | 8 | etcd2: 9 | discovery-srv: ${ internal_tld } 10 | proxy: on 11 | 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | -------------------------------------------------------------------------------- /provisioning/aws/modules/dns/dns.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route53_zone" "internal" { 2 | comment = "Kubernetes cluster DNS (internal)" 3 | name = "${ var.internal_tld }" 4 | tags { 5 | builtWith = "terraform" 6 | KubernetesCluster = "${ var.name }" 7 | Name = "k8s-${ var.name }" 8 | } 9 | vpc_id = "${ var.vpc_id }" 10 | } 11 | 12 | resource "aws_route53_record" "A-etcd" { 13 | name = "etcd" 14 | records = [ "${ var.master_ips }" ] 15 | ttl = "300" 16 | type = "A" 17 | zone_id = "${ aws_route53_zone.internal.zone_id }" 18 | } 19 | 20 | resource "aws_route53_record" "A-etcds" { 21 | name = "etcd${ count.index+1 }" 22 | count = "${ var.master_node_count }" 23 | ttl = "300" 24 | type = "A" 25 | records = [ "${ element(var.master_ips, count.index) }" ] 26 | zone_id = "${ aws_route53_zone.internal.zone_id }" 27 | } 28 | 29 | resource "aws_route53_record" "CNAME-master" { 30 | name = "master" 31 | records = [ "etcd.${ var.internal_tld }" ] 32 | ttl = "300" 33 | type = "CNAME" 34 | zone_id = "${ aws_route53_zone.internal.zone_id }" 35 | } 36 | 37 | resource "aws_route53_record" "etcd-client-tcp" { 38 | name = "_etcd-client._tcp" 39 | ttl = "300" 40 | type = "SRV" 41 | records = [ "${ formatlist("0 0 2379 %v", aws_route53_record.A-etcds.*.fqdn) }" ] 42 | zone_id = "${ aws_route53_zone.internal.zone_id }" 43 | } 44 | 45 | resource "aws_route53_record" "etcd-server-tcp" { 46 | name = "_etcd-server-ssl._tcp" 47 | ttl = "300" 48 | type = "SRV" 49 | records = [ "${ formatlist("0 0 2380 %v", aws_route53_record.A-etcds.*.fqdn) }" ] 50 | zone_id = "${ aws_route53_zone.internal.zone_id }" 51 | } 52 | 53 | resource "null_resource" "dummy_dependency" { 54 | depends_on = [ 55 | "aws_route53_record.etcd-server-tcp", 56 | "aws_route53_record.A-etcd", 57 | ] 58 | } 59 | -------------------------------------------------------------------------------- /provisioning/aws/modules/dns/input.tf: -------------------------------------------------------------------------------- 1 | variable "internal_tld" {} 2 | variable "name" {} 3 | variable "vpc_id" {} 4 | variable "master_ips" { type = "list"} 5 | variable "master_node_count" {} 6 | 7 | -------------------------------------------------------------------------------- /provisioning/aws/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "internal_name_servers" { value = "${ aws_route53_zone.internal.name_servers }" } 3 | output "internal_zone_id" { value = "${ aws_route53_zone.internal.zone_id }" } 4 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/cloud-config.tf: 
-------------------------------------------------------------------------------- 1 | resource "gzip_me" "ca" { 2 | input = "${ var.ca }" 3 | } 4 | 5 | resource "gzip_me" "k8s_etcd" { 6 | input = "${ var.k8s_etcd }" 7 | } 8 | 9 | resource "gzip_me" "k8s_etcd_key" { 10 | input = "${ var.k8s_etcd_key }" 11 | } 12 | 13 | resource "gzip_me" "k8s_apiserver" { 14 | input = "${ var.k8s_apiserver }" 15 | } 16 | 17 | resource "gzip_me" "k8s_apiserver_key" { 18 | input = "${ var.k8s_apiserver_key }" 19 | } 20 | 21 | data "template_file" "kube-apiserver" { 22 | template = "${ file( "${ path.module }/kube-apiserver.yml" )}" 23 | 24 | vars { 25 | internal_tld = "${ var.internal_tld }" 26 | service_cidr = "${ var.service_cidr }" 27 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 28 | kubelet_image_url = "${ var.kubelet_image_url }" 29 | kubelet_image_tag = "${ var.kubelet_image_tag }" 30 | } 31 | } 32 | 33 | resource "gzip_me" "kube-apiserver" { 34 | input = "${ data.template_file.kube-apiserver.rendered }" 35 | } 36 | 37 | data "template_file" "cloud-config" { 38 | count = "${ var.master_node_count }" 39 | template = "${ file( "${ path.module }/cloud-config.yml" )}" 40 | 41 | vars { 42 | cluster_domain = "${ var.cluster_domain }" 43 | cluster-token = "etcd-cluster-${ var.name }" 44 | dns_service_ip = "${ var.dns_service_ip }" 45 | fqdn = "etcd${ count.index + 1 }.${ var.internal_tld }" 46 | hostname = "etcd${ count.index + 1 }" 47 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 48 | kubelet_image_url = "${ var.kubelet_image_url }" 49 | kubelet_image_tag = "${ var.kubelet_image_tag }" 50 | internal_tld = "${ var.internal_tld }" 51 | pod_cidr = "${ var.pod_cidr }" 52 | region = "${ var.region }" 53 | service_cidr = "${ var.service_cidr }" 54 | ca = "${ gzip_me.ca.output }" 55 | k8s_etcd = "${ gzip_me.k8s_etcd.output }" 56 | k8s_etcd_key = "${ gzip_me.k8s_etcd_key.output }" 57 | k8s_apiserver = "${ gzip_me.k8s_apiserver.output }" 58 | k8s_apiserver_key = "${ gzip_me.k8s_apiserver_key.output }" 59 | kube-apiserver-yml = "${ gzip_me.kube-apiserver.output }" 60 | } 61 | } 62 | 63 | 64 | 65 | # data "template_file" "kube-controller-manager" 66 | 67 | # data "template_file" "kube-proxy" 68 | 69 | # data "template_file" "kube-scheduler" 70 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/ec2.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "etcd" { 2 | count = "${ var.master_node_count }" 3 | 4 | ami = "${ var.ami_id }" 5 | associate_public_ip_address = false 6 | iam_instance_profile = "${ var.instance_profile_name }" 7 | instance_type = "${ var.instance_type }" 8 | key_name = "${ var.key_name }" 9 | 10 | root_block_device { 11 | volume_size = 124 12 | volume_type = "gp2" 13 | } 14 | 15 | source_dest_check = false 16 | subnet_id = "${ element( split(",", var.subnet_ids_private), 0 ) }" 17 | 18 | tags { 19 | builtWith = "terraform" 20 | KubernetesCluster = "${ var.name }" # used by kubelet's aws provider to determine cluster 21 | kz8s = "${ var.name }" 22 | Name = "etcd${ count.index + 1 }-${ var.name }" 23 | role = "etcd,apiserver" 24 | version = "${ var.kubelet_image_tag }" 25 | visibility = "private" 26 | } 27 | 28 | user_data = "${ element(data.template_file.cloud-config.*.rendered, count.index) }" 29 | vpc_security_group_ids = [ "${ var.etcd_security_group_id }" ] 30 | } 31 | 32 | resource "null_resource" "dummy_dependency" { 33 | 
depends_on = [ "aws_instance.etcd" ] 34 | } 35 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/elb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_elb" "external" { 2 | name = "kz8s-apiserver-${replace(var.name, "/(.{0,17})(.*)/", "$1")}" 3 | 4 | cross_zone_load_balancing = false 5 | 6 | health_check { 7 | healthy_threshold = 2 8 | unhealthy_threshold = 2 9 | timeout = 3 10 | target = "HTTP:8080/" 11 | interval = 30 12 | } 13 | 14 | instances = [ "${ aws_instance.etcd.*.id }" ] 15 | idle_timeout = 3600 16 | 17 | listener { 18 | instance_port = 443 19 | instance_protocol = "tcp" 20 | lb_port = 443 21 | lb_protocol = "tcp" 22 | } 23 | 24 | security_groups = [ "${ var.external_elb_security_group_id }" ] 25 | subnets = [ "${ split(",", var.subnet_ids_public) }" ] 26 | 27 | tags { 28 | builtWith = "terraform" 29 | kz8s = "${ var.name }" 30 | Name = "kz8s-apiserver" 31 | role = "apiserver" 32 | visibility = "public" 33 | KubernetesCluster = "${ var.name }" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/input.tf: -------------------------------------------------------------------------------- 1 | variable "ami_id" {} 2 | variable "cluster_domain" {} 3 | variable "kubelet_image_url" {} 4 | variable "kubelet_image_tag" {} 5 | variable "depends_id" {} 6 | variable "dns_service_ip" {} 7 | variable "etcd_security_group_id" {} 8 | variable "external_elb_security_group_id" {} 9 | variable "instance_type" {} 10 | variable "internal_tld" {} 11 | variable "key_name" {} 12 | variable "name" {} 13 | variable "pod_cidr" {} 14 | variable "region" {} 15 | variable "service_cidr" {} 16 | variable "subnet_ids_private" {} 17 | variable "subnet_ids_public" {} 18 | variable "vpc_id" {} 19 | variable "ca" {} 20 | variable "k8s_etcd" {} 21 | variable "k8s_etcd_key" {} 22 | variable "k8s_apiserver" {} 23 | variable "k8s_apiserver_key" {} 24 | variable "instance_profile_name" {} 25 | variable "master_node_count" {} 26 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/kube-apiserver.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: ${ hyperkube } 11 | command: 12 | - /hyperkube 13 | - apiserver 14 | - --admission-control=LimitRanger 15 | - --admission-control=NamespaceExists 16 | - --admission-control=NamespaceLifecycle 17 | - --admission-control=ResourceQuota 18 | - --admission-control=SecurityContextDeny 19 | - --admission-control=ServiceAccount 20 | - --allow-privileged=true 21 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem 22 | - --cloud-provider=aws 23 | - --etcd-servers=http://etcd.${ internal_tld }:2379 24 | - --insecure-bind-address=0.0.0.0 25 | - --secure-port=443 26 | - --service-account-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 27 | - --service-cluster-ip-range=${ service_cidr } 28 | - --tls-cert-file=/etc/kubernetes/ssl/k8s-apiserver.pem 29 | - --tls-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 30 | - --v=2 31 | livenessProbe: 32 | httpGet: 33 | host: 127.0.0.1 34 | port: 8080 35 | path: /healthz 36 | initialDelaySeconds: 15 37 | timeoutSeconds: 15 38 | ports: 39 | - containerPort: 443 40 | hostPort: 443 
41 | name: https 42 | - containerPort: 8080 43 | hostPort: 8080 44 | name: local 45 | volumeMounts: 46 | - mountPath: /etc/kubernetes/ssl 47 | name: ssl-certs-kubernetes 48 | readOnly: true 49 | - mountPath: /etc/ssl/certs 50 | name: ssl-certs-host 51 | readOnly: true 52 | volumes: 53 | - hostPath: 54 | path: /etc/kubernetes/ssl 55 | name: ssl-certs-kubernetes 56 | - hostPath: 57 | path: /usr/share/ca-certificates 58 | name: ssl-certs-host 59 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/output.tf: -------------------------------------------------------------------------------- 1 | 2 | #output "depends_id" { value = "${ null_resource.dummy_dependency.id }" } 3 | output "external_elb" { value = "${ aws_elb.external.dns_name }" } 4 | output "internal_ips" { value = "${ join(",", aws_instance.etcd.*.public_ip) }" } 5 | 6 | output "master_ips" { value = ["${ aws_instance.etcd.*.private_ip }"] } 7 | -------------------------------------------------------------------------------- /provisioning/aws/modules/iam/etcd.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "master" { 2 | name = "master-k8s-${ var.name }" 3 | 4 | assume_role_policy = </dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" ping -c1 "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 19 | -------------------------------------------------------------------------------- /provisioning/azure/azure.tf: -------------------------------------------------------------------------------- 1 | # Configure the Microsoft Azure Provider 2 | provider "azurerm" { } 3 | 4 | resource "azurerm_resource_group" "cncf" { 5 | name = "${ var.name }" 6 | location = "${ var.location }" 7 | } 8 | 9 | resource "azurerm_storage_account" "cncf" { 10 | # * azurerm_storage_account.cncf: name can only consist of lowercase letters 11 | # and numbers, and must be between 3 and 24 characters long FIXME: 12 | # storage_account name must be globally unique 13 | name = "${ var.name }x" 14 | resource_group_name = "${ var.name }" 15 | location = "${ var.location }" 16 | account_type = "Standard_LRS" 17 | } 18 | 19 | resource "azurerm_storage_container" "cncf" { 20 | name = "${ var.name }" 21 | resource_group_name = "${ var.name }" 22 | storage_account_name = "${ azurerm_storage_account.cncf.name }" 23 | container_access_type = "private" 24 | } 25 | 26 | resource "azurerm_availability_set" "cncf" { 27 | name = "${ var.name }" 28 | resource_group_name = "${ var.name }" 29 | location = "${ var.location }" 30 | } 31 | 32 | -------------------------------------------------------------------------------- /provisioning/azure/docs/azure_app_endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/docs/azure_app_endpoints.png -------------------------------------------------------------------------------- /provisioning/azure/docs/azure_app_registration.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/docs/azure_app_registration.png -------------------------------------------------------------------------------- /provisioning/azure/docs/guid_from_oauth_endpoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/docs/guid_from_oauth_endpoint.png -------------------------------------------------------------------------------- /provisioning/azure/docs/key_generation_copy_me.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/docs/key_generation_copy_me.png -------------------------------------------------------------------------------- /provisioning/azure/docs/web_api_application_type.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/docs/web_api_application_type.png -------------------------------------------------------------------------------- /provisioning/azure/init-cfssl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | function usage { cat <ca-csr.json 95 | echo "$(ca-config)" >ca-config.json 96 | 97 | # generate ca 98 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca - 99 | _chmod ca 100 | 101 | # generate keys and certs 102 | generate k8s-admin client-server "${DEFAULT_HOSTS}" 103 | generate k8s-apiserver client-server "${DEFAULT_HOSTS},${K8S_SERVICE_IP},master.${INTERNAL_TLD},*.${REGION}.cloudapp.azure.com" 104 | generate k8s-etcd client-server "etcd.${INTERNAL_TLD},etcd1.${INTERNAL_TLD},etcd2.${INTERNAL_TLD},etcd3.${INTERNAL_TLD}" 105 | generate k8s-worker client "${DEFAULT_HOSTS}" 106 | 107 | # TODO: fix cert provisioning hacks 108 | #tar -rf k8s-apiserver.tar k8s-etcd.pem k8s-etcd-key.pem 109 | #tar -rf k8s-worker.tar ca.pem 110 | #bzip2 k8s-apiserver.tar 111 | #bzip2 k8s-worker.tar 112 | -------------------------------------------------------------------------------- /provisioning/azure/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "azure" } 2 | 3 | variable "internal_tld" { default = "azure.cncf.demo" } 4 | variable "data_dir" { default = "/cncf/data/azure" } 5 | 6 | # Azure Cloud Specific Settings 7 | variable "location" { default = "westus" } 8 | variable "vpc_cidr" { default = "10.0.0.0/16" } 9 | 10 | # VM Image and size 11 | variable "admin_username" { default = "cncf"} 12 | variable "image_publisher" { default = "CoreOS" } 13 | variable "image_offer" { default = "CoreOS" } 14 | variable "image_sku" { default = "Stable" } 15 | variable "image_version" { default = "1298.6.0" } 16 | variable "master_vm_size" { default = "Standard_A2" } 17 | variable "worker_vm_size" { default = "Standard_A2" } 18 | variable "bastion_vm_size" { default = "Standard_A2" } 19 | 20 | # Kubernetes 21 | variable "cluster_domain" { default = "cluster.local" } 22 | variable "pod_cidr" { default = "10.2.0.0/16" } 23 | variable "service_cidr" { default = "10.3.0.0/24" } 24 | variable "k8s_service_ip" { default = "10.3.0.1" } 25 | variable "dns_service_ip" { default = "10.3.0.10" } 26 | variable "master_node_count" { default = "3" } 27 | variable 
"worker_node_count" { default = "3" } 28 | # Autoscaling not supported by Kuberenetes on Azure yet 29 | # variable "worker_node_min" { default = "3" } 30 | # variable "worker_node_max" { default = "5" } 31 | 32 | # Deployment Artifact Versions 33 | # Hyperkube 34 | # Set from https://quay.io/repository/coreos/hyperkube?tab=tags 35 | variable "kubelet_image_url" { default = "quay.io/coreos/hyperkube"} 36 | variable "kubelet_image_tag" { default = "v1.4.7_coreos.0"} 37 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/bastion-node.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_public_ip" "cncf" { 2 | name = "PublicIPForBastion" 3 | location = "${ var.location }" 4 | resource_group_name = "${ var.name }" 5 | public_ip_address_allocation = "static" 6 | domain_name_label = "bastion${ var.name }" 7 | } 8 | 9 | resource "azurerm_network_interface" "cncf" { 10 | name = "${ var.name }" 11 | location = "${ var.location }" 12 | resource_group_name = "${ var.name }" 13 | 14 | ip_configuration { 15 | name = "${ var.name }" 16 | subnet_id = "${ var.subnet_id }" 17 | private_ip_address_allocation = "dynamic" 18 | public_ip_address_id = "${ azurerm_public_ip.cncf.id }" 19 | } 20 | } 21 | 22 | resource "azurerm_virtual_machine" "cncf" { 23 | name = "${ var.name }" 24 | location = "${ var.location }" 25 | availability_set_id = "${ var.availability_id }" 26 | resource_group_name = "${ var.name }" 27 | network_interface_ids = ["${azurerm_network_interface.cncf.id}"] 28 | vm_size = "${ var.bastion_vm_size }" 29 | 30 | storage_image_reference { 31 | publisher = "${ var.image_publisher }" 32 | offer = "${ var.image_offer }" 33 | sku = "${ var.image_sku }" 34 | version = "${ var.image_version}" 35 | } 36 | 37 | storage_os_disk { 38 | name = "disk2" 39 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/disk2.vhd" 40 | caching = "ReadWrite" 41 | create_option = "FromImage" 42 | } 43 | 44 | os_profile { 45 | computer_name = "hostname" 46 | admin_username = "${ var.admin_username }" 47 | admin_password = "Password1234!" 
48 | custom_data = "${ data.template_file.bastion-user-data.rendered }" 49 | #custom_data = "${file("${path.module}/user-data2.yml")}" 50 | } 51 | 52 | os_profile_linux_config { 53 | disable_password_authentication = true 54 | ssh_keys { 55 | path = "/home/${ var.admin_username }/.ssh/authorized_keys" 56 | key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}" 57 | } 58 | } 59 | } 60 | 61 | data "template_file" "bastion-user-data" { 62 | template = "${ file( "${ path.module }/bastion-user-data.yml" )}" 63 | vars { 64 | internal_tld = "${ var.internal_tld }" 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/bastion-user-data.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | --- 4 | coreos: 5 | update: 6 | reboot-strategy: etcd-lock 7 | 8 | etcd2: 9 | discovery-srv: ${ internal_tld } 10 | proxy: on 11 | 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | # - name: s3-iam-get.service 16 | # command: start 17 | # content: | 18 | # [Unit] 19 | # Description=s3-iam-get 20 | # [Service] 21 | # Type=oneshot 22 | # RemainAfterExit=yes 23 | # ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | # ExecStartPre=/usr/bin/curl -L -o /opt/bin/s3-iam-get \ 25 | # https://raw.githubusercontent.com/kz8s/s3-iam-get/master/s3-iam-get 26 | # ExecStart=/usr/bin/chmod +x /opt/bin/s3-iam-get 27 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "location" {} 3 | variable "bastion_vm_size" {} 4 | variable "image_publisher" {} 5 | variable "image_offer" {} 6 | variable "image_sku" {} 7 | variable "image_version" {} 8 | variable "admin_username" {} 9 | variable "internal_tld" {} 10 | variable "subnet_id" {} 11 | variable "availability_id" {} 12 | variable "storage_container" {} 13 | variable "storage_primary_endpoint" {} 14 | variable "data_dir" {} 15 | 16 | # variable "allow_ssh_cidr" {} 17 | # variable "security_group_id" {} 18 | # variable "subnet_ids" {} 19 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/output.tf: -------------------------------------------------------------------------------- 1 | output "bastion_ip" { value = "${azurerm_public_ip.cncf.ip_address}" } 2 | output "bastion_fqdn" { value = "${azurerm_public_ip.cncf.fqdn}" } 3 | -------------------------------------------------------------------------------- /provisioning/azure/modules/dns/input.tf: -------------------------------------------------------------------------------- 1 | variable "internal_tld" {} 2 | variable "name" {} 3 | variable "name_servers_file" {} 4 | variable "master_ips" { type = "list" } 5 | variable "master_node_count" {} 6 | -------------------------------------------------------------------------------- /provisioning/azure/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | # output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "internal_name_servers" { value = "${ azurerm_dns_zone.cncf.name_servers }" } 3 | output "internal_zone_id" { value = "${ azurerm_dns_zone.cncf.zone_id }" } 4 | output "name_servers_file" { value = "${ var.name_servers_file }" } 5 | 
-------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-cloud-config.tf: -------------------------------------------------------------------------------- 1 | provider "gzip" { 2 | compressionlevel = "BestCompression" 3 | } 4 | 5 | resource "gzip_me" "kube-apiserver" { 6 | input = "${ data.template_file.kube_apiserver.rendered }" 7 | } 8 | resource "gzip_me" "k8s_cloud_config" { 9 | input = "${ var.k8s_cloud_config }" 10 | } 11 | 12 | resource "gzip_me" "ca" { 13 | input = "${ var.ca }" 14 | } 15 | 16 | resource "gzip_me" "k8s_etcd" { 17 | input = "${ var.k8s_etcd }" 18 | } 19 | 20 | resource "gzip_me" "k8s_etcd_key" { 21 | input = "${ var.k8s_etcd_key }" 22 | } 23 | 24 | resource "gzip_me" "k8s_apiserver" { 25 | input = "${ var.k8s_apiserver }" 26 | } 27 | 28 | resource "gzip_me" "k8s_apiserver_key" { 29 | input = "${ var.k8s_apiserver_key }" 30 | } 31 | 32 | data "template_file" "kube_apiserver" { 33 | template = "${ file( "${ path.module }/kube-apiserver.yml" )}" 34 | vars { 35 | internal_tld = "${ var.internal_tld }" 36 | service_cidr = "${ var.service_cidr }" 37 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 38 | kubelet_image_url = "${ var.kubelet_image_url }" 39 | kubelet_image_tag = "${ var.kubelet_image_tag }" 40 | } 41 | } 42 | 43 | data "template_file" "etcd_cloud_config" { 44 | count = "${ var.master_node_count }" 45 | template = "${ file( "${ path.module }/etcd-cloud-config.yml" )}" 46 | 47 | vars { 48 | # bucket = "${ var.s3_bucket }" 49 | cluster_domain = "${ var.cluster_domain }" 50 | cluster-token = "etcd-cluster-${ var.name }" 51 | dns_service_ip = "${ var.dns_service_ip }" 52 | fqdn = "etcd${ count.index + 1 }.${ var.internal_tld }" 53 | hostname = "etcd${ count.index + 1 }" 54 | kubelet_image_url = "${ var.kubelet_image_url }" 55 | kubelet_image_tag = "${ var.kubelet_image_tag }" 56 | internal_tld = "${ var.internal_tld }" 57 | pod_cidr = "${ var.pod_cidr }" 58 | location = "${ var.location }" 59 | service_cidr = "${ var.service_cidr }" 60 | k8s_cloud_config = "${ gzip_me.k8s_cloud_config.output }" 61 | ca = "${ gzip_me.ca.output }" 62 | k8s_etcd = "${ gzip_me.k8s_etcd.output }" 63 | k8s_etcd_key = "${ gzip_me.k8s_etcd_key.output }" 64 | k8s_apiserver = "${ gzip_me.k8s_apiserver.output }" 65 | k8s_apiserver_key = "${ gzip_me.k8s_apiserver_key.output }" 66 | k8s_apiserver_yml = "${ gzip_me.kube-apiserver.output }" 67 | node-ip = "${ element(azurerm_network_interface.cncf.*.private_ip_address, count.index) }" 68 | 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-load-balancer.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_public_ip" "cncf" { 2 | name = "PublicIPForLB" 3 | location = "${ var.location }" 4 | resource_group_name = "${ var.name }" 5 | public_ip_address_allocation = "static" 6 | domain_name_label = "k8s-${ var.name }" 7 | } 8 | 9 | resource "azurerm_lb" "cncf" { 10 | name = "TestLoadBalancer" 11 | location = "${ azurerm_public_ip.cncf.location }" 12 | resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 13 | 14 | frontend_ip_configuration { 15 | name = "PublicIPAddress" 16 | public_ip_address_id = "${azurerm_public_ip.cncf.id}" 17 | } 18 | } 19 | 20 | resource "azurerm_lb_rule" "cncf" { 21 | resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 22 | loadbalancer_id = "${azurerm_lb.cncf.id}" 23 | 
probe_id = "${ azurerm_lb_probe.cncf.id }" 24 | backend_address_pool_id = "${ azurerm_lb_backend_address_pool.cncf.id }" 25 | name = "LBRule" 26 | protocol = "Tcp" 27 | frontend_port = 443 28 | backend_port = 443 29 | frontend_ip_configuration_name = "PublicIPAddress" 30 | } 31 | 32 | resource "azurerm_lb_probe" "cncf" { 33 | resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 34 | loadbalancer_id = "${azurerm_lb.cncf.id}" 35 | name = "${ var.name }" 36 | protocol = "Http" 37 | port = 8080 38 | request_path = "/" 39 | interval_in_seconds = 30 40 | number_of_probes = 5 41 | } 42 | 43 | resource "azurerm_lb_backend_address_pool" "cncf" { 44 | resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 45 | loadbalancer_id = "${azurerm_lb.cncf.id}" 46 | name = "BackEndAddressPool" 47 | } 48 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-nodes.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_interface" "cncf" { 2 | count = "${ var.master_node_count }" 3 | name = "etcd-interface${ count.index + 1 }" 4 | location = "${ var.location }" 5 | resource_group_name = "${ var.name }" 6 | 7 | ip_configuration { 8 | name = "etcd-nic${ count.index + 1 }" 9 | subnet_id = "${ var.subnet_id }" 10 | private_ip_address_allocation = "dynamic" 11 | # private_ip_address = "${ element( split(",", var.etcd-ips), count.index ) }" 12 | load_balancer_backend_address_pools_ids = ["${ azurerm_lb_backend_address_pool.cncf.id }"] 13 | } 14 | } 15 | 16 | resource "azurerm_virtual_machine" "cncf" { 17 | count = "${ var.master_node_count }" 18 | name = "etcd-master${ count.index + 1 }" 19 | location = "${ var.location }" 20 | availability_set_id = "${ var.availability_id }" 21 | resource_group_name = "${ var.name }" 22 | network_interface_ids = ["${ element(azurerm_network_interface.cncf.*.id, count.index) }"] 23 | vm_size = "${ var.master_vm_size }" 24 | 25 | storage_image_reference { 26 | publisher = "${ var.image_publisher }" 27 | offer = "${ var.image_offer }" 28 | sku = "${ var.image_sku }" 29 | version = "${ var.image_version}" 30 | } 31 | 32 | storage_os_disk { 33 | name = "etcd-disks${ count.index + 1 }" 34 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/etcd-vhd${ count.index + 1 }.vhd" 35 | caching = "ReadWrite" 36 | create_option = "FromImage" 37 | } 38 | 39 | os_profile { 40 | computer_name = "etcd-master${ count.index + 1 }" 41 | admin_username = "${ var.admin_username }" 42 | admin_password = "Password1234!" 
43 | custom_data = "${ element(data.template_file.etcd_cloud_config.*.rendered, count.index) }" 44 | } 45 | 46 | os_profile_linux_config { 47 | disable_password_authentication = true 48 | ssh_keys { 49 | path = "/home/${ var.admin_username }/.ssh/authorized_keys" 50 | key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}" 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/input.tf: -------------------------------------------------------------------------------- 1 | variable "location" {} 2 | variable "subnet_id" {} 3 | variable "name" {} 4 | variable "master_vm_size" {} 5 | variable "master_node_count" {} 6 | variable "image_publisher" {} 7 | variable "image_offer" {} 8 | variable "image_sku" {} 9 | variable "image_version" {} 10 | variable "availability_id" {} 11 | variable "storage_account" {} 12 | variable "storage_primary_endpoint" {} 13 | variable "storage_container" {} 14 | variable "cluster_domain" {} 15 | variable "dns_service_ip" {} 16 | variable "internal_tld" {} 17 | variable "pod_cidr" {} 18 | variable "service_cidr" {} 19 | variable "admin_username" {} 20 | variable "kubelet_image_url" {} 21 | variable "kubelet_image_tag" {} 22 | variable "k8s_cloud_config" {} 23 | # variable "etcd_security_group_id" {} 24 | # variable "external_elb_security_group_id" {} 25 | variable "ca" {} 26 | variable "k8s_etcd" {} 27 | variable "k8s_etcd_key" {} 28 | variable "k8s_apiserver" {} 29 | variable "k8s_apiserver_key" {} 30 | variable "data_dir" {} 31 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/kube-apiserver.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: ${ kubelet_image_url }:${ kubelet_image_tag } 11 | command: 12 | - /hyperkube 13 | - apiserver 14 | - --admission-control=LimitRanger 15 | - --admission-control=NamespaceExists 16 | - --admission-control=NamespaceLifecycle 17 | - --admission-control=ResourceQuota 18 | - --admission-control=SecurityContextDeny 19 | - --admission-control=ServiceAccount 20 | - --allow-privileged=true 21 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem 22 | - --cloud-provider=azure 23 | - --cloud-config=/etc/kubernetes/ssl/azure-config.json 24 | - --etcd-servers=http://etcd.${ internal_tld }:2379 25 | - --insecure-bind-address=0.0.0.0 26 | - --secure-port=443 27 | - --service-account-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 28 | - --service-cluster-ip-range=${ service_cidr } 29 | - --tls-cert-file=/etc/kubernetes/ssl/k8s-apiserver.pem 30 | - --tls-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 31 | - --v=2 32 | livenessProbe: 33 | httpGet: 34 | host: 127.0.0.1 35 | port: 8080 36 | path: /healthz 37 | initialDelaySeconds: 15 38 | timeoutSeconds: 15 39 | ports: 40 | - containerPort: 443 41 | hostPort: 443 42 | name: https 43 | - containerPort: 8080 44 | hostPort: 8080 45 | name: local 46 | volumeMounts: 47 | - mountPath: /etc/kubernetes/ssl 48 | name: ssl-certs-kubernetes 49 | readOnly: true 50 | - mountPath: /etc/ssl/certs 51 | name: ssl-certs-host 52 | readOnly: true 53 | volumes: 54 | - hostPath: 55 | path: /etc/kubernetes/ssl 56 | name: ssl-certs-kubernetes 57 | - hostPath: 58 | path: /usr/share/ca-certificates 59 | name: ssl-certs-host 60 | 
-------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/output.tf: -------------------------------------------------------------------------------- 1 | output "external_lb" { value = "${azurerm_lb_backend_address_pool.cncf.id }" } 2 | output "fqdn_lb" { value = "${azurerm_public_ip.cncf.fqdn}" } 3 | output "master_ips" { value = ["${ azurerm_network_interface.cncf.*.private_ip_address }"] } 4 | -------------------------------------------------------------------------------- /provisioning/azure/modules/network/input.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_cidr" {} 2 | variable "name" {} 3 | variable "name_servers_file" {} 4 | variable "location" {} 5 | -------------------------------------------------------------------------------- /provisioning/azure/modules/network/output.tf: -------------------------------------------------------------------------------- 1 | output "subnet_id" { value = "${ azurerm_subnet.cncf.id }" } 2 | -------------------------------------------------------------------------------- /provisioning/azure/modules/network/virtual_network.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_security_group" "cncf" { 2 | name = "${ var.name }" 3 | location = "${ var.location}" 4 | resource_group_name = "${ var.name }" 5 | } 6 | 7 | resource "azurerm_subnet" "cncf" { 8 | name = "${ var.name }" 9 | resource_group_name = "${ var.name }" 10 | virtual_network_name = "${azurerm_virtual_network.cncf.name}" 11 | address_prefix = "10.0.10.0/24" 12 | route_table_id = "${ azurerm_route_table.cncf.id }" 13 | 14 | } 15 | 16 | resource "azurerm_virtual_network" "cncf" { 17 | name = "${ var.name }" 18 | resource_group_name = "${ var.name }" 19 | address_space = ["${ var.vpc_cidr }"] 20 | location = "${ var.location }" 21 | dns_servers = [ 22 | "${ element(split( ",", file(var.name_servers_file) ),0) }", 23 | "${ element(split( ",", file(var.name_servers_file) ),1) }", 24 | "8.8.8.8" 25 | ] 26 | # getting dns servers in list form was difficult 27 | # module.vpc.azurerm_virtual_network.main: Creating... 
28 | # address_space.#: "" => "1" 29 | # address_space.0: "" => "10.0.0.0/16" 30 | # dns_servers.#: "" => "4" 31 | # dns_servers.0: "" => "40.90.4.9" 32 | # dns_servers.1: "" => "13.107.24.9" 33 | # dns_servers.2: "" => "64.4.48.9" 34 | # dns_servers.3: "" => "13.107.160.9" 35 | } 36 | 37 | resource "azurerm_route_table" "cncf" { 38 | name = "${ var.name }" 39 | location = "${ var.location }" 40 | resource_group_name = "${ var.name }" 41 | } 42 | -------------------------------------------------------------------------------- /provisioning/azure/modules/worker/input.tf: -------------------------------------------------------------------------------- 1 | variable "location" {} 2 | variable "subnet_id" {} 3 | variable "name" {} 4 | variable "worker_vm_size" {} 5 | variable "worker_node_count" {} 6 | variable "image_publisher" {} 7 | variable "image_offer" {} 8 | variable "image_sku" {} 9 | variable "image_version" {} 10 | variable "storage_account" {} 11 | variable "storage_primary_endpoint" {} 12 | variable "storage_container" {} 13 | variable "availability_id" {} 14 | variable "external_lb" {} 15 | variable "cluster_domain" {} 16 | variable "dns_service_ip" {} 17 | variable "internal_tld" {} 18 | variable "admin_username" {} 19 | variable "kubelet_image_url" {} 20 | variable "kubelet_image_tag" {} 21 | variable "k8s_cloud_config" {} 22 | variable "ca" {} 23 | variable "k8s_worker" {} 24 | variable "k8s_worker_key" {} 25 | variable "data_dir" {} 26 | -------------------------------------------------------------------------------- /provisioning/azure/modules/worker/worker-cloud-config.tf: -------------------------------------------------------------------------------- 1 | provider "gzip" { 2 | compressionlevel = "BestCompression" 3 | } 4 | 5 | resource "gzip_me" "k8s_cloud_config" { 6 | input = "${ var.k8s_cloud_config }" 7 | } 8 | 9 | resource "gzip_me" "ca" { 10 | input = "${ var.ca }" 11 | } 12 | 13 | resource "gzip_me" "k8s_worker" { 14 | input = "${ var.k8s_worker }" 15 | } 16 | 17 | resource "gzip_me" "k8s_worker_key" { 18 | input = "${ var.k8s_worker_key }" 19 | } 20 | 21 | data "template_file" "worker_cloud_config" { 22 | template = "${ file( "${ path.module }/worker-cloud-config.yml" )}" 23 | 24 | vars { 25 | cluster_domain = "${ var.cluster_domain }" 26 | dns_service_ip = "${ var.dns_service_ip }" 27 | kubelet_image_url = "${ var.kubelet_image_url }" 28 | kubelet_image_tag = "${ var.kubelet_image_tag }" 29 | internal_tld = "${ var.internal_tld }" 30 | location = "${ var.location }" 31 | k8s_cloud_config = "${ gzip_me.k8s_cloud_config.output }" 32 | ca = "${ gzip_me.ca.output }" 33 | k8s_worker = "${ gzip_me.k8s_worker.output }" 34 | k8s_worker_key = "${ gzip_me.k8s_worker_key.output }" 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /provisioning/azure/modules/worker/worker-nodes.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_interface" "cncf" { 2 | count = "${ var.worker_node_count }" 3 | name = "worker-interface${ count.index + 1 }" 4 | location = "${ var.location }" 5 | resource_group_name = "${ var.name }" 6 | 7 | ip_configuration { 8 | name = "worker-nic${ count.index + 1 }" 9 | subnet_id = "${ var.subnet_id }" 10 | private_ip_address_allocation = "dynamic" 11 | } 12 | } 13 | 14 | resource "azurerm_virtual_machine" "cncf" { 15 | count = "${ var.worker_node_count }" 16 | name = "worker-node${ count.index + 1 }" 17 | location = "${ var.location }" 18 | 
availability_set_id = "${ var.availability_id }" 19 | resource_group_name = "${ var.name }" 20 | network_interface_ids = ["${ element(azurerm_network_interface.cncf.*.id, count.index) }"] 21 | vm_size = "${ var.worker_vm_size }" 22 | 23 | storage_image_reference { 24 | publisher = "${ var.image_publisher }" 25 | offer = "${ var.image_offer }" 26 | sku = "${ var.image_sku }" 27 | version = "${ var.image_version}" 28 | } 29 | 30 | storage_os_disk { 31 | name = "worker-disks${ count.index + 1 }" 32 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/worker-vhd${ count.index + 1 }.vhd" 33 | caching = "ReadWrite" 34 | create_option = "FromImage" 35 | } 36 | 37 | os_profile { 38 | computer_name = "worker-node${ count.index + 1 }" 39 | admin_username = "${ var.admin_username }" 40 | admin_password = "Password1234!" 41 | custom_data = "${ element(data.template_file.worker_cloud_config.*.rendered, count.index) }" 42 | } 43 | 44 | os_profile_linux_config { 45 | disable_password_authentication = true 46 | ssh_keys { 47 | path = "/home/${ var.admin_username }/.ssh/authorized_keys" 48 | key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}" 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /provisioning/azure/output.tf: -------------------------------------------------------------------------------- 1 | output "fqdn_k8s" { value = "${ module.etcd.fqdn_lb}" } 2 | output "bastion_ip" { value = "${ module.bastion.bastion_ip}" } 3 | output "bastion_fqdn" { value = "${ module.bastion.bastion_fqdn}" } 4 | output "k8s_admin" { value = "${ k8s_admin}"} 5 | # fixme for use outside container 6 | output "ssh_key_setup" { value = "eval $(ssh-agent) ; ssh-add ${ var.data_dir }/.ssh/id_rsa"} 7 | output "ssh_via_bastion" { value = "ssh -At ${ var.admin_username }@${ module.bastion.bastion_fqdn } ssh ${ var.admin_username }@etcd1.${ var.internal_tld }"} 8 | output "kubeconfig" { value = "${ module.kubeconfig.kubeconfig }"} 9 | -------------------------------------------------------------------------------- /provisioning/azure/runme: -------------------------------------------------------------------------------- 1 | rm -rf /cncf/data/.ssh/ 2 | rm -rf /cncf/data/.cfssl/ 3 | rm -rf /cncf/data/azure-config.json 4 | rm -rf /build/azure/terraform.tfstate* 5 | rm -rf /build/azure/azure_dns* 6 | terraform get 7 | terraform apply -target null_resource.sshkey_gen 8 | terraform apply -target null_resource.ssl_gen 9 | terraform apply -target null_resource.cloud_gen 10 | terraform apply -target module.dns.null_resource.dns_gen 11 | terraform apply -target module.etcd.azurerm_network_interface.cncf 12 | time terraform apply -------------------------------------------------------------------------------- /provisioning/azure/servicePrincipalProfile.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/azure/servicePrincipalProfile.json -------------------------------------------------------------------------------- /provisioning/azure/ssl-ssh-cloud.tf: -------------------------------------------------------------------------------- 1 | #Gen Certs and SSH KeyPair 2 | resource "null_resource" "ssl_ssh_cloud_gen" { 3 | 4 | provisioner "local-exec" { 5 | command = < ${ var.data_dir }/azure-config.json 40 | { 41 | "aadClientId": "$${ARM_CLIENT_ID}", 42 | "aadClientSecret": "$${ARM_CLIENT_SECRET}", 43 | "tenantId": 
"$${ARM_TENANT_ID}", 44 | "subscriptionId": "$${ARM_SUBSCRIPTION_ID}", 45 | "resourceGroup": "${ var.name }", 46 | "location": "${ var.location }", 47 | "subnetName": "${ var.name }", 48 | "securityGroupName": "${ var.name }", 49 | "vnetName": "${ var.name }", 50 | "routeTableName": "${ var.name }", 51 | "primaryAvailabilitySetName": "${ var.name }" 52 | } 53 | JSON 54 | COMMAND 55 | } 56 | 57 | provisioner "local-exec" { 58 | when = "destroy" 59 | on_failure = "continue" 60 | command = </dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" getent hosts "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 19 | -------------------------------------------------------------------------------- /provisioning/cross-cloud/cloud.tf: -------------------------------------------------------------------------------- 1 | module "aws" { 2 | source = "../aws" 3 | name = "${ var.name }-aws" 4 | internal_tld = "${ var.name }-aws.cncf.demo" 5 | data_dir = "${ var.data_dir }/aws" 6 | } 7 | 8 | module "azure" { 9 | source = "../azure" 10 | name = "${ var.name }azure" 11 | internal_tld = "${ var.name }-azure.cncf.demo" 12 | data_dir = "${ var.data_dir }/azure" 13 | } 14 | 15 | module "packet" { 16 | source = "../packet" 17 | name = "${ var.name }-packet" 18 | data_dir = "${ var.data_dir }/packet" 19 | packet_project_id = "${ var.packet_project_id }" 20 | } 21 | 22 | module "gce" { 23 | source = "../gce" 24 | name = "${ var.name }-gce" 25 | data_dir = "${ var.data_dir }/gce" 26 | } 27 | 28 | module "gke" { 29 | source = "../gke" 30 | name = "${ var.name }-gke" 31 | data_dir = "${ var.data_dir}/gke" 32 | } 33 | 34 | 35 | resource "null_resource" "kubeconfig" { 36 | 37 | provisioner "local-exec" { 38 | command = < ${ var.data-dir }/azure-config.json 7 | # { 8 | # "aadClientId": "$${ARM_CLIENT_ID}", 9 | # "aadClientSecret": "$${ARM_CLIENT_SECRET}", 10 | # "tenantId": "$${ARM_TENANT_ID}", 11 | # "subscriptionId": "$${ARM_SUBSCRIPTION_ID}", 12 | # "resourceGroup": "${ var.name }", 13 | # "location": "${ var.location }", 14 | # "subnetName": "${ var.name }", 15 | # "securityGroupName": "${ var.name }", 16 | # "vnetName": "${ var.name }", 17 | # "routeTableName": "${ var.name }", 18 | # "primaryAvailabilitySetName": "${ var.name }" 19 | # } 20 | # JSON 21 | # COMMAND 22 | # } 23 | 24 | # provisioner "local-exec" { 25 | # when = "destroy" 26 | # on_failure = "continue" 27 | # command = < ${ var.etcd_discovery } 8 | EOF 9 | } 10 | 11 | provisioner "local-exec" { 12 | when = "destroy" 13 | on_failure = "continue" 14 | command = </dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" getent hosts "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 
19 | -------------------------------------------------------------------------------- /provisioning/gke/gke.tf: -------------------------------------------------------------------------------- 1 | provider "google" {} 2 | 3 | -------------------------------------------------------------------------------- /provisioning/gke/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "gkecluster" } 2 | variable "region" { default = "us-central1" } 3 | variable "zone" { default = "us-central1-a" } 4 | variable "project" { default = "test-163823" } 5 | variable "cidr" { default = "10.0.0.0/16" } 6 | variable "node_count" { default = "3" } 7 | variable "node_version" { default = "1.6.2" } 8 | variable "master_user" { default = "cncf" } 9 | variable "master_password" { default = "demo"} 10 | variable "vm_size" { default = "n1-standard-1"} 11 | variable "node_pool_count" { default = "3"} 12 | variable "data_dir" { default = "/cncf/data" } 13 | 14 | -------------------------------------------------------------------------------- /provisioning/gke/modules.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | source = "./modules/vpc" 3 | name = "${ var.name }" 4 | cidr = "${ var.cidr }" 5 | region = "${ var.region }" 6 | } 7 | 8 | module "cluster" { 9 | source = "./modules/cluster" 10 | name = "${ var.name }" 11 | region = "${ var.region }" 12 | zone = "${ var.zone }" 13 | project = "${ var.project}" 14 | node_count = "${ var.node_count }" 15 | network = "${ var.name }" 16 | subnetwork = "${ var.name }" 17 | node_version = "${ var.node_version }" 18 | master_user = "${ var.master_user }" 19 | master_password = "${ var.master_password }" 20 | vm_size = "${ var.vm_size }" 21 | node_pool_count = "${ var.node_pool_count }" 22 | data_dir = "${ var.data_dir }" 23 | } 24 | 25 | module "kubeconfig" { 26 | source = "../kubeconfig" 27 | 28 | ca_pem = "${ var.data_dir }/ca.pem" 29 | admin_pem = "${ var.data_dir }/k8s-admin.pem" 30 | admin_key_pem = "${ var.data_dir }/k8s-admin-key.pem" 31 | fqdn_k8s = "${ module.cluster.fqdn_k8s }" 32 | data_dir = "${ var.data_dir }" 33 | name = "gke_${ var.project }_${ var.zone }-a_${ var.name }" 34 | } 35 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/cluster.tf: -------------------------------------------------------------------------------- 1 | resource "google_container_cluster" "cncf" { 2 | name = "${ var.name }" 3 | zone = "${ var.zone }" 4 | project = "${ var.project }" 5 | initial_node_count = "${ var.node_count }" 6 | 7 | additional_zones = [ 8 | "us-central1-b", 9 | "us-central1-c", 10 | ] 11 | 12 | network = "${ var.network }" 13 | subnetwork = "${ var.subnetwork }" 14 | node_version = "${ var.node_version }" 15 | 16 | master_auth { 17 | username = "${ var.master_user }" 18 | password = "${ var.master_password }" 19 | } 20 | 21 | node_config { 22 | machine_type = "${ var.vm_size }" 23 | oauth_scopes = [ 24 | "https://www.googleapis.com/auth/compute", 25 | "https://www.googleapis.com/auth/devstorage.read_only", 26 | "https://www.googleapis.com/auth/logging.write", 27 | "https://www.googleapis.com/auth/monitoring", 28 | ] 29 | } 30 | } 31 | 32 | 33 | resource "null_resource" "file" { 34 | 35 | provisioner "local-exec" { 36 | command = < "${ var.data_dir }/ca.pem" 38 | echo "${ base64decode(google_container_cluster.cncf.master_auth.0.client_certificate) }" > "${ var.data_dir 
}/k8s-admin.pem" 39 | echo "${ base64decode(google_container_cluster.cncf.master_auth.0.client_key) }" > "${ var.data_dir }/k8s-admin-key.pem" 40 | LOCAL_EXEC 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "region" {} 3 | variable "zone" {} 4 | variable "project" {} 5 | variable "node_count" {} 6 | variable "network" {} 7 | variable "subnetwork" {} 8 | variable "node_version" {} 9 | variable "master_user" {} 10 | variable "master_password" {} 11 | variable "node_pool_count" {} 12 | variable "vm_size" {} 13 | variable "data_dir" {} 14 | 15 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/node-pool.tf: -------------------------------------------------------------------------------- 1 | resource "google_container_node_pool" "cncf" { 2 | name = "${ var.name }" 3 | project = "${ var.project }" 4 | zone = "${ var.zone }" 5 | cluster = "${google_container_cluster.cncf.name}" 6 | initial_node_count = "${ var.node_pool_count }" 7 | } 8 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/output.tf: -------------------------------------------------------------------------------- 1 | output "fqdn_k8s" { value = "${ google_container_cluster.cncf.endpoint }" } 2 | -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/gce-subnet.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_subnetwork" "cncf" { 2 | name = "${ var.name }" 3 | ip_cidr_range = "${ var.cidr }" 4 | network = "${ google_compute_network.cncf.self_link }" 5 | region = "${ var.region }" 6 | } 7 | -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/input.tf: -------------------------------------------------------------------------------- 1 | variable "cidr" {} 2 | variable "name" {} 3 | variable "region" {} 4 | -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/gke/modules/vpc/output.tf -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_network" "cncf" { 2 | name = "${ var.name }" 3 | auto_create_subnetworks = "false" 4 | } 5 | -------------------------------------------------------------------------------- /provisioning/gke/output.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { value = "${ module.kubeconfig.kubeconfig }"} 2 | -------------------------------------------------------------------------------- /provisioning/kubeconfig/input.tf: -------------------------------------------------------------------------------- 1 | variable "admin_key_pem" {} 2 | variable "admin_pem" {} 3 | variable "ca_pem" {} 4 | variable "fqdn_k8s" {} 5 | variable "name" {} 6 | variable "data_dir" {} 7 | 
-------------------------------------------------------------------------------- /provisioning/kubeconfig/kubeconfig.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "kubeconfig" { 2 | template = <ca-csr.json 95 | echo "$(ca-config)" >ca-config.json 96 | 97 | # generate ca 98 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca - 99 | _chmod ca 100 | 101 | # generate keys and certs 102 | generate k8s-admin client-server "${DEFAULT_HOSTS}" 103 | generate k8s-apiserver client-server "${DEFAULT_HOSTS},${K8S_SERVICE_IP},master.${INTERNAL_TLD},endpoint.${INTERNAL_TLD}" 104 | generate k8s-etcd client-server "etcd.${INTERNAL_TLD},etcd1.${INTERNAL_TLD},etcd2.${INTERNAL_TLD},etcd3.${INTERNAL_TLD}" 105 | generate k8s-worker client "${DEFAULT_HOSTS}" 106 | 107 | # TODO: fix cert provisioning hacks 108 | #tar -rf k8s-apiserver.tar k8s-etcd.pem k8s-etcd-key.pem 109 | #tar -rf k8s-worker.tar ca.pem 110 | #bzip2 k8s-apiserver.tar 111 | #bzip2 k8s-worker.tar 112 | -------------------------------------------------------------------------------- /provisioning/packet/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "packet" } 2 | 3 | # Set with env TF_VAR_packet_project_id 4 | variable "packet_project_id" {} # required for now 5 | # https://www.packet.net/locations/ 6 | variable "packet_facility" { default = "sjc1" } 7 | variable "packet_billing_cycle" { default = "hourly" } 8 | variable "packet_operating_system" { default = "coreos_stable" } 9 | variable "packet_master_device_plan" { default = "baremetal_0" } 10 | variable "packet_worker_device_plan" { default = "baremetal_0" } 11 | 12 | variable "domain" { default = "cncf.ci" } 13 | variable "data_dir" { default = "/cncf/data/packet" } 14 | 15 | # VM Image and size 16 | variable "admin_username" { default = "core"} 17 | 18 | # Kubernetes 19 | variable "cluster_domain" { default = "cluster.local" } 20 | variable "pod_cidr" { default = "10.2.0.0/16" } 21 | variable "service_cidr" { default = "10.3.0.0/24" } 22 | variable "k8s_service_ip" { default = "10.3.0.1" } 23 | variable "dns_service_ip" { default = "10.3.0.10" } 24 | variable "master_node_count" { default = "3" } 25 | variable "worker_node_count" { default = "3" } 26 | # Autoscaling not supported by Kubernetes on Packet yet 27 | # variable "worker_node_min" { default = "3" } 28 | # variable "worker_node_max" { default = "5" } 29 | 30 | # Deployment Artifact Versions 31 | # Hyperkube 32 | # Set from https://quay.io/repository/coreos/hyperkube 33 | variable "kubelet_image_url" { default = "quay.io/coreos/hyperkube"} 34 | variable "kubelet_image_tag" { default = "v1.6.2_coreos.0"} 35 | -------------------------------------------------------------------------------- /provisioning/packet/modules/dns/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "master_ips" { type = "list" } 3 | variable "public_master_ips" { type = "list" } 4 | variable "public_worker_ips" { type = "list" } 5 | variable "master_node_count" {} 6 | variable "worker_node_count" {} 7 | variable "domain" {} 8 | variable "record_ttl" { default = "60" } 9 | -------------------------------------------------------------------------------- /provisioning/packet/modules/dns/output.tf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cncf/demo/f9ad665227073edad51cd0b6422ce87a4bbd0818/provisioning/packet/modules/dns/output.tf -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/discovery.tf: -------------------------------------------------------------------------------- 1 | #Get Discovery URL 2 | resource "null_resource" "discovery_gen" { 3 | 4 | provisioner "local-exec" { 5 | command = < ${ var.etcd_discovery } 7 | EOF 8 | } 9 | 10 | provisioner "local-exec" { 11 | when = "destroy" 12 | on_failure = "continue" 13 | command = <