├── camel
│   ├── api-router
│   │   ├── project
│   │   │   └── .gitkeep
│   │   ├── _catalog_1.sh
│   │   ├── cleanup.sh
│   │   └── demo.sh
│   └── simple-microservices
│       ├── demo.sh
│       └── cleanup.sh
├── service-mesh
│   ├── istio
│   │   ├── setup
│   │   │   ├── binaries
│   │   │   │   └── .gitkeep
│   │   │   ├── cleanup.sh
│   │   │   └── demo.sh
│   │   ├── echo
│   │   │   ├── cleanup.sh
│   │   │   ├── app
│   │   │   │   ├── echo-app.yaml
│   │   │   │   ├── logic-app.yaml
│   │   │   │   ├── vanilla-app.yaml
│   │   │   │   └── mixer-config-quota-echo.yaml
│   │   │   └── demo.sh
│   │   └── bookinfo
│   │       ├── reset-bookinfo-route-v1.sh
│   │       ├── cleanup.sh
│   │       └── demo.sh
│   ├── istio-openshift
│   │   ├── clean-routerules.sh
│   │   ├── deploy-bookinfo.sh
│   │   ├── boot-minishift.sh
│   │   ├── get-ingress-url.sh
│   │   ├── setup-namespace.sh
│   │   ├── install.sh
│   │   └── demo.sh
│   └── envoy
│       └── spring-boot
│           ├── cleanup.sh
│           └── demo.sh
├── ticket-monster-msa
│   ├── orders
│   │   ├── project
│   │   │   └── .gitkeep
│   │   ├── _port-forward-mysql.sh
│   │   ├── mysql
│   │   ├── _orders_1.sh
│   │   ├── cleanup.sh
│   │   ├── temp.sh
│   │   └── demo.sh
│   ├── search
│   │   ├── _search_1.sh
│   │   ├── cleanup.sh
│   │   └── demo.sh
│   ├── admin
│   │   ├── _admin_1.sh
│   │   ├── mysql
│   │   ├── cleanup.sh
│   │   └── demo.sh
│   └── ui
│       ├── cleanup.sh
│       └── demo.sh
├── spring-boot
│   ├── simple-hello-world
│   │   ├── project
│   │   │   └── .gitkeep
│   │   ├── _import.sh
│   │   ├── _debug.sh
│   │   ├── _config-demo.sh
│   │   ├── cleanup.sh
│   │   ├── demo.sh
│   │   ├── _impl-svc.sh
│   │   └── Readme.md
│   └── hystrix-hello-world
│       ├── cleanup.sh
│       ├── setup-kubeflix.sh
│       ├── impl.java
│       ├── _impl-svc.sh
│       ├── demo.sh
│       ├── impl-pom.xml
│       └── Readme.md
├── debezium
│   └── docker
│       ├── delete2.json
│       ├── cleanup.sh
│       ├── key.json
│       ├── inventory-connector.json
│       ├── delete1.json
│       ├── value.json
│       └── demo.sh
├── demo-namespace.yaml
├── rolling_update
│   ├── _rolling_2.sh
│   ├── svc.yaml
│   ├── _rolling_1.sh
│   ├── rc-v1.yaml
│   ├── rc-v2.yaml
│   └── demo.sh
├── quota
│   ├── quota.yaml
│   ├── limits.yaml
│   ├── pod2.yaml
│   ├── pod1.yaml
│   └── demo.sh
├── secrets
│   ├── secret.yaml
│   ├── pod.yaml
│   └── demo.sh
├── deployment
│   ├── svc.yaml
│   ├── _deploy_1.sh
│   ├── _deploy_2.sh
│   ├── deployment.yaml
│   └── demo.sh
├── daemon_sets
│   ├── svc.yaml
│   ├── _daemon_2.sh
│   ├── daemon.yaml
│   ├── _daemon_1.sh
│   └── demo.sh
├── .gitignore
├── README.md
├── cleanup.sh
├── bluegreen
│   ├── _bg_1.sh
│   ├── _bg_2.sh
│   ├── cleanup.sh
│   ├── app-blue-v1.yaml
│   ├── app-green-v2.yaml
│   └── demo.sh
├── services
│   ├── _scale_2.sh
│   ├── _scale_1.sh
│   └── demo.sh
├── ticket-monster-monolith
│   ├── mysql
│   ├── cleanup.sh
│   └── demo.sh
├── setup.sh
├── graceful_termination
│   ├── pod.yaml
│   └── demo.sh
├── obsidian
│   └── install-minishift.sh
├── 3scale
│   └── openshift-gateway
│       ├── cleanup.sh
│       └── demo.sh
├── pods
│   ├── pod.yaml
│   └── demo.sh
├── replication_controllers
│   ├── rc.yaml
│   └── demo.sh
├── openshift
│   └── pipelines
│       ├── cleanup.sh
│       └── demo.sh
├── util.sh
└── LICENSE
--------------------------------------------------------------------------------
/camel/api-router/project/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/service-mesh/istio/setup/binaries/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/ticket-monster-msa/orders/project/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/spring-boot/simple-hello-world/project/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/debezium/docker/delete2.json:
--------------------------------------------------------------------------------
{
  "schema": null,
  "payload": null
}
--------------------------------------------------------------------------------
/demo-namespace.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Namespace
metadata:
  name: demos
--------------------------------------------------------------------------------
/service-mesh/istio-openshift/clean-routerules.sh:
--------------------------------------------------------------------------------
kubectl delete routerule --all -n istio-samples
--------------------------------------------------------------------------------
/rolling_update/_rolling_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

--------------------------------------------------------------------------------
/service-mesh/istio-openshift/deploy-bookinfo.sh:
--------------------------------------------------------------------------------
oc apply -f <(istioctl kube-inject -f istio-0.3.0/samples/bookinfo/kube/bookinfo.yaml)
--------------------------------------------------------------------------------
/service-mesh/istio-openshift/boot-minishift.sh:
--------------------------------------------------------------------------------
export XHYVE_EXPERIMENTAL_NFS_SHARE=true
minishift start --memory=4096 --disk-size=30g --openshift-version=v3.7.0-rc.0
--------------------------------------------------------------------------------
/service-mesh/istio/echo/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

kubectl delete deploy/echo
kubectl delete deploy/logic

kubectl delete svc/echo
kubectl delete svc/logic
--------------------------------------------------------------------------------
/service-mesh/istio-openshift/get-ingress-url.sh:
--------------------------------------------------------------------------------
echo $(minishift ip):$(kubectl get svc/istio-ingress -n istio-system -o yaml | grep -i nodeport | head -n 1 | awk '{ print $2 }')
--------------------------------------------------------------------------------
/quota/quota.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ResourceQuota
metadata:
  name: demo-quota
  namespace: demos
spec:
  hard:
    cpu: 4
    memory: 4Gi
--------------------------------------------------------------------------------
/secrets/secret.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Secret
metadata:
  namespace: demos
  name: my-secret-password
type: opaque
data:
  username: dGhvY2tpbgo=
  password: eW91IHdpc2gK
--------------------------------------------------------------------------------
/service-mesh/istio-openshift/setup-namespace.sh:
--------------------------------------------------------------------------------
oc adm policy add-scc-to-user anyuid -z default
oc adm policy add-scc-to-user privileged -z default
oc adm policy add-cluster-role-to-user cluster-admin -z default
--------------------------------------------------------------------------------
/deployment/svc.yaml:
--------------------------------------------------------------------------------
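# Fronts the pods created by deployment/deployment.yaml: any pod labeled
# demo=deployment is load-balanced behind this ClusterIP on port 80.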
apiVersion: v1
kind: Service
metadata:
  namespace: demos
  name: deployment-demo
spec:
  ports:
  - port: 80
    protocol: TCP
  selector:
    demo: deployment
--------------------------------------------------------------------------------
/rolling_update/svc.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  namespace: demos
  name: update-demo-svc
spec:
  ports:
  - port: 80
    protocol: TCP
  selector:
    demo: update
--------------------------------------------------------------------------------
/quota/limits.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: LimitRange
metadata:
  name: demo-limits
  namespace: demos
spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: 100m
      memory: 256Mi
--------------------------------------------------------------------------------
/quota/pod2.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  namespace: demos
  name: quota-demo-unspecified-pod
spec:
  containers:
  - name: hostname
    image: gcr.io/google_containers/serve_hostname:1.1
--------------------------------------------------------------------------------
/service-mesh/istio/bookinfo/reset-bookinfo-route-v1.sh:
--------------------------------------------------------------------------------
#!/bin/bash
VERSION="0.1.6"
istioctl delete route-rule $(istioctl get route-rule)
istioctl create -f ../setup/binaries/istio-$VERSION/samples/apps/bookinfo/route-rule-all-v1.yaml
--------------------------------------------------------------------------------
/daemon_sets/svc.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  namespace: demos
  name: daemon-demo-svc
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 9376
  selector:
    demo: daemons
--------------------------------------------------------------------------------
/spring-boot/simple-hello-world/_import.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


cd $(relative project/simple-hello-world)

desc "Let's import our project to an awesome CI/CD system"
read -s
run "mvn fabric8:import -Dfabric8.namespace=default"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
target
.idea
*.iml
*.log
.classpath
.project
.settings
spring-boot/**/project/**
**/cli/**
ticket-monster-msa/**/project/**
camel/**/project/**
service-mesh/**/project/**
service-mesh/**/binaries/**
javac-services.**
/**/istio-0.*/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# kube-demos
Kubernetes demos

To run these demos you need 'pv' and 'tmux' installed.

On Mac OS X, you also need to install 'coreutils'

```
$ brew install coreutils pv tmux
```

Additional notes:

* For openshift, relax the SCCs
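
A minimal sketch of what "relax the SCCs" might look like for the `demos` namespace used by these demos (this mirrors what `setup.sh` and `service-mesh/istio-openshift/setup-namespace.sh` do; adjust the service account to your own project):

```
$ oc adm policy add-scc-to-user anyuid system:serviceaccount:demos:default
$ oc adm policy add-scc-to-user privileged system:serviceaccount:demos:default
```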
--------------------------------------------------------------------------------
/ticket-monster-msa/orders/_port-forward-mysql.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

MYSQL_POD=$(oc get pod | grep -i running | grep mysqlorders | awk '{print $1}')

desc "Port forward the mysql pod locally"
run "oc port-forward $MYSQL_POD 3306:3306"
--------------------------------------------------------------------------------
/quota/pod1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  namespace: demos
  name: quota-demo-large-pod
spec:
  containers:
  - name: hostname
    image: gcr.io/google_containers/serve_hostname:1.1
    resources:
      limits:
        cpu: 4.1
        memory: 4.1Gi
--------------------------------------------------------------------------------
/spring-boot/simple-hello-world/_debug.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


cd $(relative project/simple-hello-world)

desc "Make sure fabric8:run is already running!"
read -s


desc "Let's debug and port forward"
run "mvn fabric8:debug"
--------------------------------------------------------------------------------
/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/util.sh

desc "Nuke it all"
run "kubectl delete namespace demos"
while kubectl get namespace demos >/dev/null 2>&1; do
  run "kubectl get namespace demos"
done
run "kubectl get namespace demos"
run "kubectl get namespaces"
--------------------------------------------------------------------------------
/debezium/docker/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

CONTAINERS="zookeeper kafka mysql connect mysqlterm watcher"

kill -14 $(ps aux | grep minishift | grep vnNTL | awk '{print $2}') > /dev/null 2>&1


tmux kill-pane -t 2
tmux kill-pane -t 1

docker rm -f $CONTAINERS > /dev/null 2>&1
--------------------------------------------------------------------------------
/bluegreen/_bg_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(oc get route | grep bluegreen | awk '{print $2}')

desc "Run some load at our service through the OpenShift Route"
run "while true; do \\
  curl --connect-timeout 1 -s $IP; \\
  sleep 0.5; \\
done"
--------------------------------------------------------------------------------
/ticket-monster-msa/search/_search_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

until $(curl --output /dev/null --silent --head --fail http://localhost:8080/health); do
  sleep 5
done

desc "Find all events"
run "curl -s http://localhost:8080/search/events | pretty-json"
--------------------------------------------------------------------------------
/ticket-monster-msa/admin/_admin_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

until $(curl --output /dev/null --silent --head --fail http://localhost:8080/health); do
  sleep 5
done


desc "Find all events via admin"
run "curl -s http://localhost:8080/admin/forge/events | pretty-json"
--------------------------------------------------------------------------------
/debezium/docker/key.json:
--------------------------------------------------------------------------------
{
  "schema": {
    "type": "struct",
    "name": "dbserver1.inventory.customers.Key",
    "optional": false,
    "fields": [
      {
        "field": "id",
        "type": "int32",
        "optional": false
      }
    ]
  },
  "payload": {
    "id": 1004
  }
}
--------------------------------------------------------------------------------
/daemon_sets/_daemon_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "Color each node, slowly"
run "for NODE in \$(kubectl get nodes -o name | grep -i master | cut -f2 -d/); do \\
  kubectl label node \$NODE color=red; \\
  kubectl --namespace=demos describe ds daemons-demo-daemon; \\
  sleep 15; \\
done"
--------------------------------------------------------------------------------
/services/_scale_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "Resize the RC and watch the service backends change"
run "kubectl --namespace=demos scale deploy deployment-demo --replicas=1"
run "kubectl --namespace=demos scale rc deployment-demo --replicas=2"
run "kubectl --namespace=demos scale rc deployment-demo --replicas=5"
--------------------------------------------------------------------------------
/deployment/_deploy_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(kubectl --namespace=demos get svc deployment-demo \
    -o go-template='{{.spec.clusterIP}}')

run "minishift ssh -- '\\
    while true; do \\
      curl --connect-timeout 1 -s $IP; \\
      sleep 0.5; \\
    done \\
    '"
--------------------------------------------------------------------------------
/rolling_update/_rolling_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(kubectl --namespace=demos get svc update-demo-svc \
    -o go-template='{{.spec.clusterIP}}')

run "minishift ssh -- '\\
    while true; do \\
      curl --connect-timeout 1 -s $IP; \\
      sleep 0.5; \\
    done \\
    '"
--------------------------------------------------------------------------------
/services/_scale_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(kubectl --namespace=demos get svc deployment-demo \
    -o go-template='{{.spec.clusterIP}}')

run "minishift ssh -- '\\
    while true; do \\
      curl --connect-timeout 1 -s $IP && echo; \\
      sleep 0.5; \\
    done \\
    '"
--------------------------------------------------------------------------------
/ticket-monster-monolith/mysql:
--------------------------------------------------------------------------------
#!/bin/sh

MYSQL_POD_NAME=$(kubectl get pod | grep Running | grep mysql | awk '{ print $1 }')
MYSQL_POD_IP=$(kubectl describe pod $MYSQL_POD_NAME | grep IP | awk '{ print $2 }')

kubectl exec -it $MYSQL_POD_NAME -- /opt/rh/rh-mysql57/root/usr/bin/mysql -h$MYSQL_POD_IP -P3306 -uroot -padmin "$@" ticketmonster 2>&1 | grep -v "Warning: Using a password"
--------------------------------------------------------------------------------
/ticket-monster-msa/admin/mysql:
--------------------------------------------------------------------------------
#!/bin/sh

MYSQL_POD_NAME=$(kubectl get pod | grep Running | grep mysqladmin | awk '{ print $1 }')
MYSQL_POD_IP=$(kubectl describe pod $MYSQL_POD_NAME | grep IP | awk '{ print $2 }')

kubectl exec -it $MYSQL_POD_NAME -- /opt/rh/rh-mysql56/root/usr/bin/mysql -h$MYSQL_POD_IP -P3306 -uroot -padmin "$@" ticketmonster 2>&1 | grep -v "Warning: Using a password"
--------------------------------------------------------------------------------
/ticket-monster-msa/orders/mysql:
--------------------------------------------------------------------------------
#!/bin/sh

MYSQL_POD_NAME=$(kubectl get pod | grep Running | grep mysqlorders | awk '{ print $1 }')
MYSQL_POD_IP=$(kubectl describe pod $MYSQL_POD_NAME | grep IP | awk '{ print $2 }')

kubectl exec -it $MYSQL_POD_NAME -- /opt/rh/rh-mysql56/root/usr/bin/mysql -h$MYSQL_POD_IP -P3306 -uroot -padmin "$@" ticketmonster 2>&1 | grep -v "Warning: Using a password"
--------------------------------------------------------------------------------
/daemon_sets/daemon.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  namespace: demos
  name: daemons-demo-daemon
spec:
  template:
    metadata:
      labels:
        demo: daemons
    spec:
      nodeSelector:
        color: red
      containers:
      - name: hostname
        image: gcr.io/google_containers/serve_hostname:1.1
--------------------------------------------------------------------------------
/bluegreen/_bg_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(kubectl --namespace=demos get svc app-green-v2 \
    -o go-template='{{.spec.clusterIP}}')

desc "Smoke test our green/v2 app by pointing directly at svc"
run "minishift ssh -- '\\
    while true; do \\
      curl --connect-timeout 1 -s $IP; \\
      sleep 0.5; \\
    done \\
    '"
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/util.sh


oc new-project demos
oc policy add-role-to-user edit system:serviceaccount:demos:deployer

oc policy add-role-to-user edit system:serviceaccount:demos:default
oc adm policy add-scc-to-user anyuid system:serviceaccount:demos:default
oc policy add-role-to-user view system:serviceaccount:$(oc project -q):default -n $(oc project -q)
--------------------------------------------------------------------------------
/daemon_sets/_daemon_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

IP=$(kubectl --namespace=demos get svc daemon-demo-svc \
    -o go-template='{{.spec.clusterIP}}')

run "gcloud compute ssh --zone=us-central1-b $SSH_NODE --command '\\
    while true; do \\
      curl --connect-timeout 1 -s $IP && echo || echo \"(timeout)\"; \\
      sleep 1; \\
    done \\
    '"
--------------------------------------------------------------------------------
/camel/api-router/_catalog_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

until $(curl --output /dev/null --silent --fail http://localhost:8181/catalog/category); do
  sleep 5
done


desc "Query the category"
run "curl -s http://localhost:8181/catalog/category | pretty-json"

desc "Query the products"
run "curl -s http://localhost:8181/catalog/product | pretty-json"
--------------------------------------------------------------------------------
/graceful_termination/pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  namespace: demos
  name: graceful-demo-pod
  labels:
    demo: graceful-termination
spec:
  terminationGracePeriodSeconds: 15
  containers:
  - name: busybox
    image: busybox
    command:
    - sh
    - -c
    - "trap \"while true; do echo 'self destructing'; sleep 1; done\" SIGTERM; while true; do date; sleep 1; done"
--------------------------------------------------------------------------------
/secrets/pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  namespace: demos
  name: secrets-demo-pod
spec:
  containers:
  - name: busybox
    image: busybox
    command:
    - sh
    - -c
    - while true; do sleep 3600; done
    volumeMounts:
    - name: my-password
      mountPath: /data
  volumes:
  - name: my-password
    secret:
      secretName: my-secret-password
--------------------------------------------------------------------------------
/ticket-monster-monolith/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
. $(dirname ${BASH_SOURCE})/../util.sh

echo "delete mysql"
oc process mysql-persistent -n openshift MYSQL_DATABASE=ticketmonster MYSQL_USER=ticket MYSQL_PASSWORD=monster MYSQL_ROOT_PASSWORD=admin | oc delete -f -

echo "delete ticketmonster"
oc delete dc/ticketmonster
oc delete svc/ticketmonster
oc delete bc/ticketmonster-monolith
oc delete is/ticketmonster-monolith
--------------------------------------------------------------------------------
/debezium/docker/inventory-connector.json:
--------------------------------------------------------------------------------
{ "name": "inventory-connector", "config": { "connector.class": "io.debezium.connector.mysql.MySqlConnector", "tasks.max": "1", "database.hostname": "mysql", "database.port": "3306", "database.user": "debezium", "database.password": "dbz", "database.server.id": "184054", "database.server.name": "dbserver1", "database.whitelist": "inventory", "database.history.kafka.bootstrap.servers": "kafka:9092", "database.history.kafka.topic": "dbhistory.inventory" } }
--------------------------------------------------------------------------------
/service-mesh/envoy/spring-boot/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../../util.sh

istioctl delete route-rule productpage-default
istioctl delete route-rule reviews-default
istioctl delete route-rule ratings-default
istioctl delete route-rule details-default
istioctl delete route-rule reviews-test-v2
istioctl delete route-rule ratings-test-delay
#istioctl delete mixer-rule ratings-ratelimit

kubectl delete -f $(relative app/bookinfo.yaml)
--------------------------------------------------------------------------------
/obsidian/install-minishift.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "creating obsidian project"
run "oc new-project obsidian"

desc "enter your github access token:"
read TOKEN

curl -s -L -o /tmp/deploy-obsidian.sh https://raw.githubusercontent.com/openshiftio/appdev-documentation/production/scripts/deploy_launchpad_mission.sh
chmod +x /tmp/deploy-obsidian.sh

desc "installing..."
run "/tmp/deploy-obsidian.sh -p obsidian -i admin:admin -g christian-posta:$TOKEN"
--------------------------------------------------------------------------------
/service-mesh/istio/bookinfo/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

VERSION="0.1.6"
. $(dirname ${BASH_SOURCE})/../../../util.sh

istioctl delete route-rule productpage-default
istioctl delete route-rule reviews-default
istioctl delete route-rule ratings-default
istioctl delete route-rule details-default
istioctl delete route-rule reviews-test-v2
istioctl delete route-rule ratings-test-delay
#istioctl delete mixer-rule ratings-ratelimit

kubectl delete -f $(relative ../setup/binaries/istio-$VERSION/samples/apps/bookinfo/bookinfo.yaml)
--------------------------------------------------------------------------------
/3scale/openshift-gateway/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


oc delete template 3scale-gateway --namespace=demos


oc delete dc threescalegw --namespace=demos
oc delete bc threescalegw --namespace=demos
oc delete svc threescalegw --namespace=demos
oc delete is threescalegw --namespace=demos
oc delete is threescalegw-centos --namespace=demos
oc delete build $(oc get builds | grep -i complete | grep threescalegw | awk '{print $1}')

docker rmi -f $(docker images | grep threescalegw | awk '{print $3}')
--------------------------------------------------------------------------------
/pods/pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  namespace: demos
  name: pods-demo-pod
  labels:
    demo: pods
spec:
  containers:
  - name: busybox
    image: busybox
    command:
    - sh
    - -c
    - while true; do (hostname; date) > /data/index.html; sleep 1; done
    volumeMounts:
    - name: content
      mountPath: /data
  - name: nginx
    image: nginx
    volumeMounts:
    - name: content
      mountPath: /usr/share/nginx/html
      readOnly: true
  volumes:
  - name: content
--------------------------------------------------------------------------------
/debezium/docker/delete1.json:
--------------------------------------------------------------------------------
{
  "schema": "{....}",
  "payload": {
    "before": {
      "id": 1004,
      "first_name": "Anne Marie",
      "last_name": "Kretchmar",
      "email": "annek@noanswer.org"
    },
    "after": null,
    "source": {
      "name": "mysql-server-1",
      "server_id": 223344,
      "ts_sec": 1477431692,
      "gtid": null,
      "file": "mysql-bin.000003",
      "pos": 805,
      "row": 0,
      "snapshot": null
    },
    "op": "d",
    "ts_ms": 1477431692906
  }
}
--------------------------------------------------------------------------------
/spring-boot/simple-hello-world/_config-demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


cd $(relative project/simple-hello-world)


desc "Let's create a ConfigMap"
run "cat helloserviceConfigMap.yml"
run "kubectl create -f helloserviceConfigMap.yml"
run "kubectl get configMap"
run "kubectl get configMap helloservice -o yaml"

desc "Update your code to use some application properties"
read -s

desc "Run our app outside kubernetes, but connected to it"
run "mvn spring-boot:run"

desc "Run our app INSIDE kubernetes"
run "mvn fabric8:run"
--------------------------------------------------------------------------------
/replication_controllers/rc.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ReplicationController
metadata:
  name: hostnames
spec:
  replicas: 5
  selector:
    run: hostnames
  template:
    metadata:
      labels:
        run: hostnames
    spec:
      containers:
      - image: gcr.io/google_containers/serve_hostname:1.1
        imagePullPolicy: IfNotPresent
        name: hostnames
        resources: {}
        terminationMessagePath: /dev/termination-log
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      securityContext: {}
      terminationGracePeriodSeconds: 30
--------------------------------------------------------------------------------
/graceful_termination/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "Create a pod"
run "cat $(relative pod.yaml)"
run "kubectl --namespace=demos create -f $(relative pod.yaml)"

desc "Hey look, a pod!"
run "kubectl --namespace=demos get pods"

desc "Get the pod's logs"
run "kubectl --namespace=demos logs graceful-demo-pod --follow"

desc "Delete the pod"
run "kubectl --namespace=demos delete pod graceful-demo-pod"
run "kubectl --namespace=demos get pods graceful-demo-pod"

desc "Get the pod's logs"
run "kubectl --namespace=demos logs graceful-demo-pod --follow"
--------------------------------------------------------------------------------
/secrets/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "Create a secret"
run "cat $(relative secret.yaml)"
run "kubectl --namespace=demos create -f $(relative secret.yaml)"

desc "Create a pod which uses that secret"
run "cat $(relative pod.yaml)"
run "kubectl --namespace=demos create -f $(relative pod.yaml)"

while true; do
  run "kubectl --namespace=demos get pod secrets-demo-pod"
  status=$(kubectl --namespace=demos get pod secrets-demo-pod | tail -1 | awk '{print $3}')
  if [ "$status" == "Running" ]; then
    break
  fi
done
run "kubectl --namespace=demos exec --tty -i secrets-demo-pod sh"
--------------------------------------------------------------------------------
/openshift/pipelines/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash


oc delete dc jenkins nodejs-mongodb-example mongodb --namespace=demos
oc delete svc jenkins jenkins-jnlp mongodb nodejs-mongodb-example --namespace=demos
oc delete route jenkins nodejs-mongodb-example --namespace=demos
oc delete bc nodejs-mongodb-example sample-pipeline --namespace=demos
oc delete is/nodejs-mongodb-example --namespace=demos
oc delete pod $(oc get pod | grep nodejs | awk '{print $1}')


echo "removing docker images that have 'example' in their name"
docker rmi -f $(docker images | grep nodejs-mongodb-example | awk '{print $3}')
docker rmi -f $(docker images | grep nodejs-mongodb-example | awk '{print $3}')
--------------------------------------------------------------------------------
/bluegreen/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash


oc delete dc $(oc get dc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete deployment $(oc get deployment --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete svc $(oc get svc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete route $(oc get route --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete pod $(oc get pod --namespace=demos | awk '{print $1}')
--------------------------------------------------------------------------------
/deployment/_deploy_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname ${BASH_SOURCE})/../util.sh

run "" # wait for first input
--------------------------------------------------------------------------------
/3scale/openshift-gateway/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


desc "enter your access token"
read ACCESS_TOKEN

desc "enter your admin site prefix (ie, prefix-admin.3scale.net)"
read PREFIX

desc "Create Secrets"
run "oc secret new-basicauth apicast-configuration-url-secret --password=\"https://$ACCESS_TOKEN@$PREFIX-admin.3scale.net\""

desc "Creating gateway"
run "curl -s -L https://raw.githubusercontent.com/3scale/3scale-amp-openshift-templates/2.0.0.GA-redhat-2/apicast-gateway/apicast.yml | sed 's/3scale-amp20/registry.access.redhat.com\/3scale-amp20/g' | oc new-app -f -"

desc "Creating route"
run "oc expose svc/apicast"

desc "now create your API on 3scale, promote it to production, and try to curl it!"
read -s
--------------------------------------------------------------------------------
/camel/api-router/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


rm -fr $(relative project/api-router)

oc delete dc api-router-app --namespace=demos
oc delete bc api-router-app --namespace=demos
oc delete svc api-router-app --namespace=demos
oc delete svc api-router --namespace=demos
oc delete is api-router-app --namespace=demos
oc delete template api-router-app --namespace=demos

oc delete build $(oc get builds | grep -i complete | grep api-router-app | awk '{print $1}')
oc delete pod $(oc get pod | grep Completed | awk '{print $1}')

oc delete route api-router

oc delete is fis-java-openshift --namespace=demos
oc delete is fis-karaf-openshift --namespace=demos

docker rmi -f $(docker images | grep api-router-app | awk '{print $3}')
--------------------------------------------------------------------------------
/ticket-monster-msa/orders/_orders_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

until $(curl --output /dev/null --silent --head --fail http://localhost:8080/health); do
  sleep 5
done

#curl -v -X POST -H "Accept: application/json" -H "Content-Type: application/json" -d '{"ticketRequests":[{"ticketPriceGuideId":1,"quantity":1}],"email":"foo@bar.com","performance":1,"performanceName":"Rock concert of the decade at Roy Thomson Hall"}' http://localhost:8080/orders/bookings

desc "Add a booking"
run "curl -s -X POST -H \"Accept: application/json\" -H \"Content-Type: application/json\" -d '{\"ticketRequests\":[{\"ticketPriceGuideId\":1,\"quantity\":1}],\"email\":\"foo@bar.com\",\"performance\":1,\"performanceName\":\"Rock concert of the decade at Roy Thomson Hall\"}' http://localhost:8080/orders/bookings | pretty-json"
--------------------------------------------------------------------------------
/spring-boot/hystrix-hello-world/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


rm -fr $(relative project/hystrix-hello-world)
oc delete dc $(oc get dc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete svc $(oc get svc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete route $(oc get route --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete pod $(oc get pod --namespace=demos | awk '{print $1}')


echo "removing docker images that have 'example' in their name"
docker rmi -f $(docker images | grep example | awk '{print $3}')
--------------------------------------------------------------------------------
/rolling_update/rc-v1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ReplicationController
metadata:
  namespace: demos
  name: update-demo-rc-v1
spec:
  replicas: 5
  selector:
    demo: update
    demo-version: v1
  template:
    metadata:
      labels:
        demo: update
        demo-version: v1
    spec:
      containers:
      - name: busybox
        image: busybox
        command:
        - sh
        - -c
        - while true; do echo "$(hostname) v1" > /data/index.html; sleep 60; done
        volumeMounts:
        - name: content
          mountPath: /data
      - name: nginx
        image: nginx
        volumeMounts:
        - name: content
          mountPath: /usr/share/nginx/html
          readOnly: true
      volumes:
      - name: content
--------------------------------------------------------------------------------
/rolling_update/rc-v2.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ReplicationController
metadata:
  namespace: demos
  name: update-demo-rc-v2
spec:
  replicas: 5
  selector:
    demo: update
    demo-version: v2
  template:
    metadata:
      labels:
        demo: update
        demo-version: v2
    spec:
      containers:
      - name: busybox
        image: busybox
        command:
        - sh
        - -c
        - while true; do echo "$(hostname) v2" > /data/index.html; sleep 60; done
        volumeMounts:
        - name: content
          mountPath: /data
      - name: nginx
        image: nginx
        volumeMounts:
        - name: content
          mountPath: /usr/share/nginx/html
          readOnly: true
      volumes:
      - name: content
--------------------------------------------------------------------------------
/service-mesh/istio/setup/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
. $(dirname ${BASH_SOURCE})/../../../util.sh


# delete addons
kubectl delete deploy/grafana
kubectl delete deploy/prometheus
kubectl delete deploy/servicegraph
kubectl delete deploy/zipkin

kubectl delete svc/grafana
kubectl delete svc/prometheus
kubectl delete svc/servicegraph
kubectl delete svc/zipkin

kubectl delete cm/prometheus

# delete istio infra
kubectl delete deploy/istio-egress
kubectl delete deploy/istio-ingress
kubectl delete deploy/istio-pilot
kubectl delete deploy/istio-mixer

kubectl delete svc/istio-ingress
kubectl delete svc/istio-egress
kubectl delete svc/istio-pilot
kubectl delete svc/istio-mixer

kubectl delete cm/istio

kubectl delete sa/istio-ingress-service-account
kubectl delete sa/istio-pilot-service-account
--------------------------------------------------------------------------------
/ticket-monster-msa/ui/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


rm -fr $(relative project/ticket-monster-ui)

oc delete dc $(oc get dc --namespace=demos | grep ticket-monster-ui | awk '{print $1}') --namespace=demos
oc delete svc ticket-monster-ui --namespace=demos
#oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
#oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete route ticketmonster --namespace=demos
oc delete pod $(oc get pod --namespace=demos | grep ticket-monster-ui |awk '{print $1}')
oc delete is ticket-monster-ui --namespace=demos
oc delete bc ticket-monster-ui --namespace=demos

echo "removing docker images that have 'example' in their name"
docker rmi -f $(docker images | grep ticket-monster-ui | awk '{print $3}')
--------------------------------------------------------------------------------
/rolling_update/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

SOURCE_DIR=$PWD


desc "Create a service that fronts any version of this demo"
run "cat $(relative svc.yaml)"
run "kubectl --namespace=demos create -f $(relative svc.yaml)"

desc "Run v1 of our app"
run "cat $(relative rc-v1.yaml)"
run "kubectl --namespace=demos create -f $(relative rc-v1.yaml)"
run "kubectl --namespace=demos get pods -l demo=update"

tmux split-window -v -d -c $SOURCE_DIR
tmux send-keys -t bottom C-z './_rolling_1.sh' Enter

desc "Prep the load"
read -s

desc "Do a rolling update to v2"
run "cat $(relative rc-v2.yaml)"
run "kubectl --namespace=demos rolling-update \\
  update-demo-rc-v1 -f $(relative rc-v2.yaml) --update-period=5s"

tmux send-keys -t bottom C-c
tmux send-keys -t bottom C-z 'exit' Enter
--------------------------------------------------------------------------------
/daemon_sets/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

for NODE in $(kubectl get nodes -o name | cut -f2 -d/); do
  kubectl label node $NODE color- --overwrite >/dev/null 2>&1
done

desc "No labels on nodes"
run "kubectl get nodes \\
  -o go-template='{{range .items}}{{.metadata.name}}{{\"\t\"}}{{.metadata.labels}}{{\"\n\"}}{{end}}'"

desc "Run a service to front our daemon"
run "cat $(relative svc.yaml)"
run "kubectl --namespace=demos create -f $(relative svc.yaml)"

desc "Run our daemon"
run "cat $(relative daemon.yaml)"
run "kubectl --namespace=demos create -f $(relative daemon.yaml)"
run "kubectl --namespace=demos describe ds daemons-demo-daemon"

tmux new -d -s my-session \
    "$(dirname ${BASH_SOURCE})/_daemon_1.sh" \; \
    split-window -h -d "sleep 15; $(dirname $BASH_SOURCE)/_daemon_2.sh" \; \
    attach \;
--------------------------------------------------------------------------------
/pods/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../util.sh

desc "There are no running pods"
run "kubectl --namespace=demos get pods"

desc "Create a pod"
run "cat $(relative pod.yaml)"
run "kubectl --namespace=demos create -f $(relative pod.yaml)"

desc "Hey look, a pod!"
run "kubectl --namespace=demos get pods"

desc "Get the pod's IP"
run "kubectl --namespace=demos get pod pods-demo-pod -o yaml | grep podIP"

trap "" SIGINT
IP=$(kubectl --namespace=demos get pod pods-demo-pod -o yaml \
    | grep podIP \
    | cut -f2 -d:)

desc "login to node and poke the pod"
run "minishift ssh -- ' \\
    for i in \$(seq 1 10); do \\
      curl --connect-timeout 1 -s $IP; \\
      sleep 1; \\
    done\\
    '"
desc "Let's cleanup and delete that pod"
run "kubectl --namespace=demos delete pod pods-demo-pod"
--------------------------------------------------------------------------------
/openshift/pipelines/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

SOURCE_DIR=$PWD

# Make sure jenkins gets auto-created when deploying a jenkins pipeline
# check the master-config.yaml file

desc "Create our first pipeline"

TEMPLATE_EXISTS=$(oc get template | grep ticket-monster-mysql)
if [[ ! $TEMPLATE_EXISTS ]]; then

  desc "Let's make sure the jenkins template is there"
  run "oc create -f https://raw.githubusercontent.com/openshift/openshift-ansible/master/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json"
fi

desc "Create a sample pipeline"
run "oc new-app -f https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/samplepipeline.json"

desc "Go to web console, or continue to kick off the build here"
read -s

run "oc start-build sample-pipeline"
--------------------------------------------------------------------------------
/deployment/deployment.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: deployment-demo
  namespace: demos
spec:
  selector:
    matchLabels:
      demo: deployment
  replicas: 5
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        demo: deployment
        version: v1
    spec:
      containers:
      - name: busybox
        image: busybox
        command: [ "sh", "-c", "while true; do echo $(hostname) v1 > /data/index.html; sleep 60; done" ]
        volumeMounts:
        - name: content
          mountPath: /data
      - name: nginx
        image: nginx
        volumeMounts:
        - name: content
          mountPath: /usr/share/nginx/html
          readOnly: true
      volumes:
      - name: content
--------------------------------------------------------------------------------
/camel/simple-microservices/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

desc "Get project from github"
run "git clone git@github.com:christian-posta/fabric8-hello.git $(relative project/simple-microservices)"

SOURCE_DIR=$PWD

desc "We now have a project with two microservices"
run "cd $(relative project/simple-microservices) && ls -l"

desc "Let's build and deploy the helloswarm service"
run "cd helloswarm"
run "mvn clean install"
run "mvn fabric8:deploy"

desc "see what's been deployed"
run "oc get pod"
run "oc get service"

desc "now let's deploy the client with a circuit breaker"
run "cd ../client-hystrix"
run "mvn clean install"

tmux split-window -v -d -c $SOURCE_DIR
tmux send-keys -t bottom C-z 'oc scale dc helloswarm --replicas=0'

run "mvn fabric8:run"

tmux send-keys -t bottom C-c
tmux send-keys -t bottom C-z 'exit' Enter
--------------------------------------------------------------------------------
/service-mesh/envoy/spring-boot/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../../util.sh

if [ -d "$(relative project/sb-webmvc-envoy)" ]; then
  pushd $(relative project/sb-webmvc-envoy)
  git pull
  popd
else
  git clone https://github.com/christian-posta/sb-webmvc-envoy.git $(relative project/sb-webmvc-envoy)
fi


desc "Here's our project"
run "ls -l $(relative project/sb-webmvc-envoy)"

pushd $(relative project/sb-webmvc-envoy/spring-boot-ipaddress-service)

desc "Let's take a look at the ipaddress service"
run "ls -l"

desc "let's build our application"
run "mvn clean install"

desc "let's examine the kubernetes resource files"
run "cat target/classes/META-INF/fabric8/openshift.yml"

desc "let's deploy our service"
run "mvn fabric8:build fabric8:deploy"
run "oc deploy spring-boot-ipaddress-se --latest"
run "oc get pod"
--------------------------------------------------------------------------------
/service-mesh/istio/echo/app/echo-app.yaml:
--------------------------------------------------------------------------------
# Example service with an injected proxy
apiVersion: v1
kind: Service
metadata:
  name: echo
  labels:
    app: echo
spec:
  ports:
  - port: 80
    targetPort: 80
    name: http
  - port: 8080
    targetPort: 8080
    name: http-alternative
  selector:
    app: echo
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: echo
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: echo
        version: unversioned
    spec:
      containers:
      - name: app
        image: docker.io/istio/app:2017-03-17-22.11.25
        imagePullPolicy: Always
        args:
        - --port
        - "80"
        - --port
        - "8080"
        - --version
        - "unversioned"
        ports:
        - containerPort: 80
        - containerPort: 8080
---
--------------------------------------------------------------------------------
/service-mesh/istio/echo/app/logic-app.yaml:
--------------------------------------------------------------------------------
# Example service with an injected proxy
apiVersion: v1
kind: Service
metadata:
  name: logic
  labels:
    app: logic
spec:
  ports:
  - port: 80
    targetPort: 8080
    name: http
  - port: 8080
    targetPort: 80
    name: http-alternative
  selector:
    app: logic
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: logic
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: logic
        version: unversioned
    spec:
      containers:
      - name: app
        image: docker.io/istio/app:2017-03-17-22.11.25
        imagePullPolicy: Always
        args:
        - --port
        - "8080"
        - --port
        - "80"
        - --version
        - "unversioned"
        ports:
        - containerPort: 8080
        - containerPort: 80
---
--------------------------------------------------------------------------------
/service-mesh/istio/echo/app/vanilla-app.yaml:
--------------------------------------------------------------------------------
# Example service with an injected proxy
apiVersion: v1
kind: Service
metadata:
  name: vanilla
  labels:
    app: vanilla
spec:
  ports:
  - port: 80
    targetPort: 80
    name: http
  - port: 8080
    targetPort: 8080
    name: http-alternative
  selector:
    app: vanilla
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: vanilla
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: vanilla
        version: unversioned
    spec:
      containers:
      - name: app
        image: docker.io/istio/app:2017-03-22-17.30.06
        imagePullPolicy: Always
        args:
        - --port
        - "80"
        - --port
        - "8080"
        - --version
        - "unversioned"
        ports:
        - containerPort: 80
        - containerPort: 8080
---
--------------------------------------------------------------------------------
/bluegreen/app-blue-v1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: app-blue-v1
spec:
  ports:
  - port: 80
    protocol: TCP
  selector:
    demo: update
    demo-color: blue
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: app-blue-v1
spec:
  replicas: 5
  template:
    metadata:
      labels:
        demo: update
        demo-color: blue
        demo-version: v1
    spec:
      containers:
      - name: busybox
        image: busybox
        command:
        - sh
        - -c
        - while true; do echo "$(hostname) is blue v1" > /data/index.html; sleep 60; done
        volumeMounts:
        - name: content
          mountPath: /data
      - name: nginx
        image: nginx
        volumeMounts:
        - name: content
          mountPath: /usr/share/nginx/html
          readOnly: true
      volumes:
      - name: content
--------------------------------------------------------------------------------
/bluegreen/app-green-v2.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: app-green-v2
spec:
  ports:
  - port: 80
    protocol: TCP
  selector:
    demo: update
    demo-color: green
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: app-green-v2
spec:
  replicas: 5
  template:
    metadata:
      labels:
        demo: update
        demo-color: green
        demo-version: v2
    spec:
      containers:
      - name: busybox
        image: busybox
        command:
        - sh
        - -c
        - while true; do echo "$(hostname) is green v2" > /data/index.html; sleep 60; done
        volumeMounts:
        - name: content
          mountPath: /data
      - name: nginx
        image: nginx
        volumeMounts:
        - name: content
          mountPath: /usr/share/nginx/html
          readOnly: true
      volumes:
      - name: content
--------------------------------------------------------------------------------
/ticket-monster-msa/search/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh


rm -fr $(relative project/ticket-monster-search)

oc delete dc $(oc get dc --namespace=demos | grep ticket-monster-search | awk '{print $1}') --namespace=demos
oc delete svc tm-search --namespace=demos
#oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
#oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos
oc delete pod $(oc get pod --namespace=demos | grep ticket-monster-search |awk '{print $1}')
oc delete is ticket-monster-search --namespace=demos
oc delete bc ticket-monster-search-s2i --namespace=demos

oc delete build $(oc get builds | grep -i complete | grep ticket-monster-search | awk '{print $1}')
oc delete pod $(oc get pod | grep Completed | awk '{print $1}')

echo "removing docker images that have 'example' in their name"
docker rmi -f $(docker images | grep ticket-monster-search | awk '{print $3}')
--------------------------------------------------------------------------------
/ticket-monster-msa/search/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

. $(dirname ${BASH_SOURCE})/../../util.sh

desc "Get project from github"
run "git clone git@github.com:christian-posta/ticket-monster-search.git $(relative project/ticket-monster-search)"

SOURCE_DIR=$PWD

desc "We now have a project!"
run "cd $(relative project/ticket-monster-search) && ls -l"

desc "Let's build the project and run locally!"

tmux split-window -v -d -c $SOURCE_DIR
tmux send-keys -t bottom C-z './_search_1.sh' Enter

run "mvn wildfly-swarm:run"

tmux send-keys -t bottom C-c
tmux send-keys -t bottom C-z 'exit' Enter

run "mvn -Pf8,default fabric8:deploy"

## sleight of hand.. bounce the UI pod because we've prob changed the tm-search service IP
## by deleting and restarting it. Typically the UI service would deploy its own tm-search with
## appropriate selectors, etc. but we'll hide it by bouncing it here:
oc delete pod $(oc get pod | grep ticket-monster-ui | awk '{print $1}') > /dev/null 2>&1
--------------------------------------------------------------------------------
/service-mesh/istio/echo/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

$(dirname ${BASH_SOURCE})/../../../util.sh 4 | 5 | desc "let's take a look at one of the pods" 6 | run "cat $(relative app/echo-app.yaml)" 7 | 8 | desc "now let's add the sidecar proxy" 9 | run "istioctl kube-inject -f $(relative app/echo-app.yaml)" 10 | 11 | desc "let's add an echo-server with the proxy enabled" 12 | run "kubectl apply -f <(istioctl kube-inject -f $(relative app/echo-app.yaml))" 13 | 14 | desc "let's add a client to call the echo server" 15 | run "kubectl apply -f <(istioctl kube-inject -f $(relative app/logic-app.yaml))" 16 | 17 | desc "show pods" 18 | run "kubectl get pod" 19 | 20 | ECHO_POD=$(kubectl get pod | grep ^echo | awk '{ print $1 }') 21 | desc "send requests from echo pod to logic pod" 22 | run "kubectl exec $ECHO_POD -c app /bin/client -- -url http://logic/demo-text-here --count 10" 23 | 24 | LOGIC_POD=$(kubectl get pod | grep ^logic | awk '{ print $1 }') 25 | desc "send requests from logic pod to echo pod" 26 | run "kubectl exec $LOGIC_POD -c app /bin/client -- -url http://echo/demo-text-there --count 10" -------------------------------------------------------------------------------- /spring-boot/simple-hello-world/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | rm -fr $(relative project/simple-hello-world) 7 | oc delete dc $(oc get dc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 8 | oc delete svc $(oc get svc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 9 | oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 10 | oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 11 | oc delete route $(oc get route --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 12 | oc delete pod $(oc get pod --namespace=demos | awk '{print $1}') 13 | oc delete is simple-hello-world --namespace=demos 14 | oc delete bc/simple-hello-world-s2i 15 | 16 | oc delete build $(oc get builds | grep -i complete | grep simple-hello-world | awk '{print $1}') 17 | 18 | echo "removing docker images that have 'example' in their name" 19 | docker rmi -f $(docker images | grep simple-hello-world | awk '{print $3}') -------------------------------------------------------------------------------- /ticket-monster-msa/ui/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | desc "Get project from github" 6 | run "git clone git@github.com:christian-posta/ticket-monster-ui.git $(relative project/ticket-monster-ui)" 7 | 8 | 9 | desc "We now have a project!" 10 | run "cd $(relative project/ticket-monster-ui/web) && ls -l" 11 | 12 | 13 | desc "Let's build the Docker image and deploy to OpenShift" 14 | read -s 15 | 16 | desc "Create a new build in OpenShift" 17 | run "oc new-build --binary=true --name ticket-monster-ui" 18 | run "oc start-build ticket-monster-ui --from-dir=." 19 | 20 | BUILD_ID=$(oc get build | grep ticket | grep Running | awk '{print $1}') 21 | run "oc logs -f build/$BUILD_ID" 22 | 23 | desc "let's deploy the UI now!" 
24 | run "oc new-app ticket-monster-ui" 25 | 26 | desc "check status" 27 | run "oc status" 28 | run "oc get pod" 29 | 30 | desc "Create an openshift route" 31 | run "oc expose svc ticket-monster-ui --name=ticketmonster" 32 | run "oc get route" 33 | 34 | ROUTE_ID=$(oc get route | grep ticketmonster | awk '{print $2}') 35 | run "open http://$ROUTE_ID" -------------------------------------------------------------------------------- /ticket-monster-msa/admin/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | rm -fr $(relative project/ticket-monster-admin) 7 | 8 | oc delete dc ticket-monster-admin --namespace=demos 9 | oc delete dc mysqladmin --namespace=demos 10 | oc delete svc tm-admin --namespace=demos 11 | oc delete svc mysqladmin --namespace=demos 12 | #oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 13 | #oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 14 | oc delete pod $(oc get pod --namespace=demos | grep ticket-monster-admin |awk '{print $1}') 15 | oc delete is ticket-monster-admin --namespace=demos 16 | oc delete bc ticket-monster-admin-s2i --namespace=demos 17 | oc delete build $(oc get builds | grep -i complete | grep ticket-monster-admin | awk '{print $1}') 18 | oc delete pod $(oc get pod | grep Completed | awk '{print $1}') 19 | 20 | oc delete template ticket-monster-mysql --namespace=demos 21 | 22 | echo "removing docker images that have 'example' in their name" 23 | docker rmi -f $(docker images | grep ticket-monster-admin | awk '{print $3}') -------------------------------------------------------------------------------- /ticket-monster-msa/orders/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | rm -fr $(relative project/ticket-monster-orders) 7 | 8 | oc delete dc ticket-monster-orders --namespace=demos 9 | oc delete dc mysqlorders --namespace=demos 10 | oc delete svc tm-orders --namespace=demos 11 | oc delete svc mysqlorders --namespace=demos 12 | #oc delete rc $(oc get rc --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 13 | #oc delete configmap $(oc get configmap --namespace=demos | grep ^[a-z] | awk '{print $1}') --namespace=demos 14 | oc delete pod $(oc get pod --namespace=demos | grep ticket-monster-orders |awk '{print $1}') 15 | oc delete is ticket-monster-orders --namespace=demos 16 | oc delete bc ticket-monster-orders-s2i --namespace=demos 17 | oc delete build $(oc get builds | grep -i complete | grep ticket-monster-orders | awk '{print $1}') 18 | oc delete pod $(oc get pod | grep Completed | awk '{print $1}') 19 | 20 | oc delete template ticket-monster-mysql --namespace=demos 21 | 22 | echo "removing docker images that have 'example' in their name" 23 | docker rmi -f $(docker images | grep ticket-monster-orders | awk '{print $3}') -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/setup-kubeflix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # we should already have this 4 | #oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:demos:exposecontroller 5 | #oc apply -f http://central.maven.org/maven2/io/fabric8/devops/apps/exposecontroller/2.2.327/exposecontroller-2.2.327-openshift.yml 6 | #oc get cm/exposecontroller -o yaml | sed s/Route/NodePort/g | oc apply -f - 7 | 8 | # Turbine 9 | oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:demos:turbine 10 | oc apply -f http://central.maven.org/maven2/io/fabric8/kubeflix/turbine-server/1.0.28/turbine-server-1.0.28-openshift.yml 11 | 12 | # Hystrix 13 | oc apply -f http://central.maven.org/maven2/io/fabric8/kubeflix/hystrix-dashboard/1.0.28/hystrix-dashboard-1.0.28-openshift.yml 14 | 15 | # sleep for a sec to let the exposecontroller do it's thing 16 | sleep 15 17 | 18 | TURBINE_URL=$(oc get svc turbine-server -o yaml | grep exposeUrl | tr -s ' ' | cut -d ' ' -f3) 19 | HYSTRIX_DASHBOARD_URL=$(oc get svc hystrix-dashboard -o yaml | grep exposeUrl | tr -s ' ' | cut -d ' ' -f3) 20 | 21 | echo "turbine url: $TURBINE_URL" 22 | echo "hystrix-dashboard url: $HYSTRIX_DASHBOARD_URL" -------------------------------------------------------------------------------- /camel/simple-microservices/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | rm -fr $(relative project/simple-microservices) 7 | 8 | oc delete dc helloswarm --namespace=demos 9 | oc delete dc client --namespace=demos 10 | oc delete svc helloswarm --namespace=demos 11 | oc delete svc svc --namespace=demos 12 | oc delete pod $(oc get pod --namespace=demos | grep helloswarm |awk '{print $1}') 13 | oc delete pod $(oc get pod --namespace=demos | grep client |awk '{print $1}') 14 | oc delete is helloswarm --namespace=demos 15 | oc delete is client --namespace=demos 16 | oc delete bc helloswarm-s2i --namespace=demos 17 | oc delete bc client-s2i --namespace=demos 18 | oc delete bc client-hystrix-s2i --namespace=demos 19 | oc delete rc $(oc get rc | grep -i client-hystrix | awk '{print $1}') --namespace=demos 20 | oc delete build $(oc get builds | grep -i complete | grep helloswarm | awk '{print $1}') 21 | oc delete build $(oc get builds | grep -i complete | grep client | awk '{print $1}') 22 | oc delete pod $(oc get pod | grep Completed | awk '{print $1}') 23 | 24 | echo "removing docker images that have 'example' in their name" 25 | docker rmi -f $(docker images | grep helloswarm | awk '{print $3}') 26 | docker rmi -f $(docker images | grep client | awk '{print $3}') -------------------------------------------------------------------------------- /deployment/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | SOURCE_DIR=$PWD 6 | 7 | 8 | desc "Create a service that fronts any version of this demo" 9 | run "cat $(relative svc.yaml)" 10 | run "kubectl --namespace=demos apply -f $(relative svc.yaml)" 11 | 12 | desc "Deploy v1 of our app" 13 | run "cat $(relative deployment.yaml)" 14 | run "kubectl --namespace=demos apply -f $(relative deployment.yaml)" 15 | 16 | desc "Check out our deployment" 17 | run "kubectl get deployment" 18 | run "kubectl get pods" 19 | run "kubectl get svc" 20 | 21 | 22 | tmux split-window -v -d -c $SOURCE_DIR 23 | tmux send-keys -t bottom C-z './_deploy_1.sh' Enter 24 | 25 | desc "Ready to do a deployment?" 
26 | read -s 27 | 28 | desc "Update the deployment" 29 | run "cat $(relative deployment.yaml) | sed 's/ v1/ v2/g' | kubectl --namespace=demos apply -f-" 30 | 31 | desc "Deployment history" 32 | run "kubectl --namespace=demos rollout history deployment deployment-demo" 33 | 34 | desc "Rollback the deployment" 35 | run "kubectl --namespace=demos rollout undo deployment deployment-demo" 36 | 37 | tmux send-keys -t bottom C-c 38 | tmux send-keys -t bottom C-z 'exit' Enter 39 | 40 | desc "clean up" 41 | run "kubectl delete deployment/deployment-demo" 42 | run "kubectl delete svc/deployment-demo" -------------------------------------------------------------------------------- /util.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | readonly reset=$(tput sgr0) 4 | readonly green=$(tput bold; tput setaf 2) 5 | readonly yellow=$(tput bold; tput setaf 3) 6 | readonly blue=$(tput bold; tput setaf 6) 7 | 8 | function desc() { 9 | maybe_first_prompt 10 | echo "$blue# $@$reset" 11 | prompt 12 | } 13 | 14 | function prompt() { 15 | echo -n "$yellow\$ $reset" 16 | } 17 | 18 | started="" 19 | function maybe_first_prompt() { 20 | if [ -z "$started" ]; then 21 | prompt 22 | started=true 23 | fi 24 | } 25 | 26 | function backtotop() { 27 | 28 | clear 29 | } 30 | 31 | function run() { 32 | maybe_first_prompt 33 | rate=25 34 | if [ -n "$DEMO_RUN_FAST" ]; then 35 | rate=1000 36 | fi 37 | echo "$green$1$reset" | pv -qL $rate 38 | if [ -n "$DEMO_RUN_FAST" ]; then 39 | sleep 0.5 40 | fi 41 | eval "$1" 42 | r=$? 43 | read -d '' -t 1 -n 10000 # clear stdin 44 | prompt 45 | if [ -z "$DEMO_AUTO_RUN" ]; then 46 | read -s 47 | fi 48 | return $r 49 | } 50 | 51 | function relative() { 52 | for arg; do 53 | echo "$(realpath $(dirname $(which $0)))/$arg" | sed "s|$(realpath $(pwd))|.|" 54 | done 55 | } 56 | 57 | SSH_NODE=$(kubectl get nodes | tail -1 | cut -f1 -d' ') 58 | 59 | trap "echo" EXIT 60 | -------------------------------------------------------------------------------- /quota/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | desc "There is no quota" 6 | run "kubectl --namespace=demos get quota" 7 | 8 | desc "Install quota" 9 | run "cat $(relative quota.yaml)" 10 | run "kubectl --namespace=demos create -f $(relative quota.yaml)" 11 | run "kubectl --namespace=demos describe quota demo-quota" 12 | 13 | desc "Create a large pod - should fail" 14 | run "cat $(relative pod1.yaml)" 15 | run "kubectl --namespace=demos create -f $(relative pod1.yaml)" 16 | run "kubectl --namespace=demos describe quota demo-quota" 17 | 18 | desc "Create a pod with no limits - should fail" 19 | run "cat $(relative pod2.yaml)" 20 | run "kubectl --namespace=demos create -f $(relative pod2.yaml)" 21 | run "kubectl --namespace=demos describe quota demo-quota" 22 | 23 | desc "There are no default limits" 24 | run "kubectl --namespace=demos get limits" 25 | 26 | desc "Set default limits" 27 | run "cat $(relative limits.yaml)" 28 | run "kubectl --namespace=demos create -f $(relative limits.yaml)" 29 | run "kubectl --namespace=demos describe limits demo-limits" 30 | 31 | desc "Create a pod with no limits - should succeed now" 32 | run "cat $(relative pod2.yaml)" 33 | run "kubectl --namespace=demos create -f $(relative pod2.yaml)" 34 | run "kubectl --namespace=demos describe quota demo-quota" 35 | -------------------------------------------------------------------------------- /service-mesh/istio-openshift/install.sh: -------------------------------------------------------------------------------- 1 | 2 | # Note, need minikube to use 3.7 of openshift 3 | # minishift start --memory=4096 --disk-size=30g --openshift-version=v3.7.0-alpha.1 4 | oc new-project istio-system 5 | oc project istio-system 6 | oc adm policy add-scc-to-user anyuid -z istio-ingress-service-account 7 | oc adm policy add-scc-to-user privileged -z istio-ingress-service-account 8 | oc adm policy add-scc-to-user anyuid -z istio-egress-service-account 9 | oc adm policy add-scc-to-user privileged -z istio-egress-service-account 10 | oc adm policy add-scc-to-user anyuid -z istio-pilot-service-account 11 | oc adm policy add-scc-to-user privileged -z istio-pilot-service-account 12 | oc adm policy add-scc-to-user anyuid -z default 13 | oc adm policy add-scc-to-user privileged -z default 14 | oc adm policy add-cluster-role-to-user cluster-admin -z default 15 | 16 | curl -L https://git.io/getLatestIstio | sh - 17 | ISTIO=`ls | grep istio` 18 | export PATH="$PATH:~/$ISTIO/bin" 19 | cd $ISTIO 20 | oc apply -f install/kubernetes/istio.yaml 21 | 22 | oc create -f install/kubernetes/addons/prometheus.yaml 23 | oc create -f install/kubernetes/addons/grafana.yaml 24 | oc create -f install/kubernetes/addons/servicegraph.yaml 25 | oc create -f install/kubernetes/addons/zipkin.yaml 26 | oc expose svc grafana 27 | oc expose svc servicegraph 28 | oc expose svc zipkin -------------------------------------------------------------------------------- /ticket-monster-msa/orders/temp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | #desc "Let's build the project and run locally!" 7 | # 8 | #tmux split-window -v -d -c $PWD 9 | #tmux send-keys -t bottom C-z 'while true; do echo foo; sleep 5; done' Enter 10 | # 11 | #run "for i in 'seq 1 3'; do echo bar; done" 12 | #tmux send-keys -t bottom C-c 13 | #tmux send-keys -t bottom C-z 'exit' Enter 14 | # 15 | # 16 | #desc " continue ... 
" 17 | #read -s 18 | 19 | 20 | ##################### 21 | #SOURCE_DIR=$PWD 22 | #echo "$SOURCE_DIR/../infra/project/ticket-monster-infra" 23 | # 24 | #TEMPLATE_EXISTS=$(oc get template | grep ticket-monster-mysql) 25 | #if [[ ! $TEMPLATE_EXISTS ]]; then 26 | # 27 | # if [ ! -d $SOURCE_DIR/../infra/project/ticket-monster-infra ]; then 28 | # git clone https://github.com/christian-posta/ticket-monster-infra ../../../infra/project/ticket-monster-infra 29 | # fi 30 | # desc "Deploy mysqlorders" 31 | # run "oc create -f ../../../infra/project/ticket-monster-infra/mysql-openshift-template.yml" 32 | #fi 33 | ###################### 34 | 35 | 36 | #desc "show tables" 37 | #run "$(relative mysql) -e 'show tables;'" 38 | 39 | ###################### 40 | 41 | SOURCE_DIR=$PWD 42 | tmux split-window -v -d -c $SOURCE_DIR 43 | tmux send-keys -t bottom C-z './_port-forward-mysql.sh' Enter 44 | 45 | 46 | -------------------------------------------------------------------------------- /ticket-monster-monolith/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | GITHASH=master 6 | 7 | if [ ! -d $(relative project/monolith) ]; then 8 | echo "clone project..." 9 | git clone https://github.com/ticket-monster-msa/monolith $(relative project/monolith) 10 | pushd project/monolith 11 | git checkout $GITHASH 12 | popd 13 | fi 14 | 15 | 16 | 17 | if [[ "$(oc get dc/mysql 2>&1)" == *"not found"* ]]; then 18 | echo "installing mysql database" 19 | oc process mysql-persistent -n openshift MYSQL_DATABASE=ticketmonster MYSQL_USER=ticket MYSQL_PASSWORD=monster MYSQL_ROOT_PASSWORD=admin | oc create -f - 20 | else 21 | echo "skipping mysql installation" 22 | fi 23 | 24 | 25 | if [[ "$(oc get dc/ticketmonster 2>&1)" == *"not found"* ]]; then 26 | echo "building ticketmonster for mysql" 27 | pushd $(relative project/monolith) 28 | mvn clean install -Pmysql-openshift 29 | cd target/openshift 30 | oc new-build --binary=true --strategy=source --image-stream=wildfly:10.0 --name=ticketmonster-monolith --env MYSQL_DATABASE=ticketmonster --env MYSQL_USER=ticket --env MYSQL_PASSWORD=monster 31 | oc start-build ticketmonster-monolith --from-dir=. --follow=true 32 | oc new-app --name=ticketmonster --image-stream=ticketmonster-monolith 33 | popd 34 | else 35 | echo "skipping ticketmonster installation" 36 | fi 37 | 38 | 39 | -------------------------------------------------------------------------------- /camel/api-router/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | desc "Get project from github" 6 | run "git clone git@github.com:christian-posta/api-router.git $(relative project/api-router)" 7 | 8 | SOURCE_DIR=$PWD 9 | 10 | desc "We now have a project!" 11 | run "cd $(relative project/api-router) && ls -l" 12 | 13 | desc "Let's build the project and run locally!" 
14 | 15 | 16 | 17 | run "mvn clean install" 18 | 19 | tmux split-window -v -d -c $SOURCE_DIR 20 | tmux send-keys -t bottom C-z './_catalog_1.sh' Enter 21 | 22 | desc "Start the service" 23 | read -s 24 | run "mvn exec:java" 25 | 26 | tmux send-keys -t bottom C-c 27 | tmux send-keys -t bottom C-z 'exit' Enter 28 | 29 | 30 | # install FIS image streams 31 | curl -s -L https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json | oc create -f - > /dev/null 2>&1 32 | 33 | desc "Create a quickstart template" 34 | run "oc create -f quickstart-template.json" 35 | 36 | desc "Create our app" 37 | run "oc process api-router-app -v GIT_REPO=https://github.com/christian-posta/api-router,IMAGE_STREAM_NAMESPACE=demos | oc create -f -" 38 | 39 | tmux split-window -v -d -c $SOURCE_DIR 40 | tmux send-keys -t bottom C-z "oc logs -f build/$(oc get build | grep -i running | grep api-router | awk '{print $1}')" Enter 41 | 42 | desc "See what we've created" 43 | run "oc get pod" 44 | 45 | tmux send-keys -t bottom C-c 46 | tmux send-keys -t bottom C-z 'exit' Enter 47 | 48 | run "oc get svc" 49 | run "oc expose svc api-router" -------------------------------------------------------------------------------- /service-mesh/istio/echo/app/mixer-config-quota-echo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: mixer-config 6 | data: 7 | globalconfig.yml: |- 8 | subject: "namespace:ns" 9 | revision: "2022" 10 | adapters: 11 | - name: default 12 | kind: quotas 13 | impl: memQuota 14 | params: 15 | - name: default 16 | impl: stdioLogger 17 | params: 18 | logStream: 0 # STDERR 19 | - name: prometheus 20 | kind: metrics 21 | impl: prometheus 22 | params: 23 | - name: default 24 | impl: denyChecker 25 | serviceconfig.yml: |- 26 | subject: namespace:ns 27 | revision: "2022" 28 | rules: 29 | #- selector: service.name == “*” 30 | #- selector: service.name == "myservice" 31 | - selector: true 32 | aspects: 33 | - kind: quotas 34 | params: 35 | - kind: metrics 36 | adapter: prometheus 37 | params: 38 | metrics: 39 | - descriptor_name: request_count 40 | # we want to increment this counter by 1 for each unique (source, target, service, method, response_code) tuple 41 | value: "1" 42 | labels: 43 | source: source.service | "unknown" 44 | target: target.service | "unknown" 45 | service: api.name | "unknown" 46 | method: api.method | "unknown" 47 | response_code: response.http.code | 200 48 | - descriptor_name: request_latency 49 | value: response.latency | "0ms" 50 | labels: 51 | source: source.service | "unknown" 52 | target: target.service | "unknown" 53 | service: api.name | "unknown" 54 | method: api.method | "unknown" 55 | response_code: response.http.code | 200 56 | --- 57 | -------------------------------------------------------------------------------- /bluegreen/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | SOURCE_DIR=$PWD 6 | 7 | desc "Create the blue/v1 service" 8 | run "cat $(relative app-blue-v1.yaml)" 9 | run "kubectl --namespace=demos create -f $(relative app-blue-v1.yaml)" 10 | run "kubectl --namespace=demos get pod" 11 | run "kubectl --namespace=demos get svc" 12 | 13 | desc "expose our service to the outside world" 14 | run "oc expose svc/app-blue-v1 --name bluegreen" 15 | run "oc get route" 16 | 17 | 18 | tmux split-window -v -d -c $SOURCE_DIR 19 | tmux send-keys -t bottom C-z './_bg_1.sh' Enter 20 | 21 | desc "run some load.." 22 | read -s 23 | 24 | desc "Run green/v2 of our app" 25 | run "cat $(relative app-green-v2.yaml)" 26 | run "kubectl --namespace=demos create -f $(relative app-green-v2.yaml)" 27 | run "kubectl --namespace=demos get pod" 28 | run "kubectl --namespace=demos get svc" 29 | 30 | tmux split-window -h -d -c $SOURCE_DIR 31 | tmux send-keys -t right C-z './_bg_2.sh' Enter 32 | 33 | desc "Everything looks good! Let's upgrade!" 34 | read -s 35 | 36 | desc "Patch the route to switch from blue to green" 37 | run "oc patch route/bluegreen -p '{\"spec\": {\"to\": {\"name\": \"app-green-v2\" }}}'" 38 | 39 | tmux send-keys -t right C-c 40 | tmux send-keys -t right C-z 'exit' Enter 41 | 42 | desc "Does everything look okay? Oh, no? fail back!" 43 | read -s 44 | run "oc patch route/bluegreen -p '{\"spec\": {\"to\": {\"name\": \"app-blue-v1\" }}}'" 45 | 46 | desc "BONUS!!!!!! what about A/B weighted routing!?" 47 | read -s 48 | 49 | desc "Maybe we just want to treat this as A/B test?" 50 | desc "Note... Blue/Green is NOT the same as A/B, but we'll illustrated weighted routing here:" 51 | read -s 52 | 53 | run "oc patch route/bluegreen -p '{\"spec\": { \"alternateBackends\" : [{ \"kind\": \"Service\", \"name\": \"app-green-v2\", \"weight\": 25}], \"to\": {\"weight\": 75}}}'" 54 | 55 | tmux send-keys -t bottom C-c 56 | tmux send-keys -t bottom C-z 'exit' Enter 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /service-mesh/istio/setup/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../../util.sh 4 | 5 | VERSION="0.1.6" 6 | ADDONS=$(relative binaries/istio-$VERSION/install/kubernetes/addons) 7 | INSTALL=$(relative binaries/istio-$VERSION/install/kubernetes/istio.yaml) 8 | 9 | if [ "$1" == "--upstream" ]; then 10 | echo "installing from upstream..." 11 | if [ -d "$(relative project/istio)" ]; then 12 | pushd $(relative project/istio) 13 | git reset --hard 14 | git pull 15 | popd 16 | else 17 | git clone https://github.com/istio/istio.git $(relative project/istio) 18 | fi 19 | 20 | ADDONS=$(relative project/istio/install/kubernetes/addons) 21 | INSTALL=$(relative project/istio/install/kubernetes/istio.yaml) 22 | VERSION="master" 23 | 24 | #Download upstream client 25 | source $(relative project/istio/istio.VERSION) 26 | curl -s ${ISTIOCTL_URL}/istioctl-osx > $(relative project/bin/istioctl) 27 | chmod +x $(relative project/bin/istioctl) 28 | else 29 | if [ ! 
-d "$(relative binaries/istio-$VERSION)" ]; then 30 | rm -fr $(relative binaries/istio-*) 31 | pushd $(relative binaries) 32 | curl -L -O "https://github.com/istio/istio/releases/download/$VERSION/istio-$VERSION-osx.tar.gz" 33 | tar -xzf "istio-$VERSION-osx.tar.gz" 34 | popd 35 | fi 36 | fi 37 | 38 | echo "Using version: $VERSION" 39 | echo "Using $INSTALL for the installation" 40 | echo "Using $ADDONS for the addons" 41 | 42 | echo "Press to continue..." 43 | read -s 44 | 45 | 46 | echo "Let's install the istio addons" 47 | kubectl create -f $ADDONS 48 | 49 | echo "Let's install the istio ingress controller, mixer, and manager" 50 | kubectl create -f $INSTALL 51 | 52 | 53 | ##### 54 | # Install notes 55 | # download 0.1.6 56 | # https://github.com/istio/istio/releases/download/0.1.6/istio-0.1.6-osx.tar.gz 57 | # unpack the tar file 58 | # now the istio-0.1.6 has a folder structure with addons, install, and bin -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/impl.java: -------------------------------------------------------------------------------- 1 | package com.example; 2 | 3 | import com.netflix.hystrix.contrib.javanica.annotation.HystrixCommand; 4 | import org.springframework.boot.SpringApplication; 5 | import org.springframework.boot.autoconfigure.SpringBootApplication; 6 | import org.springframework.cloud.client.circuitbreaker.EnableCircuitBreaker; 7 | import org.springframework.web.bind.annotation.PathVariable; 8 | import org.springframework.web.bind.annotation.RequestMapping; 9 | import org.springframework.web.bind.annotation.RequestMethod; 10 | import org.springframework.web.bind.annotation.RestController; 11 | import org.springframework.web.client.RestTemplate; 12 | 13 | import java.net.InetAddress; 14 | import java.util.HashMap; 15 | import java.util.Map; 16 | 17 | @SpringBootApplication 18 | @EnableCircuitBreaker 19 | public class HystrixHelloWorldApplication { 20 | 21 | public static void main(String[] args) { 22 | SpringApplication.run(HystrixHelloWorldApplication.class, args); 23 | } 24 | } 25 | 26 | 27 | @RestController() 28 | @RequestMapping("/api") 29 | class HelloController { 30 | 31 | 32 | @HystrixCommand(fallbackMethod = "fallback") 33 | @RequestMapping(value = "/ip/{name}", method = RequestMethod.GET) 34 | public Map hello(@PathVariable String name) throws Exception { 35 | RestTemplate template = new RestTemplate(); 36 | HashMap response = new HashMap<>(); 37 | 38 | String url = "http://simple-hello-world/api/hello/" + name; 39 | HashMap hello = template.getForEntity(url, HashMap.class).getBody(); 40 | 41 | response.put("hello", hello); 42 | response.put("ip", InetAddress.getLocalHost().getHostAddress()); 43 | 44 | return response; 45 | } 46 | 47 | public Map fallback(@PathVariable String name) throws Exception { 48 | HashMap response = new HashMap<>(); 49 | response.put("fallback", "could not reach service"); 50 | response.put("name", name); 51 | return response; 52 | 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /replication_controllers/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | desc "Run some pods under a replication controller" 6 | run "cat $(relative rc.yaml)" 7 | run "kubectl --namespace=demos create -f $(relative rc.yaml)" 8 | 9 | desc "Look what I made!" 
10 | run "kubectl --namespace=demos describe rc hostnames" 11 | 12 | desc "These are the pods that were created" 13 | run "kubectl --namespace=demos get pods -l run=hostnames" 14 | 15 | trap "" SIGINT 16 | IPS=($(kubectl --namespace=demos get pods -l run=hostnames \ 17 | -o go-template='{{range .items}}{{.status.podIP}}{{"\n"}}{{end}}')) 18 | desc "SSH into my cluster and access the pods" 19 | run "kubectl --namespace=demos get pods -l run=hostnames \\ 20 | -o go-template='{{range .items}}{{.status.podIP}}{{\"\\n\"}}{{end}}'" 21 | run "minishift ssh -- '\\ 22 | for IP in ${IPS[*]}; do \\ 23 | curl --connect-timeout 1 -s \$IP:9376 && echo; \\ 24 | done \\ 25 | '" 26 | 27 | desc "Kill a pod" 28 | VICTIM=$(kubectl --namespace=demos get pods -o name -l run=hostnames | tail -1) 29 | run "kubectl --namespace=demos delete $VICTIM" 30 | run "kubectl --namespace=demos get pods -l run=hostnames" 31 | 32 | desc "Let's cleanup and delete that deployment" 33 | run "kubectl --namespace=demos delete rc hostnames" 34 | 35 | #Leave out kill a node for now as we're doing locally 36 | #desc "Kill a node" 37 | #NODE=$(kubectl --namespace=demos get pods -l run=hostnames -o wide \ 38 | # | tail -1 \ 39 | # | awk '{print $NF}') 40 | #run "kubectl --namespace=demos get pods -l run=hostnames -o wide" 41 | #run "gcloud compute ssh --zone=us-central1-b $NODE --command '\\ 42 | # sudo shutdown -r now; \\ 43 | # '" 44 | #while true; do 45 | # run "kubectl --namespace=demos get node $NODE" 46 | # status=$(kubectl --namespace=demos get node $NODE | tail -1 | awk '{print $3}') 47 | # if [ "$status" == "NotReady" ]; then 48 | # break 49 | # fi 50 | #done 51 | #run "kubectl --namespace=demos get pods -l run=hostnames -o wide" 52 | #run "kubectl --namespace=demos describe rc hostnames" 53 | -------------------------------------------------------------------------------- /services/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../util.sh 4 | 5 | SOURCE_DIR=$PWD 6 | 7 | 8 | if kubectl --namespace=demos get deploy deployment-demo >/dev/null 2>&1; then 9 | desc "Revisit our deployment" 10 | run "kubectl --namespace=demos get deploy deployment-demo" 11 | else 12 | desc "Let's return to deployment ..." 
13 | run "kubectl --namespace=demos create -f $(relative ../deployment/deployment.yaml)" 14 | run "kubectl --namespace=demos get pods -l demo=deployment" 15 | 16 | fi 17 | 18 | run "kubectl --namespace=demos get pods -l demo=deployment \\ 19 | -o go-template='{{range .items}}{{.status.podIP}}{{\"\\n\"}}{{end}}'" 20 | 21 | desc "Expose the deployment as a service" 22 | run "kubectl --namespace=demos expose deploy deployment-demo \\ 23 | --port=80" 24 | 25 | desc "Have a look at the service" 26 | run "kubectl --namespace=demos describe svc deployment-demo" 27 | 28 | IP=$(kubectl --namespace=demos get svc deployment-demo \ 29 | -o go-template='{{.spec.clusterIP}}') 30 | desc "See what happens when you access the service's IP" 31 | run "minishift ssh -- '\\ 32 | for i in \$(seq 1 10); do \\ 33 | curl --connect-timeout 1 -s $IP && echo; \\ 34 | done \\ 35 | '" 36 | run "minishift ssh -- '\\ 37 | for i in \$(seq 1 500); do \\ 38 | curl --connect-timeout 1 -s $IP && echo; \\ 39 | done | sort | uniq -c; \\ 40 | '" 41 | 42 | desc "Let's do some scaling" 43 | 44 | tmux split-window -v -d -c $SOURCE_DIR 45 | tmux send-keys -t bottom C-z './_scale_1.sh' Enter 46 | 47 | desc "Resize the RC and watch the service backends change" 48 | run "kubectl --namespace=demos scale deploy deployment-demo --replicas=1" 49 | run "kubectl --namespace=demos scale deploy deployment-demo --replicas=2" 50 | run "kubectl --namespace=demos scale deploy deployment-demo --replicas=5" 51 | 52 | tmux send-keys -t bottom C-c 53 | tmux send-keys -t bottom C-z 'exit' Enter 54 | 55 | desc "Let's cleanup and delete that deployment" 56 | run "kubectl --namespace=demos delete deploy deployment-demo" 57 | run "kubectl --namespace=demos delete svc deployment-demo" 58 | -------------------------------------------------------------------------------- /ticket-monster-msa/admin/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | desc "Get project from github" 6 | run "git clone git@github.com:christian-posta/ticket-monster-admin.git $(relative project/ticket-monster-admin)" 7 | 8 | SOURCE_DIR=$PWD 9 | 10 | desc "We now have a project!" 11 | run "cd $(relative project/ticket-monster-admin) && ls -l" 12 | 13 | desc "Let's build the project and run locally!" 14 | 15 | tmux split-window -v -d -c $SOURCE_DIR 16 | tmux send-keys -t bottom C-z './_admin_1.sh' Enter 17 | 18 | run "mvn wildfly-swarm:run" 19 | tmux send-keys -t bottom C-c 20 | tmux send-keys -t bottom C-z 'exit' Enter 21 | 22 | 23 | desc "let's use mysql instead of hsqldb" 24 | read -s 25 | 26 | 27 | TEMPLATE_EXISTS=$(oc get template | grep ticket-monster-mysql) 28 | if [[ ! $TEMPLATE_EXISTS ]]; then 29 | 30 | if [ ! 
-d $SOURCE_DIR/../infra/project/ticket-monster-infra ]; then 31 | git clone https://github.com/christian-posta/ticket-monster-infra ../../../infra/project/ticket-monster-infra 32 | fi 33 | desc "Deploy mysqladmin" 34 | run "oc create -f ../../../infra/project/ticket-monster-infra/mysql-openshift-template.yml" 35 | fi 36 | 37 | tmux split-window -v -d 38 | tmux send-keys -t bottom C-z 'oc get pod --watch' Enter 39 | 40 | run "oc process ticket-monster-mysql -v DATABASE_SERVICE_NAME=mysqladmin | oc create -f -" 41 | run "oc deploy mysqladmin --latest" 42 | run "oc logs dc/mysqladmin" 43 | 44 | 45 | desc "Now let's use mysql and deploy to kubernetes" 46 | run "mvn clean -Pf8,mysql fabric8:deploy" 47 | 48 | tmux send-keys -t bottom C-c 49 | tmux send-keys -t bottom C-z 'exit' Enter 50 | 51 | desc "show tables" 52 | run "$SOURCE_DIR/mysql -e 'show tables;'" 53 | 54 | 55 | ## slight of hand.. bounce the UI pod because we've prob changed the tm-search service IP 56 | ## be deleting and restarting it. Typically the UI service would deploy its own tm-search with 57 | ## appropriate selectors, etc. but we'll hide it by bouncing it here: 58 | oc delete pod $(oc get pod | grep ticket-monster-ui | awk '{print $1}') > /dev/null 2>&1 59 | 60 | desc "Go make sure UI works" 61 | read -s 62 | -------------------------------------------------------------------------------- /spring-boot/simple-hello-world/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | # we want to be able to interact with the services 6 | oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:demos:exposecontroller > /dev/null 2>&1 7 | oc apply -f http://central.maven.org/maven2/io/fabric8/devops/apps/exposecontroller/2.2.327/exposecontroller-2.2.327-openshift.yml > /dev/null 2>&1 8 | oc get cm/exposecontroller -o yaml | sed s/Route/NodePort/g | oc apply -f - > /dev/null 2>&1 9 | 10 | desc "Getting a project from start.spring.io" 11 | desc "spring init --name simple-hello-world --boot-version 1.3.7.RELEASE --groupId=com.example --artifactId=simple-hello-world --dependencies=web,actuator --build=maven " 12 | read -s 13 | run "spring init --name simple-hello-world --boot-version 1.3.7.RELEASE --groupId=com.example --artifactId=simple-hello-world --dependencies=web,actuator --build=maven --extract $(relative project/simple-hello-world)" 14 | 15 | desc "We now have a project!" 16 | 17 | backtotop 18 | 19 | run "cd $(relative project/simple-hello-world)" 20 | run "ls -l " 21 | 22 | 23 | desc "Let's add some functionality" 24 | run "../../_impl-svc.sh" 25 | desc "Open the project in your IDE if you'd like" 26 | 27 | backtotop 28 | 29 | desc "Build and run the project; query the endpoint in a different screen: curl http://localhost:8080/api/hello/ceposta" 30 | read -s 31 | 32 | tmux split-window -v 33 | tmux select-layout even-vertical 34 | tmux select-pane -t 0 35 | tmux send-keys -t 1 "clear" C-m 36 | tmux send-keys -t 1 "curl -s http://localhost:8080/api/hello/ceposta" 37 | 38 | run "mvn spring-boot:run" 39 | 40 | backtotop 41 | desc "Let's add the fabric8 magic!" 
42 | read -s 43 | desc "mvn io.fabric8:fabric8-maven-plugin:LATEST:setup" 44 | read -s 45 | run "mvn io.fabric8:fabric8-maven-plugin:3.2.28:setup" 46 | run "tail -n 30 pom.xml" 47 | 48 | 49 | backtotop 50 | desc "Now that we have our cloud app server up let's build our project" 51 | run "mvn clean install" 52 | run "cat target/classes/META-INF/fabric8/kubernetes.yml" 53 | run "docker images | head -n 10" 54 | 55 | backtotop 56 | desc "Let's deploy our app!" 57 | run "mvn fabric8:run" 58 | -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/_impl-svc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ ! -d .git ]; then 4 | git init 5 | cp ~/dev/.gitignore . 6 | git add . 7 | git commit -m 'initial commit' 8 | else 9 | cp ~/dev/.gitignore . 10 | git add . 11 | fi 12 | 13 | 14 | echo 'spring.application.name=hystrix-hello-world' > src/main/resources/application.properties 15 | echo '#server.port=8081' >> src/main/resources/application.properties 16 | 17 | rm -fr src/test/ 18 | 19 | mkdir -p src/main/fabric8 20 | 21 | 22 | awk '//{x++} x==1{sub(//,"&\n Brixton.SR4 \ 23 | 0.0.15")}1' pom.xml > tmp && mv tmp pom.xml 24 | 25 | 26 | awk '//{x++} x==1{sub(//,"&\n \ 27 | \ 28 | org.springframework.cloud\ 29 | spring-cloud-context\ 30 | \ 31 | \ 32 | io.fabric8\ 33 | spring-cloud-starter-kubernetes\ 34 | \${spring-cloud-kubernetes.version}\ 35 | \ 36 | ")}1' pom.xml > tmp && mv tmp pom.xml 37 | 38 | 39 | cat <> src/main/java/com/example/HystrixHelloWorldApplication.java 40 | 41 | 42 | @RestController() 43 | @RequestMapping("/api") 44 | class HelloController { 45 | 46 | 47 | @RequestMapping(value = "/ip/{name}", method = RequestMethod.GET) 48 | public Map hello(@PathVariable String name) throws Exception { 49 | RestTemplate template = new RestTemplate(); 50 | HashMap response = new HashMap<>(); 51 | 52 | String url = "http://localhost:8080/api/hello/" + name; 53 | HashMap hello = template.getForEntity(url, HashMap.class).getBody(); 54 | 55 | response.put("hello", hello); 56 | response.put("ip", InetAddress.getLocalHost().getHostAddress()); 57 | 58 | return response; 59 | } 60 | } 61 | 62 | 63 | EOF 64 | 65 | awk '/org.springframework.boot.autoconfigure.SpringBootApplication;/{x++} x==1{sub(/org.springframework.boot.autoconfigure.SpringBootApplication;/,"&\nimport org.springframework.web.bind.annotation.PathVariable;\ 66 | import org.springframework.web.bind.annotation.RequestMapping;\ 67 | import org.springframework.web.bind.annotation.RequestMethod;\ 68 | import org.springframework.web.bind.annotation.RestController;\ 69 | import org.springframework.web.client.RestTemplate;\ 70 | \ 71 | import java.net.InetAddress;\ 72 | import java.util.HashMap;\ 73 | import java.util.Map;")}1' src/main/java/com/example/HystrixHelloWorldApplication.java > tmp && mv tmp src/main/java/com/example/HystrixHelloWorldApplication.java 74 | 75 | 76 | sed -i '' 's/SR6/SR4/g' pom.xml -------------------------------------------------------------------------------- /ticket-monster-msa/orders/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | desc "Get project from github" 6 | run "git clone git@github.com:christian-posta/ticket-monster-orders.git $(relative project/ticket-monster-orders)" 7 | 8 | SOURCE_DIR=$PWD 9 | 10 | desc "We now have a project!" 
11 | run "cd $(relative project/ticket-monster-orders) && ls -l" 12 | 13 | desc "Let's build the project and run locally!" 14 | 15 | tmux split-window -v -d -c $SOURCE_DIR 16 | tmux send-keys -t bottom C-z './_orders_1.sh' Enter 17 | 18 | run "mvn wildfly-swarm:run" 19 | tmux send-keys -t bottom C-c 20 | tmux send-keys -t bottom C-z 'exit' Enter 21 | 22 | 23 | desc "let's use mysql instead of hsqldb" 24 | read -s 25 | 26 | 27 | TEMPLATE_EXISTS=$(oc get template | grep ticket-monster-mysql) 28 | if [[ ! $TEMPLATE_EXISTS ]]; then 29 | 30 | if [ ! -d $SOURCE_DIR/../infra/project/ticket-monster-infra ]; then 31 | git clone https://github.com/christian-posta/ticket-monster-infra ../../../infra/project/ticket-monster-infra 32 | fi 33 | desc "Deploy mysqlorders" 34 | run "oc create -f ../../../infra/project/ticket-monster-infra/mysql-openshift-template.yml" 35 | fi 36 | 37 | tmux split-window -v -d 38 | tmux send-keys -t bottom C-z 'oc get pod --watch' Enter 39 | 40 | run "oc process ticket-monster-mysql -v DATABASE_SERVICE_NAME=mysqlorders | oc create -f -" 41 | run "oc deploy mysqlorders --latest" 42 | run "oc logs dc/mysqlorders" 43 | 44 | 45 | desc "Now let's use mysql and deploy to kubernetes" 46 | run "mvn clean -Pf8,mysql fabric8:deploy" 47 | 48 | tmux send-keys -t bottom C-c 49 | tmux send-keys -t bottom C-z 'exit' Enter 50 | 51 | desc "show tables" 52 | run "$SOURCE_DIR/mysql -e 'show tables;'" 53 | 54 | read -s 55 | 56 | tmux split-window -v -d -c $SOURCE_DIR 57 | tmux send-keys -t bottom C-z './_port-forward-mysql.sh' Enter 58 | 59 | desc "Let's add tables, data to the database using liquibase" 60 | read -s 61 | 62 | 63 | run "mvn -Pdb-migration-mysql liquibase:status" 64 | run "mvn -Pdb-migration-mysql liquibase:update" 65 | run "mvn -Pdb-migration-mysql liquibase:tag -Dliquibase.tag=v2.0" 66 | 67 | # we need to port forward the mysqlorders mysql 68 | # then we need run import.sql 69 | # CONNECT_POD_NAME=$(kubectl get pod | grep -i running | grep ^mysqlorders| awk '{ print $1 }') 70 | # kubectl port-forward $CONNECT_POD_NAME 3306:3306 71 | # mysql ticketmonster -h127.0.0.1 -uticket -pmonster < src/main/resources/import.sql 72 | 73 | tmux send-keys -t bottom C-c 74 | sleep 1 75 | tmux send-keys -t bottom C-z 'exit' Enter 76 | 77 | 78 | ## slight of hand.. bounce the UI pod because we've prob changed the tm-search service IP 79 | ## be deleting and restarting it. Typically the UI service would deploy its own tm-search with 80 | ## appropriate selectors, etc. but we'll hide it by bouncing it here: 81 | oc delete pod $(oc get pod | grep ticket-monster-ui | awk '{print $1}') > /dev/null 2>&1 82 | 83 | desc "Go make sure UI works" 84 | read -s 85 | -------------------------------------------------------------------------------- /spring-boot/simple-hello-world/_impl-svc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ ! -d .git ]; then 4 | git init 5 | cp ~/dev/.gitignore . 6 | git add . 7 | git commit -m 'initial commit' 8 | else 9 | cp ~/dev/.gitignore . 10 | git add . 
11 | fi 12 | 13 | 14 | echo 'spring.application.name=helloservice' > src/main/resources/application.properties 15 | 16 | rm -fr src/test/ 17 | 18 | mkdir -p src/main/fabric8 19 | 20 | 21 | awk '//{x++} x==1{sub(//,"&\n Brixton.SR4 \ 22 | 0.0.15")}1' pom.xml > tmp && mv tmp pom.xml 23 | 24 | awk '/org.springframework.boot.autoconfigure.SpringBootApplication;/{x++} x==1{sub(/org.springframework.boot.autoconfigure.SpringBootApplication;/,"&\nimport org.springframework.web.bind.annotation.PathVariable;\ 25 | import org.springframework.web.bind.annotation.RequestMapping;\ 26 | import org.springframework.web.bind.annotation.RequestMethod;\ 27 | import org.springframework.web.bind.annotation.RestController;\ 28 | \ 29 | import java.util.HashMap;\ 30 | import java.util.Map;")}1' src/main/java/com/example/simplehelloworld/SimpleHelloWorldApplication.java > tmp && mv tmp src/main/java/com/example/simplehelloworld/SimpleHelloWorldApplication.java 31 | 32 | 33 | awk '/<\/dependencies>/{x++} x==1{sub(/<\/dependencies>/,"&\n \ 34 | \ 35 | \ 36 | org.springframework.cloud \ 37 | spring-cloud-dependencies \ 38 | \${spring-cloud.version} \ 39 | pom \ 40 | import \ 41 | \ 42 | \ 43 | ")}1' pom.xml > tmp && mv tmp pom.xml 44 | 45 | awk '//{x++} x==1{sub(//,"&\n \ 46 | \ 47 | org.springframework.cloud\ 48 | spring-cloud-context\ 49 | \ 50 | \ 51 | io.fabric8\ 52 | spring-cloud-starter-kubernetes\ 53 | \${spring-cloud-kubernetes.version}\ 54 | \ 55 | ")}1' pom.xml > tmp && mv tmp pom.xml 56 | 57 | 58 | cat <> src/main/java/com/example/simplehelloworld/SimpleHelloWorldApplication.java 59 | 60 | 61 | @RestController() 62 | @RequestMapping("/api") 63 | class HelloController { 64 | 65 | private static int counter = 0; 66 | 67 | @RequestMapping(value = "/hello/{name}", method = RequestMethod.GET) 68 | public Map hello(@PathVariable String name) throws Exception { 69 | HashMap response = new HashMap<>(); 70 | response.put("response", "hello"); 71 | response.put("your-name", name); 72 | response.put("count", counter++); 73 | return response; 74 | } 75 | } 76 | 77 | 78 | EOF 79 | 80 | cat <> helloserviceConfigMap.yml 81 | kind: ConfigMap 82 | apiVersion: v1 83 | metadata: 84 | name: helloservice 85 | data: 86 | application.yaml: |- 87 | demo: 88 | message: hello, spring cloud kubernetes from Las Vegas! 89 | EOF 90 | -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | 6 | # we want to be able to interact with the services 7 | oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:demos:exposecontroller > /dev/null 2>&1 8 | oc apply -f http://central.maven.org/maven2/io/fabric8/devops/apps/exposecontroller/2.2.327/exposecontroller-2.2.327-openshift.yml > /dev/null 2>&1 9 | oc get cm/exposecontroller -o yaml | sed s/Route/NodePort/g | oc apply -f - > /dev/null 2>&1 10 | 11 | desc "Getting a project from start.spring.io" 12 | desc "spring init --name hystrix-hello-world --boot-version 1.3.7.RELEASE --groupId=com.example --artifactId=hystrix-hello-world --dependencies=web,actuator,cloud-hystrix --build=maven " 13 | read -s 14 | run "spring init --name hystrix-hello-world --boot-version 1.3.7.RELEASE --groupId=com.example --artifactId=hystrix-hello-world --dependencies=web,actuator,cloud-hystrix --build=maven --extract $(relative project/hystrix-hello-world)" 15 | 16 | pushd $(relative project/hystrix-hello-world) 17 | desc "We now have a project!" 18 | run "ls -l " 19 | 20 | 21 | desc "Let's add some functionality" 22 | run "../../_impl-svc.sh" 23 | 24 | desc "Open the project in your IDE if you'd like" 25 | read -s 26 | 27 | desc "Build and run the project; query the endpoint in a different screen: curl http://localhost:8080/api/hello/ceposta" 28 | 29 | tmux split-window -v 30 | tmux select-layout even-vertical 31 | tmux select-pane -t 0 32 | tmux send-keys -t 1 "clear" C-m 33 | tmux send-keys -t 1 "curl -s http://localhost:8080/api/ip/ceposta" 34 | 35 | run "mvn spring-boot:run" 36 | backtotop 37 | desc "I'll wait for you to add the circuit breaker" 38 | read -s 39 | 40 | desc "Try running again.." 41 | tmux send-keys -t 1 "clear" C-m 42 | tmux send-keys -t 1 "curl -s http://localhost:8080/api/ip/ceposta" 43 | 44 | run "mvn spring-boot:run" 45 | 46 | 47 | desc "Let's add the fabric8 magic!" 48 | desc "mvn io.fabric8:fabric8-maven-plugin:LATEST:setup" 49 | read -s 50 | run "mvn io.fabric8:fabric8-maven-plugin:3.2.28:setup" 51 | run "tail -n 30 pom.xml" 52 | 53 | desc "Go update the service to use k8s service discovery" 54 | run "oc get svc" 55 | read -s 56 | 57 | 58 | desc "Let's deploy our app!" 
59 | run "mvn clean install fabric8:deploy" 60 | 61 | # let's enable the hystrix stream now 62 | oc label svc/hystrix-hello-world hystrix.enabled=true > /dev/null 2>&1 63 | 64 | SERVICE_URL=$(oc get svc hystrix-hello-world -o yaml | grep exposeUrl | awk '{print $2}') 65 | 66 | tmux send-keys -t 1 "clear" C-m 67 | tmux send-keys -t 1 "while true; do sleep 1s; curl $SERVICE_URL/api/ip/ceposta; echo; done" 68 | 69 | desc "scale down the simple-hello-world service and watch it hit the fallback" 70 | read -s 71 | run "oc scale dc/simple-hello-world --replicas=0" 72 | 73 | desc "scale it back up" 74 | run "oc scale dc/simple-hello-world --replicas=1" 75 | 76 | tmux send-keys -t 1 C-c 77 | 78 | desc "let's install kubeflix" 79 | popd 80 | run "./setup-kubeflix.sh" 81 | 82 | tmux send-keys -t 1 "clear" C-m 83 | tmux send-keys -t 1 "while true; do sleep 1s; curl $SERVICE_URL/api/ip/ceposta; echo; done" 84 | 85 | desc "scale down the simple-hello-world service and watch it hit the fallback" 86 | read -s 87 | run "oc scale dc/simple-hello-world --replicas=0" 88 | 89 | desc "scale it back up" 90 | read -s 91 | run "oc scale dc/simple-hello-world --replicas=1" -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/impl-pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4.0.0 3 | 4 | com.example 5 | hystrix-hello-world 6 | 0.0.1-SNAPSHOT 7 | jar 8 | 9 | hystrix-hello-world 10 | Demo project for Spring Boot 11 | 12 | 13 | org.springframework.boot 14 | spring-boot-starter-parent 15 | 1.3.7.RELEASE 16 | 17 | 18 | 19 | 20 | Brixton.SR4 21 | 0.0.15 22 | Brixton.SR4 23 | 0.0.15 24 | UTF-8 25 | UTF-8 26 | 1.8 27 | 28 | 29 | 30 | 31 | 32 | org.springframework.cloud 33 | spring-cloud-context 34 | 35 | 36 | io.fabric8 37 | spring-cloud-starter-kubernetes 38 | ${spring-cloud-kubernetes.version} 39 | 40 | 41 | 42 | 43 | org.springframework.cloud 44 | spring-cloud-context 45 | 46 | 47 | io.fabric8 48 | spring-cloud-starter-kubernetes 49 | ${spring-cloud-kubernetes.version} 50 | 51 | 52 | 53 | org.springframework.boot 54 | spring-boot-starter-actuator 55 | 56 | 57 | org.springframework.cloud 58 | spring-cloud-starter-hystrix 59 | 60 | 61 | org.springframework.boot 62 | spring-boot-starter-web 63 | 64 | 65 | 66 | org.springframework.boot 67 | spring-boot-starter-test 68 | test 69 | 70 | 71 | 72 | 73 | 74 | 75 | org.springframework.cloud 76 | spring-cloud-dependencies 77 | Brixton.SR4 78 | pom 79 | import 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | org.springframework.boot 88 | spring-boot-maven-plugin 89 | 90 | 91 | io.fabric8 92 | fabric8-maven-plugin 93 | 3.1.71 94 | 95 | 96 | fmp 97 | 98 | resource 99 | helm 100 | build 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /debezium/docker/value.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": { 3 | "name": "dbserver1.inventory.customers.Envelope", 4 | "version": 1, 5 | "optional": false, 6 | "type": "struct", 7 | "fields": [ 8 | { 9 | "field": "before", 10 | "name": "dbserver1.inventory.customers.Value", 11 | "optional": true, 12 | "type": "struct", 13 | "fields": [ 14 | { 15 | "type": "int32", 16 | "optional": false, 17 | "field": "id" 18 | }, 19 | { 20 | "type": "string", 21 | "optional": false, 22 | "field": "first_name" 23 | }, 24 | { 25 | "type": "string", 26 | "optional": false, 27 | "field": "last_name" 28 | }, 29 
| { 30 | "type": "string", 31 | "optional": false, 32 | "field": "email" 33 | } 34 | ] 35 | }, 36 | { 37 | "field": "after", 38 | "name": "dbserver1.inventory.customers.Value", 39 | "optional": true, 40 | "type": "struct", 41 | "fields": [ 42 | { 43 | "type": "int32", 44 | "optional": false, 45 | "field": "id" 46 | }, 47 | { 48 | "type": "string", 49 | "optional": false, 50 | "field": "first_name" 51 | }, 52 | { 53 | "type": "string", 54 | "optional": false, 55 | "field": "last_name" 56 | }, 57 | { 58 | "type": "string", 59 | "optional": false, 60 | "field": "email" 61 | } 62 | ] 63 | }, 64 | { 65 | "field": "source", 66 | "name": "io.debezium.connector.mysql.Source", 67 | "optional": false, 68 | "type": "struct", 69 | "fields": [ 70 | { 71 | "type": "string", 72 | "optional": false, 73 | "field": "name" 74 | }, 75 | { 76 | "type": "int64", 77 | "optional": false, 78 | "field": "server_id" 79 | }, 80 | { 81 | "type": "int64", 82 | "optional": false, 83 | "field": "ts_sec" 84 | }, 85 | { 86 | "type": "string", 87 | "optional": true, 88 | "field": "gtid" 89 | }, 90 | { 91 | "type": "string", 92 | "optional": false, 93 | "field": "file" 94 | }, 95 | { 96 | "type": "int64", 97 | "optional": false, 98 | "field": "pos" 99 | }, 100 | { 101 | "type": "int32", 102 | "optional": false, 103 | "field": "row" 104 | }, 105 | { 106 | "type": "boolean", 107 | "optional": true, 108 | "field": "snapshot" 109 | } 110 | ] 111 | }, 112 | { 113 | "type": "string", 114 | "optional": false, 115 | "field": "op" 116 | }, 117 | { 118 | "type": "int64", 119 | "optional": true, 120 | "field": "ts_ms" 121 | } 122 | ] 123 | }, 124 | "payload": { 125 | "before": null, 126 | "after": { 127 | "id": 1004, 128 | "first_name": "Anne", 129 | "last_name": "Kretchmar", 130 | "email": "annek@noanswer.org" 131 | }, 132 | "source": { 133 | "name": "dbserver1", 134 | "server_id": 0, 135 | "ts_sec": 0, 136 | "gtid": null, 137 | "file": "mysql-bin.000003", 138 | "pos": 154, 139 | "row": 0, 140 | "snapshot": true 141 | }, 142 | "op": "c", 143 | "ts_ms": 1477431285328 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /service-mesh/istio-openshift/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
$(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | ISTIOVERSION=istio-0.3.0 6 | APP_NAMESPACE=istio-samples 7 | ISTIOCTL=$ISTIOVERSION/bin/istioctl 8 | APP_DIR=$ISTIOVERSION/samples/bookinfo/kube 9 | 10 | 11 | desc "we should set some routing rules for the istio proxy" 12 | read -s 13 | desc "we currently don't have any rules" 14 | read -s 15 | run "$ISTIOCTL get routerule" 16 | 17 | desc "We need to force all traffic to v1 of the reviews service" 18 | read -s 19 | desc "Let's take a look at the route rules we want to apply" 20 | read -s 21 | run "cat $(relative $APP_DIR/route-rule-all-v1.yaml)" 22 | 23 | desc "update the istio routing rules" 24 | run "$ISTIOCTL create -f $(relative $APP_DIR/route-rule-all-v1.yaml) -n $APP_NAMESPACE" 25 | 26 | backtotop 27 | desc "Now go to the app and make sure all the traffic goes to the v1 reviews" 28 | read -s 29 | 30 | desc "now if we list the route rules, we should see our new rules" 31 | run "$ISTIOCTL get routerule " 32 | 33 | desc "we also see that these rules are stored in kubernetes as 'istioconfig'" 34 | desc "we can use vanilla kubernetes CRD to get these configs" 35 | read -s 36 | run "kubectl get routerule " 37 | run "kubectl get routerule/ratings-default -o yaml" 38 | 39 | backtotop 40 | 41 | desc "Now.. let's say we want to deploy v2 of the reviews service and route certain customers to it" 42 | read -s 43 | desc "We can implement A/B testing like this" 44 | read -s 45 | desc "Let's take a look at the content based routing rule we will use" 46 | read -s 47 | run "cat $APP_DIR/route-rule-reviews-test-v2.yaml" 48 | 49 | desc "Let's make the change" 50 | run "$ISTIOCTL create -f $APP_DIR/route-rule-reviews-test-v2.yaml -n $APP_NAMESPACE" 51 | 52 | desc "let's look at the route rules" 53 | read -s 54 | run "$ISTIOCTL get routerule" 55 | run "$ISTIOCTL get routerule reviews-test-v2 -n $APP_NAMESPACE" 56 | run "$ISTIOCTL get routerule reviews-default -n $APP_NAMESPACE" 57 | 58 | desc "Now go to your browser and refresh the app.. should still see v1 of the reviews" 59 | desc "But if you login as jason, you should see the new, v2" 60 | 61 | read -s 62 | 63 | backtotop 64 | 65 | desc "Now we want to test our services." 66 | read -s 67 | desc "We'll want to test just for the 'jason' user and not everyone" 68 | read -s 69 | desc "let's inject some faults between the reviews v2 service and the ratings service" 70 | desc "we'll delay all traffic for 5s. everything should be okay since we have a 10s timeout'" 71 | read -s 72 | desc "see source here: https://github.com/istio/istio/blob/master/samples/bookinfo/src/reviews/reviews-application/src/main/java/application/rest/LibertyRestEndpoint.java#L79" 73 | read -s 74 | run "cat $(relative $APP_DIR/route-rule-ratings-test-delay.yaml )" 75 | run "$ISTIOCTL create -f $(relative $APP_DIR/route-rule-ratings-test-delay.yaml ) -n $APP_NAMESPACE" 76 | 77 | backtotop 78 | desc "Now go to the productpage and test the delay" 79 | read -s 80 | 81 | desc "We see that the product reviews are not available at all!!" 82 | desc "we've found a bug!" 83 | read -s 84 | desc "Dang! 
The product page has a timeout of 3s" 85 | desc "https://github.com/istio/istio/blob/master/samples/bookinfo/src/productpage/productpage.py#L231" 86 | 87 | read -s 88 | backtotop 89 | desc "We could change the fault injection to a shorter duration" 90 | read -s 91 | desc "cat $APP_DIR/route-rule-ratings-test-delay.yaml | sed s/5.0/2.5/g | $ISTIOCTL replace" 92 | read -s 93 | desc "Or we should fix the bug in the reviews app (ie, should not be 10s timeout)" 94 | read -s 95 | 96 | desc "We already have v3 of our reviews app deployed which contains the fix" 97 | read -s 98 | desc "Let's route some traffic there to see if it's worth upgrading (canary release)" 99 | read -s 100 | desc "We'll direct 50% of the traffic to this new version" 101 | read -s 102 | run "cat $(relative $APP_DIR/route-rule-reviews-50-v3.yaml)" 103 | 104 | backtotop 105 | desc "Run some tests to verify the 50/50 split" 106 | read -s 107 | 108 | desc "Install our new routing rule" 109 | run "$ISTIOCTL replace -f $(relative $APP_DIR/route-rule-reviews-50-v3.yaml) -n $APP_NAMESPACE" 110 | 111 | desc "If we're confident now this is a good change, we can route all traffic that way" 112 | run "$ISTIOCTL replace -f $(relative $APP_DIR/route-rule-reviews-v3.yaml) -n $APP_NAMESPACE" 113 | -------------------------------------------------------------------------------- /spring-boot/simple-hello-world/Readme.md: -------------------------------------------------------------------------------- 1 | # Simple Spring Boot demo 2 | 3 | We can demo the basic developer experience of developing on Kubernetes with the fabric8-maven-plugin. 4 | 5 | ## What this demos: 6 | 7 | * Creating and running a spring boot application on Kubernetes 8 | * Debugging the application running in Kubernetes 9 | * Config maps + spring-cloud-kubernetes 10 | * Importing to fabric8 CI/CD 11 | 12 | ### Creating and running Spring Boot microservice in Kubernetes 13 | 14 | ``` 15 | $ ./demo.sh 16 | ``` 17 | 18 | This will do the following: 19 | 20 | * create a new spring-boot application using start.spring.io/spring initializr 21 | * create a hello-world HTTP/JSON service at http://localhost:8080/api/hello/{name} 22 | * build/run inside spring-boot; you can switch to a new window (tmux!) and run 23 | 24 | ``` 25 | $ curl http://localhost:8080/api/hello/ceposta 26 | {"response":"hello","count":0,"your-name":"ceposta"} 27 | ``` 28 | 29 | * add fabric8-maven-plugin to project 30 | * create openshift/kubernetes yaml & build docker image using openshift s2i (when run against minishift) 31 | ** note to point out we didn't touch a Dockerfile or any kubernetes yaml 32 | ** also note that things like readinessProbe, service ports, etc are automatically introspected and created 33 | * do a `fabric8:run` to run the application inside kubernetes 34 | 35 | 36 | ### Debugging our app 37 | 38 | To debug our newly deployed microservice while it's still running from the previous step, switch to a new window (tmux!), navigate to where you ran the `./demo.sh` script, and run: 39 | 40 | ``` 41 | $ ./_debug.sh 42 | ``` 43 | 44 | This will enable debugging, redeploy our app, and port forward to `5005` on the localhost. You can fire up your IDE and connect to port `5005` to debug. 45 | 46 | 47 | ### Configuration 48 | 49 | To demonstrate spring-cloud-kubernetes, which automatically looks for a config map named after our application (as defined in application.properties), do the following: 50 | 51 | Make sure the fabric8:run/fabric8:debug from the previous steps are *stopped*.
Switch to a new window (tmux!), navigate to where you ran the `./demo.sh` script, and run: 52 | 53 | ``` 54 | $ ./_config-demo.sh 55 | ``` 56 | 57 | This will step you through creating a config map, etc. At the appropriate step, when prompted to update the source code, make it look like this: 58 | 59 | ``` 60 | --- src/main/java/com/example/SimpleHelloWorldApplication.java (revision ) 61 | +++ src/main/java/com/example/SimpleHelloWorldApplication.java (revision ) 62 | @@ -1,5 +1,6 @@ 63 | package com.example; 64 | 65 | +import org.springframework.beans.factory.annotation.Value; 66 | import org.springframework.boot.SpringApplication; 67 | import org.springframework.boot.autoconfigure.SpringBootApplication; 68 | import org.springframework.web.bind.annotation.PathVariable; 69 | @@ -25,12 +26,16 @@ 70 | 71 | private static int counter = 0; 72 | 73 | + @Value("${demo.message}") 74 | + private String message; 75 | + 76 | @RequestMapping(value = "/hello/{name}", method = RequestMethod.GET) 77 | public Map hello(@PathVariable String name) throws Exception { 78 | HashMap response = new HashMap<>(); 79 | response.put("response", "hello"); 80 | response.put("your-name", name); 81 | response.put("count", counter++); 82 | + response.put("message", message); 83 | return response; 84 | } 85 | } 86 | 87 | ``` 88 | 89 | Then when we run `mvn spring-boot:run` we should be able to hit the service and see that our changes are pulling properties from the config map: 90 | 91 | ``` 92 | $ curl http://localhost:8080/api/hello/ceposta 93 | {"response":"hello","count":0,"message":"hello, spring cloud kubernetes from Las Vegas!","your-name":"ceposta"} 94 | ``` 95 | 96 | You can also hit the `/health` endpoint and show that we're NOT running inside Kubernetes: 97 | 98 | ``` 99 | $ curl http://localhost:8080/health 100 | { 101 | "diskSpace": { 102 | "free": 208848199680, 103 | "status": "UP", 104 | "threshold": 10485760, 105 | "total": 499046809600 106 | }, 107 | "kubernetes": { "inside": false, "status": "UP" }, 108 | "refreshScope": { "status": "UP" }, 109 | "status": "UP" 110 | } 111 | ``` 112 | 113 | Continue the demo to run inside of kubernetes with `fabric8:run`. 114 | 115 | To access the service running inside kubernetes, run this: 116 | 117 | ``` 118 | $ SVC_URL=$(minishift service simple-hello-world -n demos --url=true) 119 | $ curl $SVC_URL/api/hello/ceposta 120 | $ curl $SVC_URL/health 121 | ``` 122 | 123 | ### Importing to fabric8 CI/CD 124 | 125 | To import to fabric8 CI/CD, make sure you've got fabric8 CI/CD running in the default namespace and run: 126 | 127 | ``` 128 | $ ./_import.sh 129 | ``` 130 | 131 | ### Cleanup 132 | 133 | Run the `./cleanup.sh` script to clean up the environment. -------------------------------------------------------------------------------- /spring-boot/hystrix-hello-world/Readme.md: -------------------------------------------------------------------------------- 1 | # Hystrix, Service Discovery, Spring Boot demo 2 | 3 | This demo builds on the previous one. We build a service which calls our hello-world service and we add hystrix and service discovery features.
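For orientation, here is a minimal sketch of roughly where this demo ends up once the diffs below are applied. Treat it as an approximation: it assumes the Spring Cloud Netflix Hystrix (javanica) annotations are on the classpath, collapses the application class and the controller into one listing, and leaves out whatever configuration puts the endpoints under the `/api` prefix.

```
package com.example;

import com.netflix.hystrix.contrib.javanica.annotation.HystrixCommand;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.circuitbreaker.EnableCircuitBreaker;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.client.RestTemplate;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;

@SpringBootApplication
@EnableCircuitBreaker
public class HystrixHelloWorldApplication {

    public static void main(String[] args) {
        SpringApplication.run(HystrixHelloWorldApplication.class, args);
    }
}

@RestController
class HelloController {

    // Wrap the downstream call in a Hystrix command; if simple-hello-world is
    // unreachable, Hystrix falls back to generatedResponse() instead of failing.
    @HystrixCommand(fallbackMethod = "generatedResponse")
    @RequestMapping(value = "/ip/{name}", method = RequestMethod.GET)
    public Map hello(@PathVariable String name) throws Exception {
        RestTemplate template = new RestTemplate();
        HashMap response = new HashMap<>();

        // Kubernetes service discovery: call the dependency by its Service name
        // instead of a hardcoded host:port.
        String url = "http://simple-hello-world/api/hello/" + name;
        HashMap hello = template.getForEntity(url, HashMap.class).getBody();

        response.put("hello", hello);
        response.put("ip", InetAddress.getLocalHost().getHostAddress());

        return response;
    }

    // Fallback used by Hystrix when the call above fails or times out.
    public Map generatedResponse(@PathVariable String name) throws UnknownHostException {
        HashMap response = new HashMap<>();
        response.put("hello", "This is a generated response!");
        response.put("ip", InetAddress.getLocalHost().getHostAddress());

        return response;
    }
}
```

The demo script builds this up incrementally; the diffs in the sections below show each change in the order it is made.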
4 | 5 | ## What this demos: 6 | 7 | * Creating and running a spring boot application on Kubernetes 8 | * Illustrate service chaining and service discovery 9 | * Illustrate hystrix circuit breaker 10 | 11 | ### Creating and running Spring Boot microservice in Kubernetes 12 | 13 | ``` 14 | $ ./demo.sh 15 | ``` 16 | 17 | This will do the following: 18 | 19 | * create a new spring-boot application using start.spring.io/spring initializr 20 | * create a hello-world HTTP/JSON service at http://localhost:8080/api/ip/{name} 21 | * build/run inside spring-boot; you can switch to a new window (tmux!) and run 22 | 23 | ``` 24 | $ curl http://localhost:8080/api/ip/ceposta 25 | ``` 26 | 27 | This *should* blow up in one of two ways: 28 | 29 | * Either it will not start because we already have another local spring-boot app using port `8080` 30 | * Service comes up properly, but when we call it, it should blow up because it cannot reach our hello-world service 31 | 32 | We blow it up at this point for illustration purposes. We're going to illustrate these things: 33 | 34 | * When we debug/run locally, we need to be mindful of what apps are listening on what ports, etc. In kubernetes, *everything* can listen on port `8080`. Yay! 35 | * Should we really be hardcoding URLs and ports in our service? Should we use k8s service discovery for this? 36 | 37 | Let's keep the simple-hello-world running locally, update this app's `server.port` property to `8081`, and continue on with the demo: 38 | ``` 39 | diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties 40 | index 297dee6..da861de 100644 41 | --- a/src/main/resources/application.properties 42 | +++ b/src/main/resources/application.properties 43 | @@ -1,2 +1,2 @@ 44 | spring.application.name=hystrix-hello-world 45 | -#server.port=8081 46 | +server.port=8081 47 | ``` 48 | 49 | Now when we curl, we should see a good response: 50 | 51 | ``` 52 | $ curl http://localhost:8081/api/ip/ceposta 53 | {"ip":"192.168.99.1","hello":{"response":"hello","count":0,"your-name":"ceposta"}} 54 | ``` 55 | 56 | What if we take down the simple-hello-world service? We'll get bad responses again.
Let's add hystrix and a fallback: 57 | 58 | ``` 59 | --- a/src/main/java/com/example/HystrixHelloWorldApplication.java 60 | +++ b/src/main/java/com/example/HystrixHelloWorldApplication.java 61 | @@ -1,7 +1,9 @@ 62 | package com.example; 63 | 64 | +import com.netflix.hystrix.contrib.javanica.annotation.HystrixCommand; 65 | import org.springframework.boot.SpringApplication; 66 | import org.springframework.boot.autoconfigure.SpringBootApplication; 67 | +import org.springframework.cloud.client.circuitbreaker.EnableCircuitBreaker; 68 | import org.springframework.web.bind.annotation.PathVariable; 69 | import org.springframework.web.bind.annotation.RequestMapping; 70 | import org.springframework.web.bind.annotation.RequestMethod; 71 | @@ -9,10 +11,12 @@ import org.springframework.web.bind.annotation.RestController; 72 | import org.springframework.web.client.RestTemplate; 73 | 74 | import java.net.InetAddress; 75 | +import java.net.UnknownHostException; 76 | import java.util.HashMap; 77 | import java.util.Map; 78 | 79 | @SpringBootApplication 80 | +@EnableCircuitBreaker 81 | public class HystrixHelloWorldApplication { 82 | 83 | public static void main(String[] args) { 84 | @@ -26,6 +30,7 @@ public class HystrixHelloWorldApplication { 85 | class HelloController { 86 | 87 | 88 | + @HystrixCommand(fallbackMethod = "generatedResponse") 89 | @RequestMapping(value = "/ip/{name}", method = RequestMethod.GET) 90 | public Map hello(@PathVariable String name) throws Exception { 91 | RestTemplate template = new RestTemplate(); 92 | @@ -39,6 +44,17 @@ class HelloController { 93 | 94 | return response; 95 | } 96 | + 97 | + public Map generatedResponse(@PathVariable String name) throws UnknownHostException { 98 | + HashMap response = new HashMap<>(); 99 | + response.put("hello", "This is a generated response!"); 100 | + response.put("ip", InetAddress.getLocalHost().getHostAddress()); 101 | + 102 | + return response; 103 | + } 104 | } 105 | ``` 106 | 107 | 108 | Now let's take down the simple-hello-world service, run our app again, and curl to get the generated fallback response: 109 | 110 | ``` 111 | $ curl http://localhost:8081/api/ip/ceposta 112 | {"ip":"10.1.2.1","hello":"This is a generated response!"} 113 | ``` 114 | 115 | Lastly, we'll switch to Kubernetes service discovery and call the hello-world service by its k8s Service name: 116 | 117 | ``` 118 | --- a/src/main/java/com/example/HystrixHelloWorldApplication.java 119 | +++ b/src/main/java/com/example/HystrixHelloWorldApplication.java 120 | @@ -36,7 +36,7 @@ class HelloController { 121 | RestTemplate template = new RestTemplate(); 122 | HashMap response = new HashMap<>(); 123 | 124 | - String url = "http://localhost:8080/api/hello/" + name; 125 | + String url = "http://simple-hello-world/api/hello/" + name; 126 | HashMap hello = template.getForEntity(url, HashMap.class).getBody(); 127 | 128 | response.put("hello", hello); 129 | ``` 130 | 131 | Now let the demo continue, but make sure the simple-hello-world service is running inside Kubernetes before you continue. 132 | Then call the ip service once it's running in k8s! -------------------------------------------------------------------------------- /debezium/docker/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | .
$(dirname ${BASH_SOURCE})/../../util.sh 4 | 5 | ### Zookeeper 6 | desc "let's start up zookeeper" 7 | run "docker run -itd --name zookeeper -p 2181:2181 -p 2888:2888 -p 3888:3888 debezium/zookeeper:0.3" 8 | 9 | desc "let's make sure ZK started up correctly" 10 | run "docker logs zookeeper" 11 | 12 | backtotop 13 | 14 | ### Kafka 15 | desc "now let's run kafka" 16 | run "docker run -itd --name kafka -p 9092:9092 --link zookeeper:zookeeper debezium/kafka:0.3" 17 | 18 | desc "let's make sure Kafka came up correctly" 19 | run "docker logs kafka" 20 | 21 | backtotop 22 | 23 | ### MySQL 24 | desc "Now, let's create a database" 25 | run "docker run -itd --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=debezium -e MYSQL_USER=mysqluser -e MYSQL_PASSWORD=mysqlpw debezium/example-mysql:0.3" 26 | 27 | desc "Check the mysql logs" 28 | docker logs -f mysql & 29 | TASK_PID=$! 30 | sleep 15s 31 | kill $TASK_PID 32 | 33 | backtotop 34 | 35 | ### MySQL Client 36 | desc "Now let's create a client to the database" 37 | read -s 38 | 39 | tmux split-window -v 40 | tmux select-layout even-vertical 41 | tmux select-pane -t 0 42 | 43 | tmux send-keys -t 1 "docker run -it --rm --name mysqlterm --link mysql mysql:5.7 sh -c 'exec mysql -h\"\$MYSQL_PORT_3306_TCP_ADDR\" -P\"\$MYSQL_PORT_3306_TCP_PORT\" -uroot -p\"\$MYSQL_ENV_MYSQL_ROOT_PASSWORD\"'" C-m 44 | 45 | read -s 46 | 47 | desc "let's use the inventory database and check it out a bit" 48 | tmux send-keys -t 1 C-l 49 | tmux send-keys -t 1 "use inventory\;" 50 | read -s 51 | 52 | tmux send-keys -t 1 C-m 53 | read -s 54 | 55 | desc "list the tables we have in this DB" 56 | tmux send-keys -t 1 C-l 57 | tmux send-keys -t 1 "show tables\;" 58 | read -s 59 | tmux send-keys -t 1 C-m 60 | read -s 61 | 62 | desc "what's in the customers table" 63 | tmux send-keys -t 1 C-l 64 | tmux send-keys -t 1 "SELECT * FROM customers\;" 65 | read -s 66 | tmux send-keys -t 1 C-m 67 | read -s 68 | 69 | ### Kafka Connect 70 | desc "Start up Kafka Connect" 71 | read -s 72 | tmux split-window -v 73 | tmux select-layout even-vertical 74 | tmux select-pane -t 0 75 | 76 | # the kafka connect pane becomes #1 and mysql becomes #2 77 | 78 | tmux send-keys -t 1 "docker run -itd --name connect -p 8083:8083 -e GROUP_ID=1 -e CONFIG_STORAGE_TOPIC=my_connect_configs -e OFFSET_STORAGE_TOPIC=my_connect_offsets --link zookeeper:zookeeper --link kafka:kafka --link mysql:mysql debezium/connect:0.3" C-m 79 | 80 | tmux send-keys -t 1 "docker logs -f connect" C-m 81 | # note: this will port forward to the minikube machine, but we need the port available locally 82 | # so let's do an SSH port forward. We have to make sure to clean this up when we're done.
We'll 83 | # also add this to the cleanup.sh script (a commented cleanup sketch follows at the end of this script) 84 | command minikube ssh -- -vnNTL *:8083:$(minikube ip):8083 > /dev/null 2>&1 & 85 | 86 | 87 | read -s 88 | 89 | desc "Let's see what connectors we have" 90 | run "curl -H \"Accept:application/json\" localhost:8083/connectors/" 91 | 92 | desc "let's see what a connector definition looks like:" 93 | run "cat $(relative inventory-connector.json) | pretty-json" 94 | 95 | backtotop 96 | 97 | CONNECTOR_FILE=$(relative inventory-connector.json) 98 | 99 | desc "Now let's add a connector that monitors our inventory database" 100 | run "curl -i -X POST -H \"Accept:application/json\" -H \"Content-Type:application/json\" localhost:8083/connectors/ -d @$CONNECTOR_FILE" 101 | 102 | backtotop 103 | 104 | desc "Now we should see our inventory connector" 105 | run "curl -H \"Accept:application/json\" localhost:8083/connectors/" 106 | 107 | desc "Let's see the connector itself" 108 | run "curl -H \"Accept:application/json\" localhost:8083/connectors/inventory-connector | pretty-json" 109 | 110 | desc "We're going to leave Kafka Connect with the Debezium connector running, but navigate away from its logs for a second." 111 | read -s 112 | tmux send-keys -t 1 C-c 113 | 114 | desc "We still have the docker container for Kafka Connect running" 115 | run "docker ps" 116 | 117 | 118 | backtotop 119 | 120 | desc "Let's list the topics in Kafka at this moment" 121 | tmux send-keys -t 1 C-l 122 | read -s 123 | 124 | tmux send-keys -t 1 "docker run -it --name watcher --rm --link zookeeper:zookeeper debezium/kafka:0.3 list-topics -a -k dbserver1.inventory.customers" C-m 125 | read -s 126 | 127 | desc "Let's subscribe to a kafka topic that should have the customers table data" 128 | tmux send-keys -t 1 C-l 129 | read -s 130 | 131 | # now the kafka subscription becomes pane #1, the KC window is #2 and mysql is #3 132 | tmux send-keys -t 1 "docker run -it --name watcher --rm --link zookeeper:zookeeper debezium/kafka:0.3 watch-topic -a -k dbserver1.inventory.customers" C-m 133 | 134 | read -s 135 | 136 | desc "Now let's pretend we're an application making changes to the database" 137 | read -s 138 | tmux send-keys -t 2 C-l 139 | tmux send-keys -t 2 "SELECT * FROM customers\;" 140 | read -s 141 | tmux send-keys -t 2 C-m 142 | read -s 143 | 144 | tmux send-keys -t 2 "UPDATE customers SET first_name='Anne Marie' WHERE id=1004\;" 145 | read -s 146 | tmux send-keys -t 2 C-m 147 | read -s 148 | 149 | tmux send-keys -t 2 C-l 150 | tmux send-keys -t 2 "SELECT * FROM customers\;" 151 | read -s 152 | tmux send-keys -t 2 C-m 153 | read -s 154 | 155 | desc "let's try a delete" 156 | tmux send-keys -t 2 C-l 157 | tmux send-keys -t 2 "DELETE FROM customers WHERE id=1004\;" 158 | read -s 159 | tmux send-keys -t 2 C-m 160 | read -s 161 | 162 | desc "Let's try to stop the Connect process and watch that it restarts from where it left off" 163 | run "docker stop connect" 164 | 165 | 166 | desc "Now let's add some more data to mysql" 167 | tmux send-keys -t 2 C-l 168 | read -s 169 | tmux send-keys -t 2 "INSERT INTO customers VALUES (default, \"Sarah\", \"Thompson\", \"kitt@acme.com\")\;" 170 | read -s 171 | tmux send-keys -t 2 C-m 172 | 173 | read -s 174 | tmux send-keys -t 2 "INSERT INTO customers VALUES (default, \"Kenneth\", \"Anderson\", \"kander@acme.com\")\;" 175 | read -s 176 | tmux send-keys -t 2 C-m 177 | 178 | read -s 179 | 180 | desc "Now let's restart the kafka connect process which hosts our debezium connector" 181 | run "docker start connect" 182 | 183 | 184 |
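# NOTE (hedged cleanup sketch, not necessarily what this repo's cleanup.sh actually does):
# the background SSH port-forward started above ("command minikube ssh -- -vnNTL *:8083:$(minikube ip):8083")
# keeps running after this demo exits, and the named containers created here are not removed
# automatically. A cleanup along these lines is assumed:
#
#   pkill -f "minikube ssh" || true
#   docker rm -f connect mysql kafka zookeeper 2>/dev/null || true
#
# (mysqlterm and watcher were started with --rm, so they clean themselves up.)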
-------------------------------------------------------------------------------- /service-mesh/istio/bookinfo/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . $(dirname ${BASH_SOURCE})/../../../util.sh 4 | 5 | VERSION="0.1.6" 6 | APP_DIR=$(relative ../setup/binaries/istio-$VERSION/samples/apps/bookinfo) 7 | ISTIOCTL=$(relative ../setup/binaries/istio-$VERSION/bin/istioctl) 8 | ISTIO_SOURCE=$(relative ../setup/binaries/istio-$VERSION/istio.VERSION) 9 | 10 | if [ "$1" == "--upstream" ]; then 11 | echo "installing demo from upstream..." 12 | APP_DIR=$(relative ../setup/project/istio/samples/apps/bookinfo) 13 | ISTIO_SOURCE=$(relative ../setup/project/istio/istio.VERSION) 14 | ISTIOCTL="$(relative ../setup/project/bin/istioctl)" 15 | fi 16 | 17 | echo "Using APPDIR=$APP_DIR" 18 | echo "Using istioctl from $ISTIOCTL" 19 | echo "Press <enter> to continue..." 20 | read -s 21 | 22 | source $ISTIO_SOURCE 23 | # also should know about this: kube-inject --hub $PILOT_HUB --tag $PILOT_TAG 24 | 25 | # Let's find the dashboard URL 26 | GRAFANA_HOST=$(kubectl get pod $(kubectl get pod | grep -i running | grep grafana | awk '{print $1 }') -o yaml | grep hostIP | cut -d ':' -f2 | xargs) 27 | GRAFANA_PORT=$(kubectl get svc/grafana -o yaml | grep nodePort | cut -d ':' -f2 | xargs) 28 | ISTIO_GRAFANA_URL=http://$GRAFANA_HOST\:$GRAFANA_PORT/dashboard/db/istio-dashboard 29 | 30 | SERVICE_GRAPH=$(kubectl get po -l app=servicegraph -o jsonpath={.items[0].status.hostIP}):$(kubectl get svc servicegraph -o jsonpath={.spec.ports[0].nodePort}) 31 | SERVICE_GRAPH_URL=http://$SERVICE_GRAPH/dotviz 32 | 33 | 34 | 35 | ZIPKIN_HOST=$(kubectl get pod $(kubectl get pod | grep -i running | grep zipkin | awk '{print $1 }') -o yaml | grep hostIP | cut -d ':' -f2 | xargs) 36 | ZIPKIN_PORT=$(kubectl get svc/zipkin -o yaml | grep nodePort | cut -d ':' -f2 | xargs) 37 | ISTIO_ZIPKIN_URL=http://$ZIPKIN_HOST\:$ZIPKIN_PORT/ 38 | 39 | 40 | desc "Let's open the grafana, servicegraph, and zipkin dashboards" 41 | read -s 42 | 43 | open $ISTIO_GRAFANA_URL; open $SERVICE_GRAPH_URL; open $ISTIO_ZIPKIN_URL 44 | 45 | read -s 46 | 47 | desc "let's take a look at the app" 48 | run "cat $(relative $APP_DIR/bookinfo.yaml)" 49 | 50 | desc "let's add the istio proxy" 51 | 52 | run "$ISTIOCTL kube-inject -f $(relative $APP_DIR/bookinfo.yaml)" 53 | 54 | 55 | 56 | desc "deploy the bookinfo app with istio proxy enabled" 57 | run "kubectl apply -f <($ISTIOCTL kube-inject -f $(relative $APP_DIR/bookinfo.yaml))" 58 | 59 | 60 | desc "take a look at the services we now have" 61 | run "kubectl get services" 62 | 63 | desc "take a look at the pods we now have" 64 | run "kubectl get pods" 65 | 66 | # define the gateway url 67 | GATEWAY_URL=$(kubectl get po -l istio=ingress -o jsonpath={.items[0].status.hostIP}):$(kubectl get svc istio-ingress -o jsonpath={.spec.ports[0].nodePort}) 68 | 69 | 70 | backtotop 71 | desc "open the bookinfo app in a browser" 72 | read -s 73 | run "open http://$GATEWAY_URL/productpage" 74 | 75 | desc "we should set some routing rules for the istio proxy" 76 | read -s 77 | desc "we currently don't have any rules" 78 | read -s 79 | run "$ISTIOCTL get route-rule" 80 | 81 | desc "We need to force all traffic to v1 of the reviews service" 82 | read -s 83 | desc "Let's take a look at the route rules we want to apply" 84 | read -s 85 | run "cat $(relative $APP_DIR/route-rule-all-v1.yaml)" 86 | 87 | desc "update the istio routing rules" 88 | run "$ISTIOCTL create -f $(relative 
$APP_DIR/route-rule-all-v1.yaml)" 89 | 90 | backtotop 91 | desc "Now go to the app and make sure all the traffic goes to the v1 reviews" 92 | read -s 93 | 94 | desc "now if we list the route rules, we should see our new rules" 95 | run "$ISTIOCTL get route-rule" 96 | 97 | desc "we also see that these rules are stored in kubernetes as 'istioconfig'" 98 | desc "we can use vanilla kubernetes TPR to get these configs" 99 | read -s 100 | run "kubectl get istioconfig" 101 | run "kubectl get istioconfig/route-rule-ratings-default -o yaml" 102 | 103 | backtotop 104 | 105 | desc "Now.. let's say we want to deploy v2 of the reviews service and route certain customers to it" 106 | read -s 107 | desc "We can implement A/B testing like this" 108 | read -s 109 | desc "Let's take a look at the content based routing rule we will use" 110 | read -s 111 | run "cat $APP_DIR/route-rule-reviews-test-v2.yaml" 112 | 113 | desc "Let's make the change" 114 | run "$ISTIOCTL create -f $APP_DIR/route-rule-reviews-test-v2.yaml" 115 | 116 | desc "let's look at the route rules" 117 | read -s 118 | run "$ISTIOCTL get route-rule" 119 | run "$ISTIOCTL get route-rule reviews-test-v2" 120 | run "$ISTIOCTL get route-rule reviews-default" 121 | 122 | desc "Now go to your browser and refresh the app.. should still see v1 of the reviews" 123 | desc "But if you login as jason, you should see the new, v2" 124 | 125 | read -s 126 | 127 | backtotop 128 | 129 | desc "Now we want to test our services." 130 | read -s 131 | desc "We'll want to test just for the 'jason' user and not everyone" 132 | read -s 133 | desc "let's inject some faults between the reviews v2 service and the ratings service" 134 | desc "we'll delay all traffic for 5s. everything should be okay since we have a 10s timeout" 135 | read -s 136 | desc "see source here: https://github.com/istio/istio/blob/master/samples/apps/bookinfo/src/reviews/reviews-application/src/main/java/application/rest/LibertyRestEndpoint.java#L64" 137 | read -s 138 | run "cat $(relative $APP_DIR/destination-ratings-test-delay.yaml)" 139 | run "$ISTIOCTL create -f $(relative $APP_DIR/destination-ratings-test-delay.yaml)" 140 | 141 | backtotop 142 | desc "Now go to the productpage and test the delay" 143 | read -s 144 | 145 | desc "We see that the product reviews are not available at all!!" 146 | desc "we've found a bug!" 147 | read -s 148 | desc "Dang!
The product page has a timeout of 3s" 149 | desc "https://github.com/istio/istio/blob/master/samples/apps/bookinfo/src/productpage/productpage.py#L140" 150 | 151 | read -s 152 | backtotop 153 | desc "We could change the fault injection to a shorter duration" 154 | read -s 155 | desc "cat $APP_DIR/destination-ratings-test-delay.yaml | sed s/5.0/2.5/g | $ISTIOCTL replace" 156 | read -s 157 | desc "Or we should fix the bug in the reviews app (ie, should not be 10s timeout)" 158 | read -s 159 | 160 | desc "We already have v3 of our reviews app deployed which contains the fix" 161 | read -s 162 | desc "Let's route some traffic there to see if it's worth upgrading (canary release)" 163 | read -s 164 | desc "We'll direct 50% of the traffic to this new version" 165 | read -s 166 | run "cat $(relative $APP_DIR/route-rule-reviews-50-v3.yaml)" 167 | 168 | backtotop 169 | desc "Run some tests to verify the 50/50 split" 170 | read -s 171 | 172 | desc "Install our new routing rule" 173 | run "$ISTIOCTL replace -f $(relative $APP_DIR/route-rule-reviews-50-v3.yaml)" 174 | 175 | desc "If we're confident now this is a good change, we can route all traffic that way" 176 | run "$ISTIOCTL replace -f $(relative $APP_DIR/route-rule-reviews-v3.yaml)" 177 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | --------------------------------------------------------------------------------