├── deploy ├── kubernetes │ ├── helm-chart │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── carts-svc.yaml │ │ │ ├── user-svc.yaml │ │ │ ├── orders-svc.yaml │ │ │ ├── front-end-svc.yaml │ │ │ ├── payment-svc.yaml │ │ │ ├── cart-db-svc.yaml │ │ │ ├── catalogue-svc.yaml │ │ │ ├── rabbitmq-svc.yaml │ │ │ ├── orders-db-svc.yaml │ │ │ ├── shipping-svc.yaml │ │ │ ├── user-db-svc.yaml │ │ │ ├── session-db-svc.yaml │ │ │ ├── catalogue-db-svc.yaml │ │ │ ├── ingress.yaml │ │ │ ├── zipkin-mysql-svc.yaml │ │ │ ├── queue-master-svc.yaml │ │ │ ├── zipkin-svc.yaml │ │ │ ├── zipkin-mysql-dep.yaml │ │ │ ├── _helpers.tpl │ │ │ ├── zipkin-dep.yaml │ │ │ ├── catalogue-db-dep.yaml │ │ │ ├── loadtest-dep.yaml │ │ │ ├── rabbitmq-dep.yaml │ │ │ ├── session-db-dep.yaml │ │ │ ├── zipkin-cron-dep.yaml │ │ │ ├── cart-db-dep.yaml │ │ │ ├── orders-db-dep.yaml │ │ │ ├── user-db-dep.yaml │ │ │ ├── queue-master-dep.yaml │ │ │ ├── front-end-dep.yaml │ │ │ ├── payment-dep.yaml │ │ │ ├── catalogue-dep.yaml │ │ │ ├── user-dep.yaml │ │ │ ├── carts-dep.yaml │ │ │ ├── orders-dep.yaml │ │ │ └── shipping-dep.yaml │ │ ├── Chart.yaml │ │ ├── requirements.yaml │ │ ├── .helmignore │ │ └── values.yaml │ ├── manifests │ │ ├── sock-shop-ns.yaml │ │ ├── carts-svc.yml │ │ ├── orders-svc.yaml │ │ ├── user-svc.yaml │ │ ├── payment-svc.yaml │ │ ├── front-end-svc.yaml │ │ ├── carts-db-svc.yaml │ │ ├── catalogue-svc.yaml │ │ ├── shipping-svc.yaml │ │ ├── user-db-svc.yaml │ │ ├── orders-db-svc.yaml │ │ ├── session-db-svc.yaml │ │ ├── catalogue-db-svc.yaml │ │ ├── queue-master-svc.yaml │ │ ├── rabbitmq-svc.yaml │ │ ├── catalogue-db-dep.yaml │ │ ├── loadtest-dep.yaml │ │ ├── session-db-dep.yaml │ │ ├── carts-db-dep.yaml │ │ ├── orders-db-dep.yaml │ │ ├── user-db-dep.yaml │ │ ├── rabbitmq-dep.yaml │ │ ├── front-end-dep.yaml │ │ ├── payment-dep.yaml │ │ ├── queue-master-dep.yaml │ │ ├── user-dep.yaml │ │ ├── catalogue-dep.yaml │ │ ├── carts-dep.yaml │ │ ├── orders-dep.yaml │ │ └── shipping-dep.yaml │ ├── manifests-monitoring │ │ ├── monitoring-ns.yaml │ │ ├── prometheus-sa.yml │ │ ├── prometheus-exporter-kube-state-svc.yaml │ │ ├── grafana-svc.yaml │ │ ├── prometheus-crb.yml │ │ ├── prometheus-svc.yaml │ │ ├── prometheus-cr.yml │ │ ├── prometheus-exporter-kube-state-dep.yaml │ │ ├── prometheus-alertrules.yaml │ │ ├── prometheus-dep.yaml │ │ ├── grafana-dep.yaml │ │ ├── grafana-import-dash-batch.yaml │ │ └── prometheus-exporter-disk-usage-ds.yaml │ ├── autoscaling │ │ ├── heapster-sa.yml │ │ ├── cart-hsc.yaml │ │ ├── heapster-crb.yml │ │ ├── user-hsc.yaml │ │ ├── orders-hsc.yaml │ │ ├── payment-hsc.yaml │ │ ├── catalogue-hsc.yaml │ │ ├── front-end-hsc.yaml │ │ ├── shipping-hsc.yaml │ │ ├── queue-master-hsc.yaml │ │ ├── heapster-service.yaml │ │ ├── influxdb-service.yaml │ │ ├── influxdb-deployment.yaml │ │ ├── grafana-service.yaml │ │ ├── heapster-deployment.yaml │ │ └── grafana-deployment.yaml │ ├── manifests-logging │ │ ├── fluentd-sa.yaml │ │ ├── fluentd-cr.yml │ │ ├── fluentd-crb.yml │ │ ├── kibana.yml │ │ ├── elasticsearch.yml │ │ └── fluentd-daemon.yml │ ├── manifests-policy │ │ ├── netpol-default-deny.yaml │ │ ├── netpol-cortex-access.yaml │ │ ├── netpol-frontend-access.yaml │ │ ├── netpol-orders-access.yaml │ │ ├── netpol-payment-access.yaml │ │ ├── netpol-shipping-access.yaml │ │ ├── netpol-user-db-access.yaml │ │ ├── netpol-cart-db-access.yaml │ │ ├── netpol-catalogue-access.yaml │ │ ├── netpol-orders-db-access.yaml │ │ ├── netpol-catalogue-db-access.yaml │ │ ├── netpol-cart-access.yaml │ │ ├── netpol-user-access.yaml │ │ └── 
netpol-rabbitmq-access.yaml │ ├── manifests-alerting │ │ ├── README.md │ │ ├── alertmanager-svc.yaml │ │ ├── alertmanager-configmap.yaml │ │ └── alertmanager-dep.yaml │ ├── terraform │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── main.tf │ ├── manifests-jaeger │ │ ├── payment-dep.yaml │ │ ├── catalogue-dep.yaml │ │ ├── user-dep.yaml │ │ └── jaeger.yaml │ └── README.md ├── apcera │ ├── README.md │ ├── sockShop.pol │ ├── stopSockShop.sh │ ├── deleteSockShop.sh │ ├── deploySockShop.sh │ └── startSockShop.sh ├── aws-ecs │ └── README.md ├── docker-swarm-weave │ └── README.md ├── nomad │ ├── README.md │ ├── jobs │ │ ├── netman.nomad │ │ ├── logging-fluentd.nomad │ │ └── logging-elk.nomad │ └── scripts │ │ └── netman.sh ├── docker-swarm │ ├── README.md │ ├── infra │ │ ├── aws │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ └── main.tf │ │ ├── gcloud │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ └── main.tf │ │ └── local │ │ │ ├── swarm.sh │ │ │ └── Vagrantfile │ ├── packer │ │ ├── preseed.cfg │ │ └── packer.json │ └── docker-compose.yml ├── docker-compose │ ├── README.md │ ├── grafana │ │ ├── prometheus-datasource.json │ │ └── import.sh │ ├── alert.rules │ ├── alertmanager.yml │ ├── docker-compose.monitoring.yml │ ├── prometheus.yml │ ├── docker-compose.logging.yml │ └── docker-compose.yml ├── docker-compose-weave │ └── README.md ├── mesos-cni │ ├── README.md │ ├── provisionWeaveCNI.sh │ └── provisionMesosDns.sh ├── mesos-marathon │ ├── README.md │ └── provisionWeave.sh ├── aws-ecs-shippable │ └── README.md ├── minimesos-marathon │ └── README.md ├── README.md ├── example │ └── README.md └── micro-sock │ └── docker-compose.yaml ├── staging ├── .gitignore ├── terraform.tfvars.example ├── outputs.tf ├── up.sh ├── variables.tf └── README.md ├── openapi ├── .dockerignore ├── Dockerfile ├── package.json └── README.md ├── install ├── README.md └── aws-minimesos │ ├── variables.tf │ ├── outputs.tf │ ├── README.md │ ├── provision.sh │ ├── main.tf │ └── aws.tf ├── .mvn └── wrapper │ ├── maven-wrapper.jar │ └── maven-wrapper.properties ├── .gitmodules ├── .github ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── CONTRIBUTING.md ├── healthcheck ├── Dockerfile └── healthcheck.rb ├── graphs ├── Dockerfile ├── README.md └── sock-shop-performance.dashboard.py ├── shippable.triggers.yml ├── internal-docs └── design.md ├── push.sh ├── README.md ├── .travis.yml ├── shippable.jobs.yml ├── .gitignore └── shippable.resources.yml /deploy/kubernetes/helm-chart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /staging/.gitignore: -------------------------------------------------------------------------------- 1 | terraform.tfvars 2 | *.plan 3 | -------------------------------------------------------------------------------- /openapi/.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore nodejs modules 2 | node_modules/ -------------------------------------------------------------------------------- /install/README.md: -------------------------------------------------------------------------------- 1 | # Install utilities 2 | 3 | Contains utility scripts for preparing a target platform. 
4 | 5 | -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxacademy/microservices-demo/HEAD/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/sock-shop-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: sock-shop 6 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "docs"] 2 | path = docs 3 | url = https://github.com/microservices-demo/microservices-demo.github.io.git 4 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/monitoring-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart for Sock Shop 3 | name: helm-chart 4 | version: 0.2.0 5 | -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.3/apache-maven-3.3.3-bin.zip -------------------------------------------------------------------------------- /deploy/apcera/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/apcera.html) on how to deploy Sock Shop on Apcera. 2 | -------------------------------------------------------------------------------- /deploy/aws-ecs/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/ecs.html) on how to deploy Sock Shop using AWS ECS. 2 | -------------------------------------------------------------------------------- /deploy/docker-swarm-weave/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](../../docs/deployment/docker-swarm.md) on how to deploy Sock Shop using Docker Swarm. 2 | -------------------------------------------------------------------------------- /deploy/nomad/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/nomad.html) on how to deploy Sock Shop using Nomad. 
2 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/heapster-sa.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: heapster 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /staging/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | key_name = "" 2 | bastion_cidr_block = "" 3 | bastion_security_group = "" 4 | private_key_file = "" 5 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/fluentd-sa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: fluentd 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /deploy/docker-swarm/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/docker-swarm.html) on how to deploy Sock Shop using Docker Swarm. 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - Add labels appropriate to the issue 2 | - Describe the expected behaviour and the actual behaviour 3 | - Describe steps to reproduce the problem 4 | -------------------------------------------------------------------------------- /deploy/docker-compose/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/docker-compose.html) on how to deploy Sock Shop using Docker Compose. 2 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: nginx-ingress 3 | version: 0.4.2 4 | repository: https://kubernetes-charts.storage.googleapis.com -------------------------------------------------------------------------------- /install/aws-minimesos/variables.tf: -------------------------------------------------------------------------------- 1 | variable "access_key" { 2 | } 3 | variable "secret_key" { 4 | } 5 | variable "private_key_file" { 6 | } 7 | variable "aws_key_name" { 8 | } 9 | 10 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - Read the contribution guidelines 2 | - Include a reference to a related issue in this repository 3 | - A description of the changes proposed in the pull request -------------------------------------------------------------------------------- /deploy/docker-compose-weave/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/docker-compose-weave.html) on how to deploy Sock Shop using Docker Compose and Weave. 
2 | -------------------------------------------------------------------------------- /deploy/docker-compose/grafana/prometheus-datasource.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prometheus", 3 | "type": "prometheus", 4 | "url": "http://prometheus:9090", 5 | "access": "proxy", 6 | "basicAuth": false 7 | } 8 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-sa.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: prometheus 6 | namespace: monitoring 7 | labels: 8 | app: prometheus 9 | -------------------------------------------------------------------------------- /install/aws-minimesos/outputs.tf: -------------------------------------------------------------------------------- 1 | output "# SSH key" { 2 | value = "\nexport KEY=${var.private_key_file}" 3 | } 4 | 5 | output "# instance" { 6 | value = "\nexport IP=${aws_instance.minimesos.public_dns}" 7 | } 8 | -------------------------------------------------------------------------------- /deploy/mesos-cni/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/mesos-cni.html) on how to deploy Sock Shop using [Mesos](https://mesos.apache.org) and [CNI](https://github.com/containernetworking/cni). 2 | -------------------------------------------------------------------------------- /deploy/mesos-marathon/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/mesos-marathon.html) on how to deploy Sock Shop using [Mesos](https://mesos.apache.org) and [Marathon](https://github.com/mesosphere/marathon). 2 | -------------------------------------------------------------------------------- /healthcheck/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.5 2 | 3 | RUN apk update && \ 4 | apk add ruby ruby-json ruby-rdoc ruby-irb 5 | 6 | RUN gem install awesome_print 7 | 8 | COPY healthcheck.rb healthcheck.rb 9 | ENTRYPOINT ["ruby", "healthcheck.rb"] 10 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_addresses" { 2 | value = ["${aws_instance.ci-sockshop-docker-swarm-node.*.public_ip}"] 3 | } 4 | 5 | output "master_address" { 6 | value = "${aws_instance.ci-sockshop-docker-swarm-master.public_ip}" 7 | } 8 | -------------------------------------------------------------------------------- /deploy/aws-ecs-shippable/README.md: -------------------------------------------------------------------------------- 1 | This deployment will launch a Weave-enabled Amazon ECS cluster. 2 | 3 | See the [documentation](https://microservices-demo.github.io/deployment/ecs-weave-shippable.html) on how to deploy Sock Shop using AWS ECS with automated CI/CD using Shippable. 
4 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/fluentd-cr.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRole 4 | metadata: 5 | name: fluentd 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | -------------------------------------------------------------------------------- /deploy/minimesos-marathon/README.md: -------------------------------------------------------------------------------- 1 | See the [documentation](https://microservices-demo.github.io/deployment/minimeos-marathon.html) on how to deploy Sock Shop using [minimesos](https://minimesos.org) and [Marathon](https://github.com/mesosphere/marathon). 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-default-deny.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: sock-shop 6 | annotations: 7 | net.beta.kubernetes.io/network-policy: | 8 | { 9 | "ingress": { 10 | "isolation": "DefaultDeny" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /staging/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_addresses" { 2 | value = ["${aws_instance.k8s-node.*.public_ip}"] 3 | } 4 | 5 | output "master_address" { 6 | value = "${aws_instance.k8s-master.public_ip}" 7 | } 8 | 9 | output "sock_shop_address" { 10 | value = "${aws_elb.microservices-demo-staging-k8s.dns_name}" 11 | } 12 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/carts-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: carts 6 | labels: 7 | name: carts 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: carts 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/user-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: user 6 | labels: 7 | name: user 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: user 15 | 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/orders-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: orders 6 | labels: 7 | name: orders 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: orders 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/front-end-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: front-end 6 | labels: 7 | name: 
front-end 8 | spec: 9 | type: NodePort 10 | ports: 11 | - port: 80 12 | targetPort: 8079 13 | nodePort: 30001 14 | selector: 15 | name: front-end 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/payment-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: payment 6 | labels: 7 | name: payment 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: payment 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/cart-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: carts-db 6 | labels: 7 | name: carts-db 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 27017 12 | targetPort: 27017 13 | selector: 14 | name: carts-db 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/catalogue-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: catalogue 6 | labels: 7 | name: catalogue 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: catalogue 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/rabbitmq-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: rabbitmq 6 | labels: 7 | name: rabbitmq 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 5672 12 | targetPort: 5672 13 | selector: 14 | name: rabbitmq 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/carts-svc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: carts 6 | labels: 7 | name: carts 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: carts 16 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/gcloud/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_addresses" { 2 | value = ["${google_compute_instance.docker-swarm-node.*.network_interface.0.access_config.0.assigned_nat_ip}"] 3 | } 4 | 5 | output "master_address" { 6 | value = "${google_compute_instance.docker-swarm-master.network_interface.0.access_config.0.assigned_nat_ip}" 7 | } 8 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/orders-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: orders-db 6 | labels: 7 | name: orders-db 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 27017 12 | targetPort: 27017 13 | selector: 14 | name: orders-db 15 | 
-------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/shipping-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: shipping 6 | labels: 7 | name: shipping 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 80 12 | targetPort: 80 13 | selector: 14 | name: shipping 15 | 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/user-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: user-db 6 | labels: 7 | name: user-db 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 27017 12 | targetPort: 27017 13 | selector: 14 | name: user-db 15 | 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/orders-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: orders 6 | labels: 7 | name: orders 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: orders 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/user-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: user 6 | labels: 7 | name: user 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: user 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/session-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: session-db 6 | labels: 7 | name: session-db 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 6379 12 | targetPort: 6379 13 | selector: 14 | name: session-db 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-alerting/README.md: -------------------------------------------------------------------------------- 1 | For the alerting component to work, a Kubernetes secret called "slack-hook-url" must be created; its value must be the Slack incoming-webhook URL, e.g. `kubectl create secret generic slack-hook-url --from-literal=slack-hook-url=<webhook-url>` (the literal key name here is an illustrative assumption; use whatever key alertmanager-dep.yaml expects, in the namespace where Alertmanager runs). 2 | 3 | For more information, see 4 | 5 | 1. https://kubernetes.io/docs/user-guide/secrets/ 6 | 2.
https://api.slack.com/incoming-webhooks -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/payment-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: payment 6 | labels: 7 | name: payment 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: payment 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/catalogue-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: catalogue-db 6 | labels: 7 | name: catalogue-db 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 3306 12 | targetPort: 3306 13 | selector: 14 | name: catalogue-db 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/front-end-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: front-end 6 | labels: 7 | name: front-end 8 | namespace: sock-shop 9 | spec: 10 | type: LoadBalancer 11 | ports: 12 | - port: 80 13 | targetPort: 8079 14 | nodePort: 30001 15 | selector: 16 | name: front-end 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_addresses" { 2 | value = ["${aws_instance.ci-sockshop-k8s-node.*.public_dns}"] 3 | } 4 | 5 | output "master_address" { 6 | value = "${aws_instance.ci-sockshop-k8s-master.public_dns}" 7 | } 8 | 9 | output "sock_shop_address" { 10 | value = "${aws_elb.ci-sockshop-k8s-elb.dns_name}" 11 | } 12 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/carts-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: carts-db 6 | labels: 7 | name: carts-db 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 27017 13 | targetPort: 27017 14 | selector: 15 | name: carts-db 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/catalogue-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: catalogue 6 | labels: 7 | name: catalogue 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: catalogue 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/shipping-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: shipping 6 | labels: 7 | name: shipping 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 80 13 | targetPort: 80 14 | selector: 15 | name: shipping 16 | 17 | 
-------------------------------------------------------------------------------- /deploy/kubernetes/manifests/user-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: user-db 6 | labels: 7 | name: user-db 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 27017 13 | targetPort: 27017 14 | selector: 15 | name: user-db 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/orders-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: orders-db 6 | labels: 7 | name: orders-db 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 27017 13 | targetPort: 27017 14 | selector: 15 | name: orders-db 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/session-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: session-db 6 | labels: 7 | name: session-db 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 6379 13 | targetPort: 6379 14 | selector: 15 | name: session-db 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: socks-ingress 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | spec: 8 | rules: 9 | - http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: front-end 14 | servicePort: 80 -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/fluentd-crb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: fluentd 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: fluentd 10 | subjects: 11 | - kind: ServiceAccount 12 | name: fluentd 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-cortex-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: cortex-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | ingress: 11 | - from: 12 | - podSelector: 13 | matchLabels: 14 | name: cortex 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-frontend-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: front-end-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: front-end 11 | ingress: 12 | - ports: 13 | - protocol: TCP 14 | port: 8079 15 | 
-------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/cart-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: cart 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: cart 12 | minReplicas: 1 13 | maxReplicas: 10 14 | targetCPUUtilizationPercentage: 50 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/catalogue-db-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: catalogue-db 6 | labels: 7 | name: catalogue-db 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 3306 13 | targetPort: 3306 14 | selector: 15 | name: catalogue-db 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/heapster-crb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: heapster 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:heapster 10 | subjects: 11 | - kind: ServiceAccount 12 | name: heapster 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /deploy/mesos-marathon/provisionWeave.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "$1" ]; then 4 | echo "Warn: No Master IP passed. Assuming this is master" 5 | fi 6 | 7 | sudo curl -sL git.io/weave -o /usr/local/bin/weave 8 | sudo chmod a+x /usr/local/bin/weave 9 | sudo weave launch $1 10 | 11 | echo "/var/run/weave/weave.sock" | sudo tee /etc/mesos-slave/docker_socket 12 | -------------------------------------------------------------------------------- /deploy/docker-compose/alert.rules: -------------------------------------------------------------------------------- 1 | # Alert for high error rate in the Sock Shop. 
2 | ALERT HighErrorRate 3 | IF rate(request_duration_seconds_count{status_code="500"}[1m]) > .1 4 | FOR 1m 5 | LABELS { severity = "email" } 6 | ANNOTATIONS { 7 | summary = "High HTTP 500 error rates", 8 | description = "Per-second rate of HTTP 500 errors, averaged over 1m: {{ $value }}", 9 | } 10 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/user-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: user 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: user 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-exporter-kube-state-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | labels: 7 | app: kube-state-metrics 8 | spec: 9 | ports: 10 | - name: kube-state-metrics 11 | port: 8080 12 | protocol: TCP 13 | selector: 14 | app: kube-state-metrics 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/orders-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: orders 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: orders 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/payment-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: payment 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: payment 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/grafana-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | labels: 7 | app: grafana 8 | component: core 9 | spec: 10 | type: LoadBalancer 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 3000 15 | selector: 16 | app: grafana 17 | component: core 18 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/catalogue-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: catalogue 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: catalogue 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 |
-------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/front-end-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: front-end 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: front-end 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/shipping-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: shipping 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: shipping 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/zipkin-mysql-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.zipkin.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: zipkin-mysql 6 | labels: 7 | name: zipkin-mysql 8 | spec: 9 | ports: 10 | # the port that this service should serve on 11 | - port: 3306 12 | targetPort: 3306 13 | selector: 14 | name: zipkin-mysql 15 | {{- end -}} 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/queue-master-hsc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: queue-master 6 | namespace: sock-shop 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1beta1 10 | kind: Deployment 11 | name: queue-master 12 | 13 | minReplicas: 1 14 | maxReplicas: 10 15 | targetCPUUtilizationPercentage: 50 16 | 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/queue-master-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: queue-master 6 | labels: 7 | name: queue-master 8 | annotations: 9 | prometheus.io/path: "/prometheus" 10 | spec: 11 | ports: 12 | # the port that this service should serve on 13 | - port: 80 14 | targetPort: 80 15 | selector: 16 | name: queue-master 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/zipkin-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.zipkin.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: zipkin 6 | labels: 7 | name: zipkin 8 | spec: 9 | type: NodePort 10 | ports: 11 | # the port that this service should serve on 12 | - port: 9411 13 | targetPort: 9411 14 | nodePort: 30002 15 | selector: 16 | name: zipkin 17 | {{- end -}} 18 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-crb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: prometheus 6 | labels: 7 | app: prometheus 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: prometheus 12 | subjects: 13 | - kind: ServiceAccount 14 | name: prometheus 15 | namespace: monitoring 16 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/queue-master-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: queue-master 6 | labels: 7 | name: queue-master 8 | annotations: 9 | prometheus.io/path: "/prometheus" 10 | namespace: sock-shop 11 | spec: 12 | ports: 13 | # the port that this service should serve on 14 | - port: 80 15 | targetPort: 80 16 | selector: 17 | name: queue-master 18 | -------------------------------------------------------------------------------- /openapi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:6.3 2 | 3 | ENV NODE_ENV "development" 4 | ENV NODE_PATH "/usr/src/app/node_modules" 5 | 6 | # Install base dependencies 7 | RUN apk update 8 | RUN apk add git python 9 | 10 | # Prepare app directory 11 | WORKDIR /usr/src/app 12 | COPY . /usr/src/app 13 | RUN npm install 14 | VOLUME /tmp/specs 15 | 16 | # Start the app 17 | ENTRYPOINT ["/usr/src/app/node_modules/dredd/bin/dredd"] 18 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-alerting/alertmanager-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/scrape: 'true' 6 | prometheus.io/path: '/alertmanager/metrics' 7 | labels: 8 | name: alertmanager 9 | name: alertmanager 10 | spec: 11 | selector: 12 | app: alertmanager 13 | ports: 14 | - name: alertmanager 15 | protocol: TCP 16 | port: 9093 17 | targetPort: 9093 18 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/scrape: 'true' 6 | labels: 7 | name: prometheus 8 | name: prometheus 9 | namespace: monitoring 10 | spec: 11 | selector: 12 | app: prometheus 13 | type: NodePort 14 | ports: 15 | - name: prometheus 16 | protocol: TCP 17 | port: 9090 18 | targetPort: 9090 19 | nodePort: 31090 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-orders-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: orders-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: orders 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: front-end 16 | ports: 17 | - protocol: TCP 18 | port: 80 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-payment-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: payment-access 6 | 
namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: payment 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: orders 16 | ports: 17 | - protocol: TCP 18 | port: 80 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-shipping-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: shipping-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: shipping 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: orders 16 | ports: 17 | - protocol: TCP 18 | port: 80 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-user-db-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: user-db-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: user-db 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: user 16 | ports: 17 | - protocol: TCP 18 | port: 27017 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for sock-shop. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | java: 5 | options: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom 6 | zipkin: 7 | enabled: false 8 | url: zipkin.zipkin.svc.cluster.local 9 | frontend: 10 | replicas: 1 11 | loadtest: 12 | replicas: 2 13 | enabled: false -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-cart-db-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: carts-db-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: carts-db 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: cart 16 | ports: 17 | - protocol: TCP 18 | port: 27017 19 | 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-catalogue-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: catalogue-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: catalogue 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: front-end 16 | ports: 17 | - protocol: TCP 18 | port: 80 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-orders-db-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: orders-db-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: orders-db 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: orders 16 | ports: 17 | - protocol: TCP 18 | port: 27017 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/rabbitmq-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: rabbitmq 6 | labels: 7 | name: rabbitmq 8 | namespace: sock-shop 9 | spec: 10 | ports: 11 | # the port that this service should serve on 12 | - port: 5672 13 | name: rabbitmq 14 | targetPort: 5672 15 | - port: 9090 16 | name: exporter 17 | targetPort: exporter 18 | protocol: TCP 19 | selector: 20 | name: rabbitmq 21 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-catalogue-db-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: catalogue-db-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: catalogue-db 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: catalogue 16 | ports: 17 | - protocol: TCP 18 | port: 3306 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-cr.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRole 4 | metadata: 5 | name: prometheus 6 | labels: 7 | app: prometheus 8 | rules: 9 | - apiGroups: [""] # "" 
indicates the core API group 10 | resources: 11 | - nodes 12 | - nodes/proxy 13 | - services 14 | - endpoints 15 | - pods 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - nonResourceURLs: 21 | - /metrics 22 | verbs: 23 | - get 24 | -------------------------------------------------------------------------------- /deploy/README.md: -------------------------------------------------------------------------------- 1 | # Deployment Configurations 2 | 3 | Subdirectories contain scripts and configuration for deployment target platforms. Please see the repository listing for an up-to-date list of deployments. 4 | 5 | The intention was also to support the following platforms, but these efforts have been held up by technical issues: 6 | 7 | - Mesosphere DC/OS (no support in DCOS universe for multi-container packages) 8 | - Docker Swarmkit (see [the swarmkit readme](./swarmkit/README.md)) 9 | - Docker Cloud (does not suit our use case) 10 | -------------------------------------------------------------------------------- /openapi/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "microservices-demo-api-test", 3 | "nicename": "Microservices demo API test", 4 | "version": "0.0.1", 5 | "private": true, 6 | "description": "Verify microservices API endpoints against the specification", 7 | 8 | "engines": { 9 | "node": "6", 10 | "npm": "3" 11 | }, 12 | "license": "MIT", 13 | "dependencies": { 14 | }, 15 | "devDependencies": { 16 | "dredd": "1.5.0", 17 | "mongodb": "", 18 | "mysql": "" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-cart-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: cart-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: cart 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: front-end 16 | - podSelector: 17 | matchLabels: 18 | name: orders 19 | ports: 20 | - protocol: TCP 21 | port: 80 22 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-policy/netpol-user-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: user-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: user 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: front-end 16 | - podSelector: 17 | matchLabels: 18 | name: orders 19 | ports: 20 | - protocol: TCP 21 | port: 80 22 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/zipkin-mysql-dep.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.zipkin.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: zipkin-mysql 6 | labels: 7 | name: zipkin-mysql 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: zipkin-mysql 14 | spec: 15 | containers: 16 | - name: zipkin-mysql 17 | image: openzipkin/zipkin-mysql:1.20.0 18 | ports: 19 | - name: mysql 20 | containerPort: 3306 21 | {{- end -}} 22 | --------------------------------------------------------------------------------
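Note on the network policies: the NetworkPolicy manifests in manifests-policy (netpol-cart-access.yaml and netpol-user-access.yaml above, netpol-rabbitmq-access.yaml below) all select pods via the same name=<service> labels used by the service and deployment manifests. A minimal sketch for applying and sanity-checking the whole policy set, assuming kubectl already points at the target cluster and the repository root is the working directory:

    kubectl apply -f deploy/kubernetes/manifests-policy/
    # Confirm the policies were created and that their pod selectors
    # actually match running pods (labels follow the name=<service> scheme).
    kubectl get networkpolicy -n sock-shop
    kubectl get pods -n sock-shop -l name=front-end --show-labels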
/deploy/kubernetes/manifests-policy/netpol-rabbitmq-access.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: rabbitmq-access 6 | namespace: sock-shop 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: rabbitmq 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | name: shipping 16 | - podSelector: 17 | matchLabels: 18 | name: queue-master 19 | ports: 20 | - protocol: TCP 21 | port: 5672 22 | 23 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/heapster-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | task: monitoring 6 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 7 | # If you are NOT using this as an addon, you should comment out this line. 8 | kubernetes.io/cluster-service: 'true' 9 | kubernetes.io/name: Heapster 10 | name: heapster 11 | namespace: kube-system 12 | spec: 13 | ports: 14 | - port: 80 15 | targetPort: 8082 16 | selector: 17 | k8s-app: heapster 18 | -------------------------------------------------------------------------------- /graphs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.4-alpine 2 | 3 | MAINTAINER Container Solutions info@container-solutions.com 4 | 5 | # Install basic dependencies 6 | RUN apk add -U curl git parallel 7 | 8 | # Silence parallel citation notice 9 | RUN mkdir /root/.parallel && \ 10 | touch /root/.parallel/will-cite 11 | 12 | # Install pip 13 | RUN curl https://bootstrap.pypa.io/get-pip.py | python 14 | 15 | # Install grafanalib 16 | RUN pip install git+https://github.com/weaveworks/grafanalib@82556ddfbbd6134837d280a7999d35c45cc3c87e 17 | 18 | WORKDIR /opt/code 19 | 20 | CMD ["/bin/sh", "-c"] 21 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-exporter-kube-state-dep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics-deployment 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | app: kube-state-metrics 12 | spec: 13 | containers: 14 | - name: kube-state-metrics 15 | image: gcr.io/google_containers/kube-state-metrics:v0.4.1 16 | ports: 17 | - containerPort: 8080 18 | nodeSelector: 19 | beta.kubernetes.io/os: linux -------------------------------------------------------------------------------- /shippable.triggers.yml: -------------------------------------------------------------------------------- 1 | triggers: 2 | # This file contains triggers to be used to manually trigger an automated CI/CD 3 | # workflow using Shippable Pipelines 4 | 5 | ################################ 6 | 7 | - name: trigger-front-end-test 8 | type: trigger 9 | version: 10 | message: 'trigger to deploy to TEST' 11 | 12 | - name: trigger-front-end-prod 13 | type: trigger 14 | version: 15 | message: 'trigger to deploy to PROD' 16 | 17 | - name: trigger-front-end-release 18 | type: trigger 19 | version: 20 | message: 'trigger to create release' 21 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/influxdb-service.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | task: monitoring 6 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 7 | # If you are NOT using this as an addon, you should comment out this line. 8 | kubernetes.io/cluster-service: 'true' 9 | kubernetes.io/name: monitoring-influxdb 10 | name: monitoring-influxdb 11 | namespace: kube-system 12 | spec: 13 | ports: 14 | - port: 8086 15 | targetPort: 8086 16 | selector: 17 | k8s-app: influxdb 18 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | default = "eu-central-1" 3 | } 4 | 5 | variable "num_nodes" { 6 | description = "Number of nodes besides master" 7 | default = "2" 8 | } 9 | 10 | variable "private_key_name" { 11 | description = "Name of private_key" 12 | default = "docker-swarm" 13 | } 14 | 15 | variable "private_key_path" { 16 | description = "Path to file containing private key" 17 | default = "~/.ssh/docker-swarm.pem" 18 | } 19 | 20 | variable "instance_type" { 21 | description = "AWS Instance size" 22 | default = "t2.micro" 23 | } 24 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
12 | */}} 13 | {{- define "fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/influxdb-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-influxdb 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | task: monitoring 12 | k8s-app: influxdb 13 | spec: 14 | containers: 15 | - name: influxdb 16 | image: gcr.io/google_containers/heapster-influxdb-amd64:v1.1.1 17 | volumeMounts: 18 | - mountPath: /data 19 | name: influxdb-storage 20 | volumes: 21 | - name: influxdb-storage 22 | emptyDir: {} 23 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/zipkin-dep.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.zipkin.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: zipkin 6 | labels: 7 | name: zipkin 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: zipkin 14 | spec: 15 | containers: 16 | - name: zipkin 17 | image: openzipkin/zipkin 18 | ports: 19 | - containerPort: 9411 20 | env: 21 | - name: STORAGE_TYPE 22 | value: mysql 23 | - name: MYSQL_HOST 24 | value: zipkin-mysql 25 | {{- end -}} 26 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/catalogue-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: catalogue-db 6 | labels: 7 | name: catalogue-db 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: catalogue-db 14 | spec: 15 | containers: 16 | - name: catalogue-db 17 | image: weaveworksdemos/catalogue-db:0.3.0 18 | env: 19 | - name: MYSQL_ROOT_PASSWORD 20 | value: fake_password 21 | - name: MYSQL_DATABASE 22 | value: socksdb 23 | ports: 24 | - name: mysql 25 | containerPort: 3306 26 | -------------------------------------------------------------------------------- /install/aws-minimesos/README.md: -------------------------------------------------------------------------------- 1 | # Running the Weave Demo on AWS Using Minimesos and Terraform 2 | 3 | This directory provides code to install the demo on an AWS instance running minimesos. It is primarily intended to help with testing the Weave demo on Mesos.
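Tear-down is not scripted in this directory; assuming the same `TF_VAR_*` variables are exported (see Quick start below), the standard Terraform flow applies:

```
terraform destroy
```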
4 | 5 | ## Prerequisites 6 | - Terraform 7 | 8 | ## Quick start 9 | 10 | ``` 11 | export TF_VAR_aws_key_name= ; export TF_VAR_private_key_file=path/to/ssh/pem ; export TF_VAR_access_key= ; export TF_VAR_secret_key= 12 | terraform apply 13 | ``` 14 | 15 | 16 | ## Debugging 17 | 18 | To SSH into the instance, export the variables returned by Terraform, then run: `ssh -i $KEY ubuntu@$IP` 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/loadtest-dep.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.loadtest.enabled }} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: load-test 6 | labels: 7 | name: load-test 8 | spec: 9 | replicas: {{ .Values.loadtest.replicas }} 10 | template: 11 | metadata: 12 | labels: 13 | name: load-test 14 | spec: 15 | containers: 16 | - name: load-test 17 | image: weaveworksdemos/load-test 18 | command: ["/bin/sh"] 19 | args: ["-c", "while true; do locust --host http://front-end.sock-shop.svc.cluster.local -f /config/locustfile.py --clients 5 --hatch-rate 5 --num-request 100 --no-web; done"] 20 | {{- end }} -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/rabbitmq-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: rabbitmq 6 | labels: 7 | name: rabbitmq 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: rabbitmq 14 | spec: 15 | containers: 16 | - name: rabbitmq 17 | image: rabbitmq:3.6.8 18 | ports: 19 | - containerPort: 5672 20 | securityContext: 21 | capabilities: 22 | drop: 23 | - all 24 | add: 25 | - CHOWN 26 | - SETGID 27 | - SETUID 28 | - DAC_OVERRIDE 29 | readOnlyRootFilesystem: true 30 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/session-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: session-db 6 | labels: 7 | name: session-db 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: session-db 14 | spec: 15 | containers: 16 | - name: session-db 17 | image: redis:alpine 18 | ports: 19 | - name: redis 20 | containerPort: 6379 21 | securityContext: 22 | capabilities: 23 | drop: 24 | - all 25 | add: 26 | - CHOWN 27 | - SETGID 28 | - SETUID 29 | readOnlyRootFilesystem: true 30 | -------------------------------------------------------------------------------- /deploy/nomad/jobs/netman.nomad: -------------------------------------------------------------------------------- 1 | job "netman" { 2 | datacenters = ["dc1"] 3 | type = "system" 4 | 5 | constraint { 6 | attribute = "${attr.kernel.name}" 7 | value = "linux" 8 | } 9 | 10 | update { 11 | stagger = "10s" 12 | max_parallel = 1 13 | } 14 | 15 | # - Main group - # 16 | group "main" { 17 | count = 1 18 | 19 | # - Main app - # 20 | task "netman" { 21 | driver = "raw_exec" 22 | 23 | config { 24 | command = "/usr/bin/netman" 25 | } 26 | 27 | resources { 28 | cpu = 50 # 50 MHz 29 | memory = 10 # 10 MB 30 | network { 31 | mbits = 10 32 | } 33 | } 34 | } 35 | # - End main app - # 36 | } 37 | # - End main group - # 38 | } 39 | --------------------------------------------------------------------------------
/deploy/kubernetes/manifests/catalogue-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: catalogue-db 6 | labels: 7 | name: catalogue-db 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: catalogue-db 15 | spec: 16 | containers: 17 | - name: catalogue-db 18 | image: weaveworksdemos/catalogue-db:0.3.0 19 | env: 20 | - name: MYSQL_ROOT_PASSWORD 21 | value: fake_password 22 | - name: MYSQL_DATABASE 23 | value: socksdb 24 | ports: 25 | - name: mysql 26 | containerPort: 3306 27 | nodeSelector: 28 | beta.kubernetes.io/os: linux 29 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-alertrules.yaml: -------------------------------------------------------------------------------- 1 | # Useful examples on how to configure Prometheus 2 | # * https://www.weave.works/prometheus-and-kubernetes-monitoring-your-applications/ 3 | # * https://grafana.net/dashboards/162 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: prometheus-alertrules 8 | namespace: monitoring 9 | data: 10 | alert.rules: | 11 | # Alert for high error rate in the Sock Shop. 12 | 13 | ALERT HighErrorRate 14 | IF rate(request_duration_seconds_count{status_code="500"}[5m]) > 1 15 | FOR 5m 16 | LABELS { severity = "slack" } 17 | ANNOTATIONS { 18 | summary = "High HTTP 500 error rates", 19 | description = "Rate of HTTP 500 errors per 5 minutes: {{ $value }}", 20 | } 21 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: kibana 6 | labels: 7 | name: kibana 8 | namespace: kube-system 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: kibana 15 | spec: 16 | containers: 17 | - image: kibana 18 | name: kibana 19 | ports: 20 | - name: kibana 21 | containerPort: 5601 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: kibana 27 | labels: 28 | name: kibana 29 | namespace: kube-system 30 | spec: 31 | type: NodePort 32 | ports: 33 | - port: 5601 34 | targetPort: 5601 35 | nodePort: 31601 36 | selector: 37 | name: kibana 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/zipkin-cron-dep.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.zipkin.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: zipkin-cron 6 | labels: 7 | name: zipkin-cron 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: zipkin-cron 14 | spec: 15 | containers: 16 | - name: zipkin-cron 17 | image: openzipkin/zipkin-dependencies:1.4.0 18 | env: 19 | - name: STORAGE_TYPE 20 | value: mysql 21 | - name: MYSQL_HOST 22 | value: zipkin-mysql 23 | - name: MYSQL_USER 24 | value: zipkin 25 | - name: MYSQL_PASS 26 | value: zipkin 27 | command: ["crond"] 28 | args: ["-f"] 29 | {{- end -}} 30 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/loadtest-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 
| kind: Namespace 4 | metadata: 5 | name: loadtest 6 | --- 7 | apiVersion: extensions/v1beta1 8 | kind: Deployment 9 | metadata: 10 | name: load-test 11 | labels: 12 | name: load-test 13 | namespace: loadtest 14 | spec: 15 | replicas: 2 16 | template: 17 | metadata: 18 | labels: 19 | name: load-test 20 | spec: 21 | containers: 22 | - name: load-test 23 | image: weaveworksdemos/load-test:0.1.1 24 | command: ["/bin/sh"] 25 | args: ["-c", "while true; do locust --host http://front-end.sock-shop.svc.cluster.local -f /config/locustfile.py --clients 5 --hatch-rate 5 --num-request 100 --no-web; done"] 26 | nodeSelector: 27 | beta.kubernetes.io/os: linux 28 | -------------------------------------------------------------------------------- /staging/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WEAVE_SERVICE_TOKEN=$1 4 | KUBE_VERSION=$(kubectl version | base64 | tr -d '\n') 5 | 6 | kubectl apply -f "https://git.io/weave-kube-1.6" 7 | kubectl apply -n kube-system -f "https://cloud.weave.works/k8s/scope.yaml?service-token=$WEAVE_SERVICE_TOKEN&k8s-version=$KUBE_VERSION" 8 | kubectl apply -n kube-system -f "https://cloud.weave.works/k8s/flux.yaml?service-token=$WEAVE_SERVICE_TOKEN&k8s-version=$KUBE_VERSION" 9 | kubectl apply -n kube-system -f "https://cloud.weave.works/k8s/cortex.yaml?service-token=$WEAVE_SERVICE_TOKEN&k8s-version=$KUBE_VERSION" 10 | kubectl apply -f ~/microservices-demo/deploy/kubernetes/manifests/sock-shop-ns.yaml -f ~/microservices-demo/deploy/kubernetes/manifests/zipkin-ns.yaml -f ~/microservices-demo/deploy/kubernetes/manifests 11 | rm join.cmd 12 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 6 | # If you are NOT using this as an addon, you should comment out this line. 7 | kubernetes.io/cluster-service: 'true' 8 | kubernetes.io/name: monitoring-grafana 9 | name: monitoring-grafana 10 | namespace: kube-system 11 | spec: 12 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 13 | # or through a public IP. 
14 | # type: LoadBalancer 15 | # You could also use NodePort to expose the service at a randomly-generated port 16 | # type: NodePort 17 | type: NodePort 18 | ports: 19 | - port: 80 20 | targetPort: 3000 21 | nodePort: 30003 22 | selector: 23 | k8s-app: grafana 24 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/local/swarm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [[ -z "$NUM_NODES" ]]; then 5 | NUM_NODES=2 6 | fi 7 | 8 | case $@ in 9 | up) 10 | vagrant up 11 | vagrant ssh swarm-master -c "docker swarm init --advertise-addr 10.0.0.10" 12 | TOKEN=$(vagrant ssh swarm-master -c "docker swarm join-token -q worker" | tr -d '\r') 13 | for i in $(seq $NUM_NODES); do 14 | vagrant ssh swarm-node$i -c 'docker swarm join --listen-addr 10.0.0.1'"'$i'"' --token '"'$TOKEN'"' 10.0.0.10' 15 | done 16 | vagrant ssh swarm-master -c "docker-compose -f /docker-swarm/docker-compose.yml pull" 17 | vagrant ssh swarm-master -c "docker-compose -f /docker-swarm/docker-compose.yml bundle -o dockerswarm.dab" 18 | vagrant ssh swarm-master -c "docker deploy dockerswarm" 19 | ;; 20 | down) 21 | vagrant destroy -f 22 | ;; 23 | esac 24 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/heapster-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: heapster 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | task: monitoring 12 | k8s-app: heapster 13 | spec: 14 | serviceAccountName: heapster 15 | containers: 16 | - name: heapster 17 | image: gcr.io/google_containers/heapster-amd64:v1.4.0 18 | imagePullPolicy: IfNotPresent 19 | command: 20 | - /heapster 21 | - --source=kubernetes:https://kubernetes.default 22 | - --sink=influxdb:http://monitoring-influxdb:8086 23 | resources: 24 | limits: 25 | cpu: 100m 26 | memory: 128Mi 27 | requests: 28 | cpu: 100m 29 | memory: 128Mi 30 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: elasticsearch 6 | labels: 7 | name: elasticsearch 8 | namespace: kube-system 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: elasticsearch 15 | spec: 16 | containers: 17 | - image: elasticsearch 18 | name: elasticsearch 19 | ports: 20 | - name: elasticsearch 21 | containerPort: 9200 22 | nodeSelector: 23 | beta.kubernetes.io/os: linux 24 | --- 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | name: elasticsearch 29 | labels: 30 | name: elasticsearch 31 | namespace: kube-system 32 | spec: 33 | ports: 34 | - port: 9200 35 | targetPort: 9200 36 | selector: 37 | name: elasticsearch 38 | -------------------------------------------------------------------------------- /deploy/docker-compose/grafana/import.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | sleep 3 4 | # Import data sources 5 | for file in *-datasource.json; do 6 | if [ -e "$file" ]; then 7 | echo "importing $file" && 8 | curl --silent --fail --show-error \ 9 | --request POST 
http://admin:foobar@grafana:3000/api/datasources \ 10 | --header "Content-Type: application/json" \ 11 | --header "Accept: application/json" \ 12 | --data-binary "@$file"; 13 | echo ""; 14 | fi 15 | done; 16 | 17 | # Import dashboards 18 | for file in *-dashboard.json; do 19 | if [ -e "$file" ]; then 20 | echo "importing $file" && 21 | curl --request POST http://admin:foobar@grafana:3000/api/dashboards/import \ 22 | --header "Content-Type: application/json" \ 23 | --header "Accept: application/json" \ 24 | --data-binary "@$file"; 25 | echo ""; 26 | fi 27 | done; 28 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/session-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: session-db 6 | labels: 7 | name: session-db 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: session-db 15 | annotations: 16 | prometheus.io.scrape: "false" 17 | spec: 18 | containers: 19 | - name: session-db 20 | image: redis:alpine 21 | ports: 22 | - name: redis 23 | containerPort: 6379 24 | securityContext: 25 | capabilities: 26 | drop: 27 | - all 28 | add: 29 | - CHOWN 30 | - SETGID 31 | - SETUID 32 | readOnlyRootFilesystem: true 33 | nodeSelector: 34 | beta.kubernetes.io/os: linux 35 | -------------------------------------------------------------------------------- /deploy/apcera/sockShop.pol: -------------------------------------------------------------------------------- 1 | job::/sandbox/[name]/sockshop { 2 | if (auth_server@apcera.me->name beginsWith "[name]") { 3 | permit create, read, update, delete 4 | permit start, stop, promote 5 | permit map, link, bind, ssh 6 | docker.allow "*" 7 | package.allow "package::/apcera" 8 | package.allow "package::/demos/sockshop" 9 | } 10 | if (network beginsWith "network::/sandbox/[name]/sockshop") { 11 | permit join 12 | } 13 | } 14 | 15 | package::/sandbox/[name]/sockshop { 16 | if (auth_server@apcera.me->name beginsWith "[name]") { 17 | permit create, read, update, delete, use 18 | } 19 | } 20 | 21 | network::/sandbox/[name]/sockshop { 22 | if (auth_server@apcera.me->name beginsWith "[name]") { 23 | permit create, join, read, delete 24 | } 25 | } 26 | 27 | route::/http { 28 | if (job beginsWith "job::/sandbox/") { 29 | permit map 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/cart-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: carts-db 6 | labels: 7 | name: carts-db 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: carts-db 14 | spec: 15 | containers: 16 | - name: carts-db 17 | image: mongo 18 | ports: 19 | - name: mongo 20 | containerPort: 27017 21 | securityContext: 22 | capabilities: 23 | drop: 24 | - all 25 | add: 26 | - CHOWN 27 | - SETGID 28 | - SETUID 29 | readOnlyRootFilesystem: true 30 | volumeMounts: 31 | - mountPath: /tmp 32 | name: tmp-volume 33 | volumes: 34 | - name: tmp-volume 35 | emptyDir: 36 | medium: Memory 37 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/orders-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: orders-db 6 | labels: 7 | name: orders-db 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: orders-db 14 | spec: 15 | containers: 16 | - name: orders-db 17 | image: mongo 18 | ports: 19 | - name: mongo 20 | containerPort: 27017 21 | securityContext: 22 | capabilities: 23 | drop: 24 | - all 25 | add: 26 | - CHOWN 27 | - SETGID 28 | - SETUID 29 | readOnlyRootFilesystem: true 30 | volumeMounts: 31 | - mountPath: /tmp 32 | name: tmp-volume 33 | volumes: 34 | - name: tmp-volume 35 | emptyDir: 36 | medium: Memory 37 | -------------------------------------------------------------------------------- /deploy/example/README.md: -------------------------------------------------------------------------------- 1 | # Example documentation 2 | 3 | Hi, cool docs over here. 4 | 5 | 6 | We need to install some tools before we start: 7 | 8 | 9 | apt-get install -yq cowsay 10 | 11 | 12 | 13 | # Provision infrastructure 14 | 15 | We first have to provision some instances. 16 | 17 | 18 | 19 | gcloud instances create blah 20 | 21 | 22 | 23 | Now you can play around with your cluster! 24 | 25 | 26 | 31 | 32 | # Cleaning up 33 | If you're done, clean up your cluster with these commands: 34 | 35 | 36 | 37 | gcloud destroy blah 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/user-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: user-db 6 | labels: 7 | name: user-db 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: user-db 14 | spec: 15 | containers: 16 | - name: user-db 17 | image: weaveworksdemos/user-db:0.3.0 18 | 19 | ports: 20 | - name: mongo 21 | containerPort: 27017 22 | securityContext: 23 | capabilities: 24 | drop: 25 | - all 26 | add: 27 | - CHOWN 28 | - SETGID 29 | - SETUID 30 | readOnlyRootFilesystem: true 31 | volumeMounts: 32 | - mountPath: /tmp 33 | name: tmp-volume 34 | volumes: 35 | - name: tmp-volume 36 | emptyDir: 37 | medium: Memory 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-logging/fluentd-daemon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: DaemonSet 4 | metadata: 5 | name: fluentd 6 | labels: 7 | tier: monitoring 8 | name: fluentd 9 | namespace: kube-system 10 | spec: 11 | selector: 12 | matchLabels: 13 | name: fluentd 14 | template: 15 | metadata: 16 | labels: 17 | name: fluentd 18 | spec: 19 | serviceAccountName: fluentd 20 | containers: 21 | - image: weaveworksdemos/log-server 22 | name: fluentd 23 | env: 24 | - name: FLUENTD_CONF 25 | value: elk.conf 26 | volumeMounts: 27 | - name: varlibdockercontainers 28 | mountPath: /var/lib/docker/containers 29 | readOnly: true 30 | volumes: 31 | - name: varlibdockercontainers 32 | hostPath: 33 | path: /var/lib/docker/containers 34 | nodeSelector: 35 | beta.kubernetes.io/os: linux 36 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/carts-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: carts-db 6 | labels: 7 | name: carts-db 8 | namespace: sock-shop 9 |
spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: carts-db 15 | spec: 16 | containers: 17 | - name: carts-db 18 | image: mongo 19 | ports: 20 | - name: mongo 21 | containerPort: 27017 22 | securityContext: 23 | capabilities: 24 | drop: 25 | - all 26 | add: 27 | - CHOWN 28 | - SETGID 29 | - SETUID 30 | readOnlyRootFilesystem: true 31 | volumeMounts: 32 | - mountPath: /tmp 33 | name: tmp-volume 34 | volumes: 35 | - name: tmp-volume 36 | emptyDir: 37 | medium: Memory 38 | nodeSelector: 39 | beta.kubernetes.io/os: linux 40 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/orders-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: orders-db 6 | labels: 7 | name: orders-db 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: orders-db 15 | spec: 16 | containers: 17 | - name: orders-db 18 | image: mongo 19 | ports: 20 | - name: mongo 21 | containerPort: 27017 22 | securityContext: 23 | capabilities: 24 | drop: 25 | - all 26 | add: 27 | - CHOWN 28 | - SETGID 29 | - SETUID 30 | readOnlyRootFilesystem: true 31 | volumeMounts: 32 | - mountPath: /tmp 33 | name: tmp-volume 34 | volumes: 35 | - name: tmp-volume 36 | emptyDir: 37 | medium: Memory 38 | nodeSelector: 39 | beta.kubernetes.io/os: linux 40 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/gcloud/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "europe-west1" 3 | } 4 | 5 | variable "region_zone" { 6 | default = "europe-west1-b" 7 | } 8 | 9 | variable "num_nodes" { 10 | description = "Number of swarm nodes to spin up" 11 | default = "2" 12 | } 13 | 14 | variable "project_name" { 15 | description = "The ID of the Google Cloud project" 16 | } 17 | 18 | variable "credentials_file_path" { 19 | description = "Path to the JSON file used to describe your account credentials" 20 | default = "~/.config/gcloud/accounts.json" 21 | } 22 | 23 | variable "public_key_path" { 24 | description = "Path to file containing public key" 25 | default = "~/.ssh/gcloud_id_rsa.pub" 26 | } 27 | 28 | variable "private_key_path" { 29 | description = "Path to file containing private key" 30 | default = "~/.ssh/gcloud_id_rsa" 31 | } 32 | 33 | variable "machine_type" { 34 | description = "Google Machine Type to use" 35 | default = "g1-small" 36 | 37 | } 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/user-db-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: user-db 6 | labels: 7 | name: user-db 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: user-db 15 | spec: 16 | containers: 17 | - name: user-db 18 | image: weaveworksdemos/user-db:0.3.0 19 | 20 | ports: 21 | - name: mongo 22 | containerPort: 27017 23 | securityContext: 24 | capabilities: 25 | drop: 26 | - all 27 | add: 28 | - CHOWN 29 | - SETGID 30 | - SETUID 31 | readOnlyRootFilesystem: true 32 | volumeMounts: 33 | - mountPath: /tmp 34 | name: tmp-volume 35 | volumes: 36 | - name: tmp-volume 37 | emptyDir: 38 | medium: Memory 39 | nodeSelector: 40 | 
beta.kubernetes.io/os: linux -------------------------------------------------------------------------------- /deploy/nomad/scripts/netman.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | function connect { 3 | name=$1 4 | ancestor=$2 5 | networks=$3 6 | 7 | CID=$(docker ps -q -f ancestor=$ancestor) 8 | if [ -n "$CID" ] 9 | then 10 | # Go over the networks that we want the container to be connected to 11 | for network in $networks 12 | do 13 | # Get list of containers connected to $network 14 | CIDS=$(docker network inspect -f "{{.Containers}}" $network) 15 | if [[ ! $CIDS =~ $CID ]] # If the container is not yet in the list of containers connected to the network 16 | then 17 | # Connect container $CID to network $network 18 | docker network connect $network $CID 19 | echo "==> $name container successfully connected to network $network" 20 | fi 21 | done 22 | fi 23 | } 24 | 25 | while true 26 | do 27 | connect front-end weaveworksdemos/front-end "secure internal external" 28 | connect orders weaveworksdemos/orders "internal secure backoffice" 29 | sleep 2 30 | done 31 | -------------------------------------------------------------------------------- /deploy/docker-compose/alertmanager.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The default SMTP smarthost used for sending emails. 3 | smtp_smarthost: 'smtp.gmail.com:587' 4 | # SMTP authentication information. 5 | smtp_auth_username: '' 6 | smtp_auth_password: '' 7 | # The default SMTP From header field. 8 | smtp_from: 'alertmanager@example.org' 9 | 10 | route: 11 | group_by: ['cluster'] 12 | receiver: email-all 13 | # Zero or more child routes. 14 | routes: 15 | # A set of equality matchers an alert has to fulfill to match the node. 16 | - match: 17 | severity: email 18 | receiver: email-all 19 | receivers: 20 | # The unique name of the receiver. 21 | - name: 'email-all' 22 | email_configs: 23 | # The email address to send notifications to. 24 | - to: 'alertmanager@example.org' 25 | # Whether or not to notify about resolved alerts. 26 | send_resolved: true 27 | # The HTML body of the email notification.
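# Alertmanager renders this field with Go templating; {{ range .Alerts }} iterates over each alert in the notification group.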
28 | html: '{{ range .Alerts }}{{ .Annotations.description }}\n{{ end }}' 29 | -------------------------------------------------------------------------------- /deploy/nomad/jobs/logging-fluentd.nomad: -------------------------------------------------------------------------------- 1 | job "logging-fluentd" { 2 | datacenters = ["dc1"] 3 | type = "system" 4 | 5 | constraint { 6 | attribute = "${attr.kernel.name}" 7 | value = "linux" 8 | } 9 | 10 | update { 11 | stagger = "10s" 12 | max_parallel = 1 13 | } 14 | 15 | # - fluentd - # 16 | task "fluentd" { 17 | driver = "docker" 18 | 19 | config { 20 | image = "seqvence/log-server" 21 | hostname = "fluentd.weave.local" 22 | network_mode = "external" 23 | dns_servers = ["172.17.0.1"] 24 | dns_search_domains = ["weave.local."] 25 | logging { 26 | type = "json-file" 27 | } 28 | volumes = [ 29 | "/var/lib/docker/containers:/var/lib/docker/containers" 30 | ] 31 | } 32 | 33 | env { 34 | FLUENTD_CONF = "elk.conf" 35 | } 36 | 37 | resources { 38 | cpu = 100 # 100 MHz 39 | memory = 300 # 300 MB 40 | network { 41 | mbits = 10 42 | } 43 | } 44 | 45 | } 46 | # - end fluentd - # 47 | 48 | } 49 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-alerting/alertmanager-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: alertmanager 5 | data: 6 | config.yml: |- 7 | global: 8 | slack_api_url: https://hooks.slack.com/services/API_URL_SECRET 9 | 10 | route: 11 | group_by: [cluster] 12 | # If an alert isn't caught by a route, send it to Slack. 13 | receiver: slack-all 14 | routes: 15 | - match: 16 | severity: slack 17 | receiver: slack-all 18 | 19 | 20 | receivers: 21 | - name: 'slack-all' 22 | slack_configs: 23 | - channel: '#sockshop-ops' 24 | send_resolved: true 25 | title: "{{ range .Alerts }}{{ .Annotations.summary }}\n{{ end }}" 26 | text: "{{ range .Alerts }}{{ .Annotations.description }}\n{{ end }}" 27 | api_url: https://hooks.slack.com/services/API_URL_SECRET 28 | configure_secret.sh: |- 29 | echo 'Configuring Slack hook url.' 30 | sed -i -e s,API_URL_SECRET,"$SLACK_HOOK_URL",g /etc/alertmanager/config.yml 31 | exec /bin/alertmanager $* 32 | -------------------------------------------------------------------------------- /install/aws-minimesos/provision.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | expose_container() { 4 | CONTAINER_IP=$(sudo docker inspect $(sudo docker ps | grep $1 | awk '{print $1}') | jq '.[0].NetworkSettings.IPAddress' | tr -d \"); 5 | sudo iptables -t nat -A DOCKER -p tcp --dport $2 -j DNAT --to-destination ${CONTAINER_IP}:$2 6 | } 7 | 8 | sudo apt-get update 9 | 10 | sudo apt-get install -y curl git jq 11 | 12 | # Install docker 13 | curl -sSL https://get.docker.com/ | sh 14 | 15 | # Install minimesos 16 | curl -sSL https://minimesos.org/install | sh 17 | sudo cp ~/.minimesos/bin/minimesos /usr/local/bin/minimesos 18 | 19 | # Install weave 20 | sudo curl -L git.io/weave -o /usr/local/bin/weave 21 | sudo chmod +x /usr/local/bin/weave 22 | 23 | # Clone repo to get deployment scripts 24 | git clone https://github.com/microservices-demo/microservices-demo.git 25 | cd microservices-demo 26 | 27 | cd deploy/minimesos-marathon 28 | ./minimesos-marathon.sh start 29 | 30 | # Expose marathon and mesos. NOT FOR PRODUCTION!
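# expose_container (defined at the top of this script) resolves the container's IP via docker inspect and appends an iptables DNAT rule so the given host port is forwarded to the container. The rules are not persisted, so they disappear on reboot.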
31 | expose_container marathon 8080 32 | expose_container mesos-master 5050 33 | -------------------------------------------------------------------------------- /deploy/apcera/stopSockShop.sh: -------------------------------------------------------------------------------- 1 | # Stops all the Sock Shop apps 2 | 3 | # Set namespace to user's default namespace 4 | apc namespace -d 5 | 6 | # append /sockshop to user's default namespace returned by apc namespace 7 | OUT=`apc namespace` 8 | NAMESPACE=`echo $OUT | cut -f3 -d" " | sed "s/'//g"`/sockshop 9 | echo ${NAMESPACE} 10 | 11 | # Change NAMESPACE if you don't like the default generated above 12 | # But if you change it here, you'll need to change it in other scripts 13 | #NAMESPACE= 14 | 15 | # set actual namespace to $NAMESPACE 16 | apc namespace ${NAMESPACE} 17 | 18 | echo "Stopping Sock Shop apps in namespace: ${NAMESPACE}" 19 | 20 | apc app stop user-sim 21 | apc app stop front-end 22 | apc app stop orders 23 | apc app stop orders-db 24 | apc app stop carts 25 | apc app stop carts-db 26 | apc app stop catalogue 27 | apc app stop catalogue-db 28 | apc app stop user 29 | apc app stop user-db 30 | apc app stop payment 31 | apc app stop shipping 32 | apc app stop queue-master 33 | apc app stop rabbitmq 34 | apc app stop zipkin 35 | 36 | # List the apps to verify they are all stopped 37 | apc app list 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/rabbitmq-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: rabbitmq 6 | labels: 7 | name: rabbitmq 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: rabbitmq 15 | annotations: 16 | prometheus.io/scrape: "false" 17 | spec: 18 | containers: 19 | - name: rabbitmq 20 | image: rabbitmq:3.6.8-management 21 | ports: 22 | - containerPort: 15672 23 | name: management 24 | - containerPort: 5672 25 | name: rabbitmq 26 | securityContext: 27 | capabilities: 28 | drop: 29 | - all 30 | add: 31 | - CHOWN 32 | - SETGID 33 | - SETUID 34 | - DAC_OVERRIDE 35 | readOnlyRootFilesystem: true 36 | - name: rabbitmq-exporter 37 | image: kbudde/rabbitmq-exporter 38 | ports: 39 | - containerPort: 9090 40 | name: exporter 41 | nodeSelector: 42 | beta.kubernetes.io/os: linux 43 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-alerting/alertmanager-dep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: alertmanager 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: alertmanager 10 | template: 11 | metadata: 12 | name: alertmanager 13 | labels: 14 | app: alertmanager 15 | spec: 16 | containers: 17 | - name: alertmanager 18 | image: prom/alertmanager:latest 19 | env: 20 | - name: SLACK_HOOK_URL 21 | valueFrom: 22 | secretKeyRef: 23 | name: slack-hook-url 24 | key: slack-hook-url 25 | command: ['/bin/sh', '/etc/alertmanager/configure_secret.sh'] 26 | args: 27 | - '-config.file=/etc/alertmanager/config.yml' 28 | - '-storage.path=/alertmanager' 29 | ports: 30 | - name: alertmanager 31 | containerPort: 9093 32 | volumeMounts: 33 | - name: config-volume 34 | mountPath: /etc/alertmanager 35 | volumes: 36 | - name: config-volume 37 | configMap: 38 | name: alertmanager 39 | nodeSelector: 40 | 
beta.kubernetes.io/os: linux 41 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/queue-master-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: queue-master 6 | labels: 7 | name: queue-master 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: queue-master 14 | spec: 15 | containers: 16 | - name: queue-master 17 | image: weaveworksdemos/queue-master:0.3.1 18 | env: 19 | {{- if .Values.zipkin.enabled }} 20 | - name: ZIPKIN 21 | value: {{ .Values.zipkin.url }} 22 | {{- end }} 23 | - name: JAVA_OPTS 24 | value: {{ .Values.java.options }} 25 | resources: 26 | limits: 27 | cpu: 300m 28 | memory: 1000Mi 29 | requests: 30 | cpu: 300m 31 | memory: 1000Mi 32 | ports: 33 | - containerPort: 80 34 | livenessProbe: 35 | httpGet: 36 | path: /health 37 | port: 80 38 | initialDelaySeconds: 300 39 | periodSeconds: 3 40 | readinessProbe: 41 | httpGet: 42 | path: /health 43 | port: 80 44 | initialDelaySeconds: 180 45 | periodSeconds: 3 46 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/front-end-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: front-end 6 | spec: 7 | replicas: {{ .Values.frontend.replicas }} 8 | template: 9 | metadata: 10 | labels: 11 | name: front-end 12 | spec: 13 | containers: 14 | - name: front-end 15 | image: weaveworksdemos/front-end:0.3.12 16 | resources: 17 | limits: 18 | cpu: 300m 19 | memory: 1000Mi 20 | requests: 21 | cpu: 100m 22 | memory: 300Mi 23 | ports: 24 | - containerPort: 8079 25 | env: 26 | - name: SESSION_REDIS 27 | value: "true" 28 | securityContext: 29 | runAsNonRoot: true 30 | runAsUser: 10001 31 | capabilities: 32 | drop: 33 | - all 34 | readOnlyRootFilesystem: true 35 | livenessProbe: 36 | httpGet: 37 | path: / 38 | port: 8079 39 | initialDelaySeconds: 300 40 | periodSeconds: 3 41 | readinessProbe: 42 | httpGet: 43 | path: / 44 | port: 8079 45 | initialDelaySeconds: 30 46 | periodSeconds: 3 47 | -------------------------------------------------------------------------------- /deploy/mesos-cni/provisionWeaveCNI.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "$1" ]; then 4 | echo "Warn: No Master IP passed. Assuming this is master" 5 | fi 6 | 7 | sudo curl -sL git.io/weave -o /usr/local/bin/weave 8 | sudo chmod a+x /usr/local/bin/weave 9 | sudo mkdir -p /opt/cni/bin 10 | sudo mkdir -p /etc/cni/net.d 11 | sudo weave reset --force # Precautionary 12 | sudo weave setup 13 | sudo weave launch --no-dns $1 14 | sudo weave expose 15 | 16 | # Make dirs if they don't exist 17 | sudo mkdir -p /opt/cni/bin 18 | sudo mkdir -p /etc/cni/net.d 19 | 20 | # Add location of binary and conf directories for CNI. 
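# Note: packaged Mesos agents read per-flag files from /etc/mesos-slave/, so the two files written below are equivalent to passing --network_cni_plugins_dir and --network_cni_config_dir to the agent.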
21 | echo '/opt/cni/bin' | sudo tee /etc/mesos-slave/network_cni_plugins_dir 22 | echo '/etc/cni/net.d' | sudo tee /etc/mesos-slave/network_cni_config_dir 23 | 24 | # WORKAROUND TO FIX WEAVE BUG: https://github.com/weaveworks/weave/issues/2394 25 | 26 | echo '#!/bin/sh 27 | docker run --rm --privileged --net=host -v /var/run/docker.sock:/var/run/docker.sock --pid=host -i \ 28 | -e CNI_VERSION -e CNI_COMMAND -e CNI_CONTAINERID -e CNI_NETNS \ 29 | -e CNI_IFNAME -e CNI_ARGS -e CNI_PATH -v /etc/cni:/etc/cni -v /opt/cni:/opt/cni \ 30 | -v /run/mesos/isolators/network/cni:/run/mesos/isolators/network/cni \ 31 | weaveworks/plugin:1.6.0 --cni-net' | sudo tee /opt/cni/bin/weave-net 32 | 33 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/front-end-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: front-end 6 | namespace: sock-shop 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | name: front-end 13 | spec: 14 | containers: 15 | - name: front-end 16 | image: weaveworksdemos/front-end:0.3.12 17 | resources: 18 | limits: 19 | cpu: 300m 20 | memory: 1000Mi 21 | requests: 22 | cpu: 100m 23 | memory: 300Mi 24 | ports: 25 | - containerPort: 8079 26 | env: 27 | - name: SESSION_REDIS 28 | value: "true" 29 | securityContext: 30 | runAsNonRoot: true 31 | runAsUser: 10001 32 | capabilities: 33 | drop: 34 | - all 35 | readOnlyRootFilesystem: true 36 | livenessProbe: 37 | httpGet: 38 | path: / 39 | port: 8079 40 | initialDelaySeconds: 300 41 | periodSeconds: 3 42 | readinessProbe: 43 | httpGet: 44 | path: / 45 | port: 8079 46 | initialDelaySeconds: 30 47 | periodSeconds: 3 48 | nodeSelector: 49 | beta.kubernetes.io/os: linux 50 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/payment-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: payment 6 | labels: 7 | name: payment 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: payment 15 | spec: 16 | containers: 17 | - name: payment 18 | image: weaveworksdemos/payment:0.4.3 19 | resources: 20 | limits: 21 | cpu: 100m 22 | memory: 100Mi 23 | requests: 24 | cpu: 99m 25 | memory: 100Mi 26 | ports: 27 | - containerPort: 80 28 | securityContext: 29 | runAsNonRoot: true 30 | runAsUser: 10001 31 | capabilities: 32 | drop: 33 | - all 34 | add: 35 | - NET_BIND_SERVICE 36 | readOnlyRootFilesystem: true 37 | livenessProbe: 38 | httpGet: 39 | path: /health 40 | port: 80 41 | initialDelaySeconds: 300 42 | periodSeconds: 3 43 | readinessProbe: 44 | httpGet: 45 | path: /health 46 | port: 80 47 | initialDelaySeconds: 180 48 | periodSeconds: 3 49 | nodeSelector: 50 | beta.kubernetes.io/os: linux 51 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/queue-master-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: queue-master 6 | labels: 7 | name: queue-master 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: queue-master 15 | spec: 16 | containers: 17 | - name: queue-master 18 | image: 
weaveworksdemos/queue-master:0.3.1 19 | env: 20 | - name: ZIPKIN 21 | value: zipkin.jaeger.svc.cluster.local 22 | - name: JAVA_OPTS 23 | value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom 24 | resources: 25 | limits: 26 | cpu: 300m 27 | memory: 500Mi 28 | requests: 29 | cpu: 300m 30 | memory: 500Mi 31 | ports: 32 | - containerPort: 80 33 | livenessProbe: 34 | httpGet: 35 | path: /health 36 | port: 80 37 | initialDelaySeconds: 300 38 | periodSeconds: 3 39 | readinessProbe: 40 | httpGet: 41 | path: /health 42 | port: 80 43 | initialDelaySeconds: 180 44 | periodSeconds: 3 45 | nodeSelector: 46 | beta.kubernetes.io/os: linux 47 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/user-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: user 6 | labels: 7 | name: user 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: user 15 | spec: 16 | containers: 17 | - name: user 18 | image: weaveworksdemos/user:0.4.7 19 | resources: 20 | limits: 21 | cpu: 300m 22 | memory: 100Mi 23 | requests: 24 | cpu: 100m 25 | memory: 100Mi 26 | ports: 27 | - containerPort: 80 28 | env: 29 | - name: MONGO_HOST 30 | value: user-db:27017 31 | securityContext: 32 | runAsNonRoot: true 33 | runAsUser: 10001 34 | capabilities: 35 | drop: 36 | - all 37 | add: 38 | - NET_BIND_SERVICE 39 | readOnlyRootFilesystem: true 40 | livenessProbe: 41 | httpGet: 42 | path: /health 43 | port: 80 44 | initialDelaySeconds: 300 45 | periodSeconds: 3 46 | readinessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 180 51 | periodSeconds: 3 52 | nodeSelector: 53 | beta.kubernetes.io/os: linux 54 | -------------------------------------------------------------------------------- /staging/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_amis" { 2 | description = "The AMI to use for setting up the instances." 3 | default = { 4 | # Ubuntu Xenial 16.04 LTS 5 | "eu-west-1" = "ami-844e0bf7" 6 | } 7 | } 8 | 9 | variable "aws_region" { 10 | description = "The AWS region to create things in." 11 | default = "eu-west-1" 12 | } 13 | 14 | variable "bastion_security_group" { 15 | description = "The id of the security group where the bastion host resides." 16 | } 17 | 18 | variable "instance_user" { 19 | description = "The user account to use on the instances to run the scripts." 20 | default = "ubuntu" 21 | } 22 | 23 | variable "key_name" { 24 | description = "Name of the SSH keypair to use in AWS." 25 | } 26 | 27 | variable "master_instance_type" { 28 | description = "The instance type to use for the Kubernetes master." 29 | default = "m3.large" 30 | } 31 | 32 | variable "node_instance_type" { 33 | description = "The instance type to use for the Kubernetes nodes." 34 | default = "m4.xlarge" 35 | } 36 | 37 | variable "nodecount" { 38 | description = "The number of nodes in the cluster." 39 | default = "4" 40 | } 41 | 42 | variable "private_key_file" { 43 | description = "The private key for connection to the instances as the user. Corresponds to the key_name variable." 
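# No default is set on purpose: Terraform will prompt for this value unless it is supplied via -var or the TF_VAR_private_key_file environment variable.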
44 | } 45 | 46 | variable "weave_cloud_token" { 47 | description = "Token from Weave Cloud" 48 | } 49 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-dep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-deployment 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | strategy: 9 | rollingUpdate: 10 | maxSurge: 0 11 | maxUnavailable: 1 12 | type: RollingUpdate 13 | selector: 14 | matchLabels: 15 | app: prometheus 16 | template: 17 | metadata: 18 | name: prometheus 19 | labels: 20 | app: prometheus 21 | spec: 22 | serviceAccount: prometheus 23 | containers: 24 | - name: prometheus 25 | image: prom/prometheus:v1.5.2 26 | args: 27 | - '-storage.local.retention=360h' 28 | - '-storage.local.memory-chunks=1048576' 29 | - '-config.file=/etc/prometheus/prometheus.yml' 30 | - '-alertmanager.url=http://alertmanager:9093' 31 | ports: 32 | - name: web 33 | containerPort: 9090 34 | volumeMounts: 35 | - name: config-volume 36 | mountPath: /etc/prometheus 37 | - name: alertrules-volume 38 | mountPath: /etc/prometheus-rules 39 | volumes: 40 | - name: config-volume 41 | configMap: 42 | name: prometheus-configmap 43 | - name: alertrules-volume 44 | configMap: 45 | name: prometheus-alertrules 46 | nodeSelector: 47 | beta.kubernetes.io/os: linux 48 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/catalogue-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: catalogue 6 | labels: 7 | name: catalogue 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: catalogue 15 | spec: 16 | containers: 17 | - name: catalogue 18 | image: weaveworksdemos/catalogue:0.3.5 19 | command: ["/app"] 20 | args: 21 | - -port=80 22 | resources: 23 | limits: 24 | cpu: 100m 25 | memory: 100Mi 26 | requests: 27 | cpu: 100m 28 | memory: 100Mi 29 | ports: 30 | - containerPort: 80 31 | securityContext: 32 | runAsNonRoot: true 33 | runAsUser: 10001 34 | capabilities: 35 | drop: 36 | - all 37 | add: 38 | - NET_BIND_SERVICE 39 | readOnlyRootFilesystem: true 40 | livenessProbe: 41 | httpGet: 42 | path: /health 43 | port: 80 44 | initialDelaySeconds: 300 45 | periodSeconds: 3 46 | readinessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 180 51 | periodSeconds: 3 52 | nodeSelector: 53 | beta.kubernetes.io/os: linux 54 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/payment-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: payment 6 | labels: 7 | name: payment 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: payment 14 | spec: 15 | containers: 16 | - name: payment 17 | image: weaveworksdemos/payment:0.4.3 18 | resources: 19 | limits: 20 | cpu: 100m 21 | memory: 100Mi 22 | requests: 23 | cpu: 100m 24 | memory: 100Mi 25 | ports: 26 | - containerPort: 80 27 | {{- if .Values.zipkin.enabled }} 28 | env: 29 | - name: ZIPKIN 30 | value: http://{{ .Values.zipkin.url }}:9411/api/v1/spans 31 | {{- end }} 32 | securityContext: 33 | runAsNonRoot: 
true 34 | runAsUser: 10001 35 | capabilities: 36 | drop: 37 | - all 38 | add: 39 | - NET_BIND_SERVICE 40 | readOnlyRootFilesystem: true 41 | livenessProbe: 42 | httpGet: 43 | path: /health 44 | port: 80 45 | initialDelaySeconds: 300 46 | periodSeconds: 3 47 | readinessProbe: 48 | httpGet: 49 | path: /health 50 | port: 80 51 | initialDelaySeconds: 180 52 | periodSeconds: 3 53 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/catalogue-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: catalogue 6 | labels: 7 | name: catalogue 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: catalogue 14 | spec: 15 | containers: 16 | - name: catalogue 17 | image: weaveworksdemos/catalogue:0.3.5 18 | {{- if .Values.zipkin.enabled }} 19 | env: 20 | - name: ZIPKIN 21 | value: http://{{ .Values.zipkin.url }}:9411/api/v1/spans 22 | {{- end }} 23 | resources: 24 | limits: 25 | cpu: 100m 26 | memory: 100Mi 27 | requests: 28 | cpu: 100m 29 | memory: 100Mi 30 | ports: 31 | - containerPort: 80 32 | securityContext: 33 | runAsNonRoot: true 34 | runAsUser: 10001 35 | capabilities: 36 | drop: 37 | - all 38 | add: 39 | - NET_BIND_SERVICE 40 | readOnlyRootFilesystem: true 41 | livenessProbe: 42 | httpGet: 43 | path: /health 44 | port: 80 45 | initialDelaySeconds: 300 46 | periodSeconds: 3 47 | readinessProbe: 48 | httpGet: 49 | path: /health 50 | port: 80 51 | initialDelaySeconds: 180 52 | periodSeconds: 3 53 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-jaeger/payment-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: payment 6 | labels: 7 | name: payment 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: payment 15 | spec: 16 | containers: 17 | - name: payment 18 | image: weaveworksdemos/payment:0.4.3 19 | resources: 20 | limits: 21 | cpu: 100m 22 | memory: 100Mi 23 | requests: 24 | cpu: 100m 25 | memory: 100Mi 26 | ports: 27 | - containerPort: 80 28 | env: 29 | - name: ZIPKIN 30 | value: http://zipkin.jaeger.svc.cluster.local:9411/api/v1/spans 31 | securityContext: 32 | runAsNonRoot: true 33 | runAsUser: 10001 34 | capabilities: 35 | drop: 36 | - all 37 | add: 38 | - NET_BIND_SERVICE 39 | readOnlyRootFilesystem: true 40 | livenessProbe: 41 | httpGet: 42 | path: /health 43 | port: 80 44 | initialDelaySeconds: 300 45 | periodSeconds: 3 46 | readinessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 180 51 | periodSeconds: 3 52 | nodeSelector: 53 | beta.kubernetes.io/os: linux 54 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-jaeger/catalogue-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: catalogue 6 | labels: 7 | name: catalogue 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: catalogue 15 | spec: 16 | containers: 17 | - name: catalogue 18 | image: weaveworksdemos/catalogue:0.3.5 19 | env: 20 | - name: ZIPKIN 21 | value: 
http://zipkin.jaeger.svc.cluster.local:9411/api/v1/spans 22 | resources: 23 | limits: 24 | cpu: 100m 25 | memory: 100Mi 26 | requests: 27 | cpu: 100m 28 | memory: 100Mi 29 | ports: 30 | - containerPort: 80 31 | securityContext: 32 | runAsNonRoot: true 33 | runAsUser: 10001 34 | capabilities: 35 | drop: 36 | - all 37 | add: 38 | - NET_BIND_SERVICE 39 | readOnlyRootFilesystem: true 40 | livenessProbe: 41 | httpGet: 42 | path: /health 43 | port: 80 44 | initialDelaySeconds: 300 45 | periodSeconds: 3 46 | readinessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 180 51 | periodSeconds: 3 52 | nodeSelector: 53 | beta.kubernetes.io/os: linux 54 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/user-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: user 6 | labels: 7 | name: user 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: user 14 | spec: 15 | containers: 16 | - name: user 17 | image: weaveworksdemos/user:0.4.4 18 | resources: 19 | limits: 20 | cpu: 300m 21 | memory: 1000Mi 22 | requests: 23 | cpu: 100m 24 | memory: 400Mi 25 | ports: 26 | - containerPort: 80 27 | env: 28 | - name: MONGO_HOST 29 | value: user-db:27017 30 | {{- if .Values.zipkin.enabled }} 31 | - name: ZIPKIN 32 | value: http://{{ .Values.zipkin.url }}:9411/api/v1/spans 33 | {{- end }} 34 | securityContext: 35 | runAsNonRoot: true 36 | runAsUser: 10001 37 | capabilities: 38 | drop: 39 | - all 40 | add: 41 | - NET_BIND_SERVICE 42 | readOnlyRootFilesystem: true 43 | livenessProbe: 44 | httpGet: 45 | path: /health 46 | port: 80 47 | initialDelaySeconds: 300 48 | periodSeconds: 3 49 | readinessProbe: 50 | httpGet: 51 | path: /health 52 | port: 80 53 | initialDelaySeconds: 180 54 | periodSeconds: 3 55 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-jaeger/user-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: user 6 | labels: 7 | name: user 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: user 15 | spec: 16 | containers: 17 | - name: user 18 | image: weaveworksdemos/user:0.4.7 19 | resources: 20 | limits: 21 | cpu: 300m 22 | memory: 100Mi 23 | requests: 24 | cpu: 100m 25 | memory: 100Mi 26 | ports: 27 | - containerPort: 80 28 | env: 29 | - name: MONGO_HOST 30 | value: user-db:27017 31 | - name: ZIPKIN 32 | value: http://zipkin.jaeger.svc.cluster.local:9411/api/v1/spans 33 | securityContext: 34 | runAsNonRoot: true 35 | runAsUser: 10001 36 | capabilities: 37 | drop: 38 | - all 39 | add: 40 | - NET_BIND_SERVICE 41 | readOnlyRootFilesystem: true 42 | livenessProbe: 43 | httpGet: 44 | path: /health 45 | port: 80 46 | initialDelaySeconds: 300 47 | periodSeconds: 3 48 | readinessProbe: 49 | httpGet: 50 | path: /health 51 | port: 80 52 | initialDelaySeconds: 180 53 | periodSeconds: 3 54 | nodeSelector: 55 | beta.kubernetes.io/os: linux 56 | -------------------------------------------------------------------------------- /deploy/apcera/deleteSockShop.sh: -------------------------------------------------------------------------------- 1 | # Deletes all the Sock Shop apps 2 | # Automatically detects correct namespace 3 | 4 | # Set 
namespace to user's default namespace 5 | apc namespace -d 6 | 7 | # append /sockshop to user's default namespace returned by apc namespace 8 | OUT=`apc namespace` 9 | NAMESPACE=`echo $OUT | cut -f3 -d" " | sed "s/'//g"`/sockshop 10 | echo ${NAMESPACE} 11 | 12 | # Change NAMESPACE if you don't like the default generated above 13 | # But if you change it here, you'll need to change it in other scripts 14 | #NAMESPACE= 15 | 16 | # set actual namespace to $NAMESPACE 17 | apc namespace ${NAMESPACE} 18 | 19 | echo "Deleting Sock Shop apps in namespace: ${NAMESPACE}" 20 | 21 | apc app delete user-sim --batch 22 | apc app delete front-end --batch 23 | apc app delete carts --batch 24 | apc app delete carts-db --batch 25 | apc app delete catalogue --batch 26 | apc app delete catalogue-db --batch 27 | apc app delete orders --batch 28 | apc app delete orders-db --batch 29 | apc app delete payment --batch 30 | apc app delete shipping --batch 31 | apc app delete queue-master --batch 32 | apc app delete rabbitmq --batch 33 | apc app delete user --batch 34 | apc app delete user-db --batch 35 | apc app delete zipkin --batch 36 | 37 | # List the remaining apps to verify that there are none. 38 | echo "Here are the remaining apps in namespace: ${NAMESPACE}" 39 | apc app list 40 | 41 | # Delete the sockshop-network too 42 | echo "Deleting the sockshop network in namespace: ${NAMESPACE}" 43 | apc network delete sockshop-network --batch 44 | -------------------------------------------------------------------------------- /install/aws-minimesos/main.tf: -------------------------------------------------------------------------------- 1 | // This script expects the aws key and secret as variables. This is necessary because we need access to the aws variables during the provisioning step. 2 | // e.g. terraform apply -var 'access_key=......' -var 'secret_key=......' . 3 | 4 | // So, first run 'terraform get' 5 | // Finally, run the plan and apply steps to create the cluster. You can also provide the aws credentials with environment variables like: TF_VAR_access_keys... 6 | 7 | resource "aws_instance" "minimesos" { 8 | 9 | count = 1 10 | ami = "${lookup(var.aws_amis, var.aws_region)}" 11 | availability_zone = "eu-west-1b" 12 | 13 | root_block_device { 14 | volume_size = 50 15 | volume_type = "gp2" 16 | } 17 | 18 | instance_type = "m4.xlarge" 19 | key_name = "${var.aws_key_name}" 20 | subnet_id = "${aws_subnet.terraform.id}" 21 | 22 | vpc_security_group_ids = [ 23 | "${aws_security_group.terraform.id}"] 24 | 25 | tags { 26 | Name = "minimesos-${count.index}" 27 | } 28 | 29 | connection { 30 | user = "ubuntu" 31 | private_key = "${var.private_key_file}" 32 | } 33 | 34 | provisioner "remote-exec" { 35 | inline = [ 36 | "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 37 | ] 38 | } 39 | provisioner "file" { 40 | source = "provision.sh" 41 | destination = "/tmp/provision.sh" 42 | } 43 | 44 | provisioner "remote-exec" { 45 | inline = [ 46 | "chmod +x /tmp/provision.sh", 47 | "/tmp/provision.sh" 48 | ] 49 | } 50 | } 51 | 52 | 53 | -------------------------------------------------------------------------------- /deploy/kubernetes/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_amis" { 2 | description = "The AMI to use for setting up the instances."
3 | default = { 4 | # Ubuntu Xenial 16.04 LTS 5 | "eu-west-1" = "ami-58b7972b" 6 | "eu-west-2" = "ami-ede2e889" 7 | "eu-central-1" = "ami-1535f57a" 8 | "us-east-1" = "ami-bcd7c3ab" 9 | "us-east-2" = "ami-fcc19b99" 10 | "us-west-1" = "ami-ed50018d" 11 | "us-west-2" = "ami-15d76075" 12 | } 13 | } 14 | 15 | data "aws_availability_zones" "available" {} 16 | 17 | variable "aws_region" { 18 | description = "The AWS region to create things in." 19 | default = "eu-central-1" 20 | } 21 | 22 | variable "instance_user" { 23 | description = "The user account to use on the instances to run the scripts." 24 | default = "ubuntu" 25 | } 26 | 27 | variable "key_name" { 28 | description = "Name of the SSH keypair to use in AWS." 29 | default = "deploy-docs-k8s" 30 | } 31 | 32 | variable "master_instance_type" { 33 | description = "The instance type to use for the Kubernetes master." 34 | default = "m3.large" 35 | } 36 | 37 | variable "node_instance_type" { 38 | description = "The instance type to use for the Kubernetes nodes." 39 | default = "m3.large" 40 | } 41 | 42 | variable "node_count" { 43 | description = "The number of nodes in the cluster." 44 | default = "3" 45 | } 46 | 47 | variable "private_key_path" { 48 | description = "The private key for connection to the instances as the user. Corresponds to the key_name variable." 49 | default = "~/.ssh/deploy-docs-k8s.pem" 50 | } 51 | -------------------------------------------------------------------------------- /deploy/docker-compose/docker-compose.monitoring.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | prometheus: 5 | image: prom/prometheus 6 | container_name: prometheus 7 | volumes: 8 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 9 | - ./alert.rules:/etc/prometheus/alert.rules 10 | command: 11 | - '-config.file=/etc/prometheus/prometheus.yml' 12 | - '-storage.local.path=/prometheus' 13 | - '-alertmanager.url=http://alertmanager:9093' 14 | expose: 15 | - 9090 16 | ports: 17 | - 9090:9090 18 | links: 19 | - alertmanager:alertmanager 20 | alertmanager: 21 | image: prom/alertmanager 22 | ports: 23 | - 9093:9093 24 | volumes: 25 | - ./alertmanager.yml:/etc/alertmanager/config.yml 26 | command: 27 | - '-config.file=/etc/alertmanager/config.yml' 28 | - '-storage.path=/alertmanager' 29 | grafana: 30 | image: grafana/grafana 31 | depends_on: 32 | - prometheus 33 | ports: 34 | - 3000:3000 35 | environment: 36 | - GF_SECURITY_ADMIN_PASSWORD=foobar 37 | - GF_USERS_ALLOW_SIGN_UP=false 38 | links: 39 | - prometheus:prometheus 40 | importer: 41 | image: giantswarm/tiny-tools 42 | depends_on: 43 | - grafana 44 | entrypoint: /bin/sh import.sh 45 | working_dir: /opt/grafana-import-dashboards 46 | volumes: 47 | - ./grafana:/opt/grafana-import-dashboards 48 | links: 49 | - grafana:grafana 50 | -------------------------------------------------------------------------------- /internal-docs/design.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | --- 4 | 5 | ## Design 6 | 7 | ### Direction 8 | 9 | The goal of this project is to become a "reference microservices demo". 10 | To this end, it aims to: 11 | 12 | - Demonstrate microservice best practices (and mistakes!) 
13 | - Be cross-platform: deploy to all orchestrators 14 | - Show the benefits of continuous integration/deployment 15 | - Demonstrate how dev-ops and microservices complement each other 16 | - Provide a "real-life" testable application for various orchestration 17 | platforms 18 | 19 | ### Architecture 20 | 21 | ![Architecture diagram](https://github.com/microservices-demo/microservices-demo.github.io/blob/HEAD/assets/Architecture.png "Architecture") 22 | 23 | The architecture of the demo microservices application was intentionally designed to provide as many microservices as possible. If you are considering your own design, we would recommend the iterative approach, whereby you only define new microservices when you see issues (performance/testing/coupling) developing in your application. 24 | 25 | Furthermore, it is intentionally polyglot to exercise a number of different technologies. Again, we'd recommend that you only consider new technologies based upon a need. 26 | 27 | As seen in the image above, the microservices are roughly defined by their function in an e-commerce site. Networks are specified, but due to technology limitations they may not be implemented in some deployments. 28 | 29 | All services communicate using REST over HTTP. This was chosen due to the simplicity of development and testing. Their API specifications are under development. 30 | -------------------------------------------------------------------------------- /deploy/kubernetes/autoscaling/grafana-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-grafana 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | task: monitoring 12 | k8s-app: grafana 13 | spec: 14 | containers: 15 | - name: grafana 16 | image: gcr.io/google_containers/heapster-grafana-amd64:v4.0.2 17 | ports: 18 | - containerPort: 3000 19 | protocol: TCP 20 | volumeMounts: 21 | - mountPath: /var 22 | name: grafana-storage 23 | env: 24 | - name: INFLUXDB_HOST 25 | value: monitoring-influxdb 26 | - name: GRAFANA_PORT 27 | value: "3000" 28 | # The following env variables are required to make Grafana accessible via 29 | # the kubernetes api-server proxy. On production clusters, we recommend 30 | # removing these env variables, setting up auth for grafana, and exposing the grafana 31 | # service using a LoadBalancer or a public IP.
32 | - name: GF_AUTH_BASIC_ENABLED 33 | value: "false" 34 | - name: GF_AUTH_ANONYMOUS_ENABLED 35 | value: "true" 36 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 37 | value: Admin 38 | - name: GF_SERVER_ROOT_URL 39 | # If you're only using the API Server proxy, set this value instead: 40 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 41 | value: / 42 | volumes: 43 | - name: grafana-storage 44 | emptyDir: {} 45 | -------------------------------------------------------------------------------- /deploy/nomad/jobs/logging-elk.nomad: -------------------------------------------------------------------------------- 1 | job "logging-elk" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | 5 | constraint { 6 | attribute = "${attr.kernel.name}" 7 | value = "linux" 8 | } 9 | 10 | update { 11 | stagger = "10s" 12 | max_parallel = 1 13 | } 14 | 15 | # - logging-elk - # 16 | group "logging-elk" { 17 | 18 | # - elasticsearch - # 19 | task "elasticsearch" { 20 | driver = "docker" 21 | 22 | config { 23 | image = "elasticsearch" 24 | hostname = "elasticsearch.weave.local" 25 | network_mode = "external" 26 | dns_servers = ["172.17.0.1"] 27 | dns_search_domains = ["weave.local."] 28 | logging { 29 | type = "json-file" 30 | } 31 | } 32 | 33 | resources { 34 | memory = 3000 35 | network { 36 | mbits = 50 37 | } 38 | } 39 | 40 | } 41 | # - end elasticsearch - # 42 | 43 | # - kibana - # 44 | task "kibana" { 45 | driver = "docker" 46 | 47 | config { 48 | image = "kibana" 49 | hostname = "kibana.weave.local" 50 | network_mode = "external" 51 | dns_servers = ["172.17.0.1"] 52 | dns_search_domains = ["weave.local."] 53 | logging { 54 | type = "json-file" 55 | } 56 | } 57 | 58 | resources { 59 | memory = 2000 60 | network { 61 | mbits = 50 62 | port "kibana" { 63 | static = "5601" 64 | } 65 | } 66 | } 67 | 68 | } 69 | # - end kibana - # 70 | 71 | } # - end logging-elk - # 72 | 73 | } 74 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/carts-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: carts 6 | labels: 7 | name: carts 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: carts 14 | spec: 15 | containers: 16 | - name: carts 17 | image: weaveworksdemos/carts:0.4.8 18 | env: 19 | {{- if .Values.zipkin.enabled }} 20 | - name: ZIPKIN 21 | value: {{ .Values.zipkin.url }} 22 | {{- end }} 23 | - name: JAVA_OPTS 24 | value: {{ .Values.java.options }} 25 | resources: 26 | limits: 27 | cpu: 300m 28 | memory: 2000Mi 29 | requests: 30 | cpu: 300m 31 | memory: 2000Mi 32 | ports: 33 | - containerPort: 80 34 | securityContext: 35 | runAsNonRoot: true 36 | runAsUser: 10001 37 | capabilities: 38 | drop: 39 | - all 40 | add: 41 | - NET_BIND_SERVICE 42 | readOnlyRootFilesystem: true 43 | volumeMounts: 44 | - mountPath: /tmp 45 | name: tmp-volume 46 | livenessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 300 51 | periodSeconds: 3 52 | readinessProbe: 53 | httpGet: 54 | path: /health 55 | port: 80 56 | initialDelaySeconds: 180 57 | periodSeconds: 3 58 | volumes: 59 | - name: tmp-volume 60 | emptyDir: 61 | medium: Memory 62 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/orders-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: orders 6 | labels: 7 | name: orders 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: orders 14 | spec: 15 | containers: 16 | - name: orders 17 | image: weaveworksdemos/orders:0.4.7 18 | env: 19 | {{- if .Values.zipkin.enabled }} 20 | - name: ZIPKIN 21 | value: {{ .Values.zipkin.url }} 22 | {{- end }} 23 | - name: JAVA_OPTS 24 | value: {{ .Values.java.options }} 25 | resources: 26 | limits: 27 | cpu: 500m 28 | memory: 2000Mi 29 | requests: 30 | cpu: 200m 31 | memory: 2000Mi 32 | ports: 33 | - containerPort: 80 34 | securityContext: 35 | runAsNonRoot: true 36 | runAsUser: 10001 37 | capabilities: 38 | drop: 39 | - all 40 | add: 41 | - NET_BIND_SERVICE 42 | readOnlyRootFilesystem: true 43 | volumeMounts: 44 | - mountPath: /tmp 45 | name: tmp-volume 46 | livenessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 300 51 | periodSeconds: 3 52 | readinessProbe: 53 | httpGet: 54 | path: /health 55 | port: 80 56 | initialDelaySeconds: 180 57 | periodSeconds: 3 58 | volumes: 59 | - name: tmp-volume 60 | emptyDir: 61 | medium: Memory 62 | 63 | -------------------------------------------------------------------------------- /push.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then 4 | echo "Not pushing artifacts in cron jobs"; 5 | exit 0; 6 | fi; 7 | 8 | if [ -z "$DOCKER_PASS" ] ; then 9 | echo "This is a build triggered by an external PR. Skipping docker push."; 10 | exit 0; 11 | fi; 12 | 13 | echo $DOCKER_PASS | docker login -u $DOCKER_USER --password-stdin 14 | 15 | for svc in openapi healthcheck; do 16 | export REPO=${GROUP}/$(basename $svc); 17 | echo "Building ${REPO}:$TRAVIS_COMMIT"; 18 | docker build -t ${REPO}:$TRAVIS_COMMIT ./$svc; DOCKER_EXIT=$(echo $?); if [[ "$DOCKER_EXIT" > 0 ]] ; then 19 | echo "Docker build failed with exit code $DOCKER_EXIT"; 20 | exit 1; 21 | fi; 22 | export DOCKER_PUSH=1; 23 | while [ "$DOCKER_PUSH" -gt 0 ] ; do 24 | echo "Pushing $REPO:$TRAVIS_COMMIT"; 25 | docker push $REPO:$TRAVIS_COMMIT; 26 | DOCKER_PUSH=$(echo $?); 27 | if [[ "$DOCKER_PUSH" -gt 0 ]] ; then 28 | echo "Docker push failed with exit code $DOCKER_PUSH"; 29 | fi; 30 | done; 31 | if [ "$TRAVIS_BRANCH" == "master" ]; then 32 | docker tag $REPO:$TRAVIS_COMMIT $REPO:snapshot; 33 | echo "Pushing $REPO:snapshot"; 34 | docker push $REPO:snapshot; 35 | fi; 36 | if [ ! 
-z "$TRAVIS_TAG" ]; 37 | then docker tag $REPO:$TRAVIS_COMMIT $REPO:$TRAVIS_TAG; 38 | docker push $REPO:$TRAVIS_TAG; 39 | docker tag $REPO:$TRAVIS_COMMIT $REPO:latest; 40 | docker push $REPO:latest; 41 | fi; 42 | done 43 | 44 | mkdir cfn-to-publish 45 | jq ".Description += \" (microservices-demo/microservices-demo@${TRAVIS_COMMIT})\"" "deploy/aws-ecs/cloudformation.json" > cfn-to-publish/microservices-demo.json 46 | -------------------------------------------------------------------------------- /deploy/kubernetes/helm-chart/templates/shipping-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: shipping 6 | labels: 7 | name: shipping 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: shipping 14 | spec: 15 | containers: 16 | - name: shipping 17 | image: weaveworksdemos/shipping:0.4.8 18 | env: 19 | {{- if .Values.zipkin.enabled }} 20 | - name: ZIPKIN 21 | value: {{ .Values.zipkin.urlj }} 22 | {{- end }} 23 | - name: JAVA_OPTS 24 | value: {{ .Values.java.options }} 25 | resources: 26 | limits: 27 | cpu: 300m 28 | memory: 2000Mi 29 | requests: 30 | cpu: 300m 31 | memory: 2000Mi 32 | ports: 33 | - containerPort: 80 34 | securityContext: 35 | runAsNonRoot: true 36 | runAsUser: 10001 37 | capabilities: 38 | drop: 39 | - all 40 | add: 41 | - NET_BIND_SERVICE 42 | readOnlyRootFilesystem: true 43 | volumeMounts: 44 | - mountPath: /tmp 45 | name: tmp-volume 46 | livenessProbe: 47 | httpGet: 48 | path: /health 49 | port: 80 50 | initialDelaySeconds: 300 51 | periodSeconds: 3 52 | readinessProbe: 53 | httpGet: 54 | path: /health 55 | port: 80 56 | initialDelaySeconds: 180 57 | periodSeconds: 3 58 | volumes: 59 | - name: tmp-volume 60 | emptyDir: 61 | medium: Memory 62 | 63 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/grafana-dep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: grafana-core 5 | namespace: monitoring 6 | labels: 7 | app: grafana 8 | component: core 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | app: grafana 15 | component: core 16 | spec: 17 | containers: 18 | - image: grafana/grafana:4.1.2 19 | name: grafana-core 20 | imagePullPolicy: IfNotPresent 21 | # env: 22 | resources: 23 | # keep request = limit to keep this container in guaranteed class 24 | limits: 25 | cpu: 100m 26 | memory: 100Mi 27 | requests: 28 | cpu: 100m 29 | memory: 100Mi 30 | env: 31 | # The following env variables set up basic auth twith the default admin user and admin password. 
32 | - name: GF_AUTH_BASIC_ENABLED 33 | value: "true" 34 | - name: GF_AUTH_ANONYMOUS_ENABLED 35 | value: "false" 36 | # - name: GF_AUTH_ANONYMOUS_ORG_ROLE 37 | # value: Admin 38 | # does not really work, because of template variables in exported dashboards: 39 | # - name: GF_DASHBOARDS_JSON_ENABLED 40 | # value: "true" 41 | readinessProbe: 42 | httpGet: 43 | path: /login 44 | port: 3000 45 | # initialDelaySeconds: 30 46 | # timeoutSeconds: 1 47 | volumeMounts: 48 | - name: grafana-persistent-storage 49 | mountPath: /var 50 | volumes: 51 | - name: grafana-persistent-storage 52 | emptyDir: {} 53 | nodeSelector: 54 | beta.kubernetes.io/os: linux 55 | -------------------------------------------------------------------------------- /openapi/README.md: -------------------------------------------------------------------------------- 1 | # Testing API endpoints with Dredd 2 | 3 | This directory contains: 4 | - Data fixtures for microservices-demo services 5 | - Testing framework (Dredd) hooks.js file which adds fixtures 6 | - OpenAPI (Swagger 2.0) specification for each service 7 | - generate-server.sh, which generates a Go server based on the specs 8 | 9 | # Prerequisites 10 | - ECMAScript 2015 compatible runtime (NodeJS >= v6.x.x) 11 | 12 | # How to run 13 | 14 | In this directory, run 15 | ``` 16 | npm install 17 | ``` 18 | to install the dependencies. 19 | 20 | Then: 21 | ``` 22 | dredd -f hooks.js 23 | ``` 24 | 25 | Success output: 26 | ``` 27 | info: Beginning Dredd testing... 28 | info: Found Hookfiles: hooks.js 29 | MongoEndpoint: mongodb://localhost:32771/data 30 | pass: GET /carts/1 duration: 141ms 31 | pass: DELETE /carts/1 duration: 32ms 32 | pass: POST /carts/579f21ae98684924944651bf/items duration: 133ms 33 | skip: PATCH /carts/579f21ae98684924944651bf/items 34 | pass: DELETE /carts/579f21ae98684924944651bf/items/819e1fbf-8b7e-4f6d-811f-693534916a8b duration: 31ms 35 | complete: 4 passing, 0 failing, 0 errors, 1 skipped, 5 total 36 | complete: Tests took 792ms 37 | ``` 38 | 39 | 40 | # Run with docker 41 | Start the microservices demo app with docker compose: 42 | ``` 43 | cd /path/to/microservices-demo/deploy/docker-only/ 44 | docker-compose up -d 45 | ``` 46 | 47 | Build the included docker image with: 48 | ``` 49 | docker build -t "weaveworksdemos/openapi:latest" .
50 | ``` 51 | 52 | Run the openapi testing container: 53 | ``` 54 | docker run -h openapi --name openapi-tmqpb -v /path/to/api-specs/:/tmp/specs/ --link catalogue --link catalogue-db-zbqsv:mysql weaveworksdemos/openapi /tmp/specs/catalogue.json http://catalogue/ -f /tmp/specs/hooks.js 55 | ``` 56 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/carts-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: carts 6 | labels: 7 | name: carts 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: carts 15 | spec: 16 | containers: 17 | - name: carts 18 | image: weaveworksdemos/carts:0.4.8 19 | env: 20 | - name: ZIPKIN 21 | value: zipkin.jaeger.svc.cluster.local 22 | - name: JAVA_OPTS 23 | value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom 24 | resources: 25 | limits: 26 | cpu: 300m 27 | memory: 500Mi 28 | requests: 29 | cpu: 300m 30 | memory: 500Mi 31 | ports: 32 | - containerPort: 80 33 | securityContext: 34 | runAsNonRoot: true 35 | runAsUser: 10001 36 | capabilities: 37 | drop: 38 | - all 39 | add: 40 | - NET_BIND_SERVICE 41 | readOnlyRootFilesystem: true 42 | volumeMounts: 43 | - mountPath: /tmp 44 | name: tmp-volume 45 | livenessProbe: 46 | httpGet: 47 | path: /health 48 | port: 80 49 | initialDelaySeconds: 300 50 | periodSeconds: 3 51 | readinessProbe: 52 | httpGet: 53 | path: /health 54 | port: 80 55 | initialDelaySeconds: 180 56 | periodSeconds: 3 57 | volumes: 58 | - name: tmp-volume 59 | emptyDir: 60 | medium: Memory 61 | nodeSelector: 62 | beta.kubernetes.io/os: linux 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/microservices-demo/microservices-demo.svg?branch=master)](https://travis-ci.org/microservices-demo/microservices-demo) 2 | 3 | # Sock Shop : A Microservice Demo Application 4 | 5 | The application is the user-facing part of an online shop that sells socks. It is intended to aid the demonstration and testing of microservice and cloud native technologies. 6 | 7 | It is built using [Spring Boot](http://projects.spring.io/spring-boot/), [Go kit](http://gokit.io) and [Node.js](https://nodejs.org/) and is packaged in Docker containers. 8 | 9 | You can read more about the [application design](./internal-docs/design.md). 10 | 11 | ## Deployment Platforms 12 | 13 | The [deploy folder](./deploy/) contains scripts and instructions to provision the application onto your favourite platform. 14 | 15 | Please let us know if there is a platform that you would like to see supported. 16 | 17 | ## Bugs, Feature Requests and Contributing 18 | 19 | We'd love to see community contributions. We like to keep it simple and use Github issues to track bugs and feature requests and pull requests to manage contributions. See the [contribution information](.github/CONTRIBUTING.md) for more information. 
20 | 21 | ## Screenshot 22 | 23 | ![Sock Shop frontend](https://github.com/microservices-demo/microservices-demo.github.io/raw/master/assets/sockshop-frontend.png) 24 | 25 | ## Visualizing the application 26 | 27 | Use [Weave Scope](http://weave.works/products/weave-scope/) or [Weave Cloud](http://cloud.weave.works/) to visualize the application once it's running in the selected [target platform](./deploy/). 28 | 29 | ![Sock Shop in Weave Scope](https://github.com/microservices-demo/microservices-demo.github.io/raw/master/assets/sockshop-scope.png) 30 | 31 | ## 32 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/orders-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: orders 6 | labels: 7 | name: orders 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: orders 15 | spec: 16 | containers: 17 | - name: orders 18 | image: weaveworksdemos/orders:0.4.7 19 | env: 20 | - name: ZIPKIN 21 | value: zipkin.jaeger.svc.cluster.local 22 | - name: JAVA_OPTS 23 | value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom 24 | resources: 25 | limits: 26 | cpu: 500m 27 | memory: 500Mi 28 | requests: 29 | cpu: 200m 30 | memory: 500Mi 31 | ports: 32 | - containerPort: 80 33 | securityContext: 34 | runAsNonRoot: true 35 | runAsUser: 10001 36 | capabilities: 37 | drop: 38 | - all 39 | add: 40 | - NET_BIND_SERVICE 41 | readOnlyRootFilesystem: true 42 | volumeMounts: 43 | - mountPath: /tmp 44 | name: tmp-volume 45 | livenessProbe: 46 | httpGet: 47 | path: /health 48 | port: 80 49 | initialDelaySeconds: 300 50 | periodSeconds: 3 51 | readinessProbe: 52 | httpGet: 53 | path: /health 54 | port: 80 55 | initialDelaySeconds: 180 56 | periodSeconds: 3 57 | volumes: 58 | - name: tmp-volume 59 | emptyDir: 60 | medium: Memory 61 | nodeSelector: 62 | beta.kubernetes.io/os: linux 63 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests/shipping-dep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: shipping 6 | labels: 7 | name: shipping 8 | namespace: sock-shop 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | name: shipping 15 | spec: 16 | containers: 17 | - name: shipping 18 | image: weaveworksdemos/shipping:0.4.8 19 | env: 20 | - name: ZIPKIN 21 | value: zipkin.jaeger.svc.cluster.local 22 | - name: JAVA_OPTS 23 | value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom 24 | resources: 25 | limits: 26 | cpu: 300m 27 | memory: 500Mi 28 | requests: 29 | cpu: 300m 30 | memory: 500Mi 31 | ports: 32 | - containerPort: 80 33 | securityContext: 34 | runAsNonRoot: true 35 | runAsUser: 10001 36 | capabilities: 37 | drop: 38 | - all 39 | add: 40 | - NET_BIND_SERVICE 41 | readOnlyRootFilesystem: true 42 | volumeMounts: 43 | - mountPath: /tmp 44 | name: tmp-volume 45 | livenessProbe: 46 | httpGet: 47 | path: /health 48 | port: 80 49 | initialDelaySeconds: 300 50 | periodSeconds: 3 51 | readinessProbe: 52 | httpGet: 53 | path: /health 54 | port: 80 55 | initialDelaySeconds: 180 56 | periodSeconds: 3 57 | volumes: 58 | - name: tmp-volume 59 | emptyDir: 60 | medium: Memory 61 | 
nodeSelector: 62 | beta.kubernetes.io/os: linux 63 | -------------------------------------------------------------------------------- /staging/README.md: -------------------------------------------------------------------------------- 1 | # Staging Environment for the Microservice Demo 2 | 3 | 4 | # Setup cluster 5 | 6 | Use the scripts in this directory to set up a Kubernetes cluster on AWS from a Bastion host. 7 | 8 | * Create a bastion host in AWS 9 | 10 | * Install terraform 11 | 12 | * Clone this repository 13 | 14 | * Copy [terraform.tfvars.example](./terraform.tfvars.example) to terraform.tfvars and enter the missing information. Look for a description of the variables in [variables.tf](./variables.tf). 15 | 16 | * Plan the terraform run: `terraform plan -out staging.plan` 17 | 18 | * If all looks well, apply the plan: `terraform apply staging.plan`. 19 | 20 | If it somehow fails, destroy the cluster with `terraform destroy -force` and try again. 21 | 22 | * Access the Sock Shop via the ELB URL displayed when you run `terraform output`. Weave Scope/Flux should be visible from Weave Cloud. 23 | 24 | # Kubectl 25 | 26 | * kubectl should work from the bastion to control the Kubernetes cluster 27 | 28 | # Setup/Control Weave Flux 29 | 30 | * To gain control of Flux, download the binary from [here](https://github.com/weaveworks/flux/releases/latest) and export the Weave Cloud Flux token: 31 | 32 | ``` 33 | export FLUX_SERVICE_TOKEN= 34 | ``` 35 | 36 | * To make changes to the Flux config, run `fluxctl get-config` to download the current config: 37 | 38 | ``` 39 | fluxctl get-config > flux.conf 40 | ``` 41 | 42 | * Fill in the missing values, and run `fluxctl set-config --file=flux.conf` 43 | 44 | 45 | * To set sock-shop services to update automatically, use the command below: 46 | 47 | ``` 48 | for svc in front-end catalogue orders queue-master user cart catalogue user-db catalogue-db payment shipping; do 49 | fluxctl automate --service=sock-shop/$svc 50 | done 51 | ``` 52 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | sudo: required 3 | services: 4 | - docker 5 | jdk: 6 | - oraclejdk8 7 | env: 8 | global: 9 | - PATH="${PATH}:${HOME}/.local/bin" 10 | install: 11 | - pip install --user awscli 12 | script: 13 | - set -o pipefail 14 | - if [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then 15 | gem install travis-cron_tools; 16 | ruby .travis-cron.rb; 17 | fi; 18 | - if [ ! -z "$AWS_ACCESS_KEY" ]; then aws cloudformation validate-template --template-body 19 | "file:////${PWD}/deploy/aws-ecs/cloudformation.json" | jq .
; fi; 20 | after_success: 21 | - export GROUP=weaveworksdemos 22 | - "./push.sh" 23 | deploy: 24 | provider: s3 25 | access_key_id: "$AWS_ACCESS_KEY_ID" 26 | secret_access_key: "$AWS_SECRET_ACCESS_KEY" 27 | bucket: weaveworks-cfn-public 28 | skip_cleanup: true 29 | local_dir: cfn-to-publish 30 | upload_dir: microservices-demo 31 | acl: public_read 32 | on: 33 | branch: master 34 | condition: '"$TRAVIS_EVENT_TYPE" != "cron" && -z "$DEPLOY_DOC"' 35 | notifications: 36 | slack: 37 | rooms: 38 | secure: p9hoJ6bSxBNdRqrnOFQC+FHAkfhRAw+nxy27lCBwWRVTimB03Ja14RWUKIYkmmEt0WCAW7gQxPM4JHmoczIyaDjNmk5F+mw584ctqeBlKhdIq73RIKSilBwdo9aTCgTVPVuKyRqNIaESWmA95zs1NqTi1Hbf0ER22pFszetqfrQwdDpVK8siwLV6pOtqG+ugz9XWksCYbD+86PA9j9SNuVDTbBF2oI9xuXQ9tmubbJCoRTFBrDPiGMTd2pFqNUmL2naXVrNqNbhI5uTu2wKxGUTU9KZeRDN/a+M1nGh0Aegi+b8khioQ5/TmOfLALya/spLGqKGDK16TIAQXiVenaXlUkQ089td9jOMs8X/dk3fVsnq8hObLS5b//waSqU/x9miEGcDFiEWke8N+IG2e1PB/UjVyI02tdwQ/2XLMWuZIZtxHhcpLArCV/QZNvza0OhshvIQD+2e5kVD6er2iuXjJ3kex6rufAkMXNI1YzbHofLnmoH6XwZMaBWUa4yhivFp9vkggwEWUN/ZIgJnCmy8I9qM82IHlLDl3hdklRZlhexNhXnBxgc+bK0duMIC2qzQJq1Cfb555D5DWqph1PlrcMnrKmKvC2uJFhhsKHuo3jSa/WR/JHUg1WEWdh4gQVFIz0f/FgGM5wXm20pZn8l4rVkEoUT3KWztINNFjA9E= 39 | on_success: always 40 | on_failure: always 41 | on_start: always 42 | on_pull_requests: false 43 | -------------------------------------------------------------------------------- /deploy/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Installing sock-shop on Kubernetes 2 | 3 | See the [documentation](https://microservices-demo.github.io/deployment/kubernetes.html) on how to deploy Sock Shop using Minikube. 4 | 5 | ## Kubernetes manifests 6 | 7 | There are 2 sets of manifests for deploying Sock Shop on Kubernetes: one in the [manifests directory](manifests/), and complete-demo.yaml. The complete-demo.yaml is a single-file manifest 8 | made by concatenating all the manifests from the manifests directory, so please regenerate it when changing files in the manifests directory. 9 | 10 | ## Monitoring 11 | 12 | All monitoring is performed by Prometheus. All services expose a `/metrics` endpoint. All services have a Prometheus Histogram called `request_duration_seconds`, from which the `_count`, `_sum` and `_bucket` series are automatically generated. 13 | 14 | The manifests for the monitoring are spread across the [manifests-monitoring](./manifests-monitoring) and [manifests-alerting](./manifests-alerting/) directories. 15 | 16 | To use them, please run `kubectl create -f `. 17 | 18 | ### What's Included? 19 | 20 | * Sock-shop Grafana dashboards 21 | * Alertmanager with a 500-error alert connected to Slack 22 | * Prometheus with config to scrape all k8s pods, connected to the local Alertmanager. 23 | 24 | ### Ports 25 | 26 | Grafana is exposed on NodePort `31300` and Prometheus on `31090`. If running on a real cluster, the easiest way to connect to these ports is by port forwarding in an SSH command: 27 | ``` 28 | ssh -i $KEY -L 3000:$NODE_IN_CLUSTER:31300 -L 9090:$NODE_IN_CLUSTER:31090 ubuntu@$BASTION_IP 29 | ``` 30 | where all the pertinent information should be entered. Grafana and Prometheus will then be available on `http://localhost:3000` and `http://localhost:9090`. 31 | 32 | If on Minikube, you can connect via the VM IP address and the NodePort.
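For example, with Minikube running, a sketch like the following (assuming the default NodePorts above and the `minikube` CLI on your path) prints the URLs to open in a browser:
```
NODE_IP=$(minikube ip)
echo "Grafana:    http://${NODE_IP}:31300"
echo "Prometheus: http://${NODE_IP}:31090"
```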
33 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution guidelines 2 | 3 | We'd love to accept your contributions, large or small. Simply submit an issue or pull request via Github and involve one of the active members. Simple! But please read the rest of this document to ensure we're all on the same page. 4 | 5 | ## General Rules 6 | 7 | - Be kind and polite. Written language often does not convey the sentiment, so make sure you use lots of jokes and emoticons to get the sentiment across. 8 | - Prefer best practice. Everyone has their preferred style, but try to conform to current best practices. We don't enforce any strict rules. 9 | - Test your code to the best of your abilities. See the testing documentation for the correct scope of your test. 10 | 11 | ## Bug reports or feature requests 12 | 13 | Please open an issue if you have found an issue or have an idea for a new feature. Please follow the bug reporting guidelines if you submit an issue. 14 | 15 | ## New Contributors 16 | 17 | We have a list of issues on Github with "HelpWanted" labels attributed to them. These represent tasks that we don't have time to do, are self-contained and relatively easy to implement. If you'd like to contribute, but don't know where to start, [look here](https://github.com/microservices-demo/microservices-demo/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aopen%20label%3AHelpWanted). 18 | 19 | ## Direction 20 | 21 | This project does have a general direction, but we're happy to consider deviating or pivoting from the direction we're currently heading. See the introductory material for details regarding direction. 22 | 23 | With that said, there is absolutely nothing stopping you from submitting a PR. If you've taken the effort to contribute, someone will make the effort to review. 24 | 25 | ## License 26 | 27 | This project is Apache v2.0 licensed. Submitting and merging a PR implies you accept these terms. 28 | -------------------------------------------------------------------------------- /deploy/apcera/deploySockShop.sh: -------------------------------------------------------------------------------- 1 | # Deploys the Sock Shop apps in the targeted cluster 2 | # Note that the first run against a cluster may be slower 3 | # than subsequent runs, since none of the docker layers will be cached 4 | 5 | # The code below will set the CLUSTER and NAMESPACE variables for you. 6 | # These are used in the sockshop-docker.json Multi-Resource Manifest file.
7 | 8 | # Set namespace to user's default namespace 9 | apc namespace -d 10 | 11 | # Run apc target command and parse results to determine current cluster 12 | OUT=`apc target` 13 | CLUSTER=`echo $OUT | cut -f2 -d" " | sed 's/[http[s]*:\/\///' | sed 's/]//' | cut -f1 -d:` 14 | 15 | # append /sockshop to user's default namespace returned by apc target 16 | NAMESPACE=`echo $OUT | cut -f9 -d" " | sed 's/"//g'`/sockshop 17 | 18 | # echo the variables that were set automatically 19 | echo Setting CLUSTER to $CLUSTER 20 | echo Setting NAMESPACE to $NAMESPACE 21 | 22 | # Change NAMESPACE if you don't like the default generated above 23 | # But if you change it here, you'll need to change it in other scripts 24 | # Also, if you are not an admin user, you might need policy modifications 25 | #NAMESPACE= 26 | 27 | # set actual namespace to the targeted namespace 28 | apc namespace ${NAMESPACE} 29 | 30 | # This command loads all the Docker images from the sockshop-docker.json manifest file 31 | apc manifest deploy sockshop-docker.json -- --NAMESPACE ${NAMESPACE} --CLUSTER ${CLUSTER} 32 | 33 | # Add affinity tags to the main services to keep them with their databases 34 | apc app attract carts --to carts-db --hard --batch --restart --silent 35 | apc app attract catalogue --to catalogue-db --hard --batch --restart --silent 36 | apc app attract orders --to orders-db --hard --batch --restart --silent 37 | apc app attract user --to user-db --hard --batch --restart --silent 38 | 39 | # Start the apps 40 | ./startSockShop.sh 41 | -------------------------------------------------------------------------------- /deploy/docker-compose/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # How frequently to scrape targets by default. 3 | scrape_interval: 1m 4 | # How long until a scrape request times out. 5 | scrape_timeout: 10s 6 | # How frequently to evaluate rules. 7 | evaluation_interval: 1m 8 | 9 | rule_files: 10 | - "/etc/prometheus/alert.rules" 11 | 12 | alerting: 13 | 14 | scrape_configs: 15 | - job_name: "frontend" 16 | scrape_interval: 5s 17 | metrics_path: 'metrics' 18 | static_configs: 19 | - targets: ['edge-router'] 20 | 21 | # The job name assigned to scraped metrics by default. 22 | - job_name: "catalogue" 23 | # How frequently to scrape targets from this job. 24 | scrape_interval: 5s 25 | # List of labeled statically configured targets for this job. 26 | static_configs: 27 | # The targets specified by the static config. 28 | - targets: ['catalogue'] 29 | 30 | - job_name: "payment" 31 | scrape_interval: 5s 32 | static_configs: 33 | - targets: ['payment'] 34 | 35 | - job_name: "user" 36 | scrape_interval: 5s 37 | static_configs: 38 | - targets: ['user'] 39 | 40 | - job_name: "orders" 41 | scrape_interval: 5s 42 | # The HTTP resource path on which to fetch metrics from targets. 
43 | metrics_path: 'metrics' 44 | static_configs: 45 | - targets: ['orders'] 46 | 47 | - job_name: "cart" 48 | scrape_interval: 5s 49 | metrics_path: 'metrics' 50 | static_configs: 51 | - targets: ['carts'] 52 | 53 | - job_name: "shipping" 54 | scrape_interval: 5s 55 | metrics_path: 'metrics' 56 | static_configs: 57 | - targets: ['shipping'] 58 | 59 | - job_name: "queue-master" 60 | scrape_interval: 5s 61 | metrics_path: 'prometheus' 62 | static_configs: 63 | - targets: ['queue-master'] 64 | 65 | -------------------------------------------------------------------------------- /shippable.jobs.yml: -------------------------------------------------------------------------------- 1 | jobs: 2 | # This file contains jobs for an automated CI/CD workflow using Shippable 3 | # Pipelines 4 | 5 | ################################ 6 | 7 | # add pipeline workflow for Sock Shop FRONT-END component 8 | 9 | # manifest gen 10 | - name: man-front-end 11 | type: manifest 12 | steps: 13 | - IN: img-front-end 14 | - IN: img-opts-front-end-test 15 | - TASK: managed 16 | 17 | # TEST deployment to Amazon ECS 18 | - name: ecs-deploy-test-front-end 19 | type: deploy 20 | steps: 21 | - IN: man-front-end 22 | - IN: params-front-end-test 23 | - IN: trigger-front-end-test 24 | - IN: cluster-demo-ecs 25 | - IN: alb-front-end-test 26 | applyTo: 27 | - manifest: man-front-end 28 | image: img-front-end 29 | port: 8080 30 | - TASK: managed 31 | 32 | # # Create release for PROD 33 | # - name: release-front-end 34 | # type: release 35 | # steps: 36 | # - IN: ver-front-end 37 | # switch: off 38 | # - IN: ecs-deploy-test-front-end 39 | # switch: off 40 | # - IN: trigger-front-end-release 41 | # - TASK: managed 42 | # bump: patch 43 | 44 | # move this block to separate repo to limit PROD deployment to different users 45 | # PROD deployment to Amazon ECS 46 | - name: ecs-deploy-prod-front-end 47 | type: deploy 48 | steps: 49 | - IN: ecs-deploy-test-front-end 50 | switch: off 51 | # - IN: release-front-end 52 | # switch: off 53 | - IN: img-opts-front-end-prod 54 | - IN: params-front-end-prod 55 | - IN: replicas-front-end-prod 56 | - IN: trigger-front-end-prod 57 | - IN: alb-front-end-prod 58 | applyTo: 59 | - manifest: man-front-end 60 | image: img-front-end 61 | port: 8080 62 | - IN: cluster-demo-ecs 63 | - TASK: managed 64 | 65 | ################################ 66 | -------------------------------------------------------------------------------- /deploy/micro-sock/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # minimal compose for a simulation of https://github.com/weaveworks/weaveDemo 2 | 3 | version: '2' 4 | 5 | services: 6 | front_end: 7 | image: weaveworksdemos/micro-sock 8 | container_name: front_end 9 | command: -l login catalogue orders 10 | edge_router: 11 | image: weaveworksdemos/micro-sock 12 | container_name: edge_router 13 | command: front_end 14 | catalogue: 15 | image: weaveworksdemos/micro-sock 16 | container_name: catalogue 17 | command: -l 18 | accounts: 19 | image: weaveworksdemos/micro-sock 20 | container_name: accounts 21 | command: -l accounts-db 22 | accounts-db: 23 | image: weaveworksdemos/micro-sock 24 | container_name: accounts-db 25 | command: -l 26 | cart: 27 | image: weaveworksdemos/micro-sock 28 | container_name: cart 29 | command: -l carts-db 30 | carts-db: 31 | image: weaveworksdemos/micro-sock 32 | container_name: carts-db 33 | command: -l 34 | orders: 35 | image: weaveworksdemos/micro-sock 36 | container_name: orders 37 | command: -l 
orders-db accounts cart payment shipping 38 | orders-db: 39 | image: weaveworksdemos/micro-sock 40 | container_name: orders-db 41 | command: -l 42 | shipping: 43 | image: weaveworksdemos/micro-sock 44 | container_name: shipping 45 | command: -l rabbitmq 46 | queue-master: 47 | image: weaveworksdemos/micro-sock 48 | container_name: queue-master 49 | command: -l rabbitmq 50 | queue-worker: 51 | image: weaveworksdemos/micro-sock 52 | container_name: queue-worker 53 | command: -l rabbitmq 54 | rabbitmq: 55 | image: weaveworksdemos/micro-sock 56 | container_name: rabbitmq 57 | command: -l 58 | payment: 59 | image: weaveworksdemos/micro-sock 60 | container_name: payment 61 | command: -l 62 | login: 63 | image: weaveworksdemos/micro-sock 64 | container_name: login 65 | command: -l 66 | -------------------------------------------------------------------------------- /deploy/docker-swarm/packer/preseed.cfg: -------------------------------------------------------------------------------- 1 | choose-mirror-bin mirror/http/proxy string 2 | d-i base-installer/kernel/override-image string linux-server 3 | d-i clock-setup/utc boolean true 4 | d-i clock-setup/utc-auto boolean true 5 | d-i finish-install/reboot_in_progress note 6 | d-i grub-installer/only_debian boolean true 7 | d-i grub-installer/with_other_os boolean true 8 | d-i partman-auto-lvm/guided_size string max 9 | d-i partman-auto/choose_recipe select atomic 10 | d-i partman-auto/method string lvm 11 | d-i partman-lvm/confirm boolean true 12 | d-i partman-lvm/confirm boolean true 13 | d-i partman-lvm/confirm_nooverwrite boolean true 14 | d-i partman-lvm/device_remove_lvm boolean true 15 | d-i partman/choose_partition select finish 16 | d-i partman/confirm boolean true 17 | d-i partman/confirm_nooverwrite boolean true 18 | d-i partman/confirm_write_new_label boolean true 19 | d-i pkgsel/include string openssh-server cryptsetup build-essential libssl-dev libreadline-dev zlib1g-dev linux-source dkms nfs-common 20 | d-i pkgsel/install-language-support boolean false 21 | d-i pkgsel/update-policy select none 22 | d-i pkgsel/upgrade select full-upgrade 23 | d-i time/zone string UTC 24 | tasksel tasksel/first multiselect standard, ubuntu-server 25 | 26 | d-i console-setup/ask_detect boolean false 27 | d-i keyboard-configuration/layoutcode string us 28 | d-i keyboard-configuration/modelcode string pc105 29 | d-i debian-installer/locale string en_US 30 | 31 | # Create vagrant user account. 
32 | d-i passwd/user-fullname string vagrant 33 | d-i passwd/username string vagrant 34 | d-i passwd/user-password password vagrant 35 | d-i passwd/user-password-again password vagrant 36 | d-i user-setup/allow-password-weak boolean true 37 | d-i user-setup/encrypt-home boolean false 38 | d-i passwd/user-default-groups vagrant sudo 39 | d-i passwd/user-uid string 900 40 | d-i preseed/late_command string \ 41 | echo "%vagrant ALL=(ALL:ALL) NOPASSWD:ALL" > /target/etc/sudoers.d/vagrant && chmod 0440 /target/etc/sudoers.d/vagrant 42 | -------------------------------------------------------------------------------- /deploy/apcera/startSockShop.sh: -------------------------------------------------------------------------------- 1 | # Starts all the Sock Shop apps 2 | 3 | # Automatically detects correct namespace 4 | 5 | # Set namespace to user's default namespace 6 | apc namespace -d 7 | 8 | # append /sockshop to user's default namespace returned by apc namespace 9 | OUT=`apc namespace` 10 | NAMESPACE=`echo $OUT | cut -f3 -d" " | sed "s/'//g"`/sockshop 11 | echo ${NAMESPACE} 12 | 13 | # Change NAMESPACE if you don't like the default generated above 14 | # But if you change it here, you'll need to change it in other scripts 15 | #NAMESPACE= 16 | 17 | # set actual namespace to $NAMESPACE 18 | apc namespace ${NAMESPACE} 19 | 20 | echo "Starting Sock Shop apps in namespace: ${NAMESPACE}" 21 | 22 | # If any of the apps have trouble connecting to other apps or 23 | # databases, you could increase the sleep times below or add more of them. 24 | # echoing of logs is suppressed here, but they can be viewed in the Apcera Web Console 25 | # or with the apc app logs command 26 | echo starting catalogue-db 27 | apc app start catalogue-db --silent & 28 | echo starting user-db 29 | apc app start user-db --silent & 30 | echo starting orders-db 31 | apc app start orders-db --silent & 32 | echo starting carts-db 33 | apc app start carts-db --silent & 34 | echo starting rabbitmq 35 | apc app start rabbitmq --silent & 36 | echo starting zipkin 37 | apc app start zipkin --silent & 38 | sleep 15 39 | echo starting catalogue 40 | apc app start catalogue --silent & 41 | echo starting payment 42 | apc app start payment --silent & 43 | echo starting shipping 44 | apc app start shipping --silent & 45 | echo starting user 46 | apc app start user --silent & 47 | echo starting carts 48 | apc app start carts --silent & 49 | echo starting orders 50 | apc app start orders --silent & 51 | echo starting queue-master 52 | apc app start queue-master --silent & 53 | sleep 10 54 | echo starting front-end 55 | apc app start front-end --silent 56 | sleep 10 57 | echo starting user-sim 58 | apc app start user-sim --silent 59 | 60 | # List the apps to verify they are all started 61 | apc app list 62 | -------------------------------------------------------------------------------- /deploy/docker-compose/docker-compose.logging.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | elasticsearch: 5 | image: elasticsearch 6 | hostname: elasticsearch 7 | kibana: 8 | image: kibana 9 | hostname: kibana 10 | depends_on: 11 | - elasticsearch 12 | ports: 13 | - '5601:5601' 14 | log-server: 15 | image: weaveworksdemos/log-server 16 | hostname: log-server 17 | depends_on: 18 | - elasticsearch 19 | ports: 20 | - '24224:24224' 21 | front-end: 22 | logging: 23 | driver: fluentd 24 | depends_on: 25 | - log-server 26 | edge-router: 27 | logging: 28 | driver: fluentd 29 | depends_on: 
30 | - log-server 31 | catalogue: 32 | logging: 33 | driver: fluentd 34 | depends_on: 35 | - log-server 36 | catalogue-db: 37 | logging: 38 | driver: fluentd 39 | depends_on: 40 | - log-server 41 | carts: 42 | logging: 43 | driver: fluentd 44 | depends_on: 45 | - log-server 46 | carts-db: 47 | logging: 48 | driver: fluentd 49 | depends_on: 50 | - log-server 51 | orders: 52 | logging: 53 | driver: fluentd 54 | depends_on: 55 | - log-server 56 | orders-db: 57 | logging: 58 | driver: fluentd 59 | depends_on: 60 | - log-server 61 | shipping: 62 | logging: 63 | driver: fluentd 64 | depends_on: 65 | - log-server 66 | queue-master: 67 | logging: 68 | driver: fluentd 69 | depends_on: 70 | - log-server 71 | rabbitmq: 72 | logging: 73 | driver: fluentd 74 | depends_on: 75 | - log-server 76 | payment: 77 | logging: 78 | driver: fluentd 79 | depends_on: 80 | - log-server 81 | user: 82 | logging: 83 | driver: fluentd 84 | depends_on: 85 | - log-server 86 | user-db: 87 | logging: 88 | driver: fluentd 89 | depends_on: 90 | - log-server 91 | user-sim: 92 | logging: 93 | driver: fluentd 94 | depends_on: 95 | - log-server 96 | -------------------------------------------------------------------------------- /graphs/README.md: -------------------------------------------------------------------------------- 1 | # Grafana Dashboards 2 | We have included a set of dashboards for Grafana in this demo application. 3 | Most of the source for these dashboards looks exactly the same. For the sake of 4 | keeping our code DRY, we have decided to generate our dashboards using [grafanalib](https://github.com/weaveworks/grafanalib). 5 | 6 | # Requirements 7 | | what | version | 8 | | ------ | --------- | 9 | | docker | `>= 17` | 10 | 11 | # Getting Started 12 | Make sure that you run the following commands from within the `graphs/` directory: 13 | 14 | ``` 15 | cd graphs/ 16 | ``` 17 | 18 | ## The Base Image 19 | All the tooling required to generate the dashboards is inside a container. Build it like this: 20 | 21 | ``` 22 | docker build -t weaveworks/grafanalib . 23 | ``` 24 | 25 | ## Generating Dashboards 26 | 27 | ``` 28 | docker run --rm -it -v ${PWD}:/opt/code weaveworks/grafanalib /bin/sh -c 'ls /opt/code/*.dashboard.py | parallel generate-dashboard -o {.}.json {}' 29 | ``` 30 | 31 | This will output all the dashboards for Grafana in JSON format, ready to be imported. 32 | 33 | ``` 34 | ls -l *.json 35 | -rw-r--r-- 1 john admin 31361 Aug 30 16:12 kubernetes.dashboard.json 36 | -rw-r--r-- 1 john admin 16729 Aug 30 16:12 prometheus.dashboard.json 37 | -rw-r--r-- 1 john admin 40797 Aug 30 16:12 sock-shop-performance.dashboard.json 38 | -rw-r--r-- 1 john admin 17859 Aug 30 16:12 sock-shop-resources.dashboard.json 39 | ``` 40 | 41 | ## Importing the dashboards 42 | To import the dashboards, update the `deploy/kubernetes/manifests-monitoring/grafana-configmap.yaml` file with 43 | each dashboard JSON accordingly. 44 | For example, to update the *Sock Shop Performance* dashboard, find the `sock-shop-performance-dashboard.json` line in the `grafana-configmap.yaml` 45 | file and fill the field with the contents of the `sock-shop-performance-dashboard.json` file. 46 | 47 | The same process needs to be followed for the rest of the dashboards. 48 | 49 | Find the instructions on how to deploy Grafana and the dashboards [here](https://microservices-demo.github.io/deployment/monitoring-kubernetes.html).
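For orientation, a minimal `*.dashboard.py` source looks roughly like the sketch below. It is illustrative rather than one of the shipped dashboards: the panel, query and datasource name (`prometheus`) are assumptions, though the metric matches the `request_duration_seconds` histogram the services expose, and `generate-dashboard` picks up the module-level `dashboard` attribute.

```python
from grafanalib.core import Dashboard, Graph, Row, Target

# Minimal illustrative dashboard; not one of the dashboards shipped in graphs/.
dashboard = Dashboard(
    title="Example Sock Shop Dashboard",
    rows=[
        Row(panels=[
            Graph(
                title="Request rate by service",
                dataSource="prometheus",  # assumed datasource name
                targets=[
                    Target(
                        # Per-service request rate from the shared histogram.
                        expr='sum(rate(request_duration_seconds_count[1m])) by (name)',
                        legendFormat="{{name}}",
                    ),
                ],
            ),
        ]),
    ],
).auto_panel_ids()  # assign unique panel ids before rendering to JSON
```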
50 | -------------------------------------------------------------------------------- /healthcheck/healthcheck.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | # Check Health of each service 4 | ###################################### 5 | require 'net/http' 6 | require 'optparse' 7 | require 'json' 8 | require 'awesome_print' 9 | 10 | $stdout.sync = true 11 | options = {} 12 | health = {} 13 | 14 | OptionParser.new do |opts| 15 | opts.banner = "Usage healthcheck.rb -h [host] -t [timeout] -r [retry]" 16 | opts.on("-h", "--hostname localhost", "Specify hostname") do |v| 17 | options[:hostname] = v 18 | end 19 | opts.on("-t", "--timeout 60", OptionParser::DecimalInteger, "Specify timeout in seconds") do |v| 20 | options[:timeout] = v 21 | end 22 | opts.on("-r", "--retry n", OptionParser::DecimalInteger, "Specify number of times to retry") do |v| 23 | options[:retry] = v 24 | end 25 | opts.on("-d", "--delay 60", OptionParser::DecimalInteger, "Specify seconds to delay") do |v| 26 | options[:delay] = v 27 | end 28 | opts.on("-s", "--services X,Y", "Specify services to check") do |v| 29 | options[:services] = v 30 | end 31 | end.parse! 32 | 33 | unless options.key?(:services) 34 | puts "\e[31mno services specified\e[0m" 35 | exit! 36 | end 37 | 38 | unless options.key?(:retry) 39 | options[:retry] = 1 40 | end 41 | 42 | services = options[:services].split(',') 43 | (1..options[:retry]).each do |i| 44 | 45 | if options.key?(:delay) 46 | puts "\e[35mSleeping for #{options[:delay]}s...\e[0m" 47 | sleep options[:delay] 48 | end 49 | 50 | services.each do |service| 51 | begin 52 | url = service 53 | if options.key?(:hostname) 54 | url = "#{options[:hostname]}/#{url}" 55 | end 56 | resp = Net::HTTP.get_response(url, '/health') 57 | rescue 58 | health[service] = "err" 59 | else 60 | json = JSON.parse(resp.body)['health'] 61 | json.each do |item| 62 | health[item["service"]] = item["status"] 63 | end 64 | end 65 | end 66 | ap health 67 | 68 | if health.all? { |service, status| status == "OK" } 69 | break 70 | end 71 | end 72 | 73 | unless health.all? {|service, status| status == "OK" } 74 | exit(1) 75 | end 76 | -------------------------------------------------------------------------------- /install/aws-minimesos/aws.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | default = "eu-west-1" 3 | } 4 | 5 | provider "aws" { 6 | region = "${var.aws_region}" 7 | access_key = "${var.access_key}" 8 | secret_key = "${var.secret_key}" 9 | } 10 | 11 | // Ubuntu 14.04 official hvm:ssd volumes to their region. 
12 | variable "aws_amis" { 13 | default = { 14 | ap-northeast-1 = "ami-63b44a02" 15 | ap-southeast-1 = "ami-21d30f42" 16 | eu-central-1 = "ami-26c43149" 17 | eu-west-1 = "ami-ed82e39e" 18 | sa-east-1 = "ami-dc48dcb0" 19 | us-east-1 = "ami-3bdd502c" 20 | us-west-1 = "ami-48db9d28" 21 | cn-north-1 = "ami-bead78d3" 22 | us-gov-west-1 = "ami-6770ce06" 23 | ap-southeast-2 = "ami-ba3e14d9" 24 | us-west-2 = "ami-d732f0b7" 25 | 26 | } 27 | } 28 | 29 | resource "aws_vpc" "terraform" { 30 | cidr_block = "10.0.0.0/16" 31 | enable_dns_hostnames = true 32 | 33 | tags { 34 | Name = "terraform" 35 | } 36 | } 37 | 38 | resource "aws_internet_gateway" "terraform" { 39 | vpc_id = "${aws_vpc.terraform.id}" 40 | tags { 41 | Name = "terraform" 42 | } 43 | } 44 | 45 | resource "aws_subnet" "terraform" { 46 | vpc_id = "${aws_vpc.terraform.id}" 47 | cidr_block = "10.0.0.0/24" 48 | tags { 49 | Name = "terraform" 50 | } 51 | availability_zone = "eu-west-1b" 52 | 53 | map_public_ip_on_launch = true 54 | } 55 | 56 | resource "aws_route_table" "terraform" { 57 | vpc_id = "${aws_vpc.terraform.id}" 58 | 59 | route { 60 | cidr_block = "0.0.0.0/0" 61 | gateway_id = "${aws_internet_gateway.terraform.id}" 62 | } 63 | 64 | tags { 65 | Name = "terraform" 66 | } 67 | } 68 | 69 | // The Route Table Association binds our subnet and route together. 70 | resource "aws_route_table_association" "terraform" { 71 | subnet_id = "${aws_subnet.terraform.id}" 72 | route_table_id = "${aws_route_table.terraform.id}" 73 | } 74 | 75 | // The AWS Security Group is akin to a firewall. It specifies the inbound 76 | // only open required ports in a production environment. 77 | resource "aws_security_group" "terraform" { 78 | name = "terraform-web" 79 | vpc_id = "${aws_vpc.terraform.id}" 80 | 81 | ingress { 82 | protocol = -1 83 | from_port = 0 84 | to_port = 0 85 | cidr_blocks = [ 86 | "0.0.0.0/0"] 87 | } 88 | 89 | egress { 90 | protocol = -1 91 | from_port = 0 92 | to_port = 0 93 | cidr_blocks = [ 94 | "0.0.0.0/0"] 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /deploy/mesos-cni/provisionMesosDns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ARGS="$@" 4 | COMMAND="${1}" 5 | ADDRESS="${2}" 6 | SCRIPT_NAME=`basename "$0"` 7 | SCRIPT_DIR=`dirname "$0"` 8 | APP_NAME=mesos-dns 9 | 10 | if [ -z "$1" ]; then 11 | echo "Must pass master IP" 12 | exit 1 13 | fi 14 | 15 | do_provision() { 16 | sudo mkdir -p /etc/mesos-dns 17 | echo '{ 18 | "zk": "zk://'$ADDRESS':2181/mesos", 19 | "domain": "weave.local", 20 | "port": 53, 21 | "resolvers": ["8.8.8.8"], 22 | "httpport": 8123, 23 | "externalon": true 24 | }' | sudo tee /etc/mesos-dns/config.json 25 | 26 | IFACE='weave' 27 | IP=$(ip -4 address show $IFACE | grep 'inet' | sed 's/.*inet \([0-9\.]\+\).*/\1/') 28 | 29 | if grep -q nameserver "/etc/resolvconf/resolv.conf.d/head" 30 | then 31 | echo "/etc/resolvconf/resolv.conf.d/head not empty. Skipping." 32 | else 33 | echo "/etc/resolvconf/resolv.conf.d/head empty. Writing." 34 | echo "nameserver $IP" | sudo tee -a /etc/resolvconf/resolv.conf.d/head 35 | sudo rm /etc/resolv.conf 36 | sudo ln -s ../run/resolvconf/resolv.conf /etc/resolv.conf 37 | sudo resolvconf -u 38 | fi 39 | 40 | if grep -q $(hostname) "/etc/hosts" 41 | then 42 | echo "/etc/hosts not empty. Skipping." 
43 | else
44 | # Workaround to fix strange localhost hostname issue
45 | echo "$(ifconfig ens3 | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}') $(hostname)" | sudo tee -a /etc/hosts
46 | fi
47 | }
48 | 
49 | do_launch() {
50 | curl -s -X POST -H "Content-type: application/json" $ADDRESS:8080/v2/apps -d '{ "id": "'$APP_NAME'", "user": "root", "cpus": 0.1, "mem": 256, "uris": [ "https://github.com/mesosphere/mesos-dns/releases/download/v0.5.2/mesos-dns-v0.5.2-linux-amd64" ], "cmd": "mv mesos-dns-v* mesos-dns ; chmod +x mesos-dns ; ./mesos-dns -v=2 -config=/etc/mesos-dns/config.json", "instances": 3, "constraints": [["hostname", "UNIQUE"]] }'
51 | }
52 | 
53 | do_stop() {
54 | curl -s -X DELETE -H "Content-type: application/json" $ADDRESS:8080/v2/apps/$APP_NAME
55 | }
56 | 
57 | do_usage() {
58 | echo "Usage: $SCRIPT_DIR/$SCRIPT_NAME [provision|launch|stop] [MASTER_IP]"
59 | }
60 | 
61 | case "$COMMAND" in
62 | launch)
63 | do_launch
64 | ;;
65 | stop)
66 | do_stop
67 | ;;
68 | provision)
69 | do_stop
70 | do_provision
71 | ;;
72 | *)
73 | do_usage
74 | ;;
75 | esac
76 | 
-------------------------------------------------------------------------------- /deploy/docker-swarm/docker-compose.yml: --------------------------------------------------------------------------------
1 | version: "3"
2 | 
3 | services:
4 | front-end:
5 | image: weaveworksdemos/front-end:0.3.12
6 | ports:
7 | - '80:8079'
8 | environment:
9 | - reschedule=on-node-failure
10 | catalogue:
11 | image: weaveworksdemos/catalogue:0.3.5
12 | environment:
13 | - reschedule=on-node-failure
14 | catalogue-db:
15 | image: weaveworksdemos/catalogue-db:0.3.0
16 | environment:
17 | - reschedule=on-node-failure
18 | - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
19 | - MYSQL_ALLOW_EMPTY_PASSWORD=true
20 | - MYSQL_DATABASE=socksdb
21 | carts:
22 | image: weaveworksdemos/carts:0.4.8
23 | environment:
24 | - reschedule=on-node-failure
25 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false
26 | carts-db:
27 | image: mongo:3.4
28 | environment:
29 | - reschedule=on-node-failure
30 | orders:
31 | image: weaveworksdemos/orders:0.4.7
32 | environment:
33 | - reschedule=on-node-failure
34 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false
35 | orders-db:
36 | image: mongo:3.4
37 | environment:
38 | - reschedule=on-node-failure
39 | shipping:
40 | image: weaveworksdemos/shipping:0.4.8
41 | environment:
42 | - reschedule=on-node-failure
43 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false
44 | # @pidster: I've added this back in a comment, because I don't know why it is missing from this file
45 | # queue-master:
46 | # image: weaveworksdemos/queue-master:0.3.1
47 | # hostname: queue-master
48 | # volumes:
49 | # - /var/run/docker.sock:/var/run/docker.sock
50 | # dns: 172.17.0.1
51 | # environment:
52 | # - reschedule=on-node-failure
53 | rabbitmq:
54 | image: rabbitmq:3.6.8
55 | environment:
56 | - reschedule=on-node-failure
57 | payment:
58 | image: weaveworksdemos/payment:0.4.3
59 | environment:
60 | - reschedule=on-node-failure
61 | user:
62 | image: weaveworksdemos/user:0.4.4
63 | environment:
64 | - MONGO_HOST=user-db:27017
65 | - reschedule=on-node-failure
66 | user-db:
67 | image: weaveworksdemos/user-db:0.4.0
68 | environment:
69 | - reschedule=on-node-failure
70 | user-sim:
71 | image: weaveworksdemos/load-test:0.1.1
72 | command: "-d 60 -r 200
-c 2 -h front-end:8079" 73 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 6 | # Dependency directories 7 | node_modules 8 | jspm_packages 9 | 10 | # Optional npm cache directory 11 | .npm 12 | 13 | ### Go template 14 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 15 | *.o 16 | *.a 17 | *.so 18 | 19 | # Folders 20 | _obj 21 | _test 22 | 23 | # Architecture specific extensions/prefixes 24 | *.[568vq] 25 | [568vq].out 26 | 27 | *.cgo1.go 28 | *.cgo2.c 29 | _cgo_defun.c 30 | _cgo_gotypes.go 31 | _cgo_export.* 32 | 33 | _testmain.go 34 | 35 | *.exe 36 | *.test 37 | *.prof 38 | ### Java template 39 | /target/ 40 | *.class 41 | 42 | # Mobile Tools for Java (J2ME) 43 | .mtj.tmp/ 44 | 45 | # Package Files # 46 | *.war 47 | *.ear 48 | 49 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 50 | hs_err_pid* 51 | ### OSX template 52 | .DS_Store 53 | .AppleDouble 54 | .LSOverride 55 | 56 | # Icon must end with two \r 57 | Icon 58 | 59 | # Thumbnails 60 | ._* 61 | 62 | # Files that might appear in the root of a volume 63 | .DocumentRevisions-V100 64 | .fseventsd 65 | .Spotlight-V100 66 | .TemporaryItems 67 | .Trashes 68 | .VolumeIcon.icns 69 | 70 | # Directories potentially created on remote AFP share 71 | .AppleDB 72 | .AppleDesktop 73 | Network Trash Folder 74 | Temporary Items 75 | .apdisk 76 | ### JetBrains template 77 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 78 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 79 | 80 | # User-specific stuff: 81 | .idea 82 | 83 | ## File-based project format: 84 | *.iws 85 | *.iml 86 | 87 | ## Plugin-specific files: 88 | 89 | # IntelliJ 90 | /out/ 91 | 92 | # mpeltonen/sbt-idea plugin 93 | .idea_modules/ 94 | 95 | # JIRA plugin 96 | atlassian-ide-plugin.xml 97 | 98 | # Crashlytics plugin (for Android Studio and IntelliJ) 99 | com_crashlytics_export_strings.xml 100 | crashlytics.properties 101 | crashlytics-build.properties 102 | fabric.properties 103 | # Created by .ignore support plugin (hsz.mobi) 104 | 105 | # Maven builds 106 | */target 107 | */*/target 108 | 109 | # AWS ECS install scripts generates an SSH key file 110 | weave-ecs-demo-key.pem 111 | 112 | # Load test generates pyc files 113 | *.pyc 114 | 115 | # Ignore Vagrant cache files 116 | *.vagrant/ 117 | 118 | # Ignore coverage reports 119 | coverage/ 120 | 121 | # Tern 122 | .tern-port 123 | 124 | # docker-swarm infra 125 | packer_cache 126 | docker-swarm.box 127 | *.tfvars 128 | join.sh 129 | output-virtualbox-iso 130 | 131 | # Ignore grafanalib output 132 | /graphs/__pycache__/ 133 | /graphs/*.json 134 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/grafana-import-dash-batch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: grafana-import-dashboards 5 | namespace: monitoring 6 | labels: 7 | app: grafana 8 | component: import-dashboards 9 | spec: 10 | template: 11 | metadata: 12 | name: grafana-import-dashboards 13 | labels: 14 | app: grafana 15 | component: import-dashboards 16 | annotations: 17 | pod.beta.kubernetes.io/init-containers: '[ 18 | { 19 | "name": "wait-for-endpoints", 20 | 
"image": "giantswarm/tiny-tools", 21 | "imagePullPolicy": "IfNotPresent", 22 | "command": ["fish", "-c", "echo \"waiting for endpoints...\"; while true; set endpoints (curl -s --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header \"Authorization: Bearer \"(cat /var/run/secrets/kubernetes.io/serviceaccount/token) https://kubernetes.default.svc/api/v1/namespaces/monitoring/endpoints/grafana); echo $endpoints | jq \".\"; if test (echo $endpoints | jq -r \".subsets[].addresses | length\") -gt 0; exit 0; end; echo \"waiting...\";sleep 1; end"], 23 | "args": ["monitoring", "grafana"] 24 | } 25 | ]' 26 | spec: 27 | containers: 28 | - name: grafana-import-dashboards 29 | image: giantswarm/tiny-tools 30 | command: ["/bin/sh", "-c"] 31 | workingDir: /opt/grafana-import-dashboards 32 | args: 33 | - > 34 | for file in *-datasource.json ; do 35 | if [ -e "$file" ] ; then 36 | echo "importing $file" && 37 | curl --silent --fail --show-error \ 38 | --request POST http://admin:admin@grafana/api/datasources \ 39 | --header "Content-Type: application/json" \ 40 | --header "Accept: application/json" \ 41 | --data-binary "@$file" ; 42 | echo "" ; 43 | fi 44 | done ; 45 | for file in *-dashboard.json ; do 46 | if [ -e "$file" ] ; then 47 | echo "importing $file" && 48 | curl --silent --fail --show-error \ 49 | --request POST http://admin:admin@grafana/api/dashboards/import \ 50 | --header "Content-Type: application/json" \ 51 | --header "Accept: application/json" \ 52 | --data-binary "@$file" ; 53 | echo "" ; 54 | fi 55 | done ; 56 | volumeMounts: 57 | - name: config-volume 58 | mountPath: /opt/grafana-import-dashboards 59 | restartPolicy: Never 60 | volumes: 61 | - name: config-volume 62 | configMap: 63 | name: grafana-import-dashboards 64 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-monitoring/prometheus-exporter-disk-usage-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: node-directory-size-metrics 5 | namespace: monitoring 6 | annotations: 7 | description: | 8 | This `DaemonSet` provides metrics in Prometheus format about disk usage on the nodes. 9 | The container `read-du` reads in sizes of all directories below /mnt and writes that to `/tmp/metrics`. It only reports directories larger then `100M` for now. 10 | The other container `caddy` just hands out the contents of that file on request via `http` on `/metrics` at port `9102` which are the defaults for Prometheus. 11 | These are scheduled on every node in the Kubernetes cluster. 12 | To choose directories from the node to check, just mount them on the `read-du` container below `/mnt`. 13 | spec: 14 | template: 15 | metadata: 16 | labels: 17 | app: node-directory-size-metrics 18 | annotations: 19 | prometheus.io/scrape: 'true' 20 | prometheus.io/port: '9102' 21 | description: | 22 | This `Pod` provides metrics in Prometheus format about disk usage on the node. 23 | The container `read-du` reads in sizes of all directories below /mnt and writes that to `/tmp/metrics`. It only reports directories larger then `100M` for now. 24 | The other container `caddy` just hands out the contents of that file on request on `/metrics` at port `9102` which are the defaults for Prometheus. 25 | This `Pod` is scheduled on every node in the Kubernetes cluster. 26 | To choose directories from the node to check just mount them on `read-du` below `/mnt`. 
27 | spec:
28 | containers:
29 | - name: read-du
30 | image: giantswarm/tiny-tools
31 | imagePullPolicy: Always
32 | # FIXME threshold via env var
33 | # The 100M threshold is currently hard-coded in the du call below.
34 | command:
35 | - fish
36 | - --command
37 | - |
38 | while true
39 | for directory in (du --bytes --separate-dirs --threshold=100M /mnt)
40 | echo $directory | read size path
41 | echo "node_directory_size_bytes{path=\"$path\"} $size" \
42 | >> /tmp/metrics-temp
43 | end
44 | mv /tmp/metrics-temp /tmp/metrics
45 | sleep 300
46 | end
47 | volumeMounts:
48 | - name: host-fs-var
49 | mountPath: /mnt/var
50 | readOnly: true
51 | - name: metrics
52 | mountPath: /tmp
53 | - name: caddy
54 | image: dockermuenster/caddy:0.9.3
55 | command:
56 | - "caddy"
57 | - "-port=9102"
58 | - "-root=/var/www"
59 | ports:
60 | - containerPort: 9102
61 | volumeMounts:
62 | - name: metrics
63 | mountPath: /var/www
64 | volumes:
65 | - name: host-fs-var
66 | hostPath:
67 | path: /var
68 | - name: metrics
69 | emptyDir:
70 | medium: Memory
71 | 
-------------------------------------------------------------------------------- /shippable.resources.yml: --------------------------------------------------------------------------------
1 | resources:
2 | # This file contains resources to be used as inputs to an automated CI/CD
3 | # workflow using Shippable Pipelines
4 | 
5 | ################################
6 | 
7 | # Specify shared infrastructure resources
8 | 
9 | # AWS cluster information
10 | - name: cluster-demo-ecs
11 | type: cluster
12 | integration: shippable-aws
13 | pointer:
14 | sourceName: "ecs-weave-shippable-demo"
15 | region: "us-east-1"
16 | 
17 | # AWS ALB target group for TEST environment
18 | - name: alb-front-end-test #required
19 | type: loadBalancer #required
20 | pointer:
21 | sourceName: "arn:aws:elasticloadbalancing:us-east-1:288971733297:targetgroup/frontendTESTTG/815ea30a8dcbaabc"
22 | method: application
23 | 
24 | # AWS ALB target group for PROD environment
25 | - name: alb-front-end-prod #required
26 | type: loadBalancer #required
27 | pointer:
28 | sourceName: "arn:aws:elasticloadbalancing:us-east-1:288971733297:targetgroup/frontendPRODTG/dea24e9a89ef88f4"
29 | method: application
30 | 
31 | ################################
32 | 
33 | # Specify shared application resources
34 | 
35 | # Docker Image Options
36 | - name: img-opts-shared-test
37 | type: dockerOptions
38 | version:
39 | memory: 64
40 | # cpuShares: 128
41 | 
42 | - name: img-opts-shared-prod
43 | type: dockerOptions
44 | version:
45 | memory: 128
46 | # cpuShares: 256
47 | 
48 | ################################
49 | # Specify CD pipeline resources
50 | 
51 | # Pipelines to configure:
52 | # * FRONT-END
53 | 
54 | #--------------------
55 | 
56 | # CI/CD pipeline resources for FRONT-END component
57 | 
58 | # Component version seed for release management
59 | - name: ver-front-end
60 | type: version
61 | seed:
62 | versionName: "1.0.0"
63 | 
64 | # Docker image information
65 | - name: img-front-end
66 | type: image
67 | pointer:
68 | sourceName: 288971733297.dkr.ecr.us-east-1.amazonaws.com/front-end
69 | isPull: false
70 | seed:
71 | versionName: master.1
72 | 
73 | # Docker image options for TEST environment
74 | - name: img-opts-front-end-test
75 | type: dockerOptions
76 | version:
77 | portMappings:
78 | - 0:8080
79 | 
80 | # Environment variables for TEST environment
81 | - name: params-front-end-test
82 | type: params
83 | version:
84 | params:
85 | ENVIRONMENT: "development"
86 | NODE_ENV: "development"
87 | PORT: 8080
88 | 
89 | # Docker image options for PROD environment
90 | 
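# (Note: host port 0 in the portMappings below asks ECS for a dynamically
# assigned host port, which the ALB target group then tracks; this allows
# several copies of the task to run on one instance.)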
- name: img-opts-front-end-prod 91 | type: dockerOptions 92 | version: 93 | portMappings: 94 | - 0:8080 95 | 96 | # Environment variables for PROD environment 97 | - name: params-front-end-prod 98 | type: params 99 | version: 100 | params: 101 | ENVIRONMENT: "production" 102 | NODE_ENV: "production" 103 | PORT: 8080 104 | 105 | # PROD replica controller 106 | - name: replicas-front-end-prod 107 | type: replicas 108 | version: 109 | count: 2 110 | 111 | #-------------------- 112 | -------------------------------------------------------------------------------- /graphs/sock-shop-performance.dashboard.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import operator 3 | import os 4 | 5 | from grafanalib.core import * 6 | 7 | def service_row(datasource, serviceTitle, serviceName): 8 | return Row( 9 | title=serviceTitle, 10 | showTitle=True, 11 | panels=[ 12 | service_qps_graph(datasource, serviceTitle, serviceName), 13 | service_latency_graph(datasource, serviceTitle, serviceName), 14 | ], 15 | ) 16 | 17 | def service_qps_graph(datasource, serviceTitle, serviceName): 18 | title = serviceTitle + " QPS" 19 | return Graph( 20 | title=title, 21 | dataSource=datasource, 22 | span=6, 23 | lineWidth=1, 24 | legend=Legend( 25 | show=True, 26 | alignAsTable=True, 27 | ), 28 | targets=[ 29 | Target( 30 | expr='sum(rate(request_duration_seconds_count{name="%s",status_code=~"2..",route!="metrics"}[1m])) * 100' % (serviceName), 31 | legendFormat="2xx", 32 | refId='A', 33 | ), 34 | Target( 35 | expr='sum(rate(request_duration_seconds_count{name="%s",status_code=~"4.+|5.+"}[1m])) * 100' % (serviceName), 36 | legendFormat="4xx/5xx", 37 | refId='B', 38 | ), 39 | ], 40 | xAxis=XAxis(mode="time"), 41 | yAxes=[ 42 | YAxis(format=OPS_FORMAT, show=True, label="QPS (1 min)", min=0), 43 | YAxis(format=SHORT_FORMAT, show=True, min=None), 44 | ], 45 | ) 46 | 47 | def service_latency_graph(datasource, serviceTitle, serviceName): 48 | title = serviceTitle + " Latency" 49 | return Graph( 50 | title=title, 51 | dataSource=datasource, 52 | span=6, 53 | lineWidth=1, 54 | targets=[ 55 | Target( 56 | expr='histogram_quantile(0.99, sum(rate(request_duration_seconds_bucket{name="%s"}[1m])) by (name, le))' % (serviceName), 57 | legendFormat="99th quantile", 58 | refId='A', 59 | ), 60 | Target( 61 | expr='histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket{name="%s"}[1m])) by (name, le))' % (serviceName), 62 | legendFormat="50th quantile", 63 | refId='B', 64 | ), 65 | Target( 66 | expr='sum(rate(request_duration_seconds_sum{name="%s"}[1m])) / sum(rate(request_duration_seconds_count{name="%s"}[1m]))' % (serviceName, serviceName), 67 | legendFormat="mean", 68 | refId='C', 69 | ), 70 | ], 71 | xAxis=XAxis(mode="time"), 72 | yAxes=[ 73 | YAxis(format=SECONDS_FORMAT, show=True, min=0), 74 | YAxis(format=SHORT_FORMAT, show=True, min=None), 75 | ], 76 | ) 77 | 78 | datasource = "prometheus" 79 | rows = [] 80 | services = [ 81 | {"name": "catalogue", "title": "Catalogue"}, 82 | {"name": "carts", "title": "Cart"}, 83 | {"name": "orders", "title": "Orders"}, 84 | {"name": "payment", "title": "Payment"}, 85 | {"name": "shipping", "title": "Shipping"}, 86 | {"name": "user", "title": "User"}, 87 | {"name": "front-end", "title": "Front End"}, 88 | ] 89 | 90 | for service in services: 91 | rows.append(service_row(datasource, service["title"], service["name"])) 92 | 93 | dashboard = Dashboard( 94 | title="Sock Shop Performance", 95 | time=Time("now-30m", "now"), 96 | 
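# Rendering sketch: grafanalib's generate-dashboard CLI turns this script
# into dashboard JSON, e.g.
#   generate-dashboard -o sock-shop-performance.json sock-shop-performance.dashboard.py
# (illustrative invocation; the generated *.json is git-ignored via /graphs/*.json)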
timezone="browser", 97 | refresh="5s", 98 | rows=rows, 99 | ).auto_panel_ids() 100 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/gcloud/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = "${var.project_name}" 3 | region = "${var.region}" 4 | credentials = "${file("${var.credentials_file_path}")}" 5 | } 6 | 7 | resource "google_compute_firewall" "default" { 8 | name = "docker-swarm-firewall" 9 | network = "default" 10 | 11 | allow { 12 | protocol = "tcp" 13 | ports = ["80", "30000", "22", "2377", "7946", "4789"] 14 | } 15 | 16 | source_ranges = ["0.0.0.0/0"] 17 | target_tags = ["docker-swarm-nodes", "docker-swarm-master"] 18 | 19 | } 20 | 21 | resource "google_compute_instance" "docker-swarm-node" { 22 | depends_on = [ "google_compute_instance.docker-swarm-master" ] 23 | count = "${var.num_nodes}" 24 | machine_type = "${var.machine_type}" 25 | name = "docker-swarm-node-${count.index}" 26 | zone = "${var.region_zone}" 27 | tags = [ "docker-swarm-nodes" ] 28 | 29 | 30 | disk { 31 | image = "docker-swarm" 32 | } 33 | 34 | network_interface { 35 | network = "default" 36 | access_config { 37 | # Ephemeral 38 | } 39 | } 40 | 41 | metadata { 42 | ssh-keys = "ubuntu:${file("${var.public_key_path}")}" 43 | } 44 | 45 | connection { 46 | user = "ubuntu" 47 | private_key = "${file("${var.private_key_path}")}" 48 | } 49 | 50 | provisioner "file" { 51 | source = "join.sh", 52 | destination = "/tmp/join.sh" 53 | } 54 | 55 | provisioner "remote-exec" { 56 | inline = [ 57 | "sudo service docker start", 58 | "chmod +x /tmp/join.sh", 59 | "/tmp/join.sh" 60 | ] 61 | } 62 | } 63 | 64 | resource "google_compute_instance" "docker-swarm-master" { 65 | name = "docker-swarm-master" 66 | machine_type = "${var.machine_type}" 67 | zone = "${var.region_zone}" 68 | tags = [ "docker-swarm-master" ] 69 | 70 | 71 | disk { 72 | image = "docker-swarm" 73 | } 74 | 75 | network_interface { 76 | network = "default" 77 | access_config { 78 | # Ephemeral 79 | } 80 | } 81 | 82 | metadata { 83 | ssh-keys = "ubuntu:${file("${var.public_key_path}")}" 84 | } 85 | 86 | connection { 87 | user = "ubuntu" 88 | private_key = "${file("${var.private_key_path}")}" 89 | } 90 | 91 | provisioner "file" { 92 | source = "./deploy/docker-swarm/docker-compose.yml" 93 | destination = "/tmp/docker-compose.yml" 94 | } 95 | 96 | provisioner "remote-exec" { 97 | inline = [ 98 | "sudo service docker start", 99 | "sudo docker swarm init", 100 | ] 101 | } 102 | 103 | provisioner "local-exec" { 104 | command = "TOKEN=$(ssh -i \"${var.private_key_path}\" -o \"StrictHostKeyChecking no\" -o \"UserKnownHostsFile /dev/null\" ubuntu@${self.network_interface.0.access_config.0.assigned_nat_ip} sudo docker swarm join-token -q worker); echo \"#!/usr/bin/env bash\nsudo docker swarm join --token $TOKEN ${self.network_interface.0.access_config.0.assigned_nat_ip}:2377\" >| join.sh" 105 | } 106 | } 107 | 108 | resource "null_resource" "docker-swarm" { 109 | depends_on = [ "google_compute_instance.docker-swarm-node" ] 110 | connection { 111 | user = "ubuntu" 112 | private_key = "${file("${var.private_key_path}")}" 113 | host = "${google_compute_instance.docker-swarm-master.network_interface.0.access_config.0.assigned_nat_ip}" 114 | } 115 | provisioner "remote-exec" { 116 | inline = [ 117 | "sudo docker-compose -f /tmp/docker-compose.yml pull", 118 | "sudo docker-compose -f /tmp/docker-compose.yml bundle -o dockerswarm.dab", 119 | "sudo 
docker deploy dockerswarm" 120 | ] 121 | } 122 | 123 | provisioner "local-exec" { 124 | command = "rm join.sh" 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/local/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.ssh.username = "vagrant" 16 | config.ssh.password = "vagrant" 17 | 18 | config.vm.define "swarm-master" do |master| 19 | master.vm.box = "docker-swarm" 20 | master.vm.network "private_network", ip: "10.0.0.10", auto_config: false 21 | master.vm.hostname = "master" 22 | master.vm.synced_folder "../..", "/docker-swarm" 23 | master.vm.provision "shell", inline: <<-EOF 24 | sudo ifconfig enp0s8 10.0.0.10 netmask 255.255.255.0 up 25 | EOF 26 | end 27 | 28 | num_nodes = ENV["NUM_NODES"] || 2 29 | num_nodes.to_i.times do |n| 30 | config.vm.define "swarm-node#{n+1}" do |node| 31 | node.vm.box = "docker-swarm" 32 | node.vm.network "private_network", ip: "10.0.0.#{n+11}", auto_config: false 33 | node.vm.hostname = "node#{n+1}" 34 | node.vm.provision "shell", inline: <<-EOF 35 | sudo ifconfig enp0s8 10.0.0.#{n+11} netmask 255.255.255.0 up 36 | EOF 37 | end 38 | end 39 | 40 | # Disable automatic box update checking. If you disable this, then 41 | # boxes will only be checked for updates when the user runs 42 | # `vagrant box outdated`. This is not recommended. 43 | # config.vm.box_check_update = false 44 | 45 | # Create a forwarded port mapping which allows access to a specific port 46 | # within the machine from a port on the host machine. In the example below, 47 | # accessing "localhost:8080" will access port 80 on the guest machine. 48 | # config.vm.network "forwarded_port", guest: 80, host: 8080 49 | 50 | # Create a private network, which allows host-only access to the machine 51 | # using a specific IP. 52 | # config.vm.network "private_network", ip: "192.168.33.10" 53 | 54 | # Create a public network, which generally matched to bridged network. 55 | # Bridged networks make the machine appear as another physical device on 56 | # your network. 57 | # config.vm.network "public_network" 58 | 59 | # Share an additional folder to the guest VM. The first argument is 60 | # the path on the host to the actual folder. The second argument is 61 | # the path on the guest to mount the folder. And the optional third 62 | # argument is a set of non-required options. 63 | # config.vm.synced_folder "../data", "/vagrant_data" 64 | 65 | # Provider-specific configuration so you can fine-tune various 66 | # backing providers for Vagrant. These expose provider-specific options. 
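# A usage sketch for the swarm boxes defined above (assumes the "docker-swarm"
# box has already been built and added, e.g. by the Packer post-processor in
# ../../packer/packer.json):
#
#   NUM_NODES=3 vagrant up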
67 | # Example for VirtualBox: 68 | # 69 | # config.vm.provider "virtualbox" do |vb| 70 | # # Display the VirtualBox GUI when booting the machine 71 | # vb.gui = true 72 | # 73 | # # Customize the amount of memory on the VM: 74 | # vb.memory = "1024" 75 | # end 76 | # 77 | # View the documentation for the provider you are using for more 78 | # information on available options. 79 | 80 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 81 | # such as FTP and Heroku are also available. See the documentation at 82 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 83 | # config.push.define "atlas" do |push| 84 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 85 | # end 86 | 87 | # Enable provisioning with a shell script. Additional provisioners such as 88 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 89 | # documentation for more information about their specific syntax and use. 90 | # config.vm.provision "shell", inline: <<-SHELL 91 | # apt-get update 92 | # apt-get install -y apache2 93 | # SHELL 94 | end 95 | -------------------------------------------------------------------------------- /deploy/kubernetes/manifests-jaeger/jaeger.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017 The Jaeger Authors 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 5 | # in compliance with the License. You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software distributed under the License 10 | # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 11 | # or implied. See the License for the specific language governing permissions and limitations under 12 | # the License. 
13 | # 14 | 15 | apiVersion: v1 16 | kind: List 17 | items: 18 | - apiVersion: v1 19 | kind: Namespace 20 | metadata: 21 | name: jaeger 22 | - apiVersion: extensions/v1beta1 23 | kind: Deployment 24 | metadata: 25 | name: jaeger-deployment 26 | namespace: jaeger 27 | labels: 28 | app: jaeger 29 | jaeger-infra: jaeger-deployment 30 | spec: 31 | replicas: 1 32 | strategy: 33 | type: Recreate 34 | template: 35 | metadata: 36 | labels: 37 | app: jaeger 38 | jaeger-infra: jaeger-pod 39 | spec: 40 | containers: 41 | - env: 42 | - name: COLLECTOR_ZIPKIN_HTTP_PORT 43 | value: "9411" 44 | image: jaegertracing/all-in-one 45 | name: jaeger 46 | ports: 47 | - containerPort: 5775 48 | protocol: UDP 49 | - containerPort: 6831 50 | protocol: UDP 51 | - containerPort: 6832 52 | protocol: UDP 53 | - containerPort: 16686 54 | protocol: TCP 55 | - containerPort: 9411 56 | protocol: TCP 57 | readinessProbe: 58 | httpGet: 59 | path: "/" 60 | port: 16686 61 | initialDelaySeconds: 5 62 | - apiVersion: v1 63 | kind: Service 64 | metadata: 65 | name: jaeger-query 66 | namespace: jaeger 67 | labels: 68 | app: jaeger 69 | jaeger-infra: jaeger-service 70 | spec: 71 | ports: 72 | - name: query-http 73 | port: 80 74 | protocol: TCP 75 | targetPort: 16686 76 | selector: 77 | jaeger-infra: jaeger-pod 78 | type: LoadBalancer 79 | - apiVersion: v1 80 | kind: Service 81 | metadata: 82 | name: jaeger-collector 83 | namespace: jaeger 84 | labels: 85 | app: jaeger 86 | jaeger-infra: collector-service 87 | spec: 88 | ports: 89 | - name: jaeger-collector-tchannel 90 | port: 14267 91 | protocol: TCP 92 | targetPort: 14267 93 | - name: jaeger-collector-http 94 | port: 14268 95 | protocol: TCP 96 | targetPort: 14268 97 | - name: jaeger-collector-zipkin 98 | port: 9411 99 | protocol: TCP 100 | targetPort: 9411 101 | selector: 102 | jaeger-infra: jaeger-pod 103 | type: ClusterIP 104 | - apiVersion: v1 105 | kind: Service 106 | metadata: 107 | name: jaeger-agent 108 | namespace: jaeger 109 | labels: 110 | app: jaeger 111 | jaeger-infra: agent-service 112 | spec: 113 | ports: 114 | - name: agent-zipkin-thrift 115 | port: 5775 116 | protocol: UDP 117 | targetPort: 5775 118 | - name: agent-compact 119 | port: 6831 120 | protocol: UDP 121 | targetPort: 6831 122 | - name: agent-binary 123 | port: 6832 124 | protocol: UDP 125 | targetPort: 6832 126 | clusterIP: None 127 | selector: 128 | jaeger-infra: jaeger-pod 129 | - apiVersion: v1 130 | kind: Service 131 | metadata: 132 | name: zipkin 133 | namespace: jaeger 134 | labels: 135 | app: jaeger 136 | jaeger-infra: zipkin-service 137 | spec: 138 | ports: 139 | - name: jaeger-collector-zipkin 140 | port: 9411 141 | protocol: TCP 142 | targetPort: 9411 143 | clusterIP: None 144 | selector: 145 | jaeger-infra: jaeger-pod 146 | -------------------------------------------------------------------------------- /deploy/kubernetes/terraform/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.aws_region}" 3 | } 4 | 5 | resource "aws_security_group" "k8s-security-group" { 6 | name = "md-k8s-security-group" 7 | description = "allow all internal traffic, ssh, http from anywhere" 8 | ingress { 9 | from_port = 0 10 | to_port = 0 11 | protocol = "-1" 12 | self = "true" 13 | } 14 | ingress { 15 | from_port = 22 16 | to_port = 22 17 | protocol = "tcp" 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | ingress { 21 | from_port = 80 22 | to_port = 80 23 | protocol = "tcp" 24 | cidr_blocks = ["0.0.0.0/0"] 25 | } 26 | ingress { 27 | from_port 
= 9411 28 | to_port = 9411 29 | protocol = "tcp" 30 | cidr_blocks = ["0.0.0.0/0"] 31 | } 32 | ingress { 33 | from_port = 30001 34 | to_port = 30001 35 | protocol = "tcp" 36 | cidr_blocks = ["0.0.0.0/0"] 37 | } 38 | ingress { 39 | from_port = 30002 40 | to_port = 30002 41 | protocol = "tcp" 42 | cidr_blocks = ["0.0.0.0/0"] 43 | } 44 | ingress { 45 | from_port = 31601 46 | to_port = 31601 47 | protocol = "tcp" 48 | cidr_blocks = ["0.0.0.0/0"] 49 | } 50 | egress { 51 | from_port = 0 52 | to_port = 0 53 | protocol = "-1" 54 | cidr_blocks = ["0.0.0.0/0"] 55 | } 56 | } 57 | 58 | resource "aws_instance" "ci-sockshop-k8s-master" { 59 | instance_type = "${var.master_instance_type}" 60 | ami = "${lookup(var.aws_amis, var.aws_region)}" 61 | key_name = "${var.key_name}" 62 | security_groups = ["${aws_security_group.k8s-security-group.name}"] 63 | tags { 64 | Name = "ci-sockshop-k8s-master" 65 | } 66 | 67 | connection { 68 | user = "ubuntu" 69 | private_key = "${file("${var.private_key_path}")}" 70 | } 71 | 72 | provisioner "file" { 73 | source = "deploy/kubernetes/manifests" 74 | destination = "/tmp/" 75 | } 76 | 77 | provisioner "remote-exec" { 78 | inline = [ 79 | "sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -", 80 | "sudo echo \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee --append /etc/apt/sources.list.d/kubernetes.list", 81 | "sudo apt-get update", 82 | "sudo apt-get install -y docker.io", 83 | "sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni" 84 | ] 85 | } 86 | } 87 | 88 | resource "aws_instance" "ci-sockshop-k8s-node" { 89 | instance_type = "${var.node_instance_type}" 90 | count = "${var.node_count}" 91 | ami = "${lookup(var.aws_amis, var.aws_region)}" 92 | key_name = "${var.key_name}" 93 | security_groups = ["${aws_security_group.k8s-security-group.name}"] 94 | tags { 95 | Name = "ci-sockshop-k8s-node" 96 | } 97 | 98 | connection { 99 | user = "ubuntu" 100 | private_key = "${file("${var.private_key_path}")}" 101 | } 102 | 103 | provisioner "remote-exec" { 104 | inline = [ 105 | "sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -", 106 | "sudo echo \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee --append /etc/apt/sources.list.d/kubernetes.list", 107 | "sudo apt-get update", 108 | "sudo apt-get install -y docker.io", 109 | "sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni", 110 | "sudo sysctl -w vm.max_map_count=262144" 111 | ] 112 | } 113 | } 114 | 115 | resource "aws_elb" "ci-sockshop-k8s-elb" { 116 | depends_on = [ "aws_instance.ci-sockshop-k8s-node" ] 117 | name = "ci-sockshop-k8s-elb" 118 | instances = ["${aws_instance.ci-sockshop-k8s-node.*.id}"] 119 | availability_zones = ["${data.aws_availability_zones.available.names}"] 120 | security_groups = ["${aws_security_group.k8s-security-group.id}"] 121 | listener { 122 | lb_port = 80 123 | instance_port = 30001 124 | lb_protocol = "http" 125 | instance_protocol = "http" 126 | } 127 | 128 | listener { 129 | lb_port = 9411 130 | instance_port = 30002 131 | lb_protocol = "http" 132 | instance_protocol = "http" 133 | } 134 | 135 | } 136 | 137 | -------------------------------------------------------------------------------- /deploy/docker-swarm/infra/aws/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.aws_region}" 3 | } 4 | 5 | data "aws_ami" "ci-sockshop-docker-swarm" { 6 | most_recent = true 7 | filter { 8 | name = 
"name" 9 | values = ["docker-swarm"] 10 | } 11 | } 12 | 13 | resource "aws_security_group" "ci-sockshop-docker-swarm" { 14 | name = "ci-sockshop-docker-swarm" 15 | description = "allow all internal traffic, all traffic http from anywhere" 16 | ingress { 17 | from_port = 0 18 | to_port = 0 19 | protocol = "-1" 20 | self = "true" 21 | } 22 | ingress { 23 | from_port = 80 24 | to_port = 80 25 | protocol = "tcp" 26 | cidr_blocks = ["0.0.0.0/0"] 27 | } 28 | ingress { 29 | from_port = 30000 30 | to_port = 30000 31 | protocol = "tcp" 32 | cidr_blocks = ["0.0.0.0/0"] 33 | } 34 | ingress { 35 | from_port = 22 36 | to_port = 22 37 | protocol = "tcp" 38 | cidr_blocks = ["0.0.0.0/0"] 39 | } 40 | ingress { 41 | from_port = 2377 42 | to_port = 2377 43 | protocol = "tcp" 44 | cidr_blocks = ["0.0.0.0/0"] 45 | } 46 | ingress { 47 | from_port = 7946 48 | to_port = 7946 49 | protocol = "tcp" 50 | cidr_blocks = ["0.0.0.0/0"] 51 | } 52 | ingress { 53 | from_port = 4789 54 | to_port = 4789 55 | protocol = "tcp" 56 | cidr_blocks = ["0.0.0.0/0"] 57 | } 58 | egress { 59 | from_port = 0 60 | to_port = 0 61 | protocol = "-1" 62 | cidr_blocks = ["0.0.0.0/0"] 63 | } 64 | } 65 | 66 | resource "aws_instance" "ci-sockshop-docker-swarm-node" { 67 | depends_on = [ "aws_instance.ci-sockshop-docker-swarm-master" ] 68 | count = "${var.num_nodes}" 69 | instance_type = "${var.instance_type}" 70 | ami = "${data.aws_ami.ci-sockshop-docker-swarm.id}" 71 | key_name = "${var.private_key_name}" 72 | security_groups = ["${aws_security_group.ci-sockshop-docker-swarm.name}"] 73 | tags { 74 | Name = "ci-sockshop-docker-swarm-node" 75 | } 76 | 77 | connection { 78 | user = "ubuntu" 79 | private_key = "${file("${var.private_key_path}")}" 80 | } 81 | 82 | provisioner "file" { 83 | source = "join.sh", 84 | destination = "/tmp/join.sh" 85 | } 86 | 87 | provisioner "remote-exec" { 88 | inline = [ 89 | "sudo service docker start", 90 | "chmod +x /tmp/join.sh", 91 | "/tmp/join.sh" 92 | ] 93 | } 94 | } 95 | 96 | resource "aws_instance" "ci-sockshop-docker-swarm-master" { 97 | instance_type = "${var.instance_type}" 98 | ami = "${data.aws_ami.ci-sockshop-docker-swarm.id}" 99 | key_name = "${var.private_key_name}" 100 | security_groups = ["${aws_security_group.ci-sockshop-docker-swarm.name}"] 101 | tags { 102 | Name = "ci-sockshop-docker-swarm-master" 103 | } 104 | 105 | connection { 106 | user = "ubuntu" 107 | private_key = "${file("${var.private_key_path}")}" 108 | } 109 | 110 | provisioner "file" { 111 | source = "deploy/docker-swarm/docker-compose.yml" 112 | destination = "/tmp/docker-compose.yml" 113 | } 114 | 115 | provisioner "remote-exec" { 116 | inline = [ 117 | "sudo service docker start", 118 | "docker swarm init", 119 | ] 120 | } 121 | 122 | provisioner "local-exec" { 123 | command = "TOKEN=$(ssh -i ${var.private_key_path} -o StrictHostKeyChecking=no ubuntu@${aws_instance.ci-sockshop-docker-swarm-master.public_ip} docker swarm join-token -q worker); echo \"#!/usr/bin/env bash\ndocker swarm join --token $TOKEN ${aws_instance.ci-sockshop-docker-swarm-master.public_ip}:2377\" >| join.sh" 124 | } 125 | } 126 | 127 | resource "null_resource" "docker-swarm" { 128 | depends_on = [ "aws_instance.ci-sockshop-docker-swarm-node" ] 129 | connection { 130 | user = "ubuntu" 131 | private_key = "${file("${var.private_key_path}")}" 132 | host = "${aws_instance.ci-sockshop-docker-swarm-master.public_ip}" 133 | } 134 | provisioner "remote-exec" { 135 | inline = [ 136 | "docker-compose -f /tmp/docker-compose.yml pull", 137 | "docker-compose -f 
/tmp/docker-compose.yml bundle -o dockerswarm.dab", 138 | "docker deploy dockerswarm" 139 | ] 140 | } 141 | 142 | provisioner "local-exec" { 143 | command = "rm join.sh" 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /deploy/docker-swarm/packer/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "", 4 | "aws_secret_key": "", 5 | "aws_region": "{{env `AWS_DEFAULT_REGION`}}", 6 | "gcloud_project_id": "{{env `TF_VAR_project_name`}}", 7 | "gcloud_zone": "europe-west1-b" 8 | }, 9 | "builders": [{ 10 | "type": "amazon-ebs", 11 | "access_key": "{{user `aws_access_key`}}", 12 | "secret_key": "{{user `aws_secret_key`}}", 13 | "region": "{{user `aws_region`}}", 14 | "source_ami_filter": { 15 | "filters": { 16 | "virtualization-type": "hvm", 17 | "name": "*ubuntu-xenial-16.04-amd64-server-*", 18 | "root-device-type": "ebs" 19 | }, 20 | "owners": ["099720109477"], 21 | "most_recent": true 22 | }, 23 | "instance_type": "t2.micro", 24 | "ssh_username": "ubuntu", 25 | "ami_name": "docker-swarm" 26 | },{ 27 | "type": "googlecompute", 28 | "project_id": "{{user `gcloud_project_id`}}", 29 | "source_image": "ubuntu-1604-xenial-v20161020", 30 | "image_name": "docker-swarm", 31 | "ssh_username": "ubuntu", 32 | "zone": "{{user `gcloud_zone`}}" 33 | },{ 34 | "type": "virtualbox-iso", 35 | "guest_os_type": "Ubuntu_64", 36 | "vm_name": "docker-swarm", 37 | "iso_url": "http://releases.ubuntu.com/16.04/ubuntu-16.04-server-amd64.iso", 38 | "iso_checksum": "23e97cd5d4145d4105fbf29878534049", 39 | "iso_checksum_type": "md5", 40 | "disk_size" : 10000, 41 | "ssh_username": "vagrant", 42 | "ssh_password": "vagrant", 43 | "ssh_wait_timeout": "10000s", 44 | "http_directory": "packer", 45 | "headless": "true", 46 | "boot_wait": "5s", 47 | "shutdown_command": "echo 'vagrant' | sudo -S shutdown -P now", 48 | "boot_command": [ 49 | "", 50 | "", 51 | "", 52 | "", 53 | "/install/vmlinuz", 54 | " auto", 55 | " console-setup/ask_detect=false", 56 | " console-setup/layoutcode=us", 57 | " console-setup/modelcode=pc105", 58 | " debconf/frontend=noninteractive", 59 | " debian-installer=en_US", 60 | " fb=false", 61 | " initrd=/install/initrd.gz", 62 | " kbd-chooser/method=us", 63 | " keyboard-configuration/layout=USA", 64 | " keyboard-configuration/variant=USA", 65 | " locale=en_US", 66 | " netcfg/get_domain=vm", 67 | " netcfg/get_hostname=vagrant", 68 | " grub-installer/bootdev=/dev/sda", 69 | " noapic", 70 | " preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg", 71 | " -- ", 72 | " " 73 | ] 74 | }], 75 | "provisioners": [{ 76 | "type": "shell", 77 | "inline": [ 78 | "sleep 30", 79 | "sudo curl -fsSL https://experimental.docker.com/ | sh", 80 | "sudo usermod -aG docker $(whoami)", 81 | "sudo curl -L \"https://github.com/docker/compose/releases/download/1.8.1/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose", 82 | "sudo chmod +x /usr/local/bin/docker-compose", 83 | "sudo curl -L git.io/weave -o /usr/local/bin/weave", 84 | "sudo chmod +x /usr/local/bin/weave" 85 | ] 86 | }], 87 | "post-processors": [{ 88 | "type": "vagrant", 89 | "only": ["virtualbox-iso"], 90 | "include": ["docker-compose.yml"], 91 | "output": "infra/local/docker-swarm.box" 92 | },{ 93 | "type": "shell-local", 94 | "only": ["virtualbox-iso"], 95 | "inline": [ 96 | "vagrant box add --force docker-swarm infra/local/docker-swarm.box" 97 | ] 98 | }] 99 | } 100 | 
-------------------------------------------------------------------------------- /deploy/docker-compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | front-end: 5 | image: weaveworksdemos/front-end:0.3.12 6 | hostname: front-end 7 | restart: always 8 | cap_drop: 9 | - all 10 | read_only: true 11 | edge-router: 12 | image: weaveworksdemos/edge-router:0.1.1 13 | ports: 14 | - '80:80' 15 | - '8080:8080' 16 | cap_drop: 17 | - all 18 | cap_add: 19 | - NET_BIND_SERVICE 20 | - CHOWN 21 | - SETGID 22 | - SETUID 23 | - DAC_OVERRIDE 24 | read_only: true 25 | tmpfs: 26 | - /var/run:rw,noexec,nosuid 27 | hostname: edge-router 28 | restart: always 29 | catalogue: 30 | image: weaveworksdemos/catalogue:0.3.5 31 | hostname: catalogue 32 | restart: always 33 | cap_drop: 34 | - all 35 | cap_add: 36 | - NET_BIND_SERVICE 37 | read_only: true 38 | catalogue-db: 39 | image: weaveworksdemos/catalogue-db:0.3.0 40 | hostname: catalogue-db 41 | restart: always 42 | environment: 43 | - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} 44 | - MYSQL_ALLOW_EMPTY_PASSWORD=true 45 | - MYSQL_DATABASE=socksdb 46 | carts: 47 | image: weaveworksdemos/carts:0.4.8 48 | hostname: carts 49 | restart: always 50 | cap_drop: 51 | - all 52 | cap_add: 53 | - NET_BIND_SERVICE 54 | read_only: true 55 | tmpfs: 56 | - /tmp:rw,noexec,nosuid 57 | environment: 58 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false 59 | carts-db: 60 | image: mongo:3.4 61 | hostname: carts-db 62 | restart: always 63 | cap_drop: 64 | - all 65 | cap_add: 66 | - CHOWN 67 | - SETGID 68 | - SETUID 69 | read_only: true 70 | tmpfs: 71 | - /tmp:rw,noexec,nosuid 72 | orders: 73 | image: weaveworksdemos/orders:0.4.7 74 | hostname: orders 75 | restart: always 76 | cap_drop: 77 | - all 78 | cap_add: 79 | - NET_BIND_SERVICE 80 | read_only: true 81 | tmpfs: 82 | - /tmp:rw,noexec,nosuid 83 | environment: 84 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false 85 | orders-db: 86 | image: mongo:3.4 87 | hostname: orders-db 88 | restart: always 89 | cap_drop: 90 | - all 91 | cap_add: 92 | - CHOWN 93 | - SETGID 94 | - SETUID 95 | read_only: true 96 | tmpfs: 97 | - /tmp:rw,noexec,nosuid 98 | shipping: 99 | image: weaveworksdemos/shipping:0.4.8 100 | hostname: shipping 101 | restart: always 102 | cap_drop: 103 | - all 104 | cap_add: 105 | - NET_BIND_SERVICE 106 | read_only: true 107 | tmpfs: 108 | - /tmp:rw,noexec,nosuid 109 | environment: 110 | - JAVA_OPTS=-Xms64m -Xmx128m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom -Dspring.zipkin.enabled=false 111 | queue-master: 112 | image: weaveworksdemos/queue-master:0.3.1 113 | hostname: queue-master 114 | volumes: 115 | - /var/run/docker.sock:/var/run/docker.sock 116 | restart: always 117 | cap_drop: 118 | - all 119 | cap_add: 120 | - NET_BIND_SERVICE 121 | read_only: true 122 | tmpfs: 123 | - /tmp:rw,noexec,nosuid 124 | rabbitmq: 125 | image: rabbitmq:3.6.8 126 | hostname: rabbitmq 127 | restart: always 128 | cap_drop: 129 | - all 130 | cap_add: 131 | - CHOWN 132 | - SETGID 133 | - SETUID 134 | - DAC_OVERRIDE 135 | read_only: true 136 | payment: 137 | image: weaveworksdemos/payment:0.4.3 138 | hostname: payment 139 | restart: always 140 | cap_drop: 141 | - all 142 | cap_add: 143 | - NET_BIND_SERVICE 144 | read_only: true 145 | user: 146 | image: weaveworksdemos/user:0.4.4 147 | hostname: user 148 | restart: always 149 | 
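# Least-privilege pattern applied throughout this file: drop all capabilities,
# then add back only what each process needs (here NET_BIND_SERVICE, so the
# user service can bind its port without running as root).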
cap_drop: 150 | - all 151 | cap_add: 152 | - NET_BIND_SERVICE 153 | read_only: true 154 | environment: 155 | - MONGO_HOST=user-db:27017 156 | user-db: 157 | image: weaveworksdemos/user-db:0.4.0 158 | hostname: user-db 159 | restart: always 160 | cap_drop: 161 | - all 162 | cap_add: 163 | - CHOWN 164 | - SETGID 165 | - SETUID 166 | read_only: true 167 | tmpfs: 168 | - /tmp:rw,noexec,nosuid 169 | user-sim: 170 | image: weaveworksdemos/load-test:0.1.1 171 | cap_drop: 172 | - all 173 | read_only: true 174 | hostname: user-simulator 175 | command: "-d 60 -r 200 -c 2 -h edge-router" 176 | --------------------------------------------------------------------------------