├── .gitignore
├── README.md
├── artifacts
│   ├── circuit-breakers
│   │   ├── cb-web-frontend.yaml
│   │   ├── fortio.yaml
│   │   ├── outlier-web-frontend.yaml
│   │   └── web-frontend-failing.yaml
│   ├── discovery
│   │   ├── helloworld-dr.yaml
│   │   ├── helloworld-lb.yaml
│   │   └── helloworld-vs.yaml
│   ├── ingress
│   │   ├── gateway-tls.yaml
│   │   ├── gateway.yaml
│   │   ├── k8s-gw-tls.yaml
│   │   ├── k8s-gw.yaml
│   │   ├── web-frontend-route.yaml
│   │   └── web-frontend-virtualservice.yaml
│   ├── observability
│   │   ├── enable-tracing.yaml
│   │   └── trace-config.yaml
│   ├── security
│   │   ├── authz-policy-customers.yaml
│   │   └── mtls-strict.yaml
│   ├── sidecar-injection
│   │   └── nginx-pod.yaml
│   ├── the-app
│   │   ├── customers.yaml
│   │   ├── sleep.yaml
│   │   └── web-frontend.yaml
│   └── traffic-shifting
│       ├── customers-destinationrule.yaml
│       ├── customers-route-canary.yaml
│       ├── customers-route-debug.yaml
│       ├── customers-route-final.yaml
│       ├── customers-route.yaml
│       ├── customers-subsets.yaml
│       ├── customers-v2.yaml
│       ├── customers-virtualservice-final.yaml
│       ├── customers-virtualservice.yaml
│       ├── customers-vs-canary.yaml
│       └── customers-vs-debug.yaml
├── docs
│   ├── assets
│   │   ├── tetrate-logo-black.png
│   │   └── tetrate-logo-white.png
│   ├── circuit-breakers.md
│   ├── css
│   │   └── custom.css
│   ├── dashboards.md
│   ├── discovery.md
│   ├── environment.md
│   ├── index.md
│   ├── ingress-gwapi.md
│   ├── ingress.md
│   ├── install.md
│   ├── security.md
│   ├── sidecar-injection.md
│   ├── summary.md
│   ├── the-app.md
│   ├── traffic-shifting-gwapi.md
│   └── traffic-shifting.md
├── mkdocs.yml
└── overrides
    └── partials
        └── logo.html
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | site/
3 | .envrc
4 | notes
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Istio 0 to 60 Workshop labs
2 |
3 | This repository contains the labs for the Istio 0 to 60 workshop.
4 |
5 | First deliveries of this workshop took place in early 2022.
6 | The labs are tested against each new release of Istio, and have been refined and updated over time.
7 | New labs have been added that go beyond the original "0 to 60" scope.
8 |
9 | The labs are published on GitHub pages [here](https://tetratelabs.github.io/istio-0to60/).
10 |
11 | To work through the workshop:
12 |
13 | 1. Visit the [workshop labs site](https://tetratelabs.github.io/istio-0to60/).
14 | 1. Select a [lab environment](https://tetratelabs.github.io/istio-0to60/environment/) option.
15 | 1. Obtain a copy of the [artifacts](https://tetratelabs.github.io/istio-0to60/environment/#artifacts) you'll need.
16 | 1. Work through the labs.
17 |
18 | ## Development
19 |
20 | The project board is [here](https://github.com/orgs/tetratelabs/projects/9/views/4).
21 |
--------------------------------------------------------------------------------
/artifacts/circuit-breakers/cb-web-frontend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: web-frontend
5 | spec:
6 | host: web-frontend.default.svc.cluster.local
7 | trafficPolicy:
8 | connectionPool:
9 | http:
10 | http1MaxPendingRequests: 1 # (1)
11 | http2MaxRequests: 1 # (2)
12 | maxRequestsPerConnection: 1 # (3)
13 |
--------------------------------------------------------------------------------
/artifacts/circuit-breakers/fortio.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: fortio
6 | labels:
7 | app: fortio
8 | service: fortio
9 | spec:
10 | ports:
11 | - port: 8080
12 | name: http
13 | selector:
14 | app: fortio
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: fortio
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: fortio
24 | template:
25 | metadata:
26 | annotations:
27 | # This annotation causes Envoy to serve cluster.outbound statistics via 15000/stats
28 | # in addition to the stats normally served by Istio.
29 | proxy.istio.io/config: |-
30 | proxyStatsMatcher:
31 | inclusionPrefixes:
32 | - "cluster.outbound"
33 | - "cluster_manager"
34 | - "listener_manager"
35 | - "server"
36 | - "cluster.xds-grpc"
37 | labels:
38 | app: fortio
39 | spec:
40 | containers:
41 | - name: fortio
42 | image: fortio/fortio:latest
43 | imagePullPolicy: Always
44 | ports:
45 | - containerPort: 8080
46 | name: http-fortio
47 | - containerPort: 8079
48 | name: grpc-ping
49 |
--------------------------------------------------------------------------------
/artifacts/circuit-breakers/outlier-web-frontend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: web-frontend
5 | spec:
6 | host: web-frontend.default.svc.cluster.local
7 | trafficPolicy:
8 | outlierDetection:
9 | consecutive5xxErrors: 1 # (1)
10 | interval: 5s # (2)
11 | baseEjectionTime: 60s # (3)
12 | maxEjectionPercent: 100 # (4)
13 |
--------------------------------------------------------------------------------
/artifacts/circuit-breakers/web-frontend-failing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: web-frontend-failing
5 | labels:
6 | app: web-frontend
7 | spec:
8 | replicas: 4
9 | selector:
10 | matchLabels:
11 | app: web-frontend
12 | template:
13 | metadata:
14 | labels:
15 | app: web-frontend
16 | version: v1
17 | spec:
18 | serviceAccountName: web-frontend
19 | containers:
20 | - image: gcr.io/tetratelabs/web-frontend:1.0.0
21 | imagePullPolicy: Always
22 | name: web
23 | ports:
24 | - containerPort: 8080
25 | env:
26 | - name: CUSTOMER_SERVICE_URL
27 | value: 'http://customers.default.svc.cluster.local'
28 | - name: ERROR_RATE
29 | value: '100'
30 | - name: ERROR_STATUS_CODE
31 | value: '500'
--------------------------------------------------------------------------------
/artifacts/discovery/helloworld-dr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: DestinationRule
4 | metadata:
5 | name: helloworld-trafficpolicy
6 | spec:
7 | host: helloworld.default.svc.cluster.local
8 | trafficPolicy:
9 | loadBalancer:
10 | simple: LEAST_REQUEST
11 | subsets:
12 | - name: v1
13 | labels:
14 | version: v1
15 | - name: v2
16 | labels:
17 | version: v2
18 |
--------------------------------------------------------------------------------
/artifacts/discovery/helloworld-lb.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: DestinationRule
4 | metadata:
5 | name: helloworld-trafficpolicy
6 | spec:
7 | host: helloworld.default.svc.cluster.local
8 | trafficPolicy:
9 | loadBalancer:
10 | simple: RANDOM
11 |
--------------------------------------------------------------------------------
/artifacts/discovery/helloworld-vs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1beta1
3 | kind: VirtualService
4 | metadata:
5 | name: hello-routing
6 | spec:
7 | hosts:
8 | - helloworld.default.svc.cluster.local
9 | http:
10 | - route:
11 | - destination:
12 | host: helloworld.default.svc.cluster.local
13 | subset: v1
14 | weight: 25
15 | - destination:
16 | host: helloworld.default.svc.cluster.local
17 | subset: v2
18 | weight: 75
19 |
20 |
--------------------------------------------------------------------------------
/artifacts/ingress/gateway-tls.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: Gateway
4 | metadata:
5 | name: frontend-gateway
6 | spec:
7 | selector:
8 | istio: ingressgateway
9 | servers:
10 | - port:
11 | number: 443
12 | name: https
13 | protocol: HTTPS
14 | tls:
15 | mode: SIMPLE
16 | credentialName: webfrontend-credential
17 | hosts:
18 | - webfrontend.example.com
19 |
--------------------------------------------------------------------------------
/artifacts/ingress/gateway.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: Gateway
4 | metadata:
5 | name: frontend-gateway
6 | spec:
7 | selector:
8 | istio: ingressgateway
9 | servers:
10 | - port:
11 | number: 80
12 | name: http
13 | protocol: HTTP
14 | hosts:
15 | - "*"
16 |
--------------------------------------------------------------------------------
/artifacts/ingress/k8s-gw-tls.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1
3 | kind: Gateway
4 | metadata:
5 | name: frontend-gateway
6 | spec:
7 | gatewayClassName: istio
8 | listeners:
9 | - name: https
10 | hostname: webfrontend.example.com
11 | port: 443
12 | protocol: HTTPS
13 | tls:
14 | certificateRefs:
15 | - name: webfrontend-credential
16 | allowedRoutes:
17 | namespaces:
18 | from: Same
19 |
--------------------------------------------------------------------------------
/artifacts/ingress/k8s-gw.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1
3 | kind: Gateway
4 | metadata:
5 | name: frontend-gateway
6 | spec:
7 | gatewayClassName: istio
8 | listeners:
9 | - name: http
10 | port: 80
11 | protocol: HTTP
12 | allowedRoutes:
13 | namespaces:
14 | from: Same
15 |
--------------------------------------------------------------------------------
/artifacts/ingress/web-frontend-route.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1
3 | kind: HTTPRoute
4 | metadata:
5 | name: web-frontend
6 | spec:
7 | parentRefs:
8 | - name: frontend-gateway
9 | rules:
10 | - backendRefs:
11 | - name: web-frontend
12 | port: 80
13 |
--------------------------------------------------------------------------------
/artifacts/ingress/web-frontend-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: web-frontend
6 | spec:
7 | hosts:
8 | - "*"
9 | gateways:
10 | - frontend-gateway
11 | http:
12 | - route:
13 | - destination:
14 | host: web-frontend.default.svc.cluster.local
15 | port:
16 | number: 80
17 |
--------------------------------------------------------------------------------
/artifacts/observability/enable-tracing.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: telemetry.istio.io/v1alpha1
3 | kind: Telemetry
4 | metadata:
5 | name: mesh-default
6 | namespace: istio-system
7 | spec:
8 | tracing:
9 | - providers:
10 | - name: zipkin
11 | randomSamplingPercentage: 100.0
12 | accessLogging:
13 | - providers:
14 | - name: envoy
15 |
--------------------------------------------------------------------------------
/artifacts/observability/trace-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: install.istio.io/v1alpha1
3 | kind: IstioOperator
4 | spec:
5 | profile: default
6 | meshConfig:
7 | enableTracing: true
8 | defaultConfig:
9 | tracing:
10 | sampling: 100.0
11 | extensionProviders:
12 | - name: zipkin
13 | zipkin:
14 | service: zipkin.istio-system.svc.cluster.local
15 | port: 9411
16 |
--------------------------------------------------------------------------------
/artifacts/security/authz-policy-customers.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: security.istio.io/v1beta1
3 | kind: AuthorizationPolicy
4 | metadata:
5 | name: allowed-customers-clients
6 | namespace: default
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: customers
11 | action: ALLOW
12 | rules:
13 | - from:
14 | - source:
15 | principals: ["cluster.local/ns/default/sa/web-frontend"]
16 |
--------------------------------------------------------------------------------
/artifacts/security/mtls-strict.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: security.istio.io/v1beta1
3 | kind: PeerAuthentication
4 | metadata:
5 | name: default
6 | namespace: default
7 | spec:
8 | mtls:
9 | mode: STRICT
10 |
--------------------------------------------------------------------------------
/artifacts/sidecar-injection/nginx-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | labels:
6 | run: mywebserver
7 | name: mywebserver
8 | spec:
9 | containers:
10 | - name: mywebserver
11 | image: nginx
12 |
--------------------------------------------------------------------------------
/artifacts/the-app/customers.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: customers
6 | ---
7 | apiVersion: apps/v1
8 | kind: Deployment
9 | metadata:
10 | name: customers-v1
11 | labels:
12 | app: customers
13 | version: v1
14 | spec:
15 | replicas: 1
16 | selector:
17 | matchLabels:
18 | app: customers
19 | version: v1
20 | template:
21 | metadata:
22 | labels:
23 | app: customers
24 | version: v1
25 | spec:
26 | serviceAccountName: customers
27 | containers:
28 | - image: gcr.io/tetratelabs/customers:1.0.0
29 | imagePullPolicy: Always
30 | name: svc
31 | ports:
32 | - containerPort: 3000
33 | ---
34 | kind: Service
35 | apiVersion: v1
36 | metadata:
37 | name: customers
38 | labels:
39 | app: customers
40 | spec:
41 | selector:
42 | app: customers
43 | ports:
44 | - port: 80
45 | name: http
46 | targetPort: 3000
47 |
--------------------------------------------------------------------------------
/artifacts/the-app/sleep.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | spec:
51 | terminationGracePeriodSeconds: 0
52 | serviceAccountName: sleep
53 | containers:
54 | - name: sleep
55 | image: curlimages/curl
56 | command: ["/bin/sleep", "infinity"]
57 | imagePullPolicy: IfNotPresent
58 | volumeMounts:
59 | - mountPath: /etc/sleep/tls
60 | name: secret-volume
61 | volumes:
62 | - name: secret-volume
63 | secret:
64 | secretName: sleep-secret
65 | optional: true
66 | ---
67 |
--------------------------------------------------------------------------------
/artifacts/the-app/web-frontend.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: web-frontend
6 | ---
7 | apiVersion: apps/v1
8 | kind: Deployment
9 | metadata:
10 | name: web-frontend
11 | labels:
12 | app: web-frontend
13 | spec:
14 | replicas: 1
15 | selector:
16 | matchLabels:
17 | app: web-frontend
18 | template:
19 | metadata:
20 | labels:
21 | app: web-frontend
22 | version: v1
23 | spec:
24 | serviceAccountName: web-frontend
25 | containers:
26 | - name: web
27 | image: gcr.io/tetratelabs/web-frontend:1.0.0
28 | imagePullPolicy: Always
29 | ports:
30 | - containerPort: 8080
31 | env:
32 | - name: CUSTOMER_SERVICE_URL
33 | value: "http://customers.default.svc.cluster.local"
34 | ---
35 | kind: Service
36 | apiVersion: v1
37 | metadata:
38 | name: web-frontend
39 | labels:
40 | app: web-frontend
41 | spec:
42 | selector:
43 | app: web-frontend
44 | ports:
45 | - port: 80
46 | name: http
47 | targetPort: 8080
48 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-destinationrule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: DestinationRule
4 | metadata:
5 | name: customers
6 | spec:
7 | host: customers.default.svc.cluster.local
8 | subsets:
9 | - name: v1
10 | labels:
11 | version: v1
12 | - name: v2
13 | labels:
14 | version: v2
15 |
16 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-route-canary.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1beta1
3 | kind: HTTPRoute
4 | metadata:
5 | name: customers
6 | spec:
7 | parentRefs:
8 | - group: ""
9 | kind: Service
10 | name: customers
11 | rules:
12 | - backendRefs:
13 | - name: customers-v2
14 | port: 80
15 | weight: 10
16 | - name: customers-v1
17 | port: 80
18 | weight: 90
19 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-route-debug.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1beta1
3 | kind: HTTPRoute
4 | metadata:
5 | name: customers
6 | spec:
7 | parentRefs:
8 | - group: ""
9 | kind: Service
10 | name: customers
11 | rules:
12 | - matches:
13 | - headers:
14 | - type: Exact
15 | name: user-agent
16 | value: debug
17 | backendRefs:
18 | - name: customers-v2
19 | port: 80
20 | - backendRefs:
21 | - name: customers-v1
22 | port: 80
23 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-route-final.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1beta1
3 | kind: HTTPRoute
4 | metadata:
5 | name: customers
6 | spec:
7 | parentRefs:
8 | - group: ""
9 | kind: Service
10 | name: customers
11 | rules:
12 | - backendRefs:
13 | - name: customers-v2
14 | port: 80
15 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-route.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: gateway.networking.k8s.io/v1beta1
3 | kind: HTTPRoute
4 | metadata:
5 | name: customers
6 | spec:
7 | parentRefs:
8 | - group: ""
9 | kind: Service
10 | name: customers
11 | rules:
12 | - backendRefs:
13 | - name: customers-v1
14 | port: 80
15 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-subsets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: customers-v1
6 | spec:
7 | selector:
8 | app: customers
9 | version: v1
10 | ports:
11 | - port: 80
12 | name: http
13 | targetPort: 3000
14 | ---
15 | apiVersion: v1
16 | kind: Service
17 | metadata:
18 | name: customers-v2
19 | spec:
20 | selector:
21 | app: customers
22 | version: v2
23 | ports:
24 | - port: 80
25 | name: http
26 | targetPort: 3000
27 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-v2.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: customers-v2
6 | labels:
7 | app: customers
8 | version: v2
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app: customers
14 | version: v2
15 | template:
16 | metadata:
17 | labels:
18 | app: customers
19 | version: v2
20 | spec:
21 | serviceAccountName: customers
22 | containers:
23 | - image: gcr.io/tetratelabs/customers:2.0.0
24 | imagePullPolicy: Always
25 | name: svc
26 | ports:
27 | - containerPort: 3000
28 |
29 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-virtualservice-final.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: customers
6 | spec:
7 | hosts:
8 | - customers.default.svc.cluster.local
9 | http:
10 | - route:
11 | - destination:
12 | host: customers.default.svc.cluster.local
13 | subset: v2
14 |
15 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: customers
6 | spec:
7 | hosts:
8 | - customers.default.svc.cluster.local
9 | http:
10 | - route:
11 | - destination:
12 | host: customers.default.svc.cluster.local
13 | subset: v1
14 |
15 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-vs-canary.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: customers
6 | spec:
7 | hosts:
8 | - customers.default.svc.cluster.local
9 | http:
10 | - route:
11 | - destination:
12 | host: customers.default.svc.cluster.local
13 | subset: v2
14 | weight: 10
15 | - destination:
16 | host: customers.default.svc.cluster.local
17 | subset: v1
18 | weight: 90
19 |
--------------------------------------------------------------------------------
/artifacts/traffic-shifting/customers-vs-debug.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: customers
6 | spec:
7 | hosts:
8 | - customers.default.svc.cluster.local
9 | http:
10 | - match:
11 | - headers:
12 | user-agent:
13 | exact: debug
14 | route:
15 | - destination:
16 | host: customers.default.svc.cluster.local
17 | subset: v2
18 | - route:
19 | - destination:
20 | host: customers.default.svc.cluster.local
21 | subset: v1
22 |
--------------------------------------------------------------------------------
/docs/assets/tetrate-logo-black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tetratelabs/istio-0to60/64496479e1e9f1947d47959cbd512f52b09566ab/docs/assets/tetrate-logo-black.png
--------------------------------------------------------------------------------
/docs/assets/tetrate-logo-white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tetratelabs/istio-0to60/64496479e1e9f1947d47959cbd512f52b09566ab/docs/assets/tetrate-logo-white.png
--------------------------------------------------------------------------------
/docs/circuit-breakers.md:
--------------------------------------------------------------------------------
1 | # Circuit breakers
2 |
3 | This lab demonstrates how to configure [circuit breaking](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking){target=_blank} in Istio, both with and without outlier detection.
4 |
5 | ## Prerequisites and setup
6 |
7 | - Kubernetes with Istio and other tools (Prometheus, Zipkin, Grafana) installed
8 | - `web-frontend` and `customers` workloads already deployed and running.
9 |
10 | ## Revise the Istio installation configuration
11 |
12 | Modify the Istio installation to use the [demo profile](https://istio.io/latest/docs/setup/additional-setup/config-profiles/){target=_blank}, which enables a high trace-sampling rate that is convenient for this lab.
13 |
14 | ```shell
15 | istioctl install --set profile=demo
16 | ```
17 |
18 | ## Install Fortio
19 |
20 | Let's generate some load against the `web-frontend` workload and observe the distribution of responses.
21 |
22 | We'll use [Fortio](https://fortio.org/){target=_blank} to generate load on the `web-frontend` service.
23 |
24 | 1. Deploy Fortio
25 |
26 | ```yaml linenums="1" title="fortio.yaml" hl_lines="27-28"
27 | --8<-- "circuit-breakers/fortio.yaml"
28 | ```
29 |
30 | Above, notice the annotation, which configures the sidecar to expose additional Envoy metrics (a.k.a. statistics), including those related to circuit breaking.
31 |
32 | Save the above file to `fortio.yaml` and deploy it:
33 |
34 | ```shell
35 | kubectl apply -f fortio.yaml
36 | ```
37 |
38 | 1. Make a single request to make sure everything is working:
39 |
40 | ```shell
41 | kubectl exec deploy/fortio -c fortio -- fortio curl web-frontend
42 | ```
43 |
44 | The above command should result in an HTTP 200 "OK" response from the `web-frontend` app.
45 |
46 | 1. With Fortio, we can generate a load of 50 requests over two concurrent connections like this:
47 |
48 | ```shell
49 | kubectl exec deploy/fortio -c fortio -- \
50 | fortio load -c 2 -qps 0 -n 50 -quiet web-frontend
51 | ```
52 |
53 | All 50 requests should succeed. That is the meaning of `Code 200 : 50` in the output.
54 |
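    For reference, a successful run ends with a summary along these lines (sample output; your latencies and qps will differ):

    ```console
    ...
    Code 200 : 50 (100.0 %)
    ...
    ```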
55 |
56 | !!! info
57 |
58 | Fortio also has a GUI, to access it:
59 |
60 | 1. Port-forward the deployment's port
61 |
62 | ```shell
63 | kubectl port-forward deploy/fortio 8080
64 | ```
65 |
66 | 1. In a browser, visit [http://localhost:8080/fortio](http://localhost:8080/fortio){target=_blank}
67 |
68 |
69 | ## Circuit breaker - connection pool settings
70 |
71 | Study the following DestinationRule:
72 |
73 | ```yaml linenums="1" title="cb-web-frontend.yaml"
74 | --8<-- "circuit-breakers/cb-web-frontend.yaml"
75 | ```
76 |
77 | 1. The maximum number of pending HTTP requests to a destination.
78 | 1. The maximum number of concurrent requests to a destination.
79 | 1. The maximum number of requests per connection.
80 |
81 | It configures the connection pool for `web-frontend` with very low thresholds, to easily trigger the circuit breaker.
82 |
83 | Save the above YAML to `cb-web-frontend.yaml` and apply the changes:
84 |
85 | ```shell
86 | kubectl apply -f cb-web-frontend.yaml
87 | ```
88 |
89 | Since all values are set to 1, we won't trip the circuit breaker if we send requests over a single connection at a rate of one request per second.
90 |
91 | If we increase the number of connections and the volume of requests (e.g., 2 workers concurrently sending a total of 50 requests), we'll start getting errors.
92 |
93 | The errors happen because `http2MaxRequests` is set to 1 while more than one concurrent request is in flight. Additionally, we're exceeding the `maxRequestsPerConnection` limit.
94 |
95 | ```shell
96 | kubectl exec deploy/fortio -c fortio -- \
97 | fortio load -c 2 -qps 0 -n 50 -quiet web-frontend
98 | ```
99 |
100 | ```console
101 | ...
102 | Code 200 : 24 (48.0 %)
103 | Code 503 : 26 (52.0 %)
104 | ```
105 |
106 | !!! Tip
107 |
108 | To reset the metric counters, run:
109 |
110 | ```shell
111 | kubectl exec deploy/fortio -c istio-proxy -- curl -sX POST localhost:15000/reset_counters
112 | ```
113 |
114 | ## The `x-envoy-overloaded` header
115 |
116 | When a request is dropped due to circuit breaking, the response will contain the header `x-envoy-overloaded` with the value "true".
117 |
118 | One way to see this header is to run a fortio load with two concurrent connections in one terminal for a couple of minutes:
119 |
120 | ```shell
121 | kubectl exec deploy/fortio -c fortio -- \
122 | fortio load -c 2 -qps 0 -t 2m --allow-initial-errors -quiet http://web-frontend
123 | ```
124 |
125 | In a separate terminal, invoke a single request:
126 |
127 | ```shell
128 | kubectl exec deploy/fortio -c fortio -- fortio curl http://web-frontend
129 | ```
130 |
131 | Here is an example response to a dropped request:
132 |
133 | ```console hl_lines="2"
134 | > HTTP/1.1 503 Service Unavailable
135 | > x-envoy-overloaded: true
136 | > content-length: 81
137 | > content-type: text/plain
138 | > date: Thu, 10 Aug 2023 18:25:37 GMT
139 | > server: envoy
140 | >
141 | > upstream connect error or disconnect/reset before headers. reset reason: overflowcommand terminated with exit code 1
142 | ```
143 |
144 | Then press ++ctrl+c++ to interrupt the load generation.
145 |
146 | ## Observe failures in Zipkin
147 |
148 | Open the Zipkin dashboard:
149 |
150 | ```shell
151 | istioctl dash zipkin
152 | ```
153 |
154 | In the Zipkin UI, list failing traces by clicking the "+" button in the search field and specifying the query: `tagQuery=error`. Then click the **Run Query** button.
155 |
156 | Pick a failing trace to view the details.
157 |
158 | The requests are failing because the circuit breaker is tripped. [Response flags](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-response-flags){target=_blank} are set to `UO` (Upstream Overflow) and the status code is 503 (service unavailable).
159 |
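Because the demo profile enables Envoy access logging to stdout, the dropped requests can also be spotted in the Fortio sidecar's access log, where the same `UO` response flag appears (a quick check; the `grep` pattern assumes the default access log format):

```shell
kubectl logs deploy/fortio -c istio-proxy | grep UO
```

Each matching entry is a request that the sidecar rejected locally because the circuit breaker was tripped.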
160 | ## Prometheus metrics
161 |
162 | Another option is looking at the Prometheus metrics directly.
163 |
164 | Open the Prometheus dashboard:
165 |
166 | ```shell
167 | istioctl dash prometheus
168 | ```
169 |
170 | Apply the following PromQL query:
171 |
172 | ```promql
173 | envoy_cluster_upstream_rq_pending_overflow{app="fortio", cluster_name="outbound|80||web-frontend.default.svc.cluster.local"}
174 | ```
175 |
176 | The query shows the metrics for requests originating from the `fortio` app and going to the `web-frontend` service.
177 |
178 | The `upstream_rq_pending_overflow` and other metrics are described [in the Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#general){target=_blank}.
179 |
180 | Noteworthy are [circuit-breaking specific metrics](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#circuit-breakers-statistics){target=_blank} showing the state of various circuit breakers. For example, `rq_open` indicates whether the "requests" circuit breaker is open, and its companion `remaining_rq` indicates how many more requests can be admitted before that circuit breaker trips.
181 |
182 | We can also look at the metrics directly from the `istio-proxy` container in the Fortio Pod:
183 |
184 | ```shell
185 | kubectl exec deploy/fortio -c istio-proxy -- \
186 | pilot-agent request GET stats | grep web-frontend | grep pending
187 | ```
188 |
189 | ```console
190 | cluster.outbound|80||web-frontend.default.svc.cluster.local.circuit_breakers.default.remaining_pending: 1
191 | cluster.outbound|80||web-frontend.default.svc.cluster.local.circuit_breakers.default.rq_pending_open: 0
192 | cluster.outbound|80||web-frontend.default.svc.cluster.local.circuit_breakers.high.rq_pending_open: 0
193 | cluster.outbound|80||web-frontend.default.svc.cluster.local.upstream_rq_pending_active: 0
194 | cluster.outbound|80||web-frontend.default.svc.cluster.local.upstream_rq_pending_failure_eject: 0
195 | cluster.outbound|80||web-frontend.default.svc.cluster.local.upstream_rq_pending_overflow: 26
196 | cluster.outbound|80||web-frontend.default.svc.cluster.local.upstream_rq_pending_total: 24
197 | ```
198 |
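The same command with a different filter surfaces the circuit-breaker gauges mentioned above (a sketch; the gauges are tracked because the DestinationRule from this lab sets the corresponding limits, and the values will vary with load):

```shell
kubectl exec deploy/fortio -c istio-proxy -- \
  pilot-agent request GET stats | grep web-frontend | grep -E 'rq_open|remaining_rq'
```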
199 | !!! info
200 |
201 | Yet another convenient way to look at the stats emitted by an Envoy sidecar is via the Envoy dashboard:
202 |
203 | ```shell
204 | istioctl dashboard envoy deploy/fortio
205 | ```
206 |
207 | In the web UI, click on the "stats" endpoint and filter by the outbound cluster "web-frontend".
208 |
209 | ## Resolving the errors
210 |
211 | To resolve these errors, we can adjust the circuit breaker settings.
212 |
213 | Increase the maximum number of concurrent requests to 2 (`http2MaxRequests`), as shown below:
214 |
215 | ```yaml linenums="1" hl_lines="11"
216 | apiVersion: networking.istio.io/v1alpha3
217 | kind: DestinationRule
218 | metadata:
219 | name: web-frontend
220 | spec:
221 | host: web-frontend.default.svc.cluster.local
222 | trafficPolicy:
223 | connectionPool:
224 | http:
225 | http1MaxPendingRequests: 1
226 | http2MaxRequests: 2
227 | maxRequestsPerConnection: 1
228 | ```
229 |
230 | Save the above YAML to `cb-web-frontend.yaml` and apply the changes:
231 |
232 | ```shell
233 | kubectl apply -f cb-web-frontend.yaml
234 | ```
235 |
236 | If we re-run Fortio with the same parameters, we'll notice fewer failures this time:
237 |
238 | ```shell
239 | kubectl exec deploy/fortio -c fortio -- \
240 | fortio load -c 2 -qps 0 -n 50 -quiet web-frontend
241 | ```
242 |
243 | ```console
244 | ...
245 | Code 200 : 39 (78.0 %)
246 | Code 503 : 11 (22.0 %)
247 | ```
248 |
249 | Since we're sending more than 1 request per connection, we can increase the `maxRequestsPerConnection` to 2:
250 |
251 | ```yaml linenums="1" hl_lines="12"
252 | apiVersion: networking.istio.io/v1alpha3
253 | kind: DestinationRule
254 | metadata:
255 | name: web-frontend
256 | spec:
257 | host: web-frontend.default.svc.cluster.local
258 | trafficPolicy:
259 | connectionPool:
260 | http:
261 | http1MaxPendingRequests: 1
262 | http2MaxRequests: 2
263 | maxRequestsPerConnection: 2
264 | ```
265 |
266 | Save the above YAML to `cb-web-frontend.yaml` and apply the changes:
267 |
268 | ```shell
269 | kubectl apply -f cb-web-frontend.yaml
270 | ```
271 |
272 | If we re-run Fortio this time, we'll get zero or close to zero HTTP 503 responses. Even if we increase the number of requests per second, we should only get a small number of 503 responses. To get rid of the remaining failing requests, we can increase the `http1MaxPendingRequests` to 2:
273 |
274 | ```yaml linenums="1" hl_lines="10"
275 | apiVersion: networking.istio.io/v1alpha3
276 | kind: DestinationRule
277 | metadata:
278 | name: web-frontend
279 | spec:
280 | host: web-frontend.default.svc.cluster.local
281 | trafficPolicy:
282 | connectionPool:
283 | http:
284 | http1MaxPendingRequests: 2
285 | http2MaxRequests: 2
286 | maxRequestsPerConnection: 2
287 | ```
288 |
289 | With these settings (assuming 2 concurrent connections), we can easily handle a higher number of requests.
290 |
291 | To be clear, the numbers used in these settings are just examples and are not realistic; we set them intentionally low to make the circuit breaker easier to trip.
292 |
293 | Before continuing, delete the DestinationRule:
294 |
295 | ```shell
296 | kubectl delete destinationrule web-frontend
297 | ```
298 |
299 | Reset the metric counters:
300 |
301 | ```shell
302 | kubectl exec deploy/fortio -c istio-proxy -- \
303 | curl -X POST localhost:15000/reset_counters
304 | ```
305 |
306 | ## Outlier detection
307 |
308 | The circuit breaker is great when we want to protect the services from a sudden burst of requests. However, how can we protect the services in case of failures?
309 |
310 | For example, if we have a service that is still failing after multiple requests, it doesn't make sense to send even more requests to it. Instead, we can remove the instance of the failing service from the load balancing pool for a certain period of time. That way, we know that the requests will go to other instances of the service. After a pre-defined period of time, we can bring the failing service back into the load balancing pool.
311 |
312 | This process is called [outlier detection](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/outlier){target=_blank}. Just like in the connection pool settings, we can configure outlier detection in the DestinationRule.
313 |
314 | To see outlier detection in action, we need a service that is failing. We'll create a `web-frontend-failing` deployment and configure it to return HTTP 500 responses:
315 |
316 | ??? note "Click for web-frontend-failing.yaml"
317 |
318 | ```yaml linenums="1" title="web-frontend-failing.yaml"
319 | --8<-- "circuit-breakers/web-frontend-failing.yaml"
320 | ```
321 |
322 | Save the above YAML to `web-frontend-failing.yaml` and apply it to the cluster:
323 |
324 | ```shell
325 | kubectl apply -f web-frontend-failing.yaml
326 | ```
327 |
328 | If we run Fortio, we'll see that the majority (roughly 80%) of requests fail. That's because the `web-frontend-failing` deployment has more replicas (four) than the "good" deployment (one).
329 |
330 | ```shell
331 | kubectl exec deploy/fortio -c fortio -- \
332 | fortio load -c 2 -qps 0 -n 50 -quiet web-frontend
333 | ```
334 |
335 | ```console
336 | ...
337 | Code 200 : 9 (18.0 %)
338 | Code 500 : 41 (82.0 %)
339 | ```
340 |
341 | Let's look at an example of outlier detection configuration:
342 |
343 | ```yaml linenums="1" title="outlier-web-frontend.yaml"
344 | --8<-- "circuit-breakers/outlier-web-frontend.yaml"
345 | ```
346 |
347 | 1. Number of 5xx errors in a row that will trigger the outlier detection.
348 | 2. The interval at which the hosts are checked whether they need to be ejected.
349 | 3. The duration of time an outlier is ejected from the load balancing pool. If the same host is ejected multiple times, the ejection time increases by multiplying the base ejection time by the number of times the host is ejected.
350 | 4. The maximum percentage of hosts that can be ejected.
351 |
352 | Save the YAML to `outlier-web-frontend.yaml` and apply it:
353 |
354 | ```shell
355 | kubectl apply -f outlier-web-frontend.yaml
356 | ```
357 |
358 | If we repeat the test, we might get a similar distribution of responses the first time. However, if we repeat the command (once the outliers have been ejected), we'll get a much better distribution:
359 |
360 | ```shell
361 | kubectl exec deploy/fortio -c fortio -- \
362 | fortio load -c 2 -qps 0 -n 50 -quiet web-frontend
363 | ```
364 |
365 | ```console
366 | ...
367 | Code 200 : 50 (100.0 %)
368 | ```
369 |
370 | We get more HTTP 200 responses because, as soon as the failing hosts (the Pods from the `web-frontend-failing` deployment) are ejected, requests are sent only to the remaining healthy host. If we wait until the 60-second `baseEjectionTime` expires, the failing hosts are brought back into the load balancing pool, and we again see a distribution similar to before (with the majority of requests failing).
371 |
372 | We can also look at the metrics from the outlier detection in the same way we did for the circuit breakers:
373 |
374 | ```shell
375 | kubectl exec deploy/fortio -c istio-proxy -- \
376 | pilot-agent request GET stats | grep web-frontend | grep ejections_total
377 | ```
378 |
379 | This produces output similar to the following:
380 |
381 | ```console
382 | cluster.outbound|80||web-frontend.default.svc.cluster.local.outlier_detection.ejections_total: 4
383 | ```
384 |
385 | !!! Note
386 | Other metrics that we can look at are `ejections_consecutive_5xx`, `ejections_enforced_total` or any other metric with `outlier_detection` in its name. The full list of metric names and their descriptions can be found in the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#config-cluster-manager-cluster-stats-outlier-detection){target=_blank}.
387 |
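To watch ejections happen live, the `ejections_active` gauge is handy: it rises while hosts are ejected and returns to zero once they rejoin the pool (this assumes the standard Envoy outlier-detection stat names):

```shell
kubectl exec deploy/fortio -c istio-proxy -- \
  pilot-agent request GET stats | grep web-frontend | grep ejections_active
```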
388 | ## Cleanup
389 |
390 | To clean up resources created in this lab, run:
391 |
392 | ```shell
393 | kubectl delete destinationrule web-frontend
394 | kubectl delete -f web-frontend-failing.yaml
395 | kubectl delete -f fortio.yaml
396 | ```
397 |
--------------------------------------------------------------------------------
/docs/css/custom.css:
--------------------------------------------------------------------------------
1 |
2 | :root {
3 | /*
4 | the primary fg color is used not only for the header bg color,
5 | but also for the fg color of hyperlinks.
6 | making it black has the side effect of making links invisible.
7 | so for now going back to the default primary color
8 | */
9 | /* --md-primary-fg-color: #000717; */
10 | --md-accent-fg-color: #F05623;
11 | }
12 |
13 | [data-md-color-scheme="slate"] {
14 | --md-primary-fg-color: #F05623;
15 | --md-footer-logo-slate: block;
16 | --md-footer-logo-default: none;
17 | }
18 |
19 | [data-md-color-scheme="default"] {
20 | --md-primary-fg-color: #F05623;
21 | --md-footer-logo-slate: none;
22 | --md-footer-logo-default: block;
23 | }
24 |
25 | #logo_light_mode {
26 | display: var(--md-footer-logo-default);
27 | }
28 |
29 | #logo_dark_mode {
30 | display: var(--md-footer-logo-slate);
31 | }
32 |
33 |
34 |
35 |
36 | .gcp-blue {
37 | color: #3870e0;
38 | }
39 |
40 | /*
41 | .highlight.language-shell code::before {
42 | content: '$ ';
43 | }
44 | */
45 |
46 | /*
47 | requires adding to the page
48 | but material icons were already bundled with mkdocs-material ?
49 |
50 | a[target]::after {
51 | font-family: 'Material Icons';
52 | content: "\e89e";
53 | padding-left: 5px;
54 | }
55 | */
56 |
--------------------------------------------------------------------------------
/docs/dashboards.md:
--------------------------------------------------------------------------------
1 | # Observability
2 |
3 | This lab explores one of the main strengths of Istio: observability.
4 |
5 | The services in our mesh are automatically made observable, without placing any burden on devops teams.
6 |
7 | ## Deploy the Addons
8 |
9 | The Istio distribution includes addons for a number of systems that together provide observability for the service mesh:
10 |
11 | - [Zipkin](https://zipkin.io/){target=_blank} or [Jaeger](https://www.jaegertracing.io/){target=_blank} for distributed tracing
12 | - [Prometheus](https://prometheus.io/){target=_blank} for metrics collection
13 | - [Grafana](https://grafana.com/){target=_blank} provides dashboards for monitoring, using Prometheus as the data source
14 | - [Kiali](https://kiali.io/){target=_blank} allows us to visualize the mesh
15 |
16 | These addons are located in the `samples/addons/` folder of the distribution.
17 |
18 | 1. Navigate to the addons directory
19 |
20 | ```{.shell .language-shell}
21 | cd ~/istio-{{istio.version}}/samples/addons
22 | ```
23 |
24 | 1. Deploy each addon:
25 |
26 | ```{.shell .language-shell}
27 | kubectl apply -f prometheus.yaml
28 | ```
29 |
30 | ```{.shell .language-shell}
31 | kubectl apply -f grafana.yaml
32 | ```
33 |
34 | ```{.shell .language-shell}
35 | kubectl apply -f extras/zipkin.yaml
36 | ```
37 |
38 | ```{.shell .language-shell}
39 | kubectl apply -f kiali.yaml
40 | ```
41 |
42 | 1. To enable distributed tracing, we must explicitly define a provider, and enable it in the mesh, as follows:
43 |
44 | ```yaml linenums="1" title="trace-config.yaml"
45 | --8<-- "observability/trace-config.yaml"
46 | ```
47 |
48 | ```shell
49 | istioctl install -f observability/trace-config.yaml
50 | ```
51 |
52 |     Then apply the following Telemetry resource to enable tracing (and access logging) mesh-wide:
53 |
54 | ```yaml linenums="1" title="enable-tracing.yaml"
55 | --8<-- "observability/enable-tracing.yaml"
56 | ```
57 |
58 | ```shell
59 | kubectl apply -f observability/enable-tracing.yaml
60 | ```
61 |
62 | 1. Verify that the `istio-system` namespace is now running additional workloads for each of the addons.
63 |
64 | ```{.shell .language-shell}
65 | kubectl get pod -n istio-system
66 | ```
67 |
68 | The `istioctl` CLI provides convenience commands for accessing the web UIs for each dashboard.
69 |
70 | Take a moment to review the help information for the `istioctl dashboard` command:
71 |
72 | ```{.shell .language-shell}
73 | istioctl dashboard --help
74 | ```
75 |
76 | ## Generate a load
77 |
78 | In order to have something to observe, we need to generate a load on our system.
79 |
80 | Use a simple bash `while` loop to make repeated `curl` requests to the app:
81 |
82 | ```{.shell .language-shell}
83 | while true; do curl -I http://$GATEWAY_IP; sleep 0.5; done
84 | ```
85 |
86 | The curl requests will run in the foreground.
87 | It may be simplest to obtain a new shell prompt by opening a second, separate terminal.
88 |
89 | ## Kiali
90 |
91 | Launch the Kiali dashboard:
92 |
93 | ```{.shell .language-shell}
94 | istioctl dashboard kiali
95 | ```
96 |
97 | !!! warning
98 |
99 | If the dashboard page fails to open, just click on the hyperlink in the console output.
100 |
101 | !!! note
102 |
103 | The `istioctl dashboard` command also blocks.
104 | Leave it running until you're finished using the dashboard, at which time
105 | press ++ctrl+c++ to interrupt the process and get back to the terminal prompt.
106 |
107 | The Kiali dashboard displays.
108 |
109 | Customize the view as follows:
110 |
111 | 1. Select the _Traffic Graph_ section from the sidebar.
112 | 1. Under _Select Namespaces_ (at the top of the page), select the `default` namespace, the location where the application's pods are running.
113 | 1. From the third "pulldown" menu, select _App graph_.
114 | 1. From the _Display_ "pulldown", toggle on _Traffic Animation_ and _Security_.
115 | 1. From the footer, toggle the legend so that it is visible. Take a moment to familiarize yourself with the legend.
116 |
117 | Observe the visualization and note the following:
118 |
119 | - We can see traffic coming in through the ingress gateway to the `web-frontend`, and the subsequent calls from the `web-frontend` to the `customers` service.
120 | - The lines connecting the services are green, indicating healthy requests.
121 | - The small lock icon on each edge in the graph indicates that the traffic is secured with mutual TLS.
122 |
123 | Such visualizations are helpful with understanding the flow of requests in the mesh, and with diagnosis.
124 |
125 | Feel free to spend more time exploring Kiali.
126 |
127 | We will revisit Kiali in a later lab to visualize traffic shifting such as when performing a blue-green or canary deployment.
128 |
129 | ### Kiali Cleanup
130 |
131 | Close the Kiali dashboard. Interrupt the `istioctl dashboard kiali` command by pressing ++ctrl+c++.
132 |
133 |
134 | ## Zipkin
135 |
136 | Launch the Zipkin dashboard:
137 |
138 | ```{.shell .language-shell}
139 | istioctl dashboard zipkin
140 | ```
141 |
142 | The Zipkin dashboard displays.
143 |
144 | - Click on the red '+' button and select _serviceName_.
145 | - Select the service named `web-frontend.default` and click on the (light blue) _Run Query_ button on the right.
146 |
147 | A number of query results will display. Each row is expandable and will display more detail in terms of the services participating in that particular trace.
148 |
149 | - Click the _Show_ button to the right of one of the traces having four (4) spans.
150 |
151 | The resulting view shows the spans that are part of the trace and, more importantly, how much time was spent within each span. Such information can help diagnose slow requests and pinpoint where the latency lies.
152 |
153 | Distributed tracing also helps us make sense of the flow of requests in a microservice architecture.
154 |
155 | ### Zipkin Cleanup
156 |
157 | Close the Zipkin dashboard. Interrupt the `istioctl dashboard zipkin` command with ++ctrl+c++.
158 |
159 |
160 | ## Prometheus
161 |
162 | Prometheus works by periodically calling a metrics endpoint on each running service (this endpoint is termed the "scrape" endpoint). Developers normally have to instrument their applications to expose such an endpoint and return metrics in the format that Prometheus expects.
163 |
164 | With Istio, this is done automatically by the Envoy sidecar.
165 |
166 | ### Observe how Envoy exposes a Prometheus scrape endpoint
167 |
168 | 1. Run the following command:
169 |
170 | ```{.shell .language-shell}
171 | kubectl exec svc/web-frontend -- wget -qO - localhost:15020/stats/prometheus \
172 | | grep istio_requests
173 | ```
174 |
175 | !!! info "Why port 15020?"
176 |
177 | See [Ports used by Istio](https://istio.io/latest/docs/ops/deployment/application-requirements/#ports-used-by-istio){target=_blank} sidecar proxy.
178 |
179 |
180 | The list of metrics returned by the endpoint is rather lengthy, so we just peek at the `istio_requests` metric. The full response contains many more metrics.
181 |
182 | ### Access the dashboard
183 |
184 | 1. Start the Prometheus dashboard:
185 |
186 | ```{.shell .language-shell}
187 | istioctl dashboard prometheus
188 | ```
189 |
190 | 1. In the search field enter the metric named `istio_requests_total`, and click the _Execute_ button (on the right).
191 |
192 | 1. Select the tab named _Graph_ to obtain a graphical representation of this metric over time.
193 |
194 | Note that you are looking at requests across the entire mesh, i.e. this includes both requests to `web-frontend` and to `customers`.
195 |
196 | 1. As an example of Prometheus' dimensional metrics capability, we can ask for total requests having a response code of 200:
197 |
198 | ```text
199 | istio_requests_total{response_code="200"}
200 | ```
201 |
202 | 1. With respect to requests, it's more interesting to look at the rate of incoming requests over a time window. Try:
203 |
204 | ```text
205 | rate(istio_requests_total[5m])
206 | ```
207 |
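To break the request rate down by service, we can aggregate over the metric's labels, for example (assuming the standard Istio label `destination_service`):

```text
sum(rate(istio_requests_total[5m])) by (destination_service)
```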
208 | There's much more to the Prometheus query language ([this](https://prometheus.io/docs/prometheus/latest/querying/basics/){target=_blank} may be a good place to start).
209 |
210 | Grafana consumes these metrics to produce graphs on our behalf.
211 |
212 | Close the Prometheus dashboard and terminate the corresponding `istioctl dashboard` command.
213 |
214 | ## Grafana
215 |
216 | 1. Launch the Grafana dashboard
217 |
218 | ```{.shell .language-shell}
219 | istioctl dashboard grafana
220 | ```
221 |
222 | 1. From the sidebar, select _Dashboards_
223 | 1. Click on the folder named _Istio_ to reveal pre-designed Istio-specific Grafana dashboards
224 | 1. Explore the _Istio Mesh Dashboard_. Note the Global Request Volume and Global Success Rate.
225 | 1. Explore the _Istio Service Dashboard_. First select the service `web-frontend` and inspect its metrics, then switch to the `customers` service and review its dashboard.
226 | 1. Explore the _Istio Workload Dashboard_. Select the `web-frontend` workload. Look at Outbound Services and note the outbound requests to the customers service. Select the `customers` workload and note that it makes no Outbound Services calls.
227 |
228 | Feel free to further explore these dashboards.
229 |
230 | ## Cleanup
231 |
232 | 1. Terminate the `istioctl dashboard` command (++ctrl+c++)
233 | 1. Likewise, terminate the bash while loop.
234 |
235 | ## Next
236 |
237 | We turn our attention next to security features of a service mesh.
238 |
--------------------------------------------------------------------------------
/docs/discovery.md:
--------------------------------------------------------------------------------
1 | # Service discovery and load balancing
2 |
3 | This lab is a standalone exploration of service discovery and load balancing in Istio.
4 |
5 | ## Clusters and endpoints
6 |
7 | The `istioctl` CLI's diagnostic command `proxy-status` provides a simple way to list all proxies that Istio knows about.
8 |
9 | Run and study the output of the `proxy-status` command:
10 |
11 | ```shell
12 | istioctl proxy-status
13 | ```
14 |
15 | Since we have not yet deployed any workloads, the output should be rather anemic, citing only the lone ingress gateway that was deployed when we installed Istio in the previous lab.
16 |
17 | ## Enable automatic sidecar injection
18 |
19 | There are two options for [sidecar injection](https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/){target=_blank}: automatic and manual.
20 |
21 | In this lab we will use automatic injection, which involves labeling the namespace where the pods are to reside.
22 |
23 | 1. Label the default namespace
24 |
25 | ```{.shell .language-shell}
26 | kubectl label namespace default istio-injection=enabled
27 | ```
28 |
29 | 1. Verify that the label has been applied:
30 |
31 | ```{.shell .language-shell}
32 | kubectl get ns -Listio-injection
33 | ```
34 |
35 | ## Deploy the `helloworld` sample
36 |
37 | The Istio distribution comes with a sample application "helloworld".
38 |
39 | ```shell
40 | cd ~/istio-{{istio.version}}
41 | ```
42 |
43 | Deploy `helloworld` to the default namespace.
44 |
45 | ```shell
46 | kubectl apply -f samples/helloworld/helloworld.yaml
47 | ```
48 |
49 | Check the output of `proxy-status` again:
50 |
51 | ```shell
52 | istioctl proxy-status
53 | ```
54 |
55 | Confirm that the two `helloworld` workloads are listed and marked as "SYNCED".
56 |
57 | While we're here, let's also deploy the sample app `sleep`, which will serve as a client from which we can call the `helloworld` app:
58 |
59 | ```shell
60 | kubectl apply -f samples/sleep/sleep.yaml
61 | ```
62 |
63 |
64 | ## The service registry
65 |
66 | Istio maintains an internal service registry, which can be observed through the debug endpoint `/debug/registryz` exposed by `istiod`.
67 |
68 | `curl` the registry endpoint:
69 |
70 | ```shell
71 | kubectl exec -n istio-system deploy/istiod -- \
72 | curl -s localhost:15014/debug/registryz
73 | ```
74 |
75 | The output can be prettified and filtered (to highlight the list of host names in the registry) with a tool such as [`jq`](https://stedolan.github.io/jq/){target=_blank}:
76 |
77 | ```shell
78 | kubectl exec -n istio-system deploy/istiod -- \
79 | curl -s localhost:15014/debug/registryz | jq '.[].hostname'
80 | ```
81 |
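The filtered output includes entries like these (abridged; your registry will also list cluster-internal services):

```console
"helloworld.default.svc.cluster.local"
"sleep.default.svc.cluster.local"
```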
82 | Confirm that the `helloworld` service is listed in the output.
83 |
84 | ## The sidecar configuration
85 |
86 | Review the deployments in the `default` namespace:
87 |
88 | ```shell
89 | kubectl get deploy
90 | ```
91 |
92 | The `istioctl` CLI's diagnostic command `proxy-config` will help us inspect the configuration of proxies.
93 |
94 | Envoy's term for a service is "cluster".
95 |
96 | Confirm that `sleep` knows about other services (`helloworld`, mainly):
97 |
98 | ```shell
99 | istioctl proxy-config clusters deploy/sleep
100 | ```
101 |
102 | List the endpoints backing each "cluster":
103 |
104 | ```shell
105 | istioctl proxy-config endpoints deploy/sleep
106 | ```
107 |
108 | Zero in on the endpoints for the `helloworld` service:
109 |
110 | ```shell
111 | istioctl proxy-config endpoints deploy/sleep \
112 | --cluster "outbound|5000||helloworld.default.svc.cluster.local"
113 | ```
114 |
115 | We learn that Istio has communicated to the `sleep` workload information about both `helloworld` endpoints.
116 |
117 | ## Load balancing
118 |
119 | The `sleep` pod's container image has `curl` pre-installed.
120 |
121 | Make repeated calls to the `helloworld` service from the `sleep` pod:
122 |
123 | ```shell
124 | for i in {1..3}; do
125 | kubectl exec deploy/sleep -- curl -s helloworld:5000/hello
126 | done
127 | ```
128 |
129 | Some responses will be from `helloworld-v1` while others from `helloworld-v2`, an indication that Envoy is load-balancing requests between these two endpoints.
130 |
131 | Envoy does not use the ClusterIP service. It performs client-side load-balancing using the endpoints you resolved above.
132 |
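A quick way to convince yourself: list the Pod IPs behind the Kubernetes service and compare them with the endpoints Envoy reported above; they should match:

```shell
kubectl get endpoints helloworld
```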
133 | We can examine the `helloworld` "cluster" definition in a sample client to see what load balancing policy is in effect:
134 |
135 | ```shell
136 | istioctl proxy-config cluster deploy/sleep \
137 | --fqdn helloworld.default.svc.cluster.local -o yaml | grep lbPolicy
138 | ```
139 |
140 | To influence the load balancing algorithm that Envoy uses when calling `helloworld`, we can define a [traffic policy](https://istio.io/latest/docs/reference/config/networking/destination-rule/#LoadBalancerSettings-SimpleLB){target=_blank}, like so:
141 |
142 | ```yaml linenums="1" title="helloworld-lb.yaml"
143 | --8<-- "discovery/helloworld-lb.yaml"
144 | ```
145 |
146 | Apply the above traffic policy to the cluster:
147 |
148 | ```shell
149 | kubectl apply -f helloworld-lb.yaml
150 | ```
151 |
152 | Examine the updated load-balancer policy:
153 |
154 | ```shell
155 | istioctl proxy-config cluster deploy/sleep \
156 | --fqdn helloworld.default.svc.cluster.local -o yaml | grep lbPolicy
157 | ```
158 |
159 | Confirm that it now reads "RANDOM".
160 |
161 | For more insight into the merits of the different load balancing options, read the blog entry [Examining Load Balancing Algorithms with Envoy](https://blog.envoyproxy.io/examining-load-balancing-algorithms-with-envoy-1be643ea121c){target=_blank} from the Envoy Proxy blog.
162 |
163 |
164 | ## Traffic distribution
165 |
166 | We can go a step further and control how much traffic to send to version v1 and how much to v2.
167 |
168 | First, define the two subsets, v1 and v2:
169 |
170 | ```yaml linenums="1" title="helloworld-dr.yaml"
171 | --8<-- "discovery/helloworld-dr.yaml"
172 | ```
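   |
   | In essence, the destination rule declares two subsets keyed on the `version` label (a sketch; the actual file may also retain the load balancer policy from the previous section):
   |
   | ```yaml
   | apiVersion: networking.istio.io/v1alpha3
   | kind: DestinationRule
   | metadata:
   |   name: helloworld
   | spec:
   |   host: helloworld.default.svc.cluster.local
   |   subsets:
   |   - name: v1
   |     labels:
   |       version: v1
   |   - name: v2
   |     labels:
   |       version: v2
   | ```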
173 |
174 | Apply the updated destination rule to the cluster:
175 |
176 | ```shell
177 | kubectl apply -f helloworld-dr.yaml
178 | ```
179 |
180 | If we now inspect the list of clusters, note that there's one for each subset:
181 |
182 | ```shell
183 | istioctl proxy-config cluster deploy/sleep \
184 | --fqdn helloworld.default.svc.cluster.local
185 | ```
186 |
187 | With the subsets defined, we turn our attention to the routing specification. We use a [VirtualService](https://istio.io/latest/docs/reference/config/networking/virtual-service/){target=_blank}, in this case to direct 25% of traffic to v1 and 75% to v2:
188 |
189 | ```yaml linenums="1" title="helloworld-vs.yaml"
190 | --8<-- "discovery/helloworld-vs.yaml"
191 | ```
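   |
   | In essence, the routing specification reads as follows (a sketch, assuming the subset names `v1` and `v2` defined above):
   |
   | ```yaml
   | apiVersion: networking.istio.io/v1alpha3
   | kind: VirtualService
   | metadata:
   |   name: helloworld
   | spec:
   |   hosts:
   |   - helloworld.default.svc.cluster.local
   |   http:
   |   - route:
   |     - destination:
   |         host: helloworld.default.svc.cluster.local
   |         subset: v1
   |       weight: 25
   |     - destination:
   |         host: helloworld.default.svc.cluster.local
   |         subset: v2
   |       weight: 75
   | ```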
192 |
193 | Apply the VirtualService to the cluster:
194 |
195 | ```shell
196 | kubectl apply -f helloworld-vs.yaml
197 | ```
198 |
199 | Finally, we can inspect the routing rules applied to an Envoy client with our `proxy-config` diagnostic command:
200 |
201 | ```shell
202 | istioctl proxy-config routes deploy/sleep --name 5000 -o yaml
203 | ```
204 |
205 | Note the `weightedClusters` section in the routes output.
206 |
207 | The `istioctl` CLI provides a convenient command to inspect the configuration of a service:
208 |
209 | ```shell
210 | istioctl x describe svc helloworld
211 | ```
212 |
213 | I think you'll agree the output of the `istioctl x describe` command is a little easier to parse in comparison.
214 |
--------------------------------------------------------------------------------
/docs/environment.md:
--------------------------------------------------------------------------------
1 | # Lab environment
2 |
3 | ## Options
4 |
5 | === "BYO K8S"
6 |
7 | If you brought your own Kubernetes cluster:
8 |
9 | - Istio version {{istio.version}} officially supports Kubernetes versions 1.27 - 1.30. Feel free to consult the Istio [support status of Istio releases page](https://istio.io/latest/docs/releases/supported-releases/#support-status-of-istio-releases){target=_blank} for more information.
10 |
11 | - We recommend a 3-worker node cluster of machine type "e2-standard-2" or similar, though a smaller cluster will likely work just fine.
12 |
13 | If you have your own public cloud account:
14 |
15 | - On GCP, the following command should provision a GKE cluster of adequate size for the workshop:
16 |
17 | ```shell
18 | gcloud container clusters create my-istio-cluster \
19 | --cluster-version latest \
20 | --machine-type "e2-standard-2" \
21 | --num-nodes "3" \
22 | --network "default"
23 | ```
24 |
25 | - **Feel free to provision a K8S cluster on any infrastructure of your choosing.**
26 |
27 | Be sure to configure your `kubeconfig` file to point to your cluster.
28 |
29 | === "GCP"
30 |
31 | If you received Google credentials from the workshop instructors:
32 |
33 | - A Kubernetes cluster has already been provisioned for you.
34 | - Your instructor will demonstrate the process of accessing and configuring your environment, described below.
35 | - The instructions below explain in detail how to access your account, select your project, and launch the cloud shell.
36 |
37 | ## Log in to GCP
38 |
39 | 1. Log in to [GCP](https://console.cloud.google.com/){target=_blank} using credentials provided by your instructor.
40 | 1. Agree to the terms
41 | 1. When prompted to select your country, click "Agree and continue"
42 |
43 | ## Select your project
44 |
45 | Select the GCP project you have been assigned, as follows:
46 |
47 | 1. Click the project selector "pulldown" menu from the top banner, which will open a popup dialog
48 | 1. Make sure the _Select from_ organization is set to _tetratelabs.com_
49 | 1. Select the tab named _All_
50 | 1. You will see your GCP project name (istio-0to60..) listed under the organization tetratelabs.com
51 | 1. Select the project from the list
52 |
53 | Verify that your project is selected:
54 |
55 | - If you look in the banner now, you will see your selected project displayed.
56 |
57 | ## Launch the Cloud Shell
58 |
59 | The Google Cloud Shell will serve as your terminal environment for these labs.
60 |
61 | - Click the _Activate cloud shell_ icon (top right); the icon looks like this: :material-console:{.gcp-blue}
62 | - A dialog may pop up, click _Continue_
63 | - Your cloud shell terminal should appear at the bottom of the screen
64 | - Feel free to expand the size of the cloud shell, or even open it in a separate window (locate the icon button :material-open-in-new: in the terminal header, on the right)
65 |
66 | !!! warning
67 |
68 | Your connection to the Cloud Shell gets severed after a period of inactivity.
69 | Click on the _Reconnect_ button when this happens.
70 |
71 | ## Configure cluster access
72 |
73 | 1. Check that the `kubectl` CLI is installed
74 |
75 | ```{.shell .language-shell}
76 |    kubectl version
77 | ```
78 |
79 | 1. Generate a `kubeconfig` entry
80 |
81 | === "With the user interface"
82 |
83 | 1. Activate the top navigation menu (++context-menu++ icon on the top left hand side of the page)
84 | 1. Locate and click on the product _Kubernetes Engine_ (you may have to scroll down until you see it)
85 | 1. Your pre-provisioned 3-node Kubernetes cluster should appear in the main view
86 | 1. Click on that row's "three dot" menu and select the _Connect_ option
87 | 1. A dialog prompt will appear with instructions
88 | 1. Copy the `gcloud` command shown and paste it in your cloud shell
89 |
90 | === "From the command line"
91 |
92 | ```{.shell .language-shell}
93 | gcloud container clusters get-credentials \
94 | $(gcloud container clusters list --format="value(name)") \
95 | --zone $(gcloud container clusters list --format="value(location)") \
96 | --project $(gcloud config get-value project)
97 | ```
98 |
99 | Click _Authorize_ when prompted
100 |
101 | The console message will state that a _kubeconfig entry [was] generated for [your project]_
102 |
103 | 1. Verify that your Kubernetes context is set for your cluster
104 |
105 | ```{.shell .language-shell}
106 | kubectl config get-contexts
107 | ```
108 |
109 | 1. Run a simple command such as `kubectl get node` or `kubectl get ns` to ensure that you can communicate with the Kubernetes API Server.
110 |
111 | ```{.shell .language-shell}
112 | kubectl get ns
113 | ```
114 |
115 | Instructions in subsequent labs assume you will be working from the Google Cloud Shell.
116 |
117 | === "Killercoda"
118 |
119 |     If you prefer to forgo setting up your own Kubernetes environment, Killercoda offers a simple browser-based interactive environment. The Istio 0 to 60 scenarios have been ported to Killercoda and can be launched from [here](https://killercoda.com/eitansuez/){target=_blank}.
120 |
121 | If you choose this option, please disregard this page's remaining instructions.
122 |
123 | === "Local"
124 |
125 |     Yet another option is to run a Kubernetes cluster on your local machine using Minikube, Kind, or similar tooling. This option entails minimum resource (CPU and memory) requirements, *and* you will need to ensure that ingress to LoadBalancer-type services functions. Here is a recipe for creating a local Kubernetes cluster with [k3d](https://k3d.io/):
126 |
127 | ```shell
128 | k3d cluster create my-istio-cluster \
129 | --api-port 6443 \
130 | --k3s-arg "--disable=traefik@server:0" \
131 | --port 80:80@loadbalancer \
132 | --port 443:443@loadbalancer
133 | ```
134 |
135 | !!! tip
136 |
137 | This workshop makes extensive use of the `kubectl` CLI.
138 |
139 | Consider configuring an alias to make typing a little easier. Here are commands to configure the "k" alias with command completion, for the bash shell:
140 |
141 | ```shell
142 | cat << EOF >> ~/.bashrc
143 |
144 | source <(kubectl completion bash)
145 | alias k=kubectl
146 | complete -F __start_kubectl k
147 |
148 | EOF
149 |
150 | source ~/.bashrc
151 | ```
152 |
153 |
154 | ## Artifacts
155 |
156 | The lab instructions reference Kubernetes yaml artifacts that you will need to apply to your cluster at specific points in time.
157 |
158 | You have the option of copying and pasting the yaml snippets directly from the lab instructions as you encounter them.
159 |
160 | Another option is to clone the [GitHub repository for this workshop](https://github.com/tetratelabs/istio-0to60){target=_blank} from the Cloud Shell. You will find all yaml artifacts in the subdirectory named `artifacts`.
161 |
162 | ```shell
163 | git clone https://github.com/tetratelabs/istio-0to60.git && \
164 | mv istio-0to60/artifacts . && \
165 | rm -rf istio-0to60
166 | ```
167 |
168 | ## Next
169 |
170 | Now that we have access to our environment and to our Kubernetes cluster, we can proceed to install Istio.
171 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Welcome!
2 |
3 | Welcome to the Istio 0 to 60 workshop!
4 |
5 | On this site you will find the hands-on labs for the workshop.
6 |
7 | In the first lab, we walk you through accessing and configuring your [lab environment](environment.md).
8 |
9 | Let's begin.
10 |
--------------------------------------------------------------------------------
/docs/ingress-gwapi.md:
--------------------------------------------------------------------------------
1 | # Ingress with the Kubernetes Gateway API
2 |
3 | As in the previous lab, the objective is to expose the `web-frontend` service to the internet.
4 |
5 | Rather than use the Istio native `Gateway` and `VirtualService` resources, in this lab the implementation leverages the [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io/){target=_blank}.
6 |
7 | !!! warning "When using K3D"
8 |
9 |     This lab is not yet compatible with a local Kubernetes cluster set up using K3D.
10 |
11 | ## Prerequisites
12 |
13 | Install the Kubernetes Gateway API Custom Resource Definitions (CRDs):
14 |
15 | ```shell
16 | kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl apply -f -
17 | ```
18 |
19 | ## The Ingress gateway
20 |
21 | When you installed Istio, in addition to deploying `istiod` to Kubernetes, the installation also provisioned an Ingress Gateway.
22 |
23 | Unlike with Istio, when using the Kubernetes Gateway API a gateway is deployed implicitly when a `Gateway` resource is applied to the Kubernetes cluster.
24 |
25 | That is, this solution will not utilize the ingress gateway already deployed and running in the `istio-system` namespace.
26 |
27 | ## Configuring the Gateway
28 |
29 | The K8S Gateway API preserves Istio's design of separating gateway configuration from routing concerns by using two distinct resources. The Gateway API, of course, defines its own CRDs:
30 |
31 | 1. `Gateway` (although the resource name is the same, the `apiVersion` field is `gateway.networking.k8s.io/v1beta1`)
32 | 1. `HttpRoute`
33 |
34 | ### Create a Gateway resource
35 |
36 | 1. Review the following Gateway specification.
37 |
38 | !!! tldr "k8s-gw.yaml"
39 | ```yaml linenums="1"
40 | --8<-- "ingress/k8s-gw.yaml"
41 | ```
42 |
43 | Above, we specify the HTTP protocol and port 80. We leave the optional `hostname` field blank, to allow any ingress request to match. This is similar to using a wildcard ("*") host matcher in Istio.
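   |
   |    For orientation, such a Gateway amounts to the following sketch (assumptions: the resource name `frontend-gateway` used by the commands below, and the `istio` gateway class implied by the `frontend-gateway-istio` deployment we inspect shortly):
   |
   |    ```yaml
   |    apiVersion: gateway.networking.k8s.io/v1beta1
   |    kind: Gateway
   |    metadata:
   |      name: frontend-gateway
   |    spec:
   |      gatewayClassName: istio
   |      listeners:
   |      - name: http
   |        protocol: HTTP
   |        port: 80
   |    ```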
44 |
45 | 1. Apply the gateway resource to your cluster.
46 |
47 | ```{.shell .language-shell}
48 | kubectl apply -f k8s-gw.yaml
49 | ```
50 |
51 | Before proceeding, wait until the associated Gateway deployment is provisioned:
52 |
53 | ```shell
54 | kubectl wait --for=condition=programmed gtw frontend-gateway
55 | ```
56 |
57 | Note above how the short name for this Gateway resource is `gtw` (Istio's Gateway resource's short name is `gw`).
58 |
59 | ### Verify the Gateway deployment
60 |
61 | Note that a gateway deployment by the name of `frontend-gateway-istio` has been created:
62 |
63 | ```{.shell .language-shell}
64 | kubectl get deploy
65 | ```
66 |
67 | A corresponding _LoadBalancer_ type service was also created:
68 |
69 | ```{.shell .language-shell}
70 | kubectl get svc
71 | ```
72 |
73 | Make a note of the external IP address for the load balancer.
74 |
75 | Assign it to an environment variable.
76 |
77 | ```{.shell .language-shell}
78 | export GATEWAY_IP=$(kubectl get svc frontend-gateway-istio -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
79 | ```
80 |
81 | ??? tip ":material-console:{.gcp-blue} A small investment"
82 |
83 | When the cloud shell connection is severed, or when opening a new terminal tab, `$GATEWAY_IP` will no longer be in scope.
84 |
85 | Ensure `GATEWAY_IP` is set each time we start a new shell:
86 |
87 | ```{.shell .language-shell}
88 | cat << EOF >> ~/.bashrc
89 |
90 | export GATEWAY_IP=$(kubectl get svc frontend-gateway-istio -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
91 |
92 | EOF
93 | ```
94 |
95 | In normal circumstances we associate this IP address with a hostname via DNS.
96 | For the sake of simplicity, in this workshop we use the gateway public IP address directly.
97 |
98 | ## Configure routing
99 |
100 | Attempt an HTTP request in your browser to the gateway IP address. It should return a 404 (not found).
101 |
102 | Let us "fix" that issue by defining a route to the `web-frontend` service.
103 |
104 | 1. Review the following `HttpRoute` specification.
105 |
106 | ???+ tldr "web-frontend-route.yaml"
107 | ```yaml linenums="1"
108 | --8<-- "ingress/web-frontend-route.yaml"
109 | ```
110 |
111 |    Note how this specification references the gateway by name ("frontend-gateway"). Since it specifies no hostnames, all requests, irrespective of host name, are directed to the `web-frontend` service.
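   |
   |    In outline, the route amounts to the following sketch (note that the canonical spelling of the kind is `HTTPRoute`; the `web-frontend` service port of 80 is an assumption):
   |
   |    ```yaml
   |    apiVersion: gateway.networking.k8s.io/v1beta1
   |    kind: HTTPRoute
   |    metadata:
   |      name: web-frontend
   |    spec:
   |      parentRefs:
   |      - name: frontend-gateway
   |      rules:
   |      - backendRefs:
   |        - name: web-frontend
   |          port: 80
   |    ```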
112 |
113 | 1. Apply the `HttpRoute` resource to your cluster.
114 |
115 | ```{.shell .language-shell}
116 | kubectl apply -f web-frontend-route.yaml
117 | ```
118 |
119 | 1. List HTTP routes in the default namespace.
120 |
121 | ```{.shell .language-shell}
122 | kubectl get httproute
123 | ```
124 |
125 |    The output indicates that the HTTP route named `web-frontend` is bound to the gateway and, having no hostname restriction, matches any hostname that routes to the load balancer IP address.
126 |
127 | Finally, verify that you can now access `web-frontend` from your web browser using the gateway IP address.
128 |
129 |
130 |
131 | !!! question "What if I wanted to configure ingress with TLS?"
132 |
133 | Here is a recipe that illustrates how to configure secure ingress with a self-signed certificate:
134 |
135 | 1. Generate the certificate:
136 |
137 | 1. Generate a self-signed root certificate in the folder `example_certs`
138 |
139 | ```{.shell .language-shell}
140 | mkdir example_certs
141 | openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -subj '/O=example Inc./CN=example.com' -keyout example_certs/example.com.key -out example_certs/example.com.crt
142 | ```
143 |
144 | 1. Generate a certificate and private key for the hostname `webfrontend.example.com`:
145 |
146 | ```{.shell .language-shell}
147 | openssl req -out example_certs/webfrontend.example.com.csr -newkey rsa:2048 -nodes -keyout example_certs/webfrontend.example.com.key -subj "/CN=webfrontend.example.com/O=webfrontend organization"
148 | openssl x509 -req -sha256 -days 365 -CA example_certs/example.com.crt -CAkey example_certs/example.com.key -set_serial 0 -in example_certs/webfrontend.example.com.csr -out example_certs/webfrontend.example.com.crt
149 | ```
150 |
151 | 1. Store the certificate as a secret in your Kubernetes cluster:
152 |
153 | ```{.shell .language-shell}
154 | kubectl create -n default secret tls webfrontend-credential \
155 | --key=example_certs/webfrontend.example.com.key \
156 | --cert=example_certs/webfrontend.example.com.crt
157 | ```
158 |
159 |     1. Revise the gateway configuration to listen on port 443, and to reference the secret containing the certificate that the Envoy listeners will present to incoming requests:
160 |
161 | ```yaml linenums="1" hl_lines="9-15"
162 | --8<-- "ingress/k8s-gw-tls.yaml"
163 | ```
164 |
165 | 1. Apply the revised gateway configuration:
166 |
167 | ```{.shell .language-shell}
168 | kubectl apply -f k8s-gw-tls.yaml
169 | ```
170 |
171 | 1. Test your implementation by making a request to the ingress gateway:
172 |
173 | ```{.shell .language-shell}
174 | curl -s -v --head -k https://webfrontend.example.com/ --resolve webfrontend.example.com:443:$GATEWAY_IP
175 | ```
176 |
177 | See the [Istio documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/){target=_blank} for additional examples relating to the topic of configuring secure gateways.
178 |
179 | ## Next
180 |
181 | The application is now running and exposed on the internet.
182 |
183 | In the next lab, we turn our attention to the observability features that are built in to Istio.
184 |
--------------------------------------------------------------------------------
/docs/ingress.md:
--------------------------------------------------------------------------------
1 | # Ingress
2 |
3 | The objective of this lab is to expose the `web-frontend` service to the internet.
4 |
5 | ## The Ingress gateway
6 |
7 | When you installed Istio, in addition to deploying `istiod` to Kubernetes, the installation also provisioned an Ingress Gateway.
8 |
9 | View the corresponding Istio ingress gateway pod in the `istio-system` namespace.
10 |
11 | ```{.shell .language-shell}
12 | kubectl get pod -n istio-system
13 | ```
14 |
15 | A corresponding _LoadBalancer_ type service was also created:
16 |
17 | ```{.shell .language-shell}
18 | kubectl get svc -n istio-system
19 | ```
20 |
21 | Make a note of the external IP address for the load balancer.
22 |
23 | Assign it to an environment variable.
24 |
25 | ```{.shell .language-shell}
26 | export GATEWAY_IP=$(kubectl get svc -n istio-system istio-ingressgateway \
27 | -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
28 | ```
29 |
30 | !!! warning "When using K3D"
31 |
32 | If you have opted to run Kubernetes directly on your local machine with K3D, use "127.0.0.1" instead:
33 |
34 | ```{.shell .language-shell}
35 | export GATEWAY_IP=127.0.0.1
36 | ```
37 |
38 | ??? tip ":material-console:{.gcp-blue} A small investment"
39 |
40 | When the cloud shell connection is severed, or when opening a new terminal tab, `$GATEWAY_IP` will no longer be in scope.
41 |
42 | Ensure `GATEWAY_IP` is set each time we start a new shell:
43 |
44 | ```{.shell .language-shell}
45 | cat << EOF >> ~/.bashrc
46 |
47 | export GATEWAY_IP=$(kubectl get svc -n istio-system istio-ingressgateway -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
48 |
49 | EOF
50 | ```
51 |
52 | In normal circumstances we associate this IP address with a hostname via DNS.
53 | For the sake of simplicity, in this workshop we will use the gateway public IP address directly.
54 |
55 | ## Configuring ingress
56 |
57 | Configuring ingress with Istio is performed in two parts:
58 |
59 | 1. Define a `Gateway` Custom Resource that governs the specific host, port, and protocol to expose.
60 | 1. Specify how requests should be routed with a `VirtualService` Custom Resource.
61 |
62 | ### Create a Gateway resource
63 |
64 | 1. Review the following Gateway specification.
65 |
66 | !!! tldr "gateway.yaml"
67 | ```yaml linenums="1"
68 | --8<-- "ingress/gateway.yaml"
69 | ```
70 |
71 | Above, we specify the HTTP protocol, port 80, and a wildcard ("*") host matcher which ensures that HTTP requests using the load balancer IP address `$GATEWAY_IP` will match.
72 |
73 | The selector _istio: ingressgateway_ selects the Envoy gateway workload to be configured, the one residing in the `istio-system` namespace.
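   |
   |    In outline, the gateway amounts to the following sketch (the resource name `frontend-gateway` matches the reference in the VirtualService below):
   |
   |    ```yaml
   |    apiVersion: networking.istio.io/v1alpha3
   |    kind: Gateway
   |    metadata:
   |      name: frontend-gateway
   |    spec:
   |      selector:
   |        istio: ingressgateway
   |      servers:
   |      - port:
   |          number: 80
   |          name: http
   |          protocol: HTTP
   |        hosts:
   |        - "*"
   |    ```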
74 |
75 | 1. Apply the gateway resource to your cluster.
76 |
77 | ```{.shell .language-shell}
78 | kubectl apply -f gateway.yaml
79 | ```
80 |
81 | 1. Attempt an HTTP request in your browser to the gateway IP address.
82 |
83 | ```shell
84 | curl -sv http://$GATEWAY_IP/ | head
85 | ```
86 |
87 | It should return a 404: not found.
88 |
89 | ### Create a VirtualService resource
90 |
91 | 1. Review the following VirtualService specification.
92 |
93 | ???+ tldr "web-frontend-virtualservice.yaml"
94 | ```yaml linenums="1"
95 | --8<-- "ingress/web-frontend-virtualservice.yaml"
96 | ```
97 |
98 |    Note how this specification references the gateway by name ("frontend-gateway"), matches any host ("*"), and specifies a route directing requests to the `web-frontend` service.
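   |
   |    In outline (a sketch of the essentials):
   |
   |    ```yaml
   |    apiVersion: networking.istio.io/v1alpha3
   |    kind: VirtualService
   |    metadata:
   |      name: web-frontend
   |    spec:
   |      hosts:
   |      - "*"
   |      gateways:
   |      - frontend-gateway
   |      http:
   |      - route:
   |        - destination:
   |            host: web-frontend
   |    ```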
99 |
100 | 1. Apply the VirtualService resource to your cluster.
101 |
102 | ```{.shell .language-shell}
103 | kubectl apply -f web-frontend-virtualservice.yaml
104 | ```
105 |
106 | 1. List virtual services in the default namespace.
107 |
108 | ```{.shell .language-shell}
109 | kubectl get virtualservice
110 | ```
111 |
112 | The output indicates that the VirtualService named `web-frontend` is bound to the gateway `frontend-gateway` and matches any hostname that routes to the load balancer IP address.
113 |
114 | Finally, verify that you can now access `web-frontend` from your web browser using the gateway IP address.
115 |
116 |
117 |
118 | !!! question "What if I wanted to configure ingress with TLS?"
119 |
120 | Here is a recipe that illustrates how to configure secure ingress with a self-signed certificate:
121 |
122 | 1. Generate the certificate:
123 |
124 | 1. Generate a self-signed root certificate in the folder `example_certs`
125 |
126 | ```{.shell .language-shell}
127 | mkdir example_certs
128 | openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -subj '/O=example Inc./CN=example.com' -keyout example_certs/example.com.key -out example_certs/example.com.crt
129 | ```
130 |
131 | 1. Generate a certificate and private key for the hostname `webfrontend.example.com`:
132 |
133 | ```{.shell .language-shell}
134 | openssl req -out example_certs/webfrontend.example.com.csr -newkey rsa:2048 -nodes -keyout example_certs/webfrontend.example.com.key -subj "/CN=webfrontend.example.com/O=webfrontend organization"
135 | openssl x509 -req -sha256 -days 365 -CA example_certs/example.com.crt -CAkey example_certs/example.com.key -set_serial 0 -in example_certs/webfrontend.example.com.csr -out example_certs/webfrontend.example.com.crt
136 | ```
137 |
138 | 1. Store the certificate as a secret in your Kubernetes cluster:
139 |
140 | ```{.shell .language-shell}
141 | kubectl create -n istio-system secret tls webfrontend-credential \
142 | --key=example_certs/webfrontend.example.com.key \
143 | --cert=example_certs/webfrontend.example.com.crt
144 | ```
145 |
146 |     1. Revise the gateway configuration to listen on port 443, and to reference the secret containing the certificate that the Envoy listeners will present to incoming requests:
147 |
148 | ```yaml linenums="1" hl_lines="10-18"
149 | --8<-- "ingress/gateway-tls.yaml"
150 | ```
151 |
152 | 1. Apply the revised gateway configuration:
153 |
154 | ```{.shell .language-shell}
155 | kubectl apply -f gateway-tls.yaml
156 | ```
157 |
158 | 1. Test your implementation by making a request to the ingress gateway:
159 |
160 | ```{.shell .language-shell}
161 | curl -k https://webfrontend.example.com/ --resolve webfrontend.example.com:443:$GATEWAY_IP
162 | ```
163 |
164 | See the [Istio documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/){target=_blank} for additional examples relating to the topic of configuring secure gateways.
165 |
166 | ## Next
167 |
168 | The application is now running and exposed on the internet.
169 |
170 | In the next lab, we turn our attention to the observability features that are built into Istio.
171 |
--------------------------------------------------------------------------------
/docs/install.md:
--------------------------------------------------------------------------------
1 | # Install Istio
2 |
3 | In this lab you will install Istio.
4 |
5 |
6 | ## Download Istio
7 |
8 | 1. Run the following command from your home directory.
9 |
10 | ```{.shell .language-shell}
11 | curl -L https://istio.io/downloadIstio | ISTIO_VERSION={{istio.version}} sh -
12 | ```
13 |
14 | 1. Navigate into the directory created by the above command.
15 |
16 | ```{.shell .language-shell}
17 | cd istio-{{istio.version}}
18 | ```
19 |
20 |
21 | ## Add `istioctl` to your PATH
22 |
23 | The `istioctl` CLI is located in the `bin/` subdirectory.
24 |
25 | !!! note ":material-console:{.gcp-blue} Workaround for the Google Cloud Shell"
26 |
27 | Cloud Shell only preserves files located inside your home directory across sessions.
28 |
29 |     This means that if you install a binary to a location on your `PATH` such as `/usr/local/bin`, after your session times out that file may no longer be there!
30 |
31 | As a workaround, you will add `${HOME}/bin` to your `PATH` and place the binary there.
32 |
33 |
34 | 1. Create a `bin` subdirectory in your home directory:
35 |
36 | ```{.shell .language-shell}
37 | mkdir ~/bin
38 | ```
39 |
40 | 1. Copy the CLI to that subdirectory:
41 |
42 | ```{.shell .language-shell}
43 | cp ./bin/istioctl ~/bin
44 | ```
45 |
46 | 1. Add your home `bin` subdirectory to your `PATH`
47 |
48 | ```shell
49 | cat << EOF >> ~/.bashrc
50 |
51 |    export PATH="\$HOME/bin:\$PATH"
52 |
53 | EOF
54 | ```
55 |
56 | And then:
57 |
58 | ```shell
59 | source ~/.bashrc
60 | ```
61 |
62 | Verify that `istioctl` is installed with:
63 |
64 | ```{.shell .language-shell}
65 | istioctl version
66 | ```
67 |
68 | The output should indicate that the version is {{istio.version}}.
69 |
70 | With the CLI installed, proceed to install Istio to Kubernetes.
71 |
72 | ## Pre-check
73 |
74 | The `istioctl` CLI provides a convenient `precheck` command that can be used to "_inspect a Kubernetes cluster for Istio install and upgrade requirements._"
75 |
76 | To verify whether it is safe to install Istio on your Kubernetes cluster, run:
77 |
78 | ```shell
79 | istioctl x precheck
80 | ```
81 |
82 | Make sure that the output of the above command returns a green "checkmark" stating that no issues were found when checking the cluster.
83 |
84 | ## Install Istio
85 |
86 | 1. Istio can be installed directly with the CLI:
87 |
88 | ```{.shell .language-shell}
89 | istioctl install
90 | ```
91 |
92 | 1. When prompted, enter `y` to proceed to install Istio.
93 |
94 | Take a moment to learn more about [Istio installation profiles](https://istio.io/latest/docs/setup/additional-setup/config-profiles/){target=_blank}.
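   |
   |    For example, installing the built-in `demo` profile instead of the `default` profile looks like this:
   |
   |    ```shell
   |    istioctl install --set profile=demo
   |    ```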
95 |
96 | ## Verify that Istio is installed
97 |
98 | Post-installation, Istio provides the command `verify-install`: it runs a series of checks to ensure that the installation was successful and complete.
99 |
100 | Go ahead and run it:
101 |
102 | ```shell
103 | istioctl verify-install
104 | ```
105 |
106 | Inspect the output and confirm that it states that "_✔ Istio is installed and verified successfully._"
107 |
108 | Keep probing:
109 |
110 | 1. List Kubernetes namespaces and note the new namespace `istio-system`
111 |
112 | ```{.shell .language-shell}
113 | kubectl get ns
114 | ```
115 |
116 | 1. Verify that the `istiod` controller pod is running in that namespace
117 |
118 | ```{.shell .language-shell}
119 | kubectl get pod -n istio-system
120 | ```
121 |
122 | 1. Re-run `istioctl version`. The output should include a _control plane_ version, indicating that Istio is indeed present in the cluster.
123 |
124 | ## Next
125 |
126 | With Istio installed, we are ready to deploy an application to the mesh.
127 |
--------------------------------------------------------------------------------
/docs/security.md:
--------------------------------------------------------------------------------
1 | # Security
2 |
3 | In this lab we explore some of the security features of the Istio service mesh.
4 |
5 | ## Mutual TLS
6 |
7 | By default, Istio is configured such that when a service is deployed onto the mesh, it will take advantage of mutual TLS:
8 |
9 | - Workloads are given an identity as a function of their associated service account and namespace.
10 | - An x.509 certificate is issued to the workload (and regularly rotated) and used to identify the workload in calls to other services.
11 |
12 | In the [observability](dashboards.md#kiali) lab, we looked at the Kiali dashboard and noted the :material-lock: icons indicating that traffic was secured with mTLS.
13 |
14 | ### Can a workload receive plain-text requests?
15 |
16 | We can test whether a mesh workload, such as the `customers` service, will allow a plain-text request as follows:
17 |
18 | 1. Create a separate namespace that is not configured with automatic injection.
19 |
20 | ```{.shell .language-shell}
21 | kubectl create ns other-ns
22 | ```
23 |
24 | 1. Deploy `sleep` to that namespace
25 |
26 | ```{.shell .language-shell}
27 | kubectl apply -f sleep.yaml -n other-ns
28 | ```
29 |
30 | 1. Verify that the sleep pod has no sidecars:
31 |
32 | ```{.shell .language-shell}
33 | kubectl get pod -n other-ns
34 | ```
35 |
36 | 1. Call the customer service from that pod:
37 |
38 | ```{.shell .language-shell}
39 | kubectl exec -n other-ns deploy/sleep -- curl -s customers.default | jq
40 | ```
41 |
42 | The output is a JSON-formatted list of customers.
43 |
44 | We conclude that Istio is configured by default to allow plain-text requests.
45 | This is called _permissive mode_ and is specifically designed to allow services that have not yet fully onboarded onto the mesh to participate.
46 |
47 | ### Enable strict mode
48 |
49 | Istio provides the `PeerAuthentication` custom resource to specify peer authentication policy.
50 |
51 | 1. Review the following policy.
52 |
53 | !!! tldr "mtls-strict.yaml"
54 | ```yaml linenums="1"
55 | --8<-- "security/mtls-strict.yaml"
56 | ```
57 |
58 | !!! info
59 |
60 |         Strict mTLS can be enabled globally by setting the policy's namespace to the Istio root namespace, which by default is `istio-system`.
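   |
   |         For example, a mesh-wide policy would look like this sketch (naming the resource `default` is the customary convention):
   |
   |         ```yaml
   |         apiVersion: security.istio.io/v1beta1
   |         kind: PeerAuthentication
   |         metadata:
   |           name: default
   |           namespace: istio-system
   |         spec:
   |           mtls:
   |             mode: STRICT
   |         ```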
61 |
62 | 1. Apply the `PeerAuthentication` resource to the cluster.
63 |
64 | ```{.shell .language-shell}
65 | kubectl apply -f mtls-strict.yaml
66 | ```
67 |
68 | 1. Verify that the peer authentication has been applied.
69 |
70 | ```{.shell .language-shell}
71 | kubectl get peerauthentication
72 | ```
73 |
74 | ### Verify that plain-text requests are no longer permitted
75 |
76 | ```{.shell .language-shell}
77 | kubectl exec -n other-ns deploy/sleep -- curl customers.default
78 | ```
79 |
80 | The console output should indicate that the _connection was reset by peer_.
81 |
82 | ## Inspecting a workload certificate
83 |
84 | 1. Capture the certificate returned by the `customers` workload:
85 |
86 | ```{.shell .language-shell}
87 | kubectl exec deploy/sleep -c istio-proxy -- \
88 | openssl s_client -showcerts -connect customers:80 > cert.txt
89 | ```
90 |
91 | 1. Inspect the certificate with:
92 |
93 | ```{.shell .language-shell}
94 | openssl x509 -in cert.txt -text -noout
95 | ```
96 |
97 | 1. Review the certificate fields:
98 |
99 | 1. The certificate validity period should be 24 hrs.
100 |     1. The _Subject Alternative Name_ field should contain the SPIFFE URI.
101 |
102 |
103 |
104 | !!! question "How do I know that traffic is mTLS-encrypted?"
105 |
106 | Here is a recipe that uses the [`tcpdump`](https://www.tcpdump.org/){target=_blank} utility to spy on traffic to a service to verify that it is indeed encrypted.
107 |
108 |     1. Update the Istio installation with the configuration field `values.global.proxy.privileged` set to `true`:
109 |
110 | ```{.shell .language-shell}
111 | istioctl install --set values.global.proxy.privileged=true
112 | ```
113 |
114 | For a description of this configuration field, see the output of `helm show values istio/istiod | grep privileged`.
115 |
116 | 1. Restart the `customers` deployment:
117 |
118 | ```{.shell .language-shell}
119 | kubectl rollout restart deploy customers-v1
120 | ```
121 |
122 | 1. Grab the IP address of the customers Pod:
123 |
124 | ```{.shell .language-shell}
125 | IP_ADDRESS=$(kubectl get pod -l app=customers -o jsonpath='{.items[0].status.podIP}')
126 | ```
127 |
128 | 1. Shell into the `customers` sidecar container:
129 |
130 | ```{.shell .language-shell}
131 | kubectl exec -it svc/customers -c istio-proxy -- env IP_ADDRESS=$IP_ADDRESS /bin/bash
132 | ```
133 |
134 | 1. Start `tcpdump` on the port that the `customers` service is listening on:
135 |
136 | ```{.shell .language-shell}
137 | sudo tcpdump -vvvv -A -i eth0 "((dst port 3000) and (net ${IP_ADDRESS}))"
138 | ```
139 |
140 |     1. In a separate terminal, make a call to the `customers` service:
141 |
142 | ```{.shell .language-shell}
143 | kubectl exec deploy/sleep -- curl customers
144 | ```
145 |
146 | You will see encrypted text in the `tcpdump` output.
147 |
148 | ## Security in depth
149 |
150 | Another important layer of security is to define an authorization policy, in which we allow only specific services to communicate with other services.
151 |
152 | At the moment, any container can, for example, call the `customers` service or the `web-frontend` service.
153 |
154 | 1. Call the `customers` service.
155 |
156 | ```{.shell .language-shell}
157 | kubectl exec deploy/sleep -- curl -s customers | jq
158 | ```
159 |
160 | 1. Call the `web-frontend` service.
161 |
162 | ```{.shell .language-shell}
163 | kubectl exec deploy/sleep -- curl -s web-frontend | head
164 | ```
165 |
166 | Both calls succeed.
167 |
168 | We wish to apply a policy in which _only `web-frontend` is allowed to call `customers`_, and _only the ingress gateway can call `web-frontend`_.
169 |
170 | Study the below authorization policy.
171 |
172 | !!! tldr "authz-policy-customers.yaml"
173 | ```yaml linenums="1"
174 | --8<-- "security/authz-policy-customers.yaml"
175 | ```
176 |
177 | - The `selector` section specifies that the policy applies to the `customers` service.
178 | - Note how each rule has a `from:`/`source:` section indicating who is allowed in.
179 | - The nomenclature for the value of the `principals` field comes from the [SPIFFE](https://spiffe.io/docs/latest/spiffe-about/overview/){target=_blank} standard. Note how it captures the service account name and namespace associated with the `web-frontend` service. This identity is associated with the x.509 certificate each service uses when making secure mTLS calls to other services. An example principal appears below.
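   |
   | A principal takes the form `<trust-domain>/ns/<namespace>/sa/<service-account>`. For example, assuming the default `cluster.local` trust domain and a service account named `web-frontend` in the `default` namespace, the rule fragment would read:
   |
   | ```yaml
   | rules:
   | - from:
   |   - source:
   |       principals:
   |       - cluster.local/ns/default/sa/web-frontend
   | ```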
180 |
181 | Tasks:
182 |
183 | - [ ] Apply the policy to your cluster.
184 | - [ ] Verify that you are no longer able to reach the `customers` pod from the `sleep` pod
185 |
186 | ### Challenge
187 |
188 | Can you come up with a similar authorization policy for `web-frontend`?
189 |
190 | - Use a copy of the `customers` authorization policy as a starting point
191 | - Give the resource an apt name
192 | - Revise the selector to match the `web-frontend` service
193 | - Revise the rule to match the principal of the ingress gateway
194 |
195 | !!! hint
196 |
197 | The ingress gateway has its own identity.
198 |
199 | Here is a command which can help you find the name of the service account associated with its identity:
200 |
201 | ```{.shell .language-shell}
202 | kubectl get pod -n istio-system -l app=istio-ingressgateway -o yaml | grep serviceAccountName
203 | ```
204 |
205 | Use this service account name together with the namespace that the ingress gateway is running in to specify the value for the `principals` field.
206 |
207 |
208 | ### Test it
209 |
210 | Don't forget to verify that the policy is enforced.
211 |
212 | - Call both services again from the sleep pod and ensure communication is no longer allowed.
213 | - The console output should contain the message _RBAC: access denied_.
214 |
215 | ## Next
216 |
217 | In the next lab we show how to use Istio's traffic management features to upgrade the `customers` service with zero downtime.
218 |
--------------------------------------------------------------------------------
/docs/sidecar-injection.md:
--------------------------------------------------------------------------------
1 | # Sidecar injection
2 |
3 | This lab explores sidecar injection in Istio.
4 |
5 | ## Preface
6 |
7 | Istio provides both a manual and an automatic mechanism for injecting sidecars alongside workloads.
8 |
9 | In this lab you will use the manual method, because it provides the opportunity to inspect the transformed deployment manifest even before applying it to a target Kubernetes cluster.
10 |
11 | You will learn about automatic sidecar injection in the next lab.
12 |
13 | ## Generate a Pod spec
14 |
15 | The `kubectl` command's `--dry-run` flag provides an easy way to generate and capture a simple Pod specification.
16 |
17 | Generate a Pod spec for a simple web server, as follows:
18 |
19 | ```shell
20 | kubectl run mywebserver --image nginx \
21 | --dry-run=client -oyaml > nginx-pod.yaml
22 | ```
23 |
24 | Inspect the contents of the generated file. Here it is below, slightly cleaned up:
25 |
26 | ```yaml linenums="1" title="nginx-pod.yaml"
27 | --8<-- "sidecar-injection/nginx-pod.yaml"
28 | ```
29 |
30 | The main thing to note at this point is that this Pod spec consists of a single container using the image `nginx`.
31 |
32 | ## Transform the Pod spec
33 |
34 | The `istioctl` command provides the convenient `kube-inject` subcommand, which can transform such a specification into one that includes the necessary sidecar.
35 |
36 | 1. Learn the `kube-inject` command's usage:
37 |
38 | ```shell
39 | istioctl kube-inject --help
40 | ```
41 |
42 | 1. Use the command to generate and capture the full sidecar-injected manifest to a new file named `transformed.yaml`.
43 |
44 | ??? help "Show me how"
45 | ```shell
46 | istioctl kube-inject --filename ./nginx-pod.yaml > transformed.yaml
47 | ```
48 |
49 | ## Study the sidecar container specification
50 |
51 | The modified Pod specification now includes a second container.
52 |
53 | Here is the salient part:
54 |
55 | ```yaml linenums="1"
56 | - name: istio-proxy
57 | image: docker.io/istio/proxyv2:{{istio.version}}
58 | args:
59 | - proxy
60 | - sidecar
61 | - --domain
62 | - $(POD_NAMESPACE).svc.cluster.local
63 | - --proxyLogLevel=warning
64 | - --proxyComponentLogLevel=misc:error
65 | - --log_output_level=default:info
66 | env:
67 | - ...
68 | ```
69 |
70 | The container name is `istio-proxy` and the docker image is `istio/proxyv2`.
71 |
72 | ???+ info "What command is actually run?"
73 |
74 | To find out what command actually runs inside that container, we can inspect the docker container specification and view the Entrypoint field:
75 |
76 | ```shell
77 | docker pull docker.io/istio/proxyv2:{{istio.version}}
78 | docker inspect istio/proxyv2:{{istio.version}} | grep Entrypoint -A 2
79 | ```
80 |
81 | Here is the output:
82 |
83 | ```json
84 | "Entrypoint": [
85 | "/usr/local/bin/pilot-agent"
86 | ],
87 | ```
88 |
89 | We learn that the name of the command is [`pilot-agent`](https://istio.io/latest/docs/reference/commands/pilot-agent/){target=_blank}.
90 |
91 | By extracting the arguments from the yaml, we can reconstitute the full command executed inside the sidecar container:
92 |
93 | ```shell
94 | pilot-agent proxy sidecar \
95 | --domain $(POD_NAMESPACE).svc.cluster.local \
96 | --proxyLogLevel=warning \
97 | --proxyComponentLogLevel=misc:error \
98 | --log_output_level=default:info
99 | ```
100 |
101 | ## Apply the manifest
102 |
103 | 1. Deploy the transformed manifest to Kubernetes:
104 |
105 | ```shell
106 | kubectl apply -f transformed.yaml
107 | ```
108 |
109 | 1. List pods in the `default` namespace
110 |
111 | ```shell
112 | kubectl get pod
113 | ```
114 |
115 | Once the pod reaches `Running` state, note the `READY` column in the output displays 2 out of 2 containers:
116 |
117 | ```console
118 | NAME READY STATUS RESTARTS AGE
119 | mywebserver 2/2 Running 0 36s
120 | ```
121 |
122 | ## Study the running processes
123 |
124 | Run the `ps` command from inside the sidecar container, like so:
125 |
126 | ```shell
127 | kubectl exec mywebserver -c istio-proxy -- ps -ef
128 | ```
129 |
130 | Here is the output, slightly cleaned up, showing both the `pilot-agent` process, and the `envoy` process that it bootstrapped:
131 |
132 | ```console
133 | PID PPID CMD
134 | 1 0 /usr/local/bin/pilot-agent proxy sidecar --domain ...
135 | 16 1 /usr/local/bin/envoy -c etc/istio/proxy/envoy-rev.json ...
136 | ```
137 |
138 | We can learn more about the `pilot-agent` command by running `pilot-agent --help` from inside the sidecar container:
139 |
140 | ```shell
141 | kubectl exec mywebserver -c istio-proxy -- pilot-agent --help
142 | ```
143 |
144 | ## Study the `initContainers` specification
145 |
146 | Besides injecting a sidecar container, the transformation operation also adds an [initContainers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/){target=_blank} section.
147 |
148 | Here is the relevant section:
149 |
150 | ```yaml
151 | initContainers:
152 | - name: istio-init
153 | image: docker.io/istio/proxyv2:{{istio.version}}
154 | args:
155 | - istio-iptables
156 | - -p
157 | - "15001"
158 | - -z
159 | - "15006"
160 | - -u
161 | - "1337"
162 | - -m
163 | - REDIRECT
164 | - -i
165 | - '*'
166 | - -x
167 | - ""
168 | - -b
169 | - '*'
170 | - -d
171 | - 15090,15021,15020
172 | - --log_output_level=default:info
173 | ```
174 |
175 | The "initContainer" uses the same image as the sidecar container: `istio/proxyv2`. The difference lies in the command that is run when the Pod initializes.
176 |
177 | Here is the reconstituted command with long-form versions of each option, to clarify the instruction:
178 |
179 | ```shell
180 | pilot-agent istio-iptables \
181 | --envoy-port "15001" \
182 | --inbound-capture-port "15006" \
183 | --proxy-uid "1337" \
184 | --istio-inbound-interception-mode REDIRECT \
185 | --istio-service-cidr '*' \
186 | --istio-service-exclude-cidr "" \
187 | --istio-inbound-ports '*' \
188 | --istio-local-exclude-ports 15090,15021,15020 \
189 | --log_output_level=default:info
190 | ```
191 |
192 | ???+ tip
193 |
194 | For a full description of the `istio-iptables` subcommand and its options, run:
195 |
196 | ```shell
197 | kubectl exec mywebserver -c istio-proxy -- pilot-agent istio-iptables --help
198 | ```
199 |
200 | The gist of the command is that, through `iptables` rules, the routing of network packets inside the Pod is reconfigured to give Envoy the chance to intercept and proxy inbound and outbound traffic.
201 |
202 | We need not concern ourselves with the specific port numbers, exclusions, and other low-level details at this time.
203 |
204 | The lesson of this exercise is to learn how to get at these details.
205 |
206 | ## Going forward
207 |
208 | The above process of transforming a deployment manifest on its way to the Kube API Server is streamlined when using automatic sidecar injection.
209 |
210 | The next lab will walk you through how automatic sidecar injection is accomplished.
211 |
212 | From here on, we will use automatic sidecar injection when deploying workloads to the mesh.
213 |
214 | ## Cleanup
215 |
216 | ```shell
217 | kubectl delete -f transformed.yaml
218 | ```
219 |
--------------------------------------------------------------------------------
/docs/summary.md:
--------------------------------------------------------------------------------
1 | # Congratulations
2 |
3 | Well-done on making it all the way to the end of the Istio 0 to 60 workshop!
4 |
5 | In this workshop, you have covered a lot of ground!
6 |
7 | Let's summarize. You have:
8 |
9 | - [x] Installed Istio
10 | - [x] Deployed an application
11 | - [x] Exposed the application to the internet (Ingress)
12 | - [x] Deployed and studied observability addons including Kiali, Zipkin, Prometheus, and Grafana
13 | - [x] Studied facets of service mesh security including mutual TLS and authorization policies
14 | - [x] Performed a traffic shifting exercise
15 | - [x] Configured circuit breakers
16 |
17 | Istio has many more features whose scope is beyond the 0 to 60 workshop, including but not limited to:
18 |
19 | - [ ] Fault injection
20 | - [ ] Extensibility with WASM
21 | - [ ] Egress gateways
22 | - [ ] Onboarding VM Workloads
23 | - [ ] Istio [deployment models](https://istio.io/latest/docs/ops/deployment/deployment-models/){target=_blank}
24 |
25 | We encourage you to dig deeper into the [Istio docs](https://istio.io/latest/docs/){target=_blank} yourself.
26 |
27 | You might also be interested in the free courses offered at the [Tetrate Academy](https://academy.tetrate.io/){target=\_blank}, including _Istio Fundamentals_, and _Envoy Fundamentals_.
28 |
29 | If you're interested in certification, check out the [Istio Certified Associate](https://training.linuxfoundation.org/certification/istio-certified-associate-ica/){target=_blank} exam.
30 |
31 | Finally, if you're looking for a supported, tested, and secure Istio to run in production, check out the [Tetrate Istio Subscription](https://docs.tetrate.io/istio-subscription){target=_blank}.
32 |
33 | Thanks!
34 |
--------------------------------------------------------------------------------
/docs/the-app.md:
--------------------------------------------------------------------------------
1 | # The application
2 |
3 | In this lab you will deploy an application to your mesh.
4 |
5 | - The application consists of two microservices, `web-frontend` and `customers`.
6 |
7 | !!! note "Aside"
8 |
9 | The official Istio docs canonical example is the [BookInfo application](https://istio.io/latest/docs/examples/bookinfo/){target=_blank}.
10 |
11 |     For this workshop, we felt that an application involving fewer microservices would be clearer.
12 |
13 | - The `customers` service exposes a REST endpoint that returns a list of customers in JSON format. The `web-frontend` calls `customers` to retrieve the list, which it uses to render an HTML page.
14 |
15 | - The respective Docker images for these services have already been built and pushed to a Docker registry.
16 |
17 | - You will deploy the application to the `default` Kubernetes namespace.
18 |
19 | But before proceeding, we must enable sidecar injection.
20 |
21 | ## Enable automatic sidecar injection
22 |
23 | There are two options for [sidecar injection](https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/){target=_blank}: automatic and manual.
24 |
25 | In this lab we will use automatic injection, which involves labeling the namespace where the pods are to reside.
26 |
27 | 1. Label the default namespace
28 |
29 | ```{.shell .language-shell}
30 | kubectl label namespace default istio-injection=enabled
31 | ```
32 |
33 | 1. Verify that the label has been applied:
34 |
35 | ```{.shell .language-shell}
36 | kubectl get ns -Listio-injection
37 | ```
38 |
39 | ## Deploy the application
40 |
41 | 1. Study the two Kubernetes yaml files: `web-frontend.yaml` and `customers.yaml`.
42 |
43 | ??? tldr "web-frontend.yaml"
44 | ```yaml linenums="1"
45 | --8<-- "the-app/web-frontend.yaml"
46 | ```
47 |
48 | ??? tldr "customers.yaml"
49 | ```yaml linenums="1"
50 | --8<-- "the-app/customers.yaml"
51 | ```
52 |
53 | Each file defines its corresponding deployment, service account, and ClusterIP service.
54 |
55 | 1. Apply the two files to your Kubernetes cluster.
56 |
57 | ```{.shell .language-shell}
58 | kubectl apply -f customers.yaml
59 | ```
60 |
61 | ```{.shell .language-shell}
62 | kubectl apply -f web-frontend.yaml
63 | ```
64 |
65 | Confirm that:
66 |
67 | - Two pods are running, one for each service
68 | - Each pod consists of two containers: one running the service image, the other running the Envoy sidecar
69 |
70 | ```{.shell .language-shell}
71 | kubectl get pod
72 | ```
73 |
74 | !!! question "How did each pod end up with two containers?"
75 |
76 | Istio installs a Kubernetes object known as a [mutating webhook admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/){ target=_blank }: logic that intercepts Kubernetes object creation requests and that has the permission to alter (mutate) what ends up stored in etcd (the pod spec).
77 |
78 | You can list the mutating webhooks in your Kubernetes cluster and confirm that the sidecar injector is present.
79 |
80 | ```{.shell .language-shell}
81 | kubectl get mutatingwebhookconfigurations
82 | ```
83 |
84 | ## Verify access to each service
85 |
86 | We wish to deploy a pod that runs a `curl` image so we can verify that each service is reachable from within the cluster.
87 | The Istio distribution provides a sample app called `sleep` that will serve this purpose.
88 |
89 | 1. Deploy `sleep` to the default namespace.
90 |
91 | ??? tldr "sleep.yaml"
92 | ```yaml linenums="1"
93 | --8<-- "the-app/sleep.yaml"
94 | ```
95 |
96 | ```{.shell .language-shell}
97 | kubectl apply -f sleep.yaml
98 | ```
99 |
100 | 1. Use the `kubectl exec` command to call the `customers` service.
101 |
102 | ```{.shell .language-shell}
103 | kubectl exec deploy/sleep -- curl -s customers | jq
104 | ```
105 |
106 | The console output should show a list of customers in JSON format.
107 |
108 | 1. Call the `web-frontend` service
109 |
110 | ```{.shell .language-shell}
111 | kubectl exec deploy/sleep -- curl -s web-frontend | head
112 | ```
113 |
114 | The console output should show the start of an HTML page listing customers in an HTML table.
115 |
116 | The application is now deployed and functioning.
117 |
118 | ## Next
119 |
120 | In the next lab, we expose the `web-frontend` using an Istio Ingress Gateway. This will allow us to access this application on the web.
121 |
122 | Alternatively, you have the option of exploring the newer [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io/){target=_blank} version of the Ingress lab.
123 |
124 |
--------------------------------------------------------------------------------
/docs/traffic-shifting-gwapi.md:
--------------------------------------------------------------------------------
1 | # Traffic shifting with the Gateway API
2 |
3 | Version 2 of the customers service has been developed, and it's time to deploy it to production.
4 | Whereas version 1 returned a list of customer names, version 2 also includes each customer's city.
5 |
6 | ## Deploying customers, v2
7 |
8 | We wish to deploy the new service but aren't yet ready to direct traffic to it.
9 |
10 | It would be prudent to separate the task of deploying the new service from the task of directing traffic to it.
11 |
12 | ### Labels
13 |
14 | The customers service is labeled with `app=customers`.
15 |
16 | Verify this with:
17 |
18 | ```{.shell .language-shell}
19 | kubectl get pod -Lapp,version
20 | ```
21 |
22 | Note the selector on the `customers` service in the output of the following command:
23 |
24 | ```{.shell .language-shell}
25 | kubectl get svc customers -o wide
26 | ```
27 |
28 | If we were to just deploy v2, the selector would match both versions.
29 |
30 | ### Version-specific services
31 |
32 | Istio has the concept of a subset, defined through a `DestinationRule` resource.
33 | The Kubernetes Gateway API does not have that concept.
34 | Instead, one can define a subset of a service's endpoints by simply defining another service with a more specific selector.
35 |
36 | So, for the `customers` service, we can define two subsets by using the `version` label as a discriminator, like this:
37 |
38 | ```yaml linenums="1" title="customers-subsets.yaml" hl_lines="5 9 18 22"
39 | --8<-- "traffic-shifting/customers-subsets.yaml"
40 | ```
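   |
   | In outline, each of these is a plain Kubernetes `Service` with a narrower selector, along these lines (a sketch; the service port 80 and container port 3000 are assumptions based on how `customers` is used elsewhere in these labs). `customers-v2` differs only in its name and `version` label:
   |
   | ```yaml
   | apiVersion: v1
   | kind: Service
   | metadata:
   |   name: customers-v1
   | spec:
   |   selector:
   |     app: customers
   |     version: v1
   |   ports:
   |   - name: http
   |     port: 80
   |     targetPort: 3000
   | ```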
41 |
42 | 1. Apply the above resources to the cluster.
43 |
44 | 1. Verify that two new services `customers-v1` and `customers-v2` are now defined and present:
45 |
46 | ```{.shell .language-shell}
47 | kubectl get svc -o wide
48 | ```
49 |
50 | ### HttpRoutes
51 |
52 | Armed with two distinct destinations, we can use the `HttpRoute` custom resource to define a routing rule that sends all traffic to the `customers-v1` service.
53 |
54 | ```yaml linenums="1" title="customers-route.yaml"
55 | --8<-- "traffic-shifting/customers-route.yaml"
56 | ```
57 |
58 | Above, note how the targeted "backend ref" is the service `customers-v1`.
59 |
60 | 1. Apply the HttpRoute to the cluster.
61 |
62 | 1. Verify that it's been applied.
63 |
64 | ```{.shell .language-shell}
65 | kubectl get httproute
66 | ```
67 |
68 | ### Finally deploy customers, v2
69 |
70 | Apply the following Kubernetes deployment to the cluster.
71 |
72 | ??? tldr "customers-v2.yaml"
73 | ```yaml linenums="1"
74 | --8<-- "traffic-shifting/customers-v2.yaml"
75 | ```
76 |
77 | ### Check that traffic routes strictly to v1
78 |
79 | 1. Generate some traffic.
80 |
81 | ```{.shell .language-shell}
82 | while true; do curl -I http://$GATEWAY_IP/; sleep 0.5; done
83 | ```
84 |
85 | 1. Open a separate terminal and launch the Kiali dashboard
86 |
87 | ```{.shell .language-shell}
88 | istioctl dashboard kiali
89 | ```
90 |
91 |    Select the `default` namespace, then take a look at the graph.
92 |
93 | The graph should show all traffic going to v1.
94 |
95 | ## Route to customers, v2
96 |
97 | We wish to proceed with caution. Before customers can see version 2, we want to make sure that the service functions properly.
98 |
99 | ## Expose "debug" traffic to v2
100 |
101 | Review this proposed updated routing specification.
102 |
103 | ```yaml linenums="1" title="customers-route-debug.yaml"
104 | --8<-- "traffic-shifting/customers-route-debug.yaml"
105 | ```
106 |
107 | We are telling Istio to check an HTTP header: if the `user-agent` is set to `debug`, route to v2; otherwise route to v1.
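   |
   | The salient fragment of the route is the header match (a sketch; the Gateway API match type defaults to `Exact`):
   |
   | ```yaml
   | matches:
   | - headers:
   |   - name: user-agent
   |     value: debug
   | ```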
108 |
109 | Open a new terminal and apply the above resource to the cluster; it will overwrite the currently defined `HttpRoute`, as both yaml files use the same resource name.
110 |
111 | ```{.shell .language-shell}
112 | kubectl apply -f customers-route-debug.yaml
113 | ```
114 |
115 | ### Test it
116 |
117 | Open a browser and visit the application.
118 |
119 | ??? tip "If you need it"
120 |
121 | ```{.shell .language-shell}
122 |     GATEWAY_IP=$(kubectl get svc frontend-gateway-istio -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
123 | ```
124 |
125 | We can tell v1 and v2 apart in that v2 displays not only customer names but also their city (in two columns).
126 |
127 | The `user-agent` header can be included in a request in a number of ways:
128 |
129 | === "Developer Tools"
130 |
131 | If you're using Chrome or Firefox, you can customize the `user-agent` header as follows:
132 |
133 | 1. Open the browser's developer tools
134 | 2. Open the "three dots" menu, and select _More tools --> Network conditions_
135 | 3. The network conditions panel will open
136 | 4. Under _User agent_, uncheck _Use browser default_
137 | 5. Select _Custom..._ and in the text field enter `debug`
138 |
139 | Refresh the page; traffic should be directed to v2.
140 |
141 | === "`curl`"
142 |
143 | ```{.shell .language-shell}
144 | curl -H "user-agent: debug" http://$GATEWAY_IP
145 | ```
146 |
147 | === "Using a custom browser extension"
148 |
149 | Check out [modheader](https://modheader.com/){target=_blank}, a convenient browser extension for modifying HTTP headers in-browser.
150 |
151 |
152 | !!! tip
153 |
154 | If you refresh the page a good dozen times and then wait ~15-30 seconds, you should see some of that v2 traffic appear in Kiali.
155 |
156 |
157 | ## Canary
158 |
159 | Well, v2 looks good; we decide to expose the new version to the public, but we remain prudent.
160 |
161 | Start by siphoning 10% of traffic over to v2.
162 |
163 | ```yaml linenums="1" title="customers-route-canary.yaml"
164 | --8<-- "traffic-shifting/customers-route-canary.yaml"
165 | ```
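   |
   | The salient fragment is the pair of weighted backend refs (a sketch; service port 80 again assumed):
   |
   | ```yaml
   | backendRefs:
   | - name: customers-v1
   |   port: 80
   |   weight: 90
   | - name: customers-v2
   |   port: 80
   |   weight: 10
   | ```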
166 |
167 | Above, note the `weight` field directing 10 percent of traffic to the `customers-v2` backend.
168 | Kiali should now show traffic going to both v1 and v2.
169 |
170 | - Apply the above resource.
171 | - In your browser: undo the injection of the `user-agent` header, and refresh the page a bunch of times.
172 |
173 | In Kiali, under the _Display_ pulldown menu, you can turn on traffic distribution, to see how much traffic is sent to each subset.
174 |
175 | Most of the requests still go to v1, but some (10%) are directed to v2.
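
If you want to double-check the shape of a weighted rule, it looks roughly like the following fragment. Backend names and ports are assumptions; the included `customers-route-canary.yaml` is authoritative.

```yaml
rules:
  - backendRefs:
      - name: customers-v1
        port: 80
        weight: 90   # 90% of requests
      - name: customers-v2
        port: 80
        weight: 10   # 10% of requests
```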
176 |
177 |
178 | ## Check Grafana
179 |
180 | Before we open the floodgates, we wish to determine how v2 is faring.
181 |
182 | ```{.shell .language-shell}
183 | istioctl dashboard grafana
184 | ```
185 |
186 | In Grafana, visit the Istio Workload Dashboard and look specifically at the customers v2 workload.
187 | Check the request rate, the incoming success rate, and the latencies.
188 |
189 | If all looks good, adjust the traffic split from 90/10 to, say, 50/50.
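
For example, edit the weights in the route so that both backends receive an equal share, then re-apply the file. This is a sketch; names and ports are assumptions.

```yaml
backendRefs:
  - name: customers-v1
    port: 80
    weight: 50
  - name: customers-v2
    port: 80
    weight: 50
```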
190 |
191 | Watch the request volume change (you may need to click on the "refresh dashboard" button in the upper right-hand corner).
192 |
193 | Finally, switch all traffic over to v2.
194 |
195 | ```yaml linenums="1" title="customers-route-final.yaml"
196 | --8<-- "traffic-shifting/customers-route-final.yaml"
197 | ```
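
In essence, the final route reduces to a single backend, roughly as follows (name and port assumed):

```yaml
rules:
  - backendRefs:
      - name: customers-v2
        port: 80
```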
198 |
199 | After you apply the above YAML, go to your browser and make sure all requests land on v2 (two-column output).
200 | Within a minute or so, the Kiali dashboard should also reflect the fact that all traffic is going to the customers v2 service.
201 |
202 | Though it no longer receives any traffic, we decide to leave v1 running a while longer before retiring it.
203 |
204 | ## Going further
205 |
206 | Investigate [Flagger](https://flagger.app/){target=_blank}, an Istio-compatible tool that can be used to automate the process of progressive delivery (aka Canary rollouts). [Here](https://github.com/eitansuez/istio-flagger){target=_blank} is an exploration of Flagger with Istio and its `bookinfo` sample application.
207 |
208 | ## Cleanup
209 |
210 | After completing this lab, reset your application to its initial state:
211 |
212 | 1. Delete the `customers` HTTPRoute:
213 |
214 | ```shell
215 | kubectl delete httproute customers
216 | ```
217 |
218 | 1. Delete the version-specific services for customers:
219 |
220 | ```shell
221 | kubectl delete service customers-v1
222 | kubectl delete service customers-v2
223 | ```
224 |
225 | 1. Delete the `customers-v2` deployment:
226 |
227 | ```shell
228 | kubectl delete deploy customers-v2
229 | ```
230 |
--------------------------------------------------------------------------------
/docs/traffic-shifting.md:
--------------------------------------------------------------------------------
1 | # Traffic shifting
2 |
3 | Version 2 of the customers service has been developed, and it's time to deploy it to production.
4 | Whereas version 1 returned a list of customer names, version 2 also includes each customer's city.
5 |
6 | ## Deploying customers, v2
7 |
8 | We wish to deploy the new service but aren't yet ready to direct traffic to it.
9 |
10 | It would be prudent to separate the task of deploying the new service from the task of directing traffic to it.
11 |
12 | ### Labels
13 |
14 | The customers service is labeled with `app=customers`.
15 |
16 | Verify this with:
17 |
18 | ```{.shell .language-shell}
19 | kubectl get pod -Lapp,version
20 | ```
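
The output should resemble the following sketch; pod names, counts, and ages will differ. The `APP` and `VERSION` columns are produced by the `-L` flag.

```text
NAME                            READY   STATUS    RESTARTS   AGE   APP         VERSION
customers-v1-xxxxxxxxxx-xxxxx   2/2     Running   0          10m   customers   v1
...
```

Note the `2/2` in the `READY` column: each pod runs the application container plus the Envoy sidecar.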
21 |
22 | Note the selector on the `customers` service in the output of the following command:
23 |
24 | ```{.shell .language-shell}
25 | kubectl get svc customers -o wide
26 | ```
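
The output should look roughly as follows; addresses, port, and age are illustrative. Note the `SELECTOR` column: the selector is `app=customers`, with no reference to a `version` label.

```text
NAME        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE   SELECTOR
customers   ClusterIP   10.96.12.34    <none>        80/TCP    10m   app=customers
```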
27 |
28 | If we were to just deploy v2, the selector would match both versions.
29 |
30 | ### DestinationRules
31 |
32 | We can inform Istio that two distinct subsets of the `customers` service exist, and we can use the `version` label as the discriminator.
33 |
34 | ```yaml linenums="1" title="customers-destinationrule.yaml"
35 | --8<-- "traffic-shifting/customers-destinationrule.yaml"
36 | ```
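
In essence, the destination rule should look roughly like the sketch below; the host value is an assumption, and the included file is authoritative.

```yaml
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: customers
spec:
  host: customers.default.svc.cluster.local  # assumed host
  subsets:
    - name: v1
      labels:
        version: v1   # pods labeled version=v1
    - name: v2
      labels:
        version: v2   # pods labeled version=v2
```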
37 |
38 | 1. Apply the above destination rule to the cluster.
39 |
40 | 1. Verify that it's been applied.
41 |
42 | ```{.shell .language-shell}
43 | kubectl get destinationrule
44 | ```
45 |
46 | It's also worthwhile to invoke the `istioctl x describe` command on the `customers` service:
47 |
48 | ```shell
49 | istioctl x describe svc customers
50 | ```
51 |
52 | Notice how the output references the newly-created subsets v1 and v2.
53 |
54 | ### VirtualServices
55 |
56 | Armed with two distinct destinations, we can use the `VirtualService` custom resource to define a routing rule that sends all traffic to the v1 subset.
57 |
58 | ```yaml linenums="1" title="customers-virtualservice.yaml"
59 | --8<-- "traffic-shifting/customers-virtualservice.yaml"
60 | ```
61 |
62 | Above, note how the route specifies subset v1.
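
Such a routing rule looks roughly like the following sketch (the host value is an assumption):

```yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: customers
spec:
  hosts:
    - customers.default.svc.cluster.local  # assumed host
  http:
    - route:
        - destination:
            host: customers.default.svc.cluster.local
            subset: v1   # all traffic pinned to the v1 subset
```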
63 |
64 | 1. Apply the virtual service to the cluster.
65 |
66 | 1. Verify that it's been applied.
67 |
68 | ```{.shell .language-shell}
69 | kubectl get virtualservice
70 | ```
71 |
72 | We can now safely proceed to deploy v2, without having to worry about the new workload receiving traffic.
73 |
74 | ### Finally deploy customers, v2
75 |
76 | Apply the following Kubernetes deployment to the cluster.
77 |
78 | ??? tldr "customers-v2.yaml"
79 | ```yaml linenums="1"
80 | --8<-- "traffic-shifting/customers-v2.yaml"
81 | ```
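
The key detail in that deployment is the `version: v2` label on the pod template, which places the new pods in the v2 subset, while the unchanged `app: customers` label keeps them behind the existing service. Schematically (the image and other details are placeholders; the bundled file is authoritative):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: customers-v2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: customers
      version: v2
  template:
    metadata:
      labels:
        app: customers   # matched by the customers service selector
        version: v2      # the discriminator used by the destination rule
    spec:
      containers:
        - name: svc
          image: customers:v2   # placeholder image reference
```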
82 |
83 | ### Check that traffic routes strictly to v1
84 |
85 | 1. Generate some traffic.
86 |
87 | ```{.shell .language-shell}
88 | while true; do curl -I http://$GATEWAY_IP/; sleep 0.5; done
89 | ```
90 |
91 | 1. Open a separate terminal and launch the Kiali dashboard:
92 |
93 | ```{.shell .language-shell}
94 | istioctl dashboard kiali
95 | ```
96 |
97 |     Select the `default` namespace, then take a look at the graph.
98 |
99 | The graph should show all traffic going to v1.
100 |
101 | ## Route to customers, v2
102 |
103 | We wish to proceed with caution. Before customers can see version 2, we want to make sure that the service functions properly.
104 |
105 | ## Expose "debug" traffic to v2
106 |
107 | Review this proposed updated routing specification.
108 |
109 | ```yaml linenums="1" title="customers-vs-debug.yaml"
110 | --8<-- "traffic-shifting/customers-vs-debug.yaml"
111 | ```
112 |
113 | We are telling Istio to check an HTTP header: if the `user-agent` is set to `debug`, route to v2, otherwise route to v1.
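
For reference, a header-based match in a `VirtualService` has roughly this shape; the host value is an assumption, and the included `customers-vs-debug.yaml` is authoritative.

```yaml
http:
  - match:
      - headers:
          user-agent:
            exact: debug   # requests with this header go to v2
    route:
      - destination:
          host: customers.default.svc.cluster.local
          subset: v2
  - route:                 # everything else falls through to v1
      - destination:
          host: customers.default.svc.cluster.local
          subset: v1
```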
114 |
115 | Open a new terminal and apply the above resource to the cluster; it will overwrite the currently defined `VirtualService`, as both YAML files use the same resource name.
116 |
117 | ```{.shell .language-shell}
118 | kubectl apply -f customers-vs-debug.yaml
119 | ```
120 |
121 | ### Test it
122 |
123 | Open a browser and visit the application.
124 |
125 | ??? tip "If you need it"
126 |
127 | ```{.shell .language-shell}
128 | GATEWAY_IP=$(kubectl get svc -n istio-system istio-ingressgateway -ojsonpath='{.status.loadBalancer.ingress[0].ip}')
129 | ```
130 |
131 | We can tell v1 and v2 apart: v2 displays not only customer names but also their cities (in two columns).
132 |
133 | The `user-agent` header can be included in a request in a number of ways:
134 |
135 | === "Developer Tools"
136 |
137 | If you're using Chrome or Firefox, you can customize the `user-agent` header as follows:
138 |
139 | 1. Open the browser's developer tools
140 | 2. Open the "three dots" menu, and select _More tools --> Network conditions_
141 | 3. The network conditions panel will open
142 | 4. Under _User agent_, uncheck _Use browser default_
143 | 5. Select _Custom..._ and in the text field enter `debug`
144 |
145 | Refresh the page; traffic should be directed to v2.
146 |
147 | ===+ "`curl`"
148 |
149 | ```{.shell .language-shell}
150 | curl -H "user-agent: debug" http://$GATEWAY_IP
151 | ```
152 |
153 | === "Using a custom browser extension"
154 |
155 | Check out [modheader](https://modheader.com/){target=_blank}, a convenient browser extension for modifying HTTP headers in-browser.
156 |
157 |
158 | !!! tip
159 |
160 | If you refresh the page a good dozen times and then wait ~15-30 seconds, you should see some of that v2 traffic appear in Kiali.
161 |
162 |
163 | ## Canary
164 |
165 | Well, v2 looks good; we decide to expose the new version to the public, but we remain cautious.
166 |
167 | Start by siphoning 10% of traffic over to v2.
168 |
169 | ```yaml linenums="1" title="customers-vs-canary.yaml"
170 | --8<-- "traffic-shifting/customers-vs-canary.yaml"
171 | ```
172 |
173 | Above, note the `weight` field specifying 10 percent of traffic for subset `v2`.
174 | Once applied, Kiali will show traffic going to both v1 and v2.
175 |
176 | - Apply the above resource.
177 | - In your browser: undo the injection of the `user-agent` header, and refresh the page a bunch of times.
178 |
179 | In Kiali, under the _Display_ pulldown menu, you can turn on "Traffic Distribution" to view the relative percentage of traffic sent to each subset.
180 |
181 | Most of the requests still go to v1, but some (10%) are directed to v2.
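
For reference, a weighted route in a `VirtualService` looks roughly like this fragment (host value assumed; the included `customers-vs-canary.yaml` is authoritative):

```yaml
http:
  - route:
      - destination:
          host: customers.default.svc.cluster.local
          subset: v1
        weight: 90   # 90% of requests
      - destination:
          host: customers.default.svc.cluster.local
          subset: v2
        weight: 10   # 10% of requests
```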
182 |
183 |
184 | ## Check Grafana
185 |
186 | Before we open the floodgates, we wish to determine how v2 is faring.
187 |
188 | ```{.shell .language-shell}
189 | istioctl dashboard grafana
190 | ```
191 |
192 | In Grafana, visit the Istio Workload Dashboard and look specifically at the customers v2 workload.
193 | Check the request rate, the incoming success rate, and the latencies.
194 |
195 | If all looks good, adjust the traffic split from 90/10 to, say, 50/50.
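
For example, edit the two `weight` values so each subset receives an equal share, then re-apply the file (a sketch; host value assumed):

```yaml
route:
  - destination:
      host: customers.default.svc.cluster.local
      subset: v1
    weight: 50
  - destination:
      host: customers.default.svc.cluster.local
      subset: v2
    weight: 50
```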
196 |
197 | Watch the request volume change (you may need to click on the "refresh dashboard" button in the upper right-hand corner).
198 |
199 | Finally, switch all traffic over to v2.
200 |
201 | ```yaml linenums="1" title="customers-virtualservice-final.yaml"
202 | --8<-- "traffic-shifting/customers-virtualservice-final.yaml"
203 | ```
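
In essence, the final virtual service routes everything to the v2 subset, roughly as follows (host value assumed):

```yaml
http:
  - route:
      - destination:
          host: customers.default.svc.cluster.local
          subset: v2
```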
204 |
205 | After applying the above resource, go to your browser and make sure all requests land on v2 (two-column output).
206 | Within a minute or so, the Kiali dashboard should also reflect the fact that all traffic is going to the customers v2 service.
207 |
208 | Though it no longer receives any traffic, we decide to leave v1 running a while longer before retiring it.
209 |
210 | ## Going further
211 |
212 | Investigate [Flagger](https://flagger.app/){target=_blank}, an Istio-compatible tool that can be used to automate the process of progressive delivery (aka Canary rollouts). [Here](https://github.com/eitansuez/istio-flagger){target=_blank} is an exploration of Flagger with Istio and its `bookinfo` sample application.
213 |
214 | ## Cleanup
215 |
216 | After completing this lab, reset your application to its initial state:
217 |
218 | 1. Delete the `customers` virtual service:
219 |
220 | ```shell
221 | kubectl delete virtualservice customers
222 | ```
223 |
224 | 1. Delete the destination rule for the customers service:
225 |
226 | ```shell
227 | kubectl delete destinationrule customers
228 | ```
229 |
230 | 1. Delete the `customers-v2` deployment:
231 |
232 | ```shell
233 | kubectl delete deploy customers-v2
234 | ```
235 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Istio 0 to 60 Labs
2 | repo_url: https://github.com/tetratelabs/istio-0to60
3 | theme:
4 | name: material
5 | custom_dir: overrides
6 | logo_light_mode: assets/tetrate-logo-white.png
7 | logo_dark_mode: assets/tetrate-logo-black.png
8 | palette:
9 | - scheme: default
10 | toggle:
11 | icon: material/toggle-switch-off-outline
12 | name: Switch to dark mode
13 | - scheme: slate
14 | toggle:
15 | icon: material/toggle-switch
16 | name: Switch to light mode
17 | features:
18 | - navigation.top
19 | - navigation.instant
20 | - navigation.tracking
21 | - navigation.footer
22 | - content.code.copy
23 | # - navigation.tabs
24 | extra_css:
25 | - css/custom.css
26 | nav:
27 | - environment.md
28 | - install.md
29 | - sidecar-injection.md
30 | - discovery.md
31 | - the-app.md
32 | - ingress.md
33 | - ingress-gwapi.md
34 | - dashboards.md
35 | - security.md
36 | - traffic-shifting.md
37 | - traffic-shifting-gwapi.md
38 | - circuit-breakers.md
39 | - Summary: summary.md
40 | markdown_extensions:
41 | - admonition
42 | - pymdownx.details
43 | - pymdownx.superfences
44 | - attr_list
45 | - pymdownx.highlight:
46 | anchor_linenums: true
47 | use_pygments: true
48 | - pymdownx.inlinehilite
49 | - pymdownx.snippets:
50 | base_path: artifacts
51 | check_paths: true
52 | - pymdownx.tabbed:
53 | alternate_style: true
54 | - pymdownx.caret
55 | - pymdownx.mark
56 | - pymdownx.tilde
57 | - pymdownx.keys
58 | - pymdownx.emoji:
59 | emoji_index: !!python/name:material.extensions.emoji.twemoji
60 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
61 | - pymdownx.tasklist:
62 | custom_checkbox: true
63 | - pymdownx.smartsymbols
64 | - toc:
65 | permalink: true
66 | plugins:
67 | - search
68 | # https://mkdocs-macros-plugin.readthedocs.io/en/latest/
69 | - macros
70 | extra:
71 | istio:
72 | version: 1.23.0
73 |
--------------------------------------------------------------------------------
/overrides/partials/logo.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------