├── .gitignore
├── 01-intro
├── README.md
├── cors.yaml
├── diagram.drawio
├── diagram.svg
├── intro.odp
├── intro.pdf
├── response-header.yaml
├── server.go
├── server_tcp.go
├── simple.yaml
├── simple_fault.yaml
└── simple_tcp.yaml
├── 02-observe
├── README.md
├── accesslogs.yaml
├── jaeger.yaml
├── libjaegertracing.so.0.4.2
├── libjaegertracing_plugin.linux_amd64.so
├── observe.odp
├── observe.pdf
├── prometheus.yml
├── rl
│ ├── .vscode
│ │ └── launch.json
│ ├── go.mod
│ ├── go.sum
│ ├── rl-demo
│ └── rl.go
├── server.go
├── simple.yaml
└── stats.yaml
├── 03-security
├── README.md
├── cert.pem
├── deployment.yaml
├── edge.yaml
├── example_com_cert.pem
├── example_com_key.pem
├── key.pem
├── makecerts.sh
├── securing.odp
├── securing.pdf
├── server.go
├── simple.yaml
└── svcaccnt.yaml
├── 04-xds
├── README.md
├── server.go
├── slides.odp
├── slides.pdf
├── xds.yaml
└── xds
│ ├── go.mod
│ ├── go.sum
│ └── xds.go
├── 05-filters
├── README.md
├── slides.odp
└── slides.pdf
├── 06-wasmfilters
├── slides.odp
└── slides.pdf
├── 07-intro-to-opa
├── README.md
├── basic-data.json
├── data.json
├── input.json
├── query.rego
└── query_test.rego
├── 08-opa-envoy
├── README.md
├── diagram.drawio
├── diagram.svg
├── envoy.yaml
├── opa-config.yaml
├── policy.rego
└── slide.odp
├── 09-gitops
├── README.md
├── slides.odp
└── slides.pdf
├── 10-waypoint
├── README.md
├── slides.odp
└── slides.pdf
├── 11-envoyfilter
├── Extending Istio with the EnvoyFilter CRD.odp
├── Extending Istio with the EnvoyFilter CRD.pdf
├── README.md
├── slides.odp
└── slides.pdf
├── 12-hitless-delpoy
├── README.md
├── envoy.yaml
├── slides.odp
├── slides.pdf
└── xds
│ ├── go.mod
│ ├── go.sum
│ └── xds.go
├── 14-istio-debugging
├── README.md
├── destination-rule-all.yaml
├── setup.sh
├── slides.odp
├── slides.pdf
└── virtual-service-all-v1.yaml
├── 15-envoy-external-services
├── README.md
├── envoy.yaml
├── server
│ ├── go.mod
│ ├── go.sum
│ └── server.go
├── slides.odp
└── slides.pdf
├── 17-ratelimit
├── README.md
├── envoy.yaml
├── go.mod
├── go.sum
├── rlconfig
│ └── config
│ │ └── rl.yaml
└── slides.odp
├── 18-istio-envoy-filter
├── README.md
├── envoyfilter.yaml
├── setup.sh
├── slides.odp
├── slides.pdf
└── srvconfig.yaml
├── 19-ebpf-top-down
├── README.md
├── ebpf-slides.pdf
└── probe.c
├── 20-one-click-istio-install-helm
├── 1-click-helm-slides.pdf
├── README.md
├── SHOWNOTES.md
├── helmfile.yaml
├── images
│ ├── dashboard.png
│ └── thumbnail.png
├── istioperformance.json
└── values
│ └── kube-prometheus-stack
│ └── values.yaml
├── 21-istio-in-action-book
├── Optimizing-the-control-plane-performance.pdf
├── README.md
└── SHOWNOTES.md
├── 22-ebpf-merbridge-istio
├── README.md
├── SHOWNOTES.md
├── merbridge.jpg
└── merbridge.pdf
├── 23-app-resiliency-envoy
├── README.md
└── SHOWNOTES.md
├── 24-debug-envoy-config-access-logs
├── README.md
└── SHOWNOTES.md
├── 25-istio-spire-integration
├── README.md
├── SHOWNOTES.md
├── demo
│ ├── bookinfo
│ │ ├── bookinfo.yaml
│ │ ├── cleanup-bookinfo
│ │ └── gateway.yaml
│ ├── cleanup-all
│ ├── create-registration-entries
│ ├── create-registration-entry-details
│ ├── delete-registration-entry
│ ├── deploy-bookinfo
│ ├── deploy-istio
│ ├── deploy-spire
│ ├── download-istioctl
│ ├── forward-port
│ ├── istio
│ │ ├── auth.yaml
│ │ ├── cleanup-istio
│ │ └── istio-config.yaml
│ ├── kill-forward-process
│ ├── show-registration-entries
│ ├── show-spire-cluster-id
│ └── spire
│ │ ├── agent-account.yaml
│ │ ├── agent-cluster-role.yaml
│ │ ├── agent-configmap.yaml
│ │ ├── agent-daemonset.yaml
│ │ ├── cleanup-spire
│ │ ├── server-account.yaml
│ │ ├── server-cluster-role.yaml
│ │ ├── server-configmap.yaml
│ │ ├── server-service.yaml
│ │ ├── server-statefulset.yaml
│ │ ├── spiffe-csi-driver.yaml
│ │ └── spire-bundle-configmap.yaml
└── slides
│ └── istio-spire-slides.pdf
├── 26-cluster-api-k8s
├── README.md
├── SHOWNOTES.md
└── hoot-capi.pdf
├── 27-gloo-cilium-and-istio
├── README.md
└── SHOWNOTES.md
├── 28-what-is-new-istio-1.14
├── README.md
├── SHOWNOTES.md
└── demo
│ └── kube-prometheus-stack
│ ├── helmfile.yaml
│ └── values.yaml
├── 29-port-ebpf-app-to-bumblebee
├── README.md
└── SHOWNOTES.md
├── 30-https-envoy-explained
├── README.md
└── SHOWNOTES.md
├── 31-cilium-istio-l7-policies
├── README.md
├── SHOWNOTES.md
├── cilium-policy-l4.yaml
├── cilium-policy-l7.yaml
└── samples
│ ├── helloworld-with-affinity.yaml
│ ├── helloworld.yaml
│ ├── notsleep.yaml
│ └── sleep.yaml
├── 32-graphql-dev-ops
├── README.md
└── SHOWNOTES.md
├── 33-vcluster-istio
├── README.md
└── SHOWNOTES.md
├── 41-what-is-new-istio-1.16
├── README.md
├── SHOWNOTES.md
└── demo
│ └── wasm
│ ├── Cargo.lock
│ ├── Cargo.toml
│ ├── Dockerfile
│ ├── LICENSE
│ ├── Makefile
│ ├── README.md
│ ├── build.sh
│ ├── src
│ └── lib.rs
│ ├── wasmplugin-broken.yaml
│ └── wasmplugin.yaml
├── 44-overview-of-spire
├── README.md
└── demo
│ ├── clusterspiffeid.yaml
│ ├── crds.yaml
│ ├── csidriver.yaml
│ ├── deploy-prereqs.sh
│ ├── istio-spire-config.yaml
│ ├── sleep-spire.yaml
│ ├── spire-agent.yaml
│ ├── spire-controller-manager-config.yaml
│ ├── spire-controller-manager-webhook.yaml
│ └── spire-server.yaml
├── 47-certificates-in-istio
└── README.md
├── 49-serviceentries
└── README.md
├── 50-kube-networking-cilium
├── README.md
└── files
│ ├── kind-iptables.yaml
│ ├── kind-ipvs.yaml
│ └── kind-nocni.yaml
├── 51-kube-networking-cilium-2
├── README.md
└── files
│ ├── cilium-values.yaml
│ ├── kind-nocni.yaml
│ └── sleep.yaml
├── 52-opa
├── README.md
└── files
│ ├── authz
│ ├── data.json
│ └── policy.rego
│ ├── opa-bundle.yaml
│ ├── opa-cm.yaml
│ ├── tests
│ ├── policy.rego
│ └── policy_test.rego
│ └── tokens.md
├── 53-ambient-cut-sm-cost
└── SHOWNOTES.md
├── 57-whats-new-in-istio-1-19
└── README.md
├── 58-bgpandcilium
└── README.md
├── 67-envoy-extproc
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── envoy.yaml
├── go.mod
├── go.sum
└── pkg
│ ├── httptarget
│ ├── handler.go
│ └── main.go
│ └── processor
│ ├── main.go
│ └── service.go
├── README.md
└── images
└── hoot-background.png
/.gitignore:
--------------------------------------------------------------------------------
1 | 09-gitops/gitserver/gitserver
2 | dump.rdb
3 |
4 |
5 | .idea/
--------------------------------------------------------------------------------
/01-intro/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Diagrams
3 | ```
4 | +-------------------+ +----------+ +----------------------------+ +------------------+
5 | | | | | | | | |
6 | | downstream client +--------->+ listener +---------->+ filters (routing decision) +---------->+ upstream cluster |
7 | | | | | | | | |
8 | +-------------------+ +----------+ +----------------------------+ +------------------+
9 |
10 | How is routing decision done?
11 | +-------------+ +------------+ +--------------+ +---------------+ +----------------+
12 | | | | | | | | | | |
13 | | TCP filters +-------->+ HCM filter +--------->+ http filters +---------->+ router filter +--------->+ host selection |
14 | | | | | | | | | | |
15 | +-------------+ +------------+ +--------------+ +---------------+ +----------------+
16 | ```
17 | ## Simple config
18 |
19 | run the following:
20 | ```
21 | fuser -k 8082/tcp
22 | fuser -k 10000/tcp
23 | fuser -k 10004/tcp
24 |
25 | go run server_tcp.go&
26 | envoy -c simple_tcp.yaml&
27 | echo hi | nc localhost 10000
28 | ```
29 |
30 | ## HTTP simple config
31 | ```
32 | fuser -k 8082/tcp
33 | fuser -k 10000/tcp
34 | fuser -k 10004/tcp
35 |
36 | go run server.go&
37 | envoy -c simple.yaml&
38 | curl http://localhost:10000 -dhi
39 | ```
40 |
41 | ## Let's use a filter!
42 |
43 |
44 | ### cors
45 |
46 | note: you can provide route level configuration for a filter
47 |
48 | ```
49 | fuser -k 8082/tcp
50 | fuser -k 10000/tcp
51 | fuser -k 10004/tcp
52 |
53 | go run server.go&
54 | envoy -c cors.yaml&
55 | curl -XOPTIONS http://localhost:10000 -H"Origin: solo.io" -v
56 | curl -XOPTIONS http://localhost:10000 -H"Origin: example.com" -v
57 | ```
58 |
59 | ## fault filter
60 |
61 | remember: filter order matters.
62 |
63 | ```
64 | fuser -k 8082/tcp
65 | fuser -k 10000/tcp
66 | fuser -k 10004/tcp
67 |
68 | go run server.go&
69 | envoy -c simple_fault.yaml -l debug&
70 |
71 | for i in $(seq 10); do
72 | curl http://localhost:10000 -s -o /dev/null -w "%{http_code}"
73 | echo
74 | done
75 | ```
76 |
77 | ### header manipulation
78 |
79 | note: some route level configuration is handled by the router filter
80 |
81 | ```
82 | fuser -k 8082/tcp
83 | fuser -k 10000/tcp
84 | fuser -k 10004/tcp
85 |
86 | go run server.go&
87 | envoy -c response-header.yaml&
88 | curl http://localhost:10000 -v
89 | ```
90 |
--------------------------------------------------------------------------------
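The note above about route-level filter configuration: cors.yaml (next file) does this with the legacy `cors:` field on the route. For most HTTP filters the modern mechanism is `typed_per_filter_config`, keyed by the filter name. A minimal sketch (not a file in this repo) that scopes the fault filter from simple_fault.yaml to a single route:

```
routes:
- match: { prefix: "/" }
  route: { cluster: somecluster }
  typed_per_filter_config:
    envoy.filters.http.fault:
      "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
      abort:
        http_status: 503
        percentage: { numerator: 100, denominator: HUNDRED }
```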
/01-intro/cors.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: ingress_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | domains: ["*"]
22 | routes:
23 | - match: { prefix: "/" }
24 | route:
25 | cluster: somecluster
26 | cors:
27 | allow_origin_string_match:
28 | - prefix: solo.io
29 | http_filters:
30 | - name: envoy.filters.http.cors
31 | - name: envoy.filters.http.router
32 | clusters:
33 | - name: somecluster
34 | connect_timeout: 0.25s
35 | type: STRICT_DNS
36 | lb_policy: ROUND_ROBIN
37 | hosts: [{ socket_address: { address: 127.0.0.1, port_value: 8082 }}]
38 |
--------------------------------------------------------------------------------
/01-intro/diagram.drawio:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/01-intro/intro.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/01-intro/intro.odp
--------------------------------------------------------------------------------
/01-intro/intro.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/01-intro/intro.pdf
--------------------------------------------------------------------------------
/01-intro/response-header.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: ingress_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | domains: ["*"]
22 | routes:
23 | - match: { prefix: "/" }
24 | route:
25 | cluster: somecluster
26 |               response_headers_to_add:
27 |               - header:
28 |                   key: x-solo.io
29 |                   value: it-works!
30 | http_filters:
31 | - name: envoy.filters.http.router
32 | clusters:
33 | - name: somecluster
34 | connect_timeout: 0.25s
35 | type: STRICT_DNS
36 | lb_policy: ROUND_ROBIN
37 | load_assignment:
38 |       cluster_name: somecluster
39 | endpoints:
40 | - lb_endpoints:
41 | - endpoint:
42 | address:
43 | socket_address:
44 | address: 127.0.0.1
45 | port_value: 8082
46 |
47 |
--------------------------------------------------------------------------------
/01-intro/server.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net"
7 | "net/http"
8 | )
9 |
10 | func echoServer(rw http.ResponseWriter, r *http.Request) {
11 | defer r.Body.Close()
12 | io.Copy(rw, r.Body)
13 | }
14 |
15 | func main() {
16 | l, err := net.Listen("tcp", ":8082")
17 | if err != nil {
18 | log.Fatal("listen error:", err)
19 | }
20 |
21 | http.Serve(l, http.HandlerFunc(echoServer))
22 | }
23 |
--------------------------------------------------------------------------------
/01-intro/server_tcp.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net"
7 | )
8 |
9 | func echoServer(conn net.Conn) {
10 | defer conn.Close()
11 | io.Copy(conn, conn)
12 | }
13 |
14 | func main() {
15 | l, err := net.Listen("tcp", ":8082")
16 | if err != nil {
17 | log.Fatal("listen error:", err)
18 | }
19 | for {
20 | conn, err := l.Accept()
21 | if err != nil {
22 | log.Panicln(err)
23 | }
24 |
25 | go echoServer(conn)
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/01-intro/simple.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: ingress_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | domains: ["*"]
22 | routes:
23 | - match: { prefix: "/" }
24 | route: { cluster: somecluster }
25 | http_filters:
26 | - name: envoy.filters.http.router
27 | clusters:
28 | - name: somecluster
29 | connect_timeout: 0.25s
30 | type: STRICT_DNS
31 | lb_policy: ROUND_ROBIN
32 | load_assignment:
33 | cluster_name: somecluster
34 | endpoints:
35 | - lb_endpoints:
36 | - endpoint:
37 | address:
38 | socket_address:
39 | address: 127.0.0.1
40 | port_value: 8082
41 |
42 |
--------------------------------------------------------------------------------
/01-intro/simple_fault.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: ingress_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | domains: ["*"]
22 | routes:
23 | - match: { prefix: "/" }
24 | route: { cluster: somecluster }
25 | http_filters:
26 | - name: envoy.filters.http.fault
27 | typed_config:
28 | "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
29 | abort:
30 | http_status: 418
31 | percentage:
32 | numerator: 50
33 | denominator: HUNDRED
34 | - name: envoy.filters.http.router
35 | clusters:
36 | - name: somecluster
37 | connect_timeout: 0.25s
38 | type: STRICT_DNS
39 | lb_policy: ROUND_ROBIN
40 | load_assignment:
41 | cluster_name: somecluster
42 | endpoints:
43 | - lb_endpoints:
44 | - endpoint:
45 | address:
46 | socket_address:
47 | address: 127.0.0.1
48 | port_value: 8082
49 |
50 |
--------------------------------------------------------------------------------
/01-intro/simple_tcp.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.tcp_proxy
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
16 | stat_prefix: ingress_tcp
17 | cluster: somecluster
18 | clusters:
19 | - name: somecluster
20 | connect_timeout: 0.25s
21 | type: STRICT_DNS
22 | lb_policy: ROUND_ROBIN
23 | hosts: [{ socket_address: { address: 127.0.0.1, port_value: 8082 }}]
24 |
--------------------------------------------------------------------------------
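cors.yaml and simple_tcp.yaml above use the older `hosts:` shorthand on the cluster; newer Envoy releases drop that field in favor of `load_assignment` (already used in simple.yaml). If your Envoy rejects `hosts`, the equivalent cluster definition is roughly:

```
clusters:
- name: somecluster
  connect_timeout: 0.25s
  type: STRICT_DNS
  lb_policy: ROUND_ROBIN
  load_assignment:
    cluster_name: somecluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address: { address: 127.0.0.1, port_value: 8082 }
```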
/02-observe/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Simple config
3 |
4 | run the following in the background, to generate some traffic:
5 | ```
6 | go run server.go&
7 | (cd rl; go run rl.go)&
8 | while true; do
9 | curl localhost:10000/
10 | curl localhost:10000/foo
11 | sleep 1
12 | done
13 | ```
14 |
15 | # Admin page
16 | ```
17 | envoy -c stats.yaml
18 | ```
19 | http://localhost:9901/
20 |
21 |
22 | # Envoy debug logs
23 |
24 | ```
25 | envoy -c simple.yaml -l debug
26 | ```
27 |
28 | OR
29 |
30 | ```
31 | envoy -c simple.yaml
32 | ```
33 | and:
34 | ```
35 | curl -XPOST "localhost:9901/logging?level=debug"
36 | ```
37 |
38 | Envoy will now output logs in debug level.
39 |
40 | # Access Logs
41 |
42 | ```
43 | envoy -c accesslogs.yaml --file-flush-interval-msec 1
44 | ```
45 |
46 | # Prometheus
47 | ```
48 | prometheus --config.file=prometheus.yml --web.listen-address="127.0.0.1:9090" --storage.tsdb.path=$(mktemp -d)
49 | ```
50 | ```
51 | envoy -c stats.yaml
52 | ```
53 |
54 | UI is in:
55 | http://localhost:9090/
56 |
57 | example query:
58 | ```
59 | rate(envoy_listener_http_downstream_rq_xx{envoy_response_code_class="2"}[10s])
60 | ```
61 |
62 | # Jaeger
63 | Distributed tracing is the distributed analog of a stack trace.
64 | For example, you can see a regular go program stack trace here:
65 | http://localhost:6060/debug/pprof/goroutine?debug=2
66 |
67 | To generate some distributed traces:
68 |
69 |
70 | Run jaeger:
71 | ```
72 | docker run --rm --name jaeger \
73 | -e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
74 | -p 5775:5775/udp \
75 | -p 6831:6831/udp \
76 | -p 6832:6832/udp \
77 | -p 5778:5778 \
78 | -p 16686:16686 \
79 | -p 14268:14268 \
80 | -p 14250:14250 \
81 | -p 9411:9411 \
82 | jaegertracing/all-in-one:1.18
83 | ```
84 |
85 | Run envoy:
86 | ```
87 | envoy -c jaeger.yaml
88 | ```
89 |
90 | Wait a few seconds for the traffic-generating command from the first section to
91 | generate some traffic.
92 |
93 | See the traces in the jaeger UI: http://localhost:16686/
94 |
95 | # Notes:
96 | ## tracing
97 | The jaeger shared object for envoy with libstdc++:
98 | https://github.com/jaegertracing/jaeger-client-cpp/releases/download/v0.4.2/libjaegertracing_plugin.linux_amd64.so
99 |
100 | Or, for libc++ envoy, see here:
101 | https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072
102 | (https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz)
103 |
104 |
105 | ## access logs
106 | more info here:
107 | https://www.envoyproxy.io/docs/envoy/v1.15.0/api-v3/config/accesslog/v3/accesslog.proto#envoy-v3-api-msg-config-accesslog-v3-statuscodefilter
108 | format string:
109 | https://www.envoyproxy.io/docs/envoy/v1.15.0/configuration/observability/access_log/usage#config-access-log-format-strings
--------------------------------------------------------------------------------
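accesslogs.yaml (next file) only filters which responses get logged and relies on the default log line. As a rough sketch of the format strings linked above, a custom line format on the file access logger would look like this for the v1.15 API (later Envoy versions replace `format` with `log_format`):

```
typed_config:
  "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
  path: /dev/stdout
  format: "[%START_TIME%] %REQ(:METHOD)% %REQ(:PATH)% %RESPONSE_CODE% %DURATION%ms\n"
```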
/02-observe/accesslogs.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | access_log:
17 | - name: "envoy.access_loggers.file"
18 | filter:
19 | status_code_filter:
20 | comparison:
21 | op: GE
22 | value:
23 | default_value: 400
24 | runtime_key: "filter.request_type"
25 | typed_config:
26 | "@type": "type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog"
27 | path: /dev/stdout
28 | stat_prefix: edge_http
29 | route_config:
30 | name: local_route
31 | virtual_hosts:
32 | - name: namespace.local_service
33 | domains: ["*"]
34 | routes:
35 | - match: { prefix: "/" }
36 | route: { cluster: somecluster }
37 | http_filters:
38 | - name: envoy.filters.http.router
39 | clusters:
40 | - name: somecluster
41 | connect_timeout: 0.25s
42 | type: STRICT_DNS
43 | lb_policy: ROUND_ROBIN
44 | load_assignment:
45 | cluster_name: somecluster
46 | endpoints:
47 | - lb_endpoints:
48 | - endpoint:
49 | address:
50 | socket_address:
51 | address: 127.0.0.1
52 | port_value: 8082
53 |
54 |
--------------------------------------------------------------------------------
/02-observe/libjaegertracing.so.0.4.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/02-observe/libjaegertracing.so.0.4.2
--------------------------------------------------------------------------------
/02-observe/libjaegertracing_plugin.linux_amd64.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/02-observe/libjaegertracing_plugin.linux_amd64.so
--------------------------------------------------------------------------------
/02-observe/observe.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/02-observe/observe.odp
--------------------------------------------------------------------------------
/02-observe/observe.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/02-observe/observe.pdf
--------------------------------------------------------------------------------
/02-observe/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | # How frequently to scrape targets by default.
3 | scrape_interval: 1s
4 |
5 | # How long until a scrape request times out.
6 | scrape_timeout: 1s
7 |
8 | scrape_configs:
9 | - job_name: envoy
10 | metrics_path: /stats/prometheus
11 | static_configs:
12 | - targets:
13 | - 127.0.0.1:9901
--------------------------------------------------------------------------------
/02-observe/rl/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "Launch",
9 | "type": "go",
10 | "request": "launch",
11 | "mode": "auto",
12 | "program": "${workspaceFolder}",
13 | "env": {},
14 | "args": []
15 | }
16 | ]
17 | }
--------------------------------------------------------------------------------
/02-observe/rl/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/solo-io/rl-demo
2 |
3 | go 1.13
4 |
5 | require (
6 | contrib.go.opencensus.io/exporter/zipkin v0.1.2
7 | github.com/envoyproxy/go-control-plane v0.9.0
8 | github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
9 | github.com/opentracing/opentracing-go v1.2.0 // indirect
10 | github.com/openzipkin/zipkin-go v0.2.2
11 | github.com/pkg/errors v0.9.1 // indirect
12 | github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
13 | github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
14 | github.com/yuval-k/go-control-plane v0.6.100 // indirect
15 | go.opencensus.io v0.22.4
16 | go.uber.org/atomic v1.6.0 // indirect
17 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
18 | google.golang.org/grpc v1.23.1
19 | )
20 |
--------------------------------------------------------------------------------
/02-observe/rl/rl-demo:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/02-observe/rl/rl-demo
--------------------------------------------------------------------------------
/02-observe/rl/rl.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "net"
8 | "net/http"
9 |
10 | pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
11 |
12 | "time"
13 |
14 | "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
15 | "github.com/uber/jaeger-client-go"
16 | jaegercfg "github.com/uber/jaeger-client-go/config"
17 | "go.opencensus.io/plugin/ocgrpc"
18 | "go.opencensus.io/stats/view"
19 | "go.opencensus.io/zpages"
20 | "google.golang.org/grpc"
21 | )
22 |
23 | var (
24 | localEndpointURI = "127.0.0.1:10004"
25 | )
26 |
27 | type service struct{}
28 |
29 | func (s *service) ShouldRateLimit(ctx context.Context, r *pb.RateLimitRequest) (*pb.RateLimitResponse, error) {
30 | // this is done automatically:
31 | // ctx, span := trace.StartSpan(ctx, "ShouldRateLimit")
32 | // defer span.End()
33 |
34 | time.Sleep(time.Second)
35 | return &pb.RateLimitResponse{
36 | OverallCode: pb.RateLimitResponse_OK,
37 | }, nil
38 | }
39 |
40 | func setupZpage() {
41 | mux := http.NewServeMux()
42 | zpages.Handle(mux, "/debug")
43 |
44 | // Change the address as needed
45 | addr := ":8888"
46 | if err := http.ListenAndServe(addr, mux); err != nil {
47 | log.Fatalf("Failed to serve zPages " + err.Error())
48 | }
49 | }
50 |
51 | func main() {
52 | if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
53 | log.Fatalf("Failed to register ocgrpc server views: %v", err)
54 | }
55 | cfg := jaegercfg.Configuration{
56 | ServiceName: "ratelimit",
57 | Sampler: &jaegercfg.SamplerConfig{
58 | Type: jaeger.SamplerTypeConst,
59 | Param: 1,
60 | },
61 | Reporter: &jaegercfg.ReporterConfig{
62 | LogSpans: true,
63 | },
64 | }
65 | tracer, closer, err := cfg.NewTracer()
66 | if err != nil {
67 | panic("Could not initialize jaeger tracer: " + err.Error())
68 | }
69 | defer closer.Close()
70 |
71 | grpcServer := grpc.NewServer(grpc.UnaryInterceptor(
72 | otgrpc.OpenTracingServerInterceptor(tracer)),
73 | grpc.StreamInterceptor(
74 | otgrpc.OpenTracingStreamServerInterceptor(tracer)))
75 |
76 | lis, err := net.Listen("tcp", localEndpointURI)
77 | if err != nil {
78 | panic(err)
79 | }
80 | pb.RegisterRateLimitServiceServer(grpcServer, &service{})
81 | fmt.Println("Starting")
82 | go setupZpage()
83 | grpcServer.Serve(lis)
84 | }
85 |
--------------------------------------------------------------------------------
/02-observe/server.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "math/rand"
7 | "net"
8 | "net/http"
9 | _ "net/http/pprof"
10 | )
11 |
12 | func echoServer(rw http.ResponseWriter, r *http.Request) {
13 | defer r.Body.Close()
14 | if rand.Int()%5 == 0 {
15 | http.Error(rw, "error", http.StatusBadRequest)
16 | return
17 | }
18 | io.Copy(rw, r.Body)
19 | }
20 |
21 | func main() {
22 | l, err := net.Listen("tcp", ":8082")
23 | if err != nil {
24 | log.Fatal("listen error:", err)
25 | }
26 |
27 | go func() {
28 | log.Println(http.ListenAndServe("localhost:6060", nil))
29 | }()
30 | http.Serve(l, http.HandlerFunc(echoServer))
31 | }
32 |
--------------------------------------------------------------------------------
/02-observe/simple.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: edge_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | domains: ["*"]
22 | routes:
23 | - match: { prefix: "/" }
24 | route: { cluster: somecluster }
25 | http_filters:
26 | - name: envoy.filters.http.router
27 | clusters:
28 | - name: somecluster
29 | connect_timeout: 0.25s
30 | type: STRICT_DNS
31 | lb_policy: ROUND_ROBIN
32 | load_assignment:
33 | cluster_name: somecluster
34 | endpoints:
35 | - lb_endpoints:
36 | - endpoint:
37 | address:
38 | socket_address:
39 | address: 127.0.0.1
40 | port_value: 8082
41 |
42 |
--------------------------------------------------------------------------------
/02-observe/stats.yaml:
--------------------------------------------------------------------------------
1 | admin:
2 | access_log_path: /dev/stdout
3 | address:
4 | socket_address: { address: 127.0.0.1, port_value: 9901 }
5 |
6 | static_resources:
7 | listeners:
8 | - name: listener_0
9 | address:
10 | socket_address: { address: 0.0.0.0, port_value: 10000 }
11 | filter_chains:
12 | - filters:
13 | - name: envoy.filters.network.http_connection_manager
14 | typed_config:
15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
16 | stat_prefix: edge_http
17 | route_config:
18 | name: local_route
19 | virtual_hosts:
20 | - name: namespace.local_service
21 | virtual_clusters:
22 | - name: actions
23 | headers:
24 | - name: ":path"
25 | prefix_match: "/foo"
26 | domains: ["*"]
27 | routes:
28 | - match: { prefix: "/" }
29 | route: { cluster: somecluster }
30 | http_filters:
31 | - name: envoy.filters.http.router
32 | clusters:
33 | - name: somecluster
34 | connect_timeout: 0.25s
35 | type: STRICT_DNS
36 | lb_policy: ROUND_ROBIN
37 | load_assignment:
38 | cluster_name: somecluster
39 | endpoints:
40 | - lb_endpoints:
41 | - endpoint:
42 | address:
43 | socket_address:
44 | address: 127.0.0.1
45 | port_value: 8082
46 |
47 |
--------------------------------------------------------------------------------
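The `virtual_clusters` entry in stats.yaml gives requests whose `:path` starts with `/foo` their own stat tree. Assuming the usual `vhost.<virtual host>.vcluster.<virtual cluster>.*` naming, you can check it on the admin port once the traffic generator is running:

```
curl -s localhost:9901/stats | grep vcluster.actions
```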
/03-security/README.md:
--------------------------------------------------------------------------------
1 | run ssl server:
2 |
3 | ```
4 | rm /tmp/envoy_admin.log
5 | go run server.go&
6 | envoy -l debug -c edge.yaml
7 | ```
8 |
9 | See that access to the admin interface is logged:
10 | ```
11 | curl localhost:9090/help
12 | cat /tmp/envoy_admin.log
13 | ```
14 |
15 |
16 | Sanity check, that everything is working:
17 | ```
18 | curl --connect-to example.com:443:127.0.0.1:8443 -k -v https://example.com
19 | ```
20 | (we use the --connect-to flag so that curl sends the correct SNI)
21 |
22 |
23 | See envoy rejects request with an invalid header:
24 | ```
25 | curl --connect-to example.com:443:127.0.0.1:8443 -k -v https://example.com -H"invalid_header: foo"
26 | ```
27 |
28 | See how the XFF header is processed:
29 | ```
30 | curl --connect-to example.com:443:127.0.0.1:8443 -k https://example.com -H"x-forwarded-for: 1.2.3.4"
31 | envoy -c simple.yaml --disable-hot-restart &
32 | curl http://localhost:10000 -H"x-forwarded-for: 1.2.3.4"
33 | ```
34 |
35 | You can impact XFF processing using `use_remote_address` and `xff_num_trusted_hops`
--------------------------------------------------------------------------------
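As a sketch of the two knobs mentioned at the end of the README (not a file from this repo), they sit directly on the HttpConnectionManager. With `use_remote_address: true`, Envoy uses the downstream connection address and trusts `xff_num_trusted_hops` additional entries from the right of `x-forwarded-for`:

```
- name: envoy.filters.network.http_connection_manager
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
    stat_prefix: ingress_http
    use_remote_address: true
    xff_num_trusted_hops: 1
    # route_config and http_filters as in simple.yaml
```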
/03-security/cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDEjCCAfqgAwIBAgIUAJBAlTZkRndLn4kQzoD+kdMug5kwDQYJKoZIhvcNAQEL
3 | BQAwNTELMAkGA1UEBhMCVVMxEDAOBgNVBAoMB0V4YW1wbGUxFDASBgNVBAMMC2V4
4 | YW1wbGUuY29tMB4XDTIwMDgxODExMzUxOFoXDTMwMDgxNjExMzUxOFowNTELMAkG
5 | A1UEBhMCVVMxEDAOBgNVBAoMB0V4YW1wbGUxFDASBgNVBAMMC2V4YW1wbGUuY29t
6 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuNuXtMBTMLN0NJicbcNx
7 | KfjIFFF9czX4lWn0PHvxgVWwO9CLRm4k5/xfHc4/Y9NhH9oWRRyK3fwJ6Zb7qLpq
8 | HQi1VyyR4CCTavw0/Y/sAx6S0WhkDrhKToXZ3qHOWhmv1RGyHZPgiesjtjd+S0Ea
9 | IBxTIJKg/ZLelLMgvMUhL3qgTIW25EcdRQATiKZ7Vqz5EbXNNAXRrpvtXdLcjsPR
10 | uuGM15CK8pcfR1RK8V8JJ9CT/SOHWfsY4wIFj35jLkEapdfUvWDwZFDJhUxn/X9o
11 | e3TlxuKw/HYKeBiBmMjolbpdEcJgzfNMfH3vJKZQNeKF6zNoNEe2P4DHTHKV5Dx4
12 | VQIDAQABoxowGDAWBgNVHREEDzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsF
13 | AAOCAQEAI0NuA2u6Tf0+70+CoXnaTbHJQzQjcwDZkpQqEJXoHBNnmuocVRSHdHmJ
14 | Urs26+MEKPLoqmWsNeKXM8fuju+AcJkpDdXCm5szsj3YwTCacV+sijEFD3vXAV2p
15 | AyHnvuLWkPR8ZC5+I1n/K7xYNa62xNLL8riEN1T7dvFdLx8pDhhPVefb1tukEL2H
16 | siY4YBfOWhNm2TTqJpoGhdLyHanjolers0reT27Oo17WPnZONacywgVNFo4IJYu7
17 | S/fVTq8iX/vEQuZolAKA6gH3aU72wqYxKoiRLMnJ54GcTuU+9ifxo9vfPX/21Ct7
18 | kIyrv5YLw4Vod6u0x+Z4pwMPyuSPWA==
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/03-security/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: gloo
6 | gateway-proxy-id: gateway-proxy
7 | gloo: gateway-proxy
8 | name: gateway-proxy
9 | namespace: gloo-system
10 | spec:
11 | progressDeadlineSeconds: 600
12 | replicas: 1
13 | revisionHistoryLimit: 10
14 | selector:
15 | matchLabels:
16 | gateway-proxy-id: gateway-proxy
17 | gloo: gateway-proxy
18 | template:
19 | metadata:
20 | annotations:
21 | prometheus.io/path: /metrics
22 | prometheus.io/port: "8081"
23 | prometheus.io/scrape: "true"
24 | labels:
25 | gateway-proxy: live
26 | gateway-proxy-id: gateway-proxy
27 | gloo: gateway-proxy
28 | spec:
29 | containers:
30 | - args:
31 | - --disable-hot-restart
32 | env:
33 | - name: POD_NAMESPACE
34 | valueFrom:
35 | fieldRef:
36 | apiVersion: v1
37 | fieldPath: metadata.namespace
38 | - name: POD_NAME
39 | valueFrom:
40 | fieldRef:
41 | apiVersion: v1
42 | fieldPath: metadata.name
43 | image: quay.io/solo-io/gloo-ee-envoy-wrapper:1.4.8
44 | imagePullPolicy: IfNotPresent
45 | name: gateway-proxy
46 | ports:
47 | - containerPort: 8080
48 | name: http
49 | protocol: TCP
50 | - containerPort: 8443
51 | name: https
52 | protocol: TCP
53 | resources:
54 | requests:
55 | cpu: 500m
56 | memory: 256Mi
57 | limits:
58 | cpu: 1000m
59 | memory: 1Gi
60 | securityContext:
61 | allowPrivilegeEscalation: false
62 | runAsUser: 10101
63 | capabilities:
64 | add:
65 | - NET_BIND_SERVICE
66 | drop:
67 | - ALL
68 | readOnlyRootFilesystem: true
69 | volumeMounts:
70 | - mountPath: /etc/envoy
71 | name: envoy-config
72 | dnsPolicy: ClusterFirst
73 | serviceAccountName: gateway-proxy
74 | volumes:
75 | - configMap:
76 | defaultMode: 420
77 | name: gateway-proxy-envoy-config
78 | name: envoy-config
--------------------------------------------------------------------------------
/03-security/example_com_cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDEjCCAfqgAwIBAgIUAJBAlTZkRndLn4kQzoD+kdMug5kwDQYJKoZIhvcNAQEL
3 | BQAwNTELMAkGA1UEBhMCVVMxEDAOBgNVBAoMB0V4YW1wbGUxFDASBgNVBAMMC2V4
4 | YW1wbGUuY29tMB4XDTIwMDgxODExMzUxOFoXDTMwMDgxNjExMzUxOFowNTELMAkG
5 | A1UEBhMCVVMxEDAOBgNVBAoMB0V4YW1wbGUxFDASBgNVBAMMC2V4YW1wbGUuY29t
6 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuNuXtMBTMLN0NJicbcNx
7 | KfjIFFF9czX4lWn0PHvxgVWwO9CLRm4k5/xfHc4/Y9NhH9oWRRyK3fwJ6Zb7qLpq
8 | HQi1VyyR4CCTavw0/Y/sAx6S0WhkDrhKToXZ3qHOWhmv1RGyHZPgiesjtjd+S0Ea
9 | IBxTIJKg/ZLelLMgvMUhL3qgTIW25EcdRQATiKZ7Vqz5EbXNNAXRrpvtXdLcjsPR
10 | uuGM15CK8pcfR1RK8V8JJ9CT/SOHWfsY4wIFj35jLkEapdfUvWDwZFDJhUxn/X9o
11 | e3TlxuKw/HYKeBiBmMjolbpdEcJgzfNMfH3vJKZQNeKF6zNoNEe2P4DHTHKV5Dx4
12 | VQIDAQABoxowGDAWBgNVHREEDzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsF
13 | AAOCAQEAI0NuA2u6Tf0+70+CoXnaTbHJQzQjcwDZkpQqEJXoHBNnmuocVRSHdHmJ
14 | Urs26+MEKPLoqmWsNeKXM8fuju+AcJkpDdXCm5szsj3YwTCacV+sijEFD3vXAV2p
15 | AyHnvuLWkPR8ZC5+I1n/K7xYNa62xNLL8riEN1T7dvFdLx8pDhhPVefb1tukEL2H
16 | siY4YBfOWhNm2TTqJpoGhdLyHanjolers0reT27Oo17WPnZONacywgVNFo4IJYu7
17 | S/fVTq8iX/vEQuZolAKA6gH3aU72wqYxKoiRLMnJ54GcTuU+9ifxo9vfPX/21Ct7
18 | kIyrv5YLw4Vod6u0x+Z4pwMPyuSPWA==
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/03-security/example_com_key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC425e0wFMws3Q0
3 | mJxtw3Ep+MgUUX1zNfiVafQ8e/GBVbA70ItGbiTn/F8dzj9j02Ef2hZFHIrd/Anp
4 | lvuoumodCLVXLJHgIJNq/DT9j+wDHpLRaGQOuEpOhdneoc5aGa/VEbIdk+CJ6yO2
5 | N35LQRogHFMgkqD9kt6UsyC8xSEveqBMhbbkRx1FABOIpntWrPkRtc00BdGum+1d
6 | 0tyOw9G64YzXkIrylx9HVErxXwkn0JP9I4dZ+xjjAgWPfmMuQRql19S9YPBkUMmF
7 | TGf9f2h7dOXG4rD8dgp4GIGYyOiVul0RwmDN80x8fe8kplA14oXrM2g0R7Y/gMdM
8 | cpXkPHhVAgMBAAECggEAbaXK+WqurY90GRKAjtbDk+q9+tdPOvxDY5TCSvIVkOqb
9 | qw8K3Y7Nk1Dttkc08GhcDsGUPhAtCnjKBGULBszzzh7xOTD1oeSQrH/GirRIgJ0s
10 | 5ssttlF3udXJIjYFqQndctuZM9QX9pajyxxlbAvEjFFguUxf86ifH8KUY4PmKDtn
11 | r3o23vQTJKLr44kSaFxHzS1JPN1/YPTzUDajeiI6P0DmTthl8EN3S7xB43+RjymZ
12 | OCuETYfelZecrMsyFS+0Hy6tAqqN45cnxIU11Fk+DAo26wXv6HQGhUF+egTZ9pPl
13 | 52VIm76fbwIEGTkuAeqHCc6E7lgu5zLPTOtyPYBzbQKBgQDyXvcFhIAyK62vWqvi
14 | vyJIxCA0T26y5BaaDbY0pCMN9pAaHmsOP/hbxgYrDixs4DaQ8Sl7Gp/rcdk7rn5j
15 | 7OiBzhx0iPrCOKIP33S6CZlThMYZYWWcr/uCrPdpwblL4s9T1z8ZEZaeLmQdf5OC
16 | 7l9wDsn/3iV9NL31JiFOdlvVkwKBgQDDQLNaMYv9gDbJr6zIFzYTlXH+N5inMc5u
17 | CTgFWpCCUrdr8RlkEDzW6VBM3k7mz2NSr/Vok5jsP6eLot9ur9HvCPyQmzBx+Xep
18 | O3CgZV+1MCjlDsjCx79SwGob3feRgdmglRAXpd7xHH7cO5j0XSsb/xt51czkakyo
19 | 4uwz9nGrdwKBgQCurbS4vEssXYB7Dg0tiXVrHjbbnJf6xBpqAxHblKRTDu3CdUPQ
20 | 7FfRxMFzTG7GNT3+bmDoAUNajB31PoiG+hqEWwj1EDgxvGFIsVLONdbUc6/WYPS7
21 | 3bWV85UJGn6ya4/jLBGkvqFf9a1f+S9WQPh1qaL6Dp/UQivZwBZnFzSOkwKBgEon
22 | dnZYvuwGF8i9eYd8mUMSzPpZrxZAqj/MLISMArocJCFQre385L5SWfb3r0ulk/lB
23 | bpjLBZckDcPQlb7582cvKIzbA6klr7aMJpgkKBcuNTnZRZIEPrt07FetE2slv7tn
24 | IXd5vZQg2GVYFR0pjUfSuWKqOmi9nmO92ytY+469AoGBALdPqXPza4uCV5mhNJTk
25 | 6DAP1F62NNNNllkihnzn0GxJGP6ja/GugYysgKtVOm4dM49bTuqou7VEaxp83bBG
26 | eY8lwpsdnfoOlAslUuvDQpHxMZPpnzyf4KaJOJ8t0kYjiq+tslxGVcuQDsxl1N8T
27 | L7q2TOS/vhKMN/kcZLSWQ9ny
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/03-security/key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC425e0wFMws3Q0
3 | mJxtw3Ep+MgUUX1zNfiVafQ8e/GBVbA70ItGbiTn/F8dzj9j02Ef2hZFHIrd/Anp
4 | lvuoumodCLVXLJHgIJNq/DT9j+wDHpLRaGQOuEpOhdneoc5aGa/VEbIdk+CJ6yO2
5 | N35LQRogHFMgkqD9kt6UsyC8xSEveqBMhbbkRx1FABOIpntWrPkRtc00BdGum+1d
6 | 0tyOw9G64YzXkIrylx9HVErxXwkn0JP9I4dZ+xjjAgWPfmMuQRql19S9YPBkUMmF
7 | TGf9f2h7dOXG4rD8dgp4GIGYyOiVul0RwmDN80x8fe8kplA14oXrM2g0R7Y/gMdM
8 | cpXkPHhVAgMBAAECggEAbaXK+WqurY90GRKAjtbDk+q9+tdPOvxDY5TCSvIVkOqb
9 | qw8K3Y7Nk1Dttkc08GhcDsGUPhAtCnjKBGULBszzzh7xOTD1oeSQrH/GirRIgJ0s
10 | 5ssttlF3udXJIjYFqQndctuZM9QX9pajyxxlbAvEjFFguUxf86ifH8KUY4PmKDtn
11 | r3o23vQTJKLr44kSaFxHzS1JPN1/YPTzUDajeiI6P0DmTthl8EN3S7xB43+RjymZ
12 | OCuETYfelZecrMsyFS+0Hy6tAqqN45cnxIU11Fk+DAo26wXv6HQGhUF+egTZ9pPl
13 | 52VIm76fbwIEGTkuAeqHCc6E7lgu5zLPTOtyPYBzbQKBgQDyXvcFhIAyK62vWqvi
14 | vyJIxCA0T26y5BaaDbY0pCMN9pAaHmsOP/hbxgYrDixs4DaQ8Sl7Gp/rcdk7rn5j
15 | 7OiBzhx0iPrCOKIP33S6CZlThMYZYWWcr/uCrPdpwblL4s9T1z8ZEZaeLmQdf5OC
16 | 7l9wDsn/3iV9NL31JiFOdlvVkwKBgQDDQLNaMYv9gDbJr6zIFzYTlXH+N5inMc5u
17 | CTgFWpCCUrdr8RlkEDzW6VBM3k7mz2NSr/Vok5jsP6eLot9ur9HvCPyQmzBx+Xep
18 | O3CgZV+1MCjlDsjCx79SwGob3feRgdmglRAXpd7xHH7cO5j0XSsb/xt51czkakyo
19 | 4uwz9nGrdwKBgQCurbS4vEssXYB7Dg0tiXVrHjbbnJf6xBpqAxHblKRTDu3CdUPQ
20 | 7FfRxMFzTG7GNT3+bmDoAUNajB31PoiG+hqEWwj1EDgxvGFIsVLONdbUc6/WYPS7
21 | 3bWV85UJGn6ya4/jLBGkvqFf9a1f+S9WQPh1qaL6Dp/UQivZwBZnFzSOkwKBgEon
22 | dnZYvuwGF8i9eYd8mUMSzPpZrxZAqj/MLISMArocJCFQre385L5SWfb3r0ulk/lB
23 | bpjLBZckDcPQlb7582cvKIzbA6klr7aMJpgkKBcuNTnZRZIEPrt07FetE2slv7tn
24 | IXd5vZQg2GVYFR0pjUfSuWKqOmi9nmO92ytY+469AoGBALdPqXPza4uCV5mhNJTk
25 | 6DAP1F62NNNNllkihnzn0GxJGP6ja/GugYysgKtVOm4dM49bTuqou7VEaxp83bBG
26 | eY8lwpsdnfoOlAslUuvDQpHxMZPpnzyf4KaJOJ8t0kYjiq+tslxGVcuQDsxl1N8T
27 | L7q2TOS/vhKMN/kcZLSWQ9ny
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/03-security/makecerts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # temp dir: https://stackoverflow.com/a/53063602/328631
4 | # Create a temporary directory and store its name in a variable ...
5 | TMPDIR=$(mktemp -d)
6 |
7 | # Bail out if the temp directory wasn't created successfully.
8 | if [ ! -e $TMPDIR ]; then
9 | >&2 echo "Failed to create temp directory"
10 | exit 1
11 | fi
12 |
13 | # Make sure it gets removed even if the script exits abnormally.
14 | trap "exit 1" HUP INT PIPE QUIT TERM
15 | trap 'rm -rf "$TMPDIR"' EXIT
16 |
17 | # note \$ is for bash.
18 | # source https://www.switch.ch/pki/manage/request/csr-openssl/
19 | # http://apetec.com/support/GenerateSAN-CSR.htm
20 | # https://stackoverflow.com/questions/21488845/how-can-i-generate-a-self-signed-certificate-with-subjectaltname-using-openssl
21 | # https://stackoverflow.com/questions/6194236/openssl-certificate-version-3-with-subject-alternative-name
22 | cat > $TMPDIR/openssl.cnf <<EOF
--------------------------------------------------------------------------------
/12-hitless-delpoy/README.md:
--------------------------------------------------------------------------------
 8 | # LB -> Envoy
9 | Configure the envoy health check filter.
10 | Before exiting envoy, fail health checks (`POST /healthcheck/fail` on the admin page)
11 |
12 |
13 | # Envoy -> LB
14 |
15 | Configure active or passive health checks, and retries.
16 |
17 | # Caveats
18 |
19 | If exposing envoy as a NodePort k8s service, then using the standard health checks from the cloud load balancer
20 | is probably something you want to avoid. The load balancer will send health checks to each k8s node.
21 | The node, in turn, will send the request to a random pod, resulting in inconsistent health info given
22 | to the cloud load balancer.
23 |
24 | Note that envoy is not aware of k8s readiness/liveness probes. Either have your control plane propagate
25 | this info to envoy via EDS, or configure separate health checks on envoy, regardless of the k8s probes.
26 |
27 | Also note that in distributed systems, each component has an eventually consistent state.
28 | This means that when you want to remove a pod, you want to give the pod some time to drain requests.
29 | During this time, the pod should fail health checks, and the control plane should remove it from envoy.
30 | This gives enough time for components sending traffic to the pod (i.e. envoy) to reconcile their state
31 | and stop sending traffic without disruption.
32 |
33 | # Demo
34 |
35 | ## Envoy to upstream
36 | Run the xds server:
37 | ```
38 | (cd xds; go run xds.go)
39 | ```
40 |
41 | Run envoy:
42 | ```
43 | envoy -c envoy.yaml
44 | ```
45 |
46 | See failing requests stats
47 |
48 | ```
49 | curl -s http://localhost:8001/stats | grep listener.0.0.0.0_8000.http.ingress_http.downstream_rq_5xx
50 | ```
51 |
52 | send requests:
53 | ```
54 | while true; do hey -n 100 http://localhost:8000/ ; sleep 1;done
55 | ```
56 |
57 | ## LB to envoy
58 |
59 | Check health:
60 | ```
61 | curl -v http://localhost:8000/health
62 | ```
63 |
64 | Fail health checks:
65 | ```
66 | curl -XPOST http://localhost:8001/healthcheck/fail
67 | ```
68 |
69 |
70 | # More resources:
71 |
72 | https://www.envoyproxy.io/docs/envoy/latest/faq/load_balancing/transient_failures.html
73 | https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/health_checking
74 | https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/health_check_filter
--------------------------------------------------------------------------------
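The load-balancer half of this demo health-checks envoy through the health check HTTP filter on `/health`; the demo's actual wiring lives in envoy.yaml / the xds server in this directory. A minimal sketch of that filter: with `pass_through_mode: false`, envoy answers the probe itself, and `POST /healthcheck/fail` on the admin port makes it return 503 until `POST /healthcheck/ok`.

```
- name: envoy.filters.http.health_check
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck
    pass_through_mode: false
    headers:
    - name: ":path"
      exact_match: "/health"
```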
/12-hitless-delpoy/slides.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/12-hitless-delpoy/slides.odp
--------------------------------------------------------------------------------
/12-hitless-delpoy/slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/12-hitless-delpoy/slides.pdf
--------------------------------------------------------------------------------
/12-hitless-delpoy/xds/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/solo-io/xds
2 |
3 | go 1.13
4 |
5 | require (
6 | github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect
7 | github.com/davecgh/go-spew v1.1.1 // indirect
8 | github.com/envoyproxy/go-control-plane v0.9.7-0.20200831211728-bff20ab2355c
9 | github.com/golang/protobuf v1.4.2
10 | go.uber.org/zap v1.15.0
11 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect
12 | golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
13 | golang.org/x/text v0.3.2 // indirect
14 | google.golang.org/grpc v1.27.0
15 | google.golang.org/protobuf v1.23.0
16 |
17 | )
18 |
--------------------------------------------------------------------------------
/14-istio-debugging/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 | Use istioctl analyze to detect issues
3 |
4 | ```
5 | istioctl analyze
6 | ```
7 |
8 | Pilot's ControlZ interface:
9 |
10 | ```bash
11 | kubectl port-forward -n istio-system deploy/istiod 9876
12 | ```
13 |
14 | Or
15 |
16 | ```bash
17 | istioctl dashboard controlz deployment/istiod.istio-system
18 | ```
19 |
20 | ## Envoy admin page:
21 |
22 | ```bash
23 | kubectl port-forward -n default deploy/reviews-v1 15000 &
24 | ```
25 |
26 | ## Envoy bootstrap config
27 |
28 | ```bash
29 | kubectl exec -n default deploy/reviews-v1 -c istio-proxy -- cat /etc/istio/proxy/envoy-rev0.json
30 | ```
31 |
32 | Or:
33 |
34 | ```bash
35 | istioctl proxy-config bootstrap -n default deploy/reviews-v1
36 | ```
37 |
38 | ## Envoy in sync:
39 |
40 | ```bash
41 | istioctl proxy-status
42 | ```
43 |
44 | Or see if update_rejected gets incremented for a pod:
45 |
46 | ```bash
47 | curl localhost:15000/stats | grep update_rejected
48 | ```
49 |
50 | ## Envoy config dump
51 |
52 | ```bash
53 | curl localhost:15000/config_dump
54 | ```
55 |
56 | Or
57 |
58 | ```bash
59 | istioctl proxy-status deploy/reviews-v1
60 | istioctl proxy-config cluster deploy/reviews-v1
61 | istioctl proxy-config route deploy/productpage-v1
62 | istioctl proxy-config listener deploy/details-v1
63 | ```
64 |
65 | # Debug logs
66 |
67 | Control plane, through the ControlZ interface
68 |
69 | Data plane:
70 |
71 | ```bash
72 | curl 'localhost:15000/logging?level=debug'
73 | kubectl logs -n default deploy/productpage-v1 -c istio-proxy
74 | ```
75 |
76 |
77 |
78 | # Further reading
79 |
80 | - https://istio.io/latest/docs/ops/diagnostic-tools/
81 | - https://istio.io/latest/docs/ops/common-problems/
--------------------------------------------------------------------------------
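Newer istioctl versions can also toggle proxy log levels without port-forwarding; assuming the same deployment names as above:

```bash
istioctl proxy-config log deploy/productpage-v1 --level debug
```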
/14-istio-debugging/destination-rule-all.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: productpage
5 | spec:
6 | host: productpage
7 | subsets:
8 | - name: v1
9 | labels:
10 | version: v1
11 | ---
12 | apiVersion: networking.istio.io/v1alpha3
13 | kind: DestinationRule
14 | metadata:
15 | name: reviews
16 | spec:
17 | host: reviews
18 | subsets:
19 | - name: v1
20 | labels:
21 | version: v1
22 | - name: v2
23 | labels:
24 | version: v2
25 | - name: v3
26 | labels:
27 | version: v3
28 | ---
29 | apiVersion: networking.istio.io/v1alpha3
30 | kind: DestinationRule
31 | metadata:
32 | name: ratings
33 | spec:
34 | host: ratings
35 | subsets:
36 | - name: v1
37 | labels:
38 | version: v1
39 | - name: v2
40 | labels:
41 | version: v2
42 | - name: v2-mysql
43 | labels:
44 | version: v2-mysql
45 | - name: v2-mysql-vm
46 | labels:
47 | version: v2-mysql-vm
48 | ---
49 | apiVersion: networking.istio.io/v1alpha3
50 | kind: DestinationRule
51 | metadata:
52 | name: details
53 | spec:
54 | host: details
55 | subsets:
56 | - name: v1
57 | labels:
58 | version: v1
59 | - name: v2
60 | labels:
61 | version: v2
62 | ---
63 |
--------------------------------------------------------------------------------
/14-istio-debugging/setup.sh:
--------------------------------------------------------------------------------
1 | kind create cluster
2 |
3 | $ISTIO_HOME/bin/istioctl install --set profile=minimal --set meshConfig.accessLogFile=/dev/stdout
4 | kubectl label namespace default istio-injection=enabled
5 | sleep 1
6 | kubectl apply -f $ISTIO_HOME/samples/bookinfo/platform/kube/bookinfo.yaml
7 |
8 | export PATH=$ISTIO_HOME/bin:$PATH
9 |
10 |
11 | kubectl port-forward -n default deploy/productpage-v1 9080 &
12 |
13 |
14 | # apply vs with the destination rule to show an error
15 | kubectl apply -f virtual-service-all-v1.yaml
16 |
17 | # you can fix this error with:
18 | # kubectl apply -f destination-rule-all.yaml
19 |
20 | # after the error is fixed you can see the route name in the logs
21 | # kubectl logs -n default deploy/productpage-v1 -c istio-proxy
22 |
--------------------------------------------------------------------------------
/14-istio-debugging/slides.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/14-istio-debugging/slides.odp
--------------------------------------------------------------------------------
/14-istio-debugging/slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/14-istio-debugging/slides.pdf
--------------------------------------------------------------------------------
/14-istio-debugging/virtual-service-all-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: productpage
5 | spec:
6 | hosts:
7 | - productpage
8 | http:
9 | - route:
10 | - destination:
11 | host: productpage
12 | subset: v1
13 | ---
14 | apiVersion: networking.istio.io/v1alpha3
15 | kind: VirtualService
16 | metadata:
17 | name: reviews
18 | spec:
19 | hosts:
20 | - reviews
21 | http:
22 | - route:
23 | - destination:
24 | host: reviews
25 | subset: v1
26 | ---
27 | apiVersion: networking.istio.io/v1alpha3
28 | kind: VirtualService
29 | metadata:
30 | name: ratings
31 | spec:
32 | hosts:
33 | - ratings
34 | http:
35 | - route:
36 | - destination:
37 | host: ratings
38 | subset: v1
39 | ---
40 | apiVersion: networking.istio.io/v1alpha3
41 | kind: VirtualService
42 | metadata:
43 | name: details
44 | spec:
45 | hosts:
46 | - details
47 | http:
48 | - route:
49 | - destination:
50 | host: details
51 | subset: v1
52 | name: details-default-route
53 |
--------------------------------------------------------------------------------
/15-envoy-external-services/README.md:
--------------------------------------------------------------------------------
1 | # Demo
2 |
3 | Start an upstream and envoy:
4 | ```
5 | python -m http.server --bind 127.0.0.1 8082 &
6 | podman run -ti --rm --net=host -v ${PWD}:${PWD} -w ${PWD} docker.io/envoyproxy/envoy:v1.18.2 -c envoy.yaml &
7 | ```
8 | Start the external services gRPC server:
9 | ```
10 | (cd server; go run server.go)
11 | ```
12 |
13 | Now you can curl
14 | ```
15 | curl http://localhost:10000
16 | ```
17 |
18 | # References
19 |
20 | https://www.envoyproxy.io/docs/envoy/v1.18.2/api-v3/extensions/filters/http/ext_authz/v3/ext_authz.proto.html
21 |
22 | https://www.envoyproxy.io/docs/envoy/v1.18.2/api-v3/extensions/filters/http/ratelimit/v3/rate_limit.proto#extensions-filters-http-ratelimit-v3-ratelimit
23 |
24 | https://www.envoyproxy.io/docs/envoy/v1.18.2/api-v3/extensions/access_loggers/grpc/v3/als.proto#envoy-v3-api-msg-extensions-access-loggers-grpc-v3-httpgrpcaccesslogconfig
25 |
26 | https://www.envoyproxy.io/docs/envoy/v1.18.2/api-v3/config/metrics/v3/metrics_service.proto
27 |
28 | https://github.com/open-policy-agent/opa-envoy-plugin
29 |
30 | https://github.com/envoyproxy/envoy/tree/main/api/envoy/service
--------------------------------------------------------------------------------
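The gRPC server below implements four Envoy external services (authorization, rate limit, access log, metrics). As a sketch of how one of them is attached on the Envoy side, an ext_authz filter pointing at a cluster for 127.0.0.1:10004 looks roughly like this (the cluster name is illustrative; the demo's envoy.yaml is the source of truth):

```
http_filters:
- name: envoy.filters.http.ext_authz
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
    transport_api_version: V3
    grpc_service:
      envoy_grpc:
        cluster_name: ext-services
      timeout: 0.25s
- name: envoy.filters.http.router
```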
/15-envoy-external-services/server/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/solo-io/ext-svc-demo
2 |
3 | go 1.16
4 |
5 | require (
6 | contrib.go.opencensus.io/exporter/zipkin v0.1.2
7 | github.com/envoyproxy/go-control-plane v0.9.8
8 | github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
9 | github.com/opentracing/opentracing-go v1.2.0 // indirect
10 | github.com/openzipkin/zipkin-go v0.2.2
11 | github.com/pkg/errors v0.9.1 // indirect
12 | github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
13 | github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
14 | go.opencensus.io v0.22.4
15 | go.uber.org/atomic v1.6.0 // indirect
16 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
17 | google.golang.org/grpc v1.27.0
18 | )
19 |
--------------------------------------------------------------------------------
/15-envoy-external-services/server/server.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net"
7 |
8 | als_pb "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3"
9 | auth_pb "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3"
10 | metrics_pb "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v3"
11 | rl_pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
12 |
13 | "google.golang.org/grpc"
14 | )
15 |
16 | var (
17 | localEndpointURI = "127.0.0.1:10004"
18 | )
19 |
20 | type service struct{}
21 |
22 | func (s *service) ShouldRateLimit(ctx context.Context, r *rl_pb.RateLimitRequest) (*rl_pb.RateLimitResponse, error) {
23 | fmt.Println("received should rate limit")
24 | // return nil, fmt.Errorf("error")
25 | return &rl_pb.RateLimitResponse{
26 | OverallCode: rl_pb.RateLimitResponse_OK,
27 | }, nil
28 | }
29 | func (s *service) Check(ctx context.Context, r *auth_pb.CheckRequest) (*auth_pb.CheckResponse, error) {
30 | fmt.Println("received check request")
31 | // return nil, fmt.Errorf("error")
32 | return &auth_pb.CheckResponse{}, nil
33 | }
34 |
35 | func (s *service) StreamAccessLogs(r als_pb.AccessLogService_StreamAccessLogsServer) error {
36 | for {
37 | msg, err := r.Recv()
38 | if err != nil {
39 | return err
40 | }
41 | for _, le := range msg.GetHttpLogs().LogEntry {
42 | fmt.Println("received access log", le.GetRequest().GetPath(), le.GetResponse().GetResponseCode().GetValue())
43 | }
44 | }
45 | }
46 |
47 | func (s *service) StreamMetrics(r metrics_pb.MetricsService_StreamMetricsServer) error {
48 | for {
49 | msg, err := r.Recv()
50 | if err != nil {
51 | return err
52 | }
53 | metrics := msg.EnvoyMetrics
54 | for _, em := range metrics {
55 | name := em.GetName()
56 | if name == "cluster.somecluster.upstream_rq_total" {
57 | for _, m := range em.Metric {
58 | fmt.Println("received metric", name, m.GetCounter().GetValue())
59 | }
60 | }
61 | }
62 | }
63 | }
64 |
65 | func main() {
66 |
67 | grpcServer := grpc.NewServer()
68 |
69 | lis, err := net.Listen("tcp", localEndpointURI)
70 | if err != nil {
71 | panic(err)
72 | }
73 | s := &service{}
74 | rl_pb.RegisterRateLimitServiceServer(grpcServer, s)
75 | auth_pb.RegisterAuthorizationServer(grpcServer, s)
76 | als_pb.RegisterAccessLogServiceServer(grpcServer, s)
77 | metrics_pb.RegisterMetricsServiceServer(grpcServer, s)
78 | fmt.Println("Starting")
79 | grpcServer.Serve(lis)
80 | }
81 |
--------------------------------------------------------------------------------
/15-envoy-external-services/slides.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/15-envoy-external-services/slides.odp
--------------------------------------------------------------------------------
/15-envoy-external-services/slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/15-envoy-external-services/slides.pdf
--------------------------------------------------------------------------------
/17-ratelimit/README.md:
--------------------------------------------------------------------------------
1 | # Intro
2 |
3 | Global rate limiting is performed using a server external to envoy that holds the rate limit counter state. The envoy project provides an implementation that uses Redis. You can create your own server by
4 | implementing the Rate Limit gRPC API.
5 |
6 | Rate limit descriptors are configured in envoy using a list of actions.
7 | Each action may generate a descriptor entry (a key and value). If an action does not generate an entry, the whole descriptor is (usually) not generated.
8 |
9 | So for a request that envoy wants to rate limit, it sends a list of descriptors; each descriptor has
10 | an ordered list of descriptor entries, and each descriptor entry has a string key and value.
11 |
12 | A visual example for 2 descriptors:
13 | ```
14 | [(generic_key, foo), (header_value, bar)]
15 | [(generic_key, abc), (remote_address, 1.2.3.4), (header_value, edf)]
16 | ```
17 |
18 | The rate limit server then increments a counter for each descriptor. If for any of the descriptors the
19 | counter goes above the limit defined in the server config, the request is rate limited.
20 |
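For illustration only (this demo's real config lives in `envoy.yaml` in this directory), route-level actions that would produce descriptors shaped like the two above could look roughly like this, assuming hypothetical headers `x-bar` and `x-edf` supply the header values:
```
rate_limits:
- actions:                        # descriptor 1
  - generic_key:
      descriptor_value: foo
  - request_headers:
      header_name: x-bar          # hypothetical header carrying "bar"
      descriptor_key: header_value
- actions:                        # descriptor 2
  - generic_key:
      descriptor_value: abc
  - remote_address: {}            # key is remote_address, value is the client IP
  - request_headers:
      header_name: x-edf          # hypothetical header carrying "edf"
      descriptor_key: header_value
```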
21 | # Demo
22 | Run some server (doesn't really matter what, just so we get an OK response):
23 | ```
24 | python -m http.server --bind 127.0.0.1 8082&
25 | ```
26 |
27 | Run Redis, for the rate limit server:
28 | ```
29 | redis-server &
30 | ```
31 |
32 | Export these environment variables to configure the rate limit server, then run it:
33 | ```
34 | export REDIS_SOCKET_TYPE=tcp
35 | export REDIS_URL=localhost:6379
36 | export LOG_LEVEL=debug
37 | export USE_STATSD=false
38 | export GRPC_PORT=10004
39 | export RUNTIME_ROOT=$PWD
40 | export RUNTIME_SUBDIRECTORY=rlconfig
41 | export RUNTIME_WATCH_ROOT=false
42 |
43 | go run github.com/envoyproxy/ratelimit/src/service_cmd
44 | ```
45 |
46 | Run envoy:
47 | ```
48 | envoy -c envoy.yaml&
49 | ```
50 |
51 | Curl the upstream:
52 | ```
53 | curl localhost:10000/
54 | curl -XPOST http://localhost:10000/resources
55 | # or use hey to send a bunch of requests
56 | hey http://localhost:10000
57 | hey -m POST http://localhost:10000/resources
58 | ```
59 | Curl the stats page:
60 | ```
61 | curl localhost:9901/stats|grep ratelimit
62 | ```
63 |
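If the filter is active, the grep should surface the rate limit filter counters; illustrative output (the cluster name depends on the cluster defined in `envoy.yaml`):
```
cluster.some_service.ratelimit.ok: 12
cluster.some_service.ratelimit.over_limit: 3
cluster.some_service.ratelimit.error: 0
```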
64 | Rate limit debug page: http://localhost:6070/rlconfig
65 |
66 | References:
67 | - https://www.envoyproxy.io/docs/envoy/latest/configuration/other_features/rate_limit
68 | - https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/rate_limit_filter
69 | - https://github.com/envoyproxy/ratelimit
70 | - https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-ratelimit-action
71 |
--------------------------------------------------------------------------------
/17-ratelimit/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/solo-io/hoot/rl
2 |
3 | go 1.16
4 |
5 | require (
6 | github.com/envoyproxy/ratelimit v1.4.1-0.20210528154549-c0cdd752f8d5 // indirect
7 | github.com/gogo/protobuf v1.3.1 // indirect
8 | github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect
9 | github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 // indirect
10 | github.com/onsi/ginkgo v1.12.0 // indirect
11 | github.com/onsi/gomega v1.9.0 // indirect
12 | golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 // indirect
13 | gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
14 | gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
15 | )
16 |
--------------------------------------------------------------------------------
/17-ratelimit/rlconfig/config/rl.yaml:
--------------------------------------------------------------------------------
1 | domain: domain1
2 | descriptors:
3 | - key: generic_key
4 | value: client
5 | descriptors:
6 | - key: remote_address
7 | rate_limit:
8 | unit: SECOND
9 | requests_per_unit: 1
10 | - key: generic_key
11 | value: resources
12 | rate_limit:
13 | unit: SECOND
14 | requests_per_unit: 1
15 | descriptors:
16 | - key: header_match
17 | value: post_request
18 | rate_limit:
19 | unit: MINUTE
20 | requests_per_unit: 10
--------------------------------------------------------------------------------
/17-ratelimit/slides.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/17-ratelimit/slides.odp
--------------------------------------------------------------------------------
/18-istio-envoy-filter/README.md:
--------------------------------------------------------------------------------
1 | # Envoy Filter
2 | The EnvoyFilter resource applies envoy config patches to a workload.
3 |
4 | # Demo Setup
5 |
6 | ```
7 | . ./setup.sh
8 | kubectl port-forward deploy/productpage-v1 9080 &
9 | ```
10 |
11 | Apply filter:
12 | ```
13 | kubectl apply -f envoyfilter.yaml
14 | ```
15 |
16 | Curl the product page:
17 | ```
18 | curl -v http://localhost:9080/api/v1/products/0/reviews
19 |
20 | curl -v http://localhost:9080/api/v1/products/1/reviews
21 | for i in $(seq 100); do curl -s http://localhost:9080/api/v1/products/1/reviews > /dev/null; done
22 | curl -v http://localhost:9080/api/v1/products/1/reviews
23 | ```
24 |
25 | # Debugging
26 |
27 | Port forward debug ports
28 | ```
29 | kubectl port-forward deploy/productpage-v1 15000 &
30 | kubectl port-forward deploy/reviews-v1 15001:15000 &
31 | kubectl port-forward deploy/ratelimit 6070:6070 &
32 | ```
33 |
34 | See redis state:
35 | ```
36 | for key in $(kubectl exec deploy/redis -- redis-cli keys '*'); do
37 | echo $key
38 | kubectl exec deploy/redis -- redis-cli get $key
39 | done
40 | ```
41 |
42 | Enable debug log level:
43 | ```
44 | curl -XPOST "localhost:15000/logging?level=debug"
45 | curl -XPOST "localhost:15001/logging?level=debug"
46 | ```
47 |
48 | Config dump:
49 | ```
50 | curl "localhost:15001/config_dump"
51 | ```
52 |
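The full config dump is large, so it can help to grep for the names used in `envoyfilter.yaml` to confirm the patches actually landed (illustrative):
```
curl -s "localhost:15001/config_dump" | grep -n "envoy.filters.http.ratelimit"
curl -s "localhost:15001/config_dump" | grep -n "rate_limit_cluster"
```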
53 | Get envoy logs
54 | ```
55 | kubectl logs -f --tail=10 deploy/productpage-v1 -c istio-proxy
56 | kubectl logs -f --tail=10 deploy/reviews-v1 -c istio-proxy
57 | ```
58 |
59 | # Config Change
60 |
61 | When changing the rate limit config in `srvconfig.yaml`, reapply it and restart the ratelimit deployment to pick up the new config:
62 | ```
63 | kubectl apply -f srvconfig.yaml
64 | kubectl rollout restart deployment ratelimit
65 | ```
66 |
67 | Reference docs:
68 | https://istio.io/latest/docs/reference/config/networking/envoy-filter/
69 | https://istio.io/latest/docs/tasks/policy-enforcement/rate-limit/
--------------------------------------------------------------------------------
/18-istio-envoy-filter/envoyfilter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: EnvoyFilter
3 | metadata:
4 | name: filter-ratelimit
5 | spec:
6 | workloadSelector:
7 | # select by label in the same namespace
8 | labels:
9 | app: reviews
10 | configPatches:
11 | # The Envoy config you want to modify
12 | - applyTo: HTTP_FILTER
13 | match:
14 | context: SIDECAR_INBOUND
15 | listener:
16 | filterChain:
17 | filter:
18 | name: "envoy.filters.network.http_connection_manager"
19 | subFilter:
20 | name: "envoy.filters.http.router"
21 | patch:
22 | operation: INSERT_BEFORE
23 | # Adds the Envoy Rate Limit Filter in HTTP filter chain.
24 | value:
25 | name: envoy.filters.http.ratelimit
26 | typed_config:
27 | "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
28 |           # domain can be anything! Match it to the ratelimiter service config
29 | domain: reviews-ratelimit
30 | failure_mode_deny: true
31 | timeout: 10s
32 | rate_limit_service:
33 | grpc_service:
34 | envoy_grpc:
35 | cluster_name: rate_limit_cluster
36 | transport_api_version: V3
37 | - applyTo: CLUSTER
38 | match:
39 | cluster:
40 | service: ratelimit.default.svc.cluster.local
41 | patch:
42 | operation: ADD
43 | # Adds the rate limit service cluster for rate limit service defined in step 1.
44 | value:
45 | name: rate_limit_cluster
46 | type: STRICT_DNS
47 | connect_timeout: 10s
48 | lb_policy: ROUND_ROBIN
49 | http2_protocol_options: {}
50 | load_assignment:
51 | cluster_name: rate_limit_cluster
52 | endpoints:
53 | - lb_endpoints:
54 | - endpoint:
55 | address:
56 | socket_address:
57 | address: ratelimit.default
58 | port_value: 8081
59 | ---
60 | apiVersion: networking.istio.io/v1alpha3
61 | kind: EnvoyFilter
62 | metadata:
63 | name: filter-ratelimit-svc
64 | spec:
65 | workloadSelector:
66 | labels:
67 | app: reviews
68 | configPatches:
69 | - applyTo: VIRTUAL_HOST
70 | match:
71 | context: SIDECAR_INBOUND
72 | routeConfiguration:
73 | vhost:
74 | name: ""
75 | route:
76 | action: ANY
77 | patch:
78 | operation: MERGE
79 | # Applies the rate limit rules.
80 | value:
81 | rate_limits:
82 | - actions: # any actions in here
83 | - request_headers:
84 | header_name: ":path"
85 | descriptor_key: "PATH"
--------------------------------------------------------------------------------
/18-istio-envoy-filter/setup.sh:
--------------------------------------------------------------------------------
1 | kind create cluster
2 |
3 | # tested with istio 1.11.4
4 |
5 | $ISTIO_HOME/bin/istioctl install --set profile=minimal -y
6 | kubectl label namespace default istio-injection=enabled
7 | sleep 1
8 | kubectl apply -f $ISTIO_HOME/samples/bookinfo/platform/kube/bookinfo.yaml
9 | # for simplicity, delete other reviews:
10 | kubectl delete deployment reviews-v2
11 | kubectl delete deployment reviews-v3
12 |
13 | kubectl apply -f srvconfig.yaml
14 | kubectl apply -f $ISTIO_HOME/samples/ratelimit/rate-limit-service.yaml
15 |
16 | export PATH=$ISTIO_HOME/bin:$PATH
17 |
18 |
--------------------------------------------------------------------------------
/18-istio-envoy-filter/slides.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/18-istio-envoy-filter/slides.odp
--------------------------------------------------------------------------------
/18-istio-envoy-filter/slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/18-istio-envoy-filter/slides.pdf
--------------------------------------------------------------------------------
/18-istio-envoy-filter/srvconfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: ratelimit-config
5 | data:
6 | config.yaml: |
7 | domain: reviews-ratelimit
8 | descriptors:
9 | - key: PATH
10 | value: "/reviews/0"
11 | rate_limit:
12 | unit: minute
13 | requests_per_unit: 1
14 | - key: PATH
15 | rate_limit:
16 | unit: minute
17 | requests_per_unit: 100
--------------------------------------------------------------------------------
/19-ebpf-top-down/README.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | eBPF is a Linux kernel technology that is quickly growing in popularity as it provides developers with the ability to inject custom logic into running kernels in a safe and efficient way.
4 |
5 | In the application networking space there are a few common use cases such as:
6 | * tracing/observability
7 | * security enforcement
8 | * network acceleration
9 |
10 | Due to the breadth of eBPF as a technology, it can be challenging to learn and get started with.
11 | The slides/video give a quick overview of eBPF as well as a top-down view of a specific example -- tracing network connections.
12 | The example was built and run via our open-source tool [BumbleBee](https://bumblebee.io/).
13 |
14 | # Example
15 | A [simple example](probe.c) is included that is based on a `bee init` template.
16 |
17 | It can be built and run easily via `bee`, e.g.
18 | ```bash
19 | bee build probe.c my_probe:v1
20 | bee run my_probe:v1
21 | ```
22 |
23 | See the [BumbleBee getting started guide](https://github.com/solo-io/bumblebee/blob/main/docs/getting_started.md) for more info.
24 |
25 |
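Since the probe attaches to the `tcp_v4_connect` kprobe, any outbound IPv4 TCP connection made while `bee run` is active should show up as an event (destination address and PID). For example, from another terminal (illustrative):
```bash
# trigger an outbound TCP connection so the kprobe fires
curl -s http://example.com > /dev/null
```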
26 | # References
27 | * https://ebpf.io/
28 | * https://github.com/iovisor/bcc
29 | * https://github.com/libbpf/libbpf
30 | * https://nakryiko.com/posts/libbpf-bootstrap/
31 | * https://github.com/solo-io/bumblebee
32 |
--------------------------------------------------------------------------------
/19-ebpf-top-down/ebpf-slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/19-ebpf-top-down/ebpf-slides.pdf
--------------------------------------------------------------------------------
/19-ebpf-top-down/probe.c:
--------------------------------------------------------------------------------
1 | #include "vmlinux.h"
2 | #include "bpf/bpf_helpers.h"
3 | #include "bpf/bpf_core_read.h"
4 | #include "bpf/bpf_tracing.h"
5 | #include "solo_types.h"
6 |
7 | // 1. Change the license if necessary
8 | char __license[] SEC("license") = "Dual MIT/GPL";
9 |
10 | struct event_t {
11 | ipv4_addr daddr;
12 | u32 pid;
13 | } __attribute__((packed));
14 |
15 | // This is the definition for the global map which both our
16 | // bpf program and user space program can access.
17 | // More info and map types can be found here: https://www.man7.org/linux/man-pages/man2/bpf.2.html
18 | struct {
19 | __uint(max_entries, 1 << 24);
20 | __uint(type, BPF_MAP_TYPE_RINGBUF);
21 | __type(value, struct event_t);
22 | } events SEC(".maps.print");
23 |
24 | SEC("kprobe/tcp_v4_connect")
25 | int BPF_KPROBE(tcp_v4_connect, struct sock *sk, struct sockaddr *uaddr)
26 | {
27 | // Init event pointer
28 | struct event_t *event;
29 | __u32 daddr;
30 |
31 | struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
32 | daddr = BPF_CORE_READ(usin, sin_addr.s_addr);
33 |
34 | // Reserve a spot in the ringbuffer for our event
35 | event = bpf_ringbuf_reserve(&events, sizeof(struct event_t), 0);
36 | if (!event) {
37 | return 0;
38 | }
39 |
40 | // 3. set data for our event,
41 | event->daddr = daddr;
42 | event->pid = bpf_get_current_pid_tgid();
43 |
44 | bpf_ringbuf_submit(event, 0);
45 |
46 | return 0;
47 | }
48 |
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/1-click-helm-slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/20-one-click-istio-install-helm/1-click-helm-slides.pdf
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | ## Show notes ##
2 | 1 min intro to today's hoot.
3 | Over 50% of environments are not updated to the versions supported by upstream Istio. I hope these environments are getting security patches! Today, we will clear up some of the confusion around Istio upgrades and dive into a simple Istio upgrade using Helm!
4 |
5 | **speaker intro**
6 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job!
7 |
8 | **News (3 mins)**
9 |
10 | - SoloCon
11 | - eBPF blog on istio.io: https://istio.io/latest/blog/2022/merbridge/
12 | - CVE March 9th: https://istio.io/latest/news/security/istio-security-2022-004/
13 | - SPIRE integration with Istio
14 | - Workgroup/TOC meeting time changes
15 |
16 | **What is new in Istio 1.13? (5 mins)**
17 | - Pass to Krisztian first.
18 | - Highlight: Added an option to set whether the Request ID generated by the sidecar should be used when determining the sampling strategy for tracing.
19 | - Lin to discuss more: https://istio.io/latest/news/releases/1.13.x/announcing-1.13/
20 | - Check upgrade note first
21 | - Others:
22 | - WorkloadGroup beta
23 | - lots of fixes.
24 | - Added TLS settings to the sidecar API in order to enable TLS/mTLS termination on the sidecar proxy for requests coming from outside the mesh.
25 | - a bunch of istioctl enhancements: https://istio.io/latest/news/releases/1.13.x/announcing-1.13/change-notes/#istioctl
26 |
27 | **What are the options for upgrading Istio? (10 mins)**
28 | - Pass to Krisztian first.
29 |
30 | Components:
31 | - Control plane
32 | - Istiod
33 | - Istio CNI (only in-place is supported)
34 | - data plane:
35 | - Ingress gateway
36 | - user applications
37 |
38 | Strategy:
39 | - In place
40 | - Canary
41 |
42 | Methods:
43 | - istioctl
44 | - helm
45 | - istio operator controller
46 | - How does IstioOperator API fit in?
47 |
48 | Update Considerations:
49 | - Upgrade Ordering among components
50 | - Highlight gateway update (IP change or not)
51 | - CNI update - https://istio.io/latest/docs/setup/additional-setup/cni/#operation-details
52 | - rollback (can't use any newer features)
53 |
54 | **Let us dive into Helm. (20 mins)**
55 | - Pass to Krisztian for demo.
56 | - helmfile: https://github.com/roboll/helmfile
57 |
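For reference, a minimal sketch of driving the `helmfile.yaml` in this directory (assumes `helm`, the helm-diff plugin, and `helmfile` are installed):
```
helmfile diff    # preview changes against the cluster (requires the helm-diff plugin)
helmfile sync    # install/upgrade every release declared in helmfile.yaml
```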
58 | **Wrap up**
59 | - Thank speaker!
60 | - Is this interesting? What other topics do you want to see to help you on your application networking? Remind folks to comment, like and subscribe.
61 |
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | # Repositories of Helm charts
3 | - name: istio
4 | url: https://istio-release.storage.googleapis.com/charts
5 | - name: prometheus-community
6 | url: https://prometheus-community.github.io/helm-charts
7 |
8 | releases:
9 | # Deploys base Istio components
10 | - name: istio-base
11 | chart: istio/base
12 | version: 1.12.5
13 | namespace: istio-system
14 | createNamespace: true
15 |
16 | # Deploys Istio control-plane
17 | - name: istio-discovery
18 | chart: istio/istiod
19 | version: 1.12.5
20 | namespace: istio-system
21 | needs:
22 | - istio-system/istio-base
23 | values:
24 | - pilot:
25 | resources:
26 | requests:
27 | cpu: 10m
28 | memory: 100Mi
29 | autoscaleEnabled: false
30 |
31 | # Deploys istio-ingressgateway for inbound traffic
32 | - name: istio-ingressgateway
33 | chart: istio/gateway
34 | version: 1.12.5
35 | namespace: istio-system
36 | needs:
37 | - istio-system/istio-base
38 | - istio-system/istio-discovery
39 |
40 | # Monitoring and logging components
41 | #
42 | # Deploys Prometheus stack into monitoring namespace
43 | - name: kube-prometheus-stack
44 | chart: prometheus-community/kube-prometheus-stack
45 | version: 32.3.0
46 | namespace: monitoring
47 | createNamespace: true
48 | values:
49 | - values/kube-prometheus-stack/values.yaml
50 |
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/images/dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/20-one-click-istio-install-helm/images/dashboard.png
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/images/thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/20-one-click-istio-install-helm/images/thumbnail.png
--------------------------------------------------------------------------------
/20-one-click-istio-install-helm/values/kube-prometheus-stack/values.yaml:
--------------------------------------------------------------------------------
1 | alertmanager:
2 | enabled: false
3 |
4 | grafana:
5 | enabled: true
6 | image:
7 | repository: grafana/grafana
8 | tag: 8.3.6
9 |
10 | defaultDashboardsEnabled: true
11 |
12 | kube-state-metrics:
13 | enabled: true
14 |
15 | kubeControllerManager:
16 | enabled: false
17 |
18 | kubeScheduler:
19 | enabled: false
20 |
21 | kubeProxy:
22 | enabled: false
23 |
24 | ## Deploy a Prometheus instance
25 | ##
26 | prometheus:
27 | prometheusSpec:
28 | ruleSelectorNilUsesHelmValues: false
29 | serviceMonitorSelectorNilUsesHelmValues: false
30 | podMonitorSelectorNilUsesHelmValues: false
31 |
32 | retention: 6h
33 |
34 | additionalScrapeConfigs:
35 | - job_name: 'istiod'
36 | kubernetes_sd_configs:
37 | - role: endpoints
38 | namespaces:
39 | names:
40 | - istio-system
41 | relabel_configs:
42 | - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
43 | action: keep
44 | regex: istiod;http-monitoring
45 |
46 | - job_name: 'envoy-stats'
47 | metrics_path: /stats/prometheus
48 | kubernetes_sd_configs:
49 | - role: pod
50 | relabel_configs:
51 | - source_labels: [__meta_kubernetes_pod_container_port_name]
52 | action: keep
53 | regex: '.*-envoy-prom'
54 |
--------------------------------------------------------------------------------
/21-istio-in-action-book/Optimizing-the-control-plane-performance.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/21-istio-in-action-book/Optimizing-the-control-plane-performance.pdf
--------------------------------------------------------------------------------
/21-istio-in-action-book/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 21, March 22, 2022
2 |
3 | 1 min hook to today's hoot.
4 |
5 | The Istio in Action book officially came out last week! It already has 2000+ views in the last week! (https://www.manning.com/books/istio-in-action) Should you add the book to your reading list? Today we will discuss some of the contents from the book so you can make an informed decision.
6 |
7 | **speaker intro** (2 mins)
8 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job, be the best cloud native developer/operator/architect!
9 | Lin: host for hoot livestream today.
10 | speakers: intro
11 |
12 | **News (2 mins)**
13 | - Episode 20 hoot steps updated: https://twitter.com/linsun_unc/status/1504535664129462280?s=20&t=ZLKyHCQRoFRrGBEVOwvJfQ
14 | - IstioCon registration: get the tshirt: https://istio.io/latest/blog/2022/istiocon-register/
15 | - Istio 1.11 EOL: https://istio.io/latest/news/support/announcing-1.11-eol/
16 | - Greg joins Solo: https://twitter.com/christianposta/status/1505911681729241098?s=20&t=ZLKyHCQRoFRrGBEVOwvJfQ
17 |
18 | **General questions about the book** (15 mins)
19 | - Who is the book for?
20 |
21 | - A few Istio books are out; what is so special about this one?
22 |
23 | - Discuss time and effort of the book: I recall you started the book before I started my istio book. How much time did you spend on the book?
24 |
25 | - What are the favorite parts of the book?
26 |
27 | feel free to share a few slides
28 |
29 | - I am very impressed by the breadth and depth of the book, covering beginner, intermediate and advanced topics. Anything you want to highlight for beginner and intermediate readers?
30 |
31 | - Any advice for folks who are interested in writing a book?
32 |
33 | **Dive into chapter 11 (advanced topic)** (10 mins)
34 | - Can you talk about phantom workloads and their impact?
35 |
36 | - What are the four golden signals of the control plane?
37 |
38 | - What are the techniques to improve Istio control plane performance?
39 |
40 | - Any guidance on scaling out (horizontal) or scaling up (vertical) Istiod? What about autoscaling Istiod?
41 |
42 |
43 | **Let us dive into demo** (5-10 mins)
44 | - Pass to Rino for a demo.
45 |
46 |
47 | **wrap up** (2 mins)
48 | - Thank speakers! Ask speakers: How do folks reach out to you?
49 | - Is this interesting? What other topics do you want to see to help you on your application networking? Remind folks to comment, like and subscribe. See you next Tues (accelerate service mesh adoption with eBPF & merbridge)!
--------------------------------------------------------------------------------
/22-ebpf-merbridge-istio/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 22, March 29, 2022
2 |
3 | 1 min hook to today's hoot.
4 | Service mesh & eBPF are super hot topics. Are they competing, or are they complementary to each other? Can you add eBPF support to Istio without changing Istio at all?
5 |
6 | **speaker intro** (2 mins)
7 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job, be the best cloud native developer/operator/architect!
8 | Lin: your host for hoot livestream today.
9 | speakers: intro
10 |
11 | **News (2 mins)**
12 |
13 | Episode 21 steps are published: https://github.com/solo-io/hoot/tree/master/21-istio-in-action-book
14 |
15 | IstioCon and SMC EU speakers are notified, congratulations!
16 |
17 | https://istio.io/latest/blog/2022/istioctl-proxy/
18 |
19 | https://venturebeat.com/2022/03/21/report-89-of-orgs-have-been-attacked-by-kubernetes-ransomware/
20 |
21 | https://www.businesswire.com/news/home/20220323005341/en/Spectro-Cloud-Announces-T-Mobile-Ventures-Investment-in-its-Series-B-Funding-Round-to-Drive-Innovation-in-Kubernetes-Management-at-5GEdge-Locations
22 |
23 |
24 | **General Questions** (5 mins)
25 | DaoCloud gained a steering contribution seat, congrats!
26 |
27 | - What is DaoCloud?
28 |
29 | - Why Istio for DaoCloud?
30 |
31 | - What triggered you to start merbridge project?
32 |
33 | **merbridge** (10 mins)
34 |
35 | https://istio.io/latest/blog/2022/merbridge/
36 |
37 | - How does merbridge work without modifying Istio?
38 | -- what is the role of the init container?
39 | -- how does it work with istio CNI? maybe there is no need for istio CNI at all?
40 |
41 | - Can you explain how users can accelerate SM adoption with eBPF and merbridge?
42 |
43 | - How does merbridge work for services in the service mesh vs not in the service mesh?
44 |
45 | - How do I know merbridge is working?
46 |
47 | - From your performance test, I think it is about a 10% latency improvement. Are those numbers for the same node or for different nodes?
48 | -- If you could modify Istio, would performance be better? For example, the init container may not be needed?
49 |
50 | - Are you open to sharing the challenges you hit when building merbridge?
51 |
52 | - We've got folks in the community exploring merbridge. Is merbridge ready for production?
53 |
54 | **Let us dive into merbridge demo** (5-10 mins)
55 |
56 | **wrap up** (2 mins)
57 | - Thank speakers! Ask speakers: How do folks reach out to you?
58 | -- https://join.slack.com/t/merbridge/shared_invite/zt-11uc3z0w7-DMyv42eQ6s5YUxO5mZ5hwQ
59 | -- https://github.com/merbridge/merbridge
60 | - Is this interesting? What other topics do you want to see to help you on your application networking? Remind folks to comment, like and subscribe. See you next Tues (increase application resiliency with spot vms)!
--------------------------------------------------------------------------------
/22-ebpf-merbridge-istio/merbridge.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/22-ebpf-merbridge-istio/merbridge.jpg
--------------------------------------------------------------------------------
/22-ebpf-merbridge-istio/merbridge.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/22-ebpf-merbridge-istio/merbridge.pdf
--------------------------------------------------------------------------------
/23-app-resiliency-envoy/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 23 - How to Increase Service Resiliency for Services Running on Kubernetes on Spot VMs
2 |
3 |
4 | ## Recording ##
5 | https://youtu.be/WIcWekCQTJU
6 |
7 | [show notes](SHOWNOTES.md)
8 |
9 | [slides]()
10 |
11 | ## Hands-on: Steps from the demo
12 |
--------------------------------------------------------------------------------
/23-app-resiliency-envoy/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 23, April 5, 2022
2 | How to Increase Service Resiliency for Services Running on Kubernetes on Spot VMs
3 |
4 | https://github.com/murphye/cheap-gke-cluster
5 | https://thenewstack.io/run-a-google-kubernetes-engine-cluster-for-under-25-month/
6 |
7 | 1 min hook to today's hoot.
8 |
9 | Running your app in the cloud? Is cost a concern for you? Is high app resiliency a priority for you? In this episode, we will introduce methods to increase resiliency for your app while running on spot VMs to cut costs!
10 |
11 | **speaker intro** (2 mins)
12 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job, be the best cloud native developer/operator/architect!
13 | Lin: your host for hoot livestream today.
14 | speakers: intro
15 |
16 | **News (2 mins)**
17 |
18 | Episode 22 steps are published: https://github.com/solo-io/hoot/tree/master/22-ebpf-merbridge-istio
19 |
20 | https://dagger.io/blog/public-launch-announcement
21 |
22 | https://techcrunch.com/2022/03/31/as-docker-gains-momentum-it-hauls-in-105m-series-c-on-2b-valuation/
23 |
24 | https://twitter.com/evan2645/status/1509607415011954690?s=20&t=u9fkrb4wjtdDHbiQHCeijg
25 |
26 | IstioCon schedule out!
27 |
28 | https://lp.solo.io/devopsdays-raleigh-networking
29 |
30 | **General Questions** (5 mins)
31 |
32 | What is a spot VM?
33 |
34 | Why spot VMs for average folks?
35 | - Why are spot VMs perfect for development and testing of your services?
36 |
37 | I am convinced spot VMs are interesting; how do I
38 | increase resiliency? (see the sketch after this list)
39 | - Retry
40 | - Replica numbers
41 | - Anti-Affinity
42 |
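A minimal sketch of the retry idea as an Istio VirtualService (names are hypothetical; tune `attempts` and `retryOn` for your app):
```
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: my-service          # hypothetical service
spec:
  hosts:
  - my-service
  http:
  - route:
    - destination:
        host: my-service
    retries:
      attempts: 3
      perTryTimeout: 2s
      retryOn: 5xx,reset,connect-failure
```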
43 | What if I have larger clusters? Can I continue to use this?
44 |
45 | Are spot VMs available only on Google Cloud?
46 |
47 | What strategy cuts costs while keeping the app healthy all the time?
48 | - Should I consider a mix of regular VMs and spot VMs?
49 |
50 | **Let us dive into demo** (5-10 mins)
51 |
52 | - Gloo Edge VirtualService (VS): can you show the envoy config there?
53 |
54 |
55 | Any other tips you would like to share before we wrap up?
56 |
57 | **wrap up** (2 mins)
58 | - Thank speakers! Ask speakers: How do folks reach out to you?
59 | - Is this interesting? What other topics do you want to see to help you on your application networking? Remind folks to comment, like and subscribe. See you next next Tues (Debug Envoy Configs and Analyze Access Logs)!
--------------------------------------------------------------------------------
/24-debug-envoy-config-access-logs/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 24 - Debug Envoy Configs and Analyze Access Logs
2 |
3 |
4 | ## Recording ##
5 | https://youtu.be/OQFFIXFeZns
6 |
7 | [show notes](SHOWNOTES.md)
8 |
9 | [slides]()
10 |
11 | ## Hands-on: Steps from the demo
12 |
13 | Check out these tools that are used in the demo:
14 |
15 | https://envoyui.solo.io/
16 | https://github.com/djannot/envoyctl
17 | https://github.com/GregHanson/engarde-viewer
--------------------------------------------------------------------------------
/24-debug-envoy-config-access-logs/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 24, April 19, 2022
2 | Debug Envoy Configs and Analyze Access Logs
3 |
4 | 1 min hook to today's hoot.
5 |
6 | Can you run Istio successfully without knowing Envoy configuration or understanding Envoy access logs? Maybe in a demo or POC, but sooner or later you will find you need to understand them. In this livestream, we will dive into debugging envoy configs and analyzing access logs so you can be confident when hitting issues while running Istio in production.
7 |
8 | **speaker intro** (2 mins)
9 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job, be the best cloud native developer/operator/architect!
10 | Lin: your host for hoot livestream today.
11 | speakers: intro
12 |
13 | **News (2 mins)**
14 |
15 | IstioCon next week!!
16 |
17 | Istio 1.13.3: https://istio.io/latest/news/releases/1.13.x/announcing-1.13.3/
18 | https://istio.io/latest/news/releases/1.12.x/announcing-1.12.6/
19 |
20 | Gloo graphql GA: https://www.solo.io/blog/announcing-gloo-graphql/
21 |
22 | hoot update: https://github.com/solo-io/hoot/blob/master/README.md#upcoming-episodes
23 |
24 | **General Questions** (5-10 mins)
25 | - How long have you been debugging Istio and Envoy?
26 | - What are the tools out there to help users debug Istio and Envoy today?
27 | - Ask Denis: Why did you develop the envoy UI tool?
28 | - Ask Greg: What motivated you to develop the engarde-viewer tool?
29 | - Ask Denis: How would the envoy UI tool help you to debug?
30 | - Ask Greg: How would the engarde-viewer tool help you to debug?
31 | -- be prepared to discuss how to enable access logs.
32 |
33 | **Let us dive into demo** (5-10 mins)
34 |
35 |
36 | Any other tips you would like to share before we wrap up?
37 |
38 | **wrap up** (2 mins)
39 | - Thank speakers! Ask speakers: How do folks reach out to you?
40 | - Is this interesting? What other topics do you want to see to help you on your application networking? Remind folks to comment, like and subscribe. Happy learning at IstioCon, and see you next next Tues!
--------------------------------------------------------------------------------
/25-istio-spire-integration/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 25, May 3, 2022
2 | Istio Spire Integration With Workloads on K8s & VMs
3 |
4 | 1 min hook to today's hoot.
5 |
6 | Interested in using Spire as your identity provider for your Istio service mesh? How does it work with your workloads running on Kubernetes and/or VMs? In this hoot livestream,
7 | Max Lambrecht and Christian Posta will join Lin to discuss everything you need to know about the newly added Spire integration in the Istio 1.14 release.
8 |
9 | **speaker intro** (2 mins)
10 | Welcome to hoot livestream, where we bring istio, envoy, k8s, ebpf & graphql technologies to you so you can be well prepared at your job, be the best cloud native developer/operator/architect!
11 | Lin: your host for hoot livestream today.
12 | speakers: intro
13 |
14 | **News (2 mins)**
15 |
16 | IstioCon was last week!! Close to 4000 registrations; the day 1 keynote had over 1000 attendees
17 |
18 | [Istio -> CNCF](https://istio.io/latest/blog/2022/istio-has-applied-to-join-the-cncf/)
19 |
20 | VMWare state of k8s 2022 report: https://tanzu.vmware.com/content/blog/state-of-kubernetes-2022
21 |
22 | hoot update: https://github.com/solo-io/hoot/blob/master/README.md#upcoming-episodes
23 |
24 | **General Questions** (20 mins)
25 | - Max: How was your experience contributing the spire integration to the Istio project?
26 |
27 | - Both: What problem does the spire integration solve for users?
28 |
29 | - Max: can you describe the architecture of this integration work?
30 |
31 | - Max: How does it work for workloads running in Istio with Spire? (Max, feel free to show a live demo here too, sometimes it is easier to explain with a simple demo)
32 |
33 | - Christian: What if I have workloads running on VM and I want to use Spire as my identity provider?
34 |
35 | **Let us dive into demo** (5-10 mins)
36 | - Max: any demo you want to show?
37 | - Christian: use cases with workloads on VMs?
38 |
39 | Any other tips you would like to share before we wrap up?
40 |
41 | **wrap up** (2 mins)
42 | - Thank speakers! Ask speakers: How do folks reach out to you?
43 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you next next Tues!
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/bookinfo/cleanup-bookinfo:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
4 |
5 | NAMESPACE=default
6 |
7 | protos=( destinationrules virtualservices gateways )
8 | for proto in "${protos[@]}"; do
9 | for resource in $(kubectl get -n ${NAMESPACE} "$proto" -o name); do
10 | kubectl delete -n ${NAMESPACE} "$resource";
11 | done
12 | done
13 |
14 | OUTPUT=$(mktemp)
15 | export OUTPUT
16 | kubectl delete -n ${NAMESPACE} -f "$SCRIPTDIR/bookinfo.yaml" > "${OUTPUT}" 2>&1
17 | ret=$?
18 | function cleanup() {
19 | rm -f "${OUTPUT}"
20 | }
21 |
22 | trap cleanup EXIT
23 |
24 | if [[ ${ret} -eq 0 ]];then
25 | cat "${OUTPUT}"
26 | else
27 | OUT2=$(grep -v NotFound "${OUTPUT}")
28 | if [[ -n ${OUT2} ]];then
29 | cat "${OUTPUT}"
30 | exit ${ret}
31 | fi
32 | fi
33 |
34 | echo "Application cleanup successful"
35 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/bookinfo/gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: bookinfo-gateway
5 | spec:
6 | selector:
7 | istio: ingressgateway
8 | servers:
9 | - port:
10 | number: 8080
11 | name: http
12 | protocol: HTTP
13 | hosts:
14 | - "*"
15 | ---
16 | apiVersion: networking.istio.io/v1alpha3
17 | kind: Gateway
18 | metadata:
19 | name: bookinfo-gateway-mtls
20 | spec:
21 | selector:
22 | istio: ingressgateway
23 | servers:
24 | - port:
25 | number: 7080
26 | name: https
27 | protocol: HTTPS
28 | tls:
29 | mode: ISTIO_MUTUAL
30 | hosts:
31 | - "*"
32 | ---
33 | apiVersion: networking.istio.io/v1alpha3
34 | kind: VirtualService
35 | metadata:
36 | name: bookinfo-service
37 | spec:
38 | hosts:
39 | - "*"
40 | gateways:
41 | - bookinfo-gateway
42 | - bookinfo-gateway-mtls
43 | http:
44 | - match:
45 | - uri:
46 | exact: /productpage
47 | - uri:
48 | prefix: /static
49 | - uri:
50 | exact: /login
51 | - uri:
52 | exact: /logout
53 | - uri:
54 | prefix: /api/v1/products
55 | route:
56 | - destination:
57 | host: productpage.default.svc.cluster.local
58 | port:
59 | number: 9080
60 | ---
61 | apiVersion: "networking.istio.io/v1alpha3"
62 | kind: "DestinationRule"
63 | metadata:
64 | name: "enable-mtls"
65 | spec:
66 | host: "*.svc.cluster.local"
67 | trafficPolicy:
68 | tls:
69 | mode: ISTIO_MUTUAL
70 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/cleanup-all:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | (cd bookinfo ; ./cleanup-bookinfo)
4 | (cd istio ; ./cleanup-istio)
5 | (cd spire ; ./cleanup-spire)
6 |
7 | ./kill-forward-process
8 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/create-registration-entry-details:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to create SPIRE registration entry for bookinfo-details service identity
4 |
5 | if [[ -z $1 ]]; then
6 | echo "Error: cluster id should be provided as parameter"
7 | exit 1;
8 | fi
9 |
10 | kubectl exec -ti spire-server-0 -n spire -c spire-server -- /opt/spire/bin/spire-server entry create \
11 | -socketPath /run/spire/sockets/server.sock \
12 | -spiffeID spiffe://example.org/ns/default/sa/bookinfo-details \
13 | -parentID spiffe://example.org/spire/agent/k8s_psat/demo-cluster/$1 \
14 | -selector k8s:ns:default \
15 | -selector k8s:sa:bookinfo-details \
16 | -selector unix:uid:1337 \
17 | -selector k8s:pod-label:app:details \
18 | -selector k8s:pod-image:docker.io/istio/examples-bookinfo-details-v1@sha256:18e54f81689035019e1ac78f6d2e6483fcf1d94072d047315ab193cb2ab89ae5
19 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/delete-registration-entry:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | if [[ -z $1 ]]; then
5 | echo "Error: please specify registration entry ID"
6 | exit 1;
7 | fi
8 |
9 | kubectl exec --stdin --tty -n spire spire-server-0 -- /opt/spire/bin/spire-server entry delete \
10 | -socketPath /run/spire/sockets/server.sock \
11 | -entryID $1
12 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/deploy-bookinfo:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ./istioctl kube-inject --filename bookinfo/bookinfo.yaml | kubectl apply -f -
4 | kubectl apply -f bookinfo/gateway.yaml
5 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/deploy-istio:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | kubectl create ns istio-system
4 | sleep 2
5 |
6 | ./istioctl install -f istio/istio-config.yaml --skip-confirmation
7 | kubectl apply -f istio/auth.yaml
8 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/deploy-spire:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | cd spire
6 |
7 | kubectl create ns spire
8 | sleep 2
9 |
10 | # Create the server’s service account, configmap and associated role bindings
11 | kubectl apply \
12 | -f server-account.yaml \
13 | -f spire-bundle-configmap.yaml \
14 | -f server-cluster-role.yaml
15 |
16 | # Deploy the server configmap and statefulset
17 | kubectl apply \
18 | -f server-configmap.yaml \
19 | -f server-statefulset.yaml \
20 | -f server-service.yaml
21 |
22 | # Configuring and deploying the SPIRE Agent
23 | kubectl apply \
24 | -f agent-account.yaml \
25 | -f agent-cluster-role.yaml
26 |
27 | sleep 2
28 |
29 | kubectl apply \
30 | -f agent-configmap.yaml \
31 | -f agent-daemonset.yaml
32 |
33 | # Applying SPIFFE CSI Driver configuration
34 | kubectl apply -f spiffe-csi-driver.yaml
35 |
36 | kubectl wait pod spire-server-0 -n spire --timeout=-1s --for=condition=ready
37 | kubectl wait pod -n spire -l app=spire-agent --for=condition=ready
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/download-istioctl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ -z $1 ]]; then
4 | echo "Error: please specify platform ('macos', or 'linux')"
5 | exit 1;
6 | fi
7 |
8 | if [ "$1" = "macos" ]; then
9 | wget -c https://github.com/istio/istio/releases/download/1.15.0/istioctl-1.15.0-osx.tar.gz -O - | tar -xz
10 | fi
11 |
12 | if [ "$1" = "linux" ]; then
13 | wget -c https://github.com/istio/istio/releases/download/1.15.0/istioctl-1.15.0-linux-amd64.tar.gz -O - | tar -xz
14 | fi
15 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/forward-port:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Forwards from port 8000 on the host to port 8080 inside the cluster
4 |
5 | INGRESS_POD=$(kubectl get pod -l istio=ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}")
6 | kubectl port-forward --address 0.0.0.0 "$INGRESS_POD" 8000:8080 -n istio-system &
7 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/istio/auth.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1beta1
2 | kind: PeerAuthentication
3 | metadata:
4 | name: default
5 | namespace: istio-system
6 | spec:
7 | mtls:
8 | mode: STRICT
9 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/istio/cleanup-istio:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | istioctl x uninstall --purge --skip-confirmation
4 | kubectl delete namespaces istio-system
5 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/istio/istio-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operator.istio.io/v1alpha1
2 | kind: IstioOperator
3 | metadata:
4 | namespace: istio-system
5 | spec:
6 | profile: default
7 | meshConfig:
8 | trustDomain: example.org
9 | values:
10 | global:
11 | # hub: localhost:5000
12 | tag: 1.14.0-alpha.0
13 | imagePullPolicy: "Always"
14 | imagePullSecrets:
15 | - secret-registry
16 |
17 | # This is used to customize the sidecar template
18 | sidecarInjectorWebhook:
19 | templates:
20 | spire: |
21 | spec:
22 | containers:
23 | - name: istio-proxy
24 | volumeMounts:
25 | - name: workload-socket
26 | mountPath: /run/secrets/workload-spiffe-uds
27 | readOnly: true
28 | volumes:
29 | - name: workload-socket
30 | csi:
31 | driver: "csi.spiffe.io"
32 | readOnly: true
33 | components:
34 | ingressGateways:
35 | - name: istio-ingressgateway
36 | enabled: true
37 | label:
38 | istio: ingressgateway
39 | k8s:
40 | overlays:
41 | - apiVersion: apps/v1
42 | kind: Deployment
43 | name: istio-ingressgateway
44 | patches:
45 | - path: spec.template.spec.volumes.[name:workload-socket]
46 | value:
47 | name: workload-socket
48 | csi:
49 | driver: "csi.spiffe.io"
50 | readOnly: true
51 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
52 | value:
53 | name: workload-socket
54 | mountPath: "/run/secrets/workload-spiffe-uds"
55 | readOnly: true
56 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/kill-forward-process:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | kill $(lsof -ti:8000)
4 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/show-registration-entries:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Show SPIRE registration entries
4 | kubectl exec --stdin --tty -n spire spire-server-0 -- /opt/spire/bin/spire-server entry show -socketPath /run/spire/sockets/server.sock
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/show-spire-cluster-id:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Look into the SPIRE Agent logs for the demo-cluster id
4 |
5 | agentpod="$(kubectl get pods --selector=app=spire-agent -n spire -o jsonpath='{.items[0].metadata.name}')"
6 | kubectl logs $agentpod -n spire -c spire-agent |grep demo-cluster
7 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/agent-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: spire-agent
5 | namespace: spire
6 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/agent-cluster-role.yaml:
--------------------------------------------------------------------------------
1 | # Required cluster role to allow spire-agent to query k8s API server
2 | kind: ClusterRole
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | metadata:
5 | name: spire-agent-cluster-role
6 | rules:
7 | - apiGroups: [""]
8 | resources: ["pods","nodes","nodes/proxy"]
9 | verbs: ["get"]
10 |
11 | ---
12 | # Binds above cluster role to spire-agent service account
13 | kind: ClusterRoleBinding
14 | apiVersion: rbac.authorization.k8s.io/v1
15 | metadata:
16 | name: spire-agent-cluster-role-binding
17 | subjects:
18 | - kind: ServiceAccount
19 | name: spire-agent
20 | namespace: spire
21 | roleRef:
22 | kind: ClusterRole
23 | name: spire-agent-cluster-role
24 | apiGroup: rbac.authorization.k8s.io
25 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/agent-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: spire-agent
5 | namespace: spire
6 | data:
7 | agent.conf: |
8 | agent {
9 | data_dir = "/run/spire"
10 | log_level = "DEBUG"
11 | server_address = "spire-server"
12 | server_port = "8081"
13 | socket_path = "/run/secrets/workload-identity/socket"
14 | trust_bundle_path = "/run/spire/bundle/root-cert.pem"
15 | trust_domain = "example.org"
16 | }
17 |
18 | plugins {
19 | NodeAttestor "k8s_psat" {
20 | plugin_data {
21 | cluster = "demo-cluster"
22 | }
23 | }
24 |
25 | KeyManager "memory" {
26 | plugin_data {
27 | }
28 | }
29 |
30 | WorkloadAttestor "k8s" {
31 | plugin_data {
32 | # Defaults to the secure kubelet port by default.
33 | # Minikube does not have a cert in the cluster CA bundle that
34 | # can authenticate the kubelet cert, so skip validation.
35 | skip_kubelet_verification = true
36 | }
37 | }
38 |
39 | WorkloadAttestor "unix" {
40 | plugin_data {
41 | }
42 | }
43 | }
44 |
45 | health_checks {
46 | listener_enabled = true
47 | bind_address = "0.0.0.0"
48 | bind_port = "8080"
49 | live_path = "/live"
50 | ready_path = "/ready"
51 | }
52 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/cleanup-spire:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | kubectl delete clusterrole spire-server-trust-role spire-agent-cluster-role
4 | kubectl delete clusterrolebinding spire-server-trust-role-binding spire-agent-cluster-role-binding
5 | kubectl delete namespace spire
6 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/server-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: spire-server
5 | namespace: spire
6 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/server-cluster-role.yaml:
--------------------------------------------------------------------------------
1 | # ClusterRole to allow spire-server node attestor to query Token Review API
2 | # and to be able to push certificate bundles to a configmap
3 | kind: ClusterRole
4 | apiVersion: rbac.authorization.k8s.io/v1
5 | metadata:
6 | name: spire-server-trust-role
7 | rules:
8 | - apiGroups: ["authentication.k8s.io"]
9 | resources: ["tokenreviews"]
10 | verbs: ["create"]
11 | - apiGroups: [""]
12 | resources: ["configmaps", "pods", "nodes"]
13 | verbs: ["patch", "get", "list"]
14 |
15 | ---
16 | # Binds above cluster role to spire-server service account
17 | kind: ClusterRoleBinding
18 | apiVersion: rbac.authorization.k8s.io/v1
19 | metadata:
20 | name: spire-server-trust-role-binding
21 | subjects:
22 | - kind: ServiceAccount
23 | name: spire-server
24 | namespace: spire
25 | roleRef:
26 | kind: ClusterRole
27 | name: spire-server-trust-role
28 | apiGroup: rbac.authorization.k8s.io
29 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/server-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: spire-server
5 | namespace: spire
6 | data:
7 | server.conf: |
8 | server {
9 | bind_address = "0.0.0.0"
10 | bind_port = "8081"
11 | socket_path = "/run/spire/sockets/server.sock"
12 | trust_domain = "example.org"
13 | data_dir = "/run/spire/data"
14 | log_level = "DEBUG"
15 | ca_key_type = "rsa-2048"
16 |
17 | default_svid_ttl = "10m"
18 | ca_ttl = "1h"
19 |
20 | ca_subject = {
21 | country = ["US"],
22 | organization = ["SPIFFE"],
23 | common_name = "",
24 | }
25 | }
26 |
27 | plugins {
28 | DataStore "sql" {
29 | plugin_data {
30 | database_type = "sqlite3"
31 | connection_string = "/run/spire/data/datastore.sqlite3"
32 | }
33 | }
34 |
35 | NodeAttestor "k8s_psat" {
36 | plugin_data {
37 | clusters = {
38 | "demo-cluster" = {
39 | use_token_review_api_validation = true
40 | service_account_allow_list = ["spire:spire-agent"]
41 | }
42 | }
43 | }
44 | }
45 |
46 | KeyManager "disk" {
47 | plugin_data {
48 | keys_path = "/run/spire/data/keys.json"
49 | }
50 | }
51 |
52 | Notifier "k8sbundle" {
53 | plugin_data {
54 | namespace = "spire"
55 | config_map = "trust-bundle"
56 | config_map_key = "root-cert.pem"
57 | }
58 | }
59 | }
60 |
61 | health_checks {
62 | listener_enabled = true
63 | bind_address = "0.0.0.0"
64 | bind_port = "8080"
65 | live_path = "/live"
66 | ready_path = "/ready"
67 | }
68 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/server-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: spire-server
5 | namespace: spire
6 | spec:
7 | type: NodePort
8 | ports:
9 | - name: grpc
10 | port: 8081
11 | targetPort: 8081
12 | protocol: TCP
13 | selector:
14 | app: spire-server
15 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/server-statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: spire-server
5 | namespace: spire
6 | labels:
7 | app: spire-server
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: spire-server
13 | serviceName: spire-server
14 | template:
15 | metadata:
16 | namespace: spire
17 | labels:
18 | app: spire-server
19 | spec:
20 | serviceAccountName: spire-server
21 | shareProcessNamespace: true
22 | containers:
23 | - name: spire-server
24 | image: gcr.io/spiffe-io/spire-server:1.4.3
25 | args:
26 | - -config
27 | - /run/spire/config/server.conf
28 | livenessProbe:
29 | httpGet:
30 | path: /live
31 | port: 8080
32 | failureThreshold: 2
33 | initialDelaySeconds: 15
34 | periodSeconds: 60
35 | timeoutSeconds: 3
36 | readinessProbe:
37 | httpGet:
38 | path: /ready
39 | port: 8080
40 | initialDelaySeconds: 5
41 | periodSeconds: 5
42 | ports:
43 | - containerPort: 8081
44 | volumeMounts:
45 | - name: spire-config
46 | mountPath: /run/spire/config
47 | readOnly: true
48 | - name: spire-data
49 | mountPath: /run/spire/data
50 | readOnly: false
51 | - name: spire-registration-socket
52 | mountPath: /run/spire/sockets
53 | readOnly: false
54 | volumes:
55 | - name: spire-config
56 | configMap:
57 | name: spire-server
58 | - name: k8s-workload-registrar-config
59 | configMap:
60 | name: k8s-workload-registrar
61 | - name: spire-registration-socket
62 | hostPath:
63 | path: /run/spire/server-sockets
64 | type: DirectoryOrCreate
65 | volumeClaimTemplates:
66 | - metadata:
67 | name: spire-data
68 | namespace: spire
69 | spec:
70 | accessModes:
71 | - ReadWriteOnce
72 | resources:
73 | requests:
74 | storage: 1Gi
75 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/spiffe-csi-driver.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: CSIDriver
3 | metadata:
4 | name: "csi.spiffe.io"
5 | spec:
6 | # Only ephemeral, inline volumes are supported. There is no need for a
7 | # controller to provision and attach volumes.
8 | attachRequired: false
9 |
10 | # Request the pod information which the CSI driver uses to verify that an
11 | # ephemeral mount was requested.
12 | podInfoOnMount: true
13 |
14 | # Don't change ownership on the contents of the mount since the Workload API
15 | # Unix Domain Socket is typically open to all (i.e. 0777).
16 | fsGroupPolicy: None
17 |
18 | # Declare support for ephemeral volumes only.
19 | volumeLifecycleModes:
20 | - Ephemeral
21 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/demo/spire/spire-bundle-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: trust-bundle
5 | namespace: spire
6 |
--------------------------------------------------------------------------------
/25-istio-spire-integration/slides/istio-spire-slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/25-istio-spire-integration/slides/istio-spire-slides.pdf
--------------------------------------------------------------------------------
/26-cluster-api-k8s/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 26 - Declarative Kubernetes Lifecycle Management Across Multi-Clusters/Clouds with Cluster API
2 |
3 | ## Recording ##
4 | https://www.youtube.com/watch?v=fKeqbkGiHog
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 | [slides](hoot-capi.pdf)
9 |
10 | ## Hands-on: Steps from the demo
11 |
12 | Please refer to the official quick start guide for [Cluster API](https://cluster-api.sigs.k8s.io/user/quick-start.html).
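
For orientation, the quick start ultimately has you generate and apply a set of cluster manifests (typically via `clusterctl generate cluster`). Below is a minimal sketch of the central `Cluster` object only; the names and the Docker infrastructure provider are illustrative assumptions, not the quick start's exact output.

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: demo-cluster
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ["192.168.0.0/16"]
  controlPlaneRef:              # control plane provider (kubeadm here)
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: demo-cluster-control-plane
  infrastructureRef:            # infrastructure provider (Docker, for local experimentation)
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerCluster
    name: demo-cluster
```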
13 |
--------------------------------------------------------------------------------
/26-cluster-api-k8s/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 26, May 10, 2022
2 | Declarative Kubernetes Lifecycle Management Across Multi-Clusters/Clouds with Cluster API
3 |
4 | 1 min hook to today's hoot.
5 |
6 | Interested in learning the best way to manage Kubernetes clusters seamlessly across multiple clusters and clouds? KubeFed no more... in this episode, Lin catches up with Jun from SpectroCloud to learn everything about Cluster API.
7 |
8 | **speaker intro** (2 mins)
9 | Welcome to the hoot livestream, where we bring Istio, Envoy, K8s, eBPF & GraphQL technologies to you so you can be well prepared at your job and be the best cloud native developer/operator/architect!
10 | Lin: your host for hoot livestream today.
11 | speakers: intro
12 |
13 | **News (2 mins)**
14 |
15 | https://buoyant.io/2022/05/04/announcing-fully-managed-linkerd-with-buoyant-cloud/
16 |
17 | https://blogs.mulesoft.com/news/introducing-universal-api-management-on-anypoint-platform/
18 |
19 | https://isovalent.com/blog/post/2022-05-03-servicemesh-security
20 |
21 | https://twitter.com/louiscryan/status/1522661442138238976?s=20&t=8DDNNd9Av9ByUlaz0N5O5w
22 |
23 | https://twitter.com/KohaviYuval/status/1521832245450919936?s=20&t=8DDNNd9Av9ByUlaz0N5O5w
24 |
25 | hoot update: No hoot next week, hope to see you at KubeCon EU!
26 |
27 | **General Questions** (20 mins)
28 | What is the target persona for Cluster API?
29 |
30 | Why do they need to pay attention to Cluster API?
31 |
32 | Can you explain how Cluster API works at a high level?
33 |
34 | Does Cluster API handle multi-cluster/multi-cloud?
35 |
36 | Does Cluster API handle lifecycle management beyond initial provisioning?
37 |
38 | How are you leveraging Cluster API at Spectro Cloud?
39 |
40 | Where do you see service mesh fitting into the puzzle here?
41 | - Good place to mention the work Spectro Cloud is doing with Solo?
42 |
43 | Any future directions for Cluster API you want to share?
44 |
45 | **Let us dive into demo** (5-10 mins)
46 | - any demo you want to show?
47 |
48 | Anything else you would like to share before we wrap up?
49 |
50 | **wrap up** (2 mins)
51 | - Thank speakers! Ask speakers: How do folks reach out to you?
52 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you the Tues after KubeCon EU!
--------------------------------------------------------------------------------
/26-cluster-api-k8s/hoot-capi.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/26-cluster-api-k8s/hoot-capi.pdf
--------------------------------------------------------------------------------
/27-gloo-cilium-and-istio/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 27 - Gloo Cilium and Istio Seamlessly Together
2 |
3 | ## Recording ##
4 | https://www.youtube.com/watch?v=bAjAJtQioPU
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 | [slides]()
9 |
10 | ## Hands-on: Steps from the demo
11 | N/A: to be released in an upcoming Gloo Mesh release.
12 |
--------------------------------------------------------------------------------
/27-gloo-cilium-and-istio/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 27, May 24, 2022
2 | Gloo Cilium and Istio Seamlessly Together
3 |
4 | 1 min hook to today's hoot.
5 | Network policies are highly recommended along with L7 security policies, per Istio's security best practices doc. These resources are vastly different, so how can we make this easier for our users? Welcome to hoot livestream episode 27; today we will discuss Cilium and Istio Seamlessly Together. I'm your host, and I look forward to learning this topic together with you.
6 |
7 | **News (2 mins)**
8 | Solo.io adds Cilium to Gloo Mesh:
9 | https://www.devopsdigest.com/soloio-adds-cilium-to-gloo-mesh
10 |
11 | https://www.solo.io/blog/enabling-cilium-gloo-application-networking-platform/
12 |
13 | CNCF survey on SM: https://www.cncf.io/wp-content/uploads/2022/05/CNCF_Service_Mesh_MicroSurvey_Final.pdf
14 |
15 | Envoy gateway: https://blog.envoyproxy.io/introducing-envoy-gateway-ad385cc59532
16 |
17 | https://isovalent.com/blog/post/2022-05-16-tetragon
18 |
19 | https://techcrunch.com/2022/05/18/apollo-graphql-launches-its-supergraph/
20 |
21 | hoot update:
22 | Spire+Istio demo scripts are provided!
23 |
24 | **speaker intro** (2 mins)
25 | speakers: intro
26 |
27 | General Questions (20 mins)
28 |
29 | What is Cilium?
30 | - L3, IP based
31 | - CNI, best CNI out there?
32 |
33 | Can you describe Cilium's security model?
34 |
35 | What are the issues with network identity?
36 | - overlapping IPs
37 | - Do labels help here?
38 |
39 | How does network based identity work with multicluster?
40 | - Is flat network required?
41 | - access to k8s API server across multiclusters
42 | https://docs.cilium.io/en/stable/gettingstarted/clustermesh/clustermesh/#limitations
43 |
44 | Thoughts on how Cilium and Istio can be integrated together? Why would someone want to use one or the other, or both?
45 |
46 | Why Cilium and Istio with Gloo Mesh?
47 |
48 | Gloo Mesh has workspaces, which provide multi-tenancy to different teams; are we also bringing tenancy to Cilium?
49 |
50 | Can I use Gloo Network for Cilium without Istio?
51 |
52 | **Let us dive into demo** (5-10 mins)
53 |
54 | Any demo you want to show?
55 |
56 |
57 | **wrap up** (2 mins)
58 | - Thank speakers! Ask speakers: How do folks reach out to you?
59 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
60 |
61 |
--------------------------------------------------------------------------------
/28-what-is-new-istio-1.14/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 28, June 7, 2022
2 | What is new in Istio 1.14
3 |
4 | 1 min hook to today's hoot.
5 | Istio just turned 5 and the community released 1.14. Want to know what is new in the release? Today, I am so excited to discuss the Istio 1.14 release with Faseela!
6 |
7 | **speaker intro** (2 mins)
8 | speakers: intro
9 |
10 | **News (2 mins)**
11 |
12 | Istio 1.14 is out, thanks to the release managers & the community!
13 |
14 | Broadcom to acquire VMware:
15 | https://investors.broadcom.com/news-releases/news-release-details/broadcom-acquire-vmware-approximately-61-billion-cash-and-stock
16 |
17 | IstioCon recap: https://www.youtube.com/watch?v=PyXxLXJRMoU
18 |
19 | hoot update: https://github.com/solo-io/hoot/#upcoming-episodes
20 |
21 | Cilium workshop: https://app.livestorm.co/solo-io/introduction-to-ebpf-and-cilium-amer-060922
22 |
23 | General Questions (20 mins)
24 |
25 | What is your contribution experience with Istio?
26 |
27 | Discuss Istio 1.14: pull out the release blog, release notes, and upgrade notes
28 |
29 | Discuss upgrade:
30 |
31 | - First minor release without any upgrade caveat.
32 | - Kubernetes warning of removal of deprecated APIs. https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/#api-changes. Istio prior to 1.10 won't work with k8s 1.22 or newer.
33 |
34 | Discuss highlights of releases
35 | - Spire: refer to episode 26
36 | - Faseela: auto SNI
37 | - Lin: min TLS version (config sketch below): https://preliminary.istio.io/latest/docs/tasks/security/tls-configuration/workload-min-tls-version/
38 | -- Q: does it work for gateways?
39 | - Lin: Telemetry API improvement (config sketch below)
40 |
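For the min TLS version item above, here is a minimal sketch of how the new workload minimum TLS version setting can be applied through an IstioOperator overlay. The exact demo config is not captured in these notes, and `TLSV1_3` is just an illustrative value:

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  meshConfig:
    meshMTLS:
      minProtocolVersion: TLSV1_3   # raise the minimum TLS version for workload-to-workload mTLS
```
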
41 | Other features of releases that are interesting?
42 | - Faseela?
43 | -- workload selector for DestinationRule
44 | -- credential name support for sidecar egress TLS origination
45 | - Lin?
46 | -- PILOT_SEND_UNHEALTHY_ENDPOINTS
47 | -- ProxyConfig - envoy runtime values: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/operations/runtime
48 | -- always disable protocol sniffing in production: **Fixed** an issue causing traffic from a gateway to a service with an [undeclared protocol]
49 | -- Istio's default load balancing algorithm changed from `ROUND_ROBIN` to `LEAST_REQUEST`
50 | -- **Added** support for WasmPlugin pulling image from private repository with `imagePullSecret`.
51 | -- **Added** support of installing gateway helm chart as `daemonset`.
52 | ([Issue #37610](https://github.com/istio/istio/issues/37610))
53 | -- anything interesting from istioctl?
54 |
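For the Telemetry API improvement noted above, a minimal sketch of a mesh-wide Telemetry resource (assumed example enabling the built-in Envoy access log provider; the live demo may configure something different):

```yaml
apiVersion: telemetry.istio.io/v1alpha1
kind: Telemetry
metadata:
  name: mesh-default
  namespace: istio-system   # root namespace, so this applies mesh-wide
spec:
  accessLogging:
  - providers:
    - name: envoy            # built-in Envoy access log provider
```
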
55 | **Let us dive into demo** (10-15 mins)
56 | - Faseela: auto SNI etc
57 | - Lin: upgrade to 1.14, min TLS, and telemetry API improvement
58 |
59 |
60 | **wrap up** (2 mins)
61 | - Thank speakers! Ask speakers: How do folks reach out to you?
62 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
63 |
64 |
--------------------------------------------------------------------------------
/28-what-is-new-istio-1.14/demo/kube-prometheus-stack/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | # Repositories of Helm charts
3 | - name: istio
4 | url: https://istio-release.storage.googleapis.com/charts
5 | - name: prometheus-community
6 | url: https://prometheus-community.github.io/helm-charts
7 |
8 | releases:
9 | # Deploys base Istio components
10 | - name: istio-base
11 | chart: istio/base
12 | version: 1.14.0
13 | namespace: istio-system
14 | createNamespace: true
15 |
16 | # Deploys Istio control-plane
17 | - name: istio-discovery
18 | chart: istio/istiod
19 | version: 1.14.0
20 | namespace: istio-system
21 | needs:
22 | - istio-system/istio-base
23 | values:
24 | - pilot:
25 | resources:
26 | requests:
27 | cpu: 10m
28 | memory: 100Mi
29 | autoscaleEnabled: false
30 |
31 | # Deploys istio-ingressgateway for inbound traffic
32 | - name: istio-ingressgateway
33 | chart: istio/gateway
34 | version: 1.14.0
35 | namespace: istio-system
36 | needs:
37 | - istio-system/istio-base
38 | - istio-system/istio-discovery
39 |
40 | # Monitoring and logging components
41 | #
42 | # Deploys Prometheus stack into monitoring namespace
43 | - name: kube-prometheus-stack
44 | chart: prometheus-community/kube-prometheus-stack
45 | version: 32.3.0
46 | namespace: monitoring
47 | createNamespace: true
48 | values:
49 | - values/kube-prometheus-stack/values.yaml
50 |
--------------------------------------------------------------------------------
/28-what-is-new-istio-1.14/demo/kube-prometheus-stack/values.yaml:
--------------------------------------------------------------------------------
1 | alertmanager:
2 | enabled: false
3 |
4 | grafana:
5 | enabled: true
6 | image:
7 | repository: grafana/grafana
8 | tag: 8.3.6
9 |
10 | defaultDashboardsEnabled: true
11 |
12 | kube-state-metrics:
13 | enabled: true
14 |
15 | kubeControllerManager:
16 | enabled: false
17 |
18 | kubeScheduler:
19 | enabled: false
20 |
21 | kubeProxy:
22 | enabled: false
23 |
24 | ## Deploy a Prometheus instance
25 | ##
26 | prometheus:
27 | prometheusSpec:
28 | ruleSelectorNilUsesHelmValues: false
29 | serviceMonitorSelectorNilUsesHelmValues: false
30 | podMonitorSelectorNilUsesHelmValues: false
31 |
32 | retention: 6h
33 |
34 | additionalScrapeConfigs:
35 | - job_name: 'istiod'
36 | kubernetes_sd_configs:
37 | - role: endpoints
38 | namespaces:
39 | names:
40 | - istio-system
41 | relabel_configs:
42 | - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
43 | action: keep
44 | regex: istiod;http-monitoring
45 |
46 | - job_name: 'envoy-stats'
47 | metrics_path: /stats/prometheus
48 | kubernetes_sd_configs:
49 | - role: pod
50 | relabel_configs:
51 | - source_labels: [__meta_kubernetes_pod_container_port_name]
52 | action: keep
53 | regex: '.*-envoy-prom'
54 |
--------------------------------------------------------------------------------
/29-port-ebpf-app-to-bumblebee/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 29 - Porting eBPF applications to BumbleBee
2 |
3 | ## Recording ##
4 | https://www.youtube.com/watch?v=NQcOQ1-sJII
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 | ## Hands-on: Steps from the demo
9 | https://www.solo.io/blog/porting-ebpf-applications-to-bumblebee/
--------------------------------------------------------------------------------
/29-port-ebpf-app-to-bumblebee/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 29, June 14, 2022
2 | Porting eBPF applications to BumbleBee
3 |
4 | 1 min hook to today's hoot.
5 | Excited about eBPF and ready to get started with understanding and writing your own eBPF applications? Krisztian recently ported an eBPF application to BumbleBee. Today, I am so excited to discuss eBPF and BumbleBee with Krisztian!
6 |
7 | **speaker intro** (2 mins)
8 | speakers: intro
9 |
10 | **News (2 mins)**
11 | Hoot steps for episode 28 are updated
12 | SD Times released its SD Times 100 list and Solo.io made the APIs and Integrations category! In fact, Solo.io and SnapLogic were highlighted for first-time inclusion on the list:
13 | https://sdtimes.com/sdtimes-100/2022/best-in-show/apis-and-integration-2022/
14 | New blog from Buoyant (Linkerd) on eBPF and Service Mesh: https://buoyant.io/2022/06/07/ebpf-sidecars-and-the-future-of-the-service-mesh/
15 |
16 | General Questions (20 mins)
17 | - What is BumbleBee?
18 | - What is new about BumbleBee?
19 | -- Roadmap: https://github.com/solo-io/bumblebee/blob/main/ROADMAP.md
20 | -- co-sign feature added: https://github.com/solo-io/bumblebee/blob/main/docs/getting_started.md#security
21 | - oomkill example added
22 | -- What does oomkill do?
23 | - Why would someone want to port eBPF apps to BumbleBee?
24 | - What was your experience porting the oomkill example? What are the gotchas?
25 | - What is the difference between the Cloudflare eBPF exporter and BumbleBee?
26 |
27 |
28 |
29 | **Let us dive into demo** (10-15 mins)
30 |
31 | https://play.instruqt.com/soloio/tracks/developing-ebpf-apps
32 |
33 | - Is there anything else you want to add?
34 | -- mention workshop, and badges: https://www.credly.com/org/solo-io/badge/fundamentals-for-ebpf-by-solo-io
35 |
36 | **wrap up** (2 mins)
37 | - Thank speakers! Ask speakers: How do folks reach out to you?
38 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
39 |
40 |
41 |
--------------------------------------------------------------------------------
/30-https-envoy-explained/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 30 - HTTP/3 With Envoy Explained
2 |
3 | ## Recording ##
4 | https://www.youtube.com/watch?v=TjaJ5oMxNpc
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 | ## Hands-on: Steps from the demo
9 | https://github.com/bcollard/http3-envoy-demo
10 | https://baptistout.net/posts/upgrade-envoy-http3/
--------------------------------------------------------------------------------
/30-https-envoy-explained/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 30, July 5, 2022
2 | HTTP/3 With Envoy Explained
3 |
4 | 1 min hook to today's hoot.
5 |
6 | Why HTTP/3? How do you test HTTP/3 with Envoy and use Prometheus and Grafana to observe the metrics? In this livestream, I am so excited to have Baptiste join me to discuss HTTP/3 with Envoy and do a live demo of running Envoy with HTTP/3 and observing Envoy metrics.
7 |
8 | https://baptistout.net/posts/upgrade-envoy-http3/
9 |
10 | **speaker intro** (2 mins)
11 | speakers: intro
12 |
13 | **News (2 mins)**
14 | Bumblebee blog: https://www.solo.io/blog/porting-ebpf-applications-to-bumblebee/
15 | CFP for gitops con: https://twitter.com/OpenGitOps/status/1541455286585593856
16 | Solo.io added to vendors for cilium: https://github.com/cilium/cilium/blob/master/USERS.md#users-alphabetically
17 | https://www.solo.io/blog/solo-io-joins-the-graphql-foundation/
18 |
19 | General Questions (10-15 mins)
20 | - What is HTTP/3?
21 | - Why do I need to care about HTTP/3?
22 | - How does HTTP/3 impact folks using Envoy?
23 | - Interested in learning more about HTTP/3 and Envoy, how can I get started? (see the listener sketch after this list)
24 | - Do I get all the Envoy metrics with HTTP/3?
25 | - Any limitations of Envoy's HTTP/3 support?
26 |
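To make the "how can I get started" question above concrete, here is a heavily trimmed sketch of the downstream (listener) side of an HTTP/3-enabled Envoy config. The certificate paths and the `local_service` cluster are assumptions; see Baptiste's post and the demo repo in the Resources section for the full, tested configuration.

```yaml
static_resources:
  listeners:
  - name: quic_listener
    address:
      socket_address: { address: 0.0.0.0, port_value: 443, protocol: UDP }
    udp_listener_config:
      quic_options: {}              # enables QUIC on this UDP listener
    filter_chains:
    - transport_socket:
        name: envoy.transport_sockets.quic
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport
          downstream_tls_context:
            common_tls_context:
              tls_certificates:
              - certificate_chain: { filename: /etc/envoy/cert.pem }   # assumed paths
                private_key: { filename: /etc/envoy/key.pem }
      filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: HTTP3              # speak HTTP/3 over QUIC
          http3_protocol_options: {}
          route_config:
            virtual_hosts:
            - name: default
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_service }
```

In practice you also keep a TCP/TLS listener that advertises an `alt-svc: h3=...` response header so browsers discover the QUIC endpoint.
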
27 | **Let us dive into demo** (10-15 mins)
28 | - Is there anything else you want to add?
29 |
30 | **wrap up** (2 mins)
31 | - Thank speakers! Ask speakers: How do folks reach out to you?
32 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
33 |
34 | **Resources**
35 | https://github.com/bcollard/http3-envoy-demo
36 | https://github.com/istio/istio/wiki/Experimental-QUIC-and-HTTP-3-support-in-Istio-gateways
37 | https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http/http3
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 31, July 12, 2022
2 | Cilium L7 Policies vs Istio's
3 |
4 | 1 min hook to today's hoot.
5 |
6 | Istio offers rich L7 traffic management and security policies. Cilium also offers L7 policies, and we have gotten a lot of questions from users about whether they still need Istio's L7 policies. In this livestream, Yuval will join Lin to explain and dive into the differences between the two, focusing on the security perspective.
7 |
8 | **speaker intro** (2 mins)
9 | speakers: intro
10 |
11 | General Discussions (10-15 mins)
12 |
13 | - What is Cilium L7 policy?
14 | - Doc: https://docs.cilium.io/en/stable/concepts/ebpf/intro/, search for L7 policy
15 | - Examples: https://docs.cilium.io/en/stable/policy/language/#layer-7-examples
16 | - I can use Kubernetes constructs, for example a ServiceAccount (SA), in my network policy - example: https://docs.cilium.io/en/stable/policy/kubernetes/#serviceaccounts
17 | - Lin: a quick demo
18 |
19 | - What is Istio's L7 policy?
20 | - Lin: explain authz policy: deny all, then explicitly allow access, with a quick demo (see the sketch after this list)
21 |
22 | - How do they compare?
23 | - problem with identity based on label or service account name
24 | - Encryption, mutual TLS?
25 | - Wireguard: https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/
26 | - wireguard limitation: https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/#limitations
27 | - ipsec: https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/#encryption-ipsec
28 | - FIPS compliance: google search "is wireguard fips compliant"
29 | - Interoperability
30 | - Can Envoy handle Multi-tenancy for L7?
31 | - Eventual consistency?
32 |
33 | - Is there anything else you want to add?
34 |
35 | - recap: the CNI is responsible for L3/L4 traffic, and the service mesh for L7
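
As referenced above, a minimal sketch of the deny-all-then-allow AuthorizationPolicy pattern, roughly mirroring the Cilium L7 policy in `cilium-policy-l7.yaml`; the `default` namespace and the service account names are assumptions, not the exact demo config:

```yaml
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: deny-all
  namespace: default
spec: {}                       # ALLOW policy with no rules matches nothing, so all requests are denied
---
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: allow-sleep-to-helloworld
  namespace: default
spec:
  selector:
    matchLabels:
      app: helloworld
  action: ALLOW
  rules:
  - from:
    - source:
        principals: ["cluster.local/ns/default/sa/sleep"]   # identity-based, not IP- or label-based
    to:
    - operation:
        methods: ["GET"]
        paths: ["/hello"]
```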
36 |
37 | **wrap up** (2 mins)
38 | - Thank speakers! Ask speakers: How do folks reach out to you?
39 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
40 |
41 | Resources:
42 | Yuval SMC EU 2022 slide: https://docs.google.com/presentation/d/1y7nTtpmSSJdeZrvFDibLJDEDMEfqRLKn5-DysGVc-Ws/edit#slide=id.g13698316493_2_447
43 |
44 | Louis' tweet: https://twitter.com/louiscryan/status/1522661442138238976?s=20&t=nxDYj8oTdN7UHIZCOKqPRQ
45 |
46 | Matt's viewpoint on multi-tenancy envoy: https://twitter.com/mattklein123/status/1522757356857085952?s=20&t=ACVDWbAoSYexcosvNXd3_Q
47 |
48 | William Morgan's blog: https://buoyant.io/2022/06/07/ebpf-sidecars-and-the-future-of-the-service-mesh/
49 |
50 | DPV2 & Cilium: https://www.doit-intl.com/ebpf-cilium-dataplane-v2-and-all-that-buzz-part-2/
51 |
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/cilium-policy-l4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "cilium.io/v2"
2 | kind: CiliumNetworkPolicy
3 | metadata:
4 | name: "service-account"
5 | spec:
6 | endpointSelector:
7 | matchLabels:
8 | io.cilium.k8s.policy.serviceaccount: helloworld
9 | ingress:
10 | - fromEndpoints:
11 | - matchLabels:
12 | io.cilium.k8s.policy.serviceaccount: sleep
13 | toPorts:
14 | - ports:
15 | - port: "5000"
16 | protocol: TCP
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/cilium-policy-l7.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "cilium.io/v2"
2 | kind: CiliumNetworkPolicy
3 | metadata:
4 | name: "service-account"
5 | spec:
6 | endpointSelector:
7 | matchLabels:
8 | io.cilium.k8s.policy.serviceaccount: helloworld
9 | ingress:
10 | - fromEndpoints:
11 | - matchLabels:
12 | io.cilium.k8s.policy.serviceaccount: sleep
13 | toPorts:
14 | - ports:
15 | - port: "5000"
16 | protocol: TCP
17 | rules:
18 | http:
19 | - method: GET
20 | path: "/hello"
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/samples/helloworld-with-affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: helloworld
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: helloworld
10 | labels:
11 | app: helloworld
12 | service: helloworld
13 | spec:
14 | ports:
15 | - port: 5000
16 | name: http
17 | selector:
18 | app: helloworld
19 | ---
20 | apiVersion: v1
21 | kind: Service
22 | metadata:
23 | name: helloworld-v1
24 | labels:
25 | app: helloworld
26 | service: helloworld
27 | spec:
28 | ports:
29 | - port: 5000
30 | name: http
31 | selector:
32 | app: helloworld
33 | version: v1
34 | ---
35 | apiVersion: v1
36 | kind: Service
37 | metadata:
38 | name: helloworld-v2
39 | labels:
40 | app: helloworld
41 | service: helloworld
42 | spec:
43 | ports:
44 | - port: 5000
45 | name: http
46 | selector:
47 | app: helloworld
48 | version: v2
49 | ---
50 | apiVersion: apps/v1
51 | kind: Deployment
52 | metadata:
53 | name: helloworld-v1-cross-node
54 | labels:
55 | app: helloworld
56 | version: v1
57 | spec:
58 | replicas: 1
59 | selector:
60 | matchLabels:
61 | app: helloworld
62 | version: v1
63 | template:
64 | metadata:
65 | labels:
66 | app: helloworld
67 | version: v1
68 | spec:
69 | affinity:
70 | podAntiAffinity:
71 | requiredDuringSchedulingIgnoredDuringExecution:
72 | - labelSelector:
73 | matchExpressions:
74 | - key: version
75 | operator: In
76 | values:
77 | - v2
78 | topologyKey: kubernetes.io/hostname
79 | serviceAccountName: helloworld
80 | containers:
81 | - name: helloworld
82 | image: docker.io/istio/examples-helloworld-v1
83 | imagePullPolicy: IfNotPresent #Always
84 | ports:
85 | - containerPort: 5000
86 | ---
87 | apiVersion: apps/v1
88 | kind: Deployment
89 | metadata:
90 | name: helloworld-v2-same-node
91 | labels:
92 | app: helloworld
93 | version: v2
94 | spec:
95 | replicas: 1
96 | selector:
97 | matchLabels:
98 | app: helloworld
99 | version: v2
100 | template:
101 | metadata:
102 | labels:
103 | app: helloworld
104 | version: v2
105 | spec:
106 | affinity:
107 | podAntiAffinity:
108 | requiredDuringSchedulingIgnoredDuringExecution:
109 | - labelSelector:
110 | matchExpressions:
111 | - key: version
112 | operator: In
113 | values:
114 | - v1
115 | topologyKey: kubernetes.io/hostname
116 | serviceAccountName: helloworld
117 | containers:
118 | - name: helloworld
119 | image: docker.io/istio/examples-helloworld-v2
120 | imagePullPolicy: IfNotPresent #Always
121 | ports:
122 | - containerPort: 5000
123 |
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/samples/helloworld.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: helloworld
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: helloworld
10 | labels:
11 | app: helloworld
12 | service: helloworld
13 | spec:
14 | ports:
15 | - port: 5000
16 | name: http
17 | selector:
18 | app: helloworld
19 | ---
20 | apiVersion: v1
21 | kind: Service
22 | metadata:
23 | name: helloworld-v1
24 | labels:
25 | app: helloworld
26 | service: helloworld
27 | spec:
28 | ports:
29 | - port: 5000
30 | name: http
31 | selector:
32 | app: helloworld
33 | version: v1
34 | ---
35 | apiVersion: v1
36 | kind: Service
37 | metadata:
38 | name: helloworld-v2
39 | labels:
40 | app: helloworld
41 | service: helloworld
42 | spec:
43 | ports:
44 | - port: 5000
45 | name: http
46 | selector:
47 | app: helloworld
48 | version: v2
49 | ---
50 | apiVersion: apps/v1
51 | kind: Deployment
52 | metadata:
53 | name: helloworld-v1-cross-node
54 | labels:
55 | app: helloworld
56 | version: v1
57 | spec:
58 | replicas: 1
59 | selector:
60 | matchLabels:
61 | app: helloworld
62 | version: v1
63 | template:
64 | metadata:
65 | labels:
66 | app: helloworld
67 | version: v1
68 | spec:
69 | serviceAccountName: helloworld
70 | containers:
71 | - name: helloworld
72 | image: docker.io/istio/examples-helloworld-v1
73 | imagePullPolicy: IfNotPresent #Always
74 | ports:
75 | - containerPort: 5000
76 | ---
77 | apiVersion: apps/v1
78 | kind: Deployment
79 | metadata:
80 | name: helloworld-v2-same-node
81 | labels:
82 | app: helloworld
83 | version: v2
84 | spec:
85 | replicas: 1
86 | selector:
87 | matchLabels:
88 | app: helloworld
89 | version: v2
90 | template:
91 | metadata:
92 | labels:
93 | app: helloworld
94 | version: v2
95 | spec:
96 | serviceAccountName: helloworld
97 | containers:
98 | - name: helloworld
99 | image: docker.io/istio/examples-helloworld-v2
100 | imagePullPolicy: IfNotPresent #Always
101 | ports:
102 | - containerPort: 5000
103 |
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/samples/notsleep.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: notsleep
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: notsleep
10 | labels:
11 | app: notsleep
12 | service: notsleep
13 | spec:
14 | ports:
15 | - port: 80
16 | name: http
17 | selector:
18 | app: notsleep
19 | ---
20 | apiVersion: apps/v1
21 | kind: Deployment
22 | metadata:
23 | name: notsleep
24 | spec:
25 | replicas: 1
26 | selector:
27 | matchLabels:
28 | app: notsleep
29 | template:
30 | metadata:
31 | labels:
32 | app: notsleep
33 | spec:
34 | terminationGracePeriodSeconds: 0
35 | serviceAccountName: notsleep
36 | containers:
37 | - name: notsleep
38 | image: governmentpaas/curl-ssl:terraform-14
39 | command: ["/bin/sleep", "3650d"]
40 | imagePullPolicy: IfNotPresent
41 |
42 |
--------------------------------------------------------------------------------
/31-cilium-istio-l7-policies/samples/sleep.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | spec:
51 | terminationGracePeriodSeconds: 0
52 | serviceAccountName: sleep
53 | containers:
54 | - name: sleep
55 | image: curlimages/curl
56 | command: ["/bin/sleep", "3650d"]
57 | imagePullPolicy: IfNotPresent
58 | volumeMounts:
59 | - mountPath: /etc/sleep/tls
60 | name: secret-volume
61 | volumes:
62 | - name: secret-volume
63 | secret:
64 | secretName: sleep-secret
65 | optional: true
66 |
--------------------------------------------------------------------------------
/32-graphql-dev-ops/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 32 - GraphQL for Developers or Platform Team?
2 |
3 | ## Recording ##
4 | https://youtu.be/cFcIb1mh998
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 |
--------------------------------------------------------------------------------
/32-graphql-dev-ops/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 32, July 19, 2022
2 | GraphQL for Developers or Platform Team?
3 |
4 | 1 min hook to today's hoot.
5 |
6 | While GraphQL is cool in providing exactly the data the client asks for, does GraphQL help front-end developers or backend developers? What about operators? What are the challenges in adopting GraphQL? In this hoot livestream, Keith Babo is joining Lin to discuss the overall GraphQL landscape, the challenges in adopting GraphQL, and some innovative solutions to tackle these challenges.
7 |
8 |
9 | **speaker intro** (2 mins)
10 | speakers: intro
11 |
12 | **News (2 mins)**
13 | hoot: https://github.com/solo-io/hoot/tree/master/31-cilium-istio-l7-policies
14 | https://istio.io/latest/news/support/announcing-1.12-eol/
15 | gateway API beta news:
16 | https://kubernetes.io/blog/2022/07/13/gateway-api-graduates-to-beta/
17 | https://istio.io/latest/blog/2022/gateway-api-beta/
18 | https://gateway-api.sigs.k8s.io/contributing/gamma/
19 | - https://smi-spec.io/blog/announcing-smi-gateway-api-gamma/
20 |
21 | General Discussions (10-15 mins)
22 | - What exactly is GraphQL?
23 | - Why GraphQL?
24 | - How does GraphQL work?
25 | - How do I get started with learning GraphQL?
26 |
27 | * optional: a quick demo of a simple graphql service *
28 |
29 | - How is the GraphQL market out there?
30 | - Any interesting stats?
31 | - When and when not to use GraphQL? When should I consider REST vs GraphQL?
32 | - this may be useful: https://blog.logrocket.com/why-you-shouldnt-use-graphql/ or https://dev.to/andyrewlee/why-i-no-longer-use-graphql-for-new-projects-1oig
33 |
34 | - Who are the personas and what are the challenges when running GraphQL?
35 | - code development?
36 | - operation?
37 | - security?
38 |
39 | - How can we make this easier?
40 | - plug into what Solo is doing in this space.
41 |
42 | **wrap up** (2 mins)
43 | - Thank speakers! Ask speakers: How do folks reach out to you?
44 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
45 |
--------------------------------------------------------------------------------
/33-vcluster-istio/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 33 - Speed your Istio development environment with vcluster
2 |
3 | ## Recording ##
4 | https://youtu.be/b7OkYjvLf4Y
5 |
6 | [show notes](SHOWNOTES.md)
7 |
8 | ## Hands-on: Steps from the demo
9 |
10 | ### vcluster Demo
11 |
12 | Requirements:
13 | - A k8s cluster
14 | - [Istioctl](https://istio.io/latest/docs/setup/install/istioctl/)
15 |
16 | Review vcluster values:
17 | ```
18 | cat vcluster-values.yaml
19 | ```
20 |
21 | Get current context for the main cluster:
22 | ```
23 | export MAIN_CLUSTER=$(kubectl config current-context)
24 | ```
25 |
26 | Create a vcluster:
27 | ```
28 | vcluster create hoot-istio-test --expose -f vcluster-values.yaml --upgrade -n hoot-istio-test --connect=false --context $MAIN_CLUSTER
29 | ```
30 |
31 | Make sure the context name is not already taken by deleting any existing context with that name:
32 | ```
33 | kubectl config delete-context hoot-istio-test
34 | ```
35 |
36 | Connect to vcluster:
37 | ```
38 | vcluster connect hoot-istio-test -n hoot-istio-test --update-current --kube-config-context-name hoot-istio-test --context $MAIN_CLUSTER
39 | ```
40 |
41 | Install Istio:
42 | ```
43 | istioctl install
44 | ```
45 |
46 | Label the namespace so that Istio injects the proxy (sidecar):
47 | ```
48 | kubectl label namespace default istio-injection=enabled
49 | ```
50 |
51 | Install httpbin app:
52 | ```
53 | kubectl apply -f httpbin.yaml
54 | ```
55 |
56 | Configure Istio to route traffic to httpbin app:
57 | ```
58 | kubectl apply -f istio-httpbin-resources.yaml
59 | ```
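
For reference, a file like `istio-httpbin-resources.yaml` typically contains a Gateway plus a VirtualService along these lines. The sketch below is illustrative (the names, hosts, and port are assumptions), not the repo file itself:

```yaml
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: httpbin-gateway
spec:
  selector:
    istio: ingressgateway        # bind to the default ingress gateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: httpbin
spec:
  hosts:
  - "*"
  gateways:
  - httpbin-gateway
  http:
  - route:
    - destination:
        host: httpbin
        port:
          number: 8000           # assumed httpbin Service port (matches the Istio sample)
```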
60 |
61 | Access the httpbin app through Istio:
62 | ```
63 | kubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80
64 | ```
65 |
66 | ```
67 | curl localhost:8080/get
68 | ```
69 |
70 | Pause the vcluster (all the resources will be scaled to 0):
71 | ```
72 | kubectl config use-context $MAIN_CLUSTER
73 |
74 | vcluster pause hoot-istio-test
75 | ```
76 |
77 | Try to connect to the paused cluster to check that it is down:
78 | ```
79 | kubectl config use-context hoot-istio-test
80 |
81 | kubectl get ns
82 | ```
83 | The connection should fail.
84 |
85 |
86 | Resume vcluster:
87 | ```
88 | kubectl config use-context $MAIN_CLUSTER
89 |
90 | vcluster resume hoot-istio-test
91 | ```
92 |
93 | Wait until the cluster is back and try to connect:
94 | ```
95 | kubectl config use-context hoot-istio-test
96 |
97 | kubectl get ns
98 | ```
99 |
100 | Connect to httpbin app:
101 | ```
102 | kubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80
103 | ```
104 |
105 | ```
106 | curl localhost:8080/get
107 | ```
108 |
109 |
110 | Delete the vcluster:
111 | ```
112 | kubectl config use-context $MAIN_CLUSTER
113 | vcluster delete hoot-istio-test -n hoot-istio-test
114 | ```
115 |
--------------------------------------------------------------------------------
/33-vcluster-istio/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 33, July 26, 2022
2 | Speed your Istio development environment with vcluster
3 |
4 | Do you have concerns with cost, CPU, and networking for your Kubernetes clusters? What is a cluster within a cluster? In this livestream, Fabian and Rich from Loft Labs, along with Antonio from Solo.io, will join Lin to discuss the what, why, and when of using vCluster, and live demo how to speed up your Istio (or other) dev environment with vCluster to ease your cost, CPU, and networking concerns.
5 |
6 | **speaker intro** (2 mins)
7 | speakers: intro
8 |
9 | **News (2 mins)**
10 | Cilium 1.12: https://isovalent.com/blog/post/cilium-service-mesh/?utm_campaign=1.12-release
11 | - "This will make the Cilium Service Mesh data plane compatible with the service meshes such as Istio which are already migrating to Gateway API."
12 | - call out Istio portion: Istio is the existing service mesh control plane that is supported. It currently requires to be run with the sidecar-based datapath.
13 | - call out mixed mode: With Cilium Service Mesh, you have both options available in your platform and can even run a mix of the two
14 | My blog: https://www.cncf.io/blog/2022/07/22/exploring-cilium-layer-7-capabilities-compared-to-istio/
15 |
16 | General Discussions (10-15 mins)
17 | - vcluster team: What exactly is vcluster? Is it free, what license?
18 | - vcluster team: Why vcluster?
19 | - vcluster team: How does vcluster work?
20 | - vcluster team and solo: How do I get started with learning vcluster?
21 |
22 | * a quick demo vcluster + Istio in local env *
23 |
24 | - Does it work with multiclusters?
25 |
26 | * a quick demo vcluster + Istio multiclusters in local env *
27 |
28 | - all: When and when not to use vcluster?
29 | - vcluster team: Can I use vCluster as my team boundary when the boundary is more than one namespace? Also, does it offer better isolation than a namespace?
30 | - Could this be useful for Istio multi-cluster in prod?
31 |
32 | **wrap up** (2 mins)
33 | - Thank speakers! Ask speakers: How do folks reach out to you?
34 | - Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you at the next episode!
35 |
36 | Resources:
37 | https://loft.sh/blog/development-environments-with-vcluster-a/
38 | https://www.vcluster.com/
39 | https://istio.io/latest/docs/setup/install/multicluster/multi-primary/
40 |
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 41
2 | Nov 29, 2022
3 |
4 | Istio 1.16
5 | Istio 1.16 is out! I am very excited to have our lead release manager, Daniel Hawton, who is also an Istio ambient maintainer, joining me to discuss what is NEW in Istio 1.16 and how it relates to ambient mesh, with Daniel and myself live demoing some of our favorite Istio 1.16 features!
6 |
7 | speaker intro (2 mins)
8 | speakers: intro
9 |
10 | News:
11 | KubeCon recap:
12 | https://www.solo.io/blog/kubecon-cloudnativecon-2022-application-networking-day/
13 |
14 | 1.16 blogs:
15 | https://istio.io/latest/news/releases/1.16.x/announcing-1.16/
16 | https://www.solo.io/blog/istio-1-16-ambient-mesh/
17 |
18 | Ambient News: ztunnel project in Rust
19 | https://github.com/istio/ztunnel/
20 | Ram's multicluster blog
21 | https://www.solo.io/blog/istio-multi-cluster-traffic-debugging/
22 |
23 |
24 | General Discussions (20 mins)
25 | - What is your responsibility as lead release manager?
26 | - Was this an interesting experience?
27 | - What are the release qualification processes?
28 | - What are the most exciting features of 1.16?
29 | - What should users pay attention to regarding upgrades to 1.16?
30 | - How is 1.16 related to ambient mesh?
31 |
32 | Live Demo
33 | Lin (ambient profile + discovery selector)
34 | Daniel (Wasm)
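
A minimal sketch of what the ambient profile plus discovery selector part of the demo could look like (the label is illustrative; the actual demo config is not in these notes):

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  profile: ambient               # experimental ambient mesh profile in 1.16
  meshConfig:
    discoverySelectors:          # istiod only watches namespaces matching these selectors
    - matchLabels:
        istio-discovery: enabled
```

The same profile can also be selected on the command line with `istioctl install --set profile=ambient`.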
35 |
36 | Any questions from the audience?
37 |
38 | wrap up (2 mins)
39 |
40 | Thank speakers! Ask speakers: How do folks reach out to you?
41 | Is this interesting? What other topics do you want to see to help you on your application networking? I am super grateful for everyone who liked our past hoot livestream and subscribed to our channel. Happy learning, and see you in the next episode!
42 |
43 | Resources:
44 |
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3
4 |
5 | [[package]]
6 | name = "ahash"
7 | version = "0.7.6"
8 | source = "registry+https://github.com/rust-lang/crates.io-index"
9 | checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
10 | dependencies = [
11 | "getrandom",
12 | "once_cell",
13 | "version_check",
14 | ]
15 |
16 | [[package]]
17 | name = "cfg-if"
18 | version = "1.0.0"
19 | source = "registry+https://github.com/rust-lang/crates.io-index"
20 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
21 |
22 | [[package]]
23 | name = "getrandom"
24 | version = "0.2.8"
25 | source = "registry+https://github.com/rust-lang/crates.io-index"
26 | checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
27 | dependencies = [
28 | "cfg-if",
29 | "libc",
30 | "wasi",
31 | ]
32 |
33 | [[package]]
34 | name = "hashbrown"
35 | version = "0.12.3"
36 | source = "registry+https://github.com/rust-lang/crates.io-index"
37 | checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
38 | dependencies = [
39 | "ahash",
40 | ]
41 |
42 | [[package]]
43 | name = "libc"
44 | version = "0.2.137"
45 | source = "registry+https://github.com/rust-lang/crates.io-index"
46 | checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
47 |
48 | [[package]]
49 | name = "log"
50 | version = "0.4.17"
51 | source = "registry+https://github.com/rust-lang/crates.io-index"
52 | checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
53 | dependencies = [
54 | "cfg-if",
55 | ]
56 |
57 | [[package]]
58 | name = "once_cell"
59 | version = "1.16.0"
60 | source = "registry+https://github.com/rust-lang/crates.io-index"
61 | checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
62 |
63 | [[package]]
64 | name = "proxy-wasm"
65 | version = "0.2.0"
66 | source = "registry+https://github.com/rust-lang/crates.io-index"
67 | checksum = "3927081c2674366adadef4d5c5d34c4d849ab764a17bfe4ff2bd04436efb593d"
68 | dependencies = [
69 | "hashbrown",
70 | "log",
71 | ]
72 |
73 | [[package]]
74 | name = "rust-test"
75 | version = "0.1.0"
76 | dependencies = [
77 | "log",
78 | "proxy-wasm",
79 | ]
80 |
81 | [[package]]
82 | name = "version_check"
83 | version = "0.9.4"
84 | source = "registry+https://github.com/rust-lang/crates.io-index"
85 | checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
86 |
87 | [[package]]
88 | name = "wasi"
89 | version = "0.11.0+wasi-snapshot-preview1"
90 | source = "registry+https://github.com/rust-lang/crates.io-index"
91 | checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
92 |
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "rust-test"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [lib]
9 | path = "src/lib.rs"
10 | crate-type = ["cdylib"]
11 |
12 | [dependencies]
13 | log = "0.4.17"
14 | proxy-wasm = "0.2.0"
15 |
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:1.65 as builder
2 |
3 | WORKDIR /usr/src/app
4 | COPY . .
5 | RUN rustup target add wasm32-unknown-unknown && \
6 | make build
7 |
8 | FROM scratch
9 | COPY --from=builder /usr/src/app/target/wasm32-unknown-unknown/release/rust_test.wasm ./plugin.wasm
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | cargo build --target wasm32-unknown-unknown --release
3 | build-push:
4 | docker build -t dhawton/wasm-rust-test:v1 .
5 | docker push dhawton/wasm-rust-test:v1
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/README.md:
--------------------------------------------------------------------------------
1 | # wasm/rust-test
2 |
3 | This is a quick test of Wasm for Istio.
4 |
5 | ## Building
6 |
7 | To build the Wasm module, run:
8 |
9 | ```bash
10 | ./build.sh
11 | ```
12 |
13 | Requirements:
14 |
15 | - A *nix based system
16 | - Docker
17 |
18 | If you only need the .wasm file and not a docker image, run:
19 |
20 | ```bash
21 | make build
22 | cp target/wasm32-unknown-unknown/release/rust_test.wasm plugin.wasm
23 | ```
24 |
25 | ## Using
26 |
27 | Apply the WasmPlugin yaml, for example:
28 |
29 | ```yaml
30 | apiVersion: extensions.istio.io/v1alpha1
31 | kind: WasmPlugin
32 | metadata:
33 | name: httpbin-rust-test
34 | namespace: httpbin
35 | spec:
36 | selector:
37 | matchLabels:
38 | app: httpbin
39 | match:
40 | - ports:
41 | - number: 80
42 | url: oci://docker.io/dhawton/wasm-rust-test:v1.0
43 | ```
44 |
45 | Requests to httpbin's /get should now return HTTP status code 418 along with teapot ASCII art if the WasmPlugin was applied successfully.
46 |
47 | ## License
48 |
49 | This project is licensed under [Apache 2.0](LICENSE).
50 |
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2022 Daniel Hawton
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | set -ex
17 |
18 | dir="$(cd "$(dirname "$0")" && pwd)"
19 |
20 | while [ $# -gt 0 ]; do
21 | case "$1" in
22 | --tag)
23 | TAG="$2"
24 | shift 2
25 | ;;
26 | --image)
27 | IMAGE="$2"
28 | shift 2
29 | ;;
30 | --hub)
31 | HUB="$2"
32 | shift 2
33 | ;;
34 | --push)
35 | PUSH=1
36 | shift
37 | ;;
38 | --help)
39 | echo "Usage: $0 [--tag <tag>] [--image <image>] [--hub <hub>] [--push]"
40 | echo " --tag Tag to use for the image (defaults to 'latest')"
41 | echo " --image Docker image to build (defaults to 'wasm-rust-test')"
42 | echo " --hub Docker hub to push to (defaults to 'docker.io/dhawton') [example: docker.io/username]"
43 | echo " --push Push the image to the hub"
44 | echo " --help: show this help message and exit"
45 | exit 0
46 | ;;
47 | *)
48 | echo "Unknown argument: $1"
49 | exit 1
50 | ;;
51 | esac
52 | done
53 |
54 | HUB=${HUB:-docker.io/dhawton}
55 | IMAGE=${IMAGE:-wasm-rust-test}
56 | TAG=${TAG:-latest}
57 |
58 | if [[ ! -z "$HUB" ]]; then
59 | HUB="$HUB/"
60 | fi
61 |
62 | pushd $dir
63 |
64 | docker build . -t $HUB$IMAGE:$TAG
65 |
66 | if [[ ! -z "$PUSH" ]]; then
67 | docker push $HUB$IMAGE:$TAG
68 | fi
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Daniel Hawton
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | use log::{info, trace};
16 | use proxy_wasm as wasm;
17 | use wasm::{types::Action, types::ContextType};
18 |
19 | wasm::main! {{
20 | wasm::set_log_level(wasm::types::LogLevel::Trace);
21 | wasm::set_root_context(|_| -> Box<dyn wasm::traits::RootContext> {
22 | Box::new(RustTest)
23 | });
24 | }}
25 |
26 | struct RustTest;
27 |
28 | struct HttpHeaders {
29 | context_id: u32,
30 | }
31 |
32 | impl wasm::traits::Context for RustTest {}
33 |
34 | impl wasm::traits::RootContext for RustTest {
35 | fn on_vm_start(&mut self, _vm_configuration_size: usize) -> bool {
36 | info!("on_vm_start");
37 | true
38 | }
39 |
40 | fn get_type(&self) -> Option<ContextType> {
41 | Some(ContextType::HttpContext)
42 | }
43 |
44 | fn create_http_context(&self, context_id: u32) -> Option<Box<dyn wasm::traits::HttpContext>> {
45 | Some(Box::new(HttpHeaders { context_id }))
46 | }
47 | }
48 |
49 | const TEAPOT_ASCII: &[u8; 295] = b"I'm a teapot
50 |
51 | (
52 | _ ) )
53 | _,(_)._ ((
54 | ___,(_______). )
55 | ,'__. / \\ /\\_
56 | /,' / |\"\"| \\ / /
57 | | | | |__| |,' /
58 | \\`.| /
59 | `. : : /
60 | `. :.,'
61 | `-.________,-'
62 | ";
63 |
64 | impl wasm::traits::Context for HttpHeaders {}
65 |
66 | impl wasm::traits::HttpContext for HttpHeaders {
67 | fn on_http_request_headers(&mut self, _: usize, _: bool) -> wasm::types::Action {
68 | info!("on_http_request_headers: {}", self.context_id);
69 | for (name, value) in &self.get_http_request_headers() {
70 | trace!("#{} - {} = {}", self.context_id, name, value);
71 | }
72 |
73 | match self.get_http_request_header(":path") {
74 | Some(path) if path == "/get" => {
75 | info!("on_http_request_headers: {} - /get intercepted", self.context_id);
76 | self.send_http_response(
77 | 418,
78 | vec![("x-powered-by", "rust"), ("content-type", "text/plain")],
79 | Some(TEAPOT_ASCII),
80 | );
81 | Action::Pause
82 | }
83 | _ => Action::Continue,
84 | }
85 | }
86 |
87 | fn on_log(&mut self) {
88 | info!("#{} completed.", self.context_id);
89 | }
90 | }
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/wasmplugin-broken.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions.istio.io/v1alpha1
2 | kind: WasmPlugin
3 | metadata:
4 | name: httpbin-rust-test
5 | namespace: httpbin
6 | spec:
7 | #selector:
8 | # matchLabels:
9 | # app: httpbin
10 | # Define traffic selector match conditions
11 | # All conditions must evaluate to true for the plugin to be applied
12 | match:
13 | # Define the workload mode, valid options are: SERVER, CLIENT, CLIENT_AND_SERVER
14 | # CLIENT and SERVER are analogous to inbound and outbound traffic
15 | # Gateways should set CLIENT
16 | # Default is CLIENT_AND_SERVER
17 | - mode: SERVER
18 | # Define port numbers to match. If not specified, all ports are matched
19 | # If any one port matches, this condition evaluates to true
20 | - ports:
21 | - number: 81
22 | url: oci://docker.io/dhawton/wasm-rust-test:v1
23 | imagePullPolicy: Always
--------------------------------------------------------------------------------
/41-what-is-new-istio-1.16/demo/wasm/wasmplugin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions.istio.io/v1alpha1
2 | kind: WasmPlugin
3 | metadata:
4 | name: httpbin-rust-test
5 | namespace: httpbin
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: httpbin
10 | # Define traffic selector match conditions
11 | # All conditions must evaluate to true for the plugin to be applied
12 | match:
13 | # Define the workload mode, valid options are: SERVER, CLIENT, CLIENT_AND_SERVER
14 | # CLIENT and SERVER are analogous to inbound and outbound traffic
15 | # Gateways should set CLIENT
16 | # Default is CLIENT_AND_SERVER
17 | - mode: SERVER
18 | # Define port numbers to match. If not specified, all ports are matched
19 | # If any one port matches, this condition evaluates to true
20 | - ports:
21 | - number: 80
22 | url: oci://docker.io/dhawton/wasm-rust-test:v1
23 | imagePullPolicy: Always
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/clusterspiffeid.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: spire.spiffe.io/v1alpha1
2 | kind: ClusterSPIFFEID
3 | metadata:
4 | name: service-account-spiffeid
5 | spec:
6 | spiffeIDTemplate: "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}"
7 | podSelector:
8 | matchLabels:
9 | spiffe.io/spire-managed-identity: "true"
10 |
11 | # OTHER EXAMPLES:
12 | # apiVersion: spire.spiffe.io/v1alpha1
13 | # kind: ClusterSPIFFEID
14 | # metadata:
15 | # name: backend-workloads
16 | # spec:
17 | # spiffeIDTemplate: "spiffe://domain.test/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}"
18 | # podSelector:
19 | # matchLabels:
20 | # banking: "true"
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/csidriver.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: CSIDriver
3 | metadata:
4 | name: "csi.spiffe.io"
5 | spec:
6 | # Only ephemeral, inline volumes are supported. There is no need for a
7 | # controller to provision and attach volumes.
8 | attachRequired: false
9 |
10 | # Request the pod information which the CSI driver uses to verify that an
11 | # ephemeral mount was requested.
12 | podInfoOnMount: true
13 |
14 | # Don't change ownership on the contents of the mount since the Workload API
15 | # Unix Domain Socket is typically open to all (i.e. 0777).
16 | fsGroupPolicy: None
17 |
18 | # Declare support for ephemeral volumes only.
19 | volumeLifecycleModes:
20 | - Ephemeral
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/deploy-prereqs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | kubectl create ns spire
6 |
7 | kubectl apply -f csidriver.yaml
8 | kubectl apply -f crds.yaml
9 | kubectl apply -f spire-controller-manager-config.yaml
10 | kubectl apply -f spire-controller-manager-webhook.yaml
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/istio-spire-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: install.istio.io/v1alpha1
2 | kind: IstioOperator
3 | metadata:
4 | namespace: istio-system
5 | spec:
6 | profile: default
7 | meshConfig:
8 | # TODO: Update trust domain here
9 | trustDomain: "hoot.solo.io"
10 | values:
11 | # This is used to customize the sidecar template, to allow mounting the Workload API socket.
12 | sidecarInjectorWebhook:
13 | templates:
14 | spire: |
15 | spec:
16 | containers:
17 | - name: istio-proxy
18 | volumeMounts:
19 | - name: workload-socket
20 | mountPath: /run/secrets/workload-spiffe-uds
21 | readOnly: true
22 | volumes:
23 | - name: workload-socket
24 | csi:
25 | driver: "csi.spiffe.io"
26 | readOnly: true
27 | components:
28 | ingressGateways:
29 | - name: istio-ingressgateway
30 | enabled: true
31 | label:
32 | istio: ingressgateway
33 | k8s:
34 | overlays:
35 | - apiVersion: apps/v1
36 | kind: Deployment
37 | name: istio-ingressgateway
38 | patches:
39 | - path: spec.template.spec.volumes.[name:workload-socket]
40 | value:
41 | name: workload-socket
42 | csi:
43 | driver: "csi.spiffe.io"
44 | readOnly: true
45 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
46 | value:
47 | name: workload-socket
48 | mountPath: "/run/secrets/workload-spiffe-uds"
49 | readOnly: true
50 | - path: spec.template.spec.initContainers
51 | value:
52 | - name: wait-for-spire-socket
53 | image: busybox:1.28
54 | volumeMounts:
55 | - name: workload-socket
56 | mountPath: /run/secrets/workload-spiffe-uds
57 | readOnly: true
58 | env:
59 | - name: CHECK_FILE
60 | value: /run/secrets/workload-spiffe-uds/socket
61 | command:
62 | - sh
63 | - "-c"
64 | - |-
65 | echo `date -Iseconds` Waiting for: ${CHECK_FILE}
66 | while [[ ! -e ${CHECK_FILE} ]] ; do
67 | echo `date -Iseconds` File does not exist: ${CHECK_FILE}
68 | sleep 15
69 | done
70 | ls -l ${CHECK_FILE}
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/sleep-spire.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | # Injects custom sidecar template
51 | annotations:
52 | inject.istio.io/templates: "sidecar,spire"
53 | spec:
54 | terminationGracePeriodSeconds: 0
55 | serviceAccountName: sleep
56 | containers:
57 | - name: sleep
58 | image: curlimages/curl
59 | command: ["/bin/sleep", "infinity"]
60 | imagePullPolicy: IfNotPresent
61 | volumeMounts:
62 | - name: tmp
63 | mountPath: /tmp
64 | securityContext:
65 | runAsUser: 1000
66 | volumes:
67 | - name: tmp
68 | emptyDir: {}
69 | ---
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/spire-controller-manager-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: spire-controller-manager-config
5 | namespace: spire
6 | data:
7 | spire-controller-manager-config.yaml: |
8 | apiVersion: spire.spiffe.io/v1alpha1
9 | kind: ControllerManagerConfig
10 | metrics:
11 | bindAddress: 127.0.0.1:8082
12 | healthProbe:
13 | bindAddress: 127.0.0.1:8083
14 | leaderElection:
15 | leaderElect: true
16 | resourceName: 98c9c988.spiffe.io
17 | resourceNamespace: spire
18 | # TODO: Update cluster name and trust domain here
19 | clusterName: hoot-spire
20 | trustDomain: hoot.solo.io
21 | ignoreNamespaces:
22 | - kube-system
23 | - kube-public
24 | - spire
25 | - local-path-storage
--------------------------------------------------------------------------------
/44-overview-of-spire/demo/spire-controller-manager-webhook.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: admissionregistration.k8s.io/v1
2 | kind: ValidatingWebhookConfiguration
3 | metadata:
4 | name: spire-controller-manager-webhook
5 | webhooks:
6 | - admissionReviewVersions: ["v1"]
7 | clientConfig:
8 | service:
9 | name: spire-controller-manager-webhook-service
10 | namespace: spire
11 | path: /validate-spire-spiffe-io-v1alpha1-clusterfederatedtrustdomain
12 | failurePolicy: Fail
13 | name: vclusterfederatedtrustdomain.kb.io
14 | rules:
15 | - apiGroups: ["spire.spiffe.io"]
16 | apiVersions: ["v1alpha1"]
17 | operations: ["CREATE", "UPDATE"]
18 | resources: ["clusterfederatedtrustdomains"]
19 | sideEffects: None
20 | - admissionReviewVersions: ["v1"]
21 | clientConfig:
22 | service:
23 | name: spire-controller-manager-webhook-service
24 | namespace: spire
25 | path: /validate-spire-spiffe-io-v1alpha1-clusterspiffeid
26 | failurePolicy: Fail
27 | name: vclusterspiffeid.kb.io
28 | rules:
29 | - apiGroups: ["spire.spiffe.io"]
30 | apiVersions: ["v1alpha1"]
31 | operations: ["CREATE", "UPDATE"]
32 | resources: ["clusterspiffeids"]
33 | sideEffects: None
--------------------------------------------------------------------------------
/47-certificates-in-istio/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 47 - Certificates in Istio
2 |
3 | ## Recording
4 | https://www.youtube.com/watch?v=hD7L-haWJew
5 |
6 | ## Tools used
7 | - [cert-manager](https://cert-manager.io/)
8 | - [istio-csr](https://github.com/cert-manager/istio-csr)
--------------------------------------------------------------------------------
/50-kube-networking-cilium/files/kind-iptables.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | - role: worker
6 | - role: worker
--------------------------------------------------------------------------------
/50-kube-networking-cilium/files/kind-ipvs.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | networking:
4 | kubeProxyMode: "ipvs"
5 | nodes:
6 | - role: control-plane
7 | - role: worker
8 | - role: worker
9 |
--------------------------------------------------------------------------------
/50-kube-networking-cilium/files/kind-nocni.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | - role: worker
6 | - role: worker
7 | networking:
8 | disableDefaultCNI: true
9 | kubeProxyMode: none
--------------------------------------------------------------------------------
/51-kube-networking-cilium-2/files/cilium-values.yaml:
--------------------------------------------------------------------------------
1 | operator:
2 | prometheus:
3 | enabled: true
4 | kubeProxyReplacement: strict
5 | k8sServiceHost: kind-control-plane
6 | k8sServicePort: 6443
7 | hubble:
8 | enabled: true
9 | relay:
10 | enabled: true
11 | prometheus:
12 | enabled: true
13 | ui:
14 | enabled: true
15 | metrics:
16 | enableOpenMetrics: true
17 | enabled:
18 | - dns
19 | - drop
20 | - tcp
21 | - icmp
22 | - "httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction;sourceContext=workload-name|reserved-identity;destinationContext=workload-name|reserved-identity"
23 |
--------------------------------------------------------------------------------
/51-kube-networking-cilium-2/files/kind-nocni.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | - role: worker
6 | - role: worker
7 | networking:
8 | disableDefaultCNI: true
9 | kubeProxyMode: none
--------------------------------------------------------------------------------
/51-kube-networking-cilium-2/files/sleep.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | spec:
51 | terminationGracePeriodSeconds: 0
52 | serviceAccountName: sleep
53 | containers:
54 | - name: sleep
55 | image: curlimages/curl:8.1.1
56 | command: ["/bin/sleep", "infinity"]
57 | imagePullPolicy: IfNotPresent
58 | volumeMounts:
59 | - mountPath: /etc/sleep/tls
60 | name: secret-volume
61 | volumes:
62 | - name: secret-volume
63 | secret:
64 | secretName: sleep-secret
65 | optional: true
66 | ---
--------------------------------------------------------------------------------
/52-opa/files/authz/data.json:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/52-opa/files/authz/policy.rego:
--------------------------------------------------------------------------------
1 | package istio.authz
2 |
3 | import future.keywords
4 | import input.attributes.request.http as http_request
5 |
6 | default allow := false
7 |
8 | # Allow if token is valid, not expired, and if the action is allowed
9 | allow if {
10 | is_token_valid
11 | not is_token_expired
12 | action_allowed
13 | }
14 |
15 | # Token is valid if the signature is valid
16 | is_token_valid if {
17 | print("entering is_token_valid")
18 | # Verify the signature (note the secret shouldn't be hardcoded like this!)
19 | v := io.jwt.verify_hs256(token, "qwertyuiopasdfghjklzxcvbnm123456")
20 | print("is_token_valid result", v)
21 | v == true
22 | }
23 |
24 | # Check whether the token is expired
25 | is_token_expired if {
26 | # Check the expiration date
27 | now := time.now_ns() / 1000000000
28 | now > token_payload.exp
29 | }
30 |
31 | # Admin role, if the role in the token payload is admin
32 | is_admin if {
33 | token_payload.role == "admin"
34 | }
35 |
36 | # Guest role, if the role in the token payload is guest
37 | is_guest if {
38 | token_payload.role == "guest"
39 | }
40 |
41 | # Solo audience, if the audience in the token payload is www.solo.io
42 | is_solo_audience if {
43 | # Check the audience
44 | token_payload.aud == "www.solo.io"
45 | }
46 |
47 | # Action is allowed if:
48 | # - aud is set to www.solo.io and
49 | # - role is set to admin and
50 | # - method is POST and
51 | # - path is /post and
52 | # - schema is valid
53 | action_allowed if {
54 | is_solo_audience
55 | is_admin
56 | http_request.method == "POST"
57 | http_request.path == "/post"
58 | is_schema_valid
59 | }
60 |
61 | # Action is allowed if:
62 | # - aud is set to www.solo.io and
63 | # - role is set to guest and
64 | # - method is GET and
65 | # - path is /headers
66 | action_allowed if {
67 | is_solo_audience
68 | is_guest
69 | http_request.method == "GET"
70 | http_request.path == "/headers"
71 | }
72 |
73 | # Schema is valid if the body matches the schema
74 | is_schema_valid if {
75 | [match, _] := json.match_schema(http_request.body, schema)
76 | match == true
77 | }
78 |
79 | # Decode the token and return the payload
80 | token_payload := payload if {
81 | [_, payload, _] := io.jwt.decode(token)
82 | }
83 |
84 | # Get the token from the Authorization header
85 | token := t if {
86 | # "Authorization": "Bearer "
87 | t := split(http_request.headers.authorization, " ")[1]
88 | }
89 |
90 | # Schema to validate the request body against
91 | schema := {
92 | "properties": {"id": {"type": "string"}},
93 | "required": ["id"],
94 | }
95 |
--------------------------------------------------------------------------------
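In the demo this policy is evaluated by the OPA-Envoy sidecar (see `opa-cm.yaml` and `opa-bundle.yaml` below), but it can also be exercised locally. A minimal sketch using OPA's Go API (`github.com/open-policy-agent/opa/rego`, not part of this repo; it assumes a recent OPA version that ships `json.match_schema` and that you run it next to `policy.rego`), feeding an input shaped like the Envoy ext_authz request the policy reads:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	policy, err := os.ReadFile("policy.rego")
	if err != nil {
		panic(err)
	}

	// Mirrors the input.attributes.request.http.* fields the policy inspects.
	input := map[string]interface{}{
		"attributes": map[string]interface{}{
			"request": map[string]interface{}{
				"http": map[string]interface{}{
					"method": "GET",
					"path":   "/headers",
					"headers": map[string]interface{}{
						"authorization": "Bearer " + os.Getenv("TOKEN"),
					},
				},
			},
		},
	}

	pq, err := rego.New(
		rego.Query("data.istio.authz.allow"),
		rego.Module("policy.rego", string(policy)),
	).PrepareForEval(context.Background())
	if err != nil {
		panic(err)
	}

	rs, err := pq.Eval(context.Background(), rego.EvalInput(input))
	if err != nil {
		panic(err)
	}
	fmt.Println("allow:", rs.Allowed())
}
```

Run it as `TOKEN=<one of the JWTs from files/tokens.md> go run .`; a guest token that has not yet expired should produce `allow: true` for `GET /headers`.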
/52-opa/files/opa-bundle.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: opa
5 | namespace: opa
6 | labels:
7 | app: opa
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: opa
13 | template:
14 | metadata:
15 | labels:
16 | app: opa
17 | spec:
18 | containers:
19 | - name: opa
20 | image: openpolicyagent/opa:0.53.1-envoy-2-static
21 | securityContext:
22 | runAsUser: 1111
23 | volumeMounts:
24 | - name: opa-config
25 | mountPath: /config
26 | readOnly: true
27 | args:
28 | - "run"
29 | - "--server"
30 | - "--config-file=/config/config.yaml"
31 | - "--addr=0.0.0.0:8181"
32 | - "--diagnostic-addr=0.0.0.0:8282"
33 | - "--ignore=.*"
34 | volumes:
35 | - name: opa-config
36 | configMap:
37 | name: opa-config
38 | ---
39 | apiVersion: v1
40 | kind: Service
41 | metadata:
42 | name: opa
43 | namespace: opa
44 | labels:
45 | app: opa
46 | spec:
47 | ports:
48 | - name: grpc
49 | port: 9191
50 | targetPort: 9191
51 | selector:
52 | app: opa
53 | ---
54 | apiVersion: v1
55 | kind: ConfigMap
56 | metadata:
57 | name: opa-config
58 | namespace: opa
59 | data:
60 | config.yaml: |
61 | services:
62 | - name: controller
63 | url: https://www.openpolicyagent.org
64 | # The bundle below is downloaded from this "google" service entry:
65 | - name: google
66 | url: # e.g. https://storage.googleapis.com/opademo
67 | bundles:
68 | authz:
69 | service: google
70 | # This will read the bundle from e.g. https://storage.googleapis.com/opademo/opa-bundles/bundle.tar.gz
71 | resource: # e.g. opa-bundles/bundle.tar.gz
72 | polling:
73 | min_delay_seconds: 30
74 | max_delay_seconds: 60
75 | plugins:
76 | envoy_ext_authz_grpc:
77 | path: istio/authz/allow
78 | decision_logs:
79 | console: true
80 | status:
81 | console: true
--------------------------------------------------------------------------------
/52-opa/files/opa-cm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: opa
5 | namespace: opa
6 | labels:
7 | app: opa
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: opa
13 | template:
14 | metadata:
15 | labels:
16 | app: opa
17 | spec:
18 | containers:
19 | - name: opa
20 | image: openpolicyagent/opa:0.53.1-envoy-2-static
21 | securityContext:
22 | runAsUser: 1111
23 | volumeMounts:
24 | - readOnly: true
25 | mountPath: /policy
26 | name: opa-policy
27 | args:
28 | - "run"
29 | - "--server"
30 | - "--addr=0.0.0.0:8181"
31 | - "--diagnostic-addr=0.0.0.0:8282"
32 | # This path has to match the package and rule name in the policy file
33 | - "--set=plugins.envoy_ext_authz_grpc.path=istio/authz/allow"
34 | - "--set=decision_logs.console=true"
35 | - "--set=status.console=true"
36 | - "--ignore=.*"
37 | - "/policy/policy.rego"
38 | volumes:
39 | - name: opa-policy
40 | configMap:
41 | name: opa-policy
42 | ---
43 | apiVersion: v1
44 | kind: Service
45 | metadata:
46 | name: opa
47 | namespace: opa
48 | labels:
49 | app: opa
50 | spec:
51 | ports:
52 | - name: grpc
53 | port: 9191
54 | targetPort: 9191
55 | selector:
56 | app: opa
57 | ---
58 | apiVersion: v1
59 | kind: ConfigMap
60 | metadata:
61 | name: opa-policy
62 | namespace: opa
63 | data:
64 | policy.rego: |
65 | package istio.authz
66 |
67 | import future.keywords
68 | import input.attributes.request.http as http_request
69 |
70 | default allow := false
71 |
72 | allow if {
73 | http_request.method == "GET"
74 | http_request.path == "/headers"
75 | }
--------------------------------------------------------------------------------
/52-opa/files/tokens.md:
--------------------------------------------------------------------------------
1 | Created from: http://jwtbuilder.jamiekurtz.com/
2 |
3 |
4 | ### Peter (admin)
5 |
6 | ```console
7 | eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJzb2xvLmlvIiwiaWF0IjoxNjg3NDY5NzU1LCJleHAiOjE3MTkwMDc3ODIsImF1ZCI6Ind3dy5zb2xvLmlvIiwic3ViIjoicGV0ZXJAc29sby5pbyIsIkdpdmVuTmFtZSI6IlBldGVyIiwicm9sZSI6ImFkbWluIn0.8OwUnlJUoW0eBOtA6tK7fBfAGzXOkiCcttwSkmZTVgY
8 | ```
9 |
10 | ```json
11 | {
12 | "iss": "solo.io",
13 | "iat": 1687469755,
14 | "exp": 1719007782,
15 | "aud": "www.solo.io",
16 | "sub": "peter@solo.io",
17 | "GivenName": "Peter",
18 | "role": "admin"
19 | }
20 | ```
21 |
22 | ### Paul (non-admin)
23 |
24 | ```console
25 | eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJzb2xvLmlvIiwiaWF0IjoxNjg3NDY5NzU1LCJleHAiOjE3MTkwMDc3ODIsImF1ZCI6Ind3dy5zb2xvLmlvIiwic3ViIjoicGF1bEBzb2xvLmlvIiwiR2l2ZW5OYW1lIjoiUGF1bCIsInJvbGUiOiJndWVzdCJ9.JMbwsbPBS6_9wPQtbZ9jVqr3hHme2VUJzYShhhQudnQ
26 | ```
27 |
28 | ```json
29 | {
30 | "iss": "solo.io",
31 | "iat": 1687469755,
32 | "exp": 1719007782,
33 | "aud": "www.solo.io",
34 | "sub": "paul@solo.io",
35 | "GivenName": "Paul",
36 | "role": "guest"
37 | }
38 | ```
39 |
40 | ### Expired admin token
41 |
42 | ```
43 | eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJzb2xvLmlvIiwiaWF0IjoxNjg3NDY5NzU1LCJleHAiOjE2ODc0NzIyNzcsImF1ZCI6Ind3dy5zb2xvLmlvIiwic3ViIjoicGV0ZXJAc29sby5pbyIsIkdpdmVuTmFtZSI6IlBldGVyIiwicm9sZSI6ImFkbWluIn0.An4P2MfQJD40frOSOMZC0ar-N-R7YjseG5RIJ8EBxn0
44 | ```
45 |
46 | ```json
47 | {
48 | "iss": "solo.io",
49 | "iat": 1687469755,
50 | "exp": 1687472277,
51 | "aud": "www.solo.io",
52 | "sub": "peter@solo.io",
53 | "GivenName": "Peter",
54 | "role": "admin"
55 | }
56 | ```
57 |
58 | ### Non-solo token
59 |
60 |
61 | ```
62 | eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJzb2xvLmlvIiwiaWF0IjoxNjg3NDY5NzU1LCJleHAiOjE3MTkwMDgzMTgsImF1ZCI6Ind3dy5ibGFoLmlvIiwic3ViIjoicGV0ZXJAc29sby5pbyIsIkdpdmVuTmFtZSI6IlBldGVyIiwicm9sZSI6ImFkbWluIn0.MmYP_VhcihkusQXTS6hD1oNET0Pxj4HfmohbOH6v0zo
63 | ```
64 |
65 | ```json
66 | {
67 | "iss": "solo.io",
68 | "iat": 1687469755,
69 | "exp": 1719008318,
70 | "aud": "www.blah.io",
71 | "sub": "peter@solo.io",
72 | "GivenName": "Peter",
73 | "role": "admin"
74 | }
75 | ```
76 |
--------------------------------------------------------------------------------
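The tokens above are HS256-signed with the same hardcoded secret the policy verifies against (`files/authz/policy.rego`), so they can also be checked outside OPA. Below is a small sketch using `github.com/golang-jwt/jwt/v5` (an assumption; that library is not used anywhere in this repo) mirroring the policy's `is_token_valid` and `is_token_expired` checks. Note the `exp` claims above are mid-2024, so the "valid" tokens may have expired by the time you try this.

```go
package main

import (
	"fmt"
	"os"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Same secret the demo policy uses; never hardcode secrets like this in real code.
	const secret = "qwertyuiopasdfghjklzxcvbnm123456"

	tok, err := jwt.Parse(os.Getenv("TOKEN"), func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
		}
		return []byte(secret), nil
	})
	if err != nil {
		// An expired token lands here too: v5 validates "exp" by default.
		fmt.Println("invalid token:", err)
		return
	}

	claims := tok.Claims.(jwt.MapClaims)
	fmt.Println("sub:", claims["sub"], "role:", claims["role"], "aud:", claims["aud"])
}
```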
/53-ambient-cut-sm-cost/SHOWNOTES.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 53 - Cut Service Mesh Overhead by 90% or More with Istio Ambient Mesh
2 |
3 | Are you concerned about your service mesh overhead? Want to spend less on cloud infrastructure resources for your service mesh? Join us in this upcoming hoot livestream, where Greg Hanson and Lin Sun, maintainers of Istio, provide the latest update on how Istio ambient mesh can cut service mesh overhead significantly with the new Rust-based ztunnel.
4 |
5 | News:
6 |
7 | - Istio 1.18 is out as of June 7, and ambient is included in the release!
8 | - Istio Day is calling for submissions!
9 | - Istio CNCF graduation is progressing well and has passed the required TOC votes.
10 |
11 | Bring in Greg: quick intro
12 |
13 | Q/A with Greg:
14 | - Can you explain what service mesh overheads are?
15 | - in terms of sidecars
16 | - in terms of ambient
17 | - What are the savings with ambient from your testing?
18 | - explain resource usage vs allocation
19 | - what do you think are the key reasons for these savings from ambient when comparing with sidecars?
20 | - Can you walk through the details of your testing?
21 | - Conclusion.
22 |
23 | Thanks everyone for joining, see you in 2 weeks!
24 |
25 |
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/57-whats-new-in-istio-1-19/README.md:
--------------------------------------------------------------------------------
1 | # Hoot Episode 57 - What's new in Istio 1.19.0
2 |
3 | ## Recording
4 |
5 | https://www.youtube.com/watch?v=tHuSirVv3rc
6 |
7 | ## Demo
8 |
9 | Lab environments with demos are available at [Solo Academy](https://academy.solo.io/learn/courses/73/explore-new-features-in-istio-119).
--------------------------------------------------------------------------------
/58-bgpandcilium/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/67-envoy-extproc/.gitignore:
--------------------------------------------------------------------------------
1 | logs/*
2 | envoy
3 |
--------------------------------------------------------------------------------
/67-envoy-extproc/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: run-servers
2 | run-servers: run-processor run-httptarget run-envoy
3 |
4 | .PHONY: restart-servers
5 | restart-servers: restart-processor restart-httptarget restart-envoy
6 |
7 | .PHONY: stop-servers
8 | stop-servers: stop-processor stop-httptarget stop-envoy
9 |
10 | .PHONY: run-processor
11 | run-processor:
12 | go run ./pkg/processor -s /tmp/processor.sock -d 1>logs/processor.log 2>&1 &
13 |
14 | .PHONY: stop-processor
15 | stop-processor:
16 | kill -s SIGINT $(shell pidof processor)
17 |
18 | .PHONY: restart-processor
19 | restart-processor: stop-processor
20 | restart-processor: run-processor
21 |
22 | .PHONY: run-httptarget
23 | run-httptarget:
24 | go run ./pkg/httptarget -p 9001 -d 1>logs/httptarget.log 2>&1 &
25 |
26 | .PHONY: stop-httptarget
27 | stop-httptarget:
28 | kill -s SIGINT $(shell pidof httptarget)
29 |
30 | .PHONY: restart-httptarget
31 | restart-httptarget: stop-httptarget
32 | restart-httptarget: run-httptarget
33 |
34 | .PHONY: download-envoy
35 | download-envoy:
36 | ([ ! -e ./envoy ] && \
37 | docker run -d --entrypoint /bin/sh --name get-envoy-binary envoyproxy/envoy:dev && \
38 | docker cp get-envoy-binary:/usr/local/bin/envoy ./envoy && \
39 | docker stop get-envoy-binary && \
40 | docker rm get-envoy-binary) || echo "envoy exists or copy from docker failed"
41 |
42 | .PHONY: run-envoy
43 | run-envoy: download-envoy
44 | mkdir -p ./logs
45 | ./envoy -c ./envoy.yaml --component-log-level 'ext_proc:debug' 1>logs/envoy.log 2>&1 &
46 |
47 | .PHONY: stop-envoy
48 | stop-envoy:
49 | kill -s SIGINT $(shell pidof envoy)
50 |
51 | .PHONY: restart-envoy
52 | restart-envoy: stop-envoy
53 | restart-envoy: run-envoy
54 |
--------------------------------------------------------------------------------
/67-envoy-extproc/README.md:
--------------------------------------------------------------------------------
1 | This is demonstration code used for [Hoot 67](https://www.linkedin.com/events/7160706454570577920/comments/)
2 |
3 | Most of this is adapted from https://github.com/GoogleCloudPlatform/envoy-processor-examples
4 |
5 | Notable adjustments from the GCP example:
6 | - Processing server uses a unix socket instead of TCP
7 | - A few additional processing targets were added to the processing server
8 |
9 |
10 | The three servers can be run with `make run-servers`, which launches them in the shell and writes their logs to the `./logs` directory.
11 |
12 | Some of the examples used in the demo:
13 |
14 | The following will add headers to both the request AND the response containing the hash of the body
15 | ```shell
16 | curl -v -X POST -d 'hello, world' http://127.0.0.1:10000/echohashbuffered
17 | ```
18 | The following will block the request since it comes from a source address beginning with `127`
19 | ```shell
20 | curl -v http://127.0.0.1:10000/blockLocalHost
21 | ```
22 | The following will set dynamic metadata in the filter, which can be seen by logging it in the access log
23 | ```shell
24 | curl -v http://127.0.0.1:10000/dynamicMetadata -H 'x-set-metadata: scoobydoo'
25 | ```
--------------------------------------------------------------------------------
/67-envoy-extproc/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/jbohanon/ext-proc-demo
2 |
3 | go 1.22.0
4 |
5 | require (
6 | github.com/envoyproxy/go-control-plane v0.12.1-0.20240217204037-bc093a22968f
7 | github.com/julienschmidt/httprouter v1.3.0
8 | github.com/tetratelabs/proxy-wasm-go-sdk v0.23.0
9 | go.uber.org/zap v1.26.0
10 | google.golang.org/grpc v1.61.1
11 | )
12 |
13 | require (
14 | github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect
15 | github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
16 | github.com/golang/protobuf v1.5.3 // indirect
17 | github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect
18 | go.uber.org/multierr v1.10.0 // indirect
19 | golang.org/x/net v0.18.0 // indirect
20 | golang.org/x/sys v0.14.0 // indirect
21 | golang.org/x/text v0.14.0 // indirect
22 | google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
23 | google.golang.org/protobuf v1.32.0 // indirect
24 | )
25 |
--------------------------------------------------------------------------------
/67-envoy-extproc/pkg/httptarget/main.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package main
16 |
17 | import (
18 | "flag"
19 | "fmt"
20 | "net/http"
21 | "os"
22 |
23 | "go.uber.org/zap"
24 | "golang.org/x/net/http2"
25 | "golang.org/x/net/http2/h2c"
26 | )
27 |
28 | var logger *zap.SugaredLogger = nil
29 |
30 | func main() {
31 | var doHelp bool
32 | var debug bool
33 | var port int
34 |
35 | flag.BoolVar(&doHelp, "h", false, "Print help message")
36 | flag.BoolVar(&debug, "d", false, "Enable debug logging")
37 | flag.IntVar(&port, "p", -1, "TCP listen port")
38 | flag.Parse()
39 |
40 | if !flag.Parsed() || doHelp || port < 0 {
41 | flag.PrintDefaults()
42 | os.Exit(2)
43 | }
44 |
45 | var err error
46 | var zapLogger *zap.Logger
47 | if debug {
48 | zapLogger, err = zap.NewDevelopment()
49 | } else {
50 | zapLogger, err = zap.NewProduction()
51 | }
52 | if err != nil {
53 | panic(fmt.Sprintf("Can't initialize logger: %s", err))
54 | }
55 | logger = zapLogger.Sugar()
56 |
57 | // This extra stuff lets us support HTTP/2 without
58 | // TLS using the "h2c" extension.
59 | handler := createHandler()
60 | h2Server := http2.Server{}
61 | server := http.Server{
62 | Addr: fmt.Sprintf(":%d", port),
63 | Handler: h2c.NewHandler(handler, &h2Server),
64 | }
65 | server.ListenAndServe()
66 | }
67 |
--------------------------------------------------------------------------------
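The `h2c.NewHandler` wrapper above is what lets the target speak HTTP/2 over plaintext; a stock `http.Client` only does HTTP/1.1 against it. If you want to hit the target directly (port 9001, per the Makefile) rather than through Envoy, the client has to be forced onto h2c, roughly as in this sketch (the request path is illustrative, since the target's routes are defined in a handler file not reproduced here):

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{
		Transport: &http2.Transport{
			// Allow "http://" URLs and dial plain TCP instead of TLS (h2c).
			AllowHTTP: true,
			DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, network, addr)
			},
		},
	}

	resp, err := client.Get("http://127.0.0.1:9001/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Proto, resp.Status, string(body)) // expect HTTP/2.0
}
```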
/67-envoy-extproc/pkg/processor/main.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package main
16 |
17 | import (
18 | "flag"
19 | "fmt"
20 | "net"
21 | "os"
22 | "os/signal"
23 |
24 | extproc "github.com/envoyproxy/go-control-plane/envoy/service/ext_proc/v3"
25 | "go.uber.org/zap"
26 | "google.golang.org/grpc"
27 | )
28 |
29 | var logger *zap.SugaredLogger = nil
30 |
31 | func main() {
32 | var help bool
33 | var debug bool
34 | var port int
35 | var err error
36 | var sock string
37 |
38 | flag.IntVar(&port, "p", -1, "Listen port; only one of -p and -s may be specified")
39 | flag.BoolVar(&debug, "d", false, "Enable debug logging")
40 | flag.BoolVar(&help, "h", false, "Print help")
41 | flag.StringVar(&sock, "s", "", "Listen socket; only one of -p and -s may be specified")
42 | flag.Parse()
43 | if !flag.Parsed() || help || (port < 0 && sock == "") || (port > 0 && sock != "") {
44 | flag.PrintDefaults()
45 | os.Exit(2)
46 | }
47 |
48 | var zapLogger *zap.Logger
49 | if debug {
50 | zapLogger, err = zap.NewDevelopment()
51 | } else {
52 | zapLogger, err = zap.NewProduction()
53 | }
54 | if err != nil {
55 | panic(fmt.Sprintf("Can't initialize logger: %s", err))
56 | }
57 | logger = zapLogger.Sugar()
58 |
59 | var listener net.Listener
60 | if port > 0 {
61 | listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port))
62 | } else if sock != "" {
63 | listener, err = net.Listen("unix", sock)
64 | sigChan := make(chan os.Signal, 1)
65 | signal.Notify(sigChan, os.Interrupt)
66 | go func() {
67 | select {
68 | case <-sigChan:
69 | fmt.Printf("\nreceived interrupt; removing %s\n", sock)
70 | if err := os.Remove(sock); err != nil {
71 | fmt.Println(err)
72 | os.Exit(1)
73 | }
74 | os.Exit(0)
75 | }
76 | }()
77 | }
78 | if err != nil {
79 | logger.Fatalf("Can't listen on socket: %s", err)
80 | os.Exit(3)
81 | }
82 |
83 | server := grpc.NewServer()
84 | service := processorService{}
85 | extproc.RegisterExternalProcessorServer(server, &service)
86 |
87 | logger.Infof("Listening on %s", listener.Addr())
88 |
89 | server.Serve(listener)
90 | }
91 |
--------------------------------------------------------------------------------
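`main.go` registers a `processorService`, but its `Process` implementation lives in a file that is not reproduced here. To show the shape of such a handler, here is a bare-bones sketch against the same go-control-plane ext_proc API (this is not the repo's processor; the header mutation is made up, and it assumes Envoy's `processing_mode` only sends header messages):

```go
package main

import (
	"io"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	extproc "github.com/envoyproxy/go-control-plane/envoy/service/ext_proc/v3"
)

type processorService struct{}

// Process handles the bidirectional gRPC stream Envoy opens for each HTTP request.
func (s *processorService) Process(stream extproc.ExternalProcessor_ProcessServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		resp := &extproc.ProcessingResponse{}
		switch req.Request.(type) {
		case *extproc.ProcessingRequest_RequestHeaders:
			// Illustrative mutation: tag the request with an extra header.
			resp.Response = &extproc.ProcessingResponse_RequestHeaders{
				RequestHeaders: &extproc.HeadersResponse{
					Response: &extproc.CommonResponse{
						HeaderMutation: &extproc.HeaderMutation{
							SetHeaders: []*corev3.HeaderValueOption{{
								Header: &corev3.HeaderValue{Key: "x-ext-proc", Value: "seen"},
							}},
						},
					},
				},
			}
		case *extproc.ProcessingRequest_ResponseHeaders:
			// Acknowledge response headers without changing them.
			resp.Response = &extproc.ProcessingResponse_ResponseHeaders{
				ResponseHeaders: &extproc.HeadersResponse{},
			}
		}

		if resp.Response == nil {
			// Other phases (bodies, trailers) are not expected with this processing mode.
			continue
		}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}
```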
/images/hoot-background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/solo-io/hoot/478d9c6f5f2a2e9641e1a5056f922f5519fbea15/images/hoot-background.png
--------------------------------------------------------------------------------