├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── NOTICE
├── README.md
├── apps
├── catalog_detail
│ ├── .dockerignore
│ ├── Dockerfile
│ ├── app.js
│ ├── package-lock.json
│ ├── package.json
│ └── readiness.txt
├── frontend_node
│ ├── .dockerignore
│ ├── Dockerfile
│ ├── index.html
│ ├── package-lock.json
│ ├── package.json
│ ├── public
│ │ ├── arch.png
│ │ ├── architecture.png
│ │ └── css
│ │ │ └── styles.css
│ ├── server.js
│ └── views
│ │ └── index.ejs
└── product_catalog
│ ├── .dockerignore
│ ├── Dockerfile
│ ├── app.py
│ ├── app_aurora.py
│ ├── app_ebs.py
│ ├── app_efs.py
│ ├── app_secrets.py
│ ├── bootstrap.sh
│ ├── rds-combined-ca-bundle.pem
│ └── requirements.txt
├── images
├── 01-kiali-console.png
├── 01-kiali-traffic-flow.gif
├── 02-default-route-v1-traffic-distribution.png
├── 02-initial-traffic-distribution.png
├── 02-setup-mesh-resources.png
├── 02-shift-traffic-v1-path-traffic-distribution.png
├── 02-shift-traffic-v2-header-traffic-distribution.png
├── 02-shift-traffic-v2-path-traffic-distribution.png
├── 02-shift-traffic-v2-weight-traffic-distribution.png
├── 03-Retries.png
├── 03-circuitbreaking.png
├── 03-timeouts.png
├── 04-external-authorization.png
├── 04-fault-injection-app-snapshot.png
├── 04-kiali-auto-mtls-application-graph.png
├── 04-kiali-mast-head-lock-auto-mtls.png
├── 04-kiali-mast-head-lock-default-strict-mode.png
├── 04-peer-authentication-mtls-sidecar-connections.png
├── 04-request-authentication-1.png
├── 04-request-authentication-2.png
├── 04-request-authentication-3.png
├── 04-request-authentication-4.png
├── eks-istio-spire.png
├── istio-spire-1.png
├── istio-spire-2.png
├── istio-spire-3.png
├── istio-spire-4.png
└── istio-spire-5.png
├── modules
├── 00-setup-mesh-resources
│ ├── catalogdetail-destinationrule.yaml
│ ├── catalogdetail-virtualservice.yaml
│ ├── frontend-virtualservice.yaml
│ └── productcatalog-virtualservice.yaml
├── 01-getting-started
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── catalogdetail-deployment.yaml
│ │ ├── catalogdetail-sa.yaml
│ │ ├── catalogdetail-service.yaml
│ │ ├── catalogdetail2-deployment.yaml
│ │ ├── frontend-deployment.yaml
│ │ ├── frontend-sa.yaml
│ │ ├── frontend-service.yaml
│ │ ├── productapp-gateway.yaml
│ │ ├── productapp-virtualservice.yaml
│ │ ├── productcatalog-deployment.yaml
│ │ ├── productcatalog-sa.yaml
│ │ └── productcatalog-service.yaml
│ └── values.yaml
├── 02-traffic-management
│ ├── README.md
│ ├── header-based-routing
│ │ ├── catalogdetail-virtualservice.yaml
│ │ └── productcatalog-envoyfilter.yaml
│ ├── path-based-routing
│ │ └── catalogdetail-virtualservice.yaml
│ ├── route-traffic-to-version-v1
│ │ └── catalogdetail-virtualservice.yaml
│ ├── setup-mesh-resources
│ │ ├── catalogdetail-destinationrule.yaml
│ │ ├── catalogdetail-virtualservice.yaml
│ │ ├── frontend-virtualservice.yaml
│ │ └── productcatalog-virtualservice.yaml
│ ├── traffic-mirroring
│ │ └── catalogdetail-virtualservice.yaml
│ └── weight-based-routing
│ │ └── catalogdetail-virtualservice.yaml
├── 03-network-resiliency
│ ├── README.md
│ ├── fault-injection
│ │ ├── README.md
│ │ ├── abort
│ │ │ └── catalogdetail-virtualservice.yaml
│ │ └── delay
│ │ │ └── catalogdetail-virtualservice.yaml
│ ├── rate-limiting
│ │ ├── README.md
│ │ ├── global-ratelimit
│ │ │ ├── filter-ratelimit-svc.yaml
│ │ │ ├── filter-ratelimit.yaml
│ │ │ ├── global-ratelimit-config.yaml
│ │ │ └── global-ratelimit-service.yaml
│ │ └── local-ratelimit
│ │ │ └── local-ratelimit.yaml
│ └── timeouts-retries-circuitbreaking
│ │ ├── README.md
│ │ ├── circuitbreaking
│ │ └── catalogdetail-destinationrule.yaml
│ │ ├── retries
│ │ └── productcatalog-virtualservice.yaml
│ │ └── timeouts
│ │ ├── catalogdetail-virtualservice.yaml
│ │ └── productcatalog-virtualservice.yaml
└── 04-security
│ ├── README.md
│ ├── ingress-security
│ └── README.md
│ ├── opa-external-authorization
│ ├── README.md
│ ├── kustomization.yaml
│ ├── opa-ext-authz-serviceentry.yaml
│ ├── opa-ext-authz-sidecar-assign.yaml
│ ├── policy.rego
│ ├── policy_test.rego
│ └── productapp-authorizationpolicy.yaml
│ ├── peer-authentication
│ └── README.md
│ ├── request-authentication
│ ├── README.md
│ ├── ingress-authorizationpolicy.yaml
│ └── ingress-requestauthentication-template.yaml
│ ├── scripts
│ ├── cleanup-crds.sh
│ └── helpers.sh
│ └── terraform
│ ├── eks.tf
│ ├── istio.tf
│ ├── keycloak.tf
│ ├── lb.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ ├── versions.tf
│ ├── vpc.tf
│ └── workshop.tf
├── patterns
├── eks-istio-mesh-spire-federation
│ ├── README.md
│ ├── cert-manager
│ │ └── cert-rotation.yaml
│ ├── examples
│ │ ├── bookinfo-with-spire-template.yaml
│ │ ├── delete-helloworld.sh
│ │ ├── deploy-bookinfo.sh
│ │ ├── deploy-helloworld.sh
│ │ ├── helloworld-bar.yaml
│ │ ├── helloworld-foo.yaml
│ │ ├── helloworld-gateway.yaml
│ │ ├── sleep-bar.yaml
│ │ └── sleep-foo.yaml
│ ├── istio
│ │ ├── auth.yaml
│ │ ├── bar-istio-conf.yaml
│ │ ├── cleanup-istio.sh
│ │ ├── foo-istio-conf.yaml
│ │ ├── install-istio.sh
│ │ └── istio-ew-gw.yaml
│ ├── spire
│ │ ├── bar-spire.yaml
│ │ ├── cleanup-spire.sh
│ │ ├── configmaps.yaml
│ │ ├── foo-spire.yaml
│ │ └── install-spire.sh
│ └── terraform
│ │ ├── 0.vpc
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ ├── 1.foo-eks
│ │ ├── cert-manager-manifests
│ │ │ ├── istio-cert.yaml
│ │ │ └── self-signed-ca.yaml
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ └── 2.bar-eks
│ │ ├── cert-manager-manifests
│ │ ├── istio-cert.yaml
│ │ └── self-signed-ca.yaml
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── versions.tf
└── multi-cluster-multi-primary
│ ├── README.md
│ ├── istio-multi-cluster-architecture.png
│ ├── multi-network
│ ├── README.md
│ ├── charts
│ │ └── multicluster-gateway-n-apps
│ │ │ ├── Chart.yaml
│ │ │ ├── templates
│ │ │ ├── gateway.yaml
│ │ │ ├── helloworld-deployment.yaml
│ │ │ ├── helloworld-service.yaml
│ │ │ ├── remote-secret.yaml
│ │ │ ├── sleep-deployment.yaml
│ │ │ ├── sleep-service.yaml
│ │ │ └── sleep-serviceaccount.yaml
│ │ │ └── values.yaml
│ ├── data.tf
│ ├── eks_1.tf
│ ├── eks_2.tf
│ ├── locals.tf
│ ├── providers.tf
│ ├── root_cert.tf
│ ├── scripts
│ │ ├── check-cross-cluster-sync.sh
│ │ ├── check-lb-readiness.sh
│ │ ├── deploy.sh
│ │ ├── destroy.sh
│ │ └── set-cluster-contexts.sh
│ └── versions.tf
│ └── single-network
│ ├── README.md
│ ├── charts
│ └── multicluster-gateway-n-apps
│ │ ├── Chart.yaml
│ │ ├── templates
│ │ ├── gateway.yaml
│ │ ├── helloworld-deployment.yaml
│ │ ├── helloworld-service.yaml
│ │ ├── remote-secret.yaml
│ │ ├── sleep-deployment.yaml
│ │ ├── sleep-service.yaml
│ │ └── sleep-serviceaccount.yaml
│ │ └── values.yaml
│ ├── data.tf
│ ├── eks_1.tf
│ ├── eks_2.tf
│ ├── locals.tf
│ ├── providers.tf
│ ├── root_cert.tf
│ ├── scripts
│ ├── check-cross-cluster-sync.sh
│ ├── check-lb-readiness.sh
│ ├── deploy.sh
│ ├── destroy.sh
│ └── set-cluster-contexts.sh
│ ├── versions.tf
│ └── vpc.tf
└── terraform-blueprint
├── README.md
├── ambient
├── README.md
├── main.tf
├── outputs.tf
├── variables.tf
└── versions.tf
└── sidecar
├── README.md
├── main.tf
├── outputs.tf
├── variables.tf
└── versions.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_STORE
2 | node_modules
3 | .python-version
4 | venv
5 | __pycache__
6 |
7 | # Local .terraform directories
8 | **/.terraform
9 |
10 | # .tfstate files
11 | *.tfstate
12 | *.tfstate.*
13 |
14 | # Crash log files
15 | crash.log
16 | crash.*.log
17 |
18 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
19 | # password, private keys, and other secrets. These should not be part of version
20 | # control as they are data points which are potentially sensitive and subject
21 | # to change depending on the environment.
22 | *.tfvars
23 | *.tfvars.json
24 |
25 | # Ignore override files as they are usually used to override resources locally and so
26 | # are not checked in
27 | override.tf
28 | override.tf.json
29 | *_override.tf
30 | *_override.tf.json
31 |
32 | # Include override files you do wish to add to version control using negated pattern
33 | # !example_override.tf
34 |
35 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
36 | # example: *tfplan*
37 |
38 | # Ignore CLI configuration files
39 | .terraformrc
40 | terraform.rc
41 |
42 | *.hcl
43 |
44 | *.bak
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *main* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 |
18 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Istio on EKS
2 |
3 | Run your containerized workloads and microservices as part of a service-mesh
4 | with Istio on EKS! 🚀
5 |
6 | Istio plays a crucial role in enhancing and simplifying microservices-based
7 | application architectures by providing a powerful and comprehensive service mesh
8 | solution. Istio abstracts away many of the networking and security complexities
9 | in microservices-based applications, allowing developers to focus on business
10 | logic and application functionality. It provides a unified and robust platform
11 | for managing microservices at scale, improving reliability, security, and
12 | observability in the modern distributed application landscape.
13 |
14 | This repository, organized in modules, will guide you step-by-step in setting
15 | Istio on EKS and working with the most commonly observed service-mesh use cases.
16 |
17 | ## Modules
18 |
19 | #### [Getting Started](modules/01-getting-started/README.md)
20 | #### [Traffic Management](modules/02-traffic-management/README.md)
21 | #### [Network Resiliency](modules/03-network-resiliency/README.md)
22 | #### [Security](modules/04-security/README.md)
23 |
24 | ## Patterns
25 |
26 | #### [Multi-Primary, Multi-Network](patterns/multi-cluster-multi-primary/README.md)
27 | #### [Spiffe/Spire Federation between EKS clusters](patterns/eks-istio-mesh-spire-federation/README.md)
28 |
29 | ## Terraform Modules
30 |
31 | #### [Sidecar Istio deployment](patterns/terraform-blueprint/sidecar/README.md)
32 | #### [Ambient Istio deployment](patterns/terraform-blueprint/ambient/README.md)
33 |
34 | ## Contributions
35 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.
36 |
37 | ## License
38 | This library is licensed under the MIT-0 License. See the [LICENSE](LICENSE) file.
39 |
40 | ## 🙌 Community
41 | We welcome all individuals who are enthusiastic about Service Mesh and Istio patterns to become a part of this open source community. Your contributions and participation are invaluable to the success of this project.
42 |
43 | Built with ❤️ at AWS.
44 |
--------------------------------------------------------------------------------
/apps/catalog_detail/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | node_modules
3 | npm-debug.log
--------------------------------------------------------------------------------
/apps/catalog_detail/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | FROM node:14
4 |
5 | # Create app directory
6 | WORKDIR /usr/src/app
7 |
8 | # Install app dependencies
9 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
10 | # where available (npm@5+)
11 | COPY readiness.txt package.json package-lock.json ./
12 |
13 |
14 | RUN npm install
15 | # If you are building your code for production
16 | # RUN npm ci --only=production
17 |
18 | # Bundle app source
19 | COPY . .
20 |
21 | EXPOSE 3000
22 | CMD [ "node", "app.js" ]
--------------------------------------------------------------------------------
/apps/catalog_detail/app.js:
--------------------------------------------------------------------------------
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0

// Catalog-detail microservice: serves vendor detail for the product catalog
// and exposes endpoints that toggle a simulated fault (HTTP 500) so that
// mesh resiliency features can be demonstrated.
const express = require("express");
const XRay = require("aws-xray-sdk");
const AWS = XRay.captureAWS(require("aws-sdk")); // instrument AWS SDK calls for X-Ray
XRay.captureHTTPsGlobal(require("http"));        // instrument outbound HTTP for X-Ray
const http = require("http");
const os = require("os");

const app = express();

// Mutable health state shared by all routes: 200 = healthy, 500 = fault injected.
let responseStatus = 200;

app.use(XRay.express.openSegment('Product-Detail'));

// Returns the catalog detail payload, or "Error" while a fault is active.
app.get("/catalogDetail", (req, res, next) => {
  res.status(responseStatus);
  if (responseStatus === 200) {
    console.log("Catalog Detail Get Request Successful");
    res.json({ version: "1", vendors: ["ABC.com"] });
  } else {
    console.log("Catalog Detail Get Request has error 500");
    res.json("Error");
  }
});

// Health probe: reports the same status the data endpoint would return.
app.get("/ping", (req, res, next) => {
  res.status(responseStatus);
  if (responseStatus === 200) {
    res.json("Healthy");
  } else {
    console.log("Returning unhealthy");
    res.json("UnHealthy");
  }
});

// Switches this instance into the faulty state (all routes answer 500).
app.get("/injectFault", (req, res, next) => {
  const message = "host: " + os.hostname() + " will now respond with 500 error.";
  console.log(message);
  responseStatus = 500;
  res.status(500);
  next(new Error(message));
});

// Restores this instance to the healthy state.
app.get("/resetFault", (req, res, next) => {
  const message = "Removed fault injection from host: " + os.hostname();
  console.log(message);
  responseStatus = 200;
  res.json(message);
});

app.use(XRay.express.closeSegment());

app.listen(3000, () => {
  console.log("Server running on port 3000");
});
--------------------------------------------------------------------------------
/apps/catalog_detail/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "nodejs_app_2",
3 | "version": "1.0.0",
4 | "description": "catalog detail",
5 | "main": "app.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "Praseeda Sathaye",
10 | "license": "MIT-0",
11 | "dependencies": {
12 | "aws-sdk": "^2.657.0",
13 | "aws-xray-sdk": "^3.5.1",
14 | "express": "^4.18.2"
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/apps/catalog_detail/readiness.txt:
--------------------------------------------------------------------------------
1 | ready
--------------------------------------------------------------------------------
/apps/frontend_node/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | node_modules
3 | npm-debug.log
--------------------------------------------------------------------------------
/apps/frontend_node/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | FROM node:14
4 |
5 | # Create app directory
6 | WORKDIR /usr/src/app
7 |
8 | # Install app dependencies
9 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
10 | # where available (npm@5+)
11 | COPY package.json package-lock.json ./
12 |
13 | RUN npm install
14 | # If you are building your code for production
15 | # RUN npm ci --only=production
16 |
17 | # Bundle app source
18 | COPY . .
19 |
20 | EXPOSE 9000
21 | CMD [ "node", "server.js" ]
--------------------------------------------------------------------------------
/apps/frontend_node/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Product Catalog
6 |
7 |
8 | Product Catalog
9 |
10 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/apps/frontend_node/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "frontend-node",
3 | "version": "1.0.0",
4 | "description": "front end application for microservices in app mesh",
5 | "main": "index.js",
6 | "scripts": {
7 | "dev": "nodemon server.js",
8 | "test": "echo \"Error: no test specified\" && exit 1"
9 | },
10 | "author": "Praseeda Sathaye",
11 | "license": "MIT-0",
12 | "dependencies": {
13 | "aws-xray-sdk": "^3.5.1",
14 | "axios": "^0.28.0",
15 | "aws-sdk": "^2.657.0",
16 | "body-parser": "^1.20.2",
17 | "ejs": "^3.1.10",
18 | "express": "^4.19.2",
19 | "prom-client": "^12.0.0"
20 | },
21 | "devDependencies": {
22 | "nodemon": "^3.0.1"
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/apps/frontend_node/public/arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/apps/frontend_node/public/arch.png
--------------------------------------------------------------------------------
/apps/frontend_node/public/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/apps/frontend_node/public/architecture.png
--------------------------------------------------------------------------------
/apps/frontend_node/public/css/styles.css:
--------------------------------------------------------------------------------
/* Shared stylesheet for the Product Catalog frontend views. */

body {
  font-family: "Lato", "Helvetica", Helvetica, sans-serif;
  background: #E6E6E6;
  color: #000000; /* fixed: was "#000000B", an invalid 7-digit hex that browsers discard */
  padding: 15px;
  letter-spacing: 1px;
}
table, th, td {
  border: 1px solid black;
  border-collapse: collapse;
}
th, td {
  padding: 8px;
  text-align: left;
}
/* Zebra striping for product table rows. */
tr:nth-child(even){background-color: #F0F8FF}
tr:nth-child(odd){background-color: #E6E6FA}
th {
  background-color: #1E90FF;
  color: white;
}

h3 {
  color: #800000;
}
button {
  background-color: #00BFFF; /* blue */
  border: none;
  color: white;
  padding: 6px 9px;
  text-align: center;
  text-decoration: none;
  display: inline-block;
  font-size: 16px;
}
/* NOTE(review): "id" and "name" are not standard <input> type values; these
   selectors only match markup written literally as <input type="id"> /
   <input type="name">. Confirm against the EJS templates before renaming. */
input[type=id], select {
  width: 20%;
  padding: 5px 8px;
  margin: 8px 0;
  display: inline-block;
  border: 1px solid #ccc;
  border-radius: 4px;
  box-sizing: border-box;
}
input[type=name], select {
  width: 50%;
  padding: 5px 8px;
  margin: 8px 0;
  display: inline-block;
  border: 1px solid #ccc;
  border-radius: 4px;
  box-sizing: border-box;
}

input,
input::-webkit-input-placeholder {
  font-size: 16px;
  line-height: 1;
}
input:read-only {
  width: 50%;
  padding: 12px 15px;
  margin: 8px 0;
  display: inline-block;
  border: 1px solid #ccc;
  border-radius: 4px;
  box-sizing: border-box;
  color: #000000; /* fixed: was invalid "#000000B" */
  background-color: #E6E6FA;
}

mark.red {
  color:#ff0000;
  background: none;
}

mark.blue {
  color:#0000FF;
  background: none;
}

label{
  display: inline-block;
  vertical-align: middle;
  padding: 10px 5px;
  margin-right: 15px;
  color: #000000; /* fixed: was invalid "#000000B" */
  background-color: #E6E6FA;
}
--------------------------------------------------------------------------------
/apps/frontend_node/server.js:
--------------------------------------------------------------------------------
// Frontend web application: renders the product-catalog UI and proxies
// product create/update requests to the product-catalog backend service.
const express = require('express');
const bodyParser = require('body-parser');
const axios = require('axios');
const path = require("path");
const Prometheus = require('prom-client');

const XRay = require('aws-xray-sdk');
const AWS = XRay.captureAWS(require('aws-sdk')); // instrument AWS SDK calls for X-Ray
XRay.captureHTTPsGlobal(require('http'));        // instrument outbound HTTP for X-Ray
const http = require('http');

const app = express();

app.use(bodyParser.urlencoded({ extended: false }));
app.use(XRay.express.openSegment('Frontend'));

Prometheus.collectDefaultMetrics();

// Backend endpoint; falls back to a local default when BASE_URL is unset.
let baseProductUrl = process.env.BASE_URL;
if (baseProductUrl === undefined) {
  baseProductUrl = 'http://localhost:5000/products/';
}
console.log(baseProductUrl);

// ========================
// Middlewares
// ========================
app.set('view engine', 'ejs');
app.use(express.static(path.join(__dirname, "public")));

app.use(bodyParser.urlencoded({ extended: true }));

// Home page: fetch the product list from the backend and render it.
app.get('/', (req, res) => {
  const segment = XRay.getSegment();
  segment.addAnnotation('service', 'prodcatalog-request');
  const query = req.query.queryStr; // currently unused; kept for parity with callers

  const productRequest = axios.get(baseProductUrl);
  axios.all([productRequest]).then(axios.spread((...responses) => {
    const productResponse = responses[0];
    res.render('index.ejs', {
      products: productResponse.data.products,
      vendors: productResponse.data.details.vendors,
      version: productResponse.data.details.version
    });
    console.log("Product Catalog get call was Successful from frontend");
  })).catch(errors => {
    console.log(errors);
    console.log("There was error in Product Catalog get call from frontend");
  });
});

// Form handler: forward the new product to the backend, then reload the page.
app.post('/products', (req, res) => {
  const headers = {
    'Content-Type': 'application/json'
  };
  axios
    .post(`${baseProductUrl}${req.body.id}`, JSON.stringify({ name: `${req.body.name}` }), { "headers": headers })
    .then(response => {
      res.redirect(req.get('referer'));
      console.log("Product Catalog post call was Successful from frontend");
    })
    .catch(error => {
      console.error(error);
    });
});

// Liveness probe.
app.get("/ping", (req, res, next) => {
  res.json("Healthy");
});

// Export Prometheus metrics from /metrics endpoint
app.get('/metrics', (req, res, next) => {
  res.set('Content-Type', Prometheus.register.contentType);
  res.end(Prometheus.register.metrics());
});

app.use(XRay.express.closeSegment());

app.listen(9000, function() {
  console.log('listening on 9000');
});
--------------------------------------------------------------------------------
/apps/frontend_node/views/index.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Product Catalog
8 |
9 |
10 |
11 |
16 |
17 |
18 | Product Catalog Application
19 |
20 |
21 |
22 | Product Catalog
23 |
24 |
29 |
30 | <%if (Object.keys(products).length > 0) { %>
31 |
32 |
34 | Product ID |
35 | Product Name |
36 |
37 | <% for(var i = 0; i < Object.keys(products).length; i++) { %>
38 |
39 | <%= Object.keys(products)[i] %> |
40 | <%= Object.values(products)[i] %> |
41 |
42 |
43 | <% }; %>
44 |
45 |
46 | <% } else { %>
47 | No Products found in the Product Catalog
48 | <% } %>
49 | <%if (Object.keys(products).length > 0) { %>
50 | Catalog Detail
51 |
52 |
62 | <% } %>
63 |
64 | |
65 |
66 | Architecture
67 |
68 | |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
--------------------------------------------------------------------------------
/apps/product_catalog/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | venv
3 | __pycache__
4 | .python-version
--------------------------------------------------------------------------------
/apps/product_catalog/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | # Use an official Python runtime as an image
4 | FROM python:3.9-slim
5 |
6 | RUN apt-get update \
7 | && apt-get install curl -y \
8 | && rm -rf /var/lib/apt/lists/*
9 |
10 | RUN mkdir /app
11 | WORKDIR /app
12 |
13 | # We copy just the requirements.txt first to leverage Docker cache
14 | COPY requirements.txt /app
15 | RUN pip install -r requirements.txt
16 |
17 | COPY . /app
18 |
19 | # ENV AGG_APP_URL='http://prodinfo.octank-mesh-ns.svc.cluster.local:3000/productAgreement'
20 |
21 | #WORKDIR /docker_app
22 | EXPOSE 8080
23 | ENTRYPOINT ["/app/bootstrap.sh"]
--------------------------------------------------------------------------------
/apps/product_catalog/app.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | import werkzeug
4 | werkzeug.cached_property = werkzeug.utils.cached_property
5 | from flask import Flask, request, url_for
6 | from flask_restx import Api, Resource, fields
7 | from flask_cors import CORS
8 | import requests
9 | import os
10 | import logging
11 | from aws_xray_sdk.core import xray_recorder
12 | from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
13 | xray_recorder.configure(context_missing='LOG_ERROR')
14 | from aws_xray_sdk.core import patch_all
15 |
16 | patch_all()
17 |
18 | flask_app = Flask(__name__)
19 |
20 | log_level = logging.INFO
21 | flask_app.logger.setLevel(log_level)
22 | # enable CORS
23 | CORS(flask_app, resources={r'/*': {'origins': '*'}})
24 |
25 | #configure SDK code
26 | xray_recorder.configure(service='Product-Catalog')
27 | XRayMiddleware(flask_app, xray_recorder)
28 |
29 | AGG_APP_URL = os.environ.get("AGG_APP_URL")
30 |
31 | if AGG_APP_URL is None:
32 | AGG_APP_URL="http://localhost:3000/catalogDetail"
33 |
34 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
35 |
36 | # Fix of returning swagger.json on HTTP
37 | @property
38 | def specs_url(self):
39 | """
40 | The Swagger specifications absolute url (ie. `swagger.json`)
41 |
42 | :rtype: str
43 | """
44 | return url_for(self.endpoint('specs'), _external=False)
45 |
46 | Api.specs_url = specs_url
47 | app = Api(app = flask_app,
48 | version = "1.0",
49 | title = "Product Catalog",
50 | description = "Complete dictionary of Products available in the Product Catalog")
51 |
52 | name_space = app.namespace('products', description='Products from Product Catalog')
53 |
54 | model = app.model('Name Model',
55 | {'name': fields.String(required = True,
56 | description="Name of the Product",
57 | help="Product Name cannot be blank.")})
58 |
59 | list_of_names = {}
60 |
61 | @name_space.route('/')
62 | class Products(Resource):
63 | """
64 | Manipulations with products.
65 | """
66 | def get(self):
67 | """
68 | List of products.
69 | Returns a list of products
70 | """
71 | try:
72 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
73 | response = requests.get(str(AGG_APP_URL))
74 | content = response.json()
75 | flask_app.logger.info('Get-All Request succeeded')
76 | return {
77 | "products": list_of_names,
78 | "details" : content
79 | }
80 | except KeyError as e:
81 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
82 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
83 | except Exception as e:
84 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
85 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
86 |
87 | @name_space.route('/ping')
88 | class Ping(Resource):
89 | def get(self):
90 | return "healthy"
91 |
92 | @name_space.route("/")
93 | @name_space.param('id', 'Specify the ProductId')
94 | class MainClass(Resource):
95 |
96 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
97 | def get(self, id=None):
98 | try:
99 | name = list_of_names[id]
100 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
101 | response = requests.get(str(AGG_APP_URL))
102 | content = response.json()
103 | flask_app.logger.info('Get Request succeeded ' + list_of_names[id])
104 | return {
105 | "status": "Product Details retrieved",
106 | "name" : list_of_names[id],
107 | "details" : content['details']
108 | }
109 | except KeyError as e:
110 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
111 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
112 | except Exception as e:
113 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
114 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
115 |
116 |
117 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
118 | @app.expect(model)
119 | def post(self, id):
120 | try:
121 | list_of_names[id] = request.json['name']
122 | flask_app.logger.info('Post Request succeeded ' + list_of_names[id])
123 | return {
124 | "status": "New Product added to Product Catalog",
125 | "name": list_of_names[id]
126 | }
127 | except KeyError as e:
128 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
129 | name_space.abort(500, e.__doc__, status = "Could not save information", statusCode = "500")
130 | except Exception as e:
131 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
132 | name_space.abort(400, e.__doc__, status = "Could not save information", statusCode = "400")
133 |
134 | if __name__ == '__main__':
135 | app.run(host="0.0.0.0", debug=True)
--------------------------------------------------------------------------------
/apps/product_catalog/app_efs.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | import werkzeug
4 | import glob
5 | werkzeug.cached_property = werkzeug.utils.cached_property
6 | from flask import Flask, request, url_for
7 | from flask_restx import Api, Resource, fields
8 | from flask_cors import CORS
9 | import requests
10 | import os
11 | import logging
12 | from aws_xray_sdk.core import xray_recorder
13 | from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
14 | xray_recorder.configure(context_missing='LOG_ERROR')
15 | from aws_xray_sdk.core import patch_all
16 |
17 | patch_all()
18 |
19 | flask_app = Flask(__name__)
20 |
21 | log_level = logging.INFO
22 | flask_app.logger.setLevel(log_level)
23 | # enable CORS
24 | CORS(flask_app, resources={r'/*': {'origins': '*'}})
25 |
26 | #configure SDK code
27 | xray_recorder.configure(service='Product-Catalog')
28 | XRayMiddleware(flask_app, xray_recorder)
29 |
30 | AGG_APP_URL = os.environ.get("AGG_APP_URL")
31 |
32 | if AGG_APP_URL is None:
33 | AGG_APP_URL="http://localhost:3000/catalogDetail"
34 |
35 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
36 |
37 | filepath = os.path.join('/products', 'products.txt')
38 |
39 | # Fix of returning swagger.json on HTTP
40 | @property
41 | def specs_url(self):
42 | """
43 | The Swagger specifications absolute url (ie. `swagger.json`)
44 |
45 | :rtype: str
46 | """
47 | return url_for(self.endpoint('specs'), _external=False)
48 |
49 | Api.specs_url = specs_url
50 | app = Api(app = flask_app,
51 | version = "1.0",
52 | title = "Product Catalog",
53 | description = "Complete dictionary of Products available in the Product Catalog")
54 |
55 | name_space = app.namespace('products', description='Products from Product Catalog')
56 |
57 | model = app.model('Name Model',
58 | {'name': fields.String(required = True,
59 | description="Name of the Product",
60 | help="Product Name cannot be blank.")})
61 |
62 | list_of_names = {}
63 |
64 | def read_file():
65 | flask_app.logger.info(filepath)
66 | if not os.path.exists(filepath):
67 | open(filepath, 'w').close()
68 | else:
69 | with open(filepath, "r") as f:
70 | for line in f:
71 | (key, val) = line.split()
72 | list_of_names[int(key)] = val
73 |
74 | @name_space.route('/')
75 | class Products(Resource):
76 | """
77 | Manipulations with products.
78 | """
79 | def get(self):
80 | """
81 | List of products.
82 | Returns a list of products
83 | """
84 | try:
85 | read_file()
86 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
87 | response = requests.get(str(AGG_APP_URL))
88 | content = response.json()
89 | flask_app.logger.info('Get-All Request succeeded')
90 | return {
91 | "products": list_of_names,
92 | "details" : content
93 | }
94 | except KeyError as e:
95 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
96 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
97 | except Exception as e:
98 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
99 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
100 |
101 | @name_space.route('/ping')
102 | class Ping(Resource):
103 | def get(self):
104 | return "healthy"
105 |
106 | @name_space.route("/")
107 | @name_space.param('id', 'Specify the ProductId')
108 | class MainClass(Resource):
109 |
110 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
111 | def get(self, id=None):
112 | try:
113 | name = list_of_names[id]
114 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
115 | response = requests.get(str(AGG_APP_URL))
116 | content = response.json()
117 | flask_app.logger.info('Get Request succeeded ' + list_of_names[id])
118 | return {
119 | "status": "Product Details retrieved",
120 | "name" : list_of_names[id],
121 | "details" : content['details']
122 | }
123 | except KeyError as e:
124 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
125 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
126 | except Exception as e:
127 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
128 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
129 |
130 |
131 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
132 | @app.expect(model)
133 | def post(self, id):
134 | try:
135 | f = open(filepath, "a")
136 | flask_app.logger.info(id, request.json['name'])
137 | f.write('{} {}'.format(id, request.json['name']))
138 | f.write('\n')
139 | list_of_names[id] = request.json['name']
140 | flask_app.logger.info('Post Request succeeded ' + list_of_names[id])
141 | return {
142 | "status": "New Product added to Product Catalog",
143 | "name": list_of_names[id]
144 | }
145 | except KeyError as e:
146 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
147 | name_space.abort(500, e.__doc__, status = "Could not save information", statusCode = "500")
148 | except Exception as e:
149 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
150 | name_space.abort(400, e.__doc__, status = "Could not save information", statusCode = "400")
151 |
152 | if __name__ == '__main__':
153 | app.run(host="0.0.0.0", debug=True)
--------------------------------------------------------------------------------
/apps/product_catalog/app_secrets.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 | import werkzeug
4 | werkzeug.cached_property = werkzeug.utils.cached_property
5 | from flask import Flask, request, url_for
6 | from flask_restx import Api, Resource, fields
7 | from flask_cors import CORS
8 | import requests
9 | import os
10 | import logging
11 | import pymysql
12 | from pymysql.err import DatabaseError
13 | import json
14 |
15 | flask_app = Flask(__name__)
16 |
17 | log_level = logging.INFO
18 | flask_app.logger.setLevel(log_level)
19 | # enable CORS
20 | CORS(flask_app, resources={r'/*': {'origins': '*'}})
21 |
22 | AGG_APP_URL = os.environ.get("AGG_APP_URL")
23 | DB_APP_URL = os.environ.get("DATABASE_SERVICE_URL")
24 | MYSQL_ROOT_PASSWORD = os.environ.get("MYSQL_ROOT_PASSWORD")
25 |
26 | list_of_names = ""
27 |
28 | if AGG_APP_URL is None:
29 | AGG_APP_URL="http://localhost:3000/catalogDetail"
30 |
31 | if MYSQL_ROOT_PASSWORD is None:
32 | MYSQL_ROOT_PASSWORD=""
33 |
34 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
35 | flask_app.logger.info('DB_APP_URL is ' + str(DB_APP_URL))
36 |
37 | # Connect to the database
38 | def create_connection():
39 | return pymysql.connect(host=DB_APP_URL,
40 | user='root',
41 | password=MYSQL_ROOT_PASSWORD,
42 | db='dev',
43 | charset='utf8mb4',
44 | cursorclass=pymysql.cursors.DictCursor
45 | )
46 |
47 | # Fix of returning swagger.json on HTTP
48 | @property
49 | def specs_url(self):
50 | """
51 | The Swagger specifications absolute url (ie. `swagger.json`)
52 | :rtype: str
53 | """
54 | return url_for(self.endpoint('specs'), _external=False)
55 |
56 | Api.specs_url = specs_url
57 | app = Api(app = flask_app,
58 | version = "1.0",
59 | title = "Product Catalog",
60 | description = "Complete dictionary of Products available in the Product Catalog")
61 |
62 | name_space = app.namespace('products', description='Products from Product Catalog')
63 |
64 | model = app.model('Name Model',
65 | {'name': fields.String(required = True,
66 | description="Name of the Product",
67 | help="Product Name cannot be blank.")})
68 |
69 | class create_dict(dict):
70 |
71 | # __init__ function
72 | def __init__(self):
73 | self = dict()
74 |
75 | # Function to add key:value
76 | def add(self, key, value):
77 | self[key] = value
78 |
79 | @name_space.route('/')
80 | class Products(Resource):
81 | """
82 | Manipulations with products.
83 | """
84 | def get(self):
85 | """
86 | List of products.
87 | Returns a list of products
88 | """
89 | try:
90 | flask_app.logger.info('Inside Get request')
91 | response = requests.get(str(AGG_APP_URL))
92 | detailsContent = response.json()
93 | connection = create_connection()
94 | cursor = connection.cursor()
95 | cursor.execute("SELECT `prodId`, `prodName` FROM `product`")
96 |
97 | payload = []
98 | content = {}
99 | #mydict = create_dict()
100 | list_of_names = {}
101 | for row in cursor.fetchall():
102 | prodId = str(row["prodId"])
103 | prodName = str(row["prodName"])
104 | list_of_names[prodId] = prodName
105 | #content = {row['prodId']:row['prodName']}
106 | #payload.append(content)
107 | flask_app.logger.info(list_of_names)
108 | #prod_json = json.dumps(mydict, indent=2, sort_keys=True)
109 | #flask_app.logger.info(mydict)
110 | return {
111 | "products": list_of_names,
112 | "details" : detailsContent
113 | }
114 | cursor.close()
115 | connection.close()
116 | except KeyError as e:
117 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
118 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
119 | except Exception as e:
120 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
121 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
122 |
123 | @name_space.route('/ping')
124 | class Ping(Resource):
125 | def get(self):
126 | return "healthy"
127 |
128 | @name_space.route("/")
129 | @name_space.param('id', 'Specify the ProductId')
130 | class MainClass(Resource):
131 |
132 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
133 | def get(self, id=None):
134 | try:
135 | name = list_of_names[id]
136 | flask_app.logger.info('AGG_APP_URL is ' + str(AGG_APP_URL))
137 | response = requests.get(str(AGG_APP_URL))
138 | content = response.json()
139 | flask_app.logger.info('Get Request succeeded ' + list_of_names[id])
140 | return {
141 | "status": "Product Details retrieved",
142 | "name" : list_of_names[id],
143 | "details" : content['details']
144 | }
145 | except KeyError as e:
146 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
147 | name_space.abort(500, e.__doc__, status = "Could not retrieve information", statusCode = "500")
148 | except Exception as e:
149 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
150 | name_space.abort(400, e.__doc__, status = "Could not retrieve information", statusCode = "400")
151 |
152 |
153 | @app.doc(responses={ 200: 'OK', 400: 'Invalid Argument', 500: 'Mapping Key Error' })
154 | @app.expect(model)
155 | def post(self, id):
156 | try:
157 | connection = create_connection()
158 |
159 | cursor = connection.cursor()
160 | sql = ("INSERT INTO product (prodId, prodName) VALUES (%s, %s)")
161 | data = (id, request.json['name'])
162 | cursor.execute(sql, data)
163 | connection.commit()
164 | cursor.close()
165 | connection.close()
166 | flask_app.logger.info('Post Request succeeded ' + request.json['name'])
167 | return {
168 | "status": "New Product added to Product Catalog",
169 | "name": request.json['name']
170 | }
171 | except DatabaseError as e:
172 | err_code = e.args[0]
173 | if err_code == 2003:
174 | print('bad connection string')
175 | else:
176 | raise
177 | except KeyError as e:
178 | flask_app.logger.error('Error 500 Could not retrieve information ' + e.__doc__ )
179 | name_space.abort(500, e.__doc__, status = "Could not save information", statusCode = "500")
180 | except Exception as e:
181 | flask_app.logger.error('Error 400 Could not retrieve information ' + e.__doc__ )
182 | name_space.abort(400, e.__doc__, status = "Could not save information", statusCode = "400")
183 |
184 |
185 | if __name__ == '__main__':
186 | app.run(host="0.0.0.0", debug=True)
--------------------------------------------------------------------------------
/apps/product_catalog/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | export FLASK_APP=./app.py
3 | export FLASK_DEBUG=1
4 | flask run -h 0.0.0.0
--------------------------------------------------------------------------------
/apps/product_catalog/requirements.txt:
--------------------------------------------------------------------------------
1 | flask-restx==1.1.0
2 | flask==2.3.3
3 | markupsafe==2.1.3
4 | werkzeug==3.0.3
5 | gunicorn==22.0.0
requests==2.24.0
7 | aws-xray-sdk==2.6.0
8 | flask-cors==4.0.1
9 | PyMySQL==1.1.1
10 | boto3==1.9.220
11 |
--------------------------------------------------------------------------------
/images/01-kiali-console.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/01-kiali-console.png
--------------------------------------------------------------------------------
/images/01-kiali-traffic-flow.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/01-kiali-traffic-flow.gif
--------------------------------------------------------------------------------
/images/02-default-route-v1-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-default-route-v1-traffic-distribution.png
--------------------------------------------------------------------------------
/images/02-initial-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-initial-traffic-distribution.png
--------------------------------------------------------------------------------
/images/02-setup-mesh-resources.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-setup-mesh-resources.png
--------------------------------------------------------------------------------
/images/02-shift-traffic-v1-path-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-shift-traffic-v1-path-traffic-distribution.png
--------------------------------------------------------------------------------
/images/02-shift-traffic-v2-header-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-shift-traffic-v2-header-traffic-distribution.png
--------------------------------------------------------------------------------
/images/02-shift-traffic-v2-path-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-shift-traffic-v2-path-traffic-distribution.png
--------------------------------------------------------------------------------
/images/02-shift-traffic-v2-weight-traffic-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/02-shift-traffic-v2-weight-traffic-distribution.png
--------------------------------------------------------------------------------
/images/03-Retries.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/03-Retries.png
--------------------------------------------------------------------------------
/images/03-circuitbreaking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/03-circuitbreaking.png
--------------------------------------------------------------------------------
/images/03-timeouts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/03-timeouts.png
--------------------------------------------------------------------------------
/images/04-external-authorization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-external-authorization.png
--------------------------------------------------------------------------------
/images/04-fault-injection-app-snapshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-fault-injection-app-snapshot.png
--------------------------------------------------------------------------------
/images/04-kiali-auto-mtls-application-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-kiali-auto-mtls-application-graph.png
--------------------------------------------------------------------------------
/images/04-kiali-mast-head-lock-auto-mtls.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-kiali-mast-head-lock-auto-mtls.png
--------------------------------------------------------------------------------
/images/04-kiali-mast-head-lock-default-strict-mode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-kiali-mast-head-lock-default-strict-mode.png
--------------------------------------------------------------------------------
/images/04-peer-authentication-mtls-sidecar-connections.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-peer-authentication-mtls-sidecar-connections.png
--------------------------------------------------------------------------------
/images/04-request-authentication-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-request-authentication-1.png
--------------------------------------------------------------------------------
/images/04-request-authentication-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-request-authentication-2.png
--------------------------------------------------------------------------------
/images/04-request-authentication-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-request-authentication-3.png
--------------------------------------------------------------------------------
/images/04-request-authentication-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/04-request-authentication-4.png
--------------------------------------------------------------------------------
/images/eks-istio-spire.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/eks-istio-spire.png
--------------------------------------------------------------------------------
/images/istio-spire-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/istio-spire-1.png
--------------------------------------------------------------------------------
/images/istio-spire-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/istio-spire-2.png
--------------------------------------------------------------------------------
/images/istio-spire-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/istio-spire-3.png
--------------------------------------------------------------------------------
/images/istio-spire-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/istio-spire-4.png
--------------------------------------------------------------------------------
/images/istio-spire-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/images/istio-spire-5.png
--------------------------------------------------------------------------------
/modules/00-setup-mesh-resources/catalogdetail-destinationrule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | host: catalogdetail.workshop.svc.cluster.local
8 | subsets:
9 | - name: v1
10 | labels:
11 | version: v1
12 | - name: v2
13 | labels:
14 | version: v2
--------------------------------------------------------------------------------
/modules/00-setup-mesh-resources/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - route:
11 | - destination:
12 | host: catalogdetail
13 | port:
14 | number: 3000
15 |
--------------------------------------------------------------------------------
/modules/00-setup-mesh-resources/frontend-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: frontend
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - frontend
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | route:
14 | - destination:
15 | host: frontend
16 | port:
17 | number: 9000
--------------------------------------------------------------------------------
/modules/00-setup-mesh-resources/productcatalog-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: productcatalog
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - productcatalog
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | route:
14 | - destination:
15 | host: productcatalog
16 | port:
17 | number: 5000
--------------------------------------------------------------------------------
/modules/01-getting-started/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: "1.0"
3 | description: Helm Chart for Istio basic usecase
4 | name: istio_basic_workshop
5 | version: 1.0.0
--------------------------------------------------------------------------------
/modules/01-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # Module 1 - Getting Started
2 |
3 | This module shows how to deploy microservices as part of Istio service-mesh on
4 | EKS
5 |
6 | ## Prerequisites:
7 |
To be able to work on this module you should have an EKS cluster with Istio deployed, by following the steps below.
9 | 1. You will need to clone the below repo.
10 | ```sh
11 | git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git
12 | cd terraform-aws-eks-blueprints/patterns/istio
13 | ```
14 | 2. Then follow the [Istio EKS Blueprint](https://aws-ia.github.io/terraform-aws-eks-blueprints/patterns/istio/#deploy) setup.
15 |
16 | 3. Ensure that you have the following tools installed locally:
17 |
18 | 1. [kubectl](https://Kubernetes.io/docs/tasks/tools/)
19 | 2. [helm](https://helm.sh/docs/intro/install/)
20 | 3. [jq](https://jqlang.github.io/jq/download/)
21 | 4. [siege](https://github.com/JoeDog/siege)
22 |
23 | ## Deploy
24 |
25 | ```sh
26 | # Change directory to the right folder
27 | cd modules/01-getting-started
28 |
29 | # Create workshop namespace
30 | kubectl create namespace workshop
31 | kubectl label namespace workshop istio-injection=enabled
32 |
33 | # Install all the microservices in one go
34 | helm install mesh-basic . -n workshop
35 | ```
36 |
37 | Output should be similar to:
38 | ```
39 | namespace/workshop created
40 | namespace/workshop labeled
41 | NAME: mesh-basic
42 | LAST DEPLOYED: Mon Aug 21 18:08:29 2023
43 | NAMESPACE: workshop
44 | STATUS: deployed
45 | REVISION: 1
46 | TEST SUITE: None
47 | NOTES:
48 | 1. Get the application URL by running the following command:
49 |
50 | ISTIO_INGRESS_URL=$(kubectl get svc istio-ingress -n istio-ingress -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
51 | echo "http://$ISTIO_INGRESS_URL"
52 |
53 | 2. Access the displayed URL in a terminal using cURL or via a browser window
54 |
55 | Note: It may take a few minutes for the istio-ingress Network LoadBalancer to associate to the instance-mode targetGroup after the application is deployed.
56 | ```
57 |
58 | ## Validate
59 |
60 | Validate the install of microservices in the `workshop` namespace by running:
61 |
62 | ```sh
63 | kubectl get pods -n workshop
64 | ```
65 |
66 | Output should be similar to:
67 |
68 | ```
69 | NAME READY STATUS RESTARTS AGE
70 | catalogdetail-658d6dbc98-q544p 2/2 Running 0 7m19s
71 | catalogdetail2-549877454d-kqk9b 2/2 Running 0 7m19s
72 | frontend-7cc46889c8-qdhht 2/2 Running 0 7m19s
73 | productcatalog-5b79cb8dbb-t9dfl 2/2 Running 0 7m19s
74 | ```
75 |
76 | As can be noted in the output, each of the application pods is running two
77 | containers, an application container and an Istio proxy.
78 |
79 | > Note: You can run the command `kubectl get pod -n workshop -o yaml`
80 | on any of the above listed pods for further inspection of pod contents.
81 |
82 | ### Istio Resources
83 |
84 | Run the following command to list all the Istio resources created.
85 |
86 | ```sh
87 | kubectl get Gateway,VirtualService,DestinationRule -n workshop
88 | ```
89 |
90 | Output should be similar to:
91 | ```
92 | NAME AGE
93 | gateway.networking.istio.io/productapp-gateway 7m50s
94 |
95 | NAME GATEWAYS HOSTS AGE
96 | virtualservice.networking.istio.io/productapp ["productapp-gateway"] ["*"] 7m50s
97 | ```
98 |
99 | ## Test
100 |
101 | We will be using the deployed `kiali` to verify the interaction between the
102 | microservices that are deployed.
103 |
104 | ### Configure Kiali
105 |
106 | Run the following command in a terminal session to port-forward `kiali` traffic
107 | on to a designated port on your localhost
108 |
109 | ```sh
110 | kubectl port-forward svc/kiali 20001:20001 -n istio-system
111 | ```
112 |
113 | Use your browser to navigate to `http://localhost:20001`. At the `kiali` console
114 | carefully observe the highlighted portions of the image below and replicate that
115 | in your environment.
116 |
117 | 
118 |
119 | ### Generating Traffic
120 |
121 | Use the `siege` command line tool to generate traffic to the HTTP endpoint
122 | `http://$ISTIO_INGRESS_URL` noted above in the deployment output by running the following
123 | command in a separate terminal session.
124 |
125 | ```sh
126 | # Generate load for 2 minutes, with 5 concurrent threads and with a delay of 10s
127 | # between successive requests
128 | ISTIO_INGRESS_URL=$(kubectl get service/istio-ingress -n istio-ingress -o json | jq -r '.status.loadBalancer.ingress[0].hostname')
129 | siege http://$ISTIO_INGRESS_URL -c 5 -d 10 -t 2M
130 | ```
131 |
132 | While the load is being generated access the `kiali` console you previously
133 | configured and you should notice the traffic to be flowing in the manner shown
134 | below:
135 |
136 | 
137 |
138 | Based on animation shown we conclude that:
139 | 1. The Ingress traffic directed towards the `istio-ingress` is captured by the
140 | Gateway `productapp-gateway` as it handles traffic for all hosts (*)
141 | 2. Traffic is then directed to the `productapp` VirtualService as its
142 | `host` definition matches all hosts (*)
143 | 3. Traffic is then forwarded to `frontend` microservice as the context-path
144 | matches `/` and moves between microservices as shown in the GIF above.
145 | 4. The `catalogdetail` service, as expected, randomly splits the traffic between
146 | `v1` and `v2` versions.
147 |
148 | ## Destroy
149 |
150 | ```sh
151 | helm uninstall mesh-basic -n workshop
152 | kubectl delete namespace workshop
153 | ```
154 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 |
2 | 1. Get the application URL by running the following command:
3 |
4 | ISTIO_INGRESS_URL=$(kubectl get svc istio-ingress -n istio-ingress -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
5 | echo "http://$ISTIO_INGRESS_URL"
6 |
7 | 2. Access the displayed URL in a terminal using cURL or via a browser window
8 |
9 | Note: It may take a few minutes for the istio-ingress Network LoadBalancer to associate to the instance-mode targetGroup after the application is deployed.
10 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart: .Values.nameOverride when set, else .Chart.Name; truncated to 63 chars (Kubernetes DNS label limit) with any trailing "-" trimmed.
4 | */}}
5 | {{- define "helm-chart.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name, truncated at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | Precedence: .Values.fullnameOverride wins; otherwise the release name alone when it already contains the chart name,
12 | else "<release-name>-<chart-name>".
13 | */}}
14 | {{- define "helm-chart.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{/*
28 | Create chart name and version as used by the chart label; "+" (SemVer build metadata) is replaced with "_" since label values may not contain "+", then truncated to the 63-char label limit.
29 | */}}
30 | {{- define "helm-chart.chart" -}}
31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
32 | {{- end -}}
33 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/catalogdetail-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: catalogdetail
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: catalogdetail
11 | version: v1
12 | template:
13 | metadata:
14 | labels:
15 | app: catalogdetail
16 | version: v1
17 | spec:
18 | serviceAccountName: catalogdetail-sa
19 | containers:
20 | - name: catalogdetail
21 | image: "{{ .Values.catalogdetail1.image.repository }}:{{ .Values.catalogdetail1.image.tag }}"
22 | imagePullPolicy: Always
23 | livenessProbe:
24 | httpGet:
25 | path: /ping
26 | port: 3000
27 | initialDelaySeconds: 0
28 | periodSeconds: 10
29 | timeoutSeconds: 1
30 | failureThreshold: 3
31 | readinessProbe:
32 | httpGet:
33 | path: /ping
34 | port: 3000
35 | successThreshold: 3
36 | ports:
37 | - containerPort: 3000
38 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/catalogdetail-sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: catalogdetail-sa
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | account: catalogdetail
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/catalogdetail-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | #annotations:
5 | # This annotation is only required if you are creating an internal facing ELB. Remove this annotation to create public facing ELB.
6 | #service.beta.kubernetes.io/aws-load-balancer-internal: "true"
7 | name: catalogdetail
8 | namespace: {{ .Release.Namespace }}
9 | labels:
10 | app: catalogdetail
11 | spec:
12 | ports:
13 | - name: "http"
14 | port: 3000
15 | targetPort: 3000
16 | selector:
17 | app: catalogdetail
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/catalogdetail2-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: catalogdetail2
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: catalogdetail
11 | version: v2
12 | template:
13 | metadata:
14 | labels:
15 | app: catalogdetail
16 | version: v2
17 | spec:
18 | serviceAccountName: catalogdetail-sa
19 | containers:
20 | - name: catalogdetail
21 | image: "{{ .Values.catalogdetail2.image.repository }}:{{ .Values.catalogdetail2.image.tag }}"
22 | imagePullPolicy: Always
23 | livenessProbe:
24 | httpGet:
25 | path: /ping
26 | port: 3000
27 | initialDelaySeconds: 0
28 | periodSeconds: 10
29 | timeoutSeconds: 1
30 | failureThreshold: 3
31 | readinessProbe:
32 | httpGet:
33 | path: /ping
34 | port: 3000
35 | successThreshold: 3
36 | ports:
37 | - containerPort: 3000
38 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/frontend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: frontend
11 | version: v1
12 | template:
13 | metadata:
14 | annotations:
15 | prometheus.io/scrape: 'true'
16 | prometheus.io/path: '/stats/prometheus'
17 | labels:
18 | app: frontend
19 | version: v1
20 | spec:
21 | serviceAccountName: frontend-sa
22 | containers:
23 | - name: frontend
24 | image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
25 | imagePullPolicy: Always
26 | livenessProbe:
27 | httpGet:
28 | path: /ping
29 | port: 9000
30 | initialDelaySeconds: 0
31 | periodSeconds: 10
32 | timeoutSeconds: 1
33 | failureThreshold: 3
34 | readinessProbe:
35 | httpGet:
36 | path: /ping
37 | port: 9000
38 | successThreshold: 3
39 | env:
40 | {{ .Values.frontend.env | toYaml | nindent 12 }}
41 | ports:
42 | - containerPort: 9000
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/frontend-sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: frontend-sa
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | account: frontend
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/frontend-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | #annotations:
5 | # This annotation is only required if you are creating an internal facing ELB. Remove this annotation to create public facing ELB.
6 | #service.beta.kubernetes.io/aws-load-balancer-internal: "true"
7 | name: frontend
8 | namespace: {{ .Release.Namespace }}
9 | labels:
10 | app: frontend
11 | spec:
12 | ports:
13 | - name: "http"
14 | port: 9000
15 | targetPort: 9000
16 | selector:
17 | app: frontend
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/productapp-gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: productapp-gateway
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | # The selector matches the ingress gateway pod labels.
8 | # If you installed Istio using Helm following the standard documentation, this would be "istio=ingress"
9 | selector:
10 | istio: ingressgateway # use istio default controller
11 | servers:
12 | - port:
13 | number: 80
14 | name: http
15 | protocol: HTTP
16 | hosts:
17 | - "*"
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/productapp-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: productapp
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | hosts:
8 | - "*"
9 | gateways:
10 | - productapp-gateway
11 | http:
12 | - match:
13 | - uri:
14 | prefix: /
15 | route:
16 | - destination:
17 | host: frontend
18 | port:
19 | number: 9000
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/productcatalog-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: productcatalog
5 | namespace: {{ .Release.Namespace }}
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: productcatalog
11 | version: v1
12 | template:
13 | metadata:
14 | labels:
15 | app: productcatalog
16 | version: v1
17 | annotations:
18 | sidecar.opentelemetry.io/inject: "true"
19 | spec:
20 | serviceAccountName: productcatalog-sa
21 | containers:
22 | - name: productcatalog
23 | image: "{{ .Values.productcatalog.image.repository }}:{{ .Values.productcatalog.image.tag }}"
24 | imagePullPolicy: Always
25 | env:
26 | {{ .Values.productcatalog.env | toYaml | nindent 12 }}
27 | livenessProbe:
28 | httpGet:
29 | path: /products/ping
30 | port: 5000
31 | initialDelaySeconds: 0
32 | periodSeconds: 10
33 | timeoutSeconds: 1
34 | failureThreshold: 3
35 | readinessProbe:
36 | httpGet:
37 | path: /products/ping
38 | port: 5000
39 | successThreshold: 3
40 | ports:
41 | - containerPort: 5000
42 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/productcatalog-sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: productcatalog-sa
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | account: productcatalog
8 |
--------------------------------------------------------------------------------
/modules/01-getting-started/templates/productcatalog-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: productcatalog
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | app: productcatalog
8 | spec:
9 | ports:
10 | - name: "http"
11 | port: 5000
12 | targetPort: 5000
13 | selector:
14 | app: productcatalog
--------------------------------------------------------------------------------
/modules/01-getting-started/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for helm-chart.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | catalogdetail1:
6 | image:
7 | repository: public.ecr.aws/u2g6w7p2/eks-workshop-demo/catalog_detail
8 | tag: "1.0"
9 |
10 | catalogdetail2:
11 | image:
12 | repository: public.ecr.aws/u2g6w7p2/eks-workshop-demo/catalog_detail
13 | tag: "2.0"
14 |
15 | productcatalog:
16 | image:
17 | repository: public.ecr.aws/u2g6w7p2/eks-workshop-demo/product_catalog
18 | tag: "1.0"
19 |
20 | env:
21 | - name: AGG_APP_URL
22 | value: "http://catalogdetail.workshop.svc.cluster.local:3000/catalogDetail"
23 |
24 | frontend:
25 | image:
26 | repository: public.ecr.aws/u2g6w7p2/eks-workshop-demo/frontend_node
27 | tag: "2.0"
28 |
29 | env:
30 | - name: BASE_URL
31 | value: "http://productcatalog.workshop.svc.cluster.local:5000/products/"
32 |
--------------------------------------------------------------------------------
/modules/02-traffic-management/header-based-routing/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - match:
11 | - headers:
12 | user-type:
13 | exact: internal
14 | route:
15 | - destination:
16 | host: catalogdetail
17 | port:
18 | number: 3000
19 | subset: v2
20 | - route:
21 | - destination:
22 | host: catalogdetail
23 | port:
24 | number: 3000
25 | subset: v1
--------------------------------------------------------------------------------
/modules/02-traffic-management/header-based-routing/productcatalog-envoyfilter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: EnvoyFilter
3 | metadata:
4 | name: productcatalog
5 | namespace: workshop
6 | spec:
7 | workloadSelector:
8 | labels:
9 | app: productcatalog
10 | configPatches:
11 | - applyTo: HTTP_FILTER
12 | match:
13 | context: SIDECAR_OUTBOUND
14 | listener:
15 | filterChain:
16 | filter:
17 | name: "envoy.filters.network.http_connection_manager"
18 | subFilter:
19 | name: "envoy.filters.http.router"
20 | patch:
21 | operation: INSERT_BEFORE
22 | value:
23 | name: envoy.filters.http.lua
24 | typed_config:
25 | "@type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua"
26 | defaultSourceCode:
27 | inlineString: |-
28 | function envoy_on_request(request_handle)
29 | math.randomseed(os.clock()*100000000000);
30 | local r = math.random(1, 100);
31 | if r <= 30 then
32 | request_handle:headers():add("USER-TYPE", "internal");
33 | else
34 | request_handle:headers():add("USER-TYPE", "external");
35 | end
36 | end
--------------------------------------------------------------------------------
/modules/02-traffic-management/path-based-routing/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - match:
11 | - uri:
12 | exact: /v2/catalogDetail
13 | rewrite:
14 | uri: /catalogDetail
15 | route:
16 | - destination:
17 | host: catalogdetail
18 | port:
19 | number: 3000
20 | subset: v2
21 | - match:
22 | - uri:
23 | exact: /v1/catalogDetail
24 | rewrite:
25 | uri: /catalogDetail
26 | route:
27 | - destination:
28 | host: catalogdetail
29 | port:
30 | number: 3000
31 | subset: v1
--------------------------------------------------------------------------------
/modules/02-traffic-management/route-traffic-to-version-v1/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - route:
11 | - destination:
12 | host: catalogdetail
13 | port:
14 | number: 3000
15 | subset: v1
16 |
--------------------------------------------------------------------------------
/modules/02-traffic-management/setup-mesh-resources/catalogdetail-destinationrule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | host: catalogdetail.workshop.svc.cluster.local
8 | subsets:
9 | - name: v1
10 | labels:
11 | version: v1
12 | - name: v2
13 | labels:
14 | version: v2
--------------------------------------------------------------------------------
/modules/02-traffic-management/setup-mesh-resources/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - route:
11 | - destination:
12 | host: catalogdetail
13 | port:
14 | number: 3000
15 |
--------------------------------------------------------------------------------
/modules/02-traffic-management/setup-mesh-resources/frontend-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: frontend
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - frontend
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | route:
14 | - destination:
15 | host: frontend
16 | port:
17 | number: 9000
--------------------------------------------------------------------------------
/modules/02-traffic-management/setup-mesh-resources/productcatalog-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: productcatalog
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - productcatalog
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | route:
14 | - destination:
15 | host: productcatalog
16 | port:
17 | number: 5000
--------------------------------------------------------------------------------
/modules/02-traffic-management/traffic-mirroring/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - route:
11 | - destination:
12 | host: catalogdetail
13 | port:
14 | number: 3000
15 | subset: v1
16 | weight: 100
17 | mirror:
18 | host: catalogdetail
19 | port:
20 | number: 3000
21 | subset: v2
22 | mirrorPercentage:
23 | value: 50
--------------------------------------------------------------------------------
/modules/02-traffic-management/weight-based-routing/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - route:
11 | - destination:
12 | host: catalogdetail
13 | port:
14 | number: 3000
15 | subset: v1
16 | weight: 90
17 | - destination:
18 | host: catalogdetail
19 | port:
20 | number: 3000
21 | subset: v2
22 | weight: 10
--------------------------------------------------------------------------------
/modules/03-network-resiliency/README.md:
--------------------------------------------------------------------------------
1 | # Network Resiliency
2 |
3 | This module will cover the network resiliency capabilities of Istio service-mesh on Amazon EKS.
4 |
5 |
6 | ## Prerequisites:
7 | 1. [Module 1 - Getting Started](../01-getting-started/)
8 | 2. [Install `istioctl` and add it to the $PATH](https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/)
9 |
10 | >Note: This module will build on the application resources deployed in
11 | [Module 1 - Getting Started](../01-getting-started/). That means you **don't** have to execute the [Destroy](../01-getting-started/README.md#destroy) section in Module 1.
12 |
13 | ## Initial state setup
14 |
15 | In this step we add the Istio mesh resources to wrap the `frontend`, `productcatalog` and
16 | `catalogdetail` services.
17 |
18 | A [`DestinationRule`](https://istio.io/latest/docs/reference/config/networking/destination-rule/) is created for [`catalogdetail`](../../00-setup-mesh-resources/catalogdetail-destinationrule.yaml) to select subsets
19 | based on the `version` label of the destination pods. However, the initial [`VirtualService`](../../00-setup-mesh-resources/catalogdetail-virtualservice.yaml) definition does not specify any
20 | subset configuration thereby leading to a uniform traffic spread across both subsets.
21 |
22 | ```bash
23 | # This assumes that you are currently in "istio-on-eks/modules/01-getting-started" folder
24 | cd ../03-network-resiliency
25 |
26 | # Install the mesh resources
27 | kubectl apply -f ../00-setup-mesh-resources/
28 | ```
29 |
30 | Output should be similar to:
31 |
32 | ```
33 | destinationrule.networking.istio.io/catalogdetail created
34 | virtualservice.networking.istio.io/catalogdetail created
35 | virtualservice.networking.istio.io/frontend created
36 | virtualservice.networking.istio.io/productcatalog created
37 | ```
38 |
39 | ## 🧱 Sub Modules of Network Resiliency
40 |
41 | ### [1. Fault Injection](fault-injection/README.md)
42 | ### [2. Timeout, Retries and Circuit Breaking](timeouts-retries-circuitbreaking/README.md)
43 | ### [3. Rate Limiting](rate-limiting/README.md)
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/fault-injection/abort/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - match:
11 | - headers:
12 | user:
13 | exact: "internal"
14 | fault:
15 | abort:
16 | percentage:
17 | value: 100
18 | httpStatus: 500
19 | route:
20 | - destination:
21 | host: catalogdetail
22 | port:
23 | number: 3000
24 | - route:
25 | - destination:
26 | host: catalogdetail
27 | port:
28 | number: 3000
29 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/fault-injection/delay/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - match:
11 | - headers:
12 | user:
13 | exact: "internal"
14 | fault:
15 | delay:
16 | percentage:
17 | value: 100
18 | fixedDelay: 15s
19 | route:
20 | - destination:
21 | host: catalogdetail
22 | port:
23 | number: 3000
24 | - route:
25 | - destination:
26 | host: catalogdetail
27 | port:
28 | number: 3000
29 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/README.md:
--------------------------------------------------------------------------------
1 | # Network Resiliency - Rate Limiting
2 | This sub-module will cover the Istio service-mesh feature of rate limiting for network resiliency on Amazon EKS.
3 |
4 | Use the following links to quickly jump to the desired section:
5 | 1. [Local Rate Limiting](#local-rate-limiting)
6 | 2. [Global Rate Limiting](#global-rate-limiting)
7 | 3. [Reset the environment](#reset-the-environment)
8 |
9 | ## Local Rate Limiting
10 |
11 | Apply Local Rate Limiting to the `productcatalog` Service
12 |
13 | ```sh
14 | kubectl apply -f local-ratelimit/local-ratelimit.yaml
15 | ```
16 |
17 | Looking into the contents of the file [local-ratelimit.yaml](local-ratelimit/local-ratelimit.yaml)
18 |
19 | 1. The **HTTP_FILTER** patch inserts the `envoy.filters.http.local_ratelimit` [local envoy filter](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/local_rate_limit_filter#config-http-filters-local-rate-limit) into the HTTP connection manager filter chain.
20 | 2. The local rate limit filter’s [token bucket](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto#envoy-v3-api-field-extensions-filters-http-local-ratelimit-v3-localratelimit-token-bucket) is configured to allow **10 requests/min**.
21 | 3. The filter is also configured to add an `x-local-rate-limit` response header to requests that are blocked.
22 |
23 | ### Test
24 |
25 | To test the rate limiter in action, exec into the `frontend` pod and send requests to the `productcatalog` service to trigger the rate limiter.
26 |
27 | ```sh
28 | POD_NAME=$(kubectl get pod -l app=frontend -o jsonpath='{.items[0].metadata.name}' -n workshop)
29 |
30 | kubectl exec $POD_NAME -n workshop -c frontend -- \
31 | bash -c "for i in {1..20}; do curl -sI http://productcatalog:5000/products/; done"
32 | ```
33 |
34 | Since the 20 requests are sent in less than a minute, after the first 10 requests are accepted by the service you’ll start seeing **HTTP 429** response codes from the service.
35 |
36 | Successful requests will return the following output:
37 |
38 | ```
39 | HTTP/1.1 200 OK
40 | content-type: application/json
41 | content-length: 124
42 | x-amzn-trace-id: Root=1-6502273f-8dd970ab66ed073ccd2519c7
43 | access-control-allow-origin: *
44 | server: envoy
45 | date: Wed, 13 Sep 2023 21:18:55 GMT
46 | x-envoy-upstream-service-time: 15
47 | x-ratelimit-limit: 10
48 | x-ratelimit-remaining: 9
49 | x-ratelimit-reset: 45
50 | ```
51 |
52 | While requests that are rate limited will return the following output:
53 |
54 | ```
55 | HTTP/1.1 429 Too Many Requests
56 | x-local-rate-limit: true
57 | content-length: 18
58 | content-type: text/plain
59 | x-ratelimit-limit: 10
60 | x-ratelimit-remaining: 0
61 | x-ratelimit-reset: 45
62 | date: Wed, 13 Sep 2023 21:18:55 GMT
63 | server: envoy
64 | x-envoy-upstream-service-time: 0
65 | ```
66 |
67 | Similarly, if you run the same command without `-I` flag, you will see the
68 | responses as shown below:
69 |
70 | For successful requests:
71 |
72 | ```
73 | {
74 | "products": {},
75 | "details": {
76 | "version": "2",
77 | "vendors": [
78 | "ABC.com, XYZ.com"
79 | ]
80 | }
81 | }
82 | ```
83 | And for rate-limited requests:
84 |
85 | ```
86 | local_rate_limited
87 | ```
88 |
89 | ## Global Rate Limiting
90 |
91 | ### Setup Global Rate Limiting service
92 |
93 | To be able to use the Global Rate Limit in our Istio service-mesh we need a global
94 | rate limit service that implements Envoy’s rate limit service protocol.
95 |
96 | 1. Configuration for the Global Rate Limit service
97 | * Configuration is captured in `ratelimit-config` **ConfigMap** in the file
98 | [global-ratelimit-config.yaml](global-ratelimit/global-ratelimit-config.yaml)
99 | * As can be observed in the file, rate limit requests to the `/` path is set to
100 | **5 requests/minute** and all other requests at **100 requests/minute**.
101 | 2. Global Rate Limit service with Redis
102 | * File [global-ratelimit-service.yaml](global-ratelimit/global-ratelimit-service.yaml)
103 | has **Deployment** and **Service** definitions for
104 | * Central Rate Limit Service
105 | * Redis
106 |
107 |
108 | Apply the Global Rate Limiting configuration and deploy the dependent services
109 | as shown below to the EKS cluster and Istio service-mesh.
110 |
111 | ```sh
112 | kubectl apply -f global-ratelimit/global-ratelimit-config.yaml
113 | kubectl apply -f global-ratelimit/global-ratelimit-service.yaml
114 | ```
115 |
116 | ### Apply the Global Rate Limits
117 |
118 | Applying global rate limits is done in two steps:
119 |
120 | 1. Apply an EnvoyFilter to the ingressgateway to enable global rate limiting
121 | using Envoy’s global rate limit filter.
122 |
123 | ```sh
124 | kubectl apply -f global-ratelimit/filter-ratelimit.yaml
125 | ```
126 | Looking at the file [filter-ratelimit.yaml](global-ratelimit/filter-ratelimit.yaml)
127 | * The configuration inserts the `envoy.filters.http.ratelimit` [global envoy filter](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/ratelimit/v3/rate_limit.proto#envoy-v3-api-msg-extensions-filters-http-ratelimit-v3-ratelimit) into the **HTTP_FILTER** chain.
128 | * The `rate_limit_service` field specifies the external rate limit service, `outbound|8081||ratelimit.workshop.svc.cluster.local` in this case.
129 |
130 | 2. Apply another EnvoyFilter to the ingressgateway that defines the route configuration on which to rate limit.
131 |
132 | Looking at the file [filter-ratelimit-svc.yaml](global-ratelimit/filter-ratelimit-svc.yaml)
133 | * The configuration adds rate limit actions for any route from a virtual host.
134 | ```sh
135 | kubectl apply -f global-ratelimit/filter-ratelimit-svc.yaml
136 | ```
137 |
138 |
139 | ### Test
140 |
141 | To test the global rate limit in action, run the following command in a terminal
142 | session:
143 |
144 | ```sh
145 | ISTIO_INGRESS_URL=$(kubectl get svc istio-ingress -n istio-ingress -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
146 |
147 | for i in {1..6}; do curl -Is $ISTIO_INGRESS_URL; done
148 | ```
149 |
150 | In the output you should notice that the first 5 requests will generate
151 | output similar to the one below:
152 |
153 | ```
154 | HTTP/1.1 200 OK
155 | x-powered-by: Express
156 | content-type: text/html; charset=utf-8
157 | content-length: 1203
158 | etag: W/"4b3-KO/ZeBhhZHNNKPbDwPiV/CU2EDU"
159 | date: Wed, 17 Jan 2024 16:53:23 GMT
160 | x-envoy-upstream-service-time: 34
161 | server: istio-envoy
162 | ```
163 |
164 | And the last request should generate output similar to:
165 |
166 | ```
167 | HTTP/1.1 429 Too Many Requests
168 | x-envoy-ratelimited: true
169 | date: Wed, 17 Jan 2024 16:53:35 GMT
170 | server: istio-envoy
171 | transfer-encoding: chunked
172 | ```
173 |
174 | We see this behavior because of the global rate limiting that is in effect that
175 | is allowing only a max of **5 requests/minute** when the context-path is `/`
176 |
177 | ## Reset the environment
178 |
179 | Execute the following command to remove all rate-limiting configuration and
180 | services and then run the same steps as in the [Initial state setup](#initial-state-setup)
181 | to reset the environment one last time.
182 |
183 | ```sh
184 | # Delete all rate limiting configuration and services
185 | kubectl delete -f ./local-ratelimit
186 | kubectl delete -f ./global-ratelimit
187 | ```
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/global-ratelimit/filter-ratelimit-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: EnvoyFilter
3 | metadata:
4 | name: filter-ratelimit-svc
5 | namespace: istio-system
6 | spec:
7 | workloadSelector:
8 | labels:
9 | istio: ingressgateway
10 | configPatches:
11 | - applyTo: VIRTUAL_HOST
12 | match:
13 | context: GATEWAY
14 | routeConfiguration:
15 | vhost:
16 | name: ""
17 | route:
18 | action: ANY
19 | patch:
20 | operation: MERGE
21 | # Applies the rate limit rules.
22 | value:
23 | rate_limits:
24 | - actions: # any actions in here
25 | - request_headers:
26 | header_name: ":path"
27 | descriptor_key: "PATH"
28 |
29 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/global-ratelimit/filter-ratelimit.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: EnvoyFilter
3 | metadata:
4 | name: filter-ratelimit
5 | namespace: istio-system
6 | spec:
7 | workloadSelector:
8 | labels:
9 | istio: ingressgateway
10 | configPatches:
11 | # The Envoy config you want to modify
12 | - applyTo: HTTP_FILTER
13 | match:
14 | context: GATEWAY
15 | listener:
16 | filterChain:
17 | filter:
18 | name: "envoy.filters.network.http_connection_manager"
19 | subFilter:
20 | name: "envoy.filters.http.router"
21 | patch:
22 | operation: INSERT_BEFORE
23 | # Adds the Envoy Rate Limit Filter in HTTP filter chain.
24 | value:
25 | name: envoy.filters.http.ratelimit
26 | typed_config:
27 | "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
28 |           # domain can be anything! Match it to the ratelimiter service config
29 | domain: prodcatalog-ratelimit
30 | failure_mode_deny: true
31 | timeout: 10s
32 | rate_limit_service:
33 | grpc_service:
34 | envoy_grpc:
35 | cluster_name: outbound|8081||ratelimit.workshop.svc.cluster.local
36 | authority: ratelimit.workshop.svc.cluster.local
37 | transport_api_version: V3
38 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/global-ratelimit/global-ratelimit-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: ratelimit-config
5 | namespace: workshop
6 | data:
7 | config.yaml: |
8 | domain: prodcatalog-ratelimit
9 | descriptors:
10 | - key: PATH
11 | value: "/"
12 | rate_limit:
13 | unit: minute
14 | requests_per_unit: 5
15 | - key: PATH
16 | rate_limit:
17 | unit: minute
18 | requests_per_unit: 100
19 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/global-ratelimit/global-ratelimit-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: redis
5 | namespace: workshop
6 | ---
7 | apiVersion: v1
8 | kind: Service
9 | metadata:
10 | name: redis
11 | namespace: workshop
12 | labels:
13 | app: redis
14 | spec:
15 | ports:
16 | - name: redis
17 | port: 6379
18 | selector:
19 | app: redis
20 | ---
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: redis
25 | namespace: workshop
26 | spec:
27 | replicas: 1
28 | selector:
29 | matchLabels:
30 | app: redis
31 | template:
32 | metadata:
33 | labels:
34 | app: redis
35 | spec:
36 | containers:
37 | - image: redis:alpine
38 | imagePullPolicy: Always
39 | name: redis
40 | ports:
41 | - name: redis
42 | containerPort: 6379
43 | restartPolicy: Always
44 | serviceAccountName: redis
45 | ---
46 | apiVersion: v1
47 | kind: Service
48 | metadata:
49 | name: ratelimit
50 | namespace: workshop
51 | labels:
52 | app: ratelimit
53 | spec:
54 | ports:
55 | - name: http-port
56 | port: 8080
57 | targetPort: 8080
58 | protocol: TCP
59 | - name: grpc-port
60 | port: 8081
61 | targetPort: 8081
62 | protocol: TCP
63 | - name: http-debug
64 | port: 6070
65 | targetPort: 6070
66 | protocol: TCP
67 | selector:
68 | app: ratelimit
69 | ---
70 | apiVersion: apps/v1
71 | kind: Deployment
72 | metadata:
73 | name: ratelimit
74 | namespace: workshop
75 | spec:
76 | replicas: 1
77 | selector:
78 | matchLabels:
79 | app: ratelimit
80 | strategy:
81 | type: Recreate
82 | template:
83 | metadata:
84 | labels:
85 | app: ratelimit
86 | spec:
87 | containers:
88 | - image: envoyproxy/ratelimit:9d8d70a8 # 2022/08/16
89 | imagePullPolicy: Always
90 | name: ratelimit
91 | command: ["/bin/ratelimit"]
92 | env:
93 | - name: LOG_LEVEL
94 | value: debug
95 | - name: REDIS_SOCKET_TYPE
96 | value: tcp
97 | - name: REDIS_URL
98 | value: redis:6379
99 | - name: USE_STATSD
100 | value: "false"
101 | - name: RUNTIME_ROOT
102 | value: /data
103 | - name: RUNTIME_SUBDIRECTORY
104 | value: ratelimit
105 | - name: RUNTIME_WATCH_ROOT
106 | value: "false"
107 | - name: RUNTIME_IGNOREDOTFILES
108 | value: "true"
109 | - name: HOST
110 | value: "::"
111 | - name: GRPC_HOST
112 | value: "::"
113 | ports:
114 | - containerPort: 8080
115 | - containerPort: 8081
116 | - containerPort: 6070
117 | volumeMounts:
118 | - name: config-volume
119 | mountPath: /data/ratelimit/config
120 | volumes:
121 | - name: config-volume
122 | configMap:
123 | name: ratelimit-config
124 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/rate-limiting/local-ratelimit/local-ratelimit.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: EnvoyFilter
3 | metadata:
4 | name: filter-local-ratelimit-svc
5 | namespace: istio-system
6 | spec:
7 | workloadSelector:
8 | labels:
9 | app: productcatalog
10 | configPatches:
11 | - applyTo: HTTP_FILTER
12 | match:
13 | context: SIDECAR_INBOUND
14 | listener:
15 | filterChain:
16 | filter:
17 | name: "envoy.filters.network.http_connection_manager"
18 | patch:
19 | operation: INSERT_BEFORE
20 | value:
21 | name: envoy.filters.http.local_ratelimit
22 | typed_config:
23 | "@type": type.googleapis.com/udpa.type.v1.TypedStruct
24 | type_url: type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit
25 | value:
26 | stat_prefix: http_local_rate_limiter
27 | enable_x_ratelimit_headers: DRAFT_VERSION_03
28 | token_bucket:
29 | max_tokens: 10
30 | tokens_per_fill: 10
31 | fill_interval: 60s
32 | filter_enabled:
33 | runtime_key: local_rate_limit_enabled
34 | default_value:
35 | numerator: 100
36 | denominator: HUNDRED
37 | filter_enforced:
38 | runtime_key: local_rate_limit_enforced
39 | default_value:
40 | numerator: 100
41 | denominator: HUNDRED
42 | response_headers_to_add:
43 | - append: false
44 | header:
45 | key: x-local-rate-limit
46 | value: 'true'
47 |
48 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/timeouts-retries-circuitbreaking/circuitbreaking/catalogdetail-destinationrule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: DestinationRule
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | host: catalogdetail.workshop.svc.cluster.local
8 | subsets:
9 | - name: v1
10 | labels:
11 | version: v1
12 | - name: v2
13 | labels:
14 | version: v2
15 | trafficPolicy:
16 | connectionPool:
17 | tcp:
18 | maxConnections: 1
19 | http:
20 | http1MaxPendingRequests: 1
21 | maxRequestsPerConnection: 1
22 | outlierDetection:
23 | consecutive5xxErrors: 1
24 | interval: 1s
25 | baseEjectionTime: 3m
26 | maxEjectionPercent: 100
27 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/timeouts-retries-circuitbreaking/retries/productcatalog-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1beta1
2 | kind: VirtualService
3 | metadata:
4 | name: productcatalog
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - productcatalog
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | retries:
14 | attempts: 2
15 | route:
16 | - destination:
17 | host: productcatalog
18 | port:
19 | number: 5000
20 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/timeouts-retries-circuitbreaking/timeouts/catalogdetail-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: catalogdetail
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - catalogdetail
9 | http:
10 | - fault:
11 | delay:
12 | percentage:
13 | value: 100
14 | fixedDelay: 5s
15 | route:
16 | - destination:
17 | host: catalogdetail
18 | subset: v2
19 |
--------------------------------------------------------------------------------
/modules/03-network-resiliency/timeouts-retries-circuitbreaking/timeouts/productcatalog-virtualservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: productcatalog
5 | namespace: workshop
6 | spec:
7 | hosts:
8 | - productcatalog
9 | http:
10 | - match:
11 | - uri:
12 | prefix: /
13 | route:
14 | - destination:
15 | host: productcatalog
16 | port:
17 | number: 5000
18 | timeout: 2s
19 |
--------------------------------------------------------------------------------
/modules/04-security/ingress-security/README.md:
--------------------------------------------------------------------------------
1 | # Ingress Gateway Certificate Management
2 |
3 | Typically, to protect publicly accessible `istio-ingress` service load balancer endpoints on the internet, you will issue certificates from a well-known, trusted third party root CA or an intermediate CA and associate it with the load balancer HTTPS listener. Refer to [Issuing and managing certificates using AWS Certificate Manager (ACM)](https://docs.aws.amazon.com/acm/latest/userguide/gs.html) for issuing or importing certificates. Refer to [AWS Load Balancer Controller service annotations TLS](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/annotations/#tls) section for details on how to associate ACM certificates with service load balancer listeners using service annotations.
4 |
5 | You can also import certificates issued by AWS Private CA configured in standard mode into ACM. AWS Private CA configured in short-lived mode is not supported. However, for this module a self-signed certificate is used for the internet facing `istio-ingress` load balancer endpoint to avoid creating another Private CA resource. The self-signed certificate has been generated and imported into ACM. The generated PEM-encoded self-signed certificate (`lb_ingress_cert.pem`) is also exported in the module directory (`04-security`).
6 |
7 | As part of the setup process, the imported self-signed ACM certificate is associated with the HTTPS listener of the `istio-ingress` load balancer resource using annotations on the `istio-ingress` service.
8 |
9 | 
10 |
11 | *Figure: Istio Ingress Gateway using ACM*
12 |
13 |
14 |
15 | ## Prerequisites
16 |
17 | **Note:** Make sure that the required resources have been created following the [setup instructions](../README.md#setup).
18 |
19 | **:warning: WARN:** Some of the commands shown in this section refer to relative file paths that assume the current directory is `istio-on-eks/modules/04-security/terraform`. If your current directory does not match this path, then either change to the above directory to execute the commands or if executing from any other directory, then adjust the file paths like `../scripts/helpers.sh` and `../lb_ingress_cert.pem` accordingly.
20 |
21 |
22 | ## Verify Setup
23 |
24 | **Describe the service to verify the annotations**
25 |
26 | ```bash
27 | kubectl get svc/istio-ingress -n istio-ingress -o jsonpath='{.metadata.annotations}' | jq -r
28 | ```
29 |
30 | The output should look similar to the below sample output.
31 |
32 | ```json
33 | {
34 | "meta.helm.sh/release-name": "istio-ingress",
35 | "meta.helm.sh/release-namespace": "istio-ingress",
36 | "service.beta.kubernetes.io/aws-load-balancer-attributes": "load_balancing.cross_zone.enabled=true",
37 | "service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "tcp",
38 | "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "ip",
39 | "service.beta.kubernetes.io/aws-load-balancer-scheme": "internet-facing",
40 | "service.beta.kubernetes.io/aws-load-balancer-ssl-cert": "arn:aws:acm:REGION:ACCOUNT_ID:certificate/CERT_ID",
41 | "service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy": "ELBSecurityPolicy-TLS13-1-2-2021-06",
42 | "service.beta.kubernetes.io/aws-load-balancer-ssl-ports": "https",
43 | "service.beta.kubernetes.io/aws-load-balancer-type": "external"
44 | }
45 | ```
46 |
47 | Note the below annotation values
48 |
49 | | Annotation | Value |
50 | |------------|-------|
51 | | `service.beta.kubernetes.io/aws-load-balancer-ssl-cert` | ARN of imported self-signed ACM certificate |
52 | | `service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy` | `ELBSecurityPolicy-TLS13-1-2-2021-06` |
53 | | `service.beta.kubernetes.io/aws-load-balancer-ssl-ports` | `https` |
54 |
55 | The application gateway definition is patched to add a server route for HTTPS traffic on port 443.
56 |
57 | **Describe the `gateway` resource and verify that there are routes for port 80 and port 443 respectively**
58 |
59 | ```bash
60 | kubectl get gateway/productapp-gateway -n workshop -o jsonpath='{.spec.servers}' | jq -r
61 | ```
62 |
63 | The output should be similar to below sample.
64 |
65 | ```json
66 | [
67 | {
68 | "hosts": [
69 | "*"
70 | ],
71 | "port": {
72 | "name": "http",
73 | "number": 80,
74 | "protocol": "HTTP"
75 | }
76 | },
77 | {
78 | "hosts": [
79 | "*"
80 | ],
81 | "port": {
82 | "name": "https",
83 | "number": 443,
84 | "protocol": "HTTP"
85 | }
86 | }
87 | ]
88 | ```
89 |
90 | **Verify that the gateway is accepting HTTPS traffic and forwarding to the right application**
91 |
92 | First export the ingress gateway load balancer endpoint.
93 |
94 | ```bash
95 | ISTIO_INGRESS_URL=$(kubectl get service/istio-ingress -n istio-ingress -o json | jq -r '.status.loadBalancer.ingress[0].hostname')
96 | ```
97 |
98 | Next, send a request to the ingress gateway load balancer endpoint using `curl` referring to the exported self-signed certificate using the `--cacert` flag for certificate verification.
99 |
100 | ```bash
101 | curl --cacert ../lb_ingress_cert.pem https://$ISTIO_INGRESS_URL -s -o /dev/null -w "HTTP Response: %{http_code}\n"
102 | ```
103 |
104 | The output should look similar to the sample output below.
105 |
106 | ```
107 | HTTP Response: 200
108 | ```
109 |
110 | **Run a load test against the ingress gateway, so that it's easy to visualize the traffic in Kiali**
111 |
112 | ```bash
113 | siege https://$ISTIO_INGRESS_URL -c 5 -d 10 -t 2M
114 | ```
115 |
116 | Check the status of each connection in Kiali. Navigate to the Graph tab and enable Security in the Display menu. Then you will see a Lock icon to show where mTLS encryption is happening in the traffic flow graph.
117 |
118 | 
119 |
120 | Congratulations!!! You've now successfully validated ingress security in Istio on Amazon EKS. :tada:
121 |
122 | You can either move on to the other sub-modules or if you're done with this module then refer to [Clean up](https://github.com/aws-samples/istio-on-eks/blob/main/modules/04-security/README.md#clean-up) to clean up all the resources provisioned in this module.
123 |
--------------------------------------------------------------------------------
/modules/04-security/opa-external-authorization/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | configMapGenerator:
4 | - name: opa-policy
5 | namespace: workshop
6 | files:
7 | - policy.rego
8 | generatorOptions:
9 | disableNameSuffixHash: true
--------------------------------------------------------------------------------
/modules/04-security/opa-external-authorization/opa-ext-authz-serviceentry.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1beta1
2 | kind: ServiceEntry
3 | metadata:
4 | name: opa-ext-authz-grpc-local
5 | spec:
6 | hosts:
7 | - "opa-ext-authz-grpc.local"
8 | endpoints:
9 | - address: "127.0.0.1"
10 | ports:
11 | - name: grpc
12 | number: 9191
13 | protocol: GRPC
14 | resolution: DNS
--------------------------------------------------------------------------------
/modules/04-security/opa-external-authorization/opa-ext-authz-sidecar-assign.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: mutations.gatekeeper.sh/v1
2 | kind: Assign
3 | metadata:
4 | name: opa-istio
5 | spec:
6 | applyTo:
7 | - groups: [""]
8 | kinds: ["Pod"]
9 | versions: ["v1"]
10 | match:
11 | scope: Namespaced
12 | kinds:
13 | - apiGroups: ["*"]
14 | kinds: ["Pod"]
15 | namespaceSelector:
16 | matchLabels:
17 | opa-istio-injection: enabled
18 | location: "spec.containers[name:opa-istio]"
19 | parameters:
20 | pathTests:
21 | - subPath: "spec.containers[name:opa-istio]"
22 | condition: MustNotExist
23 | assign:
24 | value:
25 | image: openpolicyagent/opa:0.60.0-istio-static
26 | name: opa-istio
27 | args:
28 | - run
29 | - --server
30 | - --addr=localhost:8181
31 | - --diagnostic-addr=0.0.0.0:8282
32 | - --disable-telemetry
33 | - --set
34 | - "plugins.envoy_ext_authz_grpc.addr=:9191"
35 | - --set
36 | - "plugins.envoy_ext_authz_grpc.path=istio/authz/allow"
37 | - --set
38 | - "plugins.envoy_ext_authz_grpc.enable-reflection=true"
39 | - --set
40 | - "decision_logs.console=true"
41 | - --watch
42 | - /policy/policy.rego
43 | volumeMounts:
44 | - mountPath: /policy
45 | name: opa-policy
46 | readinessProbe:
47 | httpGet:
48 | path: /health?plugins
49 | port: 8282
50 | livenessProbe:
51 | httpGet:
52 | path: /health?plugins
53 | port: 8282
54 | ---
55 | apiVersion: mutations.gatekeeper.sh/v1
56 | kind: Assign
57 | metadata:
58 | name: opa-policy
59 | spec:
60 | applyTo:
61 | - groups: [""]
62 | kinds: ["Pod"]
63 | versions: ["v1"]
64 | match:
65 | scope: Namespaced
66 | kinds:
67 | - apiGroups: ["*"]
68 | kinds: ["Pod"]
69 | namespaceSelector:
70 | matchLabels:
71 | opa-istio-injection: enabled
72 | location: "spec.volumes[name:opa-policy]"
73 | parameters:
74 | pathTests:
75 | - subPath: "spec.volumes[name:opa-policy]"
76 | condition: MustNotExist
77 | assign:
78 | value:
79 | name: opa-policy
80 | configMap:
81 | name: opa-policy
82 | ---
--------------------------------------------------------------------------------
/modules/04-security/opa-external-authorization/policy.rego:
--------------------------------------------------------------------------------
1 | package istio.authz
2 |
3 | import future.keywords
4 |
5 | import input.attributes.destination.principal as principal
6 | import input.attributes.request.http as http_request
7 |
8 | default allow := false
9 |
10 | allow if {
11 | some unprotected_operation in unprotected_operations
12 | unprotected_operation.method = http_request.method
13 | unprotected_operation.principal = principal
14 | regex.match(unprotected_operation.path, http_request.path)
15 | }
16 |
17 | allow if {
18 | some r in roles_for_user
19 | required_roles[r]
20 | }
21 |
22 | roles_for_user contains user_role
23 |
24 | required_roles contains r if {
25 | perm := role_perms[r][_]
26 | perm.method = http_request.method
27 | perm.path = http_request.path
28 | perm.principal = principal
29 | }
30 |
31 | user_role := payload.realm_access.roles[0] if {
32 | [_, encoded] := split(http_request.headers.authorization, " ")
33 | [_, payload, _] := io.jwt.decode(encoded)
34 | }
35 |
36 | role_perms := {
37 | "guest": [{
38 | "method": "GET",
39 | "path": "/",
40 | "principal": "spiffe://cluster.local/ns/workshop/sa/frontend-sa",
41 | }],
42 | "admin": [
43 | {
44 | "method": "GET",
45 | "path": "/",
46 | "principal": "spiffe://cluster.local/ns/workshop/sa/frontend-sa",
47 | },
48 | {
49 | "method": "POST",
50 | "path": "/products",
51 | "principal": "spiffe://cluster.local/ns/workshop/sa/frontend-sa",
52 | },
53 | ],
54 | }
55 |
56 | unprotected_operations := [
57 | {
58 | "method": "GET",
59 | "path": "^/products/$",
60 | "principal": "spiffe://cluster.local/ns/workshop/sa/productcatalog-sa",
61 | },
62 | {
63 | "method": "GET",
64 | "path": "^/products/\\d+$",
65 | "principal": "spiffe://cluster.local/ns/workshop/sa/productcatalog-sa",
66 | },
67 | {
68 | "method": "POST",
69 | "path": "^/products/\\d+$",
70 | "principal": "spiffe://cluster.local/ns/workshop/sa/productcatalog-sa",
71 | },
72 | {
73 | "method": "GET",
74 | "path": "^/catalogDetail$",
75 | "principal": "spiffe://cluster.local/ns/workshop/sa/catalogdetail-sa",
76 | },
77 | ]
78 |
--------------------------------------------------------------------------------
/modules/04-security/opa-external-authorization/productapp-authorizationpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1
2 | kind: AuthorizationPolicy
3 | metadata:
4 | name: productapp
5 | namespace: workshop
6 | spec:
7 | action: CUSTOM
8 | provider:
9 | name: opa-ext-authz-grpc
10 | rules:
11 | - to:
12 | - operation:
13 | paths: ["*"]
14 | ---
--------------------------------------------------------------------------------
/modules/04-security/request-authentication/ingress-authorizationpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1
2 | kind: AuthorizationPolicy
3 | metadata:
4 | name: istio-ingress
5 | namespace: istio-ingress
6 | spec:
7 | selector:
8 | matchLabels:
9 | istio: ingressgateway
10 | action: DENY
11 | rules:
12 | - from:
13 | - source:
14 | notRequestPrincipals: ["*"]
15 | to:
16 | - operation:
17 | ports: ["80","443"]
18 |
--------------------------------------------------------------------------------
/modules/04-security/request-authentication/ingress-requestauthentication-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1
2 | kind: RequestAuthentication
3 | metadata:
4 | name: istio-ingress
5 | namespace: istio-ingress
6 | spec:
7 | selector:
8 | matchLabels:
9 | istio: ingressgateway
10 | jwtRules:
11 | - issuer: ISSUER
12 | jwksUri: JWKS_URI
13 | audiences:
14 | - productapp
15 | forwardOriginalToken: true
--------------------------------------------------------------------------------
/modules/04-security/scripts/cleanup-crds.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for CRD in 'secrets-store.csi.x-k8s.io' 'cert-manager' 'gatekeeper'; do
3 | kubectl get crd -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep $CRD | xargs kubectl delete crd
4 | done
--------------------------------------------------------------------------------
/modules/04-security/terraform/eks.tf:
--------------------------------------------------------------------------------
1 | module "eks" {
2 | source = "terraform-aws-modules/eks/aws"
3 | version = "~> 20.0"
4 |
5 | cluster_name = var.name
6 | cluster_version = "1.29"
7 | cluster_endpoint_public_access = true
8 |
9 | # Give the Terraform identity admin access to the cluster
10 | # which will allow resources to be deployed into the cluster
11 | enable_cluster_creator_admin_permissions = true
12 |
13 | cluster_addons = {
14 | coredns = {}
15 | kube-proxy = {}
16 | vpc-cni = {}
17 | }
18 |
19 | vpc_id = module.vpc.vpc_id
20 | subnet_ids = module.vpc.private_subnets
21 |
22 | eks_managed_node_groups = {
23 | initial = {
24 | instance_types = ["m5.large"]
25 |
26 | min_size = 1
27 | max_size = 5
28 | desired_size = 2
29 | }
30 | }
31 |
32 | # EKS K8s API cluster needs to be able to talk with the EKS worker nodes with port 15017/TCP and 15012/TCP which is used by Istio
33 | # Istio in order to create sidecar needs to be able to communicate with webhook and for that network passage to EKS is needed.
34 | node_security_group_additional_rules = {
35 | ingress_15017 = {
36 | description = "Cluster API - Istio Webhook namespace.sidecar-injector.istio.io"
37 | protocol = "TCP"
38 | from_port = 15017
39 | to_port = 15017
40 | type = "ingress"
41 | source_cluster_security_group = true
42 | }
43 | ingress_15012 = {
44 | description = "Cluster API to nodes ports/protocols"
45 | protocol = "TCP"
46 | from_port = 15012
47 | to_port = 15012
48 | type = "ingress"
49 | source_cluster_security_group = true
50 | }
51 | }
52 |
53 | depends_on = [module.vpc]
54 | }
55 |
56 | resource "null_resource" "export_kube_config" {
57 | provisioner "local-exec" {
58 | command = "aws eks --region=${var.aws_region} update-kubeconfig --name=${module.eks.cluster_name}"
59 | interpreter = ["/bin/bash", "-c"]
60 | environment = {
61 | AWS_REGION = var.aws_region
62 | }
63 | }
64 | }
65 |
66 | # Create AWS Private CA in short-lived CA mode for mTLS
67 | # https://aws.github.io/aws-eks-best-practices/security/docs/network/#short-lived-ca-mode-for-mutual-tls-between-workloads
68 | resource "aws_acmpca_certificate_authority" "mtls" {
69 | type = "ROOT"
70 | usage_mode = "SHORT_LIVED_CERTIFICATE"
71 |
72 | certificate_authority_configuration {
73 | key_algorithm = "RSA_4096"
74 | signing_algorithm = "SHA512WITHRSA"
75 |
76 | subject {
77 | common_name = var.name
78 | }
79 | }
80 | }
81 |
82 | # Issue Root CA Certificate
83 | resource "aws_acmpca_certificate" "mtls" {
84 | certificate_authority_arn = aws_acmpca_certificate_authority.mtls.arn
85 | certificate_signing_request = aws_acmpca_certificate_authority.mtls.certificate_signing_request
86 | signing_algorithm = "SHA512WITHRSA"
87 |
88 | template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1"
89 |
90 | validity {
91 | type = "YEARS"
92 | value = 10
93 | }
94 | }
95 |
96 | # Associate the Root CA Certificate with the CA
97 | resource "aws_acmpca_certificate_authority_certificate" "mtls" {
98 | certificate_authority_arn = aws_acmpca_certificate_authority.mtls.arn
99 |
100 | certificate = aws_acmpca_certificate.mtls.certificate
101 | certificate_chain = aws_acmpca_certificate.mtls.certificate_chain
102 | }
103 |
104 | resource "kubernetes_namespace_v1" "aws_privateca_issuer" {
105 | metadata {
106 | name = "aws-privateca-issuer"
107 | }
108 | }
109 |
110 | resource "kubernetes_namespace_v1" "cert_manager" {
111 | metadata {
112 | name = "cert-manager"
113 | }
114 | }
115 |
116 | resource "kubernetes_namespace_v1" "external_secrets" {
117 | metadata {
118 | name = "external-secrets"
119 | }
120 | }
121 |
122 | resource "kubernetes_namespace_v1" "gatekeeper" {
123 | metadata {
124 | name = "gatekeeper-system"
125 | labels = {
126 | "admission.gatekeeper.sh/ignore" = "no-self-managing"
127 | }
128 | }
129 | }
130 |
131 | module "ebs_csi_driver_irsa" {
132 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/irsa?ref=v4.32.1"
133 |
134 | create_kubernetes_namespace = false
135 | create_kubernetes_service_account = false
136 | kubernetes_namespace = "kube-system"
137 | kubernetes_service_account = "ebs-csi-controller-sa"
138 | irsa_iam_policies = [
139 | "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
140 | ]
141 |
142 | eks_cluster_id = module.eks.cluster_name
143 | eks_oidc_provider_arn = module.eks.oidc_provider_arn
144 | }
145 |
146 | # module "eks_blueprints_addons_core" {
147 | # source = "aws-ia/eks-blueprints-addons/aws"
148 | # version = "~> 1.16.2"
149 |
150 | # cluster_name = module.eks.cluster_name
151 | # cluster_endpoint = module.eks.cluster_endpoint
152 | # cluster_version = module.eks.cluster_version
153 | # oidc_provider_arn = module.eks.oidc_provider_arn
154 |
155 | # # Add-ons
156 | # enable_aws_load_balancer_controller = true
157 | # }
158 |
159 |
160 |
161 | module "eks_blueprints_addons" {
162 | source = "aws-ia/eks-blueprints-addons/aws"
163 | version = "~> 1.16.2"
164 |
165 | cluster_name = module.eks.cluster_name
166 | cluster_endpoint = module.eks.cluster_endpoint
167 | cluster_version = module.eks.cluster_version
168 | oidc_provider_arn = module.eks.oidc_provider_arn
169 |
170 | # EKS Add-on
171 | eks_addons = {
172 | aws-ebs-csi-driver = {
173 | preserve = false
174 | service_account_role_arn = module.ebs_csi_driver_irsa.irsa_iam_role_arn
175 | }
176 | }
177 |
178 | enable_external_secrets = true
179 | external_secrets = {
180 | namespace = kubernetes_namespace_v1.external_secrets.metadata[0].name
181 | create_namespace = false
182 | }
183 |
184 | enable_gatekeeper = true
185 | gatekeeper = {
186 | namespace = kubernetes_namespace_v1.gatekeeper.metadata[0].name
187 | create_namespace = false
188 | }
189 |
190 | enable_secrets_store_csi_driver = true
191 | enable_secrets_store_csi_driver_provider_aws = true
192 |
193 | enable_cert_manager = true
194 | cert_manager = {
195 | namespace = kubernetes_namespace_v1.cert_manager.metadata[0].name
196 | create_namespace = false
197 | }
198 |
199 | enable_aws_privateca_issuer = true
200 | aws_privateca_issuer = {
201 | acmca_arn = aws_acmpca_certificate_authority.mtls.arn
202 | namespace = kubernetes_namespace_v1.aws_privateca_issuer.metadata[0].name
203 | create_namespace = false
204 | }
205 |
206 | helm_releases = {
207 | cert-manager-csi-driver = {
208 | description = "Cert Manager CSI Driver Add-on"
209 | chart = "cert-manager-csi-driver"
210 | namespace = "cert-manager"
211 | chart_version = "v0.5.0"
212 | repository = "https://charts.jetstack.io"
213 | }
214 | }
215 |
216 | depends_on = [ module.aws_load_balancer_controller, aws_acmpca_certificate_authority_certificate.mtls ]
217 | }
--------------------------------------------------------------------------------
/modules/04-security/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.aws_region
3 | default_tags {
4 | tags = {
5 | Project = "Istio-on-EKS"
6 | Module = "04-security"
7 | GithubRepo = "github.com/aws-samples/istio-on-eks"
8 | Name = var.name
9 | }
10 | }
11 | }
12 | # The kubernetes, helm, and kubectl providers below all authenticate to the EKS cluster with a short-lived token obtained via "aws eks get-token" (awscli must be installed locally).
13 | provider "kubernetes" {
14 | host = module.eks.cluster_endpoint
15 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
16 | exec {
17 | api_version = "client.authentication.k8s.io/v1beta1"
18 | command = "aws"
19 | # This requires the awscli to be installed locally where Terraform is executed
20 | args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
21 | }
22 | }
23 |
24 | provider "helm" {
25 | kubernetes {
26 | host = module.eks.cluster_endpoint
27 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
28 | exec {
29 | api_version = "client.authentication.k8s.io/v1beta1"
30 | command = "aws"
31 | # This requires the awscli to be installed locally where Terraform is executed
32 | args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
33 | }
34 | }
35 | }
36 | # kubectl provider: retries each apply up to 10 times and ignores any local kubeconfig file.
37 | provider "kubectl" {
38 | apply_retry_count = 10
39 | host = module.eks.cluster_endpoint
40 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
41 | load_config_file = false
42 | exec {
43 | api_version = "client.authentication.k8s.io/v1beta1"
44 | command = "aws"
45 | # This requires the awscli to be installed locally where Terraform is executed
46 | args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
47 | }
48 | }
49 | # Data sources: current AWS partition and the availability zones of the configured region.
50 | data "aws_partition" "current" {}
51 |
52 | data "aws_availability_zones" "available" {}
--------------------------------------------------------------------------------
/modules/04-security/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "configure_kubectl" {
2 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
3 | value = "aws eks --region ${var.aws_region} update-kubeconfig --name ${module.eks.cluster_name}"
4 | }
5 |
6 | output "ca_cert_export_path" {
7 | description = "CA certificate has been exported at the following location."
8 | value = "${path.module}/${local_file.lb_ingress_cert.filename}"
9 | }
10 |
11 | output "next_steps" {
12 | description = "Next steps"
13 | value = <<-EOT
14 | echo ">>>Check whether SPIRE has issued an identity to the workload"
15 |
16 | kubectl exec -i -t -n spire -c spire-server \
17 | "$(kubectl get pod -n spire -l app=spire-server -o jsonpath='{.items[0].metadata.name}')" \
18 | -- ./bin/spire-server entry show -socketPath /run/spire/sockets/server.sock
19 | EOT
20 | }
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/deploy-helloworld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Deploy the helloworld sample (v1 in the foo cluster, v2 in the bar cluster) and the sleep client into both clusters.
3 | export CTX_CLUSTER1=foo-eks-cluster
4 | export CTX_CLUSTER2=bar-eks-cluster
5 |
6 | # Create the demo namespaces in both clusters and enable Istio sidecar injection on them.
7 | kubectl create --context="${CTX_CLUSTER1}" namespace sleep
8 | kubectl create --context="${CTX_CLUSTER1}" namespace helloworld
9 | kubectl create --context="${CTX_CLUSTER2}" namespace sleep
10 | kubectl create --context="${CTX_CLUSTER2}" namespace helloworld
11 |
12 | kubectl label --context="${CTX_CLUSTER1}" namespace sleep \
13 | istio-injection=enabled
14 | kubectl label --context="${CTX_CLUSTER1}" namespace helloworld \
15 | istio-injection=enabled
16 |
17 | kubectl label --context="${CTX_CLUSTER2}" namespace sleep \
18 | istio-injection=enabled
19 | kubectl label --context="${CTX_CLUSTER2}" namespace helloworld \
20 | istio-injection=enabled
21 | # Apply only the resources labeled service=helloworld (the Service) to BOTH clusters first.
22 | kubectl apply --context="${CTX_CLUSTER1}" \
23 | -f ./examples/helloworld-foo.yaml \
24 | -l service=helloworld -n helloworld
25 | kubectl apply --context="${CTX_CLUSTER2}" \
26 | -f ./examples/helloworld-bar.yaml \
27 | -l service=helloworld -n helloworld
28 | # Now deploy the version-specific workloads and wait for them to become ready.
29 | kubectl apply --context="${CTX_CLUSTER1}" \
30 | -f ./examples/helloworld-foo.yaml -n helloworld
31 |
32 | kubectl -n helloworld --context="${CTX_CLUSTER1}" rollout status deploy helloworld-v1
33 | kubectl -n helloworld get pod --context="${CTX_CLUSTER1}" -l app=helloworld
34 |
35 | kubectl apply --context="${CTX_CLUSTER2}" \
36 | -f ./examples/helloworld-bar.yaml -n helloworld
37 |
38 | kubectl -n helloworld --context="${CTX_CLUSTER2}" rollout status deploy helloworld-v2
39 | kubectl -n helloworld get pod --context="${CTX_CLUSTER2}" -l app=helloworld
40 |
41 | # Deploy the sleep client used to exercise the cross-cluster calls.
42 | kubectl apply --context="${CTX_CLUSTER1}" \
43 | -f ./examples/sleep-foo.yaml -n sleep
44 | kubectl apply --context="${CTX_CLUSTER2}" \
45 | -f ./examples/sleep-bar.yaml -n sleep
46 |
47 | kubectl -n sleep --context="${CTX_CLUSTER1}" rollout status deploy sleep
48 | kubectl -n sleep get pod --context="${CTX_CLUSTER1}" -l app=sleep
49 |
50 | kubectl -n sleep --context="${CTX_CLUSTER2}" rollout status deploy sleep
51 | kubectl -n sleep get pod --context="${CTX_CLUSTER2}" -l app=sleep
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/helloworld-bar.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: helloworld
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: helloworld
10 | labels:
11 | app: helloworld
12 | service: helloworld
13 | spec:
14 | ports:
15 | - port: 5000
16 | name: http
17 | selector:
18 | app: helloworld
19 | ---
20 | apiVersion: apps/v1
21 | kind: Deployment
22 | metadata:
23 | name: helloworld-v2
24 | labels:
25 | app: helloworld
26 | version: v2
27 | spec:
28 | replicas: 1
29 | selector:
30 | matchLabels:
31 | app: helloworld
32 | version: v2
33 | template:
34 | metadata:
35 | labels:
36 | app: helloworld
37 | version: v2
38 | annotations:
39 | inject.istio.io/templates: "sidecar,spire"
40 | spiffe.io/federatesWith: "foo.com"
41 | spec:
42 | serviceAccountName: helloworld
43 | containers:
44 | - name: helloworld
45 | image: docker.io/istio/examples-helloworld-v2
46 | resources:
47 | requests:
48 | cpu: "100m"
49 | imagePullPolicy: IfNotPresent #Always
50 | ports:
51 | - containerPort: 5000
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/helloworld-foo.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: helloworld
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: helloworld
10 | labels:
11 | app: helloworld
12 | service: helloworld
13 | spec:
14 | ports:
15 | - port: 5000
16 | name: http
17 | selector:
18 | app: helloworld
19 | ---
20 | apiVersion: apps/v1
21 | kind: Deployment
22 | metadata:
23 | name: helloworld-v1
24 | labels:
25 | app: helloworld
26 | version: v1
27 | spec:
28 | replicas: 1
29 | selector:
30 | matchLabels:
31 | app: helloworld
32 | version: v1
33 | template:
34 | metadata:
35 | labels:
36 | app: helloworld
37 | version: v1
38 | annotations:
39 | inject.istio.io/templates: "sidecar,spire"
40 | spiffe.io/federatesWith: "bar.com"
41 | spec:
42 | serviceAccountName: helloworld
43 | containers:
44 | - name: helloworld
45 | image: docker.io/istio/examples-helloworld-v1
46 | resources:
47 | requests:
48 | cpu: "100m"
49 | imagePullPolicy: IfNotPresent #Always
50 | ports:
51 | - containerPort: 5000
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/helloworld-gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: helloworld-gateway
5 | spec:
6 | selector:
7 | istio: ingressgateway # use istio default controller
8 | servers:
9 | - port:
10 | number: 80
11 | name: http
12 | protocol: HTTP
13 | hosts:
14 | - "*"
15 | ---
16 | apiVersion: networking.istio.io/v1alpha3
17 | kind: VirtualService
18 | metadata:
19 | name: helloworld
20 | spec:
21 | hosts:
22 | - "*"
23 | gateways:
24 | - helloworld-gateway
25 | http:
26 | - match:
27 | - uri:
28 | exact: /hello
29 | route:
30 | - destination:
31 | host: helloworld
32 | port:
33 | number: 5000
34 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/sleep-bar.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | annotations:
51 | inject.istio.io/templates: "sidecar,spire"
52 | spiffe.io/federatesWith: "foo.com"
53 | spec:
54 | terminationGracePeriodSeconds: 0
55 | serviceAccountName: sleep
56 | containers:
57 | - name: sleep
58 | image: curlimages/curl
59 | command: ["/bin/sleep", "infinity"]
60 | imagePullPolicy: IfNotPresent
61 | volumeMounts:
62 | - mountPath: /tmp
63 | name: tmp
64 | volumes:
65 | - name: tmp
66 | emptyDir: {}
67 | - name: workload-socket
68 | csi:
69 | driver: "csi.spiffe.io"
70 | readOnly: true
71 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/examples/sleep-foo.yaml:
--------------------------------------------------------------------------------
1 | # Copyright Istio Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ##################################################################################################
16 | # Sleep service
17 | ##################################################################################################
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: sleep
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: sleep
27 | labels:
28 | app: sleep
29 | service: sleep
30 | spec:
31 | ports:
32 | - port: 80
33 | name: http
34 | selector:
35 | app: sleep
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: sleep
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: sleep
46 | template:
47 | metadata:
48 | labels:
49 | app: sleep
50 | annotations:
51 | inject.istio.io/templates: "sidecar,spire"
52 | spiffe.io/federatesWith: "bar.com"
53 | spec:
54 | terminationGracePeriodSeconds: 0
55 | serviceAccountName: sleep
56 | containers:
57 | - name: sleep
58 | image: curlimages/curl
59 | command: ["/bin/sleep", "infinity"]
60 | imagePullPolicy: IfNotPresent
61 | volumeMounts:
62 | - mountPath: /tmp
63 | name: tmp
64 | volumes:
65 | - name: tmp
66 | emptyDir: {}
67 | - name: workload-socket
68 | csi:
69 | driver: "csi.spiffe.io"
70 | readOnly: true
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/auth.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1beta1
2 | kind: PeerAuthentication
3 | metadata:
4 | name: default
5 | namespace: istio-system
6 | spec:
7 | mtls:
8 | mode: STRICT
9 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/bar-istio-conf.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: install.istio.io/v1alpha1
2 | kind: IstioOperator
3 | metadata:
4 | namespace: istio-system
5 | spec:
6 | profile: default
7 | meshConfig:
8 | trustDomain: bar.com
9 | trustDomainAliases:
10 | - foo.com
11 | values:
12 | global:
13 | meshID: devup-mesh
14 | multiCluster:
15 | clusterName: bar-eks-cluster
16 | network: bar-network
17 | # This is used to customize the sidecar template
18 | sidecarInjectorWebhook:
19 | templates:
20 | spire: |
21 | spec:
22 | containers:
23 | - name: istio-proxy
24 | volumeMounts:
25 | - name: workload-socket
26 | mountPath: /run/secrets/workload-spiffe-uds
27 | readOnly: true
28 | volumes:
29 | - name: workload-socket
30 | csi:
31 | driver: "csi.spiffe.io"
32 | readOnly: true
33 | components:
34 | pilot:
35 | k8s:
36 | env:
37 | # If enabled, if user introduces new intermediate plug-in CA, user need not to restart istiod to pick up certs. Istiod picks newly added intermediate plug-in CA certs and updates it. Plug-in new Root-CA not supported.
38 | - name: AUTO_RELOAD_PLUGIN_CERTS
39 | value: "true"
40 | ingressGateways:
41 | - name: istio-ingressgateway
42 | enabled: true
43 | label:
44 | istio: ingressgateway
45 | k8s:
46 | podAnnotations:
47 | spiffe.io/federatesWith: "foo.com"
48 | overlays:
49 | - apiVersion: apps/v1
50 | kind: Deployment
51 | name: istio-ingressgateway
52 | patches:
53 | - path: spec.template.spec.volumes.[name:workload-socket]
54 | value:
55 | name: workload-socket
56 | csi:
57 | driver: "csi.spiffe.io"
58 | readOnly: true
59 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
60 | value:
61 | name: workload-socket
62 | mountPath: "/run/secrets/workload-spiffe-uds"
63 | readOnly: true
64 | - name: istio-eastwestgateway
65 | enabled: true
66 | label:
67 | istio: eastwestgateway
68 | app: istio-eastwestgateway
69 | topology.istio.io/network: bar-network
70 | k8s:
71 | podAnnotations:
72 | spiffe.io/federatesWith: "foo.com"
73 | overlays:
74 | - apiVersion: apps/v1
75 | kind: Deployment
76 | name: istio-eastwestgateway
77 | patches:
78 | - path: spec.template.spec.volumes.[name:workload-socket]
79 | value:
80 | name: workload-socket
81 | csi:
82 | driver: "csi.spiffe.io"
83 | readOnly: true
84 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
85 | value:
86 | name: workload-socket
87 | mountPath: "/run/secrets/workload-spiffe-uds"
88 | readOnly: true
89 | env:
90 | - name: ISTIO_META_ROUTER_MODE
91 | value: "sni-dnat"
92 | - name: ISTIO_META_REQUESTED_NETWORK_VIEW
93 | value: bar-network
94 | service:
95 | ports:
96 | - name: status-port
97 | port: 15021
98 | targetPort: 15021
99 | - name: tls
100 | port: 15443
101 | targetPort: 15443
102 | - name: tls-istiod
103 | port: 15012
104 | targetPort: 15012
105 | - name: tls-webhook
106 | port: 15017
107 | targetPort: 15017
108 | serviceAnnotations:
109 | service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
110 | service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
111 | service.beta.kubernetes.io/aws-load-balancer-type: "alb"
112 | service.beta.kubernetes.io/aws-load-balancer-internal: "true"
113 | # NOTE(review): "aws-load-balancer-type: alb" is not a recognized value for a LoadBalancer Service annotation (the in-tree controller expects "nlb", the AWS Load Balancer Controller "external") — confirm this gateway actually receives an NLB, which the tcp/proxy-protocol annotations imply.
114 | # These configurations fixed target group health checks
115 | service.beta.kubernetes.io/aws-load-balancer-healthcheck-port: "traffic-port"
116 | service.beta.kubernetes.io/aws-load-balancer-healthcheck-protocol: "tcp"
117 | service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/cleanup-istio.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Remove the Istio control planes from both federated clusters.
3 | export CTX_CLUSTER1=foo-eks-cluster
4 | export CTX_CLUSTER2=bar-eks-cluster
5 |
6 | istioctl uninstall --purge --context "${CTX_CLUSTER1}" --skip-confirmation
7 | istioctl uninstall --purge --context "${CTX_CLUSTER2}" --skip-confirmation
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/foo-istio-conf.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: install.istio.io/v1alpha1
2 | kind: IstioOperator
3 | metadata:
4 | namespace: istio-system
5 | spec:
6 | profile: default
7 | meshConfig:
8 | trustDomain: foo.com
9 | trustDomainAliases:
10 | - bar.com
11 | values:
12 | global:
13 | meshID: devup-mesh
14 | multiCluster:
15 | clusterName: foo-eks-cluster
16 | network: foo-network
17 | # This is used to customize the sidecar template
18 | sidecarInjectorWebhook:
19 | templates:
20 | spire: |
21 | spec:
22 | containers:
23 | - name: istio-proxy
24 | volumeMounts:
25 | - name: workload-socket
26 | mountPath: /run/secrets/workload-spiffe-uds
27 | readOnly: true
28 | volumes:
29 | - name: workload-socket
30 | csi:
31 | driver: "csi.spiffe.io"
32 | readOnly: true
33 | components:
34 | pilot:
35 | k8s:
36 | env:
37 | # If enabled, if user introduces new intermediate plug-in CA, user need not to restart istiod to pick up certs. Istiod picks newly added intermediate plug-in CA certs and updates it. Plug-in new Root-CA not supported.
38 | - name: AUTO_RELOAD_PLUGIN_CERTS
39 | value: "true"
40 | ingressGateways:
41 | - name: istio-ingressgateway
42 | enabled: true
43 | label:
44 | istio: ingressgateway
45 | k8s:
46 | podAnnotations:
47 | spiffe.io/federatesWith: "bar.com"
48 | overlays:
49 | - apiVersion: apps/v1
50 | kind: Deployment
51 | name: istio-ingressgateway
52 | patches:
53 | - path: spec.template.spec.volumes.[name:workload-socket]
54 | value:
55 | name: workload-socket
56 | csi:
57 | driver: "csi.spiffe.io"
58 | readOnly: true
59 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
60 | value:
61 | name: workload-socket
62 | mountPath: "/run/secrets/workload-spiffe-uds"
63 | readOnly: true
64 | - name: istio-eastwestgateway
65 | enabled: true
66 | label:
67 | istio: eastwestgateway
68 | app: istio-eastwestgateway
69 | topology.istio.io/network: foo-network
70 | k8s:
71 | podAnnotations:
72 | spiffe.io/federatesWith: "bar.com"
73 | overlays:
74 | - apiVersion: apps/v1
75 | kind: Deployment
76 | name: istio-eastwestgateway
77 | patches:
78 | - path: spec.template.spec.volumes.[name:workload-socket]
79 | value:
80 | name: workload-socket
81 | csi:
82 | driver: "csi.spiffe.io"
83 | readOnly: true
84 | - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts.[name:workload-socket]
85 | value:
86 | name: workload-socket
87 | mountPath: "/run/secrets/workload-spiffe-uds"
88 | readOnly: true
89 | env:
90 | - name: ISTIO_META_ROUTER_MODE
91 | value: "sni-dnat"
92 | - name: ISTIO_META_REQUESTED_NETWORK_VIEW
93 | value: foo-network
94 | service:
95 | ports:
96 | - name: status-port
97 | port: 15021
98 | targetPort: 15021
99 | - name: tls
100 | port: 15443
101 | targetPort: 15443
102 | - name: tls-istiod
103 | port: 15012
104 | targetPort: 15012
105 | - name: tls-webhook
106 | port: 15017
107 | targetPort: 15017
108 | serviceAnnotations:
109 | service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
110 | service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
111 | service.beta.kubernetes.io/aws-load-balancer-type: "alb"
112 | service.beta.kubernetes.io/aws-load-balancer-internal: "true"
113 | # NOTE(review): "aws-load-balancer-type: alb" is not a recognized value for a LoadBalancer Service annotation (the in-tree controller expects "nlb", the AWS Load Balancer Controller "external") — confirm this gateway actually receives an NLB, which the tcp/proxy-protocol annotations imply.
114 | # These configurations fixed target group health checks
115 | service.beta.kubernetes.io/aws-load-balancer-healthcheck-port: "traffic-port"
116 | service.beta.kubernetes.io/aws-load-balancer-healthcheck-protocol: "tcp"
117 | service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/install-istio.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Install Istio on both clusters using the foo/bar IstioOperator configs and wire up multicluster discovery.
3 | set -e
4 |
5 |
6 | export CTX_CLUSTER1=foo-eks-cluster
7 | export CTX_CLUSTER2=bar-eks-cluster
8 | # EKS API server endpoints of the two clusters, passed as positional arguments.
9 | eks_api_foo="$1"
10 | eks_api_bar="$2"
11 |
12 | istioctl install -f ./istio/foo-istio-conf.yaml --context="${CTX_CLUSTER1}" --skip-confirmation
13 | kubectl apply -f ./istio/auth.yaml --context="${CTX_CLUSTER1}"
14 | kubectl apply -f ./istio/istio-ew-gw.yaml --context="${CTX_CLUSTER1}"
15 |
16 | istioctl install -f ./istio/bar-istio-conf.yaml --context="${CTX_CLUSTER2}" --skip-confirmation
17 | kubectl apply -f ./istio/auth.yaml --context="${CTX_CLUSTER2}"
18 | kubectl apply -f ./istio/istio-ew-gw.yaml --context="${CTX_CLUSTER2}"
19 |
20 | # Exchange remote secrets so each cluster's istiod can discover endpoints in the peer cluster.
21 | istioctl x create-remote-secret --context="${CTX_CLUSTER1}" --name=foo-eks-cluster \
22 | --server="${eks_api_foo}" \
23 | | kubectl apply -f - --context="${CTX_CLUSTER2}"
24 |
25 | istioctl x create-remote-secret --context="${CTX_CLUSTER2}" --name=bar-eks-cluster \
26 | --server="${eks_api_bar}" \
27 | | kubectl apply -f - --context="${CTX_CLUSTER1}"
28 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/istio/istio-ew-gw.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: cross-network-gateway
5 | spec:
6 | selector:
7 | istio: eastwestgateway
8 | servers:
9 | - port:
10 | number: 15443
11 | name: tls
12 | protocol: TLS
13 | tls:
14 | mode: AUTO_PASSTHROUGH
15 | hosts:
16 | - "*.local"
17 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/spire/cleanup-spire.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | export CTX_CLUSTER1=foo-eks-cluster
5 | export CTX_CLUSTER2=bar-eks-cluster
6 |
7 | kubectl config use-context ${CTX_CLUSTER1}
8 |
9 | kubectl delete CustomResourceDefinition spiffeids.spiffeid.spiffe.io
10 | kubectl delete -n spire configmap k8s-workload-registrar
11 | kubectl delete -n spire configmap trust-bundle
12 | kubectl delete -n spire serviceaccount spire-agent
13 | kubectl delete -n spire configmap spire-agent
14 | kubectl delete -n spire daemonset spire-agent
15 | kubectl delete csidriver csi.spiffe.io
16 | kubectl delete -n spire configmap spire-server
17 | kubectl delete -n spire serviceaccount spire-server
18 | kubectl delete -n spire service spire-server
19 | kubectl delete -n spire service spire-server-bundle-endpoint
20 | kubectl delete -n spire statefulset spire-server
21 | kubectl delete clusterrole k8s-workload-registrar-role spire-server-trust-role spire-agent-cluster-role
22 | kubectl delete clusterrolebinding k8s-workload-registrar-role-binding spire-server-trust-role-binding spire-agent-cluster-role-binding
23 | kubectl delete namespace spire
24 |
25 | kubectl config use-context ${CTX_CLUSTER2}
26 |
27 | kubectl delete CustomResourceDefinition spiffeids.spiffeid.spiffe.io
28 | kubectl delete -n spire configmap k8s-workload-registrar
29 | kubectl delete -n spire configmap trust-bundle
30 | kubectl delete -n spire serviceaccount spire-agent
31 | kubectl delete -n spire configmap spire-agent
32 | kubectl delete -n spire daemonset spire-agent
33 | kubectl delete csidriver csi.spiffe.io
34 | kubectl delete -n spire configmap spire-server
35 | kubectl delete -n spire serviceaccount spire-server
36 | kubectl delete -n spire service spire-server
37 | kubectl delete -n spire service spire-server-bundle-endpoint
38 | kubectl delete -n spire statefulset spire-server
39 | kubectl delete clusterrole k8s-workload-registrar-role spire-server-trust-role spire-agent-cluster-role
40 | kubectl delete clusterrolebinding k8s-workload-registrar-role-binding spire-server-trust-role-binding spire-agent-cluster-role-binding
41 | kubectl delete namespace spire
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/spire/configmaps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # This remains empty but needs to be present for proper ca functioning
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: istio-ca-root-cert
7 | namespace: default
8 | ---
9 | # This remains empty but needs to be present for proper ca functioning
10 | apiVersion: v1
11 | kind: ConfigMap
12 | metadata:
13 | name: istio-ca-root-cert
14 | namespace: istio-system
15 | ---
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/spire/install-spire.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Install SPIRE on both clusters, then exchange their trust bundles to federate foo.com and bar.com.
3 | set -e
4 |
5 | export CTX_CLUSTER1=foo-eks-cluster
6 | export CTX_CLUSTER2=bar-eks-cluster
7 |
8 | spire_server_node_foo=$(kubectl get nodes -l dedicated=spire-server --context="${CTX_CLUSTER1}" -o jsonpath='{.items[*].metadata.name}')
9 | spire_server_node_bar=$(kubectl get nodes -l dedicated=spire-server --context="${CTX_CLUSTER2}" -o jsonpath='{.items[*].metadata.name}')
10 | # NOTE(review): the sed patterns below are empty ("s//.../g"), which sed rejects when no previous regex exists — the placeholder token that should match the peer spire-server node name appears to have been lost; confirm against foo-spire.yaml / bar-spire.yaml.
11 | # Install Spire on foo cluster
12 | kubectl apply -f ./spire/configmaps.yaml --context="${CTX_CLUSTER1}"
13 | cat ./spire/foo-spire.yaml | sed "s//$spire_server_node_bar/g" | kubectl apply -f - --context="${CTX_CLUSTER1}"
14 |
15 | kubectl -n spire rollout status statefulset spire-server --context="${CTX_CLUSTER1}"
16 | kubectl -n spire rollout status daemonset spire-agent --context="${CTX_CLUSTER1}"
17 |
18 | foo_bundle=$(kubectl exec --context="${CTX_CLUSTER1}" -c spire-server -n spire --stdin spire-server-0 -- /opt/spire/bin/spire-server bundle show -format spiffe -socketPath /run/spire/sockets/server.sock)
19 |
20 | # Install Spire on bar cluster
21 | kubectl apply -f ./spire/configmaps.yaml --context="${CTX_CLUSTER2}"
22 | cat ./spire/bar-spire.yaml | sed "s//$spire_server_node_foo/g" | kubectl apply -f - --context="${CTX_CLUSTER2}"
23 |
24 | kubectl -n spire rollout status statefulset spire-server --context="${CTX_CLUSTER2}"
25 | kubectl -n spire rollout status daemonset spire-agent --context="${CTX_CLUSTER2}"
26 |
27 | bar_bundle=$(kubectl exec --context="${CTX_CLUSTER2}" -c spire-server -n spire --stdin spire-server-0 -- /opt/spire/bin/spire-server bundle show -format spiffe -socketPath /run/spire/sockets/server.sock)
28 |
29 | # Set foo.com bundle to bar.com SPIRE bundle endpoint
30 | kubectl exec --context="${CTX_CLUSTER2}" -c spire-server -n spire --stdin spire-server-0 \
31 | -- /opt/spire/bin/spire-server bundle set -format spiffe -id spiffe://foo.com -socketPath /run/spire/sockets/server.sock <<< "$foo_bundle"
32 |
33 | # Set bar.com bundle to foo.com SPIRE bundle endpoint
34 | kubectl exec --context="${CTX_CLUSTER1}" -c spire-server -n spire --stdin spire-server-0 \
35 | -- /opt/spire/bin/spire-server bundle set -format spiffe -id spiffe://bar.com -socketPath /run/spire/sockets/server.sock <<< "$bar_bundle"
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/0.vpc/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | data "aws_caller_identity" "current" {}
6 | data "aws_availability_zones" "available" {}
7 |
8 | locals {
9 | foo_name = "foo-eks-cluster"
10 | bar_name = "bar-eks-cluster"
11 | region = "eu-west-2"
12 |
13 | foo_vpc_cidr = "10.2.0.0/16"
14 | bar_vpc_cidr = "10.3.0.0/16"
15 | azs = slice(data.aws_availability_zones.available.names, 0, 3)
16 |
17 | account_id = data.aws_caller_identity.current.account_id
18 |
19 | }
20 |
21 | ################################################################################
22 | # FOO VPC
23 | ################################################################################
24 |
25 | module "foo_vpc" {
26 | source = "terraform-aws-modules/vpc/aws"
27 | version = "~> 5.0"
28 |
29 | name = local.foo_name
30 | cidr = local.foo_vpc_cidr
31 |
32 | azs = local.azs
33 | private_subnets = [for k, v in local.azs : cidrsubnet(local.foo_vpc_cidr, 4, k)]
34 | public_subnets = [for k, v in local.azs : cidrsubnet(local.foo_vpc_cidr, 8, k + 48)]
35 |
36 | enable_nat_gateway = true
37 | single_nat_gateway = true
38 |
39 | public_subnet_tags = {
40 | "kubernetes.io/role/elb" = 1
41 | }
42 |
43 | private_subnet_tags = {
44 | "kubernetes.io/role/internal-elb" = 1
45 | }
46 |
47 | tags = {
48 | Blueprint = local.foo_name
49 | GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
50 | }
51 | }
52 |
53 | ################################################################################
54 | # BAR VPC
55 | ################################################################################
56 |
57 | module "bar_vpc" {
58 | source = "terraform-aws-modules/vpc/aws"
59 | version = "~> 5.0"
60 |
61 | name = local.bar_name
62 | cidr = local.bar_vpc_cidr
63 |
64 | azs = local.azs
65 | private_subnets = [for k, v in local.azs : cidrsubnet(local.bar_vpc_cidr, 4, k)]
66 | public_subnets = [for k, v in local.azs : cidrsubnet(local.bar_vpc_cidr, 8, k + 48)]
67 |
68 | enable_nat_gateway = true
69 | single_nat_gateway = true
70 |
71 | public_subnet_tags = {
72 | "kubernetes.io/role/elb" = 1
73 | }
74 |
75 | private_subnet_tags = {
76 | "kubernetes.io/role/internal-elb" = 1
77 | }
78 |
79 | tags = {
80 | Blueprint = local.bar_name
81 | GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
82 | }
83 | }
84 |
85 | ################################################################################
86 | # Create VPC peering and update the private subnets route tables
87 | ################################################################################
88 |
89 | resource "aws_vpc_peering_connection" "foo_bar" {
90 | peer_owner_id = local.account_id
91 | vpc_id = module.foo_vpc.vpc_id
92 | peer_vpc_id = module.bar_vpc.vpc_id
93 | auto_accept = true
94 | tags = {
95 | Name = "foo-bar"
96 | }
97 | }
98 |
99 | resource "aws_vpc_peering_connection_options" "foo_bar" {
100 | vpc_peering_connection_id = aws_vpc_peering_connection.foo_bar.id
101 | accepter {
102 | allow_remote_vpc_dns_resolution = true
103 | }
104 | requester {
105 | allow_remote_vpc_dns_resolution = true
106 | }
107 | }
108 |
109 | resource "aws_route" "foo_bar" {
110 | route_table_id = module.foo_vpc.private_route_table_ids[0]
111 | destination_cidr_block = local.bar_vpc_cidr
112 | vpc_peering_connection_id = aws_vpc_peering_connection.foo_bar.id
113 | }
114 | # Reverse route: bar's private subnets back to foo's CIDR over the same peering connection.
115 | resource "aws_route" "bar_foo" {
116 | route_table_id = module.bar_vpc.private_route_table_ids[0]
117 | destination_cidr_block = local.foo_vpc_cidr
118 | vpc_peering_connection_id = aws_vpc_peering_connection.foo_bar.id
119 | }
120 |
121 | ################################################################################
122 | # Foo cluster additional security group for cross cluster communication
123 | ################################################################################
124 |
125 | resource "aws_security_group" "foo_eks_cluster_additional_sg" {
126 | name = "foo_eks_cluster_additional_sg"
127 | description = "Allow communication from bar eks cluster SG to foo eks cluster SG"
128 | vpc_id = module.foo_vpc.vpc_id
129 | tags = {
130 | Name = "foo_eks_cluster_additional_sg"
131 | }
132 | }
133 |
134 | resource "aws_vpc_security_group_egress_rule" "foo_eks_cluster_additional_sg_allow_all_4" {
135 | security_group_id = aws_security_group.foo_eks_cluster_additional_sg.id
136 |
137 | ip_protocol = "-1" # all protocols
138 | cidr_ipv4 = "0.0.0.0/0" # unrestricted IPv4 egress
139 | }
140 |
141 | resource "aws_vpc_security_group_egress_rule" "foo_eks_cluster_additional_sg_allow_all_6" {
142 | security_group_id = aws_security_group.foo_eks_cluster_additional_sg.id
143 |
144 | ip_protocol = "-1" # all protocols
145 | cidr_ipv6 = "::/0" # unrestricted IPv6 egress
146 | }
147 |
148 | ################################################################################
149 | # Bar cluster additional security group for cross cluster communication
150 | ################################################################################
151 |
152 | resource "aws_security_group" "bar_eks_cluster_additional_sg" {
153 | name = "bar_eks_cluster_additional_sg"
154 | description = "Allow communication from foo eks cluster SG to bar eks cluster SG"
155 | vpc_id = module.bar_vpc.vpc_id
156 | tags = {
157 | Name = "bar_eks_cluster_additional_sg"
158 | }
159 | }
160 |
161 | resource "aws_vpc_security_group_egress_rule" "bar_eks_cluster_additional_sg_allow_all_4" {
162 | security_group_id = aws_security_group.bar_eks_cluster_additional_sg.id
163 |
164 | ip_protocol = "-1" # all protocols
165 | cidr_ipv4 = "0.0.0.0/0" # unrestricted IPv4 egress
166 | }
167 | resource "aws_vpc_security_group_egress_rule" "bar_eks_cluster_additional_sg_allow_all_6" {
168 | security_group_id = aws_security_group.bar_eks_cluster_additional_sg.id
169 |
170 | ip_protocol = "-1" # all protocols
171 | cidr_ipv6 = "::/0" # unrestricted IPv6 egress
172 | }
173 |
174 | ################################################################################
175 | # cross SG ingress rules bar eks cluster allow to foo eks cluster
176 | ################################################################################
177 |
178 | resource "aws_vpc_security_group_ingress_rule" "bar_eks_cluster_to_cluster_1" {
179 | security_group_id = aws_security_group.foo_eks_cluster_additional_sg.id
180 |
181 | cidr_ipv4 = local.bar_vpc_cidr # NOTE(review): admits the ENTIRE bar VPC CIDR, not just the bar cluster SG as the SG description suggests — confirm this breadth is intended
182 | ip_protocol = "-1" # all protocols
183 | }
184 |
185 | ################################################################################
186 | # cross SG ingress rules foo eks cluster allow to bar eks cluster
187 | ################################################################################
188 |
189 | resource "aws_vpc_security_group_ingress_rule" "foo_eks_cluster_to_cluster_2" {
190 | security_group_id = aws_security_group.bar_eks_cluster_additional_sg.id
191 |
192 | cidr_ipv4 = local.foo_vpc_cidr # mirror rule: admits the entire foo VPC CIDR
193 | ip_protocol = "-1" # all protocols
194 | }
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/0.vpc/outputs.tf:
--------------------------------------------------------------------------------
1 | output "foo_vpc_id" {
2 | description = "Foo cluster Amazon EKS VPC ID"
3 | value = module.foo_vpc.vpc_id
4 | }
5 |
6 | output "bar_vpc_id" {
7 | description = "Bar cluster Amazon EKS VPC ID"
8 | value = module.bar_vpc.vpc_id
9 | }
10 |
11 | output "foo_subnet_ids" {
12 | description = "Foo cluster private subnet IDs"
13 | value = module.foo_vpc.private_subnets
14 | }
15 |
16 | output "bar_subnet_ids" {
17 | description = "Bar cluster private subnet IDs"
18 | value = module.bar_vpc.private_subnets
19 | }
20 |
21 | output "foo_vpc_cidr" {
22 | description = "Foo cluster Amazon EKS VPC CIDR block"
23 | value = local.foo_vpc_cidr
24 | }
25 |
26 | output "bar_vpc_cidr" {
27 | description = "Bar cluster Amazon EKS VPC CIDR block"
28 | value = local.bar_vpc_cidr
29 | }
30 |
31 | output "foo_private_route_table_ids" {
32 | description = "List of IDs of foo cluster private route tables"
33 | value = module.foo_vpc.private_route_table_ids
34 | }
35 |
36 | output "bar_private_route_table_ids" {
37 | description = "List of IDs of bar cluster private route tables"
38 | value = module.bar_vpc.private_route_table_ids
39 | }
40 |
41 | output "foo_additional_sg_id" {
42 | description = "Foo cluster additional security group ID"
43 | value = aws_security_group.foo_eks_cluster_additional_sg.id
44 | }
45 |
46 | output "bar_additional_sg_id" {
47 | description = "Bar cluster additional security group ID"
48 | value = aws_security_group.bar_eks_cluster_additional_sg.id
49 | }
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/0.vpc/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/patterns/eks-istio-mesh-spire-federation/terraform/0.vpc/variables.tf
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/0.vpc/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.9" # NOTE(review): main.tf uses aws_vpc_security_group_ingress/egress_rule, which may require provider >= 5.0 — confirm this floor
8 | }
9 | }
10 | }
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/cert-manager-manifests/istio-cert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: Certificate
4 | metadata:
5 | name: cacerts
6 | namespace: istio-system
7 | spec:
8 | secretName: cacerts # Istio's plugin-CA convention: istiod reads its signing CA from the "cacerts" secret
9 | duration: 1440h # 60-day certificate lifetime
10 | renewBefore: 360h # rotate 15 days before expiry
11 | commonName: istiod.istio-system.svc
12 | isCA: true # intermediate CA — presumably used by istiod to sign workload certs; confirm mesh CA wiring
13 | usages:
14 | - digital signature
15 | - key encipherment
16 | - cert sign
17 | dnsNames:
18 | - istiod.istio-system.svc
19 | issuerRef:
20 | name: selfsigned-ca # ClusterIssuer defined in self-signed-ca.yaml
21 | kind: ClusterIssuer
22 | group: cert-manager.io
/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/cert-manager-manifests/self-signed-ca.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: Issuer
4 | metadata:
5 | name: selfsigned # bootstrap issuer: exists only to self-sign the root certificate below
6 | namespace: cert-manager
7 | spec:
8 | selfSigned: {}
9 | ---
10 | apiVersion: cert-manager.io/v1
11 | kind: Certificate
12 | metadata:
13 | name: selfsigned-ca
14 | namespace: cert-manager
15 | spec:
16 | isCA: true
17 | duration: 21600h # 900-day root CA lifetime
18 | secretName: selfsigned-ca
19 | commonName: certmanager-ca
20 | subject:
21 | organizations:
22 | - cert-manager
23 | issuerRef:
24 | name: selfsigned # signed by the bootstrap issuer above
25 | kind: Issuer
26 | group: cert-manager.io
27 | ---
28 | apiVersion: cert-manager.io/v1
29 | kind: ClusterIssuer
30 | metadata:
31 | name: selfsigned-ca # CA issuer backed by the root generated above; referenced by istio-cert.yaml
32 | spec:
33 | ca:
34 | secretName: selfsigned-ca
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_endpoint" {
2 | description = "Endpoint for your Kubernetes API server"
3 | value = module.eks.cluster_endpoint
4 | }
5 |
6 | output "configure_kubectl" {
7 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
8 | value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
9 | }
10 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/variables.tf
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/1.foo-eks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.9"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.17"
12 | }
13 | helm = {
14 | source = "hashicorp/helm"
15 | version = ">= 2.8"
16 | }
17 | kubectl = {
18 | source = "gavinbunney/kubectl"
19 | version = ">= 1.14"
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/cert-manager-manifests/istio-cert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: Certificate
4 | metadata:
5 | name: cacerts
6 | namespace: istio-system
7 | spec:
8 | secretName: cacerts # Istio's plugin-CA convention: istiod reads its signing CA from the "cacerts" secret
9 | duration: 1440h # 60-day certificate lifetime
10 | renewBefore: 360h # rotate 15 days before expiry
11 | commonName: istiod.istio-system.svc
12 | isCA: true # intermediate CA — presumably used by istiod to sign workload certs; confirm mesh CA wiring
13 | usages:
14 | - digital signature
15 | - key encipherment
16 | - cert sign
17 | dnsNames:
18 | - istiod.istio-system.svc
19 | issuerRef:
20 | name: selfsigned-ca # ClusterIssuer defined in self-signed-ca.yaml
21 | kind: ClusterIssuer
22 | group: cert-manager.io
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/cert-manager-manifests/self-signed-ca.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: Issuer
4 | metadata:
5 | name: selfsigned # bootstrap issuer: exists only to self-sign the root certificate below
6 | namespace: cert-manager
7 | spec:
8 | selfSigned: {}
9 | ---
10 | apiVersion: cert-manager.io/v1
11 | kind: Certificate
12 | metadata:
13 | name: selfsigned-ca
14 | namespace: cert-manager
15 | spec:
16 | isCA: true
17 | duration: 21600h # 900-day root CA lifetime
18 | secretName: selfsigned-ca
19 | commonName: certmanager-ca
20 | subject:
21 | organizations:
22 | - cert-manager
23 | issuerRef:
24 | name: selfsigned # signed by the bootstrap issuer above
25 | kind: Issuer
26 | group: cert-manager.io
27 | ---
28 | apiVersion: cert-manager.io/v1
29 | kind: ClusterIssuer
30 | metadata:
31 | name: selfsigned-ca # CA issuer backed by the root generated above; referenced by istio-cert.yaml
32 | spec:
33 | ca:
34 | secretName: selfsigned-ca
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_endpoint" {
2 | description = "Endpoint for your Kubernetes API server"
3 | value = module.eks.cluster_endpoint
4 | }
5 |
6 | output "configure_kubectl" {
7 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
8 | value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
9 | }
10 |
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/variables.tf
--------------------------------------------------------------------------------
/patterns/eks-istio-mesh-spire-federation/terraform/2.bar-eks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.9"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.17"
12 | }
13 | helm = {
14 | source = "hashicorp/helm"
15 | version = ">= 2.8"
16 | }
17 | kubectl = {
18 | source = "gavinbunney/kubectl"
19 | version = ">= 1.14"
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/README.md:
--------------------------------------------------------------------------------
1 | ## Business Use Cases for Istio Multi-Cluster
2 |
3 | **Fault Tolerance and High Availability:** For applications that require global reach or disaster recovery capabilities, Istio Multi-Cluster allows you to distribute your services across clusters in multiple geographic regions or availability zones, enhancing resilience to failures and outages. This improves availability, fault tolerance, and reduces latency by serving users from the nearest cluster.
4 |
5 | **Isolation and Multitenancy:** In large organizations with multiple teams or business units, Istio Multi-Cluster allows you to isolate workloads into separate clusters while still maintaining a unified service mesh. This promotes multitenancy, security, and resource isolation between different applications or environments. It also strengthens security posture by implementing strict access controls and network segmentation, preventing unauthorized inter-service communications.
6 |
7 | **Compliance and Regulatory Requirements:** In some industries, there may be requirements to keep certain data or workloads in specific geographic regions or environments. Istio Multi-Cluster enables you to deploy and manage applications across these isolated environments while still maintaining a unified service mesh. This architecture ensures data segregation and compliance with regulations such as GDPR and PCI DSS.
8 |
9 | **Scalability, Performance and Load Balancing:** Istio Multi-Cluster achieves horizontal scaling and performance optimization by adding clusters in different regions, catering to traffic surges and reducing latency. Istio Multi-Cluster also provides load balancing across clusters, enabling you to distribute the load and avoid overloading a single cluster.
10 |
11 | **Canary Deployments and A/B Testing:** When rolling out new versions of applications, you can use Istio Multi-Cluster to deploy the new version to a subset of clusters, allowing you to test and validate the changes before rolling them out to all clusters. This enables canary deployments and A/B testing across multiple clusters.
12 |
13 | **Migration and Modernization:** If you're migrating applications from legacy systems to Kubernetes or modernizing your applications, Istio Multi-Cluster can help you manage the transition by allowing you to run the old and new versions of your applications in separate clusters while still maintaining connectivity and consistent policies.
14 |
15 | 
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/istio-multi-cluster-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/patterns/multi-cluster-multi-primary/istio-multi-cluster-architecture.png
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: multicluster-gateway-n-apps
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: cross-network-gateway
5 | namespace: istio-ingress
6 | spec:
7 | selector:
8 | istio: eastwestgateway # binds to the dedicated east-west gateway workload
9 | servers:
10 | - port:
11 | number: 15443 # Istio's conventional port for cross-network mTLS traffic
12 | name: tls
13 | protocol: TLS
14 | tls:
15 | mode: AUTO_PASSTHROUGH # SNI-based passthrough; mTLS terminates at the destination workload, not the gateway
16 | hosts:
17 | - "*.local" # exposes all cluster-local services to the peer network
18 |
19 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/helloworld-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: helloworld-{{ .Values.version }}
5 | labels:
6 | app: helloworld
7 | version: {{ .Values.version }}
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: helloworld
13 | version: {{ .Values.version }}
14 | template:
15 | metadata:
16 | labels:
17 | app: helloworld
18 | version: {{ .Values.version }}
19 | spec:
20 | containers:
21 | - name: helloworld
22 | image: docker.io/istio/examples-helloworld-{{ .Values.version }}
23 | resources:
24 | requests:
25 | cpu: "100m"
26 | imagePullPolicy: IfNotPresent #Always
27 | ports:
28 | - containerPort: 5000
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/helloworld-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: helloworld
5 | labels:
6 | app: helloworld
7 | service: helloworld
8 | spec:
9 | ports:
10 | - port: 5000
11 | name: http
12 | selector:
13 | app: helloworld
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/remote-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | annotations:
5 | networking.istio.io/cluster: {{ .Values.clusterName }}
6 | labels:
7 | istio/multiCluster: "true" # istiod watches secrets carrying this label to discover remote clusters
8 | name: istio-remote-secret-{{ .Values.clusterName }}
9 | namespace: istio-system
10 | # The value below is a complete kubeconfig for the remote cluster; no comments
11 | # may be added inside the block scalar or they would become part of the secret.
12 | stringData:
13 | {{ .Values.clusterName }}: |
14 | apiVersion: v1
15 | clusters:
16 | - cluster:
17 | certificate-authority-data: {{ .Values.certificateAuthorityData }}
18 | server: {{ .Values.server }}
19 | name: {{ .Values.clusterName }}
20 | contexts:
21 | - context:
22 | cluster: {{ .Values.clusterName }}
23 | user: {{ .Values.clusterName }}
24 | name: {{ .Values.clusterName }}
25 | current-context: {{ .Values.clusterName }}
26 | kind: Config
27 | preferences: {}
28 | users:
29 | - name: {{ .Values.clusterName }}
30 | user:
31 | token: {{ .Values.token }}
32 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/sleep-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: sleep
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: sleep
10 | template:
11 | metadata:
12 | labels:
13 | app: sleep
14 | spec:
15 | terminationGracePeriodSeconds: 0
16 | serviceAccountName: sleep
17 | containers:
18 | - name: sleep
19 | image: curlimages/curl
20 | command: ["/bin/sleep", "infinity"]
21 | imagePullPolicy: IfNotPresent
22 | volumeMounts:
23 | - mountPath: /etc/sleep/tls
24 | name: secret-volume
25 | volumes:
26 | - name: secret-volume
27 | secret:
28 | secretName: sleep-secret
29 | optional: true
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/sleep-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: sleep
5 | labels:
6 | app: sleep
7 | service: sleep
8 | spec:
9 | ports:
10 | - port: 80
11 | name: http
12 | selector:
13 | app: sleep
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/templates/sleep-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sleep
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/charts/multicluster-gateway-n-apps/values.yaml:
--------------------------------------------------------------------------------
1 | version: v1
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_availability_zones" "available" {}
2 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | region = "us-west-2"
3 | azs = slice(data.aws_availability_zones.available.names, 0, 3) # first three AZs in the region
4 |
5 | # VPC specific settings
6 | vpc_cidr = "10.1.0.0/16" # cluster 1; non-overlapping with cluster 2's CIDR
7 | vpc_2_cidr = "10.2.0.0/16"
8 |
9 | # EKS specific settings
10 | eks_1_name = "eks-1"
11 | eks_2_name = "eks-2"
12 | eks_1_IPv6 = true # NOTE(review): presumably toggles IPv6/dual-stack cluster networking — confirm against the consuming module
13 | eks_2_IPv6 = true
14 | eks_cluster_version = "1.32"
15 |
16 | # Istio specific settings
17 | meshID = "mesh1" # shared mesh ID: both clusters join the same mesh
18 | networkName1 = "network1" # distinct network names => multi-network topology (east-west gateways)
19 | networkName2 = "network2"
20 | clusterName1 = "cluster1"
21 | clusterName2 = "cluster2"
22 |
23 | istio_chart_url = "https://istio-release.storage.googleapis.com/charts"
24 | istio_chart_version = "1.24.3"
25 |
26 | tags = {
27 | GithubRepo = "github.com/aws_ia/terraform-aws-eks-blueprints"
28 | }
29 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/root_cert.tf:
--------------------------------------------------------------------------------
1 | resource "tls_private_key" "root_ca_key" {
2 | algorithm = "RSA"
3 | rsa_bits = 2048
4 | }
5 |
6 | # Self-signed root CA shared by both clusters so each cluster's Istio
7 | # intermediate certificate chains to a common trust root.
8 | resource "tls_self_signed_cert" "root_ca" {
9 | private_key_pem = tls_private_key.root_ca_key.private_key_pem
10 | is_ca_certificate = true
11 |
12 | subject {
13 | common_name = "multicluster.istio.io"
14 | }
15 |
16 | validity_period_hours = 87600 # 10 years
17 |
18 | allowed_uses = [
19 | "cert_signing",
20 | "crl_signing",
21 | "code_signing",
22 | "server_auth",
23 | "client_auth",
24 | "digital_signature",
25 | "key_encipherment",
26 | ]
27 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/scripts/check-cross-cluster-sync.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # POSIX sh: use '.' — 'source' is a bashism and fails under dash
6 | . "$(dirname "$(realpath "$0")")"/set-cluster-contexts.sh "$1" "$2"
7 |
8 | # Print the helloworld endpoints known to the sleep pod's sidecar in the given context
9 | cross_cluster_sync() {
10 | ctx=$1
11 | POD_NAME=$(kubectl get pod --context="$ctx" -l app=sleep -o jsonpath='{.items[0].metadata.name}' -n sample)
12 | istioctl --context "$ctx" proxy-config endpoint "$POD_NAME" -n sample | grep helloworld
13 | }
14 |
15 | for ctx in $CTX_CLUSTER1 $CTX_CLUSTER2
16 | do
17 | # printf is portable; 'echo "\n"' behavior differs between shells
18 | printf '\nCross cluster sync check for %s:\n' "$ctx"
19 | cross_cluster_sync "$ctx"
20 | done
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/scripts/check-lb-readiness.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # POSIX sh: use '.' — 'source' is a bashism and fails under dash
6 | . "$(dirname "$(realpath "$0")")"/set-cluster-contexts.sh "$1" "$2"
7 |
8 | # Report target health for the east-west gateway load balancer's port-15443 listener
9 | readiness() {
10 | CTX=$1
11 |
12 | EW_LB_NAME=$(kubectl get svc istio-eastwestgateway -n istio-ingress --context "$CTX" -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}')
13 |
14 | EW_LB_ARN=$(aws elbv2 describe-load-balancers | \
15 | jq -r --arg EW_LB_NAME "$EW_LB_NAME" \
16 | '.LoadBalancers[] | select(.DNSName == $EW_LB_NAME) | .LoadBalancerArn')
17 |
18 | TG_ARN=$(aws elbv2 describe-listeners --load-balancer-arn "$EW_LB_ARN" | jq -r '.Listeners[] | select(.Port == 15443) | .DefaultActions[0].TargetGroupArn')
19 |
20 | aws elbv2 describe-target-health --target-group-arn "$TG_ARN" | jq -r '.TargetHealthDescriptions[0]'
21 | }
22 |
23 | for ctx in $CTX_CLUSTER1 $CTX_CLUSTER2
24 | do
25 | # printf is portable; 'echo "\n"' behavior differs between shells
26 | printf '\nReadiness check for %s:\n' "$ctx"
27 | readiness "$ctx"
28 | done
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/scripts/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | terraform init
6 |
7 | # Stage 1: networking, clusters, CA secrets, and the LB controller must exist
8 | # before istiod and the gateways can be installed.
9 | terraform apply --auto-approve \
10 | -target=module.vpc_1 \
11 | -target=module.vpc_2 \
12 | -target=module.eks_1 \
13 | -target=module.eks_2 \
14 | -target=kubernetes_secret.cacerts_cluster1 \
15 | -target=kubernetes_secret.cacerts_cluster2 \
16 | -target=module.eks_1_addons.module.aws_load_balancer_controller \
17 | -target=module.eks_2_addons.module.aws_load_balancer_controller
18 |
19 | # Stage 2: install istiod in both clusters (no trailing '\' on the last target —
20 | # the original stray continuation silently joined the blank line that follows).
21 | terraform apply --auto-approve \
22 | -target="module.eks_1_addons.helm_release.this[\"istiod\"]" \
23 | -target="module.eks_2_addons.helm_release.this[\"istiod\"]"
24 |
25 | sleep 5
26 |
27 | # Stage 3: everything else (east-west gateways, sample apps, remote secrets).
28 | terraform apply --auto-approve
29 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/scripts/destroy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # POSIX sh: use '.' — 'source' is a bashism and fails under dash
6 | . "$(dirname "$(realpath "$0")")"/set-cluster-contexts.sh "$1" "$2"
7 |
8 | # Delete the LB-backed gateway services first so the AWS load balancers
9 | # (created out-of-band by the LB controller) are removed before terraform destroy.
10 | kubectl delete svc --all -n istio-ingress --context "$CTX_CLUSTER1"
11 | kubectl delete svc --all -n istio-ingress --context "$CTX_CLUSTER2"
12 |
13 | # Succeeds (returns 0) while either cluster still has a LoadBalancer service.
14 | lbServicesExist() {
15 | o1=$(kubectl get svc -n istio-ingress --context "$CTX_CLUSTER1" -o json | jq '.items[] | select(.spec.type=="LoadBalancer") // ""' | jq -s length)
16 | o2=$(kubectl get svc -n istio-ingress --context "$CTX_CLUSTER2" -o json | jq '.items[] | select(.spec.type=="LoadBalancer") // ""' | jq -s length)
17 | # '[[ ]]' is a bashism; POSIX '[' keeps this working under /bin/sh (e.g. dash)
18 | if [ "$o1" -gt 0 ] || [ "$o2" -gt 0 ]; then
19 | echo "There are $o1 and $o2 LB services in $CTX_CLUSTER1 and $CTX_CLUSTER2 respectively"
20 | true
21 | else
22 | false
23 | fi
24 | }
25 |
26 | while lbServicesExist
27 | do
28 | echo "Waiting for 5 (more) seconds for the LB services to clear up ..."
29 | sleep 5
30 | done
31 |
32 | terraform destroy --auto-approve
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/scripts/set-cluster-contexts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # Usage: set-cluster-contexts.sh [cluster1-name] [cluster2-name]
4 | # Updates kubeconfig for both clusters and exports CTX_CLUSTER1/CTX_CLUSTER2
5 | # (cluster ARNs, which are the kubeconfig context names).
6 | CLUSTER_1_NAME=${1:-eks-1}
7 | CLUSTER_2_NAME=${2:-eks-2}
8 |
9 | aws eks update-kubeconfig --region us-west-2 --name "$CLUSTER_1_NAME"
10 | aws eks update-kubeconfig --region us-west-2 --name "$CLUSTER_2_NAME"
11 |
12 | # Pin --region so the lookup matches the kubeconfig entries above even when the
13 | # caller's default AWS region differs (the original omitted it here).
14 | export CTX_CLUSTER1=$(aws eks describe-cluster --region us-west-2 --name "$CLUSTER_1_NAME" | jq -r '.cluster.arn')
15 | export CTX_CLUSTER2=$(aws eks describe-cluster --region us-west-2 --name "$CLUSTER_2_NAME" | jq -r '.cluster.arn')
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/multi-network/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.47"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.9"
12 | }
13 | kubernetes = {
14 | source = "hashicorp/kubernetes"
15 | version = ">= 2.20"
16 | }
17 | }
18 |
19 | # ## Used for end-to-end testing on project; update to suit your needs
20 | # backend "s3" {
21 | # bucket = "terraform-ssp-github-actions-state"
22 | # region = "us-west-2"
23 | # key = "e2e/istio/terraform.tfstate"
24 | # }
25 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: multicluster-gateway-n-apps
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: cross-network-gateway
5 | namespace: istio-ingress
6 | spec:
7 | selector:
8 | istio: eastwestgateway # binds to the dedicated east-west gateway workload
9 | servers:
10 | - port:
11 | number: 15443 # Istio's conventional port for cross-network mTLS traffic
12 | name: tls
13 | protocol: TLS
14 | tls:
15 | mode: AUTO_PASSTHROUGH # SNI-based passthrough; mTLS terminates at the destination workload, not the gateway
16 | hosts:
17 | - "*.local" # exposes all cluster-local services to the peer network
18 |
19 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/helloworld-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: helloworld-{{ .Values.version }} # one Deployment per app version (e.g. helloworld-v1)
5 | labels:
6 | app: helloworld
7 | version: {{ .Values.version }}
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: helloworld
13 | version: {{ .Values.version }}
14 | template:
15 | metadata:
16 | labels:
17 | app: helloworld
18 | version: {{ .Values.version }} # version label lets Istio distinguish subsets of the same app
19 | spec:
20 | containers:
21 | - name: helloworld
22 | image: docker.io/istio/examples-helloworld-{{ .Values.version }} # official Istio sample image, tag per version
23 | resources:
24 | requests:
25 | cpu: "100m"
26 | imagePullPolicy: IfNotPresent # switch to Always when iterating on the image
27 | ports:
28 | - containerPort: 5000 # must match the helloworld Service port
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/helloworld-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: helloworld
5 | labels:
6 | app: helloworld
7 | service: helloworld
8 | spec:
9 | ports:
10 | - port: 5000 # matches containerPort of the helloworld Deployments
11 | name: http # port name drives Istio protocol detection
12 | selector:
13 | app: helloworld # intentionally version-agnostic: selects pods of every version
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/remote-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret # remote-cluster kubeconfig consumed by istiod for cross-cluster service discovery
3 | metadata:
4 | annotations:
5 | networking.istio.io/cluster: {{ .Values.clusterName }}
6 | labels:
7 | istio/multiCluster: "true" # label istiod watches to discover remote-cluster secrets
8 | name: istio-remote-secret-{{ .Values.clusterName }}
9 | namespace: istio-system
10 | stringData: # NOTE: value below is a literal kubeconfig block scalar; do not add comments inside it
11 | {{ .Values.clusterName }}: |
12 | apiVersion: v1
13 | clusters:
14 | - cluster:
15 | certificate-authority-data: {{ .Values.certificateAuthorityData }}
16 | server: {{ .Values.server }}
17 | name: {{ .Values.clusterName }}
18 | contexts:
19 | - context:
20 | cluster: {{ .Values.clusterName }}
21 | user: {{ .Values.clusterName }}
22 | name: {{ .Values.clusterName }}
23 | current-context: {{ .Values.clusterName }}
24 | kind: Config
25 | preferences: {}
26 | users:
27 | - name: {{ .Values.clusterName }}
28 | user:
29 | token: {{ .Values.token }}
30 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/sleep-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: sleep # minimal client workload used to send test traffic inside the mesh
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: sleep
10 | template:
11 | metadata:
12 | labels:
13 | app: sleep
14 | spec:
15 | terminationGracePeriodSeconds: 0 # test pod; terminate immediately on delete
16 | serviceAccountName: sleep
17 | containers:
18 | - name: sleep
19 | image: curlimages/curl # provides curl for in-mesh connectivity checks
20 | command: ["/bin/sleep", "infinity"] # keep the container alive indefinitely
21 | imagePullPolicy: IfNotPresent
22 | volumeMounts:
23 | - mountPath: /etc/sleep/tls
24 | name: secret-volume
25 | volumes:
26 | - name: secret-volume
27 | secret:
28 | secretName: sleep-secret
29 | optional: true # pod still starts when sleep-secret does not exist
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/sleep-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: sleep
5 | labels:
6 | app: sleep
7 | service: sleep
8 | spec:
9 | ports:
10 | - port: 80
11 | name: http # port name drives Istio protocol detection
12 | selector:
13 | app: sleep
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/templates/sleep-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sleep # referenced by serviceAccountName in the sleep Deployment
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/charts/multicluster-gateway-n-apps/values.yaml:
--------------------------------------------------------------------------------
1 | version: v1 # helloworld variant to deploy; used as the image tag suffix and version label
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_availability_zones" "available" {} # AZ names for the current region; consumed by locals.azs
2 |
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 |   region = "us-west-2"
3 |   azs    = slice(data.aws_availability_zones.available.names, 0, 3) # first three AZs in the region
4 |
5 |   # VPC specific settings
6 |   vpc_cidr = "10.0.0.0/16"
7 |   vpc_IPv6 = local.eks_1_IPv6 || local.eks_2_IPv6 # the shared VPC needs IPv6 if either cluster uses it
8 |
9 |   # EKS specific settings
10 |   eks_1_IPv6          = true
11 |   eks_2_IPv6          = true
12 |   eks_1_name          = "eks-1"
13 |   eks_2_name          = "eks-2"
14 |   eks_cluster_version = "1.32"
15 |
16 |   # Istio specific settings: both clusters join the same mesh on the same
17 |   # network (single-network multi-primary topology)
18 |   meshID       = "mesh1"
19 |   networkName  = "network1"
20 |   clusterName1 = "cluster1"
21 |   clusterName2 = "cluster2"
22 |
23 |   istio_chart_url     = "https://istio-release.storage.googleapis.com/charts"
24 |   istio_chart_version = "1.24.3"
25 |
26 |   tags = {
27 |     GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" # fixed typo: was "aws_ia", inconsistent with the other blueprints
28 |   }
29 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region # "us-west-2", set in locals.tf
3 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/root_cert.tf:
--------------------------------------------------------------------------------
1 | resource "tls_private_key" "root_ca_key" {
2 | algorithm = "RSA"
3 | rsa_bits = 2048
4 | }
5 |
6 | resource "tls_self_signed_cert" "root_ca" {
7 | private_key_pem = tls_private_key.root_ca_key.private_key_pem
8 | is_ca_certificate = true # self-signed CA certificate; NOTE(review): presumably the shared mesh root for both clusters' cacerts — confirm
9 |
10 | subject {
11 | common_name = "multicluster.istio.io"
12 | }
13 |
14 | validity_period_hours = 87600 # 10 years
15 |
16 | allowed_uses = [
17 | "cert_signing",
18 | "crl_signing",
19 | "code_signing",
20 | "server_auth",
21 | "client_auth",
22 | "digital_signature",
23 | "key_encipherment",
24 | ]
25 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/scripts/check-cross-cluster-sync.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Verifies cross-cluster endpoint sync: for each cluster, lists the helloworld
3 | # endpoints that a sleep pod's Envoy proxy has been pushed by istiod.
4 |
5 | set -e
6 |
7 | # POSIX sh has no `source`; use `.` (exports CTX_CLUSTER1/CTX_CLUSTER2)
8 | . "$(dirname "$(realpath "$0")")/set-cluster-contexts.sh" "$1" "$2"
9 |
10 | cross_cluster_sync() {
11 |   ctx=$1
12 |   # Any sleep pod will do; its proxy config lists the endpoints istiod pushed to it
13 |   POD_NAME=$(kubectl get pod --context="$ctx" -l app=sleep -o jsonpath='{.items[0].metadata.name}' -n sample)
14 |   istioctl --context "$ctx" proxy-config endpoint "$POD_NAME" -n sample | grep helloworld
15 | }
16 |
17 | for ctx in "$CTX_CLUSTER1" "$CTX_CLUSTER2"
18 | do
19 |   # printf is portable; `echo "\n"` behavior differs between shells
20 |   printf '\nCross cluster sync check for %s:\n' "$ctx"
21 |   cross_cluster_sync "$ctx"
22 | done
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/scripts/check-lb-readiness.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Reports target health of each cluster's east-west gateway NLB (port 15443).
3 |
4 | set -e
5 |
6 | # POSIX `.` instead of bash-only `source`; sets CTX_CLUSTER1/CTX_CLUSTER2
7 | . "$(dirname "$(realpath "$0")")/set-cluster-contexts.sh" "$1" "$2"
8 |
9 | readiness() {
10 |   CTX=$1
11 |
12 |   # DNS name of the east-west gateway's load balancer in this cluster
13 |   EW_LB_NAME=$(kubectl get svc istio-eastwestgateway -n istio-ingress --context "$CTX" -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}')
14 |
15 |   # Resolve the DNS name back to the ELBv2 ARN
16 |   EW_LB_ARN=$(aws elbv2 describe-load-balancers | \
17 |     jq -r --arg EW_LB_NAME "$EW_LB_NAME" \
18 |       '.LoadBalancers[] | select(.DNSName == $EW_LB_NAME) | .LoadBalancerArn')
19 |
20 |   # Target group behind the 15443 (cross-network mTLS) listener
21 |   TG_ARN=$(aws elbv2 describe-listeners --load-balancer-arn "$EW_LB_ARN" | jq -r '.Listeners[] | select(.Port == 15443) | .DefaultActions[0].TargetGroupArn')
22 |
23 |   aws elbv2 describe-target-health --target-group-arn "$TG_ARN" | jq -r '.TargetHealthDescriptions[0]'
24 | }
25 |
26 | for ctx in "$CTX_CLUSTER1" "$CTX_CLUSTER2"
27 | do
28 |   # printf is portable; `echo "\n"` behavior differs between shells
29 |   printf '\nReadiness check for %s:\n' "$ctx"
30 |   readiness "$ctx"
31 | done
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/scripts/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Staged Terraform rollout: shared infra first, then istiod, then the rest.
3 |
4 | set -e
5 |
6 | terraform init
7 |
8 | # Stage 1: VPC, both EKS clusters, the shared CA cert secrets, and the
9 | # AWS Load Balancer Controller (needed before any Istio gateway Service).
10 | terraform apply --auto-approve \
11 |   -target=module.vpc \
12 |   -target=module.eks_1 \
13 |   -target=module.eks_2 \
14 |   -target=kubernetes_secret.cacerts_cluster1 \
15 |   -target=kubernetes_secret.cacerts_cluster2 \
16 |   -target=module.eks_1_addons.module.aws_load_balancer_controller \
17 |   -target=module.eks_2_addons.module.aws_load_balancer_controller
18 |
19 | # Stage 2: istiod in both clusters (control planes must be up before
20 | # gateways and apps are installed). Note: no trailing backslash on the last
21 | # -target; the original continued the command onto the following blank line.
22 | terraform apply --auto-approve \
23 |   -target="module.eks_1_addons.helm_release.this[\"istiod\"]" \
24 |   -target="module.eks_2_addons.helm_release.this[\"istiod\"]"
25 |
26 | # Stage 3: everything remaining (east-west gateways, apps, remote secrets).
27 | terraform apply --auto-approve
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/scripts/destroy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Tears down the stack. LoadBalancer Services are deleted first so the AWS
3 | # load balancers (created out-of-band by the LB controller) are gone before
4 | # `terraform destroy` tries to delete the VPC and subnets.
5 |
6 | set -e
7 |
8 | # POSIX `.` instead of bash-only `source`; sets CTX_CLUSTER1/CTX_CLUSTER2
9 | . "$(dirname "$(realpath "$0")")/set-cluster-contexts.sh" "$1" "$2"
10 |
11 | kubectl delete svc --all -n istio-ingress --context "$CTX_CLUSTER1"
12 | kubectl delete svc --all -n istio-ingress --context "$CTX_CLUSTER2"
13 |
14 | # Succeeds (returns 0) while either cluster still has a LoadBalancer Service
15 | lbServicesExist() {
16 |   o1=$(kubectl get svc -n istio-ingress --context "$CTX_CLUSTER1" -o json | jq '.items[] | select(.spec.type=="LoadBalancer") // ""' | jq -s length)
17 |   o2=$(kubectl get svc -n istio-ingress --context "$CTX_CLUSTER2" -o json | jq '.items[] | select(.spec.type=="LoadBalancer") // ""' | jq -s length)
18 |   # [ ] instead of bash-only [[ ]] so this works under dash/ash (#!/bin/sh)
19 |   if [ "$o1" -gt 0 ] || [ "$o2" -gt 0 ]; then
20 |     echo "There are $o1 and $o2 LB services in $CTX_CLUSTER1 and $CTX_CLUSTER2 respectively"
21 |     true
22 |   else
23 |     false
24 |   fi
25 | }
26 |
27 | while lbServicesExist
28 | do
29 |   echo "Waiting for 5 (more) seconds for the LB services to clear up ..."
30 |   sleep 5
31 | done
32 |
33 | terraform destroy --auto-approve
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/scripts/set-cluster-contexts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Resolves the kubectl contexts (cluster ARNs) for both clusters and exports
3 | # them as CTX_CLUSTER1/CTX_CLUSTER2. Meant to be sourced by the other scripts.
4 |
5 | CLUSTER_1_NAME=${1:-eks-1}
6 | CLUSTER_2_NAME=${2:-eks-2}
7 | # Must match locals.tf; override with AWS_REGION if you changed the region there
8 | AWS_REGION=${AWS_REGION:-us-west-2}
9 |
10 | aws eks update-kubeconfig --region "$AWS_REGION" --name "$CLUSTER_1_NAME"
11 | aws eks update-kubeconfig --region "$AWS_REGION" --name "$CLUSTER_2_NAME"
12 |
13 | # $(...) instead of backticks; pass --region so describe-cluster targets the
14 | # same region as update-kubeconfig even when the caller's default differs
15 | export CTX_CLUSTER1=$(aws eks describe-cluster --region "$AWS_REGION" --name "$CLUSTER_1_NAME" | jq -r '.cluster.arn')
16 | export CTX_CLUSTER2=$(aws eks describe-cluster --region "$AWS_REGION" --name "$CLUSTER_2_NAME" | jq -r '.cluster.arn')
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0" # minimum Terraform CLI version
3 |
4 | required_providers { # provider version floors
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.47"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.9"
12 | }
13 | kubernetes = {
14 | source = "hashicorp/kubernetes"
15 | version = ">= 2.20"
16 | }
17 | }
18 |
19 | # ## Used for end-to-end testing on project; update to suit your needs
20 | # backend "s3" {
21 | # bucket = "terraform-ssp-github-actions-state"
22 | # region = "us-west-2"
23 | # key = "e2e/istio/terraform.tfstate"
24 | # }
25 | }
--------------------------------------------------------------------------------
/patterns/multi-cluster-multi-primary/single-network/vpc.tf:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # VPC
3 | ################################################################################
4 |
5 | module "vpc" {
6 |   source  = "terraform-aws-modules/vpc/aws"
7 |   version = "~> 5.19.0"
8 |
9 |   name = "shared-vpc"
10 |   cidr = local.vpc_cidr
11 |
12 |   azs = local.azs
13 |   # /20 private subnets (indices 0-2); /24 public subnets offset by 48 so the ranges never overlap
14 |   private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
15 |   public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
16 |
17 |   enable_nat_gateway = true
18 |   single_nat_gateway = true # one NAT gateway for all AZs (cost over HA)
19 |
20 |   # IPv6 settings - only enabled when at least one cluster is IPv6 (see locals.tf)
21 |   enable_ipv6            = local.vpc_IPv6
22 |   create_egress_only_igw = local.vpc_IPv6
23 |
24 |   # vpc_IPv6 is already a bool; no need for the redundant `== true` comparison
25 |   public_subnet_ipv6_prefixes                    = local.vpc_IPv6 ? [0, 1, 2] : []
26 |   public_subnet_assign_ipv6_address_on_creation  = local.vpc_IPv6
27 |   private_subnet_ipv6_prefixes                   = local.vpc_IPv6 ? [3, 4, 5] : []
28 |   private_subnet_assign_ipv6_address_on_creation = local.vpc_IPv6
29 |
30 |   # Subnet role tags required by the AWS Load Balancer Controller
31 |   public_subnet_tags = {
32 |     "kubernetes.io/role/elb" = 1
33 |   }
34 |
35 |   private_subnet_tags = {
36 |     "kubernetes.io/role/internal-elb" = 1
37 |   }
38 |
39 |   tags = merge({
40 |     Name = "shared-vpc"
41 |   }, local.tags)
42 | }
--------------------------------------------------------------------------------
/terraform-blueprint/README.md:
--------------------------------------------------------------------------------
1 | # Amazon EKS Deployment Modes
2 |
3 | Starting with Istio 1.20, two deployment models are offered: the traditional model (called `sidecar`) and the resource-efficient model (called `ambient`).
4 |
5 | You can choose your deployment mode by selecting one of the directories in the tree below. The `ambient` directory contains artifacts for deploying Istio in `Ambient` mode, while the `sidecar` directory contains artifacts for the traditional deployment mode.
6 | # Amazon EKS Deployment Modes with Istio
7 |
8 | As of Istio 1.20, deploying Istio on Amazon EKS introduces two distinct deployment models: the traditional approach, referred to as `sidecar`, and the newly introduced resource-optimized model, known as `ambient`.
9 |
10 | ## Understanding Deployment Modes
11 |
12 | When deploying Istio on Amazon EKS, it's crucial to select the appropriate deployment mode that aligns with your application's requirements and operational preferences. Istio provides flexibility through these two deployment models:
13 |
14 | ### Traditional Deployment (`sidecar`)
15 |
16 | The `sidecar` deployment mode reflects the conventional approach to Istio deployment. In this mode, each workload container is accompanied by an Istio sidecar container, facilitating the interception and management of traffic within the service mesh.
17 |
18 | ### Resource-Savvy Deployment (`ambient`)
19 |
20 | In contrast, the `ambient` deployment mode, introduced recently, offers a resource-optimized strategy tailored for efficient utilization within Amazon EKS environments. This mode prioritizes resource efficiency while maintaining the functionalities of Istio's service mesh.
21 |
22 | ## Selecting Your Deployment Mode
23 |
24 | To select the appropriate deployment mode for your Amazon EKS environment, navigate through the provided directory structure:
25 |
26 | - **`sidecar` Directory:** Contains artifacts and configurations specifically tailored for the traditional `sidecar` deployment mode. If you prefer the classic Istio deployment approach, this directory is your destination.
27 |
28 | - **`ambient` Directory:** Hosts artifacts and configurations optimized for the `ambient` deployment mode, designed to maximize resource efficiency within Amazon EKS clusters. If you aim for resource optimization while leveraging Istio's capabilities, explore the contents of this directory.
29 |
30 | ## Getting Started
31 |
32 | To get started with deploying Istio on Amazon EKS using your preferred deployment mode, refer to the respective directories' contents. Each directory contains comprehensive instructions and configuration files to streamline the deployment process.
33 |
--------------------------------------------------------------------------------
/terraform-blueprint/ambient/outputs.tf:
--------------------------------------------------------------------------------
1 | output "configure_kubectl" { # printed after `terraform apply`; run the value to point kubectl at the new cluster
2 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
3 | value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform-blueprint/ambient/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/terraform-blueprint/ambient/variables.tf
--------------------------------------------------------------------------------
/terraform-blueprint/ambient/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.3" # minimum Terraform CLI version
3 |
4 | required_providers { # provider version floors
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 5.34"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.9"
12 | }
13 | kubernetes = {
14 | source = "hashicorp/kubernetes"
15 | version = ">= 2.20"
16 | }
17 | }
18 |
19 | # ## Used for end-to-end testing on project; update to suit your needs
20 | # backend "s3" {
21 | # bucket = "terraform-ssp-github-actions-state"
22 | # region = "us-west-2"
23 | # key = "e2e/istio/terraform.tfstate"
24 | # }
25 | }
26 |
--------------------------------------------------------------------------------
/terraform-blueprint/sidecar/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region # "us-west-2", set in locals below
3 | }
4 |
5 | provider "kubernetes" { # authenticates to the EKS API with a short-lived token from `aws eks get-token`
6 | host = module.eks.cluster_endpoint
7 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
8 |
9 | exec {
10 | api_version = "client.authentication.k8s.io/v1beta1"
11 | command = "aws"
12 | # This requires the awscli to be installed locally where Terraform is executed
13 | args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
14 | }
15 | }
16 |
17 | provider "helm" { # same EKS token-based auth as the kubernetes provider above
18 | kubernetes {
19 | host = module.eks.cluster_endpoint
20 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
21 |
22 | exec {
23 | api_version = "client.authentication.k8s.io/v1beta1"
24 | command = "aws"
25 | # This requires the awscli to be installed locally where Terraform is executed
26 | args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
27 | }
28 | }
29 | }
30 |
31 | data "aws_availability_zones" "available" {} # AZ names for the current region
32 |
33 | locals {
34 | name = basename(path.cwd) # cluster/VPC name derived from the directory name ("sidecar")
35 | region = "us-west-2"
36 |
37 | vpc_cidr = "10.0.0.0/16"
38 | azs = slice(data.aws_availability_zones.available.names, 0, 3) # first three AZs
39 |
40 | istio_chart_url = "https://istio-release.storage.googleapis.com/charts"
41 | istio_chart_version = "1.22.0"
42 |
43 | tags = {
44 | Blueprint = local.name
45 | GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
46 | }
47 | }
48 |
49 | ################################################################################
50 | # Cluster
51 | ################################################################################
52 |
53 | module "eks" {
54 |   source  = "terraform-aws-modules/eks/aws"
55 |   version = "~> 20.11"
56 |
57 |   cluster_name                   = local.name
58 |   cluster_version                = "1.30"
59 |   cluster_endpoint_public_access = true
60 |
61 |   # Give the Terraform identity admin access to the cluster
62 |   # which will allow resources to be deployed into the cluster
63 |   enable_cluster_creator_admin_permissions = true
64 |
65 |   cluster_addons = {
66 |     coredns    = {}
67 |     kube-proxy = {}
68 |     vpc-cni    = {}
69 |   }
70 |
71 |   vpc_id     = module.vpc.vpc_id
72 |   subnet_ids = module.vpc.private_subnets
73 |
74 |   eks_managed_node_groups = {
75 |     initial = {
76 |       instance_types = ["m5.large"]
77 |
78 |       min_size     = 1
79 |       max_size     = 5
80 |       desired_size = 2
81 |     }
82 |   }
83 |
84 |   # The EKS control plane must reach worker nodes on 15017/TCP (Istio sidecar
85 |   # injection webhook) and 15012/TCP so sidecar injection works.
86 |   node_security_group_additional_rules = {
87 |     ingress_15017 = {
88 |       description                   = "Cluster API - Istio Webhook namespace.sidecar-injector.istio.io"
89 |       protocol                      = "tcp" # lowercase: the EC2 API stores lowercase; "TCP" can cause perpetual plan diffs
90 |       from_port                     = 15017
91 |       to_port                       = 15017
92 |       type                          = "ingress"
93 |       source_cluster_security_group = true
94 |     }
95 |     ingress_15012 = {
96 |       description                   = "Cluster API to nodes ports/protocols"
97 |       protocol                      = "tcp"
98 |       from_port                     = 15012
99 |       to_port                       = 15012
100 |       type                          = "ingress"
101 |       source_cluster_security_group = true
102 |     }
103 |   }
104 |
105 |   tags = local.tags
106 | }
107 |
108 | ################################################################################
109 | # EKS Blueprints Addons
110 | ################################################################################
111 |
112 | resource "kubernetes_namespace_v1" "istio_system" { # namespace the istio-base and istiod releases install into
113 | metadata {
114 | name = "istio-system"
115 | }
116 | }
117 |
118 | module "eks_blueprints_addons" {
119 | source = "aws-ia/eks-blueprints-addons/aws"
120 | version = "~> 1.16"
121 |
122 | cluster_name = module.eks.cluster_name
123 | cluster_endpoint = module.eks.cluster_endpoint
124 | cluster_version = module.eks.cluster_version
125 | oidc_provider_arn = module.eks.oidc_provider_arn
126 |
127 | # This is required to expose Istio Ingress Gateway
128 | enable_aws_load_balancer_controller = true
129 |
130 | helm_releases = {
131 | istio-base = { # Istio CRDs and cluster-wide resources
132 | chart = "base"
133 | chart_version = local.istio_chart_version
134 | repository = local.istio_chart_url
135 | name = "istio-base"
136 | namespace = kubernetes_namespace_v1.istio_system.metadata[0].name
137 | }
138 |
139 | istiod = { # the Istio control plane
140 | chart = "istiod"
141 | chart_version = local.istio_chart_version
142 | repository = local.istio_chart_url
143 | name = "istiod"
144 | namespace = kubernetes_namespace_v1.istio_system.metadata[0].name
145 |
146 | set = [
147 | {
148 | name = "meshConfig.accessLogFile"
149 | value = "/dev/stdout" # enable Envoy access logging to container stdout
150 | }
151 | ]
152 | }
153 |
154 | istio-ingress = { # public ingress gateway, exposed via an NLB
155 | chart = "gateway"
156 | chart_version = local.istio_chart_version
157 | repository = local.istio_chart_url
158 | name = "istio-ingress"
159 | namespace = "istio-ingress" # per https://github.com/istio/istio/blob/master/manifests/charts/gateways/istio-ingress/values.yaml#L2
160 | create_namespace = true
161 |
162 | values = [
163 | yamlencode(
164 | {
165 | labels = {
166 | istio = "ingressgateway"
167 | }
168 | service = {
169 | annotations = { # AWS LB Controller: internet-facing NLB with IP targets
170 | "service.beta.kubernetes.io/aws-load-balancer-type" = "external"
171 | "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" = "ip"
172 | "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internet-facing"
173 | "service.beta.kubernetes.io/aws-load-balancer-attributes" = "load_balancing.cross_zone.enabled=true"
174 | }
175 | }
176 | }
177 | )
178 | ]
179 | }
180 | }
181 |
182 | tags = local.tags
183 | }
184 |
185 | ################################################################################
186 | # Supporting Resources
187 | ################################################################################
188 |
189 | module "vpc" {
190 | source = "terraform-aws-modules/vpc/aws"
191 | version = "~> 5.0"
192 |
193 | name = local.name
194 | cidr = local.vpc_cidr
195 |
196 | azs = local.azs
197 | private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] # /20 per AZ
198 | public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] # /24 per AZ, offset to avoid overlap
199 |
200 | enable_nat_gateway = true
201 | single_nat_gateway = true # one NAT gateway for all AZs (cost over HA)
202 |
203 | public_subnet_tags = {
204 | "kubernetes.io/role/elb" = 1 # required by the AWS LB Controller for public LBs
205 | }
206 |
207 | private_subnet_tags = {
208 | "kubernetes.io/role/internal-elb" = 1 # required by the AWS LB Controller for internal LBs
209 | }
210 |
211 | tags = local.tags
212 | }
213 |
--------------------------------------------------------------------------------
/terraform-blueprint/sidecar/outputs.tf:
--------------------------------------------------------------------------------
1 | output "configure_kubectl" { # printed after `terraform apply`; run the value to point kubectl at the new cluster
2 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
3 | value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform-blueprint/sidecar/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/istio-on-eks/32c93cc30967271a4b47cb7c11b58dd9892c4f66/terraform-blueprint/sidecar/variables.tf
--------------------------------------------------------------------------------
/terraform-blueprint/sidecar/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.3" # minimum Terraform CLI version
3 |
4 | required_providers { # provider version floors
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 5.34"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.9"
12 | }
13 | kubernetes = {
14 | source = "hashicorp/kubernetes"
15 | version = ">= 2.20"
16 | }
17 | }
18 |
19 | # ## Used for end-to-end testing on project; update to suit your needs
20 | # backend "s3" {
21 | # bucket = "terraform-ssp-github-actions-state"
22 | # region = "us-west-2"
23 | # key = "e2e/istio/terraform.tfstate"
24 | # }
25 | }
26 |
--------------------------------------------------------------------------------