├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── amplify.yaml ├── docs ├── authoring.md ├── maintenance.md ├── steering.md └── style-guide.md ├── package-lock.json ├── scripts └── setup-ws-instance-python.sh └── website ├── .gitignore ├── babel.config.js ├── docs ├── java │ ├── containers │ │ ├── build-image.md │ │ ├── images │ │ │ ├── ecr-console.png │ │ │ ├── ecr-with-image.png │ │ │ └── test-success.png │ │ ├── index.md │ │ ├── multi-arch-linux.png │ │ ├── multi-arch-windows.png │ │ ├── multi-arch.png │ │ ├── multi-stage.md │ │ └── upload-ecr.md │ ├── eks │ │ ├── deploy-app.md │ │ ├── eks-create.md │ │ ├── eks-setup.md │ │ ├── images │ │ │ ├── eks-deploy.png │ │ │ ├── eks-finished.png │ │ │ ├── eks-initial-log.png │ │ │ ├── eks-welcome.png │ │ │ ├── unicornstore-architecture-eks.png │ │ │ └── what-is-eks.png │ │ └── index.md │ ├── index.md │ ├── introduction │ │ ├── images │ │ │ ├── architecture-traditional.png │ │ │ ├── cloud9-console.png │ │ │ ├── cloud9-list.png │ │ │ ├── cloud9-new-terminal.png │ │ │ ├── cloud9-terminal.png │ │ │ ├── cloudformation.png │ │ │ ├── java-confirm.png │ │ │ ├── java-on-amazon-eks.png │ │ │ ├── logging-in.png │ │ │ └── unicornstore-app-flow.png │ │ ├── index.md │ │ ├── unicornstore-architecture.md │ │ └── workshop-setup.md │ └── optimizations │ │ ├── baseline.md │ │ ├── graal-vm.md │ │ ├── images │ │ ├── container-layers.png │ │ ├── ecr-with-image.png │ │ ├── eks-initial-log.png │ │ ├── graalvm-ecr.png │ │ ├── graalvm-eks.png │ │ ├── graalvm-result.png │ │ ├── java-on-aws-id.png │ │ ├── jib-ecr.png │ │ ├── jib-eks.png │ │ ├── jib-result.png │ │ ├── optimized-jvm-ecr.png │ │ ├── optimized-jvm-eks.png │ │ └── optimized-jvm-result.png │ │ ├── index.md │ │ ├── jib.md │ │ ├── optimized-jvm.md │ │ ├── results.md │ │ └── summary.md └── python │ ├── containers │ ├── about-multiservices.md │ ├── build-image.md │ ├── images │ │ ├── app-create-book.png │ │ ├── app-home.png │ │ ├── docker-extension-open-in-browser-v2.png │ │ ├── docker-extension-open-in-browser.png │ │ ├── multi-arch-linux.png │ │ ├── multi-arch-windows.png │ │ └── multi-arch.png │ ├── index.md │ ├── integration-ecr.md │ ├── multiarchitecture-image.md │ └── upload-ecr.md │ ├── eks │ ├── Cleanup.md │ ├── about-deploy.md │ ├── access-app.md │ ├── aws-otel-instrumentation.md │ ├── aws-rds-postgresql-lab.md │ ├── aws-secrets-manager-lab.md │ ├── create-cluster.md │ ├── deploy-app.md │ ├── deploy-secrets.md │ ├── images │ │ ├── FastAPI.png │ │ ├── Local-tracing.png │ │ ├── Metadata.png │ │ ├── Segment-Details.png │ │ ├── app-create-book.png │ │ ├── app-home.png │ │ ├── aws-rds-books.png │ │ ├── k8-app-trace.png │ │ ├── kubernetes-resources-1.jpg │ │ ├── kubernetes-resources-2.jpg │ │ ├── kubernetes-resources-3.jpg │ │ ├── kubernetes-resources-4.jpg │ │ ├── kubernetes-resources-5.jpg │ │ └── raw-trace-snippet.png │ ├── index.md │ ├── manage-contexts.md │ ├── setup-loadbalancing.md │ ├── setup-storage.md │ └── view-kubernetes-resources.md │ ├── index.md │ ├── introduction │ ├── environment-setup.md │ ├── images │ │ ├── visual-studio-terminal.png │ │ └── workshop-studio-event-dashboard.png │ ├── index.md │ └── refactoring.md │ └── kubernetes │ ├── about-multiservice.md │ ├── access-app.md │ ├── deploy-app.md │ ├── deploy-configmap.md │ ├── deploy-secrets.md │ ├── deployments-sizing.md │ ├── images │ ├── app-create-book.png │ ├── docker-extension-open-in-browser.png │ ├── 
kubernetes-dashboard-1.jpg │ ├── kubernetes-dashboard-2.jpg │ ├── kubernetes-dashboard-3.jpg │ ├── kubernetes-dashboard-4.jpg │ ├── kubernetes-dashboard-5.jpg │ ├── kubernetes-dashboard-6.jpg │ ├── kubernetes-dashboard-7.jpg │ ├── kubernetes-dashboard-8.jpg │ ├── kubernetes-dashboard-9.jpg │ └── requests-flow.png │ ├── index.md │ ├── kubernetes-dashboard.md │ ├── minikube-create.md │ └── pods-sizing.md ├── docusaurus.config.js ├── package-lock.json ├── package.json ├── sidebars.js ├── src ├── css │ └── custom.css ├── includes │ ├── get-ecr-uri.md │ └── get-env-vars.md ├── pages │ ├── index.js │ └── index.module.css ├── scripts │ └── FeedbackLink.jsx └── theme │ └── DocItem │ └── Footer │ ├── index.js │ └── styles.module.css ├── static ├── .nojekyll └── img │ ├── docusaurus.png │ ├── favicon.ico │ ├── favicon.png │ ├── logo.svg │ ├── undraw_docusaurus_mountain.svg │ ├── undraw_docusaurus_react.svg │ └── undraw_docusaurus_tree.svg └── yarn.lock /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # What problem does it solve? 2 | 3 | Summarize the changes and the motivation behind your changes. 
4 | 5 | - 6 | 7 | ## Type of change 8 | 9 | - [ ] Bug fix or improvement (non-breaking change which fixes an issue) 10 | - [ ] New lab exercise (non-breaking change which adds functionality) 11 | - [ ] Breaking change (fix or feature that would cause existing workshop materials to not work as expected) 12 | 13 | # How Can Other Contributors Test Your Changes? 14 | 15 | Describe the tests that you ran to verify your changes so that other contributors can reproduce your steps. 16 | 17 | - [ ] N/A 18 | - [ ] Describe below: 19 | 20 | 21 | # Checklist: 22 | 23 | - [ ] My contributions follow the [style guidelines](../docs/style-guide.md) of this project 24 | - [ ] I have performed a self-review of my changes 25 | - [ ] I have added tests that prove my changes are effective 26 | 27 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## EKS Developers Workshop 2 | 3 | The EKS Developers Workshop is a technical workshop designed to equip developers with the skills needed to transition into the Kubernetes and Amazon Elastic Kubernetes Service (EKS) ecosystems. It is ideal for those with a foundational understanding of container technologies and a desire to apply this knowledge to Kubernetes-based application deployments, especially within the AWS ecosystem. 4 | 5 | ## Quickstart 6 | Contributors can view our [Authoring Guide for Contributors](./docs/authoring.md) guide to set up and run documentation locally. 7 | ## Security 8 | 9 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 10 | 11 | ## License 12 | 13 | This project is licensed under the Apache-2.0 License. 14 | 15 | -------------------------------------------------------------------------------- /amplify.yaml: -------------------------------------------------------------------------------- 1 | version: 1 2 | applications: 3 | - frontend: 4 | phases: 5 | preBuild: 6 | commands: 7 | - npm install 8 | build: 9 | commands: 10 | - npm run build 11 | artifacts: 12 | baseDirectory: build 13 | files: 14 | - '**/*' 15 | cache: 16 | paths: 17 | - node_modules/**/* 18 | appRoot: website 19 | -------------------------------------------------------------------------------- /docs/authoring.md: -------------------------------------------------------------------------------- 1 | # Authoring Guide for Contributors 2 | This guide provides instructions for setting up and running [Docusaurus](https://docusaurus.io/) for contributors to the EKS Developers Workshop documentation. 
It includes steps to create a fork, manage branches, and best practices for contributing. 3 | 4 | ### Prerequisites 5 | - [Node.js](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) (version 16.x or higher). To check your version, run: `node --version`. 6 | - [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) (version 8.x or higher). To check your version, run: `npm --version`. 7 | 8 | ### Setup 9 | Before you can start contributing to the documentation, you need to set up Docusaurus locally. 10 | 1. [Fork](https://help.github.com/articles/fork-a-repo/) the **eks-workshop-developers** repository. 11 | 2. Clone the forked repository: 12 | ```bash 13 | git clone https://github.com/your-gh-user-name/eks-workshop-developers.git 14 | cd eks-workshop-developers/website 15 | ``` 16 | 17 | 3. Run the following command to install the required dependencies. 18 | ```bash 19 | npm install 20 | ``` 21 | 22 | 4. Generate the static files for the documentation site: 23 | ```bash 24 | npm run build 25 | ``` 26 | This command will create a `/build` directory. 27 | 28 | 5. To view the documentation site locally, run: 29 | ```bash 30 | npm run serve 31 | ``` 32 | This will start a local development server. You will be redirected to workshop documentation in your browser. If you run into any issues building the documentation, see our troubleshooting guidance in the [Docusaurus Maintenance Guide](maintenance.md). 33 | 34 | ## Contributing 35 | When you're ready to contribute to the documentation, follow these steps: 36 | 37 | ### Major Updates to Lab Exercises 38 | If you're planning significant updates such as modifying the contents of an existing lab exercise (like Dockerfile, docker-compose.yml, Kubernetes manifests, etc.) or creating new lab exercises, please adhere to our branching strategy: 39 | 40 | - We maintain three primary branches in the **python-fastapi-demo-docker** repository: [main](https://github.com/aws-samples/python-fastapi-demo-docker/tree/main), [aws-opentelemetry](https://github.com/aws-samples/python-fastapi-demo-docker/tree/aws-opentelemetry), and [aws-secrets-manager-lab](https://github.com/aws-samples/python-fastapi-demo-docker/tree/aws-secrets-manager-lab). 41 | - Depending on the nature of the update, it might be necessary to apply your changes to multiple branches. 42 | - Always check the relevance of your update to each branch and coordinate with project maintainers for guidance on multi-branch updates. 43 | 44 | ### Steps 45 | #### 1. Create a Feature Branch 46 | From your forked repository, create a new branch for your feature or fix: 47 | ```bash 48 | git checkout -b feature/your-feature-name 49 | ``` 50 | Replace `your-feature-name` with a descriptive name for your feature. 51 | 52 | #### 2. Make Your Changes 53 | - Make changes to the content or documentation as needed. 54 | - Add or update markdown files within the docs directory. 55 | - Follow [Markdown Syntax](https://www.markdownguide.org/basic-syntax/) and [Docusaurus Syntax](https://docusaurus.io/docs) to format your documentation. 56 | 57 | 58 | #### 3. Test Your Changes 59 | - Ensure your changes are working as expected. 60 | - Run the development server to preview the changes. 61 | 62 | #### 4. Verify Style Guide 63 | - Make sure your changes adhere to the principles in our minimal [Documentation Style Guide](style-guide.md). 64 | 65 | #### 5. Commit Your Changes 66 | Stage your changes and commit them with a meaningful message: 67 | ```bash 68 | git add . 
69 | git commit -m "Add a meaningful description of your changes" 70 | ``` 71 | 72 | #### 6. Push to GitHub 73 | Push your feature branch to your forked repository: 74 | ```bash 75 | git push origin feature/your-feature-name 76 | ``` 77 | 78 | #### 7. Create a Pull Request 79 | - Go to your forked repository on GitHub and click "Pull request" to open a new pull request against our repository. 80 | 81 | ## Best Practices 82 | - **Keep Documentation Clear**: Write clear, concise, and well-organized documentation. 83 | - **Branch Naming**: Use descriptive branch names like feature/add-install-guide or fix/typo-in-docs. 84 | - **Commit Messages**: Write meaningful commit messages that describe what the commit accomplishes. 85 | - **Pull Request Descriptions**: In your pull request, include a detailed description of your changes and link to any relevant issues. 86 | - **Stay Updated**: Regularly pull the latest changes from the upstream repository to keep your fork up-to-date. 87 | - **Respect Guidelines**: Adhere to any contribution guidelines provided by the project maintainers. 88 | ## Getting Help 89 | If you have any questions or need assistance, don't hesitate to ask for help by opening an issue on the GitHub repository. The community and maintainers are here to help! 90 | 91 | Thank you for contributing to the EKS Developers Workshop documentation! 92 | -------------------------------------------------------------------------------- /docs/maintenance.md: -------------------------------------------------------------------------------- 1 | # Docusaurus Maintenance Guide 2 | This guide outlines the procedures for updating and maintaining the package dependencies for the EKS Developers Workshop website, which is built with [Docusaurus](https://docusaurus.io/). 3 | 4 | ## Updating Packages 5 | To keep the project secure and up-to-date, follow these best practices: 6 | 7 | ### Regular Updates 8 | - Use `npm outdated` to check for available updates for your packages. 9 | - Update the packages in `package.json` using `npm update`. 10 | - For major updates or specific versions, modify `package.json` directly. 11 | 12 | 13 | ### Handling Vulnerabilities 14 | When notified of vulnerabilities: 15 | 1. Run `npm audit` to list vulnerabilities and assess their impact. 16 | 2. Use `npm audit fix` to automatically resolve compatible updates to vulnerable packages. 17 | 3. If automatic fixes fail, consider manual resolution or updating individual packages directly. 18 | 19 | 20 | ### Troubleshooting 21 | If there are unresolved issues after running `npm audit fix`, you may need to manually clean up: 22 | ```bash 23 | rm -rf node_modules package-lock.json 24 | npm install 25 | ``` 26 | This removes the `node_modules` directory and the `package-lock.json` file, forcing a clean slate for your dependencies. 27 | 28 | Docusaurus also provides a clear command to remove caches and build artifacts: 29 | ```bash 30 | npm run clear 31 | ``` 32 | Output from the clear command should indicate the successful removal of cache and build directories: 33 | ```bash 34 | [SUCCESS] Removed the Webpack persistent cache folder at "/path/to/your/project/node_modules/.cache". 35 | [SUCCESS] Removed the build output folder at "/path/to/your/project/build". 36 | [SUCCESS] Removed the generated folder at "/path/to/your/project/.docusaurus". 37 | ``` 38 | Sometimes after upgrading package versions in `package.json`, you’ll get an error. 
Run the following command to clean the cache: 39 | ```bash 40 | npm cache clean --force 41 | ``` 42 | 43 | Sometimes you’ll get a dependency error for a package that isn’t listed in `package.json`. This is due to a peer dependency conflict, which is a transitive dependency (a dependency of a dependency) in our project. You can identify which package is requiring the dependency by running the following command, replacing `` with the package name: 44 | ```bash 45 | npm ls 46 | ``` 47 | 48 | For example: 49 | ```bash 50 | npm ls @microlink/react-json-view 51 | ``` 52 | Example: 53 | ```bash 54 | website@0.0.0 /Users/leahtuck/docs/eks-workshop-developers/website 55 | └─┬ @docusaurus/preset-classic@3.0.0 56 | └─┬ @docusaurus/plugin-debug@3.0.0 57 | └── @microlink/react-json-view@1.23.0 58 | ``` 59 | 60 | ## Best Practices 61 | - **Monitor Repositories**: Keep an eye on the [Docusaurus GitHub repository](https://github.com/facebook/docusaurus) for updates, especially regarding security patches and vulnerabilities. 62 | - **Test Before Deployment**: Always test the website locally after updating dependencies to ensure that everything functions correctly. 63 | - **Use Version Control**: Commit changes to package.json and package-lock.json after updates and document the reasons for significant updates or version pinning. 64 | - **Stay Informed**: Subscribe to security bulletins and maintain a proactive stance on dependency management. 65 | -------------------------------------------------------------------------------- /docs/style-guide.md: -------------------------------------------------------------------------------- 1 | # Documentation Style Guide for Contributors 2 | This guide provides the minimum writing style guidelines for contributors of the EKS Developers Workshop documentation. 3 | 4 | ## Labs 5 | A lab exercise must have the following components. 6 | 7 | ``` 8 | # 9 | ## Objective 10 | What will the user accomplish in the lab? 11 | 12 | ## Prerequisites 13 | What must the user do before getting started with the lab? 14 | 15 | ## <#>.
16 | 17 | ## Conclusion 18 | What did users learn in the lab? 19 | ``` 20 | 21 | ## Syntax 22 | ### H1 Headings for Page Titles 23 | Always use <Verb>-ing for H1 page title headings. 24 | - **Do**: Monitoring Kubernetes Resources Using the Dashboard 25 | 26 | ### H2 Headings for Sections 27 | Always use a number <#> + <Verb>-ing for H2 section headings. 28 | - **Do**: Logging into Amazon ECR 29 | 30 | ### H3 Headings for Sub-sections 31 | Use <Verb> for H3 section headings as needed to distinguish large sections. 32 | - **Do**: Create the Service Account 33 | 34 | ### Kubernetes API Object Names 35 | Always use UpperCamelCase when referring to an API object. 36 | - **Do**: Generate the Kubernetes ConfigMap 37 | 38 | ### Sample Output 39 | Always show users what the sample output should be after running a command or deploying resources. 40 | - **Do**: The expected output should look like this: 41 | 42 | ``` 43 | sample output 44 | ``` 45 | 46 | ### File Names 47 | Always use code style for file names. 48 | - **Do**: Open the `.env` file. 49 | 50 | ### Commands 51 | Always use a colon for descriptions that precede a command. And never include the command prompt as part of the command. 52 | - **Do**: Push the tagged image to your Amazon ECR repository: 53 | 54 | ``` 55 | command 56 | ``` 57 | 58 | ### External Links 59 | Always provide the full page title and sufficient context of any hyperlinks that direct users outside the workshop. 60 | - **Do**: To learn more, see [Amazon EKS security group requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in EKS documentation. 61 | 62 | 63 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "eks-workshop-developers", 3 | "lockfileVersion": 3, 4 | "requires": true, 5 | "packages": {} 6 | } 7 | -------------------------------------------------------------------------------- /scripts/setup-ws-instance-python.sh: -------------------------------------------------------------------------------- 1 | #bin/sh 2 | 3 | ## Script to set up VScode terminal in Workshop Studio: install CLIs, clone fastapi, set env vars, etc 4 | 5 | ## Go to tmp directory 6 | cd /tmp 7 | 8 | ## Update OS 9 | sudo yum update 10 | 11 | ## Install additional dependencies 12 | sudo yum install -y jq 13 | 14 | wget https://github.com/mikefarah/yq/releases/download/v4.33.3/yq_linux_amd64.tar.gz -O - |\ 15 | tar xz && sudo mv yq_linux_amd64 /usr/bin/yq 16 | yq --version 17 | 18 | ## Install docker buildx 19 | export BUILDX_VERSION=$(curl --silent "https://api.github.com/repos/docker/buildx/releases/latest" |jq -r .tag_name) 20 | curl -JLO "https://github.com/docker/buildx/releases/download/$BUILDX_VERSION/buildx-$BUILDX_VERSION.linux-amd64" 21 | mkdir -p ~/.docker/cli-plugins 22 | mv "buildx-$BUILDX_VERSION.linux-amd64" ~/.docker/cli-plugins/docker-buildx 23 | chmod +x ~/.docker/cli-plugins/docker-buildx 24 | docker run --privileged --rm tonistiigi/binfmt --install all 25 | docker buildx create --use --driver=docker-container 26 | 27 | ## Install docker compose 28 | DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker} 29 | mkdir -p $DOCKER_CONFIG/cli-plugins 30 | curl -SL https://github.com/docker/compose/releases/download/v2.20.3/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose 31 | chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose 32 | echo 'alias docker-compose="docker 
compose"' >> ~/.bashrc 33 | docker compose version 34 | 35 | ## Install eksctl 36 | # for ARM systems, set ARCH to: `arm64`, `armv6` or `armv7` 37 | ARCH=amd64 38 | PLATFORM=$(uname -s)_$ARCH 39 | curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$PLATFORM.tar.gz" 40 | # (Optional) Verify checksum 41 | curl -sL "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_checksums.txt" | grep $PLATFORM | sha256sum --check 42 | tar -xzf eksctl_$PLATFORM.tar.gz -C /tmp && rm eksctl_$PLATFORM.tar.gz 43 | sudo mv /tmp/eksctl /usr/local/bin 44 | eksctl version 45 | 46 | ## Install kubectl 47 | # https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html 48 | curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.0/2024-05-12/bin/linux/amd64/kubectl 49 | chmod +x ./kubectl 50 | mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$PATH:$HOME/bin 51 | echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc 52 | kubectl version --client 53 | 54 | ## Install Helm 55 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 56 | chmod 700 get_helm.sh 57 | ./get_helm.sh 58 | helm version 59 | 60 | ## Install minikube 61 | cd /home/ec2-user/ 62 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 63 | sudo install minikube-linux-amd64 /usr/local/bin/minikube 64 | minikube version 65 | rm minikube-linux-amd64 66 | 67 | ## Install VSCode extensions 68 | code-server --install-extension amazonwebservices.aws-toolkit-vscode --force 69 | code-server --install-extension ms-azuretools.vscode-docker --force 70 | code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools --force 71 | code-server --install-extension ms-python.python --force 72 | 73 | ## Setup environment 74 | cd ~/environment 75 | export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account) 76 | TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 77 | export AWS_REGION=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region') 78 | export PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/public-ipv4) 79 | echo "export AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID}" | tee -a ~/.bashrc 80 | echo "export AWS_REGION=${AWS_REGION}" | tee -a ~/.bashrc 81 | echo "export PUBLIC_IP=${PUBLIC_IP}" | tee -a ~/.bashrc 82 | echo "alias k=kubectl" | tee -a ~/.bashrc 83 | aws configure set default.region ${AWS_REGION} 84 | 85 | ## Clone Git repository of Python Fastapi app 86 | git clone https://github.com/aws-samples/python-fastapi-demo-docker.git /home/ec2-user/environment/python-fastapi-demo-docker/ 87 | 88 | ## Config .env file 89 | cd /home/ec2-user/environment/python-fastapi-demo-docker/ 90 | cp .env.example .env 91 | sed -i "s/\(AWS_REGION=\).*\$/\1$AWS_REGION/" .env 92 | sed -i "s/\(AWS_ACCOUNT_ID=\).*\$/\1$AWS_ACCOUNT_ID/" .env 93 | echo "set -a; source /home/ec2-user/environment/python-fastapi-demo-docker/.env; set +a" | tee -a ~/.bashrc 94 | 95 | ## Update region in eksctl yaml files 96 | sed -i "s/\(region: \).*\$/\1$AWS_REGION/" eks/create-fargate-python.yaml 97 | sed -i "s/\(region: \).*\$/\1$AWS_REGION/" eks/create-mng-python.yaml 98 | 99 | ## Print AWS env vars 100 | printenv | sort -------------------------------------------------------------------------------- /website/.gitignore: 
-------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | 22 | # Tooling files 23 | .tool-versions 24 | .rtx.toml -------------------------------------------------------------------------------- /website/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')], 3 | }; 4 | -------------------------------------------------------------------------------- /website/docs/java/containers/images/ecr-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/images/ecr-console.png -------------------------------------------------------------------------------- /website/docs/java/containers/images/ecr-with-image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/images/ecr-with-image.png -------------------------------------------------------------------------------- /website/docs/java/containers/images/test-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/images/test-success.png -------------------------------------------------------------------------------- /website/docs/java/containers/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Containers 3 | sidebar_position: 200 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | 8 | ## Overview 9 | 10 | This chapter introduces the process of containerizing an application, emphasizing the creation of multi-stage images compatible with Kubernetes. Subsequently, we'll show how to deploy these images to a private Amazon Elastic Container Registry (ECR) and manage them within a Kubernetes environment. 11 | 12 | ## Objective 13 | 14 | This guide aims to introduce essential concepts and practices related to containerization. It focuses on familiarizing you with the benefits of containerization, the role of Amazon ECR as a container registry, the importance of multi-stage images for Kubernetes, and how Kubernetes uses containerization for efficient application deployment and management. 15 | 16 | ## Terms 17 | 18 | Containerization is a method of running applications in isolated environments, each with its own resources. 19 | 20 | - A **container image** is a self-contained, lightweight package holding everything necessary to run an application. It comprises a series of read-only layers, each layer signifying a modification to its predecessor. These images are stored in a container registry and can be deployed on any system supporting containerization. 21 | 22 | - A **container** is a running instance of a container image, operating as a process on a host system. 
With its unique file system, network interface, and resource set, it's isolated from other containers and the host system. Containers are transient, capable of swift creation, commencement, halting, and deletion. 23 | 24 | - A **container registry** is a centralized platform for storing, managing, and distributing container images. It acts as a repository, facilitating easy image access and retrieval across various hosts or environments. Container registries can be public or private, reflecting the organization's security needs. While public registries like Docker Hub allow unrestricted image upload and access, private ones like Amazon Elastic Container Registry (ECR) cater to enterprise applications. 25 | 26 | ## Services 27 | 28 | - [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/) 29 | -------------------------------------------------------------------------------- /website/docs/java/containers/multi-arch-linux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/multi-arch-linux.png -------------------------------------------------------------------------------- /website/docs/java/containers/multi-arch-windows.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/multi-arch-windows.png -------------------------------------------------------------------------------- /website/docs/java/containers/multi-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/containers/multi-arch.png -------------------------------------------------------------------------------- /website/docs/java/containers/multi-stage.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Optimizing a Dockerfile with a Multi-stage Build 3 | sidebar_position: 2 4 | --- 5 | 6 | ## Objective 7 | 8 | In this lab we will optimize a container image size using a multi-stage build. 9 | 10 | ## Prerequisites 11 | 12 | - [Building and Running Container Images with Java Application Using Docker](./build-image.md) 13 | 14 | ## 1. Optimizing the Dockerfile 15 | 16 | We are now embedding additional build tools such as Maven, an image size will naturally increase. However, Maven is only needed during build-time and not for running the final JAR. You can therefore leverage a [multi-stage build](https://docs.docker.com/build/building/multi-stage/) to reduce the image size by separating the build from the runtime stage. 17 | 18 | Check size of the initial image, it is **1.66 GB** 19 | 20 | ```bash showLineNumbers 21 | docker images 22 | ``` 23 | 24 | :::info 25 | Image size and application startup times might be different in your case 26 | ::: 27 | 28 | ```bash showLineNumbers 29 | REPOSITORY TAG IMAGE ID CREATED SIZE 30 | unicorn-store-spring latest 836da356dc0e About a minute ago 1.66GB 31 | ``` 32 | 33 | Copy the prepared Dockerfile: 34 | 35 | ```bash showLineNumbers 36 | cd ~/environment/unicorn-store-spring 37 | cp dockerfiles/Dockerfile_02_multistage Dockerfile 38 | ``` 39 | 40 | Start the build for the container image. 
While it is building, you can move to the next step and inspect the Dockerfile. 41 | 42 | ```bash showLineNumbers 43 | docker buildx build --load -t unicorn-store-spring:latest . 44 | ``` 45 | 46 | Inspect the Dockerfile. 47 | 48 | As you can see in line 10 - we are starting with a fresh Amazon Corretto Image. On line 13 we are copying the artifact from the initial build stage to the fresh image. 49 | 50 | ```docker {10,13} showLineNumbers title="/unicorn-store-spring/Dockerfile" 51 | FROM public.ecr.aws/docker/library/maven:3.9-amazoncorretto-17-al2023 as builder 52 | 53 | COPY ./pom.xml ./pom.xml 54 | RUN mvn dependency:go-offline -f ./pom.xml 55 | 56 | COPY src ./src/ 57 | RUN mvn clean package && mv target/store-spring-1.0.0-exec.jar store-spring.jar 58 | RUN rm -rf ~/.m2/repository 59 | 60 | FROM public.ecr.aws/docker/library/amazoncorretto:17.0.9-al2023 61 | RUN yum install -y shadow-utils 62 | 63 | COPY --from=builder store-spring.jar store-spring.jar 64 | 65 | RUN groupadd --system spring -g 1000 66 | RUN adduser spring -u 1000 -g 1000 67 | 68 | USER 1000:1000 69 | 70 | EXPOSE 8080 71 | ENTRYPOINT ["java","-jar","-Dserver.port=8080","/store-spring.jar"] 72 | ``` 73 | 74 | Check size of the image, it is **1.04 GB** now. 75 | 76 | ```bash showLineNumbers 77 | docker images 78 | ``` 79 | 80 | Now we can see that the size of our image is less than in the previous build: 81 | 82 | ```bash showLineNumbers 83 | REPOSITORY TAG IMAGE ID CREATED SIZE 84 | unicorn-store-spring latest ea42046620d4 29 seconds ago 1.04GB 85 | ``` 86 | 87 | With multi-stage build we achieved more than **30%** reduction of container image size. 88 | 89 | We will continue to optimize the image in the following modules. 90 | 91 | ## Conclusion 92 | 93 | This lab explored the process of optimizing a container image size using a multi-stage build. 94 | -------------------------------------------------------------------------------- /website/docs/java/containers/upload-ecr.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Pushing a Container Image to Amazon Elastic Container Registry (ECR). 3 | sidebar_position: 2 4 | --- 5 | 6 | ## Objective 7 | 8 | In this lab we will login to ECR and push container images. 9 | 10 | ## Prerequisites 11 | 12 | - [Optimizing a Dockerfile with a Multi-stage Build](./multi-stage.md) 13 | 14 | ## 1. Pushing a container image to Amazon Elastic Container Registry (ECR) 15 | 16 | Before we move to deploying containers in different environments, you will push container image to [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/). 17 | 18 | The Amazon ECR repository with the name `unicorn-store-spring` was already created for you during the workshop setup. 
It is still empty, but you can explore it and familiarize yourself with [Amazon ECR](https://console.aws.amazon.com/ecr/home#/) in the AWS console: 19 | 20 | ![ecr-console](./images/ecr-console.png) 21 | 22 | To be able to push images to the repository, we first need to log in to it: 23 | 24 | ```bash showLineNumbers 25 | export ECR_URI=$(aws ecr describe-repositories --repository-names unicorn-store-spring | jq --raw-output '.repositories[0].repositoryUri') 26 | echo $ECR_URI 27 | aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $ECR_URI 28 | ``` 29 | 30 | Tag the local container image: 31 | 32 | ```bash showLineNumbers 33 | IMAGE_TAG=i$(date +%Y%m%d%H%M%S) 34 | echo $IMAGE_TAG 35 | docker tag unicorn-store-spring:latest $ECR_URI:$IMAGE_TAG 36 | docker tag unicorn-store-spring:latest $ECR_URI:latest 37 | docker images 38 | ``` 39 | 40 | Push the image to Amazon ECR: 41 | 42 | ```bash showLineNumbers 43 | docker push $ECR_URI:$IMAGE_TAG 44 | docker push $ECR_URI:latest 45 | ``` 46 | 47 | Go to the [Amazon ECR](https://console.aws.amazon.com/ecr/home#/) console directly, or navigate to Amazon ECR in the AWS Console. Verify that the image uploaded successfully: 48 | 49 | ![ecr-with-image](./images/ecr-with-image.png) 50 | 51 | :::info 52 | Image size in Amazon ECR is smaller than locally due to compression 53 | ::: 54 | 55 | If you change the application source code, you can run the set of commands below to build and push a new container image to ECR: 56 | 57 | ```bash showLineNumbers 58 | cd ~/environment/unicorn-store-spring 59 | docker buildx build --load -t unicorn-store-spring:latest . 60 | IMAGE_TAG=i$(date +%Y%m%d%H%M%S) 61 | docker tag unicorn-store-spring:latest $ECR_URI:$IMAGE_TAG 62 | docker tag unicorn-store-spring:latest $ECR_URI:latest 63 | docker push $ECR_URI:$IMAGE_TAG 64 | docker push $ECR_URI:latest 65 | ``` 66 | 67 | ## Conclusion 68 | 69 | You successfully containerized the Java application and optimized the build behavior and image size. Finally, you pushed the container image to Amazon ECR. With the container image in the AWS Cloud, you can now proceed with [Deploy to Amazon EKS](java/eks/eks-create.md). 70 | -------------------------------------------------------------------------------- /website/docs/java/eks/deploy-app.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Deploy a container image to Amazon EKS 3 | sidebar_position: 203 4 | --- 5 | 6 | ## Objective 7 | 8 | This lab shows you how to deploy the [Java Application](java/introduction/workshop-setup.md) project onto your Amazon EKS cluster. 9 | 10 | ## Prerequisites 11 | 12 | - [Setup Amazon EKS for Java Application](./eks-setup.md) 13 | 14 | ## 1.
Deploying the application 15 | 16 | Create a new directory `k8s` in the application folder: 17 | 18 | ```bash showLineNumbers 19 | mkdir ~/environment/unicorn-store-spring/k8s 20 | cd ~/environment/unicorn-store-spring/k8s 21 | ``` 22 | 23 | Create Kubernetes manifest files for the deployment and the service: 24 | 25 | ```yml showLineNumbers 26 | export ECR_URI=$(aws ecr describe-repositories --repository-names unicorn-store-spring \ 27 | | jq --raw-output '.repositories[0].repositoryUri') 28 | export SPRING_DATASOURCE_URL=$(aws ssm get-parameter --name databaseJDBCConnectionString \ 29 | | jq --raw-output '.Parameter.Value') 30 | 31 | cat <<EOF > ~/environment/unicorn-store-spring/k8s/deployment.yaml 32 | apiVersion: apps/v1 33 | kind: Deployment 34 | metadata: 35 | name: unicorn-store-spring 36 | namespace: unicorn-store-spring 37 | labels: 38 | app: unicorn-store-spring 39 | spec: 40 | replicas: 1 41 | selector: 42 | matchLabels: 43 | app: unicorn-store-spring 44 | template: 45 | metadata: 46 | labels: 47 | app: unicorn-store-spring 48 | spec: 49 | serviceAccountName: unicorn-store-spring 50 | containers: 51 | - name: unicorn-store-spring 52 | resources: 53 | requests: 54 | cpu: "1" 55 | memory: "2Gi" 56 | limits: 57 | cpu: "1" 58 | memory: "2Gi" 59 | image: ${ECR_URI}:latest 60 | env: 61 | - name: SPRING_DATASOURCE_PASSWORD 62 | valueFrom: 63 | secretKeyRef: 64 | name: "unicornstore-db-secret" 65 | key: "password" 66 | optional: false 67 | - name: SPRING_DATASOURCE_URL 68 | value: ${SPRING_DATASOURCE_URL} 69 | ports: 70 | - containerPort: 8080 71 | livenessProbe: 72 | httpGet: 73 | path: /actuator/health/liveness 74 | port: 8080 75 | readinessProbe: 76 | httpGet: 77 | path: /actuator/health/readiness 78 | port: 8080 79 | lifecycle: 80 | preStop: 81 | exec: 82 | command: ["sh", "-c", "sleep 10"] 83 | securityContext: 84 | runAsNonRoot: true 85 | allowPrivilegeEscalation: false 86 | EOF 87 | 88 | cat <<EOF > ~/environment/unicorn-store-spring/k8s/service.yaml 89 | apiVersion: v1 90 | kind: Service 91 | metadata: 92 | name: unicorn-store-spring 93 | namespace: unicorn-store-spring 94 | labels: 95 | app: unicorn-store-spring 96 | spec: 97 | type: LoadBalancer 98 | ports: 99 | - port: 80 100 | targetPort: 8080 101 | protocol: TCP 102 | selector: 103 | app: unicorn-store-spring 104 | EOF 105 | ``` 106 | 107 | Deploy the manifests to the EKS cluster: 108 | 109 | ```bash showLineNumbers 110 | kubectl apply -f ~/environment/unicorn-store-spring/k8s/deployment.yaml 111 | kubectl apply -f ~/environment/unicorn-store-spring/k8s/service.yaml 112 | ``` 113 | 114 | Verify that the application is running: 115 | 116 | ```bash showLineNumbers 117 | kubectl wait deployment -n unicorn-store-spring unicorn-store-spring --for condition=Available=True --timeout=120s 118 | kubectl get deploy -n unicorn-store-spring 119 | export SVC_URL=http://$(kubectl get svc unicorn-store-spring -n unicorn-store-spring -o json | jq --raw-output '.status.loadBalancer.ingress[0].hostname') 120 | while [[ $(curl -s -o /dev/null -w "%{http_code}" $SVC_URL/) != "200" ]]; do echo "Service not yet available ..." && sleep 5; done 121 | echo $SVC_URL 122 | echo Service is Ready! 123 | ``` 124 | 125 | :::info 126 | The creation of the load balancer for the service might take around 2-5 minutes.
127 | ::: 128 | 129 | ![eks-deploy](./images/eks-deploy.png) 130 | 131 | Get the Load Balancer URL for the service and make an example API call: 132 | 133 | ```bash showLineNumbers 134 | echo $SVC_URL 135 | curl --location $SVC_URL; echo 136 | curl --location --request POST $SVC_URL'/unicorns' --header 'Content-Type: application/json' --data-raw '{ 137 | "name": "'"Something-$(date +%s)"'", 138 | "age": "20", 139 | "type": "Animal", 140 | "size": "Very big" 141 | }' | jq 142 | ``` 143 | 144 | ![eks-welcome](./images/eks-welcome.png) 145 | 146 | ## 2. Exploring Amazon EKS in the AWS console 147 | 148 | Go to the [Amazon EKS](https://console.aws.amazon.com/eks/home#/) console directly, or navigate to Amazon EKS in the AWS console. 149 | 150 | ## 3. Accessing the application logs 151 | 152 | To further inspect the application startup or runtime behavior, you can navigate to the application logs with the following steps. 153 | 154 | Get the logs from the currently running pod via kubectl: 155 | 156 | ```bash showLineNumbers 157 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring 158 | ``` 159 | 160 | You should see a result similar to: 161 | 162 | ![eks-initial-log](./images/eks-initial-log.png) 163 | 164 | ## Conclusion 165 | 166 | In this section you have learned how to create a new EKS cluster. You deployed external secrets, permissions, and the UnicornStore Java application. With the container image deployed to Amazon EKS, you can now apply different [Optimizations](java/optimizations/index.md) techniques to container images. 167 | -------------------------------------------------------------------------------- /website/docs/java/eks/eks-setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Setup Amazon EKS for Java Application 3 | sidebar_position: 202 4 | --- 5 | 6 | ## Objective 7 | 8 | This chapter shows you how to deploy the Kubernetes resources for the Java Application within an Amazon EKS cluster. 9 | 10 | ## Prerequisites 11 | 12 | - [Creating an Amazon EKS Cluster](./eks-create.md) 13 | 14 | [Amazon Elastic Kubernetes Service (Amazon EKS)](https://aws.amazon.com/eks/) is a managed service that you can use to run Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane or nodes. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications. 15 | 16 | ## 1. Adding a Kubernetes namespace for the application and a service account 17 | 18 | The Java application needs to push events to EventBridge, read parameters from Parameter Store, and read secrets from Secrets Manager. To achieve that and develop a secure application following the "Principle of least privilege", we need to create a [Service Account](https://eksctl.io/usage/iamserviceaccounts/) and give it the required permissions to access AWS services. We also want to create a Kubernetes namespace for the Java Application.
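Under the hood, the `eksctl create iamserviceaccount` command in the second step below uses IAM Roles for Service Accounts (IRSA): it creates an IAM role with the referenced policy and annotates the Kubernetes ServiceAccount with that role's ARN. Once you have completed both steps, you can confirm the wiring with a quick check (a sketch that assumes the names used in this lab):

```bash showLineNumbers
kubectl get serviceaccount unicorn-store-spring -n unicorn-store-spring -o yaml
```

The output should contain an `eks.amazonaws.com/role-arn` annotation pointing to the IAM role created by eksctl.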
19 | 20 | Create a Kubernetes namespace for the application: 21 | 22 | ```bash showLineNumbers 23 | kubectl create namespace unicorn-store-spring 24 | ``` 25 | 26 | Create a Kubernetes Service Account with a reference to the previous created IAM policy: 27 | 28 | ```bash showLineNumbers 29 | eksctl create iamserviceaccount --cluster=unicorn-store --name=unicorn-store-spring --namespace=unicorn-store-spring \ 30 | --attach-policy-arn=$(aws iam list-policies --query 'Policies[?PolicyName==`unicorn-eks-service-account-policy`].Arn' --output text) --approve --region=$AWS_REGION 31 | ``` 32 | 33 | ## 2. Synchronizing parameters and secrets 34 | 35 | Create the Kubernetes External Secret resources: 36 | 37 | ```yml showLineNumbers 38 | cat < 77 | 78 | 79 | 1. Execute the following commands to clean up your workshop environment: 80 | 81 | ```bash showLineNumbers 82 | # approximately 60 minutes 83 | ~/environment/java-on-aws/labs/unicorn-store/infrastructure/scripts/99-destroy-all.sh 84 | ``` 85 | 86 | :::info 87 | The deletion of the stacks might take more than 60 minutes. 88 | ::: 89 | 90 | :::warning 91 | If you created resources manually "Using UI (AWS Console)" you need to delete these resources manually 92 | ::: 93 | 94 | Delete Cloud9 instance `CloudFormation` → `Stacks` → `java-on-aws-workshop` → `Delete` 95 | 96 | 97 | 98 | 99 | All the infrastructure components will be deleted automatically 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /website/docs/java/introduction/unicornstore-architecture.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: UnicornStore Architecture 3 | sidebar_position: 103 4 | --- 5 | 6 | ## Objective 7 | 8 | Throughout the entire workshop we will be use the following reference application called the **UnicornStore**. In this section you will get an overview about the inner workings of the app. 9 | 10 | ## 1. Introducing the UnicornStore 11 | 12 | The UnicornStore is a **[Spring Boot 3](https://spring.io/projects/spring-boot) Java Application** that provides Create-, Read-, Update-, and Delete-(CRUD)-Operations for Unicorn-Records. 13 | It stores them in a relational database running on [Amazon RDS PostgreSQL](https://aws.amazon.com/rds/postgresql) and afterwards publishes an event about this action to [Amazon EventBridge](https://aws.amazon.com/eventbridge). 14 | 15 | In a traditional **non-Serverless** and **non-Containers** setup the application could have been hosted like this: 16 | 17 | ![architecture-traditional](./images/architecture-traditional.png) 18 | 19 | - (1) End-user can interact with the service via a REST-API that provides basic CRUD operations 20 | 21 | - **POST `/unicorns`** - Create a new unicorn 22 | - **PUT `/unicorns/{id}`** - Update an existing unicorn 23 | - **GET `/unicorns/{id}`** - Retrieve an existing unicorn 24 | - **DELETE `/unicorns/{id}`** - Delete an existing unicorn 25 | 26 | - (2) Usually a load balancer / reverse proxy acts as an entry point to the system and provides basic features such as TLS-termination and load distribution. 27 | 28 | - (3) The application itself is not directly exposed to the internet and can be any kind of compute (EC2-Instance / VM / On-Prem server). 29 | 30 | - (4) The application communicates with the database to store the Unicorn-Records. Depending on the service, this can be in the same subnet & VPC or in different ones. 
31 | 32 | - (5) Finally the application publishes an event about the action e.g. `UNICORN_CREATED`. 33 | 34 | ## 2. Inside the Java Code 35 | 36 | :::info 37 | You can also inspect the code directly in Cloud 9 in the /unicorn-store-spring/src folder. 38 | ::: 39 | 40 | The example application in this workshop tries to find a balance between simplicity and comparability to a real world example. 41 | The application therefore includes basic features such as Input-Serialization, Exception-Handling, Object-Relational-Mapping 42 | and the usage of an additional SDK. The following diagram provides an overview of the classes and features used inside 43 | the UnicornStore: 44 | 45 | ![app-flow](./images/unicornstore-app-flow.png) 46 | 47 | To get a better understanding how the components work together you will now walk through an example where a user requests to create a Unicorn (POST Request): 48 | 49 | The **UnicornController** accepts the requests and directly provides it as a unicorn object (Input Serialization). 50 | It has the additional responsibility to catch the exceptions that might happen in the further processing to provide a 51 | meaningful message and HTTP-Code to the end user (Exception-Handling). Upon successful processing the controller returns 52 | the result object and the proper HTTP-Code via a `ResponseEntity`. 53 | 54 | ```java showLineNumbers 55 | @PostMapping("/unicorns") 56 | public ResponseEntity createUnicorn(@RequestBody Unicorn unicorn) { 57 | try { 58 | var savedUnicorn = unicornService.createUnicorn(unicorn); 59 | return ResponseEntity.ok(savedUnicorn); 60 | } catch (Exception e) { 61 | logger.error("Error creating unicorn", e); 62 | throw new ResponseStatusException(INTERNAL_SERVER_ERROR, "Error creating unicorn", e); 63 | } 64 | } 65 | ``` 66 | 67 | The **UnicornService** passes it to the **UnicornRepository** which 68 | is a default implementation of the `CRUDRepository` provided by [Spring Data](https://spring.io/projects/spring-data) 69 | and afterwards calls the **UnicornPublisher** to publish an event. 70 | 71 | ```java showLineNumbers 72 | public Unicorn createUnicorn(Unicorn unicorn) { 73 | var savedUnicorn = unicornRepository.save(unicorn); 74 | unicornPublisher.publish(savedUnicorn, UnicornEventType.UNICORN_CREATED); 75 | return savedUnicorn; 76 | } 77 | ``` 78 | 79 | **UnicornRepository**: 80 | 81 | ```java showLineNumbers 82 | @Repository 83 | public interface UnicornRepository extends CrudRepository { 84 | } 85 | ``` 86 | 87 | The **UnicornPublisher** serializes the object and uses the AWS SDK to publish an event to Amazon EventBridge: 88 | 89 | ```java showLineNumbers 90 | public void publish(Unicorn unicorn, UnicornEventType unicornEventType) { 91 | try { 92 | var unicornJson = objectMapper.writeValueAsString(unicorn); 93 | logger.info("Publishing ... " + unicornEventType.toString()); 94 | logger.info(unicornJson); 95 | 96 | var eventsRequest = createEventRequestEntry(unicornEventType, unicornJson); 97 | eventBridgeClient.putEvents(eventsRequest).get(); 98 | } catch (JsonProcessingException e) { 99 | logger.error("Error JsonProcessingException ..."); 100 | logger.error(e.getMessage()); 101 | } catch (EventBridgeException | ExecutionException | InterruptedException e) { 102 | logger.error("Error EventBridgeException | ExecutionException ..."); 103 | logger.error(e.getMessage()); 104 | } 105 | } 106 | ``` 107 | 108 | ## Conclusion 109 | 110 | In this section you got an overview about the Java Application which will be used during the workshop. 
You can now proceed with [Building and Running Container Images with Java Application Using Docker](java/containers/build-image.md). 111 | -------------------------------------------------------------------------------- /website/docs/java/introduction/workshop-setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Setting up the Development Environment 3 | sidebar_position: 102 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | 8 | ## Objective 9 | 10 | This guide shows you how to set up the development environment for the workshop. 11 | 12 | 13 | 14 | 15 | ## Overview 16 | 17 | If you are attending an AWS hosted event, you will have access to an AWS account with any optional pre-provisioned infrastructure and IAM policies needed to complete this workshop. The goal of this section is to help you access this AWS account. You may skip this section if you plan to use your own AWS account. 18 | 19 | ## Prerequisites 20 | 21 | * Sign in via the one-click join event link provided by the event operator as part of an AWS hosted event. 22 | * OR via the [Workshop Studio join URL](https://catalog.workshops.aws/join) with the 12-digit event access code distributed by an event operator. 23 | * Carefully review the terms and conditions associated with this event. 24 | 25 | ## 1. Accessing AWS Account 26 | 27 | After joining the event, you should see the page with event information and workshop details. You should also see a section titled **"AWS account access"** on the left navigation bar. You can use these options to access the temporary AWS account provided to you. 28 | 29 | The **“Open AWS console”** link will open the AWS Management Console home page. This is the standard AWS Console that provides access to each service. Please note that the infrastructure associated with the workshop will be deployed to a specific region and can only be accessed from that region. 30 | 31 | ![logging-in](./images/logging-in.png) 32 | 33 | 34 | 35 | 36 | :::warning 37 | **In this workshop there will be a number of AWS resources created in your account. These resources will incur costs and will be billed to your AWS account. Make sure you delete all resources after the workshop to avoid unnecessary costs. Please refer to the cleanup section.** 38 | ::: 39 | 40 | You can use the following setup to create a virtual development environment by using [AWS Cloud9](https://aws.amazon.com/cloud9/). 41 | 42 | ## 1. Deploying AWS CloudFormation template for Cloud9 instance and the workshop infrastructure 43 | 44 | Go to the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home#/) console directly, or navigate to AWS CloudShell in the AWS console. 45 | 46 | :::info 47 | If using the link above, make sure the AWS console has opened in the region that you wish to run the labs in. 48 | ::: 49 | 50 | Deploy the AWS CloudFormation template: 51 | 52 | ```bash showLineNumbers 53 | curl https://raw.githubusercontent.com/aws-samples/java-on-aws/main/labs/unicorn-store/infrastructure/cfn/java-on-aws-workshop.yaml > java-on-aws-workshop.yaml 54 | aws cloudformation deploy --stack-name java-on-aws-workshop \ 55 | --template-file ./java-on-aws-workshop.yaml \ 56 | --capabilities CAPABILITY_NAMED_IAM 57 | ``` 58 | 59 | Wait until the command finishes successfully. 60 | 61 | Go to the [AWS CloudFormation](https://console.aws.amazon.com/cloudformation/home#/) console directly, or navigate to AWS CloudFormation in the AWS console.
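If you prefer to check from the terminal instead of the AWS console, you can also query the stack status with the AWS CLI (a sketch that reuses the stack name from the deploy command above):

```bash showLineNumbers
aws cloudformation describe-stacks --stack-name java-on-aws-workshop \
  --query 'Stacks[0].StackStatus' --output text
```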
62 | 63 | Verify that the Stacks reached the `CREATE_COMPLETE` status. 64 | 65 | :::info 66 | The creation of the stacks might take around 30 minutes. 67 | ::: 68 | 69 | ![cloudformation](./images/cloudformation.png) 70 | 71 | 72 | 73 | 74 | ## 2. Accessing AWS Cloud9 Instance 75 | 76 | Go to the [AWS Cloud 9](https://console.aws.amazon.com/cloud9control/home#/) console directly, or navigate to Cloud9 in the AWS console: 77 | 78 | ![cloud9-console](./images/cloud9-console.png) 79 | 80 | Click on "Open" for the "java-on-aws-workshop" instance to connect to Cloud9 IDE: 81 | 82 | ![cloud9-list](./images/cloud9-list.png) 83 | 84 | When asked about "Working with Java?" - click "Activate" and refresh the entire browser page to activate the extension. 85 | 86 | ![java-confirm](./images/java-confirm.png) 87 | 88 | :::info 89 | In case if you see Information: AWS Toolkit. "Connection expired. To continue using CodeWhisperer, connect with AWS Builder ID or AWS IAM Identity center.", click "Don't Show Again" and continue. 90 | ::: 91 | 92 | You have now successfully opened Cloud9 instance. 93 | 94 | After opening the Cloud9 instance, you can find the workshop code in the left sidebar. You can close the welcome window and use the "New terminal" command to open the terminal window and execute the commands provided in the workshop. 95 | 96 | ![cloud9-new-terminal](./images/cloud9-new-terminal.png) 97 | 98 | ![cloud9-terminal](./images/cloud9-terminal.png) 99 | 100 | :::warning 101 | AWS Cloud9 does not auto-save your files. Please ensure to save your files before deploying any changes via Ctrl+S or the top menu File→Save all. 102 | ::: 103 | 104 | ## Conclusion 105 | 106 | Once you've verified access to the AWS account and AWS Cloud9 instance, you should have everything you need to get started with this workshop. 107 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/baseline.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Preparation" 3 | sidebar_position: 302 4 | --- 5 | 6 | ## Objective 7 | 8 | In this chapter, we are going to evaluate different performance optimizations. It is therefore essential to understand how the current application performs to measure the impact of the optimizations. 9 | 10 | ## Prerequisites 11 | 12 | - [Deploy a container image to Amazon EKS](../../java/eks/deploy-app.md) 13 | 14 | ## 1. Getting the current image size 15 | 16 | The image size is one of the most important factors to consider when investigating containerized environments. It plays a key role as the image needs to be downloaded and started by the container orchestration service. 17 | 18 | Go to the [Amazon ECR](https://console.aws.amazon.com/ecr/home#/) console. 19 | 20 | Check the current container image size: 21 | 22 | ![ecr-with-image](./images/ecr-with-image.png) 23 | 24 | As we can see, the current image size is around **380 MB**. 25 | 26 | ## 2. Accessing the application logs 27 | 28 | To understand container startup times, we'll check the application logs and retrieve the Spring application context startup as a reference. 
29 | 30 | Execute the commands below to get the application startup time reported by Spring Boot in Amazon EKS: 31 | 32 | ```bash showLineNumbers 33 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring 34 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring | grep "Started StoreApplication" 35 | ``` 36 | 37 | ![eks-initial-log](./images/eks-initial-log.png) 38 | 39 | As we can see, the current application startup time is around 12.5 seconds. 40 | 41 | :::info 42 | Note that image size and application startup times may vary. 43 | ::: 44 | 45 | ## Conclusion 46 | 47 | In this lab, you learned about the different factors that influence application startup time and how to measure them. You also learned how to access the application logs and identify the image size. In the next lab, you will apply the first optimization technique to accelerate Java applications running on AWS container services. 48 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/container-layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/container-layers.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/ecr-with-image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/ecr-with-image.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/eks-initial-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/eks-initial-log.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/graalvm-ecr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/graalvm-ecr.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/graalvm-eks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/graalvm-eks.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/graalvm-result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/graalvm-result.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/java-on-aws-id.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/java-on-aws-id.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/jib-ecr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/jib-ecr.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/jib-eks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/jib-eks.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/jib-result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/jib-result.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/optimized-jvm-ecr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/optimized-jvm-ecr.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/optimized-jvm-eks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/optimized-jvm-eks.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/images/optimized-jvm-result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/java/optimizations/images/optimized-jvm-result.png -------------------------------------------------------------------------------- /website/docs/java/optimizations/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Optimize Container Images" 3 | sidebar_position: 300 4 | --- 5 | 6 | Fast startup times are crucial for responding promptly to disruptions and periods of high demand, while also enhancing resource efficiency. In this lab, we will begin by discussing a typical implementation that relies on a base image and a full Java Runtime Environment (JRE). To improve our startup times, we will utilize [jib](https://github.com/GoogleContainerTools/jib) to create a custom JRE using jdeps and jlink. As an extra measure, we will employ GraalVM native images to further reduce application startup time and memory usage. 7 | 8 | Additionally, we will not focus on optimizing the code at the individual line level. 
These optimizations are generally specific to a particular workload, and conducting profiling is often necessary to identify costly code sections. In this regard, AWS offers a tool called [Amazon CodeGuru Profiler](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html) that allows for application profiling. By collecting runtime performance data from your live applications, CodeGuru Profiler generates recommendations to enhance your application's performance. With the assistance of machine learning algorithms, it can identify the most resource-intensive lines of code and provide suggestions to improve efficiency and eliminate CPU bottlenecks. 9 | 10 | Another important topic is memory management for Java applications in containers. Since Java 10, it has become much easier to manage the memory of a Java application in containers in a meaningful way. Previously the JVM was not aware of the memory and CPU allocated to the container. Fortunately, the fix has been back-ported to Java 8 (version 8u191). Now the JVM calculates its memory based on the memory for the container and not based on the memory for the underlying host. The best way to identify how much memory is necessary is through load testing in a pre-production environment, such as a staging environment. You can collect these metrics with a service such as CloudWatch Container Insights. Or, do so by using Amazon Managed Service for Prometheus together with Amazon Managed Grafana. 11 | 12 | Note that OOM errors are likely to occur during these tests. In order to analyze these errors with tools, such as Eclipse MAT (https://projects.eclipse.org/projects/tools.mat), it is necessary to generate a heap dump. This can be implemented automatically using `java -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/path/to/dump`, for example. Of course, a host file system must be included in the container so that the heap dump can be analyzed later by other developers. With AWS Fargate, Amazon Elastic File System (EFS) is ideal for this use case. EFS automatically grows and shrinks as you add and remove files with no need for management or provisioning. 13 | 14 | ![container-layers](./images/container-layers.png) 15 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/jib.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Building optimized OCI images with Jib" 3 | sidebar_position: 304 4 | --- 5 | 6 | ## Objective 7 | 8 | In this lab, you will learn a straightforward and efficient method for optimizing images from the [Open Container Initiative (OCI)](https://opencontainers.org/) using [Jib](https://github.com/GoogleContainerTools/jib). 9 | 10 | ## Prerequisites 11 | 12 | - [Preparation](./baseline.md) 13 | 14 | ## Context 15 | 16 | The OCI currently contains three specifications: the Runtime Specification, the Image Specification and the Distribution Specification. Jib is a tool to build optimized OCI images without using a container runtime. It's available as a Maven or Gradle-plugin as well as a Java library. In our case we're going to use the Maven-plugin in order to create an optimized container image. 
The important part of the Jib build can be found in the `pom.xml` file, in the plugins section:

```xml showLineNumbers
<plugin>
    <groupId>com.google.cloud.tools</groupId>
    <artifactId>jib-maven-plugin</artifactId>
    <version>3.4.0</version>
    <configuration>
        <from>
            <image>public.ecr.aws/docker/library/amazoncorretto:17.0.9-alpine3.18</image>
        </from>
        <container>
            <user>1000</user>
        </container>
    </configuration>
</plugin>
```

At this stage, the configuration is quite compact. We simply specify that the Java process in the container image will not run as the root user, but instead with a different user ID. Additionally, we opt for the [Alpine Linux](https://www.alpinelinux.org/) base image to enhance the overall performance.

## 1. Changing the source code and pushing the image

You are now going to change the application code in `unicorn-store-spring/src/main/java/com/unicorn/store/controller/UnicornController.java` to identify the new version of the application deployment.

Change the contents of the getWelcomeMessage function to identify the new version of the application:

```java showLineNumbers {3}
@GetMapping("/")
public ResponseEntity<String> getWelcomeMessage() {
    return new ResponseEntity<>("Welcome to the Unicorn Store - from Jib generated Image!", HttpStatus.OK);
}
```

:::info
AWS Cloud9 does not auto-save your files. Please make sure to save your files before deploying any changes via Ctrl+S or the top menu File→Save all.
:::

Build new container images using `mvn compile jib:build`:

```bash showLineNumbers
export ECR_URI=$(aws ecr describe-repositories --repository-names unicorn-store-spring | jq --raw-output '.repositories[0].repositoryUri')
aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $ECR_URI
cd ~/environment/unicorn-store-spring
IMAGE_TAG=i$(date +%Y%m%d%H%M%S)
IMAGE_PATH=$ECR_URI:$IMAGE_TAG
mvn compile jib:build -Dimage=$IMAGE_PATH

IMAGE_PATH=$ECR_URI:latest
mvn compile jib:build -Dimage=$IMAGE_PATH
```

## 2. Re-deploying the application

After pushing the new image to ECR, you can re-trigger the deployment of the application:

```bash showLineNumbers
kubectl rollout restart deploy unicorn-store-spring -n unicorn-store-spring
kubectl rollout status deployment unicorn-store-spring -n unicorn-store-spring
```

## 3. Testing the application

Run the following API call to verify that the new version of the application was successfully deployed:

```bash showLineNumbers
export SVC_URL=http://$(kubectl get svc unicorn-store-spring -n unicorn-store-spring -o json | jq --raw-output '.status.loadBalancer.ingress[0].hostname')
curl --location --request GET $SVC_URL'/' --header 'Content-Type: application/json'; echo
```

![jib-result](./images/jib-result.png)

## 4. Retrieving the results

1. Verify the new application image size in the [Amazon ECR](https://console.aws.amazon.com/ecr/home#/) console:

![jib-ecr](./images/jib-ecr.png)

2. Retrieve the logs for the application as outlined in the previous section.
Below you can find an example starting time after the optimization of the Amazon EKS deployment: 98 | 99 | ```bash showLineNumbers 100 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring 101 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring | grep "Started StoreApplication" 102 | ``` 103 | 104 | ![jib-eks](./images/jib-eks.png) 105 | 106 | ## Conclusion 107 | 108 | As you can see, we managed to decrease the container image size from 380 MB to 212 MB, resulting in a reduction of approximately 45% without making any changes to the code. This was achieved by using Jib and the Linux Alpine base image for container creation. Additionally, the application startup time improved by around 2 seconds. 109 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/optimized-jvm.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Optimized runtime (Custom JRE)" 3 | sidebar_position: 303 4 | --- 5 | 6 | ## Objective 7 | 8 | In this lab, you will take a closer look at how to create a custom Java runtime environment (JRE) for the UnicornStore application. 9 | 10 | ## Prerequisites 11 | 12 | - [Preparation](./baseline.md) 13 | 14 | ## Context 15 | 16 | The Java Platform Module System (JPMS) was introduced with JDK 9, which split up `tools.jar` and `rt.jar` into 70 modules. This modularization can be used to create runtime environments that contain only the required modules (including transitive dependencies) of an application. This reduces the overall runtime size and increases performance during application startup. 17 | 18 | ## 1. Copying the modified Dockerfile 19 | 20 | We have already prepared a Dockerfile with the necessary steps to build and deploy the UnicornStore application with a custom JRE. 21 | 22 | Copy the Dockerfile to the current application folder: 23 | 24 | ```bash showLineNumbers 25 | cd ~/environment/unicorn-store-spring 26 | cp dockerfiles/Dockerfile_04_optimized_JVM Dockerfile 27 | ``` 28 | 29 | ## 2. Changing the source code and pushing the image 30 | 31 | You are now going to change the application code of `unicorn-store-spring/src/main/java/com/unicorn/store/controller/UnicornController.java ` to identify the new version of the application deployment. 32 | 33 | Change the contents of the getWelcomeMessage function to identify the new version of the application: 34 | 35 | ```java showLineNumbers {3} 36 | @GetMapping("/") 37 | public ResponseEntity getWelcomeMessage() { 38 | return new ResponseEntity<>("Welcome to the Unicorn Store - from Optimized JVM!", HttpStatus.OK); 39 | } 40 | ``` 41 | 42 | :::info 43 | AWS Cloud9 does not auto-save your files. Please ensure to save your files before deploying any changes via Ctrl+S or the top menu File→Save all. 44 | ::: 45 | 46 | Start the build for the container image. While it is building, you can move to the next step and inspect the Dockerfile. 47 | 48 | ```bash showLineNumbers 49 | cd ~/environment/unicorn-store-spring 50 | docker buildx build --load -t unicorn-store-spring:latest . 
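# The commands below tag the image just built above with a unique timestamp-based tag
# as well as 'latest', and push both tags to the ECR repository referenced by $ECR_URI.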
51 | IMAGE_TAG=i$(date +%Y%m%d%H%M%S) 52 | docker tag unicorn-store-spring:latest $ECR_URI:$IMAGE_TAG 53 | docker tag unicorn-store-spring:latest $ECR_URI:latest 54 | docker push $ECR_URI:$IMAGE_TAG 55 | docker push $ECR_URI:latest 56 | ``` 57 | 58 | Take a look at the modified `Dockerfile`. 59 | 60 | ```dockerfile showLineNumbers {11-14,20-23,28-29} 61 | FROM public.ecr.aws/docker/library/maven:3.9-amazoncorretto-17-al2023 as builder 62 | 63 | RUN yum install -y tar gzip unzip 64 | 65 | COPY ./pom.xml ./pom.xml 66 | RUN mvn dependency:go-offline -f ./pom.xml 67 | 68 | COPY src ./src/ 69 | RUN mvn clean package && mv target/store-spring-1.0.0-exec.jar target/store-spring.jar && cd target && unzip store-spring.jar 70 | 71 | RUN jdeps --ignore-missing-deps \ 72 | --multi-release 17 --print-module-deps \ 73 | --class-path="target/BOOT-INF/lib/*" \ 74 | target/store-spring.jar > jre-deps.info 75 | 76 | # Adding jdk.crypto.ec for TLS 1.3 support 77 | RUN truncate --size -1 jre-deps.info 78 | RUN echo ",jdk.crypto.ec" >> jre-deps.info && cat jre-deps.info 79 | 80 | RUN export JAVA_TOOL_OPTIONS=\"-Djdk.lang.Process.launchMechanism=vfork\" && \ 81 | jlink --verbose --compress 2 --strip-java-debug-attributes \ 82 | --no-header-files --no-man-pages --output custom-jre \ 83 | --add-modules $(cat jre-deps.info) 84 | 85 | FROM public.ecr.aws/amazonlinux/amazonlinux:2023.2.20231026.0 86 | RUN yum install -y shadow-utils 87 | 88 | COPY --from=builder target/store-spring.jar store-spring.jar 89 | COPY --from=builder custom-jre custom-jre 90 | 91 | RUN groupadd --system spring -g 1000 92 | RUN adduser spring -u 1000 -g 1000 93 | 94 | USER 1000:1000 95 | 96 | # OpenTelemetry agent configuration 97 | ENV OTEL_TRACES_SAMPLER "always_on" 98 | ENV OTEL_PROPAGATORS "tracecontext,baggage,xray" 99 | ENV OTEL_RESOURCE_ATTRIBUTES "service.name=unicorn-store-spring" 100 | ENV OTEL_IMR_EXPORT_INTERVAL "10000" 101 | ENV OTEL_EXPORTER_OTLP_ENDPOINT "http://localhost:4317" 102 | 103 | EXPOSE 8080 104 | ENTRYPOINT ["./custom-jre/bin/java","-jar","-Dserver.port=8080","/store-spring.jar"] 105 | ``` 106 | 107 | As mentioned in the first chapter, the Dockerfile utilizes a multi-stage build approach. The initial stage (beginning at line 1) involves building the application and a customized runtime. This custom runtime is created based on the dependencies listed in `jdeps` (refer to lines 11-14). 108 | 109 | In the first step, we analyze the entire classpath of the application and write down all module dependencies in a file called `jre-deps.info`. You might have noticed that we explicitly added `jdk.crypto.ec`. This module contains the implementation of the SunEC security provider, which is essential for TLS support. However, it is not easy to determine from the classpath analysis that this module is needed, which is why we include it at this stage. 110 | 111 | The `jre-deps.info` is used as input for `jlink` (line 20 - 23) in order to build a custom runtime. The goal of this is to reduce the size as much as possible, that's the reason the runtime is compressed and no header files and no man-pages are included. 112 | 113 | In the second stage of our build (line 28 - 29), we copy the JAR file of the application as well the custom runtime to the target image. 114 | 115 | ## 3. 
Re-deploying the application 116 | 117 | After pushing the new image to ECR, you can re-trigger the deployment of the application: 118 | 119 | ```bash 120 | kubectl rollout restart deploy unicorn-store-spring -n unicorn-store-spring 121 | kubectl rollout status deployment unicorn-store-spring -n unicorn-store-spring 122 | ``` 123 | 124 | ## 4. Testing the application 125 | 126 | Run the following API call to verify that the new version of the application has been deployed successfully: 127 | 128 | 1. Export the Service URL for later use: 129 | 130 | ```bash showLineNumbers 131 | export SVC_URL=http://$(kubectl get svc unicorn-store-spring -n unicorn-store-spring -o json | jq --raw-output '.status.loadBalancer.ingress[0].hostname') 132 | curl --location --request GET $SVC_URL'/' --header 'Content-Type: application/json'; echo 133 | ``` 134 | 135 | ![optimized-jvm-result](./images/optimized-jvm-result.png) 136 | 137 | ## 5. Retrieving the results 138 | 139 | Verify the new application image size in the [Amazon ECR](https://console.aws.amazon.com/ecr/home#/) console: 140 | 141 | ![optimized-jvm-ecr](./images/optimized-jvm-ecr.png) 142 | 143 | Retrieve the logs for the application as outlined in the previous section. Below you can find an example starting time after the optimization of the Amazon EKS deployment: 144 | 145 | ```bash showLineNumbers 146 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring 147 | kubectl logs $(kubectl get pods -n unicorn-store-spring -o json | jq --raw-output '.items[0].metadata.name') -n unicorn-store-spring | grep "Started StoreApplication" 148 | ``` 149 | 150 | ![optimized-jvm-eks](./images/optimized-jvm-eks.png) 151 | 152 | ## Conclusion 153 | 154 | As you can see, we managed to decrease the size of the container image from 380 MB to 257 MB. The application startup time also improved by a small margin. 155 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/results.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Results of the optimization steps" 3 | sidebar_position: 306 4 | --- 5 | 6 | In previous labs, we looked at one optimization at a time. We started with the initial container image without optimization, then used 'jlink' and 'jdeps' for a custom runtime, introduced 'jib', and finally used GraalVM. The following table shows the different versions with the image size and the start time of the application (tested using Amazon EKS). 7 | 8 | | Version | Image Size | Start time (p99) | 9 | | ----------- |------------|------------------| 10 | | No optimization | 380MB | 12.5s | 11 | | Custom JRE | 257MB | 12.1s | 12 | | Jib | 212MB | 10.5s | 13 | | GraalVM | 166MB | 0.92s | 14 | 15 | We can see very clearly from the table that different optimizations lead to different results. All optimizations show a significant reduction in the size of the container image. In terms of startup times, GraalVM clearly stands out with less than one second, for well-known reasons. 16 | 17 | The question remains, which optimization step should you use with your applications? On the one hand, this depends very much on what your optimization goal and technical skills available. The simplest of the optimization techniques is the use of Jib, as this hardly requires any changes in the build step. For a custom runtime, extensive changes in the Dockerfile are needed. 
In addition, 'jdeps' can only detect compile time dependencies, and any runtime dependencies have to be added manually. The most difficult optimization is using 'native-image' and 'GraalVM', especially for applications that have been implemented some time ago without GraalVM in mind. 18 | -------------------------------------------------------------------------------- /website/docs/java/optimizations/summary.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Summary" 3 | sidebar_position: 1000 4 | --- 5 | 6 | During this workshop, you've learned how to create container images using the Spring Boot Java Application. You've also explored various optimization techniques for these container images and successfully deployed them, along with your Java Application, to an Amazon EKS cluster. 7 | 8 | If you want to learn more about Cloud-native Java development on AWS you could dive deeper and explore [Java on AWS Immersion Day](https://catalog.workshops.aws/java-on-aws). 9 | 10 | https://catalog.workshops.aws/java-on-aws 11 | 12 | ![java-on-aws-id](./images/java-on-aws-id.png) 13 | 14 | Happy building! 15 | -------------------------------------------------------------------------------- /website/docs/python/containers/about-multiservices.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: About Docker Build and Service Orchestration 3 | sidebar_position: 1 4 | --- 5 | 6 | ## Overview 7 | 8 | This page serves as an introduction to Docker and Docker Compose, focusing on the deployment and orchestration of our Python-based FastAPI application using multi-stage builds and service management. 9 | 10 | ## Multi-Stage Builds in Docker for Cost Savings 11 | 12 | This section describes the practical application of Docker's multi-stage builds within our [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project's Dockerfile, reducing the final Docker image size and cost savings in cloud environments. Our project's Dockerfile employs a two-stage build process: "builder" and "runner". This strategy, utilizing only necessary elements for the final image, minimizes size and separates build-time and run-time dependencies. 13 | 14 | ### Stage 1: Builder 15 | 16 | The `builder` uses a Python base image, installs system dependencies, copies the requirements.txt file, and downloads Python dependencies as wheel files into the /server/wheels directory. These wheel files are binary packages facilitating safer, faster installations. 17 | 18 | ```dockerfile 19 | # Use an official Python runtime as a parent image 20 | FROM python:3.9-slim-buster as builder 21 | 22 | # Set environment variables 23 | ENV PYTHONDONTWRITEBYTECODE=1 \ 24 | PYTHONUNBUFFERED=1 25 | 26 | # Set work directory 27 | WORKDIR /server 28 | 29 | # Install system dependencies and Python dependencies 30 | COPY ./server/requirements.txt /server/ 31 | RUN pip wheel --no-cache-dir --no-deps --wheel-dir /server/wheels -r requirements.txt 32 | ``` 33 | 34 | ### Stage 2: Runner 35 | 36 | The `runner` starts with a Python base image, installs system dependencies like netcat, copies wheel files from the builder stage, installs Python packages, copies the application code, exposes the application's port, and sets the startup command. 
37 | 38 | ```dockerfile 39 | # Use an official Python runtime as a parent image 40 | FROM python:3.9-slim-buster as builder 41 | 42 | # Set environment variables 43 | ENV PYTHONDONTWRITEBYTECODE=1 \ 44 | PYTHONUNBUFFERED=1 45 | 46 | # Set work directory 47 | WORKDIR /server 48 | 49 | # Install system dependencies and Python dependencies 50 | COPY ./server/requirements.txt /server/ 51 | RUN pip wheel --no-cache-dir --no-deps --wheel-dir /server/wheels -r requirements.txt 52 | 53 | FROM python:3.9-slim-buster as runner 54 | 55 | WORKDIR /server 56 | 57 | # Install system dependencies and Python dependencies 58 | COPY --from=builder /server/wheels /server/wheels 59 | COPY --from=builder /server/requirements.txt . 60 | RUN pip install --no-cache-dir /server/wheels/* \ 61 | && pip install --no-cache-dir uvicorn 62 | 63 | # Copy project 64 | COPY . /server/ 65 | 66 | # Expose the port the app runs in 67 | EXPOSE 8000 68 | 69 | # Define the command to start the container 70 | CMD ["uvicorn", "server.app.main:app", "--host", "0.0.0.0", "--port", "8000"] 71 | ``` 72 | 73 | ## Managing Multiple Services with Docker Compose 74 | 75 | This section details how Docker Compose is leveraged in the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project for the orchestration of multiple services. Our `docker-compose.yml` file in the 'python-fastapi-demo-docker' project outlines two services: our FastAPI application (the 'web' service) and the PostgreSQL database (the 'db' service). 76 | 77 | ### web Service (FastAPI Application) 78 | 79 | The **web** service builds a container image using the Dockerfile in our project directory and starts the FastAPI application. The current directory, containing the application code, is mounted into the '/server' directory inside the container. This setup ensures that any changes made to the application code on the host are immediately reflected in the container. This service is part of the 'webnet' network, allowing it to communicate with the 'db' service. 80 | 81 | ``` 82 | web: 83 | build: . 84 | image: fastapi-microservices:${IMAGE_VERSION} 85 | command: uvicorn server.app.main:app --host 0.0.0.0 --port 8000 86 | volumes: 87 | - .:/server 88 | ports: 89 | - 8000:8000 90 | depends_on: 91 | - db 92 | networks: 93 | - webnet 94 | env_file: 95 | - .env 96 | ``` 97 | 98 | ### db Service (PostgreSQL Database) 99 | 100 | The **db** service uses the official PostgreSQL image available on DockerHub. It is configured using environment variables in the `.env` file. Utilizing the `postgres_data` volume, the service ensures persistent storage of database data, safeguarding it against data loss even if the container is terminated. An `init.sh` script is executed upon container startup to initialize the database. This service is part of the 'webnet' network, allowing it to communicate with the 'web' service. 101 | 102 | ``` 103 | db: 104 | image: postgres:13 105 | env_file: 106 | - .env 107 | volumes: 108 | - ./server/db/init.sh:/docker-entrypoint-initdb.d/init.sh 109 | - postgres_data:/var/lib/postgresql/data 110 | networks: 111 | - webnet 112 | ``` 113 | 114 | ### Defining Networks 115 | 116 | The webnet network in the `docker-compose.yml` file plays a pivotal role in facilitating communication between our FastAPI application (the 'web' service) and the PostgreSQL database (the 'db' service). This custom network isolates the services in our project, ensuring that they can interact securely and efficiently. 
117 | 118 | ```yaml 119 | networks: 120 | webnet: 121 | ``` 122 | 123 | ### postgres_data Volume 124 | 125 | The `postgres_data` volume in the `docker-compose.yml` is configured to store the PostgreSQL database data persistently. This ensures that the data remains intact even when the PostgreSQL container is stopped or deleted, providing a robust solution for data persistence. 126 | 127 | ```yaml 128 | volumes: 129 | postgres_data: 130 | ``` 131 | -------------------------------------------------------------------------------- /website/docs/python/containers/build-image.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Building and Running the Docker Containers 3 | sidebar_position: 2 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | 9 | ## Objective 10 | 11 | This lab walks you through the process of building container images for our [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project and running them as distinct services using Docker Compose or equivalent Finch commands. By the end, you'll know how to manage your multi-service applications more effectively, ensuring smoother development, deployment, and updates. 12 | 13 | ## Prerequisites 14 | 15 | - [Setting up the Development Environment](../../python/introduction/environment-setup.md) 16 | 17 | 18 | 19 | 20 | ## 1. Building Docker Images for Each Service 21 | 22 | Build Docker images for the application and database services by running: 23 | 24 | ```bash 25 | docker-compose build 26 | ``` 27 | 28 | Alternatively, if you're using Finch, run the following command to build the container images for the application and database: 29 | 30 | ```bash 31 | finch compose build 32 | ``` 33 | 34 | This builds Docker images based on the configurations in the docker-compose.yml file. Docker follows the Dockerfile instructions during each service's build process, creating separate images for the 'python-fastapi-demo-docker-web' and 'python-fastapi-demo-docker-db' services. 35 | 36 | ## 2. Running the Services as Docker Containers 37 | 38 | After building the images, start the application and database services in separate Docker containers using: 39 | 40 | ```bash 41 | docker-compose up 42 | ``` 43 | 44 | Alternatively, if you're using Finch, run the following command to start the application and database services: 45 | 46 | ```bash 47 | finch compose up 48 | ``` 49 | 50 | This command initiates containers for each service as specified in the docker-compose.yml file. Don't stop the command execution to keep application and database services running. 51 | 52 | **Use the tabs below to see the steps for the specific environment where you are running this lab.** 53 | 54 | 55 | 56 | 57 | 58 | 59 | Execute the command below in a new VScode terminal to show the URL to connect to FastAPI application: 60 | ``` 61 | echo "http://$PUBLIC_IP:8000" 62 | ``` 63 | Access this URL using your web browser. 64 | 65 | 66 | 67 | 68 | Upon navigating to [http://localhost:8000](http://localhost:8000/) in your browser, you should see the FastAPI application running. 69 | 70 | 71 | 72 | 73 | ![Image](./images/app-home.png) 74 | 75 | ## 3. Verify the Setup by Adding a Book 76 | 77 | To confirm that everything is functioning as expected, attempt to add a book by selecting the **Create a book** option. 78 | 79 | ![Image](./images/app-create-book.png) 80 | 81 | ## 4. 
Interpreting Containers 82 | 83 | Your application ('python-fastapi-demo-docker-web' service) and your database ('python-fastapi-demo-docker-db' service) will operate in separate containers. The "Containers" tab in the [Docker VS Code Extension](https://code.visualstudio.com/docs/containers/overview) shows the containers for our python-fastapi-demo-docker application, as instances of the services in our Docker Compose configuration. 84 | 85 | ![Image](./images/docker-extension-open-in-browser-v2.png) 86 | 87 | 88 | ## 5. Stopping the Services and Their Containers 89 | 90 | Stop and remove the containers of both services by pressing `CTRL + C` and running the following command: 91 | 92 | ```bash 93 | docker-compose down --volumes 94 | ``` 95 | 96 | Alternatively, if you're using Finch, press CTRL + C or run the following command to stop and remove the containers: 97 | 98 | ```bash 99 | finch compose down 100 | ``` 101 | 102 | This command halts the containers and, by default, also removes the containers, networks, and volumes as described in your docker-compose.yml file. 103 | 104 | ## 6. Rebuilding and Restarting Docker Services 105 | 106 | To rebuild the images and restart the services simultaneously, execute the following command: 107 | 108 | ```bash 109 | docker-compose up --build 110 | ``` 111 | 112 | Alternatively, if you're using Finch, run the following command: 113 | 114 | ```bash 115 | finch compose up --build 116 | ``` 117 | 118 | This rebuilds the Docker images, and starts the services with the new images, ensuring your services are always operating with the latest application version. 119 | 120 | ## 7. Stopping the Services and Removing Their Containers 121 | 122 | Then again, stop and remove the containers of both services by pressing `CTRL + C` and running the following command: 123 | 124 | ```bash 125 | docker-compose down --volumes 126 | ``` 127 | 128 | Alternatively, if you're using Finch, press `CTRL + C` and run the following command to stop and remove the containers: 129 | 130 | ```bash 131 | finch compose down 132 | ``` 133 | 134 | This command halts the containers and also removes the containers, networks, and volumes as described in your docker-compose.yml file. 135 | 136 | ## Conclusion 137 | 138 | This lab explored the process of constructing and executing Docker containers using Docker Compose in the 'python-fastapi-demo-docker' project. This approach provides an efficient way to manage multi-service applications, which greatly benefits developers by streamlining the process. 
139 | -------------------------------------------------------------------------------- /website/docs/python/containers/images/app-create-book.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/app-create-book.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/app-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/app-home.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/docker-extension-open-in-browser-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/docker-extension-open-in-browser-v2.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/docker-extension-open-in-browser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/docker-extension-open-in-browser.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/multi-arch-linux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/multi-arch-linux.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/multi-arch-windows.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/multi-arch-windows.png -------------------------------------------------------------------------------- /website/docs/python/containers/images/multi-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/containers/images/multi-arch.png -------------------------------------------------------------------------------- /website/docs/python/containers/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Containers 3 | sidebar_position: 201 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import MultiArchLinuxImageUrl from '@site/docs/python/containers/images/multi-arch-linux.png'; 8 | import MultiArchWindowsImageUrl from '@site/docs/python/containers/images/multi-arch-windows.png'; 9 | 10 | ## Overview 11 | 12 | This chapter introduces the process of containerizing an application, emphasizing the creation of multi-architecture images compatible with Kubernetes. 
Subsequently, we'll show how to deploy these images to a private Amazon Elastic Container Registry (ECR) and manage them within a Kubernetes environment. 13 | 14 | ## Objective 15 | 16 | This guide aims to introduce essential concepts and practices related to containerization. It focuses on familiarizing you with the benefits of containerization, the role of Amazon ECR as a container registry, the importance of multi-architecture images for Kubernetes, and how Kubernetes uses containerization for efficient application deployment and management. 17 | 18 | ## Terms 19 | 20 | Containerization is a method of running applications in isolated environments, each with its own resources. In the context of Kubernetes, container images should be multi-architecture to ensure compatibility across different node architectures. 21 | 22 | - A **container image** is a self-contained, lightweight package holding everything necessary to run an application. It comprises a series of read-only layers, each layer signifying a modification to its predecessor. For compatibility with Kubernetes, it's crucial to make container images multi-architecture. These images are stored in a container registry and can be deployed on any system supporting containerization. 23 | 24 | - A **container** is a running instance of a container image, operating as a process on a host system. With its unique file system, network interface, and resource set, it's isolated from other containers and the host system. Containers are transient, capable of swift creation, commencement, halting, and deletion. 25 | 26 | - A **container registry** is a centralized platform for storing, managing, and distributing container images. It acts as a repository, facilitating easy image access and retrieval across various hosts or environments. Container registries can be public or private, reflecting the organization's security needs. While public registries like Docker Hub allow unrestricted image upload and access, private ones like Amazon Elastic Container Registry (ECR) cater to enterprise applications. 27 | 28 | - In the context of containers, **multi-architecture** refers to the ability of a container image to run on multiple CPU architectures (e.g., `linux/amd64`, `linux/arm64`, `windows/amd64`). A multi-architecture container image is nothing but a list of images that have references to binaries and libraries compiled for multiple CPU architectures. An important advantage of multi-architecture containers is the ability to deploy highly available applications in a Kubernetes cluster that can be made up of nodes with different CPU architectures (x86-64, ARM64, Windows). Let's explore multi-architecture images across various container registries, such as an ECR public repository and DockerHub. In the following example, the [docker/library/python](https://gallery.ecr.aws/docker/library/python#:~:text=OS/Arch%3A%C2%A0Linux%2C%20Windows%2C%20ARM%2064%2C%20x86%2D64%2C%20x86%2C%20ARM) image on ECR supports multiple architectures like Linux, Windows, ARM64, and x86. The [python](https://hub.docker.com/_/python#:~:text=Supported%20architectures) image on DockerHub offers similar versatility. 
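
To see this for yourself, you can list the platforms included in a published manifest list. For example, with the Docker CLI and the public ECR Python image referenced above (the exact set of platforms shown depends on the image and tag you inspect, and you may need to authenticate to the registry first):

```bash
# Print the manifest list of the public Python image; each entry
# corresponds to one OS/architecture combination.
docker buildx imagetools inspect public.ecr.aws/docker/library/python:latest
```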
29 | 30 | 31 | Linux/arm64 32 | 33 | 34 | Windows 35 | 36 | 37 | 38 | ## Services 39 | 40 | - [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/) 41 | -------------------------------------------------------------------------------- /website/docs/python/containers/integration-ecr.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Integrating Amazon ECR with Docker Compose 3 | sidebar_position: 4 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | 9 | This lab shows to streamline the process of uploading Docker images to Amazon ECR and employing them within Docker Compose. By shifting the Docker image storage to Amazon ECR and harnessing them in Docker Compose, we aim to enhance deployment smoothness and the scalability of your microservices application. 10 | 11 | ## Prerequisites 12 | 13 | - [Uploading Container Images to Amazon ECR](upload-ecr.md) 14 | 15 | 16 | 17 | 18 | ## 1. Adjustments to the Dockerfile 19 | 20 | After the successful upload of Docker images to Amazon ECR, no changes are required in the Dockerfile, which serves as a consistent blueprint for defining the environment, dependencies, and image creation steps. 21 | 22 | ## 2. Updating Docker Compose Configuration 23 | 24 | To ensure consistent deployments and resource efficiency, update the `docker-compose.yml` file to use images from your Amazon ECR repository, rather than building them locally. 25 | 26 | Replace the local build directive in your `docker-compose.yml`: 27 | 28 | ```yaml 29 | web: 30 | build: . 31 | image: fastapi-microservices:${IMAGE_VERSION} 32 | ``` 33 | 34 | Instead, use the pre-built Docker image hosted on Amazon ECR: 35 | 36 | ```yaml 37 | web: 38 | build: . 39 | image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 40 | ``` 41 | 42 | ## 3. Running Docker Compose 43 | 44 | Authenticate your Docker CLI to your Amazon ECR registry before running Docker Compose: 45 | 46 | ```bash 47 | aws ecr get-login-password --region ${AWS_REGION} | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com 48 | ``` 49 | 50 | Alternatively, if you're using Finch, run the following command to login to Amazon ECR: 51 | 52 | ```bash 53 | aws ecr get-login-password --region ${AWS_REGION} | finch login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com 54 | ``` 55 | 56 | Now, initiate the services with Docker Compose, pulling the image from Amazon ECR: 57 | 58 | ```bash 59 | docker-compose up 60 | ``` 61 | 62 | Alternatively, if you're using Finch, run the following command to compose and pull the image from Amazon ECR: 63 | 64 | ```bash 65 | finch compose up 66 | ``` 67 | 68 | This command will now pull the image from the Amazon ECR repository, as specified in the docker-compose.yml file, and start your services. 69 | 70 | Press `Ctrl+C` to stop the services. 71 | 72 | ## 4. Updating Docker Images 73 | 74 | If you're working with a team and sharing the Docker image for the FastAPI application on Amazon ECR, you might find yourself pulling updates from ECR, making changes, and then pushing updates back to ECR. Here's the typical workflow. 
75 | 76 | To pull the latest image, run: 77 | 78 | ```bash 79 | docker-compose pull web 80 | ``` 81 | 82 | Alternatively, if you're using Finch, run the following command to pull the latest image: 83 | 84 | ```bash 85 | finch compose pull web 86 | ``` 87 | 88 | To start your services, run: 89 | 90 | ```bash 91 | docker-compose up 92 | ``` 93 | 94 | Alternatively, if you're using Finch, run the following command: 95 | 96 | ```bash 97 | finch compose up 98 | ``` 99 | 100 | After making changes to your application, stop the running services with `Ctrl+C`. 101 | Then update the image version value in your environment variable: 102 | 103 | ``` 104 | export IMAGE_VERSION=1.1 105 | ``` 106 | Verify that `IMAGE_VERSION` is updated by executing the following command: 107 | 108 | ```bash 109 | echo $IMAGE_VERSION 110 | 1.1 111 | ``` 112 | 113 | To build a new image for your application, run: 114 | 115 | ```bash 116 | docker-compose build web 117 | ``` 118 | 119 | Alternatively, if you're using Finch, run the following command: 120 | 121 | ```bash 122 | finch compose build web 123 | ``` 124 | 125 | Above command will build image with ECR tag to verify please run following command: 126 | 127 | ```bash 128 | docker image ls | grep amazonaws.com/fastapi-microservices 129 | AWS_ACCOUNT_ID.dkr.ecr.AWS_REGION.amazonaws.com/fastapi-microservices 1.1 defd60e3e376 6 minutes ago 233MB 130 | AWS_ACCOUNT_ID.dkr.ecr.AWS_REGION.amazonaws.com/fastapi-microservices 1.0 abc11f568055 2 hours ago 233MB 131 | ``` 132 | 133 | Alternatively, if you're using Finch, run the following command: 134 | 135 | ```bash 136 | finch image ls | grep amazonaws.com/fastapi-microservices 137 | ``` 138 | 139 | The expected output should look like this: 140 | 141 | ```text 142 | AWS_ACCOUNT_ID.dkr.ecr.AWS_REGION.amazonaws.com/fastapi-microservices 1.1 defd60e3e376 6 minutes ago 233MB 143 | AWS_ACCOUNT_ID.dkr.ecr.AWS_REGION.amazonaws.com/fastapi-microservices 1.0 abc11f568055 2 hours ago 233MB 144 | ``` 145 | 146 | To push the new image to your ECR repository, run: 147 | 148 | ```bash 149 | docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 150 | ``` 151 | 152 | Alternatively, if you're using Finch, run the following command to push the new image: 153 | 154 | ```bash 155 | finch push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 156 | ``` 157 | 158 | ## Cleanup 159 | 160 | To clean up created images run the following command: 161 | 162 | ```bash 163 | docker rmi -f $(docker images "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/*" -q) 164 | ``` 165 | 166 | Alternatively, if you're using Finch, run the following command: 167 | 168 | ```bash 169 | finch rmi -f $(finch images --filter reference=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com -q) 170 | ``` 171 | 172 | Stop and remove the containers of both services by running the following command: 173 | 174 | ```bash 175 | docker-compose down --volumes 176 | ``` 177 | 178 | Alternatively, if you're using Finch, run the following command to stop and remove the containers: 179 | 180 | ```bash 181 | finch compose down 182 | ``` 183 | 184 | ## Conclusion 185 | 186 | Through this lab, we've achieved seamless integration of Docker images with Docker Compose by uploading them to Amazon ECR. 
This approach has increased the portability of our FastAPI application and PostgreSQL database, allowing any Docker-equipped environment to pull images, create containers, and run the application with minimal fuss. 187 | -------------------------------------------------------------------------------- /website/docs/python/containers/upload-ecr.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Uploading Container Images to Amazon ECR 3 | sidebar_position: 3 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | 9 | This lab shows the process of pushing Docker images to Amazon ECR using the FastAPI and PostgreSQL images from our [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project. We'll showcase how uploading these Docker images to ECR enhances your development, testing, and deployment workflows by making the images accessible across different environments. 10 | 11 | ## Prerequisites 12 | 13 | - [Building and Running the Docker Containers](build-image.md) 14 | - Upgrade to the latest version of the AWS CLI using the steps in [official AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). 15 | 16 | 17 | 18 | 19 | ## 1. Creating an ECR Repository 20 | 21 | Create a new private Amazon ECR repository: 22 | 23 | ```bash 24 | aws ecr create-repository --repository-name fastapi-microservices 25 | ``` 26 | 27 | ## 2. Logging into Amazon ECR 28 | 29 | Authenticate your Docker CLI to your Amazon ECR registry using: 30 | 31 | ```bash 32 | aws ecr get-login-password \ 33 | --region ${AWS_REGION} | docker login \ 34 | --username AWS \ 35 | --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com 36 | ``` 37 | 38 | Alternatively, if you're using Finch, run the following command to authenticate: 39 | 40 | ```bash 41 | aws ecr get-login-password \ 42 | --region ${AWS_REGION} | finch login \ 43 | --username AWS \ 44 | --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com 45 | ``` 46 | 47 | You should see the following response output: “Login Succeeded”. 48 | 49 | **Note:** If you get an error, check the value of parameter `"credsStore"` in your docker configuration (e.g., `~/.docker/config.json` on Mac). If the value is `"ecr-login"` you can skip this step, because there is no need to execute the `docker login` command. 50 | 51 | ## 3. Uploading Docker Images to ECR 52 | 53 | Tag your Docker image for the ECR repository: 54 | 55 | ```bash 56 | docker tag fastapi-microservices:${IMAGE_VERSION} ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 57 | ``` 58 | 59 | Alternatively, if you're using Finch, run the following command to tag your image: 60 | 61 | ```bash 62 | finch tag fastapi-microservices:${IMAGE_VERSION} ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 63 | ``` 64 | 65 | Push the tagged image to the ECR repository: 66 | 67 | ```bash 68 | docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 69 | ``` 70 | 71 | Alternatively, if you're using Finch, run the following command to push the image: 72 | 73 | ```bash 74 | finch push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 75 | ``` 76 | 77 | ## 4. 
Retrieving the Docker Image from ECR 78 | 79 | Retrieve the Docker image from your ECR repository with this command: 80 | 81 | ```bash 82 | docker pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 83 | ``` 84 | 85 | Alternatively, if you're using Finch, run the following command to retrieve the container image from your ECR repository: 86 | 87 | ```bash 88 | finch pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 89 | ``` 90 | 91 | Look for an output message stating that the image is up-to-date, signaling a successful operation. 92 | 93 | ## Conclusion 94 | 95 | This lab walked you through the process of pushing a Docker container image to Amazon ECR. This method provides a convenient way to manage and distribute Docker images, making it an essential tool for any developer working with Docker. 96 | -------------------------------------------------------------------------------- /website/docs/python/eks/Cleanup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Cleaning Up Resources 3 | sidebar_position: 14 4 | --- 5 | 6 | import Tabs from '@theme/Tabs'; 7 | import TabItem from '@theme/TabItem'; 8 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 9 | 10 | ## Objective 11 | 12 | This guide shows you how to delete all the resources you created in this workshop. 13 | 14 | 15 | 16 | 17 | ## Cleanup 18 | 19 | To avoid incurring future charges, you should delete the resources you created during this workshop. 20 | 21 | 22 | 23 | 24 | 1. Retrieve the EFS ID (e.g., `fs-040f4681791902287`) you configured in the [previous lab exercise](setup-storage.md), then replace the sample value in [eks/efs-pv.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/efs-pv.yaml) with your EFS ID. 25 | ```bash 26 | echo $file_system_id 27 | ``` 28 | 29 | 2. Run the following commands to delete all resources created in this workshop. 
30 | ```bash 31 | # Delete the ECR repository 32 | aws ecr delete-repository --repository-name fastapi-microservices --force 33 | 34 | # Delete FastAPI services 35 | kubectl delete -f eks/deploy-app-python.yaml 36 | 37 | # Delete PostgreSQL services 38 | kubectl delete -f eks/deploy-db-python-fargate.yaml 39 | 40 | # Delete the Persistent Volume Claim (PVC) 41 | kubectl delete pvc postgres-data-fastapi-postgres-0 -n my-cool-app 42 | 43 | # Delete the Persistent Volume (PV) 44 | kubectl delete -f eks/efs-pv.yaml 45 | 46 | # Delete the Storage Class 47 | kubectl delete -f eks/efs-sc.yaml 48 | 49 | # Delete all mount targets associated with your EFS file system 50 | for mount_target_id in $(aws efs describe-mount-targets --file-system-id $file_system_id --output text --query 'MountTargets[*].MountTargetId'); do 51 | aws efs delete-mount-target --mount-target-id "$mount_target_id" 52 | done 53 | 54 | # Delete the cluster 55 | eksctl delete cluster -f eks/create-fargate-python.yaml 56 | ``` 57 | 58 | 59 | 60 | 61 | ```bash 62 | # Delete the ECR repository 63 | aws ecr delete-repository --repository-name fastapi-microservices --force 64 | 65 | # Delete FastAPI services 66 | kubectl delete -f eks/deploy-app-python.yaml 67 | 68 | # Delete PostgreSQL services 69 | kubectl delete -f eks/deploy-db-python.yaml 70 | 71 | # Delete PodDisruptionBudgets for 'coredns' and 'ebs-csi-controller' 72 | kubectl delete pdb coredns ebs-csi-controller -n kube-system 73 | 74 | # Delete the cluster 75 | eksctl delete cluster -f eks/create-mng-python.yaml 76 | ``` 77 | 78 | -------------------------------------------------------------------------------- /website/docs/python/eks/about-deploy.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: About FastAPI and PostgreSQL Kubernetes resources 3 | sidebar_position: 2 4 | --- 5 | ## Overview 6 | This chapter shows you how to deploy the Kubernetes resources for our FastAPI application and PostgreSQL database within an Amazon EKS cluster. 7 | 8 | ## Objective 9 | This guide provides an overview of the resources to deploy the FastAPI application and PostgreSQL database within our Amazon EKS cluster. 10 | 11 | ## FastAPI - Deployment, Service, and Ingress 12 | The **[deploy-app-python.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/deploy-app-python.yaml)** manifest file is used for the deployment of the FastAPI application and consists of three primary resources: 13 | 14 | - **Service:** The service in the manifest exposes the FastAPI application running on the EKS cluster to the external world. It routes incoming traffic on port 80 to the FastAPI application's listening port (8000). The service uses a NodePort type which automatically allocates a port from the NodePort range (default: 30000-32767) and proxies traffic on each node from that port into the Service. This allows for the Service to be externally accessible to Application and Network Load Balancers. 15 | - **Deployment:** The deployment in the manifest dictates how the FastAPI application should be deployed onto the EKS cluster. It specifies the number of replicas (i.e. the number of application instances that should be running), the container image to be used, and the necessary environment variables from a secret. It sets resource requests and limits for the containers to ensure the application gets the necessary resources. 
16 | - **Ingress:** The Ingress in the manifest provides HTTP route management for services within the EKS cluster. It routes incoming traffic to the FastAPI service based on the request path. In this scenario, all requests are directed to the FastAPI service. The Ingress configuration in this file is specifically set up for AWS using the AWS Load Balancer Controller, configuring an Application Load Balancer (ALB) that is internet-facing and employs an IP-based target type. 17 | 18 | ## PostgreSQL - StatefulSet, Service, StorageClass, and VolumeClaimTemplates 19 | The **[deploy-db-python.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/deploy-db-python.yaml)** file is used for the deployment of the PostgreSQL database and consists of four primary resources: 20 | 21 | - **StorageClass**: The StorageClass in the manifest is specific to AWS EBS (Elastic Block Store) and allows dynamic provisioning of storage for PersistentVolumeClaims. The EBS provisioner gives the PersistentVolumes a stable and resilient storage solution for our PostgreSQL database. 22 | - **Service:** The Service in the manifest exposes the PostgreSQL database within the EKS cluster, allowing the FastAPI application to access it. The Service listens on port 5432, which is the default PostgreSQL port. The Service is headless (as indicated by `clusterIP: None`), meaning it enables direct access to the Pods in the StatefulSet rather than load balancing across them. 23 | - **StatefulSet:** The StatefulSet in the manifest manages the PostgreSQL database deployment. A StatefulSet is used instead of a Deployment as it ensures each Pod receives a stable network identity and stable storage, which is essential for databases. The PostgreSQL container uses a Secret to obtain its environment variables and mounts a volume for persistent storage. 24 | - **VolumeClaimTemplates**: The volumeClaimTemplates section within the StatefulSet definition requests a specific amount of storage for the PostgreSQL database. It requests a storage capacity of 1Gi with [ReadWriteOnce](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) access mode, and it uses the AWS EBS StorageClass defined in this manifest. This ensures that each Pod within the StatefulSet gets its own PersistentVolume, guaranteeing the database data remains persistent across pod restarts, and the data is accessible from any node in the EKS cluster. -------------------------------------------------------------------------------- /website/docs/python/eks/access-app.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Accessing the FastAPI App 3 | sidebar_position: 10 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | 9 | This guide walks you through the process of accessing your microservices deployed on the EKS cluster. Using an Ingress object, we exposed the FastAPI service through an Application Load Balancer (ALB). The ALB is managed by the AWS Load Balancer Controller based on the Ingress manifest. 10 | 11 | ## Prerequisites 12 | 13 | - [Deploying FastAPI and PostgreSQL Microservices to EKS](./deploy-app.md) 14 | 15 | 16 | 17 | 18 | ## 1. Checking the Status of Pods 19 | 20 | Before we try to access our application, we need to ensure that all of our pods are running correctly.
To check the status of all pods, run the following command: 21 | 22 | ```bash 23 | kubectl get pods -n my-cool-app 24 | ``` 25 | 26 | All your pods should be in the "Running" state. If they're not, you will need to troubleshoot the deployment before proceeding. 27 | 28 | ## 2. Getting the ALB URL 29 | 30 | Run the following command to get the ALB URL: 31 | 32 | ```bash 33 | kubectl get ingress -n my-cool-app 34 | ``` 35 | 36 | The expected output should look like this: 37 | 38 | ```bash 39 | NAME CLASS HOSTS ADDRESS PORTS AGE 40 | fastapi-ingress * k8s-mycoolap-fastapii-8114c40e9c-860636650.us-west-2.elb.amazonaws.com 80 3m17s 41 | ``` 42 | 43 | ## 3. Accessing the FastAPI Service 44 | 45 | In the previous lab exercise, we used the AWS Load Balancer Controller (LBC) to dynamically provision an [Application Load Balancer (ALB)](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html). Note that it can take several minutes or more for the ALB to finish provisioning. 46 | 47 | 1. **Check the status**: Open the [Load Balancers](https://console.aws.amazon.com/ec2/#LoadBalancers:) page on the Amazon EC2 console and select the AWS Region in which your Amazon EKS cluster resides. Next, select your ALB name, such as `k8s-mycoolap-fastapii-8004c40e9c`. 48 | 2. **Open the app**: Open a new tab in your browser and paste the ALB link, such as `k8s-mycoolap-fastapii-8114c40e9c-860636650.us-west-2.elb.amazonaws.com`. You should see the welcome page: 49 | 50 | ![](./images/app-home.png) 51 | 52 | ## 4. Verifying the Setup by Adding a Book 53 | 54 | To confirm that everything is functioning as expected, attempt to add a book by selecting the **Create a book** option. 55 | 56 | ![](./images/app-create-book.png) 57 | 58 | ## Conclusion 59 | 60 | This guide has walked you through the steps necessary to access the FastAPI service deployed on an EKS cluster. We've shown you how to check the status of your pods and verify your setup by interacting with the FastAPI service. -------------------------------------------------------------------------------- /website/docs/python/eks/deploy-app.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Deploying FastAPI and PostgreSQL Microservices to EKS 3 | sidebar_position: 9 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | import GetECRURI from '../../../src/includes/get-ecr-uri.md'; 9 | 10 | ## Objective 11 | 12 | This lab shows you how to deploy the microservices of the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project onto your Amazon EKS cluster—either your AWS Fargate or managed node groups-based cluster. To gain a deeper understanding of the Kubernetes resources in these manifests, refer to [Deploying FastAPI and PostgreSQL Kubernetes resources to Amazon EKS](about-deploy.md). 13 | 14 | ## Prerequisites 15 | 16 | - [Securing FastAPI Microservices with Kubernetes Secrets in Amazon EKS](./deploy-secrets.md) 17 | - [Building and Running the Docker Containers](../containers/build-image.md) 18 | - [Uploading Container Images to Amazon ECR](../containers/upload-ecr.md) 19 | 20 | 21 | 22 | 23 | ## 1.
Creating db-init-script ConfigMap 24 | 25 | Run the following command from the `python-fastapi-demo-docker` project directory to create the ConfigMap: 26 | 27 | ```bash 28 | kubectl create configmap db-init-script --from-file=init.sh=server/db/init.sh -n my-cool-app 29 | ``` 30 | 31 | The expected output should look like this: 32 | 33 | ```bash 34 | configmap/db-init-script created 35 | ``` 36 | 37 | To confirm that your Kubernetes ConfigMap has been successfully created, you can use the `kubectl get configmap` command. This command lists all ConfigMaps that exist in the specified namespace: 38 | 39 | ```bash 40 | kubectl get configmap -n my-cool-app 41 | ``` 42 | 43 | The expected output should look like this: 44 | 45 | ```bash 46 | 47 | NAME DATA AGE 48 | db-init-script 1 4m47s 49 | kube-root-ca.crt 1 5m36s 50 | ``` 51 | 52 | ## 2. Deploying the PostgreSQL StatefulSet, Service, and PersistentVolumeClaim 53 | 54 | The **[eks/deploy-db-python.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/deploy-db-python.yaml)** file is used for the deployment of the PostgreSQL database and consists of four primary resources: a StorageClass, Service, StatefulSet, and PersistentVolumeClaim. 55 | 56 | 57 | 58 | 59 | From the `python-fastapi-demo-docker` project directory, apply the database manifest: 60 | 61 | ```bash 62 | kubectl apply -f eks/deploy-db-python-fargate.yaml 63 | ``` 64 | 65 | It will take a few minutes for Fargate to provision the pods. To verify that the database pod is running, run the following command: 66 | 67 | ```bash 68 | kubectl get pod fastapi-postgres-0 -n my-cool-app 69 | ``` 70 | 71 | The expected output should look like this: 72 | 73 | ```bash 74 | kubectl get pod fastapi-postgres-0 -n my-cool-app 75 | NAME READY STATUS RESTARTS AGE 76 | fastapi-postgres-0 1/1 Running 0 4m32s 77 | ``` 78 | 79 | 80 | 81 | 82 | From the `python-fastapi-demo-docker` project directory, apply the database manifest: 83 | 84 | ```bash 85 | kubectl apply -f eks/deploy-db-python.yaml 86 | ``` 87 | 88 | To verify that the database pod is running, run the following command: 89 | 90 | ```bash 91 | kubectl get pod fastapi-postgres-0 -n my-cool-app 92 | ``` 93 | 94 | The expected output should look like this: 95 | 96 | ```bash 97 | kubectl get pod fastapi-postgres-0 -n my-cool-app 98 | NAME READY STATUS RESTARTS AGE 99 | fastapi-postgres-0 1/1 Running 0 4m32s 100 | ``` 101 | 102 | 103 | 104 | 105 | ## 3. Deploying the FastAPI Deployment, Service, and Ingress 106 | 107 | The **[deploy-app-python.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/deploy-app-python.yaml)** manifest file is used for the deployment of the FastAPI application and consists of three primary resources: a Service, Deployment, and Ingress. 108 | 109 | 110 | 111 | 112 | Next, open **[eks/deploy-app-python.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/eks/deploy-app-python.yaml)** and replace the sample value with your ECR repository image URI and tag (e.g., `1.0`). 113 | 114 | From the `python-fastapi-demo-docker` project directory, apply the application manifest: 115 | 116 | ```bash 117 | kubectl apply -f eks/deploy-app-python.yaml 118 | ``` 119 | 120 | ## 4. Verifying the Deployment 121 | 122 | After applying the manifest, verify that the deployment is running correctly.
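If you prefer to wait for the FastAPI rollout to finish before inspecting each resource, you can optionally watch its status first. This is a minimal optional check; the Deployment name `fastapi-deployment` matches the expected output shown below:

```bash
# Optionally wait up to two minutes for the FastAPI Deployment rollout to complete
kubectl rollout status deployment/fastapi-deployment -n my-cool-app --timeout=120s
```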
123 | 124 | Check the services: 125 | 126 | ```bash 127 | kubectl get services -n my-cool-app 128 | ``` 129 | 130 | The expected output should look like this: 131 | 132 | ```bash 133 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 134 | db ClusterIP None 5432/TCP 2m48s 135 | fastapi-service NodePort 10.100.18.255 80:30952/TCP 21s 136 | ``` 137 | 138 | Check the ingress: 139 | 140 | ```bash 141 | kubectl get ingress -n my-cool-app 142 | ``` 143 | 144 | The expected output should look like this: 145 | 146 | ```bash 147 | NAME CLASS HOSTS ADDRESS PORTS AGE 148 | fastapi-ingress * k8s-mycoolap-fastapii-8114c40e9c-860636650.us-west-2.elb.amazonaws.com 80 3m17s 149 | ``` 150 | 151 | Check the deployments: 152 | 153 | ```bash 154 | kubectl get deployments -n my-cool-app 155 | ``` 156 | 157 | The expected output should look like this: 158 | 159 | ```bash 160 | NAME READY UP-TO-DATE AVAILABLE AGE 161 | fastapi-deployment 1/1 1 1 67s 162 | ``` 163 | 164 | Check the StatefulSet: 165 | 166 | ```bash 167 | kubectl get statefulsets -n my-cool-app 168 | ``` 169 | 170 | The expected output should look like this: 171 | 172 | ```bash 173 | NAME READY AGE 174 | fastapi-postgres 1/1 3m59s 175 | ``` 176 | 177 | Check the PersistentVolumeClaims: 178 | 179 | ```bash 180 | kubectl get pvc -n my-cool-app 181 | ``` 182 | 183 | The expected output should look like this: 184 | 185 | ```bash 186 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 187 | postgres-data-fastapi-postgres-0 Bound pvc-84d12ce1-916c-4044-8056-94eb97e25ccd 1Gi RWO ebs-sc 4m12s 188 | ``` 189 | 190 | Check the pods: 191 | 192 | ```bash 193 | kubectl get pods -n my-cool-app 194 | ``` 195 | 196 | The expected output should look like this: 197 | 198 | ```bash 199 | NAME READY STATUS RESTARTS AGE 200 | fastapi-deployment-6b587dfb54-j26pc 1/1 Running 0 2m19s 201 | fastapi-postgres-0 1/1 Running 0 4m46s 202 | ``` 203 | -------------------------------------------------------------------------------- /website/docs/python/eks/deploy-secrets.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Securing FastAPI Microservices with Kubernetes Secrets in Amazon EKS 3 | sidebar_position: 8 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | 9 | This lab will help you secure sensitive information in your Amazon EKS Kubernetes cluster. By the end of it, you will be able to create Kubernetes secrets from an environment file and verify the creation of these secrets. 10 | 11 | ## Prerequisites 12 | 13 | - [Setting up Scalable Storage with the EBS CSI Driver in Amazon EKS](./setup-storage.md) 14 | 15 | 16 | 17 | 18 | ## 1. Creating a Generic Kubernetes Secret from the .env File 19 | 20 | Create the Kubernetes Secret in the `my-cool-app` namespace: 21 | 22 | ```bash 23 | kubectl create secret generic fastapi-secret --from-env-file=.env -n my-cool-app 24 | ``` 25 | 26 | The expected output should look like this: 27 | 28 | ```bash 29 | secret/fastapi-secret created 30 | ``` 31 | 32 | ## 2. 
Verifying the Secret Creation with kubectl get secret 33 | 34 | To confirm that your Kubernetes Secret has been successfully created, you can use [`kubectl get secrets`](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl/#verify-the-secret) to list all secrets that exist in the specified namespace: 35 | 36 | ```bash 37 | kubectl get secrets -n my-cool-app 38 | ``` 39 | 40 | The expected output should look like this: 41 | 42 | ```bash 43 | NAME TYPE DATA AGE 44 | fastapi-secret Opaque 20 9m 45 | ``` -------------------------------------------------------------------------------- /website/docs/python/eks/images/FastAPI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/FastAPI.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/Local-tracing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/Local-tracing.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/Metadata.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/Metadata.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/Segment-Details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/Segment-Details.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/app-create-book.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/app-create-book.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/app-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/app-home.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/aws-rds-books.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/aws-rds-books.png -------------------------------------------------------------------------------- /website/docs/python/eks/images/k8-app-trace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/k8-app-trace.png 
-------------------------------------------------------------------------------- /website/docs/python/eks/images/kubernetes-resources-1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/kubernetes-resources-1.jpg -------------------------------------------------------------------------------- /website/docs/python/eks/images/kubernetes-resources-2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/kubernetes-resources-2.jpg -------------------------------------------------------------------------------- /website/docs/python/eks/images/kubernetes-resources-3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/kubernetes-resources-3.jpg -------------------------------------------------------------------------------- /website/docs/python/eks/images/kubernetes-resources-4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/kubernetes-resources-4.jpg -------------------------------------------------------------------------------- /website/docs/python/eks/images/kubernetes-resources-5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/kubernetes-resources-5.jpg -------------------------------------------------------------------------------- /website/docs/python/eks/images/raw-trace-snippet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/eks/images/raw-trace-snippet.png -------------------------------------------------------------------------------- /website/docs/python/eks/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Amazon Elastic Kubernetes Service (EKS) 3 | sidebar_position: 401 4 | --- 5 | In this chapter, we'll learn how to create an Amazon EKS cluster and deploy our containerized applications to it. 6 | 7 | ## Terms 8 | Amazon Elastic Kubernetes Service (Amazon EKS) is a fully managed service that makes it easy to run Kubernetes on AWS without the need to manage the Kubernetes control plane or the worker nodes. With Amazon EKS, you can run Kubernetes applications on AWS without the need to operate your own Kubernetes control plane or worker nodes. 9 | 10 | Amazon EKS supports multiple cluster types, including self-managed nodes, managed node groups, and Fargate clusters. The choice of cluster type depends on the level of control you require over the underlying infrastructure and the ease of management. 11 | 12 | - A **self-managed node** cluster is a traditional Kubernetes cluster, where the worker nodes are EC2 instances that are managed by the user. 
With self-managed nodes, you have full control over the EC2 instances and the Kubernetes worker nodes, including the ability to use custom Amazon Machine Images (AMIs), use EC2 Spot instances to reduce costs, and use auto-scaling groups to adjust the number of nodes based on demand. 13 | - **Managed node groups** are a managed worker node option that simplifies the deployment and management of worker nodes in your EKS cluster. With managed node groups, you can launch a group of worker nodes with the desired configuration, including instance type, AMI, and security groups. The managed node group will automatically manage the scaling, patching, and lifecycle of the worker nodes, simplifying the operation of the EKS cluster. 14 | - **Fargate** clusters are another managed worker node option that abstracts away the underlying infrastructure, allowing you to run Kubernetes pods without managing the worker nodes. With Fargate clusters, you can launch pods directly on Fargate without the need for EC2 instances. Fargate clusters simplify the operation of the EKS cluster by abstracting away the worker nodes, allowing you to focus on the applications running in the Kubernetes cluster. 15 | 16 | ## Tools 17 | Before you begin, make sure that the following tools have been installed: 18 | 19 | ``` 20 | kubectl version --client 21 | eksctl version 22 | helm version 23 | ``` 24 | 25 | If any of these tools are missing, refer to section [Setting up the Development Environment](../../python/introduction/environment-setup.md) for installation instructions. 26 | 27 | ## Samples 28 | - [eksctl examples](https://github.com/weaveworks/eksctl/tree/main/examples) -------------------------------------------------------------------------------- /website/docs/python/eks/manage-contexts.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Managing Kubernetes Contexts in EKS Cluster 3 | sidebar_position: 5 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | 9 | ## Objective 10 | This lab shows how to verify and switch Kubernetes contexts in an EKS cluster. We'll make use of the kubectl command-line tool, which allows you to run commands against Kubernetes clusters. Specifically, you'll learn how to check your current context and switch to a different one if needed, allowing your local environment to interact with the desired cluster. 11 | 12 | ## Prerequisites 13 | 14 | - [Creating an Amazon EKS Cluster](create-cluster.md) 15 | 16 | 17 | 18 | 19 | ## 1. Verifying the Current Context 20 | 21 | In Kubernetes, the term "context" refers to the cluster and namespace currently targeted by the kubectl command-line tool. Start by verifying the current context with the following command: 22 | 23 | ```bash 24 | kubectl config current-context 25 | ``` 26 | 27 | This command will output the current context, which should resemble: 28 | 29 | ```bash 30 | arn:aws:eks:us-east-1:012345678901:cluster/fargate-quickstart 31 | ``` 32 | 33 | or 34 | 35 | ```bash 36 | admin@fargate-quickstart.us-east-1.eksctl.io 37 | ``` 38 | 39 | ## 2. Switching Contexts 40 | 41 | If your current context doesn't match your EKS cluster, you need to switch contexts. Switching contexts points your local Kubernetes CLI tool, kubectl, to interact with your desired cluster. 
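If you want to see every context already present in your kubeconfig before switching, you can list them first. This is an optional check, and the entries you see will depend on your environment:

```bash
# List all contexts in your kubeconfig; the current context is marked with an asterisk
kubectl config get-contexts
```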
42 | 43 | From the `python-fastapi-demo-docker` project directory, update your local kubeconfig file using either one of the following commands: 44 | 45 | 46 | 47 | 48 | ```bash 49 | aws eks --region ${AWS_REGION} update-kubeconfig --name fargate-quickstart 50 | ``` 51 | 52 | or 53 | 54 | ```bash 55 | eksctl utils write-kubeconfig --cluster fargate-quickstart --region ${AWS_REGION} 56 | ``` 57 | 58 | Executing the above commands should output a confirmation message similar to the output below, indicating a successful context switch: 59 | 60 | ```bash 61 | Updated context arn:aws:eks:us-east-1:012345678901:cluster/fargate-quickstart in /Users/frank/.kube/config 62 | ``` 63 | or 64 | 65 | ```bash 66 | 2023-09-22 17:00:52 [✔] saved kubeconfig as "/Users/frank/.kube/config" 67 | ``` 68 | 69 | 70 | 71 | 72 | ```bash 73 | aws eks --region ${AWS_REGION} update-kubeconfig --name managednode-quickstart 74 | ``` 75 | 76 | or 77 | 78 | ```bash 79 | eksctl utils write-kubeconfig --cluster managednode-quickstart --region ${AWS_REGION} 80 | ``` 81 | 82 | Executing the above commands should output a confirmation message similar to the output below, indicating a successful context switch: 83 | 84 | ```bash 85 | Updated context arn:aws:eks:us-east-1:012345678901:cluster/managednode-quickstart in /Users/frank/.kube/config 86 | ``` 87 | or 88 | 89 | ```bash 90 | 2023-09-22 17:00:52 [✔] saved kubeconfig as "/Users/frank/.kube/config" 91 | ``` 92 | 93 | 94 | 95 | 96 | :::tip 97 | 98 | - If using an AWS CLI version older than 1.16.156, make sure that the `aws-iam-authenticator` is installed in your environment. Refer to [Installing aws-iam-authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html) in the EKS documentation. 99 | 100 | ::: 101 | 102 | ## Conclusion 103 | 104 | This lab provided a quick walkthrough on how to verify and switch Kubernetes contexts in an EKS cluster. With a good grasp of Kubernetes contexts, you're now better equipped to handle workloads on different EKS clusters efficiently. 105 | 106 | -------------------------------------------------------------------------------- /website/docs/python/eks/setup-loadbalancing.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Setting up the AWS Application Load Balancer Controller (LBC) on the EKS Cluster 3 | sidebar_position: 6 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | 9 | ## Objective 10 | This lab shows you how to set up the [AWS Load Balancer Controller (LBC)](https://kubernetes-sigs.github.io/aws-load-balancer-controller/) on your cluster, which enables the routing of external traffic to your Kubernetes services. We'll leverage the [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) we configured when we created our cluster, ensuring that the controller has the required permissions. 11 | 12 | :::info 13 | 14 | Classic Load Balancers are not supported for pods running on Fargate. Network Load Balancers are only supported when using the AWS Load Balancer Controller and IP target type mode. 15 | 16 | ::: 17 | 18 | ## Prerequisites 19 | - [Managing Kubernetes Contexts in EKS Cluster](./manage-contexts.md) 20 | 21 | 22 | 23 | 24 | ## 1. Set Environment Variables 25 | Before we start setting up our EKS cluster, we need to set a couple environment variables. 
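The commands in this section assume that the `AWS_REGION` variable from the environment setup is still exported in your shell; the following optional check fails fast with a clear message if it is not:

```bash
# Confirm AWS_REGION is set before running the export commands below
echo "${AWS_REGION:?AWS_REGION is not set. Re-run the environment setup step.}"
```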
26 | 27 | Export the name of your EKS cluster and the VPC ID associated with it by executing the following commands: 28 | 29 | 30 | 31 | 32 | ```bash 33 | export CLUSTER_VPC=$(aws eks describe-cluster --name fargate-quickstart --region ${AWS_REGION} --query "cluster.resourcesVpcConfig.vpcId" --output text) 34 | export CLUSTER_NAME=fargate-quickstart 35 | ``` 36 | 37 | 38 | 39 | 40 | 41 | ```bash 42 | export CLUSTER_VPC=$(aws eks describe-cluster --name managednode-quickstart --region ${AWS_REGION} --query "cluster.resourcesVpcConfig.vpcId" --output text) 43 | export CLUSTER_NAME=managednode-quickstart 44 | ``` 45 | 46 | 47 | 48 | 49 | 50 | 51 | ## 2. Verify the Service Account 52 | First, we need to make sure the "aws-load-balancer-controller" service account is correctly set up in the "kube-system" namespace in our cluster. 53 | 54 | Run the following command: 55 | ```bash 56 | kubectl get sa aws-load-balancer-controller -n kube-system -o yaml 57 | ``` 58 | The expected output should look like this: 59 | ```bash 60 | apiVersion: v1 61 | kind: ServiceAccount 62 | metadata: 63 | annotations: 64 | eks.amazonaws.com/role-arn: arn:aws:iam::012345678901:role/eksctl-fargate-quickstart-addon-iamserviceac-Role1-J2T54L9SG5L0 65 | creationTimestamp: "2023-05-30T23:09:32Z" 66 | labels: 67 | app.kubernetes.io/managed-by: eksctl 68 | name: aws-load-balancer-controller 69 | namespace: kube-system 70 | resourceVersion: "2102" 71 | uid: 2086b1c0-de23-4386-ae20-19d51b7db4a1 72 | ``` 73 | 74 | ## 3. Add and Update the EKS Chart Repository in Helm 75 | 76 | Add the EKS chart repository to Helm: 77 | 78 | ```bash 79 | helm repo add eks https://aws.github.io/eks-charts 80 | ``` 81 | 82 | Update the repositories to ensure Helm is aware of the latest versions of the charts: 83 | 84 | ```bash 85 | helm repo update 86 | ``` 87 | 88 | ## 4. Deploy the Load Balancer Controller 89 | To install the AWS Load Balancer Controller in the "kube-system" namespace of the EKS cluster, run the following Helm command, which uses your `AWS_REGION` environment variable as the region: 90 | 91 | :::note 92 | If the command below fails with an error similar to `Error: INSTALLATION FAILED: cannot re-use a name that is still in use`, it means the AWS Load Balancer Controller is already installed. In this case, replace `helm install` with `helm upgrade -i` in the command below to upgrade to the latest version of the controller and Helm chart. 93 | ::: 94 | 95 | ```bash 96 | helm install aws-load-balancer-controller eks/aws-load-balancer-controller \ 97 | --set clusterName=${CLUSTER_NAME} \ 98 | --set serviceAccount.create=false \ 99 | --set region=${AWS_REGION} \ 100 | --set vpcId=${CLUSTER_VPC} \ 101 | --set serviceAccount.name=aws-load-balancer-controller \ 102 | -n kube-system 103 | ``` 104 | 105 | You should see output confirming the successful installation of the AWS Load Balancer Controller (LBC): 106 | ```bash 107 | NAME: aws-load-balancer-controller 108 | LAST DEPLOYED: Sat May 11 01:21:04 2023 109 | NAMESPACE: kube-system 110 | STATUS: deployed 111 | REVISION: 1 112 | TEST SUITE: None 113 | NOTES: 114 | AWS Load Balancer controller installed!
115 | ``` 116 | 117 | To list the installed Helm releases, run the following command: 118 | 119 | ```bash 120 | helm list -A 121 | ``` 122 | 123 | You should see output similar to this: 124 | 125 | ```bash 126 | NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION 127 | aws-load-balancer-controller kube-system 1 2023-09-11 00:31:57.585623 -0400 EDT deployed aws-load-balancer-controller-1.6.0 v2.6.0 128 | ``` 129 | -------------------------------------------------------------------------------- /website/docs/python/eks/view-kubernetes-resources.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Viewing Kubernetes Resources Using the EKS Console 3 | sidebar_position: 10 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | 8 | ## Objective 9 | 10 | This lab shows you how to view Kubernetes resources such as Pods, Services, and Nodes using the EKS console. With EKS, there is no need to deploy and manage a Kubernetes Dashboard Pod in order to view Kubernetes resources. To use this EKS console feature, the IAM principal logged into the EKS console must have the required permissions to access the EKS cluster. 11 | 12 | Of course, it's also possible to use the Kubernetes Dashboard instead of the EKS console. If you use the Kubernetes Dashboard, please check the [kubernetes/dashboard: General-purpose web UI for Kubernetes clusters](https://github.com/kubernetes/dashboard) repository on GitHub for installation instructions. You can also refer to the [Monitoring Kubernetes Resources Using the Dashboard](http://localhost:3000/docs/python/kubernetes/kubernetes-dashboard) page for usage instructions. 13 | 14 | ## Prerequisites 15 | 16 | - [Accessing the FastAPI App](./access-app.md) 17 | 18 | ## 1. Checking IAM permissions for the IAM principal logging into the EKS console 19 | 20 | **Use the tabs below to see the steps for the specific environment where you are running this lab.** 21 | 22 | 23 | 24 | 25 | You can access the EKS Console from the AWS Workshop Studio event page, as explained in section [Setting up the Development Environment](./../introduction/environment-setup.md). 26 | 27 | The IAM role used in Workshop Studio is the same IAM role that you have been using in Visual Studio Code to run CLI commands. This role is the creator of the cluster and it already has all required permissions. 28 | 29 | Next, skip to step '[4. View Kubernetes resources](#4-viewing-your-kubernetes-resources)'. 30 | 31 | 32 | 33 | 34 | Make sure that the IAM principal you are using to log into the EKS console has the required permissions according to [View Kubernetes resources](https://docs.aws.amazon.com/eks/latest/userguide/view-kubernetes-resources.html#view-kubernetes-resources-permissions) in the EKS documentation. If any permissions are missing, add them. Once the necessary permissions have been added, proceed to the next step. 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | ## 2. Creating Kubernetes RBAC resources 43 | 44 | 45 | :::caution 46 | 47 | If the IAM principal logged into the EKS console is the creator of the EKS cluster, skip this step and proceed to step '[4. View Kubernetes resources](#4-viewing-your-kubernetes-resources)'. This is because this IAM principal already has the necessary RBAC permissions.
48 | 49 | ::: 50 | 51 | 52 | To view Kubernetes resources for all namespaces, create RBAC resources by applying the following manifest: 53 | 54 | ```bash 55 | kubectl apply -f https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml 56 | ``` 57 | 58 | The expected output should look like this: 59 | 60 | ```bash 61 | clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created 62 | clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created 63 | ``` 64 | 65 | You can also view Kubernetes resources limited to a specific namespace. Refer to the EKS documentation for more details on creating RBAC bindings for a namespace. 66 | 67 | ## 3. Adding the IAM principal ARN to the `aws-auth` ConfigMap 68 | 69 | :::caution 70 | 71 | If the IAM principal logged into the EKS console is the creator of the EKS cluster, skip this step and proceed to step '[4. View Kubernetes resources](#4-viewing-your-kubernetes-resources)'. Performing this step by mistake will overwrite the original super-user permissions in the ConfigMap 'aws-auth', which can make the EKS cluster difficult to manage. 72 | 73 | ::: 74 | 75 | **Optionally**, you can register an existing IAM principal ARN logged into the EKS console to the ConfigMap 'aws-auth' by running the following command. Make sure to substitute the sample value with your _existing_ IAM principal ARN as the value for the `--arn` argument: 76 | 77 | 78 | 79 | 80 | ```bash 81 | eksctl create iamidentitymapping \ 82 | --cluster fargate-quickstart \ 83 | --region $AWS_REGION \ 84 | --arn arn:aws:iam::012345678901:role/my-console-viewer-role \ 85 | --group eks-console-dashboard-full-access-group \ 86 | --no-duplicate-arns 87 | ``` 88 | 89 | 90 | 91 | 92 | ```bash 93 | eksctl create iamidentitymapping \ 94 | --cluster managednode-quickstart \ 95 | --region $AWS_REGION \ 96 | --arn arn:aws:iam::012345678901:role/my-console-viewer-role \ 97 | --group eks-console-dashboard-full-access-group \ 98 | --no-duplicate-arns 99 | ``` 100 | 101 | 102 | 103 | The expected output should look like this: 104 | 105 | ```bash 106 | 2023-11-10 10:11:50 [ℹ] checking arn arn:aws:iam::012345678901:role/my-console-viewer-role against entries in the auth ConfigMap 107 | 2023-11-10 10:11:50 [ℹ] adding identity "arn:aws:iam::012345678901:role/my-console-viewer-role" to auth ConfigMap 108 | ``` 109 | 110 | ## 4. Viewing your Kubernetes resources 111 | 112 | You can check your Kubernetes resources on the 'Resources' tab on the cluster details page in the EKS console. 113 | 114 | Note: The following examples use a Fargate cluster. 115 | 116 | ![kubernetes-resources-1](./images/kubernetes-resources-1.jpg) 117 | 118 | ### View the details of the Pod 'fastapi-deployment' 119 | 120 | Select the 'Pods' in the 'Workloads' tree under the 'Resource Types', and click on the 'fastapi-deployment' Pod link in the red frame. 121 | 122 | ![kubernetes-resources-2](./images/kubernetes-resources-2.jpg) 123 | 124 | As a result, you can check the details of the Pod's resource information. You can troubleshoot by checking the Events log. Also, in the case of Fargate Pods, you can check the compute resources provisioned from the 'CapacityProvisioned' annotation. In this example, it's '0.25 vCPU 0.5 GB'. 
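If you prefer the command line, you can read the same annotation with kubectl. This is a hedged example: the pod name below is the sample name used earlier in this workshop, so replace it with the name reported by `kubectl get pods -n my-cool-app`:

```bash
# Print the Fargate CapacityProvisioned annotation for a FastAPI pod (replace the pod name with yours)
kubectl get pod fastapi-deployment-6b587dfb54-j26pc -n my-cool-app \
  -o jsonpath='{.metadata.annotations.CapacityProvisioned}'
```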
125 | 126 | ![kubernetes-resources-3](./images/kubernetes-resources-3.jpg) 127 | 128 | ### View the details of the Service 'fastapi-service' 129 | 130 | Then, select the 'Services' in the 'Service and Networking' tree under the 'Resource Types' and click on the 'fastapi-service' service link in the red frame. 131 | 132 | ![kubernetes-resources-4](./images/kubernetes-resources-4.jpg) 133 | 134 | As a result, you can check the Service's resource details. You can check the Event logs and see the Pods to which requests are routed in the 'Endpoints', and you can navigate to those Pods from these links. 135 | 136 | ![kubernetes-resources-5](./images/kubernetes-resources-5.jpg) 137 | -------------------------------------------------------------------------------- /website/docs/python/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Welcome 3 | sidebar_position: 1 4 | --- 5 | 6 | # Welcome to the EKS Developers Workshop for Python 7 | Welcome to the EKS Developers Workshop, a technical deep-dive into refactoring applications for Amazon Elastic Kubernetes Service (EKS). 8 | 9 | ## Who Is This Workshop For? 10 | This workshop is tailored for developers that want to refactor an application for containers and Kubernetes environments using EKS. It's specifically designed to be Kubernetes beginner-friendly and particularly beneficial for those who: 11 | 12 | * Need visibility of the entire Kubernetes lifecycle, from refactoring containers, to Kubernetes integrations on Amazon EKS. 13 | * Have a foundational understanding of container technologies and seek to increase their knowledge of Kubernetes-based application deployments. 14 | * Aim to transition traditional applications to cloud-native architectures, particularly within the AWS ecosystem. 15 | 16 | ## What You Will Learn 17 | * **Application Refactoring:** Learn how to apply The Twelve-Factor App methodologies to refactor applications for containers and Kubernetes. 18 | 19 | * **Containerization Techniques:** Master the creation and management of Docker containers, integrate Amazon ECR with Docker Compose, and handle multi-architecture containers. 20 | * **Kubernetes Deployment:** Learn how to deploy your application to a local minikube cluster, including securing workloads with Kubernetes secrets. 21 | * **Amazon EKS Deployment:** Build your skills deploying Kubernetes workloads in production on Amazon EKS, integrating with AWS services like AWS Secrets Manager and Amazon RDS for PostgreSQL. 22 | 23 | ## How to Participate in the Workshop 24 | This workshop offers a self-paced, comprehensive guide through the entire Kubernetes lifecycle. From creating multi-architecture container images, to understanding Kubernetes, to integrating with Amazon EKS and other AWS services, this workshop is a continuous, end-to-end exploration. Each chapter is designed to build upon the previous, ensuring a cohesive learning experience. 25 | 26 | To get started with the workshop: 27 | 28 | * **In Your Own Account**: This option allows for a personalized and hands-on experience, using your AWS account to create resources. 29 | * **At an AWS Event**: Engage in a more guided and structured learning environment at an AWS Event using AWS Workshop Studio platform, ideal for those who prefer collaborative learning sessions. 30 | 31 | This workshop has an approximate duration of 4 hours. 
32 | 33 | ## Getting Started 34 | Dive into our [introduction](./introduction/index.md) to commence your technical exploration of EKS. Equip yourself with the knowledge and skills to confidently manage Kubernetes applications on AWS. -------------------------------------------------------------------------------- /website/docs/python/introduction/images/visual-studio-terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/introduction/images/visual-studio-terminal.png -------------------------------------------------------------------------------- /website/docs/python/introduction/images/workshop-studio-event-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/introduction/images/workshop-studio-event-dashboard.png -------------------------------------------------------------------------------- /website/docs/python/introduction/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Python Workshop Overview 3 | sidebar_position: 1 4 | --- 5 | 6 | Welcome to the Python developers workshop! We'll embark on an end-to-end journey with the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project, a Python-based application developed with FastAPI and PostgreSQL as the database. This workshop will guide you from the fundamentals of containerization and Kubernetes all the way to deploying the application on Amazon Web Services (AWS). 7 | 8 | ## About This Workshop 9 | This workshop dives into the unique aspects of both stateless and stateful applications within the project, broken down into the following sections: 10 | 11 | - **[Introduction](index.md)**: This chapter shows you the key principles we used to refactor the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project for container and Kubernetes environments, and how to set up your development environment. 12 | - **[Containers](../containers/index.md)**: This chapter shows you how to containerize applications using Docker and deploy a multi-architecture container image to [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/). 13 | - **[Kubernetes](../kubernetes/index.md)**: This chapter shows you how to deploy your containerized application to a local Kubernetes cluster, providing an introduction to essential Kubernetes concepts such as service definitions, deployments, and secrets. 14 | - **[Amazon EKS](../eks/index.md)**: This chapter shows you how to deploy your containerized application stored in Amazon ECR onto an [Amazon EKS](https://aws.amazon.com/eks/) cluster, exploring use-case specific cluster set-up and integration with other AWS services. 15 | 16 | ## Stateful and Stateless Microservices Use Case 17 | The [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project serves as a practical case study throughout this workshop. It uses [FastAPI](https://fastapi.tiangolo.com/lo/), an asynchronous Python web framework, and employs a [PostgreSQL](https://www.postgresql.org/) database for persistent data storage.
18 | 19 | - **FastAPI Application (Web Service)**: This stateless component serves as the primary application layer. Leveraging the FastAPI framework, this Python-based web service enables the rapid construction of APIs while maintaining top-notch performance. It offers robust data validation, serialization, and documentation via its integral [OpenAPI](https://swagger.io/specification/) support. Despite being stateless, it can process requests and return responses without preserving any data, thereby enhancing scalability and resilience. 20 | - **PostgreSQL Database (DB Service)**: Representing the stateful element of the project, this service employs the official PostgreSQL image from Docker Hub. Known for its robustness, reliability, and performance, PostgreSQL is an open-source object-relational database system that is responsible for data persistence in the project. This statefulness enables the application's data to be stored, accessed, and modified over time. 21 | 22 | -------------------------------------------------------------------------------- /website/docs/python/kubernetes/about-multiservice.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: About Managing Multiple Services with Kubernetes 3 | sidebar_position: 1 4 | --- 5 | ## Overview 6 | The lab exercises that follow serve as an introduction to Kubernetes, covering the components and deployment of our microservices-based application on a local Kubernetes cluster using [minikube](https://minikube.sigs.k8s.io/docs/). 7 | 8 | ## Objective 9 | This guide shows how Kubernetes is utilized for our [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project, specifically the roles of Deployments, StatefulSets, Services, PersistentVolumeClaims, and Secrets in the orchestration and management of our multi-service application. 10 | 11 | ## Configuration Overview 12 | Our Kubernetes configurations for the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project outline two main components in our Kubernetes cluster: our FastAPI application and the PostgreSQL database. 13 | 14 | ## FastAPI Application (Deployment and Service) 15 | The **[fastapi-app.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/kubernetes/fastapi-app.yaml)** manifest file sets up a minikube cluster with the following Kubernetes resources: 16 | 17 | - **Deployment**: Our FastAPI application is defined as a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) in Kubernetes. The Deployment, as detailed in our 'fastapi-app.yaml' manifest file, ensures that a specified number of pods (in our case, one) is always running at all times within the minikube cluster. It utilizes the Docker image hosted in our Amazon ECR repository and starts the FastAPI application. The Deployment also uses Kubernetes Secrets to set environment variables similar to those in our docker-compose.yml file and to store the token for ECR authentication. Resource limits and requests for the pods are also defined in the Deployment. 18 | - **Service**: The associated [Service](https://kubernetes.io/docs/concepts/services-networking/service/) exposes the FastAPI application to be accessible outside the cluster. We use a "LoadBalancer" service type which minikube then routes to a specific "NodePort" for external access (i.e., outside the cluster). 
The Service is a primary Kubernetes resource that manages incoming traffic to our application. 19 | 20 | ## PostgreSQL Database (StatefulSet, Service, and PersistentVolumeClaim) 21 | The **[postgres-db.yaml](https://github.com/aws-samples/python-fastapi-demo-docker/blob/main/kubernetes/postgres-db.yaml)** manifest file sets up a minikube cluster with the following Kubernetes resources: 22 | 23 | - **StatefulSet**: Our PostgreSQL database is defined as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) with a corresponding [Service](https://kubernetes.io/docs/concepts/services-networking/service/) to allow interaction with the FastAPI application within the minikube cluster. The StatefulSet, as detailed in our 'postgres-db.yaml' manifest file, manages the deployment and scaling of a set of Pods and maintains the state of deployed pods, allowing PostgreSQL to persist across pod restarts. It uses the official PostgreSQL image from Docker Hub and is configured using environment variables. 24 | - **PersistentVolumeClaim**: The database data is stored persistently using a [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) tied to the StatefulSet, ensuring data remains intact even if the pod is stopped or deleted. This approach enhances the resilience of our data storage and facilitates better data management. The PersistentVolumeClaim is another primary Kubernetes resource used to provide persistent storage for PostgreSQL, ensuring data persistence across pod restarts and survival during pod deletion. It requests a storage capacity of 1Gi and requires the volume to allow read-write access by a single node. 25 | - **Service**: The corresponding Service provides network access to your PostgreSQL database within the cluster, allowing other components, such as our FastAPI application, to communicate with the database. 26 | 27 | ## Conclusion 28 | This guide has illustrated the role of Kubernetes in setting up a multi-pod environment for the 'python-fastapi-demo-docker' project. By leveraging Kubernetes and its Deployment and StatefulSet resources, we can significantly simplify the management of our application's components and their interconnections, efficiently scale our application to handle increased loads, and ensure the resilience and persistence of our database. -------------------------------------------------------------------------------- /website/docs/python/kubernetes/access-app.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Accessing the FastAPI App 3 | sidebar_position: 6 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | 9 | ## Objective 10 | This lab aims to guide you through the process of accessing your microservices deployed onto a minikube cluster. By using minikube's 'port forwarding' feature and enabling a network tunnel, we'll expose the FastAPI service, allowing you to interact with it from your local machine's web browser. This process is especially crucial for LoadBalancer service types, as they require an additional network route from the host to the service's cluster. 11 | 12 | ## Prerequisites 13 | - [Deploying FastAPI and PostgreSQL Microservices to Kubernetes using Minikube](./deploy-app.md) 14 | 15 | 16 | 17 | 18 | 19 | 20 | ## 1. 
Checking the Status of Pods 21 | Before we try to access our application, we need to ensure that all of our pods are running correctly. To check the status of all pods, run the following command: 22 | ```bash 23 | kubectl get pods -n my-cool-app 24 | ``` 25 | All your pods should be in the "Running" state. If they're not, you will need to troubleshoot the deployment before proceeding. 26 | 27 | ## 2. Accessing the FastAPI Service 28 | 29 | **Use the tabs below to see the steps for the specific environment where you are running this lab.** 30 | 31 | 32 | 33 | 34 | 35 | Use the [minikube service](https://minikube.sigs.k8s.io/docs/commands/service/) command to create a tunnel to the cluster and connect to FastAPI service: 36 | ```bash 37 | minikube service fastapi-service --namespace=my-cool-app 38 | ``` 39 | The expected output should look like this: 40 | ```bash 41 | |-------------|-----------------|-------------|---------------------------| 42 | | NAMESPACE | NAME | TARGET PORT | URL | 43 | |-------------|-----------------|-------------|---------------------------| 44 | | my-cool-app | fastapi-service | 80 | http://192.168.49.2:30639 | 45 | |-------------|-----------------|-------------|---------------------------| 46 | 🎉 Opening service my-cool-app/fastapi-service in default browser... 47 | 👉 http://192.168.49.2:30639 48 | ``` 49 | Minikube exposes service port 80 using localhost address. Additionally, you need to expose the service on the EC2 instance public IP address, to be able to access it from your local browser. 50 | 51 | Expose the service port 80 using host port 8000 by running 52 | ```bash 53 | kubectl -n my-cool-app port-forward --address 0.0.0.0 service/fastapi-service 8000:80 54 | ``` 55 | Keep this command running while accessing the service with the steps below. 56 | 57 | Execute the command below in a new VScode terminal to show the URL to connect to Node Port service fastapi-service: 58 | ``` 59 | echo "http://$PUBLIC_IP:8000" 60 | ``` 61 | Access this URL using your web browser. 62 | 63 | 64 | 65 | 66 | 67 | Use the [minikube service](https://minikube.sigs.k8s.io/docs/commands/service/) command to create a tunnel to the cluster and connect to FastAPI service: 68 | ```bash 69 | minikube service fastapi-service --namespace=my-cool-app 70 | ``` 71 | The expected output should look like this: 72 | ```bash 73 | |-------------|-----------------|-------------|---------------------------| 74 | | NAMESPACE | NAME | TARGET PORT | URL | 75 | |-------------|-----------------|-------------|---------------------------| 76 | | my-cool-app | fastapi-service | 80 | http://192.168.49.2:30639 | 77 | |-------------|-----------------|-------------|---------------------------| 78 | 🏃 Starting tunnel for service fastapi-service. 79 | |-------------|-----------------|-------------|------------------------| 80 | | NAMESPACE | NAME | TARGET PORT | URL | 81 | |-------------|-----------------|-------------|------------------------| 82 | | my-cool-app | fastapi-service | | http://127.0.0.1:58665 | 83 | |-------------|-----------------|-------------|------------------------| 84 | 🎉 Opening service my-cool-app/fastapi-service in default browser... 85 | ❗ Because you are using a Docker driver on darwin, the terminal needs to be open to run it. 86 | ``` 87 | This command needs to be continuously running to keep the network route open, so make sure to leave this terminal window open. 88 | 89 | 90 | 91 | 92 | 93 | ## 3. 
Verifying the Setup by Adding a Book 94 | To confirm that everything is functioning as expected, attempt to add a book by selecting the **Create a book** option. 95 | 96 | ![Image](./images/app-create-book.png) 97 | 98 | Now you can press `Ctrl+C` in the terminal to stop the port-forwarding command. 99 | 100 | ## Conclusion 101 | This lab has walked you through the steps necessary to access your microservices, specifically the FastAPI service, deployed on a minikube cluster from your local machine. We've shown how to check the status of your pods, enable a minikube tunnel for access, and verify your setup by interacting with the FastAPI service. The minikube service command is a convenient way to expose your Kubernetes services to your local machine and interact with them as if they were locally deployed. -------------------------------------------------------------------------------- /website/docs/python/kubernetes/deploy-configmap.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Initializing PostgreSQL Database with Kubernetes ConfigMaps 3 | sidebar_position: 3 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | In the realm of container orchestration and cloud-native applications, initializing databases securely and efficiently is crucial. Kubernetes ConfigMaps offer a way to manage configuration data and scripts, like our `init.sh`, separate from the container image for better modularity and security. This lab walks you through the process of creating a Kubernetes ConfigMap for the `init.sh` script in the 'my-cool-app' namespace. 9 | 10 | ## Prerequisites 11 | - [Creating a Kubernetes Cluster with Minikube](./minikube-create.md) 12 | 13 | 14 | 15 | 16 | ## 1. Creating the Kubernetes ConfigMap for Database Initialization 17 | Our PostgreSQL database requires custom initialization, which is why we use an init.sh script. This script creates the database, user, and table. To manage this script, we create a Kubernetes ConfigMap. The ConfigMap is mounted into the PostgreSQL container so that the script is executed when the container starts, initializing the database as required. 18 | 19 | From the root directory of the 'python-fastapi-demo-docker' project, generate the Kubernetes ConfigMap: 20 | ```bash 21 | kubectl create configmap db-init-script --from-file=init.sh=server/db/init.sh -n my-cool-app 22 | ``` 23 | 24 | The expected output should look like this: 25 | ```bash 26 | configmap/db-init-script created 27 | ``` 28 | 29 | ## 2. Verifying the ConfigMap Creation 30 | To ensure that your Kubernetes ConfigMap has been successfully created, you can use the kubectl get configmap command. This command lists all ConfigMaps in the current namespace: 31 | ```bash 32 | kubectl get configmap -n my-cool-app 33 | ``` 34 | 35 | The expected output should look like this: 36 | ```bash 37 | NAME DATA AGE 38 | db-init-script 1 4m47s 39 | kube-root-ca.crt 1 5m36s 40 | ``` 41 | 42 | ## 3. Inspecting the ConfigMap Details 43 | For a deeper understanding of your created ConfigMap, you can use the following command to obtain detailed information about the specified ConfigMap: 44 | ```bash 45 | kubectl describe configmap db-init-script -n my-cool-app 46 | ``` 47 | 48 | The expected output should look like this: 49 | ```bash 50 | Name: db-init-script 51 | Namespace: my-cool-app 52 | Labels: 53 | Annotations: 54 | 55 | Data 56 | ==== 57 | init.sh: 58 | ---- 59 | #!/bin/bash 60 | ... 
61 | ``` 62 | 63 | ## Conclusion 64 | This lab guided you through the process of creating a Kubernetes ConfigMap that securely initializes your PostgreSQL database within a Minikube environment. 65 | -------------------------------------------------------------------------------- /website/docs/python/kubernetes/deploy-secrets.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Securing FastAPI Microservices with Kubernetes Secrets 3 | sidebar_position: 4 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | In the evolving world of microservices and cloud-native applications, managing sensitive data securely is paramount. Kubernetes offers a "Secret" resource, designed for storing sensitive data like passwords, OAuth tokens, and SSH keys, separating them from the container image to enhance security and modularity. This lab shows you how to create Kubernetes secrets for the [python-fastapi-demo-docker](https://github.com/aws-samples/python-fastapi-demo-docker) project. 9 | 10 | ## Prerequisites 11 | - [Initializing PostgreSQL Database with Kubernetes ConfigMaps](deploy-configmap.md) 12 | 13 | 14 | 15 | 16 | ## 1. Creating the Kubernetes Secret for Amazon ECR 17 | Our Amazon ECR repository is private, so we need to generate an Amazon ECR authorization token and create a Kubernetes Secret with it. This is a critical step because it ensures that your Kubernetes cluster can pull the necessary container images from your private ECR repository. The Secret itself survives pod restarts, but keep in mind that ECR authorization tokens are only valid for 12 hours and Kubernetes does not refresh them automatically; if the token expires, recreate the Secret by re-running the commands below (in a long-lived cluster you would typically automate this refresh). 18 | 19 | 20 | From the root directory of the 'python-fastapi-demo-docker' project, generate an Amazon ECR authorization token: 21 | ``` 22 | ECR_TOKEN=$(aws ecr get-login-password --region ${AWS_REGION}) 23 | ``` 24 | 25 | Run the following command to create the Kubernetes Secret in the "my-cool-app" namespace: 26 | ``` 27 | kubectl create secret docker-registry regcred \ 28 | --docker-server=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com \ 29 | --docker-username=AWS \ 30 | --docker-password="${ECR_TOKEN}" \ 31 | -n my-cool-app 32 | ``` 33 | The expected output should look like this: 34 | ```bash 35 | secret/regcred created 36 | ``` 37 | 38 | ## 2. Creating a Generic Kubernetes Secret from a .env File 39 | After generating the Docker registry secret, the next step is to create a Kubernetes Secret from the .env file. This file contains sensitive information typically stored as environment variables for your application. Using a Kubernetes Secret allows for safer management and access to this sensitive data within your Kubernetes cluster. 40 | 41 | Run the following command to create the Kubernetes Secret in the "my-cool-app" namespace: 42 | ``` 43 | kubectl create secret generic fastapi-secret --from-env-file=.env -n my-cool-app 44 | ``` 45 | The expected output should look like this: 46 | ```bash 47 | secret/fastapi-secret created 48 | ``` 49 | 50 | ## 3. Verifying the Secret Creation with kubectl get secret 51 | To confirm that your Kubernetes Secret has been successfully created, you can use the kubectl get secret command. 
This command lists all secrets that exist in the current namespace: 52 | ```bash 53 | kubectl get secrets -n my-cool-app 54 | ``` 55 | The expected output should look like this: 56 | ```bash 57 | NAME TYPE DATA AGE 58 | fastapi-secret Opaque 20 9m 59 | regcred kubernetes.io/dockerconfigjson 1 13m 60 | ``` 61 | 62 | ## 4. Inspecting the Secret Details with kubectl describe secret 63 | If you want more details about your created Secret, you can use the kubectl describe secret command. This command provides more detailed information about the specified secret. Here's how to use it: 64 | 65 | ```bash 66 | kubectl describe secret fastapi-secret -n my-cool-app 67 | ``` 68 | The expected output should look like this: 69 | ```bash 70 | Name: fastapi-secret 71 | Namespace: my-cool-app 72 | Labels: 73 | Annotations: 74 | 75 | Type: Opaque 76 | 77 | Data 78 | ==== 79 | DATABASE_URL: 58 bytes 80 | HTTP_HOST: 16 bytes 81 | IMAGE_VERSION: 3 bytes 82 | WORKSHOP_POSTGRES_PASSWORD: 10 bytes 83 | AWS_REGION: 9 bytes 84 | DOCKER_DATABASE_URL: 53 bytes 85 | POSTGRES_MASTER: 8 bytes 86 | POSTGRES_TABLE: 5 bytes 87 | APP_HOST: 7 bytes 88 | POSTGRES_HOST: 9 bytes 89 | POSTGRES_PASSWORD: 15 bytes 90 | POSTGRES_VOLUME: 2 bytes 91 | WORKSHOP_POSTGRES_DB: 9 bytes 92 | APP_PORT: 4 bytes 93 | DOCKER_USERNAME: 7 bytes 94 | LOCAL_HOST: 9 bytes 95 | POSTGRES_PORT: 4 bytes 96 | WORKSHOP_POSTGRES_USER: 11 bytes 97 | ``` 98 | 99 | ## Conclusion 100 | This tutorial took you through the secure handling of sensitive data using Kubernetes Secrets within a Minikube environment for the 'python-fastapi-demo-docker' application. By incorporating these methods, you have enhanced the security of your application and adhered to best practices for handling confidential data. 101 | -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/app-create-book.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/app-create-book.png -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/docker-extension-open-in-browser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/docker-extension-open-in-browser.png -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-1.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-2.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-3.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-3.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-4.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-5.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-6.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-7.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-8.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/kubernetes-dashboard-9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/kubernetes-dashboard-9.jpg -------------------------------------------------------------------------------- /website/docs/python/kubernetes/images/requests-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/docs/python/kubernetes/images/requests-flow.png -------------------------------------------------------------------------------- /website/docs/python/kubernetes/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Introduction to Kubernetes 3 | sidebar_position: 301 4 | --- 5 | ## Overview 6 | This chapter provides a basic introduction to how Kubernetes manages applications within its environment. 
It covers the Kubernetes objects used to deploy applications on a local cluster, such as Deployments, Services, Secrets, and PersistentVolumeClaims, all integral to state, networking, and access management. 7 | 8 | ## Objective 9 | This guide introduces you to Kubernetes' core components and functionality, with the primary objective of equipping you with knowledge of the basic components of a Kubernetes environment. 10 | 11 | ## Terms 12 | Kubernetes is an open-source container orchestration platform used to manage containerized applications. At a high level, Kubernetes consists of two main components: the Control Plane and the Data Plane. 13 | 14 | ### Control Plane 15 | The **control plane** manages the Kubernetes cluster by maintaining the desired state of the system. It includes the following components: 16 | 17 | - **API Server**: The API server is the front-end for the Kubernetes Control Plane. It handles and validates requests, and stores data in the etcd database. 18 | - **etcd**: etcd is a distributed key-value store used by Kubernetes to store configuration data. 19 | - **Controller Manager**: The Controller Manager is responsible for running controllers that regulate the state of the cluster. 20 | - **Scheduler**: The Scheduler assigns new workloads to nodes based on available resources and workload constraints. 21 | 22 | ### Data Plane 23 | The **data plane** consists of nodes, which act as the worker machines that run the containerized applications. Each node runs a set of components that communicate with the Control Plane to maintain the desired state of the system. The main components running on a node are: 24 | 25 | - **kubelet**: The kubelet is responsible for managing the state of the node, including starting, stopping, and maintaining the containers on the node. 26 | - **kube-proxy**: The kube-proxy is responsible for network communication between services and pods in the cluster. 27 | - **Container Runtime**: The container runtime is responsible for pulling the container images from a registry and running them on the node. 28 | 29 | ## Tools 30 | Before you begin, make sure that the following tools have been installed: 31 | 32 | ``` 33 | kubectl version --client 34 | minikube version 35 | ``` 36 | 37 | If any of these tools are missing, refer to the section [Setting up the Development Environment](../../python/introduction/environment-setup.md) for installation instructions. 38 | -------------------------------------------------------------------------------- /website/docs/python/kubernetes/kubernetes-dashboard.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Monitoring Kubernetes Resources Using the Dashboard 3 | sidebar_position: 7 4 | --- 5 | import Tabs from '@theme/Tabs'; 6 | import TabItem from '@theme/TabItem'; 7 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 8 | 9 | ## Objective 10 | 11 | This lab walks you through the process of using the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) to view and manage Kubernetes resources. This user-friendly web-based GUI facilitates the management and troubleshooting of applications within your minikube cluster. 12 | 13 | ## Prerequisites 14 | - [Accessing the FastAPI App](./access-app.md) 15 | 16 | 17 | 18 | 19 | 20 | ## 1. 
Installing the Kubernetes Dashboard 21 | 22 | Install the Kubernetes Dashboard and Metrics Server addons by running: 23 | ```bash 24 | minikube addons enable dashboard 25 | minikube addons enable metrics-server 26 | ``` 27 | Metrics Server collects CPU and memory usage statistics of the pods and nodes, and you can monitor them using the Kubernetes Dashboard. 28 | 29 | ## 2. Accessing the Kubernetes Dashboard 30 | 31 | **Use the tabs below to see the steps for the specific environment where you are running this lab.** 32 | 33 | 34 | 35 | 36 | Expose the Kubernetes Dashboard port 80 using host port 8001 by running: 37 | ```bash 38 | kubectl -n kubernetes-dashboard port-forward --address 0.0.0.0 service/kubernetes-dashboard 8001:80 39 | ``` 40 | 41 | Execute the command below in a new VS Code terminal to show the URL to connect to the Kubernetes Dashboard: 42 | ``` 43 | echo "http://$PUBLIC_IP:8001" 44 | ``` 45 | Access this URL using your web browser. 46 | 47 | 48 | 49 | 50 | To launch the Kubernetes Dashboard, open your terminal and run the command below: 51 | ```bash 52 | minikube dashboard 53 | ``` 54 | Once started, the Dashboard should automatically open in your default web browser. If it doesn't open automatically, the terminal will display a URL as part of the command's output that you can copy and paste into your web browser to access the Dashboard manually. 55 | For more information, check the [Minikube documentation](https://minikube.sigs.k8s.io/docs/handbook/dashboard/). 56 | 57 | 58 | 59 | 60 | ## 3. Viewing Kubernetes Resources 61 | 62 | ### Filter Kubernetes resources by the Namespace my-cool-app 63 | 64 | This workshop uses the 'my-cool-app' namespace. While the option to view "All namespaces" exists, narrowing down to specific resources streamlines the process. To filter, type the following: 65 | 66 | ![kubernetes-dashboard-1](./images/kubernetes-dashboard-1.jpg) 67 | 68 | Subsequently, navigate to 'Workloads' to list the relevant resources. With 'metrics-server' enabled, you can also view CPU and memory statistics here. 69 | 70 | ![kubernetes-dashboard-2](./images/kubernetes-dashboard-2.jpg) 71 | 72 | Then, press the Pod 'fastapi-deployment' link circled in red. 73 | 74 | ### View the details of the Pod 'fastapi-deployment' 75 | 76 | Click on the 'fastapi-deployment' pod to check its spec and status. 77 | 78 | ![kubernetes-dashboard-3](./images/kubernetes-dashboard-3.jpg) 79 | 80 | Click the first button from the left of the red frame (top right corner of the dashboard) to check the Pod's container log. You can use this functionality in lieu of the 'kubectl logs' command when using the Kubernetes Dashboard. 81 | 82 | ![kubernetes-dashboard-4](./images/kubernetes-dashboard-4.jpg) 83 | 84 | Press the second button from the left of the red frame (top right corner of the dashboard) to log in to the Pod using a shell. You can use this functionality in lieu of the 'kubectl exec' command when using the Kubernetes Dashboard. 85 | 86 | ![kubernetes-dashboard-5](./images/kubernetes-dashboard-5.jpg) 87 | 88 | ### View the details of the Service 'fastapi-service' 89 | 90 | Switching to 'Services' in the navigation pane presents a list of the created services. 91 | 92 | ![kubernetes-dashboard-6](./images/kubernetes-dashboard-6.jpg) 93 | 94 | Press the Service 'fastapi-service' link circled in red to check the spec and status of the Service 'fastapi-service', or check its endpoint pod list to see which pods requests are routed to. 
95 | 96 | ![kubernetes-dashboard-7](./images/kubernetes-dashboard-7.jpg) 97 | 98 | ### View the details of the Node 'minikube' 99 | 100 | Exploring 'Nodes' shows the available nodes that have been created. In minikube's case, you'll typically see only the 'minikube' node, because by default the cluster has a single node. 101 | 102 | ![kubernetes-dashboard-8](./images/kubernetes-dashboard-8.jpg) 103 | 104 | Click the Node 'minikube' link circled in red. Here, you can check the node's spec, status, and resource usage. 105 | 106 | ![kubernetes-dashboard-9](./images/kubernetes-dashboard-9.jpg) 107 | 108 | ## Conclusion 109 | 110 | This lab has shown how to review deployed Kubernetes resources using the Kubernetes Dashboard. The Dashboard lets you manage and troubleshoot Kubernetes resources through a GUI instead of kubectl commands, which makes it easier to manage workloads in real production environments. 111 | -------------------------------------------------------------------------------- /website/docs/python/kubernetes/minikube-create.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Creating a Kubernetes Cluster with Minikube 3 | sidebar_position: 2 4 | --- 5 | import GetEnvVars from '../../../src/includes/get-env-vars.md'; 6 | 7 | ## Objective 8 | minikube is a tool that allows you to run Kubernetes locally. It creates a single-node or multi-node Kubernetes cluster inside a virtual machine (VM) or container on your local machine. The goal of this lab is to guide you in starting a local Kubernetes cluster using minikube and then creating a new namespace. This lays the groundwork for subsequent lab exercises. 9 | 10 | 11 | 12 | 13 | ## 1. Starting Minikube 14 | Before we can deploy applications to Kubernetes, we need to have a running Kubernetes cluster. minikube allows us to create a local Kubernetes cluster, which is suitable for development and testing. 15 | 16 | To start your minikube cluster, run the following command in your terminal: 17 | ``` 18 | minikube start 19 | ``` 20 | The expected output should look like this: 21 | ```bash 22 | 🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default 23 | ``` 24 | 25 | ## 2. Creating a Namespace 26 | Namespaces in Kubernetes serve as a mechanism for dividing cluster resources between multiple users, applications, or environments. Creating separate namespaces for different applications or environments (e.g., development, staging, production) is a common practice. In our case, we are creating a namespace named my-cool-app to hold all the resources related to our application. 27 | 28 | To create the "my-cool-app" namespace, use the following command: 29 | ```bash 30 | kubectl create namespace my-cool-app 31 | ``` 32 | The expected output should look like this: 33 | ```bash 34 | namespace/my-cool-app created 35 | ``` 36 | 37 | ## Conclusion 38 | In this tutorial, we've introduced you to the basics of setting up a local Kubernetes development environment using minikube and the concept of Kubernetes namespaces.
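If you want to double-check the result before moving on, the commands below are an optional verification sketch rather than part of the lab itself; they assume the minikube cluster and the 'my-cool-app' namespace created above:
```bash
# Optional checks, assuming the minikube cluster and namespace created above.
kubectl cluster-info          # confirm kubectl can reach the minikube control plane
kubectl get namespaces        # the list should include my-cool-app

# Optionally make my-cool-app the default namespace for the current context,
# so later kubectl commands can omit -n my-cool-app.
kubectl config set-context --current --namespace=my-cool-app
```
The labs in this chapter pass `-n my-cool-app` explicitly, so setting the default namespace is purely a convenience.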
-------------------------------------------------------------------------------- /website/docusaurus.config.js: -------------------------------------------------------------------------------- 1 | const lightCodeTheme = require('prism-react-renderer').themes.github; 2 | const darkCodeTheme = require('prism-react-renderer').themes.dracula; 3 | const path = require('path'); 4 | 5 | module.exports = { 6 | title: 'EKS Developers Workshop', 7 | tagline: 'Dinosaurs are cool', 8 | url: 'https://your-docusaurus-test-site.com', 9 | baseUrl: '/', 10 | onBrokenLinks: 'throw', 11 | onBrokenMarkdownLinks: 'warn', 12 | favicon: 'img/favicon.png', 13 | organizationName: 'aws-samples', 14 | projectName: 'eks-workshop-developers', 15 | 16 | presets: [ 17 | [ 18 | '@docusaurus/preset-classic', 19 | { 20 | docs: { 21 | sidebarPath: require.resolve('./sidebars.js'), 22 | editUrl: 'https://github.com/aws-samples/eks-workshop-developers/tree/main/website', 23 | sidebarCollapsible: true, 24 | }, 25 | theme: { 26 | customCss: require.resolve('./src/css/custom.css'), 27 | }, 28 | } 29 | ], 30 | ], 31 | 32 | themeConfig: { 33 | navbar: { 34 | title: 'EKS Developers Workshop', 35 | logo: { 36 | alt: 'Amazon Web Services', 37 | src: 'img/logo.svg', 38 | }, 39 | items: [ 40 | { 41 | type: 'doc', 42 | docId: 'python/index', 43 | position: 'left', 44 | label: 'Python', 45 | }, 46 | { 47 | type: 'doc', 48 | docId: 'java/index', 49 | position: 'left', 50 | label: 'Java', 51 | }, 52 | { 53 | href: 'https://github.com/aws-samples/eks-workshop-developers', 54 | label: 'GitHub', 55 | position: 'right', 56 | }, 57 | ], 58 | }, 59 | footer: { 60 | style: 'dark', 61 | links: [ 62 | { 63 | title: 'Community', 64 | items: [ 65 | { 66 | label: 'GitHub', 67 | href: 'https://github.com/aws-samples/eks-workshop-developers', 68 | }, 69 | ], 70 | }, 71 | { 72 | title: 'Other', 73 | items: [ 74 | { 75 | label: 'Site Terms', 76 | href: 'https://aws.amazon.com/terms/?nc1=f_pr', 77 | }, 78 | { 79 | label: 'Privacy', 80 | href: 'https://aws.amazon.com/privacy/?nc1=f_pr', 81 | }, 82 | ], 83 | }, 84 | ], 85 | copyright: `Copyright © ${new Date().getFullYear()}, Amazon Web Services, Inc. or its affiliates. 
All rights reserved.`, 86 | }, 87 | prism: { 88 | theme: lightCodeTheme, 89 | darkTheme: darkCodeTheme, 90 | }, 91 | }, 92 | }; 93 | -------------------------------------------------------------------------------- /website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "website", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids" 15 | }, 16 | "dependencies": { 17 | "@docusaurus/core": "^3.0.0", 18 | "@docusaurus/plugin-content-blog": "^3.0.0", 19 | "@docusaurus/preset-classic": "^3.0.0", 20 | "@docusaurus/theme-classic": "^3.0.0", 21 | "@fortawesome/fontawesome-svg-core": "^6.4.2", 22 | "@fortawesome/free-solid-svg-icons": "^6.4.2", 23 | "@fortawesome/react-fontawesome": "^0.2.0", 24 | "@mdx-js/react": "^3.0.0", 25 | "clsx": "^2.0.0", 26 | "diff": "^5.1.0", 27 | "docusaurus-plugin-sass": "^0.2.5", 28 | "dotenv": "^16.3.1", 29 | "express": "^4.21.0", 30 | "prism-react-renderer": "^2.2.0", 31 | "react": "^18.0.0", 32 | "react-dom": "^18.0.0", 33 | "react-player": "^2.13.0", 34 | "react-tooltip": "^5.23.0", 35 | "sass": "^1.69.5", 36 | "yaml": "^2.3.4", 37 | "yamljs": "^0.3.0" 38 | }, 39 | "browserslist": { 40 | "production": [ 41 | ">0.5%", 42 | "not dead", 43 | "not op_mini all" 44 | ], 45 | "development": [ 46 | "last 1 chrome version", 47 | "last 1 firefox version", 48 | "last 1 safari version" 49 | ] 50 | }, 51 | "devDependencies": { 52 | "@docusaurus/module-type-aliases": "^3.0.0" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /website/sidebars.js: -------------------------------------------------------------------------------- 1 | // sidebars.js 2 | 3 | const sidebars = { 4 | Python: [{type: 'autogenerated', dirName: 'python'}], 5 | Java: [{type: 'autogenerated', dirName: 'java'}], 6 | }; 7 | 8 | module.exports = sidebars; -------------------------------------------------------------------------------- /website/src/css/custom.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --ifm-color-primary: #2e8555; 3 | --ifm-color-primary-dark: #29784c; 4 | --ifm-color-primary-darker: #277148; 5 | --ifm-color-primary-darkest: #205d3b; 6 | --ifm-color-primary-light: #33925d; 7 | --ifm-color-primary-lighter: #359962; 8 | --ifm-color-primary-lightest: #3cad6e; 9 | --ifm-hero-background-color: #000716; 10 | --ifm-navbar-background-color: #000716; 11 | --ifm-navbar-link-color: #d1d5db; 12 | --ifm-navbar-link-hover-color: #ff9900; 13 | --ifm-menu-color: #414d5c; 14 | --ifm-menu-color-active: #0972d3; 15 | --ifm-breadcrumb-color-active: #0972d3; 16 | --ifm-link-color: #0972d3; 17 | --ifm-footer-background-color: #0f1b2a; 18 | --ifm-footer-link-color: #d1d5db; 19 | --ifm-footer-title-color: #d1d5db; 20 | --ifm-footer-color: #d1d5db; 21 | --ifm-code-font-size: 95%; 22 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); 23 | --ifm-tabs-color-active: #0972d3; 24 | --ifm-tabs-color-active-border: #0972d3; 25 | --doc-sidebar-width: 350px !important; 26 | } 27 | 28 | [data-theme='dark'] { 29 | --ifm-color-primary: #25c2a0; 30 | --ifm-color-primary-dark: #21af90; 31 | 
--ifm-color-primary-darker: #1fa588; 32 | --ifm-color-primary-darkest: #1a8870; 33 | --ifm-color-primary-light: #29d5b0; 34 | --ifm-color-primary-lighter: #32d8b4; 35 | --ifm-color-primary-lightest: #4fddbf; 36 | --ifm-navbar-background-color: #000716; 37 | --ifm-background-color: #0f1b2a; 38 | --ifm-menu-color: #d1d5db; 39 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); 40 | } 41 | 42 | .navbar__title { 43 | margin-left: 1rem; 44 | margin-right: 2rem; 45 | } 46 | 47 | .code-block-highlight { 48 | background-color: #ff000020; 49 | display: block; 50 | margin: 0 calc(-1 * var(--ifm-pre-padding)); 51 | padding: 0 var(--ifm-pre-padding); 52 | border-left: 3px solid #ff000080; 53 | } 54 | 55 | .theme-doc-toc-desktop { 56 | display: none; 57 | } 58 | 59 | .header-github-link:hover { 60 | opacity: 0.6; 61 | } 62 | 63 | .header-github-link::before { 64 | content: ''; 65 | width: 24px; 66 | height: 24px; 67 | display: flex; 68 | background: url("data:image/svg+xml,%3Csvg%20viewBox%3D%270%200%2024%2024%27%20xmlns%3D%27http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%27%3E%3Cstyle%20type%3D%22text%2Fcss%22%3E.st0%7Bfill%3A%23ffffff%3B%7D%3C%2Fstyle%3E%3Cpath%20class%3D%22st0%22%20d%3D%27M12%20.297c-6.63%200-12%205.373-12%2012%200%205.303%203.438%209.8%208.205%2011.385.6.113.82-.258.82-.577%200-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422%2018.07%203.633%2017.7%203.633%2017.7c-1.087-.744.084-.729.084-.729%201.205.084%201.838%201.236%201.838%201.236%201.07%201.835%202.809%201.305%203.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93%200-1.31.465-2.38%201.235-3.22-.135-.303-.54-1.523.105-3.176%200%200%201.005-.322%203.3%201.23.96-.267%201.98-.399%203-.405%201.02.006%202.04.138%203%20.405%202.28-1.552%203.285-1.23%203.285-1.23.645%201.653.24%202.873.12%203.176.765.84%201.23%201.91%201.23%203.22%200%204.61-2.805%205.625-5.475%205.92.42.36.81%201.096.81%202.22%200%201.606-.015%202.896-.015%203.286%200%20.315.21.69.825.57C20.565%2022.092%2024%2017.592%2024%2012.297c0-6.627-5.373-12-12-12%27%2F%3E%3C%2Fsvg%3E") 69 | no-repeat; 70 | } 71 | 72 | .colorModeToggle_node_modules-\@docusaurus-theme-classic-lib-theme-Navbar-Content-styles-module { 73 | color: white; 74 | } 75 | 76 | .hero--primary { 77 | background-color: #000716; 78 | } 79 | 80 | .footer.footer--dark { 81 | --ifm-footer-background-color: #000716; /* adjust this color value to match your site's design */ 82 | --ifm-footer-color: var(--ifm-footer-link-color); 83 | --ifm-footer-link-color: var(--ifm-color-secondary); 84 | --ifm-footer-title-color: var(--ifm-color-white); 85 | } 86 | 87 | .footer .container { 88 | max-width: var(--ifm-container-width); /* assuming this variable is defined */ 89 | } 90 | 91 | .footer .text--center { 92 | color: var(--ifm-footer-color); 93 | font-size: var(--ifm-footer-font-size); /* assuming this variable is defined */ 94 | } 95 | 96 | .footer .footer__item { 97 | font-size: var(--ifm-footer-item-font-size); /* assuming this variable is defined */ 98 | padding: 8px 0 0; 99 | min-height: 30px; 100 | } 101 | 102 | .footer .footer__item a:hover svg { 103 | color: var(--ifm-footer-link-hover-color); /* assuming this variable is defined */ 104 | } 105 | 106 | .footer .footer__title { 107 | color: var(--ifm-footer-title-color); 108 | font: 700 var(--ifm-h4-font-size)/var(--ifm-heading-line-height) var(--ifm-font-family-base); 109 | margin-bottom: var(--ifm-heading-margin-bottom); 110 | text-transform: uppercase; 111 | } 112 | 113 | .footer .footer__col { 114 | margin: 0 15px 15px 15px; 115 
| } 116 | 117 | .footer .footer__logo { 118 | margin-top: 1rem; 119 | max-width: var(--ifm-footer-logo-max-width); 120 | } 121 | 122 | .footer .footer__links { 123 | display: flex; 124 | flex-wrap: wrap; 125 | margin-bottom: 1rem; 126 | } 127 | 128 | .footer .footer__link { 129 | color: var(--ifm-footer-link-color); 130 | line-height: 2; 131 | margin: 5px var(--ifm-footer-link-horizontal-spacing); 132 | text-decoration: none; 133 | } 134 | 135 | .footer .footer__link:hover { 136 | color: var(--ifm-footer-link-hover-color); 137 | } 138 | 139 | .footer .footer__copy { 140 | color: var(--ifm-footer-color); 141 | font-size: var(--ifm-footer-copy-font-size); /* assuming this variable is defined */ 142 | margin: 15px 0 0; 143 | } 144 | -------------------------------------------------------------------------------- /website/src/includes/get-ecr-uri.md: -------------------------------------------------------------------------------- 1 | First, retrieve your Amazon ECR repository URI using the following command: 2 | 3 | ```bash 4 | echo ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/fastapi-microservices:${IMAGE_VERSION} 5 | ``` 6 | 7 | The expected output should look like this: 8 | 9 | ```bash 10 | 012345678901.dkr.ecr.us-west-1.amazonaws.com/fastapi-microservices:1.0 11 | ``` -------------------------------------------------------------------------------- /website/src/includes/get-env-vars.md: -------------------------------------------------------------------------------- 1 | ## Initial Setup 2 | Navigate to the root directory of the `python-fastapi-demo-docker` project where your [environment variables are sourced](../../python/introduction/environment-setup): 3 | ```bash 4 | cd ~/environment/python-fastapi-demo-docker 5 | ``` -------------------------------------------------------------------------------- /website/src/pages/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Redirect } from '@docusaurus/router'; 3 | 4 | export default function Home() { 5 | return ; 6 | } 7 | -------------------------------------------------------------------------------- /website/src/pages/index.module.css: -------------------------------------------------------------------------------- 1 | /** 2 | * CSS files with the .module.css suffix will be treated as CSS modules 3 | * and scoped locally. 
4 | */ 5 | 6 | .heroBanner { 7 | padding: 4rem 0; 8 | text-align: center; 9 | position: relative; 10 | overflow: hidden; 11 | } 12 | 13 | @media screen and (max-width: 996px) { 14 | .heroBanner { 15 | padding: 2rem; 16 | } 17 | } 18 | 19 | .buttons { 20 | display: flex; 21 | align-items: center; 22 | justify-content: center; 23 | } -------------------------------------------------------------------------------- /website/src/scripts/FeedbackLink.jsx: -------------------------------------------------------------------------------- 1 | // src/scripts/FeedbackLink.jsx 2 | 3 | import React from 'react'; 4 | 5 | const FeedbackLink = () => { 6 | return ( 7 | 16 | ); 17 | }; 18 | 19 | export default FeedbackLink; 20 | -------------------------------------------------------------------------------- /website/src/theme/DocItem/Footer/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import clsx from 'clsx'; 3 | import {ThemeClassNames} from '@docusaurus/theme-common'; 4 | import {useDoc} from '@docusaurus/theme-common/internal'; 5 | import LastUpdated from '@theme/LastUpdated'; 6 | import EditThisPage from '@theme/EditThisPage'; 7 | import TagsListInline from '@theme/TagsListInline'; 8 | import styles from './styles.module.css'; 9 | import FeedbackLink from '../../../scripts/FeedbackLink'; 10 | 11 | function TagsRow(props) { 12 | return ( 13 |
18 |
19 | 20 |
21 |
22 | ); 23 | } 24 | function EditMetaRow({ 25 | editUrl, 26 | lastUpdatedAt, 27 | lastUpdatedBy, 28 | formattedLastUpdatedAt, 29 | }) { 30 | return ( 31 |
32 |
{editUrl && }
33 | 34 |
35 | {(lastUpdatedAt || lastUpdatedBy) && ( 36 | 41 | )} 42 |
43 |
44 | ); 45 | } 46 | export default function DocItemFooter() { 47 | const {metadata} = useDoc(); 48 | const {editUrl, lastUpdatedAt, formattedLastUpdatedAt, lastUpdatedBy, tags} = 49 | metadata; 50 | const canDisplayTagsRow = tags.length > 0; 51 | const canDisplayEditMetaRow = !!(editUrl || lastUpdatedAt || lastUpdatedBy); 52 | const canDisplayFooter = canDisplayTagsRow || canDisplayEditMetaRow; 53 | if (!canDisplayFooter) { 54 | return null; 55 | } 56 | 57 | return ( 58 |
61 | {canDisplayTagsRow && } 62 | {canDisplayEditMetaRow && ( 63 | 69 | )} 70 | {/* Feedback Link insertion */} 71 | 72 | 73 |
74 | ); 75 | } 76 | -------------------------------------------------------------------------------- /website/src/theme/DocItem/Footer/styles.module.css: -------------------------------------------------------------------------------- 1 | .lastUpdated { 2 | margin-top: 0.2rem; 3 | font-style: italic; 4 | font-size: smaller; 5 | } 6 | 7 | @media (min-width: 997px) { 8 | .lastUpdated { 9 | text-align: right; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /website/static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/static/.nojekyll -------------------------------------------------------------------------------- /website/static/img/docusaurus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/static/img/docusaurus.png -------------------------------------------------------------------------------- /website/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/static/img/favicon.ico -------------------------------------------------------------------------------- /website/static/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-workshop-developers/d409e11299f5b94f470522b463791a296fae25d9/website/static/img/favicon.png -------------------------------------------------------------------------------- /website/static/img/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 9 | 10 | 31 | 32 | 34 | 36 | 37 | 38 | 39 | --------------------------------------------------------------------------------