├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── app.py ├── cdk.json ├── config ├── amazon-cloudwatch-agent-mem-log.json ├── amazon-cloudwatch-agent-mem.json └── amazon-cloudwatch-agent.json ├── graviton ├── aurora_graviton │ └── aurora.py ├── cs_graviton │ ├── Deployment-aspnet.yaml │ ├── Deployment.yaml │ ├── Dockerfile │ ├── app │ │ ├── Dockerfile │ │ ├── arm64-buildspec.yml │ │ ├── manifest-buildspec.yml │ │ ├── nodejs_code │ │ │ └── app.js │ │ ├── package.json │ │ └── x86-buildspec.yml │ ├── arm64-dotnet-buildspec.yml │ ├── ecs_graviton.py │ ├── eks_graviton.py │ ├── pipeline_graviton.py │ └── pipeline_netcore_graviton.py ├── ec2_graviton │ ├── Python │ │ ├── .gitignore │ │ ├── app.py │ │ ├── requirements.txt │ │ └── short_url.py │ ├── ec2.py │ └── node_js │ │ ├── main.mjs │ │ └── package.json ├── opensearch_graviton │ ├── lambdas │ │ ├── insert_into_index.py │ │ └── requirements.txt │ └── open_search.py ├── perf_graviton │ └── perf.py ├── rds_graviton │ ├── rds2.py │ ├── rds_mysql_5.py │ ├── rds_mysql_8.py │ ├── rds_pg_restore.py │ ├── rds_pgsql_14.py │ └── rds_restore.py └── vpc_base │ └── vpc.py ├── pom-java11.xml ├── pom-java8.xml ├── requirements-dev.txt ├── requirements.txt ├── scripts ├── cfn-template.yaml ├── client ├── create_emr_buckets.sh ├── cs_cleanup.sh ├── ec2_module_sut_1 ├── ec2_module_sut_2 ├── ec2_module_sut_3 ├── ec2_module_test_client ├── elasticsearch-generate-data.py ├── elasticsearch-search.py ├── etl-spark.py ├── plot_results.py ├── rds-peering-cleanup.sh ├── rds-peering.sh ├── rds-pg-snapshot.sh ├── rds-snapshot.sh ├── resize.sh ├── runner.lua ├── sut_1 ├── sut_2 ├── tripdata.csv ├── ubuntu-prereqs.sh ├── user-script-client.sh ├── user-script-java11.sh ├── user-script-java8.sh ├── vpc-env.sh └── wrk2_wrapper.sh └── src └── main └── java └── com └── company └── example └── web └── bookshop └── VertxBookShopREST.java /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | package-lock.json 3 | __pycache__ 4 | .pytest_cache 5 | .venv 6 | *.egg-info 7 | 8 | # CDK asset staging directory 9 | .cdk.staging 10 | cdk.out 11 | 12 | #workshop sub-repos 13 | dotnet-docker 14 | test_db 15 | 16 | # Node.js 17 | node_modules/ 18 | npm-debug.log 19 | 20 | # Logs 21 | logs 22 | *.log 23 | npm-debug.log* 24 | 25 | # Dependency directories 26 | .pnpm 27 | .npm 28 | .cache 29 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This project is designed to help you deploy services such as EKS, ECS, RDS, and EMR 2 | on Graviton instances. 3 | All the labs use AWS CDK for initial deployment and utilize dedicated VPC. 4 | 5 | Currently covered scenarios include : 6 | 7 | * EKS cluster with multi-architecture (x86-64 and arm64) nodegroups 8 | * ECS cluster with sample task and service running on Graviton instance type 9 | * CI pipeline for multi-architecture docker container images using CodePipeline, CodeBuild, CodeCommit, and ECR (with docker manifests) 10 | * CI pipeline for running .Net Core 5 on Amazon EKS cluster 11 | * RDS migration scenario from MySQl 8 on m5 instance type to MySQL on m6g instance type 12 | * RDS migration scenario from MySQl 5 on m5 instance type -> in-place major version upgrade MySQL 8 -> to in-place instance change to m6g instance type 13 | * EMR cluster with sample ETL Spark job running on Graviton instance type 14 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | #!/usr/bin/env python3 4 | 5 | import aws_cdk as cdk 6 | from graviton.rds_graviton.rds_mysql_5 import CdkRds5Stack 7 | from graviton.rds_graviton.rds_mysql_8 import CdkRds8Stack 8 | from graviton.rds_graviton.rds_pgsql_14 import CdkPgSQLStack 9 | from graviton.rds_graviton.rds_pg_restore import CdkRdsPgRestoreStack 10 | from graviton.rds_graviton.rds_restore import CdkRdsRestoreStack 11 | from graviton.vpc_base.vpc import CdkVpcStack 12 | from graviton.cs_graviton.eks_graviton import CdkEksStack 13 | from graviton.cs_graviton.ecs_graviton import CdkEcsStack 14 | from graviton.cs_graviton.pipeline_graviton import CdkPipelineStack 15 | from graviton.cs_graviton.pipeline_netcore_graviton import CdkPipelineDotNetStack 16 | from graviton.opensearch_graviton.open_search import CdkOpenSearchStack 17 | from graviton.perf_graviton.perf import CdkPerfStack 18 | from graviton.ec2_graviton.ec2 import CdkEC2Stack 19 | from graviton.aurora_graviton.aurora import CdkAuroraStack 20 | 21 | 22 | class GravitonID(cdk.App): 23 | 24 | def __init__(self, **kwargs): 25 | super().__init__(**kwargs) 26 | 27 | self.stack_name = "GravitonID" 28 | self.base_module = CdkVpcStack(self, self.stack_name + "-base") 29 | self.rds_5_module = CdkRds5Stack( 30 | self, self.stack_name + "-rds-5", self.base_module.vpc 31 | ) 32 | self.rds_8_module = CdkRds8Stack( 33 | self, self.stack_name + "-rds-8", self.base_module.vpc 34 | ) 35 | self.rds_pg14_module = CdkPgSQLStack( 36 | self, self.stack_name + "-rds-pg14", self.base_module.vpc 37 | ) 38 | self.restore_pg_module = CdkRdsPgRestoreStack( 39 | self, self.stack_name + "-rds-pg-restore",self.base_module.vpc 40 | ) 41 | self.restore_module = CdkRdsRestoreStack( 42 | self, self.stack_name + "-rds-restore", self.base_module.vpc 43 | ) 44 | self.eks_module = CdkEksStack( 45 | self, self.stack_name + "-eks", self.base_module.vpc 46 | ) 47 | self.ecs_module = CdkEcsStack( 48 | self, self.stack_name + "-ecs", self.base_module.vpc 49 | ) 50 | self.pipeline_module = CdkPipelineStack( 51 | self, self.stack_name + "-pipeline", self.base_module.vpc 52 | ) 53 | self.pipeline_dotnet_module = CdkPipelineDotNetStack( 54 | self, self.stack_name + "-pipeline-dotnet", self.base_module.vpc 55 | ) 56 | self.es_module = CdkOpenSearchStack( 57 | self, self.stack_name + "-os", self.base_module.vpc 58 | ) 59 | self.perf_module = CdkPerfStack( 60 | self, self.stack_name + "-perf", self.base_module.vpc 61 | ) 62 | self.ec2_module = CdkEC2Stack( 63 | self, self.stack_name + "-ec2", self.base_module.vpc 64 | ) 65 | self.ec2_module = CdkAuroraStack( 66 | self, self.stack_name + "-aurora", self.base_module.vpc 67 | ) 68 | 69 | 70 | if __name__ == "__main__": 71 | app = GravitonID() 72 | app.synth() 73 | -------------------------------------------------------------------------------- /cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py", 3 | "watch": { 4 | "include": [ 5 | "**" 6 | ], 7 | "exclude": [ 8 | "README.md", 9 | "cdk*.json", 10 | "requirements*.txt", 11 | "source.bat", 12 | "**/__init__.py", 13 | "python/__pycache__", 14 | "tests" 15 | ] 16 | }, 17 | "context": { 18 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, 19 | "@aws-cdk/core:stackRelativeExports": true, 20 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, 21 | "@aws-cdk/aws-lambda:recognizeVersionProps": true, 22 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true, 23 | "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true, 24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, 25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, 26 | "@aws-cdk/core:checkSecretUsage": true, 27 | "@aws-cdk/aws-iam:minimizePolicies": true, 28 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, 29 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true, 30 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, 31 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, 32 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, 33 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, 34 | "@aws-cdk/core:enablePartitionLiterals": true, 35 | "@aws-cdk/core:target-partitions": [ 36 | "aws", 37 | "aws-cn" 38 | ] 39 | } 40 | } -------------------------------------------------------------------------------- /config/amazon-cloudwatch-agent-mem-log.json: -------------------------------------------------------------------------------- 1 | { 2 | "logs": { 3 | "logs_collected": { 4 | "files": { 5 | "collect_list": [ 6 | { 7 | "file_path": "/tmp/vertx.log", 8 | "log_group_name": "/vertx-logging", 9 | "log_stream_name": "vertx-logging-{instance_id}.log", 10 | "timestamp_format": "%Y-%m-%d %H:%M:%S.%f", 11 | "multi_line_start_pattern": "{timestamp_format}" 12 | } 13 | ] 14 | } 15 | } 16 | }, 17 | "metrics": { 18 | "metrics_collected": { 19 | "mem": { 20 | "measurement": [ 21 | "mem_used_percent" 22 | ], 23 | "metrics_collection_interval":30 24 | } 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /config/amazon-cloudwatch-agent-mem.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "metrics_collected": { 4 | "mem": { 5 | "measurement": [ 6 | "mem_used_percent" 7 | ], 8 | "metrics_collection_interval":30 9 | }, 10 | "swap": { 11 | "measurement": [ 12 | "swap_used_percent" 13 | ], 14 | "metrics_collection_interval":30 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /config/amazon-cloudwatch-agent.json: -------------------------------------------------------------------------------- 1 | { 2 | "logs": { 3 | "logs_collected": { 4 | "files": { 5 | "collect_list": [ 6 | { 7 | "file_path": "/tmp/vertx.log", 8 | "log_group_name": "/vertx-logging", 9 | "log_stream_name": "vertx-logging-{instance_id}.log", 10 | "timestamp_format": "%Y-%m-%d %H:%M:%S.%f", 11 | "multi_line_start_pattern": "{timestamp_format}" 12 | } 13 | ] 14 | } 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /graviton/aurora_graviton/aurora.py: -------------------------------------------------------------------------------- 1 | import aws_cdk as cdk 2 | from constructs import Construct 3 | from aws_cdk import Stack, aws_rds as rds, aws_ec2 as ec2, aws_iam as iam, Duration 4 | import os 5 | 6 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 7 | 8 | 9 | class CdkAuroraStack(Stack): 10 | 11 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 12 | super().__init__(scope, id, **kwargs) 13 | 14 | # Security Group 15 | security_group = ec2.SecurityGroup( 16 | self, 17 | "AuroraPostgresSecurityGroup", 18 | vpc=vpc, 19 | allow_all_outbound=True, 20 | description="Security Group for Aurora PostgreSQL", 21 | security_group_name="AuroraPostgresSecurityGroup", 22 | ) 23 | 24 | # Add inbound rule to allow pgbench client to connect 25 | security_group.add_ingress_rule( 26 | ec2.Peer.ipv4(default_vpc_cidr), ec2.Port.all_traffic() 27 | ) 28 | 29 | # Aurora PostgreSQL Cluster 30 | cluster = rds.DatabaseCluster( 31 | self, 32 | "AuroraPostgresCluster", 33 | engine=rds.DatabaseClusterEngine.aurora_postgres( 34 | version=rds.AuroraPostgresEngineVersion.VER_14_6 35 | ), 36 | writer=rds.ClusterInstance.provisioned( 37 | "writer", 38 | instance_type=ec2.InstanceType.of( 39 | ec2.InstanceClass.R5, ec2.InstanceSize.XLARGE2 40 | ), 41 | enable_performance_insights=True, 42 | instance_identifier="lab-intel-based-instance-1", 43 | ), 44 | cluster_identifier="aurora-lab", 45 | monitoring_interval=Duration.minutes(1), 46 | security_groups=[security_group], 47 | vpc=vpc, 48 | storage_encrypted=True, 49 | default_database_name="postgres", 50 | ) 51 | 52 | cdk.CfnOutput( 53 | self, 54 | "AURORA_WRITER_ENDPOINT", 55 | value=cluster.cluster_endpoint.hostname, 56 | description="The endpoint of the Aurora cluster", 57 | ) 58 | cdk.CfnOutput( 59 | self, 60 | "AURORA_SECRET_NAME", 61 | value=cluster.secret.secret_name, 62 | description="Aurora PostgreSQL database secret name", 63 | ) 64 | -------------------------------------------------------------------------------- /graviton/cs_graviton/Deployment-aspnet.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: aspnet 5 | labels: 6 | name: aspnet 7 | --- 8 | apiVersion: v1 9 | kind: Service 10 | metadata: 11 | name: aspnet-service 12 | namespace: aspnet 13 | annotations: 14 | service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 15 | labels: 16 | app: aspnet-app 17 | spec: 18 | selector: 19 | app: aspnet-app 20 | ports: 21 | - protocol: TCP 22 | port: 80 23 | targetPort: 8080 24 | type: LoadBalancer 25 | --- 26 | apiVersion: apps/v1 27 | kind: Deployment 28 | metadata: 29 | name: aspnet-deployment 30 | namespace: aspnet 31 | labels: 32 | app: aspnet-app 33 | spec: 34 | replicas: 2 35 | selector: 36 | matchLabels: 37 | app: aspnet-app 38 | template: 39 | metadata: 40 | labels: 41 | app: aspnet-app 42 | spec: 43 | affinity: 44 | nodeAffinity: 45 | requiredDuringSchedulingIgnoredDuringExecution: 46 | nodeSelectorTerms: 47 | - matchExpressions: 48 | - key: kubernetes.io/arch 49 | operator: In 50 | values: 51 | - arm64 52 | podAntiAffinity: 53 | requiredDuringSchedulingIgnoredDuringExecution: 54 | - labelSelector: 55 | matchExpressions: 56 | - key: app 57 | operator: In 58 | values: 59 | - aspnet-app 60 | topologyKey: "kubernetes.io/hostname" 61 | containers: 62 | - name: aspnet-container 63 | image: {{container_uri}} 64 | ports: 65 | - containerPort: 80 66 | -------------------------------------------------------------------------------- /graviton/cs_graviton/Deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: multiarch 5 | labels: 6 | name: multiarch 7 | --- 8 | apiVersion: v1 9 | kind: Service 10 | metadata: 11 | name: multiarch-service 12 | namespace: multiarch 13 | annotations: 14 | service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 15 | labels: 16 | app: multiarch-app 17 | spec: 18 | selector: 19 | app: multiarch-app 20 | ports: 21 | - protocol: TCP 22 | port: 80 23 | targetPort: 3000 24 | type: LoadBalancer 25 | --- 26 | apiVersion: apps/v1 27 | kind: Deployment 28 | metadata: 29 | name: multiarch-deployment 30 | namespace: multiarch 31 | labels: 32 | app: multiarch-app 33 | spec: 34 | replicas: 4 35 | selector: 36 | matchLabels: 37 | app: multiarch-app 38 | template: 39 | metadata: 40 | labels: 41 | app: multiarch-app 42 | spec: 43 | affinity: 44 | nodeAffinity: 45 | requiredDuringSchedulingIgnoredDuringExecution: 46 | nodeSelectorTerms: 47 | - matchExpressions: 48 | - key: kubernetes.io/arch 49 | operator: In 50 | values: 51 | - amd64 52 | - arm64 53 | podAntiAffinity: 54 | requiredDuringSchedulingIgnoredDuringExecution: 55 | - labelSelector: 56 | matchExpressions: 57 | - key: app 58 | operator: In 59 | values: 60 | - multiarch-app 61 | topologyKey: "kubernetes.io/hostname" 62 | containers: 63 | - name: multiarch-container 64 | image: {{container_uri}} 65 | ports: 66 | - containerPort: 80 67 | -------------------------------------------------------------------------------- /graviton/cs_graviton/Dockerfile: -------------------------------------------------------------------------------- 1 | # Learn about building .NET container images: 2 | # https://github.com/dotnet/dotnet-docker/blob/main/samples/README.md 3 | FROM mcr.microsoft.com/dotnet/sdk:9.0-bookworm-slim AS build 4 | WORKDIR /source 5 | 6 | # copy csproj and restore as distinct layers 7 | COPY aspnetapp/*.csproj . 8 | RUN dotnet restore 9 | 10 | # copy everything else and build app 11 | COPY aspnetapp/. . 12 | RUN dotnet publish --no-restore -o /app 13 | 14 | 15 | # final stage/image 16 | FROM mcr.microsoft.com/dotnet/aspnet:9.0-bookworm-slim 17 | WORKDIR /app 18 | COPY --from=build /app . 19 | USER $APP_UID 20 | ENTRYPOINT ["./aspnetapp"] 21 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/amazonlinux/amazonlinux:2 2 | WORKDIR /usr/src/app 3 | COPY package*.json nodejs_code/app.js ./ 4 | RUN yum install https://rpm.nodesource.com/pub_16.x/nodistro/repo/nodesource-release-nodistro-1.noarch.rpm -y 5 | RUN yum install nodejs -y --setopt=nodesource-nodejs.module_hotfixes=1 6 | RUN npm install 7 | EXPOSE 3000 8 | CMD ["node", "app.js"] 9 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/arm64-buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | pre_build: 5 | commands: 6 | - echo Logging in to Amazon ECR... 7 | - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com 8 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7) 9 | - echo $COMMIT_HASH 10 | - IMAGE_TAG=${COMMIT_HASH}-arm64 11 | - echo $IMAGE_TAG 12 | - REPOSITORY_URI=${REPO_ECR} 13 | build: 14 | commands: 15 | - echo Build started on `date` 16 | - echo Building the Docker image... 17 | - docker build -t $REPOSITORY_URI:$IMAGE_TAG . 18 | post_build: 19 | commands: 20 | - echo Build completed on `date` 21 | - echo Pushing the Docker image... 22 | - docker push $REPOSITORY_URI:$IMAGE_TAG 23 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/manifest-buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | pre_build: 5 | commands: 6 | - echo Logging in to Amazon ECR... 7 | - aws --version 8 | - $(aws ecr get-login --region $AWS_DEFAULT_REGION --no-include-email) 9 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7) 10 | - IMAGE_TAG=${COMMIT_HASH} 11 | - ARM_TAG=${IMAGE_TAG}-arm64 12 | - X86_TAG=${IMAGE_TAG}-x86 13 | - echo $REPOSITORY_URI 14 | - echo $IMAGE_TAG 15 | - echo $X86_TAG 16 | - echo $ARM_TAG 17 | - REPOSITORY_URI=${REPO_ECR} 18 | - export DOCKER_CLI_EXPERIMENTAL=enabled 19 | build: 20 | commands: 21 | - echo Build started on `date` 22 | - echo Building the Docker manifest... 23 | - docker manifest create $REPOSITORY_URI:$IMAGE_TAG $REPOSITORY_URI:$ARM_TAG $REPOSITORY_URI:$X86_TAG 24 | - docker manifest annotate --arch arm64 $REPOSITORY_URI:$IMAGE_TAG $REPOSITORY_URI:$ARM_TAG 25 | - docker manifest annotate --arch amd64 $REPOSITORY_URI:$IMAGE_TAG $REPOSITORY_URI:$X86_TAG 26 | - docker manifest inspect $REPOSITORY_URI:$IMAGE_TAG 27 | - docker manifest push $REPOSITORY_URI:$IMAGE_TAG 28 | post_build: 29 | commands: 30 | - echo Build started on `date` 31 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/nodejs_code/app.js: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | const http = require('http'); 4 | 5 | const port = 3000; 6 | 7 | const server = http.createServer((req, res) => { 8 | res.statusCode = 200; 9 | res.setHeader('Content-Type', 'text/plain'); 10 | res.end(`Hi! This processor architecture is ${process.arch}`); 11 | }); 12 | 13 | server.listen(port, () => { 14 | console.log(`Server running on ${process.arch} architecture.`); 15 | }); 16 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Hello-arch-app", 3 | "version": "1.0.0" 4 | } 5 | -------------------------------------------------------------------------------- /graviton/cs_graviton/app/x86-buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | pre_build: 5 | commands: 6 | - echo Logging in to Amazon ECR... 7 | - $(aws ecr get-login --region $AWS_DEFAULT_REGION --no-include-email) 8 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7) 9 | - echo $COMMIT_HASH 10 | - IMAGE_TAG=${COMMIT_HASH}-x86 11 | - echo $IMAGE_TAG 12 | - REPOSITORY_URI=${REPO_ECR} 13 | build: 14 | commands: 15 | - echo Build started on `date` 16 | - echo Building the Docker image... 17 | - docker build -t $REPOSITORY_URI:$IMAGE_TAG . 18 | post_build: 19 | commands: 20 | - echo Build completed on `date` 21 | - echo Pushing the Docker image... 22 | - docker push $REPOSITORY_URI:$IMAGE_TAG 23 | -------------------------------------------------------------------------------- /graviton/cs_graviton/arm64-dotnet-buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | pre_build: 5 | commands: 6 | - echo Logging in to Amazon ECR... 7 | - $(aws ecr get-login --region $AWS_DEFAULT_REGION --no-include-email) 8 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7) 9 | - IMAGE_TAG=${COMMIT_HASH:=latest}-arm64 10 | - REPOSITORY_URI=${REPO_ECR} 11 | build: 12 | commands: 13 | - echo Build started on `date` 14 | - echo Building the Docker image... 15 | - docker build -t $REPOSITORY_URI:latest . 16 | - docker tag $REPOSITORY_URI:latest $REPOSITORY_URI:$IMAGE_TAG 17 | post_build: 18 | commands: 19 | - echo Build completed on `date` 20 | - echo Pushing the Docker image... 21 | - docker push $REPOSITORY_URI:$IMAGE_TAG 22 | -------------------------------------------------------------------------------- /graviton/cs_graviton/ecs_graviton.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | from aws_cdk import Duration 6 | import aws_cdk.aws_ec2 as ec2 7 | import aws_cdk.aws_ecs as ecs 8 | import aws_cdk.aws_elasticloadbalancingv2 as elbv2 9 | from aws_cdk import aws_ecs_patterns as ecs_patterns 10 | import aws_cdk.aws_ssm as ssm 11 | import aws_cdk.aws_autoscaling as autoscaling 12 | import aws_cdk.aws_iam as iam 13 | import os 14 | 15 | 16 | class CdkEcsStack(cdk.Stack): 17 | 18 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 19 | super().__init__(scope, id, **kwargs) 20 | 21 | 22 | cluster = ecs.Cluster( 23 | self, 'ECSGraviton', 24 | vpc=vpc, 25 | container_insights=True, 26 | enable_fargate_capacity_providers=True 27 | ) 28 | 29 | ecs_exec_role = iam.Role( 30 | self, 31 | "ECSExecRole", 32 | assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), 33 | role_name="ECSExecRole", 34 | managed_policies=[ 35 | iam.ManagedPolicy.from_managed_policy_arn( 36 | self, 37 | "ECSTaskExecutionRolePolicy", 38 | managed_policy_arn="arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy", 39 | ) 40 | ], 41 | ) 42 | 43 | container_uri = ssm.StringParameter.value_for_string_parameter(self ,"graviton_lab_container_uri") 44 | 45 | fargate_service =ecs_patterns.ApplicationLoadBalancedFargateService(self, "FargateService", 46 | cluster=cluster, 47 | cpu=256, 48 | desired_count=1, 49 | task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions( 50 | image=ecs.ContainerImage.from_registry(container_uri), 51 | container_port=3000, 52 | execution_role=ecs_exec_role, 53 | ), 54 | memory_limit_mib=512, 55 | public_load_balancer=True, 56 | runtime_platform=ecs.RuntimePlatform( 57 | operating_system_family=ecs.OperatingSystemFamily.LINUX, 58 | cpu_architecture=ecs.CpuArchitecture.ARM64 59 | ), 60 | ) 61 | 62 | cdk.CfnOutput( 63 | self, "LoadBalancerDNS", 64 | value=fargate_service.load_balancer.load_balancer_dns_name 65 | 66 | ) -------------------------------------------------------------------------------- /graviton/cs_graviton/eks_graviton.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | import aws_cdk as cdk 3 | from constructs import Construct 4 | import aws_cdk.aws_ec2 as ec2 5 | import aws_cdk.aws_eks as eks 6 | import aws_cdk.aws_iam as iam 7 | import os 8 | from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer 9 | 10 | class CdkEksStack(cdk.Stack): 11 | 12 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 13 | super().__init__(scope, id, **kwargs) 14 | 15 | # Create SecurityGroup for the Control Plane ENIs 16 | eks_security_group = ec2.SecurityGroup( 17 | self, "EKSSecurityGroup", 18 | vpc=vpc, 19 | allow_all_outbound=True 20 | ) 21 | 22 | eks_security_group.add_ingress_rule( 23 | ec2.Peer.ipv4('10.0.0.0/16'), 24 | ec2.Port.all_traffic() 25 | ) 26 | 27 | clusterAdminRole = iam.Role(self, 'ClusterAdmin', 28 | assumed_by= iam.AccountRootPrincipal() 29 | ) 30 | clusterAdminRole.add_to_policy(iam.PolicyStatement( 31 | resources=["*"], 32 | actions=[ 33 | "eks:Describe*", 34 | "eks:List*", 35 | "eks:AccessKubernetesApi", 36 | "ssm:GetParameter", 37 | "iam:ListRoles" 38 | ], 39 | )) 40 | 41 | 42 | # Managed Node Group Instance Role 43 | managed_node_managed_policies = ( 44 | iam.ManagedPolicy.from_aws_managed_policy_name('AmazonEKSWorkerNodePolicy'), 45 | iam.ManagedPolicy.from_aws_managed_policy_name('AmazonEKS_CNI_Policy'), 46 | iam.ManagedPolicy.from_aws_managed_policy_name('AmazonEC2ContainerRegistryReadOnly'), 47 | iam.ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy'), 48 | iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), 49 | ) 50 | managed_node_role = iam.Role(self,'NodeInstanceRole', 51 | path='/', 52 | assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'), 53 | managed_policies=list(managed_node_managed_policies), 54 | ) 55 | 56 | self.cluster = eks.Cluster(self, "EKSGraviton2", 57 | version=eks.KubernetesVersion.V1_29, 58 | default_capacity=0, 59 | output_cluster_name=True, 60 | masters_role=clusterAdminRole, 61 | output_config_command=True, 62 | endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE, 63 | vpc=vpc, 64 | security_group=eks_security_group, 65 | kubectl_layer=KubectlV29Layer(self, 'KubectlV29Layer') 66 | 67 | ) 68 | 69 | self.ng_x86 = self.cluster.add_nodegroup_capacity("x86-node-group", 70 | instance_types=[ec2.InstanceType("m5.large")], 71 | desired_size=2, 72 | node_role = managed_node_role, 73 | min_size=1, 74 | max_size=3 75 | ) 76 | 77 | self.ng_arm64 = self.cluster.add_nodegroup_capacity("arm64-node-group", 78 | instance_types=[ec2.InstanceType("m6g.large")], 79 | desired_size=2, 80 | node_role = managed_node_role, 81 | min_size=1, 82 | max_size=3 83 | ) 84 | 85 | -------------------------------------------------------------------------------- /graviton/cs_graviton/pipeline_graviton.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ecr as ecr 6 | import aws_cdk.aws_iam as iam 7 | import aws_cdk.aws_codecommit as codecommit 8 | import aws_cdk.aws_codepipeline as codepipeline 9 | import aws_cdk.aws_codebuild as codebuild 10 | import aws_cdk.aws_codepipeline_actions as codepipeline_actions 11 | import os 12 | 13 | 14 | class CdkPipelineStack(cdk.Stack): 15 | 16 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 17 | super().__init__(scope, id, **kwargs) 18 | 19 | name = "graviton-pipeline-lab" 20 | # ECR repositories 21 | container_repository = ecr.Repository( 22 | scope=self, 23 | id=f"{name}-container", 24 | repository_name=f"{name}" 25 | ) 26 | # Repo for Application 27 | codecommit_repo = codecommit.Repository( 28 | scope=self, 29 | id=f"{name}-container-git", 30 | repository_name=f"{name}", 31 | description=f"Application code" 32 | ) 33 | 34 | pipeline = codepipeline.Pipeline( 35 | scope=self, 36 | id=f"{name}-container--pipeline", 37 | pipeline_name=f"{name}" 38 | ) 39 | 40 | source_output = codepipeline.Artifact() 41 | docker_output_x86 = codepipeline.Artifact("x86_BuildOutput") 42 | docker_output_arm64 = codepipeline.Artifact("ARM64_BuildOutput") 43 | manifest_output = codepipeline.Artifact("ManifestOutput") 44 | 45 | buildspec_x86 = codebuild.BuildSpec.from_source_filename("x86-buildspec.yml") 46 | buildspec_arm64 = codebuild.BuildSpec.from_source_filename("arm64-buildspec.yml") 47 | buildspec_manifest = codebuild.BuildSpec.from_source_filename("manifest-buildspec.yml") 48 | 49 | docker_build_x86 = codebuild.PipelineProject( 50 | scope=self, 51 | id=f"DockerBuild_x86", 52 | environment=dict( 53 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, 54 | privileged=True), 55 | environment_variables={ 56 | 'REPO_ECR': codebuild.BuildEnvironmentVariable( 57 | value=container_repository.repository_uri), 58 | 'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable( 59 | value=self.account) 60 | }, 61 | build_spec=buildspec_x86 62 | ) 63 | 64 | docker_build_arm64 = codebuild.PipelineProject( 65 | scope=self, 66 | id=f"DockerBuild_ARM64", 67 | environment=dict( 68 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_ARM, 69 | privileged=True), 70 | environment_variables={ 71 | 'REPO_ECR': codebuild.BuildEnvironmentVariable( 72 | value=container_repository.repository_uri), 73 | 'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable( 74 | value=self.account) 75 | }, 76 | build_spec=buildspec_arm64 77 | ) 78 | 79 | manifest_build = codebuild.PipelineProject( 80 | scope=self, 81 | id=f"ManifestBuild", 82 | environment=dict( 83 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, 84 | privileged=True), 85 | environment_variables={ 86 | 'REPO_ECR': codebuild.BuildEnvironmentVariable( 87 | value=container_repository.repository_uri), 88 | 'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable( 89 | value=self.account) 90 | }, 91 | build_spec=buildspec_manifest 92 | ) 93 | 94 | container_repository.grant_pull_push(docker_build_x86) 95 | container_repository.grant_pull_push(docker_build_arm64) 96 | container_repository.grant_pull_push(manifest_build) 97 | 98 | docker_build_x86.add_to_role_policy(iam.PolicyStatement( 99 | effect=iam.Effect.ALLOW, 100 | actions=["ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"], 101 | resources=[f"arn:{cdk.Stack.of(self).partition}:ecr:{cdk.Stack.of(self).region}:{cdk.Stack.of(self).account}:repository/*"],)) 102 | 103 | docker_build_arm64.add_to_role_policy(iam.PolicyStatement( 104 | effect=iam.Effect.ALLOW, 105 | actions=["ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"], 106 | resources=[f"arn:{cdk.Stack.of(self).partition}:ecr:{cdk.Stack.of(self).region}:{cdk.Stack.of(self).account}:repository/*"],)) 107 | 108 | manifest_build.add_to_role_policy(iam.PolicyStatement( 109 | effect=iam.Effect.ALLOW, 110 | actions=["ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"], 111 | resources=[f"arn:{cdk.Stack.of(self).partition}:ecr:{cdk.Stack.of(self).region}:{cdk.Stack.of(self).account}:repository/*"],)) 112 | 113 | source_action = codepipeline_actions.CodeCommitSourceAction( 114 | action_name="CodeCommit_Source", 115 | repository=codecommit_repo, 116 | output=source_output, 117 | branch="master" 118 | ) 119 | 120 | pipeline.add_stage( 121 | stage_name="Source", 122 | actions=[source_action] 123 | ) 124 | 125 | # Stages in CodePipeline 126 | pipeline.add_stage( 127 | stage_name="DockerBuild", 128 | actions=[ 129 | codepipeline_actions.CodeBuildAction( 130 | action_name=f"DockerBuild_x86", 131 | project=docker_build_x86, 132 | input=source_output, 133 | outputs=[docker_output_x86]), 134 | codepipeline_actions.CodeBuildAction( 135 | action_name=f"DockerBuild_ARM64", 136 | project=docker_build_arm64, 137 | input=source_output, 138 | outputs=[docker_output_arm64]) 139 | ] 140 | ) 141 | 142 | pipeline.add_stage( 143 | stage_name="Manifest", 144 | actions=[ 145 | codepipeline_actions.CodeBuildAction( 146 | action_name="Manifest", 147 | project=manifest_build, 148 | input=source_output, 149 | outputs=[manifest_output]) 150 | ] 151 | ) 152 | 153 | # Outputs 154 | cdk.CfnOutput( 155 | scope=self, 156 | id="application_repository", 157 | value=codecommit_repo.repository_clone_url_http 158 | ) 159 | -------------------------------------------------------------------------------- /graviton/cs_graviton/pipeline_netcore_graviton.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ecr as ecr 6 | import aws_cdk.aws_iam as iam 7 | import aws_cdk.aws_codecommit as codecommit 8 | import aws_cdk.aws_codepipeline as codepipeline 9 | import aws_cdk.aws_codebuild as codebuild 10 | import aws_cdk.aws_codepipeline_actions as codepipeline_actions 11 | import os 12 | 13 | 14 | class CdkPipelineDotNetStack(cdk.Stack): 15 | 16 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 17 | super().__init__(scope, id, **kwargs) 18 | 19 | name = "graviton-aspnet-lab" 20 | 21 | container_repository = ecr.Repository( 22 | scope=self, 23 | id=f"{name}-container", 24 | repository_name=f"{name}" 25 | ) 26 | 27 | codecommit_repo = codecommit.Repository( 28 | scope=self, 29 | id=f"{name}-container-git", 30 | repository_name=f"{name}", 31 | description=f"Application code" 32 | ) 33 | 34 | pipeline = codepipeline.Pipeline( 35 | scope=self, 36 | id=f"{name}-container--pipeline", 37 | pipeline_name=f"{name}" 38 | ) 39 | 40 | source_output = codepipeline.Artifact() 41 | docker_output_arm64 = codepipeline.Artifact("ARM64_BuildOutput") 42 | 43 | buildspec_arm64 = codebuild.BuildSpec.from_source_filename("arm64-dotnet-buildspec.yml") 44 | 45 | docker_build_arm64 = codebuild.PipelineProject( 46 | scope=self, 47 | id=f"DockerBuild_ARM64", 48 | environment=dict( 49 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_ARM, 50 | privileged=True), 51 | environment_variables={ 52 | 'REPO_ECR': codebuild.BuildEnvironmentVariable( 53 | value=container_repository.repository_uri), 54 | }, 55 | build_spec=buildspec_arm64 56 | ) 57 | 58 | container_repository.grant_pull_push(docker_build_arm64) 59 | 60 | docker_build_arm64.add_to_role_policy(iam.PolicyStatement( 61 | effect=iam.Effect.ALLOW, 62 | actions=["ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"], 63 | resources=[f"arn:{cdk.Stack.of(self).partition}:ecr:{cdk.Stack.of(self).region}:{cdk.Stack.of(self).account}:repository/*"],)) 64 | 65 | source_action = codepipeline_actions.CodeCommitSourceAction( 66 | action_name="CodeCommit_Source", 67 | repository=codecommit_repo, 68 | output=source_output, 69 | branch="master" 70 | ) 71 | 72 | pipeline.add_stage( 73 | stage_name="Source", 74 | actions=[source_action] 75 | ) 76 | 77 | pipeline.add_stage( 78 | stage_name="DockerBuild", 79 | actions=[ 80 | codepipeline_actions.CodeBuildAction( 81 | action_name=f"DockerBuild_ARM64", 82 | project=docker_build_arm64, 83 | input=source_output, 84 | outputs=[docker_output_arm64]) 85 | ] 86 | ) 87 | 88 | # Outputs 89 | cdk.CfnOutput( 90 | scope=self, 91 | id="application_repository", 92 | value=codecommit_repo.repository_clone_url_http 93 | ) 94 | -------------------------------------------------------------------------------- /graviton/ec2_graviton/Python/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | workshop_env/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | #.idea/ 162 | 163 | -------------------------------------------------------------------------------- /graviton/ec2_graviton/Python/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, render_template, request, jsonify 2 | import short_url 3 | from short_url import create_short_url, retrive_from_dynamo 4 | import boto3 5 | 6 | app = Flask(__name__) 7 | 8 | @app.route('/') 9 | def index(): 10 | return 'Hi everyone' 11 | 12 | # # post parameter from url 13 | # @app.route('/shortenURL', methods=['POST']) 14 | # def short_url_post(): 15 | # data = request.get_json() 16 | # url = data.get('url') 17 | # return create_short_url(url) 18 | 19 | @app.route('/shortenURL', methods=['POST']) 20 | def short_url_post(): 21 | data = request.get_json() 22 | original_url = data.get('OriginalURL') # Use the correct key from your JSON payload 23 | if not original_url: 24 | return jsonify({'error': 'Missing URL'}), 400 25 | short_url = create_short_url(original_url) 26 | return short_url # Make sure to return a JSON object with the shortURL key 27 | 28 | 29 | # #get full url from short url 30 | # @app.route('/getFullURL', methods=['GET']) 31 | # def redirect_short_url(): 32 | # short_url = request.args.get('short_url', default=None, type=str) 33 | # full_url = retrive_from_dynamo(short_url) 34 | # return full_url['url'] 35 | 36 | @app.route('/getFullURL/', methods=['GET']) 37 | def redirect_short_url(short_url): 38 | full_url = retrive_from_dynamo(short_url) 39 | if full_url: 40 | return full_url['url'] 41 | else: 42 | return 'URL not found', 404 43 | 44 | if __name__ == '__main__': 45 | app.run(host="0.0.0.0", port=5000, debug=True) 46 | 47 | -------------------------------------------------------------------------------- /graviton/ec2_graviton/Python/requirements.txt: -------------------------------------------------------------------------------- 1 | blinker==1.7.0 2 | click==8.1.7 3 | Flask==3.0.0 4 | itsdangerous==2.1.2 5 | Jinja2==3.1.5 6 | MarkupSafe==2.1.3 7 | numpy==1.26.3 8 | Werkzeug==3.0.6 9 | boto3==1.26.90 10 | -------------------------------------------------------------------------------- /graviton/ec2_graviton/Python/short_url.py: -------------------------------------------------------------------------------- 1 | from flask import jsonify 2 | import random 3 | import string 4 | import boto3 5 | import requests 6 | 7 | 8 | def get_aws_region(): 9 | try: 10 | # Fetch the token 11 | token_response = requests.put( 12 | 'http://169.254.169.254/latest/api/token', 13 | headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'} 14 | ) 15 | token_response.raise_for_status() 16 | token = token_response.text 17 | 18 | # Use the token to fetch the region 19 | response = requests.get( 20 | 'http://169.254.169.254/latest/dynamic/instance-identity/document', 21 | headers={'X-aws-ec2-metadata-token': token} 22 | ) 23 | response.raise_for_status() 24 | return response.json()['region'] 25 | except requests.RequestException as e: 26 | print(f"Error fetching AWS region: {e}") 27 | return None 28 | 29 | current_region = get_aws_region() 30 | print(f"AWS_REGION={current_region}") 31 | dynamodb = boto3.resource('dynamodb', current_region) 32 | table_name = None 33 | 34 | 35 | def get_table_name(): 36 | global table_name 37 | if table_name is not None: 38 | return table_name 39 | # Create a CloudFormation client 40 | cloudformation = boto3.client('cloudformation', region_name=current_region) 41 | 42 | try: 43 | # Describe the stack to get the output values 44 | response = cloudformation.describe_stacks(StackName='GravitonID-ec2') 45 | stacks = response['Stacks'] 46 | if stacks: 47 | stack = stacks[0] 48 | outputs = stack['Outputs'] 49 | for output in outputs: 50 | if output['OutputKey'] == 'EC2ModuleDynamoDBTable': 51 | table_name = output['OutputValue'] 52 | return table_name 53 | return None 54 | except Exception as e: 55 | return str(e) 56 | 57 | 58 | def retrive_from_dynamo(short_url): 59 | dynamodb = boto3.resource('dynamodb', current_region) 60 | table_name = get_table_name() 61 | table = dynamodb.Table(table_name) 62 | 63 | try: 64 | # Get item from the table 65 | response = table.get_item( 66 | Key={ 67 | 'short_url': short_url 68 | } 69 | ) 70 | print('retrive_from_dynamo') 71 | return response['Item'] 72 | except Exception as e: 73 | return str(e) 74 | 75 | 76 | # create a function to shortening url 77 | def create_short_url(url): 78 | try: 79 | # create a random string of length 10 80 | letters = string.ascii_lowercase 81 | short_url = ''.join(random.choice(letters) for i in range(10)) 82 | save_in_dynamo(short_url, url) 83 | # append the random string generated to the url 84 | return jsonify({'shortURL':short_url, 'originalURL':url}) 85 | except Exception as e: 86 | return str(e) 87 | 88 | 89 | def save_in_dynamo(short_url, original_url): 90 | try: 91 | table_name = get_table_name() 92 | table = dynamodb.Table(table_name) 93 | # Put item in the table 94 | response = table.put_item( 95 | Item={ 96 | 'short_url': short_url, 97 | 'url': original_url 98 | } 99 | ) 100 | print('save_in_dynamo') 101 | except Exception as e: 102 | return str(e) 103 | 104 | return response 105 | -------------------------------------------------------------------------------- /graviton/ec2_graviton/ec2.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_dynamodb as dynamodb 6 | from aws_cdk import aws_iam as iam 7 | from aws_cdk import aws_ec2 as ec2 8 | import os 9 | 10 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 11 | 12 | class CdkEC2Stack(cdk.Stack): 13 | 14 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 15 | super().__init__(scope, id, **kwargs) 16 | 17 | current_region = self.region 18 | ec2_test_client = "c5.4xlarge" 19 | ec2_gv2_type = "c6g.xlarge" 20 | ec2_gv3_type = "c7g.xlarge" 21 | ec2_x86_type = "c5.xlarge" 22 | 23 | # Create a placement group with the CLUSTER strategy, we want all instances to be in same placement group for performance reasons 24 | # 25 | #pg = ec2.CfnPlacementGroup(self, 'ec2_module_PlacementGroup', strategy='cluster') 26 | 27 | amzn_linux_arm= ec2.MachineImage.latest_amazon_linux2023(cpu_type=ec2.AmazonLinuxCpuType.ARM_64) 28 | amzn_linux_x86_64= ec2.MachineImage.latest_amazon_linux2023(cpu_type=ec2.AmazonLinuxCpuType.X86_64) 29 | key_name= "gravitonKey" 30 | 31 | ec2_security_group = ec2.SecurityGroup( 32 | self, "Ec2SecurityGroup", 33 | vpc=vpc, 34 | allow_all_outbound=True 35 | ) 36 | ec2_security_group.add_ingress_rule( 37 | ec2.Peer.ipv4('10.0.0.0/16'), 38 | ec2.Port.all_traffic() 39 | ) 40 | ec2_security_group.add_ingress_rule( 41 | ec2.Peer.ipv4(default_vpc_cidr), 42 | ec2.Port.all_traffic() 43 | ) 44 | 45 | # Define the IAM policy for the SUT machines so the test application (shortenURL) on them to have right access 46 | sut_policy_statement = iam.PolicyStatement( 47 | actions=["cloudformation:*","dynamodb:*"], 48 | resources=["*"], 49 | effect=iam.Effect.ALLOW 50 | ) 51 | sut_policy_document = iam.PolicyDocument(statements=[sut_policy_statement]) 52 | 53 | # Create the IAM role for SUT machines 54 | sut_role = iam.Role( 55 | self, 'ec2_module_IAM_Role_SUT_machines', 56 | assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'), 57 | inline_policies={'ec2_module_SUT_machines_Policy': sut_policy_document} 58 | ) 59 | 60 | # Define the IAM policy for the Client machines so they can access the cloudformation to get the SUTs IPs 61 | client_policy_statement = iam.PolicyStatement( 62 | actions=["cloudformation:*"], 63 | resources=["*"], 64 | effect=iam.Effect.ALLOW 65 | ) 66 | client_policy_document = iam.PolicyDocument(statements=[client_policy_statement]) 67 | 68 | # Create the IAM role for client machine 69 | client_role = iam.Role( 70 | self, 'ec2_module_IAM_Role_Client_machines', 71 | assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'), 72 | inline_policies={'ec2_module_Client_machines_Policy': client_policy_document} 73 | ) 74 | 75 | # create a dynamodb table for the test application to use 76 | urls_table = dynamodb.Table( 77 | self, "GravitonWorkshopDdbUrlsTable", 78 | partition_key=dynamodb.Attribute( 79 | name="short_url", 80 | type=dynamodb.AttributeType.STRING 81 | ), 82 | billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, 83 | ) 84 | 85 | user_data = self.get_user_data("ec2_module_test_client") 86 | client1 = ec2.Instance(self, "Client-1", 87 | instance_type=ec2.InstanceType( 88 | instance_type_identifier=ec2_test_client), 89 | instance_name="EC2_Module_Test_Client1_for_SUT1", 90 | machine_image=amzn_linux_x86_64, 91 | vpc=vpc, 92 | key_name=key_name, 93 | security_group=ec2_security_group, 94 | vpc_subnets=ec2.SubnetSelection( 95 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 96 | user_data=ec2.UserData.custom(user_data), 97 | role=client_role # Attach the IAM role to the client instance 98 | ) 99 | # add to placement group with the CLUSTER strategy 100 | #client1.instance.add_property_override('PlacementGroupName', pg.ref) 101 | 102 | # client2 = ec2.Instance(self, "Client-2", 103 | # instance_type=ec2.InstanceType( 104 | # instance_type_identifier=ec2_test_client), 105 | # instance_name="EC2_Module_Test_Client2_for_SUT2", 106 | # machine_image=amzn_linux_x86_64, 107 | # vpc=vpc, 108 | # key_name=key_name, 109 | # security_group=ec2_security_group, 110 | # vpc_subnets=ec2.SubnetSelection( 111 | # subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 112 | # user_data=ec2.UserData.custom(user_data), 113 | # role=client_role # Attach the IAM role to the client instance 114 | # ) 115 | # # add to placement group with the CLUSTER strategy 116 | #client2.instance.add_property_override('PlacementGroupName', pg.ref) 117 | 118 | # client3 = ec2.Instance(self, "Client-3", 119 | # instance_type=ec2.InstanceType( 120 | # instance_type_identifier=ec2_test_client), 121 | # instance_name="EC2_Module_Test_Client3_for_SUT3", 122 | # machine_image=amzn_linux_x86_64, 123 | # vpc=vpc, 124 | # key_name=key_name, 125 | # security_group=ec2_security_group, 126 | # vpc_subnets=ec2.SubnetSelection( 127 | # subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 128 | # user_data=ec2.UserData.custom(user_data), 129 | # role=client_role # Attach the IAM role to the client instance 130 | # ) 131 | 132 | # add to placement group with the CLUSTER strategy 133 | #client3.instance.add_property_override('PlacementGroupName', pg.ref) 134 | 135 | user_data = self.get_user_data("ec2_module_sut_1") 136 | sut_1 = ec2.Instance(self, "SUT1", 137 | instance_type=ec2.InstanceType( 138 | instance_type_identifier=ec2_x86_type), 139 | instance_name="EC2_Module_SUT1_x86", 140 | machine_image=amzn_linux_x86_64, 141 | vpc=vpc, 142 | key_name=key_name, 143 | security_group=ec2_security_group, 144 | vpc_subnets=ec2.SubnetSelection( 145 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 146 | user_data=ec2.UserData.custom(user_data), 147 | role=sut_role # Attach the IAM role to the SUT instance 148 | ) 149 | 150 | # add to placement group with the CLUSTER strategy 151 | #sut_1.instance.add_property_override('PlacementGroupName', pg.ref) 152 | 153 | user_data = self.get_user_data("ec2_module_sut_2") 154 | sut_2 = ec2.Instance(self, "SUT2", 155 | instance_type=ec2.InstanceType( 156 | instance_type_identifier=ec2_gv2_type), 157 | instance_name="EC2_Module_SUT2_GV2", 158 | machine_image=amzn_linux_arm, 159 | vpc=vpc, 160 | key_name=key_name, 161 | security_group=ec2_security_group, 162 | vpc_subnets=ec2.SubnetSelection( 163 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 164 | user_data=ec2.UserData.custom(user_data), 165 | role=sut_role # Attach the IAM role to the SUT instance 166 | ) 167 | 168 | # add to placement group with the CLUSTER strategy 169 | #sut_2.instance.add_property_override('PlacementGroupName', pg.ref) 170 | 171 | user_data = self.get_user_data("ec2_module_sut_3") 172 | sut_3 = ec2.Instance(self, "SUT3", 173 | instance_type=ec2.InstanceType( 174 | instance_type_identifier=ec2_gv3_type), 175 | instance_name="EC2_Module_SUT3_GV3", 176 | machine_image=amzn_linux_arm, 177 | vpc=vpc, 178 | key_name=key_name, 179 | security_group=ec2_security_group, 180 | vpc_subnets=ec2.SubnetSelection( 181 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 182 | user_data=ec2.UserData.custom(user_data), 183 | role=sut_role # Attach the IAM role to the SUT instance 184 | ) 185 | 186 | # add to placement group with the CLUSTER strategy 187 | #sut_3.instance.add_property_override('PlacementGroupName', pg.ref) 188 | 189 | cdk.CfnOutput( self, "Client1_IP", value = client1.instance_private_ip) 190 | #cdk.CfnOutput( self, "Client2_IP", value = client2.instance_private_ip) 191 | #cdk.CfnOutput( self, "Client3_IP", value = client3.instance_private_ip) 192 | cdk.CfnOutput( self, "SUT1_IP", value = sut_1.instance_private_ip) 193 | cdk.CfnOutput( self, "SUT2_IP", value = sut_2.instance_private_ip) 194 | cdk.CfnOutput( self, "SUT3_IP", value = sut_3.instance_private_ip) 195 | cdk.CfnOutput( self, "EC2-Module-DynamoDB-Table", value =urls_table.table_name) 196 | # Output the current region 197 | cdk.CfnOutput(self, "CurrentRegionOutput", value=current_region) 198 | 199 | def get_user_data(self, filename): 200 | with open('./scripts/' + filename) as f: 201 | user_data = f.read() 202 | return user_data -------------------------------------------------------------------------------- /graviton/ec2_graviton/node_js/main.mjs: -------------------------------------------------------------------------------- 1 | 2 | import { DynamoDBClient } from "@aws-sdk/client-dynamodb"; 3 | import { PutCommand, GetCommand, DynamoDBDocumentClient } from "@aws-sdk/lib-dynamodb"; 4 | import { CloudFormationClient, DescribeStacksCommand } from '@aws-sdk/client-cloudformation'; 5 | 6 | 7 | import randomstring from 'randomstring'; 8 | import express from 'express'; 9 | import bodyParser from 'body-parser'; 10 | import axios from 'axios'; 11 | import cluster from 'cluster'; 12 | import os from 'os'; 13 | 14 | const numCPUs = os.cpus().length; 15 | 16 | if (cluster.isPrimary) { 17 | // Fork worker processes 18 | for (let i = 0; i < numCPUs; i++) { 19 | cluster.fork(); 20 | } 21 | } else { 22 | // Worker code 23 | main(); 24 | } 25 | 26 | 27 | async function main() { 28 | 29 | 30 | const awsRegion = await getAwsRegion(); 31 | 32 | //AWS 33 | const client = new DynamoDBClient({ 34 | region: awsRegion // Replace with your desired AWS region 35 | }); 36 | const docClient = DynamoDBDocumentClient.from(client); 37 | const cloudformation = new CloudFormationClient({ region: awsRegion }); 38 | 39 | //App 40 | const app = express(); 41 | app.use(bodyParser.json()); 42 | const PORT = process.env.PORT || 8080; 43 | 44 | const gravitonEc2Stack = 'GravitonID-ec2'; 45 | const dynamoDBTableKey = 'EC2ModuleDynamoDBTable' 46 | const cloudformationOutputs = { 47 | outputs: {} 48 | }; 49 | 50 | fetchStackParameters(gravitonEc2Stack); 51 | 52 | // Middleware for input validation 53 | const validateInput = (req, res, next) => { 54 | const originalUrl = req.body.OriginalURL; 55 | if (!originalUrl || typeof originalUrl !== 'string' || originalUrl.trim() === '') { 56 | return res.status(400).json({ error: 'Invalid input: OriginalURL is required and should be a non-empty string.' }); 57 | } 58 | next(); 59 | }; 60 | 61 | 62 | // Middleware for input validation 63 | const validateGetFullURLInput = (req, res, next) => { 64 | const shortUrl = req.params.shortUrl; 65 | if (!shortUrl || typeof shortUrl !== 'string' || shortUrl.trim() === '') { 66 | return res.status(400).json({ error: 'Invalid input: shortUrl is required and should be a non-empty string.' }); 67 | } 68 | next(); 69 | }; 70 | 71 | // Function to fetch parameters and store them globally 72 | async function fetchStackParameters(stackName) { 73 | try { 74 | 75 | const data = await cloudformation.send(new DescribeStacksCommand({ StackName: stackName })); 76 | const stack = data.Stacks[0]; 77 | 78 | const outputs = stack.Outputs; 79 | outputs.forEach(output => { 80 | cloudformationOutputs.outputs[output.OutputKey] = output.OutputValue; 81 | }); 82 | 83 | } catch (err) { 84 | console.error('Error fetching stack details:', err); 85 | } 86 | } 87 | 88 | 89 | async function getAwsRegion() { 90 | try { 91 | 92 | // Fetch the token 93 | const tokenResponse = await axios.put( 94 | 'http://169.254.169.254/latest/api/token', 95 | {}, 96 | { 97 | headers: { 98 | 'X-aws-ec2-metadata-token-ttl-seconds': '21600' 99 | } 100 | } 101 | ); 102 | const token = tokenResponse.data; 103 | 104 | // Use the token to fetch the region 105 | const response = await axios.get( 106 | 'http://169.254.169.254/latest/dynamic/instance-identity/document', 107 | { 108 | headers: { 109 | 'X-aws-ec2-metadata-token': token 110 | } 111 | } 112 | ); 113 | 114 | return response.data.region; 115 | 116 | } catch (error) { 117 | console.error(`Error fetching AWS region: ${error}`); 118 | return null; 119 | } 120 | } 121 | 122 | 123 | // POST endpoint 124 | app.post('/shortenURL', validateInput, async (req, res) => { 125 | 126 | try { 127 | const originalUrl = req.body.OriginalURL; 128 | 129 | const shortUrl = randomstring.generate({ 130 | length: 10, 131 | charset: 'alphabetic' 132 | }); 133 | 134 | const dynamoDBTableName = cloudformationOutputs.outputs[dynamoDBTableKey]; 135 | 136 | const command = new PutCommand({ 137 | TableName: dynamoDBTableName, 138 | Item: { 139 | short_url: shortUrl, 140 | originalUrl: originalUrl, 141 | }, 142 | }); 143 | 144 | const response = await docClient.send(command); 145 | 146 | if (response.$metadata.httpStatusCode !== 200) { 147 | console.log('Error in DynamoDB request:', response); 148 | return res.status(response.$metadata.httpStatusCode).json(response); 149 | } 150 | 151 | const output = { 152 | shortURL: shortUrl, 153 | originalURL: originalUrl, 154 | }; 155 | 156 | return res.status(200).json(output); 157 | 158 | 159 | } catch (error) { 160 | console.error('Error in main:', error); 161 | return res.status(500).json({ error: 'Internal Server Error' }); 162 | 163 | } 164 | }); 165 | 166 | // GET endpoint 167 | app.get('/getFullURL/:shortUrl', validateGetFullURLInput, async (req, res) => { 168 | 169 | try { 170 | 171 | const shortUrl = req.params.shortUrl; 172 | 173 | const dynamoDBTableName = cloudformationOutputs.outputs[dynamoDBTableKey]; 174 | 175 | const command = new GetCommand({ 176 | TableName: dynamoDBTableName, 177 | Key: { 178 | short_url: shortUrl, 179 | }, 180 | }); 181 | 182 | const response = await docClient.send(command); 183 | 184 | if (!response || !response.Item || !response.Item.originalUrl) { 185 | return res.status(404).json({ error: 'Short URL not found.' }); 186 | } 187 | 188 | const originalURL = response.Item.originalUrl 189 | 190 | return res.status(200).json(originalURL); 191 | 192 | } catch (error) { 193 | console.error('Error in main:', error); 194 | throw error; 195 | } 196 | 197 | }); 198 | 199 | 200 | app.listen(PORT, '0.0.0.0', () => { 201 | console.log(`Server running on http://0.0.0.0:${PORT}`); 202 | }); 203 | 204 | 205 | } -------------------------------------------------------------------------------- /graviton/ec2_graviton/node_js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "building-nodejs-applications-for-aws-graviton", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "main.mjs", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@aws-sdk/client-cloudformation": "^3.540.0", 13 | "@aws-sdk/client-dynamodb": "^3.528.0", 14 | "@aws-sdk/client-s3": "^3.499.0", 15 | "@aws-sdk/lib-dynamodb": "^3.499.0", 16 | "axios": "^1.6.8", 17 | "body-parser": "^1.20.2", 18 | "express": "^4.18.2", 19 | "randomstring": "^1.3.0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /graviton/opensearch_graviton/lambdas/insert_into_index.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | # 3 | # This lambda function inserts random data into an index named "people" within 4 | # an Amazon OpenSearch cluster specified by the user. The function will do 5 | # several insertions before exiting. 6 | 7 | import argparse 8 | import os 9 | import sys 10 | import time 11 | 12 | 13 | import boto3 14 | from elasticsearch import Elasticsearch, RequestsHttpConnection 15 | from elasticsearch.exceptions import ConnectionTimeout 16 | from faker import Faker 17 | from requests_aws4auth import AWS4Auth 18 | 19 | 20 | endpoint = os.environ['OS_ENDPOINT'] 21 | region = os.environ['AWS_REGION'] 22 | 23 | 24 | def handler(event, context): 25 | service = 'es' 26 | credentials = boto3.Session().get_credentials() 27 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token) 28 | 29 | fake = Faker() 30 | 31 | es = Elasticsearch( 32 | hosts = [{'host': endpoint, 'port': 443}], 33 | http_auth = awsauth, 34 | use_ssl = True, 35 | verify_certs = True, 36 | connection_class = RequestsHttpConnection 37 | ) 38 | 39 | attempts = 20 40 | print("Will insert %s items into the index" % attempts) 41 | for i in range(attempts): 42 | document = fake.profile() 43 | 44 | try: 45 | result = es.index(index="people", doc_type="_doc", body=document) 46 | print("Indexed with ID '%s'" % result['_id']) 47 | 48 | except ConnectionTimeout as e: 49 | print("Connection to the OS cluster timed out: %s" % str(e)) 50 | 51 | except Exception as e: 52 | print("Unexpected exception caught: %s" % str(e)) 53 | 54 | time.sleep(2) 55 | -------------------------------------------------------------------------------- /graviton/opensearch_graviton/lambdas/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.24.13 2 | elasticsearch==7.17.4 3 | requests==2.32.2 4 | requests-aws4auth==1.1.2 5 | Faker==13.13.0 6 | -------------------------------------------------------------------------------- /graviton/opensearch_graviton/open_search.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_opensearchservice as open_search 7 | import aws_cdk.aws_events as events 8 | import aws_cdk.aws_events_targets as events_targets 9 | import aws_cdk.aws_iam as iam 10 | import aws_cdk.aws_lambda as _lambda 11 | from aws_cdk.aws_lambda_python_alpha import PythonFunction 12 | import os 13 | 14 | class CdkOpenSearchStack(cdk.Stack): 15 | """A stack containing a basic Amazon Opensearch domain running on the 16 | x86 architecture.""" 17 | 18 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 19 | super().__init__(scope, id, **kwargs) 20 | 21 | os_security_group = ec2.SecurityGroup( 22 | self, "OSSecurityGroup", 23 | vpc=vpc, 24 | allow_all_outbound=True 25 | ) 26 | 27 | os_security_group.add_ingress_rule( 28 | ec2.Peer.ipv4('10.0.0.0/16'), 29 | ec2.Port.all_traffic() 30 | ) 31 | 32 | os_domain = open_search.Domain(self, 33 | "Domain", 34 | version=open_search.EngineVersion.OPENSEARCH_1_3, 35 | vpc=vpc, 36 | capacity={ 37 | 'data_node_instance_type': 'm5.large.search', 38 | 'data_nodes': 2, 39 | 'master_nodes': 0, 40 | 'warm_nodes': 0 41 | }, 42 | zone_awareness= open_search.ZoneAwarenessConfig( 43 | availability_zone_count=2 44 | ), 45 | vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)], 46 | security_groups=[os_security_group], 47 | removal_policy=cdk.RemovalPolicy.DESTROY 48 | ) 49 | 50 | insert_fn = PythonFunction(self, 51 | "InsertIntoIndexFunction", 52 | entry="graviton/opensearch_graviton/lambdas", 53 | index="insert_into_index.py", 54 | handler="handler", 55 | runtime=_lambda.Runtime.PYTHON_3_8, 56 | vpc=vpc, 57 | environment={ 58 | "OS_ENDPOINT": os_domain.domain_endpoint 59 | }, 60 | timeout=cdk.Duration.minutes(1)) 61 | 62 | os_domain.grant_write(insert_fn.grant_principal) 63 | 64 | scheduled_rule = events.Rule(self, 65 | "ScheduledIndexInsertionRule", 66 | schedule=events.Schedule.expression('cron(* * ? * * *)')) 67 | scheduled_rule.add_target(events_targets.LambdaFunction(insert_fn)) 68 | 69 | cdk.CfnOutput(self, "LambdaName",value=insert_fn.function_name) 70 | cdk.CfnOutput(self, "DomainEndpoint", value=os_domain.domain_endpoint) 71 | cdk.CfnOutput(self, "DomainName", value=os_domain.domain_name) 72 | -------------------------------------------------------------------------------- /graviton/perf_graviton/perf.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import os 8 | 9 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 10 | 11 | class CdkPerfStack(cdk.Stack): 12 | 13 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 14 | super().__init__(scope, id, **kwargs) 15 | 16 | ec2_type = "m6g.medium" 17 | ec2_type_client = "m5.large" 18 | amzn_linux= ec2.MachineImage.latest_amazon_linux2023(cpu_type=ec2.AmazonLinuxCpuType.ARM_64) 19 | amzn_linux_x86_64= ec2.MachineImage.latest_amazon_linux2023(cpu_type=ec2.AmazonLinuxCpuType.X86_64) 20 | key_name= "gravitonKey" 21 | 22 | # Create a placement group with the CLUSTER strategy 23 | #pg = ec2.PlacementGroup(self, "ec2_module_PlacementGroup",strategy=ec2.PlacementGroupStrategy.CLUSTER) 24 | pg = ec2.CfnPlacementGroup(self, 'ec2_module_PlacementGroup', strategy='cluster') 25 | ec2_security_group = ec2.SecurityGroup( 26 | self, "Ec2SecurityGroup", 27 | vpc=vpc, 28 | allow_all_outbound=True 29 | ) 30 | ec2_security_group.add_ingress_rule( 31 | ec2.Peer.ipv4('10.0.0.0/16'), 32 | ec2.Port.all_traffic() 33 | ) 34 | ec2_security_group.add_ingress_rule( 35 | ec2.Peer.ipv4(default_vpc_cidr), 36 | ec2.Port.all_traffic() 37 | ) 38 | 39 | user_data = self.get_user_data("client") 40 | client = ec2.Instance(self, "Client", 41 | instance_type=ec2.InstanceType( 42 | instance_type_identifier=ec2_type_client), 43 | instance_name="Perf_Client", 44 | machine_image=amzn_linux_x86_64, 45 | vpc=vpc, 46 | key_name=key_name, 47 | security_group=ec2_security_group, 48 | vpc_subnets=ec2.SubnetSelection( 49 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 50 | user_data=ec2.UserData.custom(user_data) 51 | ) 52 | client.instance.add_property_override('PlacementGroupName', pg.ref) 53 | 54 | user_data = self.get_user_data("sut_1") 55 | sut_1 = ec2.Instance(self, "SUT1", 56 | instance_type=ec2.InstanceType( 57 | instance_type_identifier=ec2_type), 58 | instance_name="Perf_SUT1", 59 | machine_image=amzn_linux, 60 | vpc=vpc, 61 | key_name=key_name, 62 | security_group=ec2_security_group, 63 | vpc_subnets=ec2.SubnetSelection( 64 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 65 | user_data=ec2.UserData.custom(user_data) 66 | ) 67 | 68 | 69 | user_data = self.get_user_data("sut_2") 70 | sut_2 = ec2.Instance(self, "SUT2", 71 | instance_type=ec2.InstanceType( 72 | instance_type_identifier=ec2_type), 73 | instance_name="Perf_SUT2", 74 | machine_image=amzn_linux, 75 | vpc=vpc, 76 | key_name=key_name, 77 | security_group=ec2_security_group, 78 | vpc_subnets=ec2.SubnetSelection( 79 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), 80 | user_data=ec2.UserData.custom(user_data) 81 | ) 82 | 83 | cdk.CfnOutput( self, "Client_IP", value = client.instance_private_ip) 84 | cdk.CfnOutput( self, "SUT1_IP", value = sut_1.instance_private_ip) 85 | cdk.CfnOutput( self, "SUT2_IP", value = sut_2.instance_private_ip) 86 | 87 | def get_user_data(self, filename): 88 | with open('./scripts/' + filename) as f: 89 | user_data = f.read() 90 | return user_data -------------------------------------------------------------------------------- /graviton/rds_graviton/rds2.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | from aws_cdk import core 4 | import aws_cdk.aws_ec2 as ec2 5 | import aws_cdk.aws_rds as rds 6 | import os 7 | 8 | c9_ip = os.environ["C9_HOSTNAME"] + '/32' 9 | 10 | class CdkRdsStack(core.Stack): 11 | 12 | def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None: 13 | super().__init__(scope, id, **kwargs) 14 | 15 | db_mysql = rds.DatabaseInstance(self, "MySQL", 16 | engine=rds.DatabaseInstanceEngine.mysql( 17 | version=rds.MysqlEngineVersion.VER_5_0_30 18 | ), 19 | instance_type=ec2.InstanceType("m5.4xlarge"), 20 | vpc=vpc, 21 | multi_az=False, 22 | publicly_accessible=True, 23 | allocated_storage=100, 24 | storage_type=rds.StorageType.GP2, 25 | cloudwatch_logs_exports=["audit", "error", "general", "slowquery"], 26 | deletion_protection=False, 27 | delete_automated_backups=False, 28 | backup_retention=core.Duration.days(7), 29 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 30 | self, "para-group-mysql", 31 | parameter_group_name="default.mysql5.7" 32 | ) 33 | ) 34 | db_mysql.connections.allow_default_port_from(ec2.Peer.ipv4(c9_ip), "Cloud9 MySQL Access") 35 | 36 | core.CfnOutput( self, "RDSInstanceId", value = db_mysql.instance_identifier) 37 | -------------------------------------------------------------------------------- /graviton/rds_graviton/rds_mysql_5.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import os 8 | 9 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 10 | 11 | class CdkRds5Stack(cdk.Stack): 12 | 13 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 14 | super().__init__(scope, id, **kwargs) 15 | 16 | MySQL5secgroup = ec2.SecurityGroup(self, id="MySQL5secgroup", vpc=vpc) 17 | db_mysql5 = rds.DatabaseInstance(self, "MySQL5", 18 | engine=rds.DatabaseInstanceEngine.mysql( 19 | version=rds.MysqlEngineVersion.of('5.7.44','5.7') 20 | ), 21 | instance_type=ec2.InstanceType("m5.4xlarge"), 22 | vpc=vpc, 23 | multi_az=False, 24 | publicly_accessible=False, 25 | allocated_storage=100, 26 | storage_type=rds.StorageType.GP2, 27 | cloudwatch_logs_exports=["audit", "error", "general", "slowquery"], 28 | deletion_protection=False, 29 | enable_performance_insights=True, 30 | delete_automated_backups=True, 31 | backup_retention=cdk.Duration.days(1), 32 | security_groups=[MySQL5secgroup], 33 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 34 | self, "para-group-mysql", 35 | parameter_group_name="default.mysql5.7" 36 | ) 37 | ) 38 | db_mysql5.connections.allow_default_port_from(ec2.Peer.ipv4(default_vpc_cidr), "Cloud9 MySQL Access") 39 | cdk.CfnOutput( self, "MySQL5RDSInstanceId", value = db_mysql5.instance_identifier) 40 | cdk.CfnOutput( self, "MySQL5SecretArn", value = db_mysql5.secret.secret_arn) 41 | -------------------------------------------------------------------------------- /graviton/rds_graviton/rds_mysql_8.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import os 8 | 9 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 10 | 11 | class CdkRds8Stack(cdk.Stack): 12 | 13 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 14 | super().__init__(scope, id, **kwargs) 15 | 16 | MySQL8secgroup = ec2.SecurityGroup(self, id="MySQL8secgroup", vpc=vpc) 17 | db_mysql8 = rds.DatabaseInstance(self, "MySQL8", 18 | engine=rds.DatabaseInstanceEngine.mysql( 19 | version=rds.MysqlEngineVersion.of('8.0.36','8.0') 20 | ), 21 | instance_type=ec2.InstanceType("m5.4xlarge"), 22 | vpc=vpc, 23 | multi_az=False, 24 | publicly_accessible=False, 25 | allocated_storage=100, 26 | storage_type=rds.StorageType.GP2, 27 | cloudwatch_logs_exports=["error", "general", "slowquery"], 28 | deletion_protection=False, 29 | enable_performance_insights=True, 30 | delete_automated_backups=True, 31 | backup_retention=cdk.Duration.days(0), 32 | security_groups=[MySQL8secgroup], 33 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 34 | self, "para-group-mysql", 35 | parameter_group_name="default.mysql8.0" 36 | ) 37 | ) 38 | db_mysql8.connections.allow_default_port_from(ec2.Peer.ipv4(default_vpc_cidr), "Cloud9 MySQL Access") 39 | 40 | cdk.CfnOutput( self, "MySQL8RDSInstanceId", value = db_mysql8.instance_identifier) 41 | cdk.CfnOutput( self, "MySQL8SecretArn", value = db_mysql8.secret.secret_arn) 42 | -------------------------------------------------------------------------------- /graviton/rds_graviton/rds_pg_restore.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import aws_cdk.aws_ssm as ssm 8 | import os 9 | 10 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 11 | 12 | 13 | class CdkRdsPgRestoreStack(cdk.Stack): 14 | 15 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 16 | super().__init__(scope, id, **kwargs) 17 | 18 | Pgsqlsecgroup = ec2.SecurityGroup(self, id="Pgsqlsecgroup", vpc=vpc) 19 | snapshot_id = ssm.StringParameter.value_for_string_parameter(self ,"graviton_rds_pg_lab_snapshot") 20 | g2_db_pgsql14 = rds.DatabaseInstanceFromSnapshot(self, "GravitonPgSQL14", 21 | engine=rds.DatabaseInstanceEngine.postgres( 22 | version=rds.PostgresEngineVersion.VER_14_11 23 | ), 24 | instance_type=ec2.InstanceType("m6g.4xlarge"), 25 | snapshot_identifier=snapshot_id, 26 | vpc=vpc, 27 | multi_az=False, 28 | publicly_accessible=False, 29 | allocated_storage=100, 30 | storage_type=rds.StorageType.GP2, 31 | cloudwatch_logs_exports=["postgresql"], 32 | enable_performance_insights=True, 33 | deletion_protection=False, 34 | delete_automated_backups=True, 35 | backup_retention=cdk.Duration.days(0), 36 | security_groups=[Pgsqlsecgroup], 37 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 38 | self, "para-group-pgsql", 39 | parameter_group_name="default.postgres14" 40 | ) 41 | ) 42 | g2_db_pgsql14.connections.allow_default_port_from(ec2.Peer.ipv4(default_vpc_cidr), "Cloud9 PgSQL Access") 43 | cdk.CfnOutput( self, "PgSQL14RDSInstanceId", value = g2_db_pgsql14.instance_identifier) 44 | cdk.CfnOutput( self, "PgSQL14RSecurityGroup", value = Pgsqlsecgroup.security_group_id) -------------------------------------------------------------------------------- /graviton/rds_graviton/rds_pgsql_14.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import os 8 | 9 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 10 | 11 | class CdkPgSQLStack(cdk.Stack): 12 | 13 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 14 | super().__init__(scope, id, **kwargs) 15 | 16 | Pgsqlsecgroup = ec2.SecurityGroup(self, id="Pgsqlsecgroup", vpc=vpc) 17 | pgsql_db = rds.DatabaseInstance(self, "PgSQL14", 18 | engine=rds.DatabaseInstanceEngine.postgres( 19 | version=rds.PostgresEngineVersion.VER_14_11 20 | ), 21 | instance_type=ec2.InstanceType("m5.4xlarge"), 22 | vpc=vpc, 23 | multi_az=False, 24 | publicly_accessible=False, 25 | allocated_storage=100, 26 | storage_type=rds.StorageType.GP2, 27 | cloudwatch_logs_exports=["postgresql"], 28 | deletion_protection=False, 29 | enable_performance_insights=True, 30 | delete_automated_backups=True, 31 | backup_retention=cdk.Duration.days(0), 32 | security_groups=[Pgsqlsecgroup], 33 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 34 | self, "para-group-pgsql", 35 | parameter_group_name="default.postgres14" 36 | ) 37 | ) 38 | pgsql_db.connections.allow_default_port_from(ec2.Peer.ipv4(default_vpc_cidr), "Cloud9 PgSQL Access") 39 | 40 | cdk.CfnOutput( self, "PgSQL14RDSInstanceId", value = pgsql_db.instance_identifier) 41 | cdk.CfnOutput( self, "PgSQL14SecretArn", value = pgsql_db.secret.secret_arn) -------------------------------------------------------------------------------- /graviton/rds_graviton/rds_restore.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | from constructs import Construct 5 | import aws_cdk.aws_ec2 as ec2 6 | import aws_cdk.aws_rds as rds 7 | import aws_cdk.aws_ssm as ssm 8 | import os 9 | 10 | default_vpc_cidr = os.environ["DefaultRouteCidr"] 11 | 12 | 13 | class CdkRdsRestoreStack(cdk.Stack): 14 | 15 | def __init__(self, scope: Construct, id: str, vpc, **kwargs) -> None: 16 | super().__init__(scope, id, **kwargs) 17 | 18 | MySQL8Rsecgroup = ec2.SecurityGroup(self, id="MySQL8Rsecgroup", vpc=vpc) 19 | snapshot_id = ssm.StringParameter.value_for_string_parameter(self ,"graviton_rds_lab_snapshot") 20 | g2_db_mysql8 = rds.DatabaseInstanceFromSnapshot(self, "GravitonMySQL", 21 | engine=rds.DatabaseInstanceEngine.mysql( 22 | version=rds.MysqlEngineVersion.of('8.0.36','8.0') 23 | ), 24 | instance_type=ec2.InstanceType("m6g.4xlarge"), 25 | snapshot_identifier=snapshot_id, 26 | vpc=vpc, 27 | multi_az=False, 28 | publicly_accessible=False, 29 | allocated_storage=100, 30 | storage_type=rds.StorageType.GP2, 31 | cloudwatch_logs_exports=["error", "general", "slowquery"], 32 | enable_performance_insights=True, 33 | deletion_protection=False, 34 | delete_automated_backups=True, 35 | backup_retention=cdk.Duration.days(0), 36 | security_groups=[MySQL8Rsecgroup], 37 | parameter_group=rds.ParameterGroup.from_parameter_group_name( 38 | self, "para-group-mysql", 39 | parameter_group_name="default.mysql8.0" 40 | ) 41 | ) 42 | g2_db_mysql8.connections.allow_default_port_from(ec2.Peer.ipv4(default_vpc_cidr), "Cloud9 MySQL Access") 43 | cdk.CfnOutput( self, "G2MySQL8RDSInstanceId", value = g2_db_mysql8.instance_identifier) 44 | cdk.CfnOutput( self, "MySQL8RSecurityGroup", value = MySQL8Rsecgroup.security_group_id) 45 | -------------------------------------------------------------------------------- /graviton/vpc_base/vpc.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import aws_cdk as cdk 4 | import aws_cdk.aws_ec2 as ec2 5 | 6 | 7 | class CdkVpcStack(cdk.Stack): 8 | 9 | def __init__(self, scope: cdk.Stack, id=str, **kwargs): 10 | super().__init__(scope, id, **kwargs) 11 | 12 | self.vpc = ec2.Vpc( 13 | self, "BaseVPC", 14 | max_azs=2, 15 | ip_addresses=ec2.IpAddresses.cidr("10.0.0.0/16"), 16 | enable_dns_support=True, 17 | enable_dns_hostnames=True, 18 | subnet_configuration=[ec2.SubnetConfiguration( 19 | subnet_type=ec2.SubnetType.PUBLIC, 20 | name="Public", 21 | cidr_mask=24 22 | ), 23 | ec2.SubnetConfiguration( 24 | subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS, 25 | name="Private", 26 | cidr_mask=24 27 | ) 28 | ], 29 | nat_gateways=2, 30 | ) 31 | 32 | -------------------------------------------------------------------------------- /pom-java11.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.company 8 | web-bookshop 9 | 4.1.2 10 | 11 | 12 | 1.7.21 13 | com.company.example.web.bookshop.VertxBookShopREST 14 | 15 | 16 | 17 | 18 | 19 | io.vertx 20 | vertx-core 21 | ${project.version} 22 | 23 | 24 | 25 | io.vertx 26 | vertx-web 27 | ${project.version} 28 | 29 | 30 | 31 | io.vertx 32 | vertx-web-proxy 33 | ${project.version} 34 | 35 | 36 | 37 | org.slf4j 38 | slf4j-jdk14 39 | ${slf4j.version} 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 56 | 57 | 58 | org.apache.maven.plugins 59 | maven-compiler-plugin 60 | 3.8.0 61 | 62 | 11 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | org.apache.maven.plugins 71 | maven-shade-plugin 72 | 2.3 73 | 74 | 75 | package 76 | 77 | shade 78 | 79 | 80 | 81 | 82 | 83 | io.vertx.core.Launcher 84 | ${main.verticle} 85 | 86 | 87 | 88 | 89 | 90 | 91 | ${project.build.directory}/${project.artifactId}-${project.version}-fat.jar 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /pom-java8.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.company 8 | web-bookshop 9 | 4.1.2 10 | 11 | 12 | 1.7.21 13 | com.company.example.web.bookshop.VertxBookShopREST 14 | 15 | 16 | 17 | 18 | 19 | io.vertx 20 | vertx-core 21 | ${project.version} 22 | 23 | 24 | 25 | io.vertx 26 | vertx-web 27 | ${project.version} 28 | 29 | 30 | 31 | io.vertx 32 | vertx-web-proxy 33 | ${project.version} 34 | 35 | 36 | 37 | org.slf4j 38 | slf4j-jdk14 39 | ${slf4j.version} 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | maven-compiler-plugin 50 | 3.1 51 | 52 | 1.8 53 | 1.8 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | org.apache.maven.plugins 62 | maven-shade-plugin 63 | 2.3 64 | 65 | 66 | package 67 | 68 | shade 69 | 70 | 71 | 72 | 73 | 74 | io.vertx.core.Launcher 75 | ${main.verticle} 76 | 77 | 78 | 79 | 80 | 81 | 82 | ${project.build.directory}/${project.artifactId}-${project.version}-fat.jar 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pytest==6.2.5 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aws-cdk-lib==2.133.0 2 | aws-cdk.aws-lambda-python-alpha==2.133.0a0 3 | constructs>=10.0.0,<11.0.0 4 | aws_cdk.lambda_layer_kubectl_v29==2.0.0 5 | -------------------------------------------------------------------------------- /scripts/cfn-template.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/awslabs/aws-cloudformation-templates 2 | AWSTemplateFormatVersion: "2010-09-09" 3 | Metadata: 4 | License: Apache-2.0 5 | Description: "Lab infrastructure template" 6 | Parameters: 7 | AzName: 8 | Description: Name of the AZ in which the placement group is to be created 9 | Type: AWS::EC2::AvailabilityZone::Name 10 | ConstraintDescription: must be the name of a valid EC2 AvailabilityZone. 11 | KeyName: 12 | Description: Name of an existing EC2 KeyPair to enable SSH access to the instance 13 | Type: AWS::EC2::KeyPair::KeyName 14 | ConstraintDescription: must be the name of an existing EC2 KeyPair. 15 | InstanceTypeCnt: 16 | Description: TestClient EC2 instance type 17 | Type: String 18 | Default: m5.large 19 | AllowedValues: [m5.large, m5.xlarge, m5.2xlarge, m5.4xlarge] 20 | ConstraintDescription: must be a valid EC2 instance type. 21 | InstanceTypeSut: 22 | Description: TestServer EC2 instance type 23 | Type: String 24 | Default: m6g.medium 25 | AllowedValues: [m6g.medium, m6g.large, m6g.xlarge, m6g.2xlarge, m6g.4xlarge] 26 | ConstraintDescription: must be a valid EC2 instance type. 27 | SSHLocation: 28 | Description: The IP address range that can be used to SSH to the EC2 instances 29 | Type: String 30 | MinLength: 9 31 | MaxLength: 18 32 | Default: 0.0.0.0/0 33 | AllowedPattern: (\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2}) 34 | ConstraintDescription: must be a valid IP CIDR range of the form x.x.x.x/x. 35 | LatestAmiIdX86: 36 | Description: The AMI to be used when creating a test client instance (x86_64) 37 | Type: "AWS::SSM::Parameter::Value" 38 | Default: "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2" 39 | LatestAmiIdArm64: 40 | Description: The AMI to be used when creating a test server instance (arm64) 41 | Type: "AWS::SSM::Parameter::Value" 42 | Default: "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-arm64-gp2" 43 | Resources: 44 | # Add IAM permissions and roles: AmazonS3FullAccess, AmazonSSMManagedInstanceCore, CloudWatchFullAccess 45 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html 46 | ServiceRole: 47 | Type: "AWS::IAM::Role" 48 | Properties: 49 | AssumeRolePolicyDocument: 50 | Version: "2012-10-17" 51 | Statement: 52 | - Effect: Allow 53 | Principal: 54 | Service: 55 | - "ec2.amazonaws.com" 56 | Action: 57 | - "sts:AssumeRole" 58 | ManagedPolicyArns: 59 | - "arn:aws:iam::aws:policy/AmazonS3FullAccess" 60 | - "arn:aws:iam::aws:policy/CloudWatchFullAccess" 61 | - "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" 62 | - "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" 63 | # EC2 instance profile 64 | InstanceProfile: 65 | Type: "AWS::IAM::InstanceProfile" 66 | Properties: 67 | Roles: 68 | - !Ref "ServiceRole" 69 | # Cluster placement group 70 | PlacementGroup: 71 | Type: "AWS::EC2::PlacementGroup" 72 | Properties: 73 | Strategy: "cluster" 74 | # Test client 75 | EC2InstanceCnt: 76 | Type: "AWS::EC2::Instance" 77 | Properties: 78 | InstanceType: !Ref "InstanceTypeCnt" 79 | UserData: !Base64 | 80 | #!/bin/bash 81 | sudo yum update -y 82 | sudo yum install jq -y 83 | sudo yum install git -y 84 | sudo yum groupinstall "Development Tools" -y 85 | sudo yum install openssl-devel -y 86 | git clone https://github.com/giltene/wrk2.git 87 | make -C wrk2/ 88 | sudo cp wrk2/wrk /usr/local/bin/wrk 89 | sudo amazon-linux-extras install -y python3.8 90 | sudo alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1 91 | python3 -m ensurepip --upgrade 92 | python3 -m pip install matplotlib 93 | # Adding instance profile 94 | IamInstanceProfile: !Ref "InstanceProfile" 95 | SecurityGroups: 96 | - !Ref "InstanceSecurityGroupSsh" 97 | KeyName: !Ref "KeyName" 98 | ImageId: !Ref "LatestAmiIdX86" 99 | # PlacementGroupName: !Ref "PlacementGroup" 100 | AvailabilityZone: !Ref "AzName" 101 | # First SUT node 102 | EC2InstanceSut1: 103 | Type: "AWS::EC2::Instance" 104 | Properties: 105 | InstanceType: !Ref "InstanceTypeSut" 106 | UserData: !Base64 | 107 | #!/bin/bash 108 | sudo yum update -y 109 | sudo yum install git -y 110 | sudo amazon-linux-extras enable corretto8 111 | sudo yum clean metadata 112 | sudo yum install java-1.8.0-amazon-corretto-devel -y 113 | sudo yum install java-11-amazon-corretto-headless -y 114 | sudo update-alternatives --set java /usr/lib/jvm/java-1.8.0-amazon-corretto.aarch64/jre/bin/java 115 | sudo yum install maven -y 116 | sudo yum install amazon-cloudwatch-agent -y 117 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 118 | { 119 | "metrics": { 120 | "metrics_collected": { 121 | "mem": { 122 | "measurement": [ 123 | "mem_used_percent" 124 | ], 125 | "metrics_collection_interval":30 126 | }, 127 | "swap": { 128 | "measurement": [ 129 | "swap_used_percent" 130 | ], 131 | "metrics_collection_interval":30 132 | } 133 | } 134 | } 135 | } 136 | EOF 137 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 138 | -a fetch-config \ 139 | -m ec2 \ 140 | -s \ 141 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 142 | # Adding instance profile 143 | IamInstanceProfile: !Ref "InstanceProfile" 144 | SecurityGroups: 145 | - !Ref "InstanceSecurityGroupSsh" 146 | - !Ref "InstanceSecurityGroupHttp" 147 | KeyName: !Ref "KeyName" 148 | ImageId: !Ref "LatestAmiIdArm64" 149 | # PlacementGroupName: !Ref "PlacementGroup" 150 | AvailabilityZone: !Ref "AzName" 151 | # Second SUT node 152 | EC2InstanceSut2: 153 | Type: "AWS::EC2::Instance" 154 | Properties: 155 | InstanceType: !Ref "InstanceTypeSut" 156 | UserData: !Base64 | 157 | #!/bin/bash 158 | sudo yum update -y 159 | sudo yum install git -y 160 | sudo amazon-linux-extras enable corretto8 161 | sudo yum clean metadata 162 | sudo yum install java-1.8.0-amazon-corretto-devel -y 163 | sudo yum install java-11-amazon-corretto-headless -y 164 | sudo update-alternatives --set java /usr/lib/jvm/java-11-amazon-corretto.aarch64/bin/java 165 | sudo yum install maven -y 166 | sudo yum install amazon-cloudwatch-agent -y 167 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 168 | { 169 | "metrics": { 170 | "metrics_collected": { 171 | "mem": { 172 | "measurement": [ 173 | "mem_used_percent" 174 | ], 175 | "metrics_collection_interval":30 176 | }, 177 | "swap": { 178 | "measurement": [ 179 | "swap_used_percent" 180 | ], 181 | "metrics_collection_interval":30 182 | } 183 | } 184 | } 185 | } 186 | EOF 187 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 188 | -a fetch-config \ 189 | -m ec2 \ 190 | -s \ 191 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 192 | # Adding instance profile 193 | IamInstanceProfile: !Ref "InstanceProfile" 194 | SecurityGroups: 195 | - !Ref "InstanceSecurityGroupSsh" 196 | - !Ref "InstanceSecurityGroupHttp" 197 | KeyName: !Ref "KeyName" 198 | ImageId: !Ref "LatestAmiIdArm64" 199 | # PlacementGroupName: !Ref "PlacementGroup" 200 | AvailabilityZone: !Ref "AzName" 201 | # Test client SG 202 | InstanceSecurityGroupSsh: 203 | Type: "AWS::EC2::SecurityGroup" 204 | Properties: 205 | GroupDescription: Enable SSH access via port 22 206 | SecurityGroupIngress: 207 | - IpProtocol: tcp 208 | FromPort: 22 209 | ToPort: 22 210 | CidrIp: !Ref "SSHLocation" 211 | # SUT node SG 212 | InstanceSecurityGroupHttp: 213 | Type: "AWS::EC2::SecurityGroup" 214 | Properties: 215 | GroupDescription: Enable SSH access via port 22 216 | SecurityGroupIngress: 217 | - IpProtocol: tcp 218 | FromPort: 8080 219 | ToPort: 8080 220 | CidrIp: !Join ["/", [!GetAtt [EC2InstanceCnt, PublicIp], "32"]] 221 | Outputs: 222 | InstanceIdCnt: 223 | Description: InstanceId of the newly created EC2 instance 224 | Value: !Ref "EC2InstanceCnt" 225 | AZCnt: 226 | Description: Availability Zone of the newly created EC2 instance 227 | Value: !GetAtt ["EC2InstanceCnt", "AvailabilityZone"] 228 | PublicDNSCnt: 229 | Description: Public DNSName of the newly created EC2 instance 230 | Value: !GetAtt ["EC2InstanceCnt", "PublicDnsName"] 231 | PublicIPCnt: 232 | Description: Public IP address of the newly created EC2 instance 233 | Value: !GetAtt ["EC2InstanceCnt", "PublicIp"] 234 | InstanceIdSut1: 235 | Description: InstanceId of the newly created EC2 instance 236 | Value: !Ref "EC2InstanceSut1" 237 | AZSut1: 238 | Description: Availability Zone of the newly created EC2 instance 239 | Value: !GetAtt ["EC2InstanceSut1", "AvailabilityZone"] 240 | PublicDNSSut1: 241 | Description: Public DNSName of the newly created EC2 instance 242 | Value: !GetAtt ["EC2InstanceSut1", "PublicDnsName"] 243 | PublicIPSut1: 244 | Description: Public IP address of the newly created EC2 instance 245 | Value: !GetAtt ["EC2InstanceSut1", "PublicIp"] 246 | InstanceIdSut2: 247 | Description: InstanceId of the newly created EC2 instance 248 | Value: !Ref "EC2InstanceSut2" 249 | AZSut2: 250 | Description: Availability Zone of the newly created EC2 instance 251 | Value: !GetAtt ["EC2InstanceSut2", "AvailabilityZone"] 252 | PublicDNSSut2: 253 | Description: Public DNSName of the newly created EC2 instance 254 | Value: !GetAtt ["EC2InstanceSut2", "PublicDnsName"] 255 | PublicIPSut2: 256 | Description: Public IP address of the newly created EC2 instance 257 | Value: !GetAtt ["EC2InstanceSut2", "PublicIp"] 258 | -------------------------------------------------------------------------------- /scripts/client: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo yum update -y 3 | sudo yum install jq git-core 4 | sudo yum groupinstall "Development Tools" -y 5 | sudo yum install openssl-devel -y 6 | cd ~/ 7 | git clone https://github.com/giltene/wrk2.git 8 | make -C wrk2/ 9 | sudo cp wrk2/wrk /usr/local/bin/wrk -------------------------------------------------------------------------------- /scripts/create_emr_buckets.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | export emr_s3_uuid=`cat /dev/urandom | tr -dc 'a-z' | fold -w 32 | head -n 1` 5 | export emr_s3_name=graviton-emr-lab-$emr_s3_uuid 6 | 7 | echo "Creating S3 Bucket for EMR" 8 | 9 | 10 | aws s3 mb s3://$emr_s3_name 11 | echo "Blocking public access" 12 | aws s3api put-public-access-block --bucket $emr_s3_name --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" 13 | 14 | echo "Creating folders" 15 | aws s3api put-object --bucket $emr_s3_name --key input/ 16 | aws s3api put-object --bucket $emr_s3_name --key output/ 17 | aws s3api put-object --bucket $emr_s3_name --key files/ 18 | aws s3api put-object --bucket $emr_s3_name --key logs/ 19 | 20 | aws s3 cp ~/environment/graviton-workshop/scripts/tripdata.csv s3://$emr_s3_name/input/ 21 | -------------------------------------------------------------------------------- /scripts/cs_cleanup.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | aws ssm delete-parameter --name "graviton_net_container_uri" 5 | aws ssm delete-parameter --name "graviton_lab_container_uri" 6 | kubectl delete all --all -n aspnet 7 | kubectl delete all --all -n multiarch 8 | aws ecr delete-repository --repository-name graviton-pipeline-lab --force 9 | aws ecr delete-repository --repository-name graviton-aspnet-lab --force 10 | 11 | cdk destroy GravitonID-eks -f 12 | cdk destroy GravitonID-ecs -f 13 | cdk destroy GravitonID-pipeline-dotnet -f 14 | cdk destroy GravitonID-pipeline -f 15 | -------------------------------------------------------------------------------- /scripts/ec2_module_sut_1: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # add below here the specifics for the ec2 module / langauage needed 11 | -------------------------------------------------------------------------------- /scripts/ec2_module_sut_2: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # add below here the specifics for the ec2 module / langauage needed 11 | -------------------------------------------------------------------------------- /scripts/ec2_module_sut_3: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # add below here the specifics for the ec2 module / langauage needed 11 | -------------------------------------------------------------------------------- /scripts/ec2_module_test_client: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo yum update -y 3 | sudo yum install jq git-core 4 | sudo yum groupinstall "Development Tools" -y 5 | sudo yum install openssl-devel -y 6 | cd ~/ 7 | git clone https://github.com/giltene/wrk2.git 8 | make -C wrk2/ 9 | sudo cp wrk2/wrk /usr/local/bin/wrk -------------------------------------------------------------------------------- /scripts/elasticsearch-generate-data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 4 | # 5 | # This script inserts random data into an index named "people" within an Amazon 6 | # Elasticsearch cluster specified by the user. The script runs in an 7 | # uninterrupted fashion until stopped. 8 | 9 | import argparse 10 | import os 11 | import sys 12 | import time 13 | 14 | 15 | import boto3 16 | from elasticsearch import Elasticsearch, RequestsHttpConnection 17 | from elasticsearch.exceptions import ConnectionTimeout 18 | from faker import Faker 19 | from requests_aws4auth import AWS4Auth 20 | 21 | 22 | def main(endpoint): 23 | if 'AWS_REGION' not in os.environ: 24 | print("The AWS_REGION environment variable has not been set.") 25 | sys.exit(1) 26 | 27 | region = os.environ['AWS_REGION'] 28 | 29 | service = 'es' 30 | credentials = boto3.Session().get_credentials() 31 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token) 32 | 33 | fake = Faker() 34 | 35 | es = Elasticsearch( 36 | hosts = [{'host': endpoint, 'port': 443}], 37 | http_auth = awsauth, 38 | use_ssl = True, 39 | verify_certs = True, 40 | connection_class = RequestsHttpConnection 41 | ) 42 | 43 | while True: 44 | document = fake.profile() 45 | 46 | try: 47 | result = es.index(index="people", doc_type="_doc", body=document) 48 | print("Indexed with ID '%s'" % result['_id']) 49 | time.sleep(0.25) 50 | 51 | except ConnectionTimeout as e: 52 | print("Connection to the ES cluster timed out: %s" % str(e)) 53 | time.sleep(2) 54 | 55 | except Exception as e: 56 | print("Unexpected exception caught: %s" % str(e)) 57 | time.sleep(2) 58 | 59 | if __name__ == '__main__': 60 | parser = argparse.ArgumentParser(description='Inserts random data into an Amazon Elasticsearch domain') 61 | parser.add_argument('endpoint', 62 | help='the domain endpoint (e.g., "vpc-foo-123456789abcdefghijklmnopq.eu-west-1.es.amazonaws.com")') 63 | 64 | args = parser.parse_args() 65 | 66 | main(args.endpoint) 67 | -------------------------------------------------------------------------------- /scripts/elasticsearch-search.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 4 | # 5 | # This script performs a search on an index called "people" within an Amazon 6 | # Elasticsearch domain specificed by the user. 7 | 8 | import argparse 9 | import os 10 | import sys 11 | 12 | 13 | import boto3 14 | from elasticsearch import Elasticsearch, RequestsHttpConnection 15 | from requests_aws4auth import AWS4Auth 16 | 17 | 18 | def main(endpoint, search_term): 19 | if 'AWS_REGION' not in os.environ: 20 | print("The AWS_REGION environment variable has not been set.") 21 | sys.exit(1) 22 | 23 | region = os.environ['AWS_REGION'] 24 | 25 | service = 'es' 26 | credentials = boto3.Session().get_credentials() 27 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token) 28 | 29 | es = Elasticsearch( 30 | hosts = [{'host': endpoint, 'port': 443}], 31 | http_auth = awsauth, 32 | use_ssl = True, 33 | verify_certs = True, 34 | connection_class = RequestsHttpConnection 35 | ) 36 | 37 | search_body = { 38 | "query": { 39 | "match": { 40 | "job": search_term 41 | } 42 | } 43 | } 44 | 45 | result = es.search(index="people", body=search_body) 46 | 47 | if not len(result['hits']['hits']): 48 | print("Your search produced no results.") 49 | else: 50 | print("Your search produced %d results: \n" % len(result['hits']['hits'])) 51 | 52 | for hit in result['hits']['hits']: 53 | print(" - Name: %s\n Job: %s\n Score: %s" % (hit['_source']['name'], hit['_source']['job'], hit['_score'])) 54 | 55 | if __name__ == '__main__': 56 | parser = argparse.ArgumentParser(description='Performs a search on an Amazon Elasticsearch domain, on the "people" index by job title') 57 | parser.add_argument('endpoint', 58 | help='the domain endpoint (e.g., "vpc-foo-123456789abcdefghijklmnopq.eu-west-1.es.amazonaws.com")') 59 | parser.add_argument('search_term', 60 | help='the term to search') 61 | 62 | args = parser.parse_args() 63 | 64 | main(args.endpoint, args.search_term) 65 | -------------------------------------------------------------------------------- /scripts/etl-spark.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | 3 | import sys 4 | from datetime import datetime 5 | 6 | from pyspark.sql import SparkSession 7 | from pyspark.sql.functions import * 8 | 9 | if __name__ == "__main__": 10 | 11 | print(len(sys.argv)) 12 | if (len(sys.argv) != 3): 13 | print("Usage: spark-etl [input-folder] [output-folder]") 14 | sys.exit(0) 15 | 16 | spark = SparkSession\ 17 | .builder\ 18 | .appName("SparkETL")\ 19 | .getOrCreate() 20 | 21 | nyTaxi = spark.read.option("inferSchema", "true").option("header", "true").csv(sys.argv[1]) 22 | 23 | updatedNYTaxi = nyTaxi.withColumn("current_date", lit(datetime.now())) 24 | 25 | updatedNYTaxi.printSchema() 26 | 27 | print(updatedNYTaxi.show()) 28 | 29 | print("Total number of records: " + str(updatedNYTaxi.count())) 30 | 31 | updatedNYTaxi.write.parquet(sys.argv[2]) 32 | -------------------------------------------------------------------------------- /scripts/plot_results.py: -------------------------------------------------------------------------------- 1 | """Python script for plotting result data from wrk2.""" 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 4 | 5 | import argparse 6 | import csv 7 | import os 8 | 9 | import matplotlib.pyplot as plt 10 | 11 | data_dict = {} 12 | 13 | parser = argparse.ArgumentParser(description="Graphing script for wrk2_wrapper.sh") 14 | parser.add_argument("--input_files", "-if", nargs="+", help="CSV output files from wrk_wrapper.sh") 15 | parser.add_argument( 16 | "--output_file", "-of", type=str, help="The file in which the Matplotlib graph will be saved" 17 | ) 18 | 19 | 20 | def check_for_file(filename: str) -> bool: 21 | """Check if a file exists.""" 22 | return os.path.exists(filename) 23 | 24 | 25 | def read_file(file_desc: str) -> csv.DictReader: 26 | """Given a filename return a csv.DictReader object.""" 27 | return csv.DictReader(file_desc) 28 | 29 | 30 | def remove_suffix(input_string, suffix): 31 | if suffix and input_string.endswith(suffix): 32 | return input_string[: -len(suffix)] 33 | return input_string 34 | 35 | 36 | def remove_prefix(input_string, prefix): 37 | if prefix and input_string.endswith(prefix): 38 | return input_string[len(prefix) :] 39 | return input_string 40 | 41 | 42 | def process_file(filepath: str, data_dict: dict) -> None: 43 | """Read a CSV file and iterate through all rows. 44 | 45 | Stores all records in an in-memory dict. 46 | """ 47 | # We're expecting the node_name in the file path 48 | # e.g., test-result-10.0.3.128.csv 49 | filename = filepath.split("/")[-1] 50 | no_ext = remove_suffix(filename, ".csv") 51 | node_name = remove_prefix(no_ext, "test-result-") 52 | 53 | with open(filepath, newline="") as open_file: 54 | dict_reader = read_file(file_desc=open_file) 55 | for row in dict_reader: 56 | _rps = row.get("Requests Per Second") 57 | dict_title = f"{_rps} - {node_name}" 58 | rps = data_dict.get(dict_title) 59 | 60 | if rps: 61 | # it exists, append 62 | rps["percentile"].append(float(row.get("Percentile"))) 63 | rps["latency"].append(float(row.get("Value (ms)"))) 64 | else: 65 | data_dict[dict_title] = {} 66 | data_dict[dict_title]["percentile"] = [float(row.get("Percentile"))] 67 | data_dict[dict_title]["latency"] = [float(row.get("Value (ms)"))] 68 | 69 | 70 | def generate_plot(data_dict: dict, output_file: str): 71 | """Use matplotlib.pyplot to generate a graph from the given data dictionary.""" 72 | 73 | for k, v in data_dict.items(): 74 | plt.plot(v["percentile"], v["latency"], label=str(k), alpha=0.7) 75 | 76 | plt.suptitle( 77 | "API Server latency distribution by percentile", 78 | fontsize=14, 79 | ) 80 | plt.xlabel("Percentile") 81 | plt.ylabel("Latency in MS") 82 | plt.legend(loc="center left", bbox_to_anchor=(1, 0)) 83 | plt.grid() 84 | plt.savefig(output_file, bbox_inches="tight") 85 | 86 | 87 | if __name__ == "__main__": 88 | # Create an empty container for results 89 | data_dict = {} 90 | 91 | args = parser.parse_args() 92 | 93 | for file in args.input_files: 94 | if check_for_file(file): 95 | process_file(filepath=file, data_dict=data_dict) 96 | else: 97 | print(f"Cannot find file {file}") 98 | 99 | if data_dict: 100 | generate_plot(data_dict=data_dict, output_file=args.output_file) 101 | -------------------------------------------------------------------------------- /scripts/rds-peering-cleanup.sh: -------------------------------------------------------------------------------- 1 | ### Gathering VPC IDs 2 | #export DefaultVpcId=$(aws ec2 describe-vpcs --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 3 | export DefaultVpcId=$(aws ec2 describe-vpcs --filter "Name=isDefault, Values=true" --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 4 | export GravitonVpcId=$(aws ec2 describe-vpcs --filters "Name=tag:Name,Values=GravitonID-base/BaseVPC" --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 5 | echo $DefaultVpcId 6 | echo $GravitonVpcId 7 | 8 | ### Route Table VPC IDs 9 | export DefaultRouteTableID=$(aws ec2 describe-route-tables --filters "Name=vpc-id,Values=${DefaultVpcId}" --query 'RouteTables[?Associations[0].Main == `true`]' --output json | jq -r '.[0].RouteTableId') 10 | export GravitonRouteTableID=$(aws ec2 describe-route-tables --filters "Name=vpc-id,Values=${GravitonVpcId}" --query 'RouteTables[?Associations[0].Main == `true`]' --output json| jq -r '.[0].RouteTableId') 11 | echo $DefaultRouteTableID 12 | echo $GravitonRouteTableID 13 | 14 | ### VPC CIDRs 15 | export DefaultRouteCidr=$(aws ec2 describe-vpcs --filters "Name=vpc-id,Values=${DefaultVpcId}" --query "Vpcs[].CidrBlock" --output json | jq -r '.[0]') 16 | echo $DefaultRouteCidr 17 | echo $GravitonRouteCidr 18 | 19 | #### Graviton VPC CIDR / Source VPC route table config 20 | aws ec2 delete-route --route-table-id "$DefaultRouteTableID" --destination-cidr-block "$GravitonRouteCidr" 21 | aws ec2 delete-route --route-table-id "$GravitonRouteTableID" --destination-cidr-block "$DefaultRouteCidr" 22 | 23 | #### Add routes to Graviton Private Subnets 24 | export GravitonPrivate1RouteTableID=$(aws ec2 describe-route-tables --filters 'Name=tag:Name,Values=GravitonID-base/BaseVPC/PrivateSubnet1' --query 'RouteTables[].Associations[].RouteTableId' --output json| jq -r '.[0]') 25 | export GravitonPrivate2RouteTableID=$(aws ec2 describe-route-tables --filters 'Name=tag:Name,Values=GravitonID-base/BaseVPC/PrivateSubnet2' --query 'RouteTables[].Associations[].RouteTableId' --output json| jq -r '.[0]') 26 | 27 | aws ec2 delete-route --route-table-id "$GravitonPrivate1RouteTableID" --destination-cidr-block "$DefaultRouteCidr" 28 | aws ec2 delete-route --route-table-id "$GravitonPrivate2RouteTableID" --destination-cidr-block "$DefaultRouteCidr" 29 | 30 | ### Create the VPC peering and accept the request 31 | VpcPeeringId=$(aws ec2 describe-vpc-peering-connections --filters 'Name=tag:Name,Values=GravitonID-VPCPeering' --query 'VpcPeeringConnections[*].VpcPeeringConnectionId' --output json | jq -r '.[0]') 32 | 33 | aws ec2 delete-vpc-peering-connection --vpc-peering-connection-id $VpcPeeringId 34 | -------------------------------------------------------------------------------- /scripts/rds-peering.sh: -------------------------------------------------------------------------------- 1 | ### Gathering VPC IDs 2 | #export DefaultVpcId=$(aws ec2 describe-vpcs --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 3 | export DefaultVpcId=$(aws ec2 describe-vpcs --filter "Name=isDefault, Values=true" --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 4 | export GravitonVpcId=$(aws ec2 describe-vpcs --filters "Name=tag:Name,Values=GravitonID-base/BaseVPC" --query "Vpcs[].VpcId" --output json | jq -r '.[0]') 5 | echo $DefaultVpcId 6 | echo $GravitonVpcId 7 | 8 | ### Route Table VPC IDs 9 | export DefaultRouteTableID=$(aws ec2 describe-route-tables --filters "Name=vpc-id,Values=${DefaultVpcId}" --query 'RouteTables[?Associations[0].Main == `true`]' --output json | jq -r '.[0].RouteTableId') 10 | export GravitonRouteTableID=$(aws ec2 describe-route-tables --filters "Name=vpc-id,Values=${GravitonVpcId}" --query 'RouteTables[?Associations[0].Main == `true`]' --output json| jq -r '.[0].RouteTableId') 11 | echo $DefaultRouteTableID 12 | echo $GravitonRouteTableID 13 | 14 | ### VPC CIDRs 15 | export DefaultRouteCidr=$(aws ec2 describe-vpcs --filters "Name=vpc-id,Values=${DefaultVpcId}" --query "Vpcs[].CidrBlock" --output json | jq -r '.[0]') 16 | export GravitonRouteCidr=$(aws ec2 describe-vpcs --filters "Name=vpc-id,Values=${GravitonVpcId}" --query "Vpcs[].CidrBlock" --output json | jq -r '.[0]') 17 | echo $DefaultRouteCidr 18 | echo $GravitonRouteCidr 19 | 20 | ### Create the VPC peering and accept the request 21 | export VpcPeeringId=$(aws ec2 create-vpc-peering-connection --vpc-id "$DefaultVpcId" --peer-vpc-id "$GravitonVpcId" --query VpcPeeringConnection.VpcPeeringConnectionId --output text) 22 | aws ec2 accept-vpc-peering-connection --vpc-peering-connection-id "$VpcPeeringId" 23 | aws ec2 create-tags --resources "$VpcPeeringId" --tags 'Key=Name,Value=GravitonID-VPCPeering' 24 | 25 | 26 | #### Graviton VPC CIDR / Source VPC route table config 27 | aws ec2 create-route --route-table-id "$DefaultRouteTableID" --destination-cidr-block "$GravitonRouteCidr" --vpc-peering-connection-id "$VpcPeeringId" 28 | aws ec2 create-route --route-table-id "$GravitonRouteTableID" --destination-cidr-block "$DefaultRouteCidr" --vpc-peering-connection-id "$VpcPeeringId" 29 | 30 | #### Add routes to Graviton Private Subnets 31 | export GravitonPrivate1RouteTableID=$(aws ec2 describe-route-tables --filters 'Name=tag:Name,Values=GravitonID-base/BaseVPC/PrivateSubnet1' --query 'RouteTables[].Associations[].RouteTableId' --output json| jq -r '.[0]') 32 | export GravitonPrivate2RouteTableID=$(aws ec2 describe-route-tables --filters 'Name=tag:Name,Values=GravitonID-base/BaseVPC/PrivateSubnet2' --query 'RouteTables[].Associations[].RouteTableId' --output json| jq -r '.[0]') 33 | 34 | aws ec2 create-route --route-table-id "$GravitonPrivate1RouteTableID" --destination-cidr-block "$DefaultRouteCidr" --vpc-peering-connection-id "$VpcPeeringId" 35 | aws ec2 create-route --route-table-id "$GravitonPrivate2RouteTableID" --destination-cidr-block "$DefaultRouteCidr" --vpc-peering-connection-id "$VpcPeeringId" 36 | 37 | 38 | -------------------------------------------------------------------------------- /scripts/rds-pg-snapshot.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | snapshot_uuid=`cat /dev/urandom | tr -dc 'a-z' | fold -w 16 | head -n 1` 5 | 6 | 7 | aws ssm delete-parameter --name "graviton_rds_pg_lab_snapshot" >/dev/null 2>&1 8 | 9 | echo "Saving snapshot id using AWS Systems Manager Parameter Store" 10 | 11 | aws ssm put-parameter --name "graviton_rds_pg_lab_snapshot" --value $snapshot_uuid --type String 12 | 13 | echo "Creating RDS database snapshot" 14 | 15 | aws rds create-db-snapshot --db-instance-identifier `aws cloudformation describe-stacks --stack-name GravitonID-rds-pg14 --query "Stacks[0].Outputs[1].OutputValue" --output text` --db-snapshot-identifier $snapshot_uuid >& /dev/null 16 | 17 | echo -e "Your snapshop id : \e[1;32m $snapshot_uuid \e[0m" -------------------------------------------------------------------------------- /scripts/rds-snapshot.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | snapshot_uuid=`cat /dev/urandom | tr -dc 'a-z' | fold -w 16 | head -n 1` 5 | 6 | 7 | aws ssm delete-parameter --name "graviton_rds_lab_snapshot" 8 | 9 | echo "Saving snapshot id using AWS Systems Manager Parameter Store" 10 | 11 | aws ssm put-parameter --name "graviton_rds_lab_snapshot" --value $snapshot_uuid --type String 12 | 13 | echo "Creating RDS database snapshot" 14 | 15 | aws rds create-db-snapshot --db-instance-identifier `aws cloudformation describe-stacks --stack-name GravitonID-rds-8 --query "Stacks[0].Outputs[1].OutputValue" --output text` --db-snapshot-identifier $snapshot_uuid >& /dev/null 16 | 17 | echo -e "Your snapshop id : \e[1;32m $snapshot_uuid \e[0m" 18 | -------------------------------------------------------------------------------- /scripts/resize.sh: -------------------------------------------------------------------------------- 1 | #Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Specify the desired volume size in GiB as a command line argument. If not specified, default to 20 GiB. 5 | SIZE=${1:-20} 6 | 7 | TOKEN=$(curl -X PUT 'http://169.254.169.254/latest/api/token' -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600') 8 | # Get the ID of the environment host Amazon EC2 instance. 9 | INSTANCEID=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id) 10 | REGION=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') 11 | # Get the ID of the Amazon EBS volume associated with the instance. 12 | VOLUMEID=$(aws ec2 describe-instances \ 13 | --instance-id $INSTANCEID \ 14 | --query "Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId" \ 15 | --output text \ 16 | --region $REGION) 17 | 18 | # Resize the EBS volume. 19 | aws ec2 modify-volume --volume-id $VOLUMEID --size $SIZE 20 | 21 | # Wait for the resize to finish. 22 | while [ \ 23 | "$(aws ec2 describe-volumes-modifications \ 24 | --volume-id $VOLUMEID \ 25 | --filters Name=modification-state,Values="optimizing","completed" \ 26 | --query "length(VolumesModifications)"\ 27 | --output text)" != "1" ]; do 28 | sleep 1 29 | done 30 | 31 | #Check if we're on an NVMe filesystem 32 | if [[ -e "/dev/xvda" && $(readlink -f /dev/xvda) = "/dev/xvda" ]] 33 | then 34 | # Rewrite the partition table so that the partition takes up all the space that it can. 35 | sudo growpart /dev/xvda 1 36 | 37 | # Expand the size of the file system. 38 | # Check if we're on AL2 39 | STR=$(cat /etc/os-release) 40 | SUB="VERSION_ID=\"2\"" 41 | if [[ "$STR" == *"$SUB"* ]] 42 | then 43 | sudo xfs_growfs -d / 44 | else 45 | sudo resize2fs /dev/xvda1 46 | fi 47 | 48 | else 49 | # Rewrite the partition table so that the partition takes up all the space that it can. 50 | sudo growpart /dev/nvme0n1 1 51 | 52 | # Expand the size of the file system. 53 | # Check if we're on AL2 54 | STR=$(cat /etc/os-release) 55 | SUB="VERSION_ID=\"2\"" 56 | if [[ "$STR" == *"$SUB"* ]] 57 | then 58 | sudo xfs_growfs -d / 59 | else 60 | sudo resize2fs /dev/nvme0n1p1 61 | fi 62 | fi 63 | -------------------------------------------------------------------------------- /scripts/runner.lua: -------------------------------------------------------------------------------- 1 | -- Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | -- Script for performance testing endpoints of an API 3 | 4 | 5 | req0 = function() 6 | return wrk.format("GET", "/books") 7 | end 8 | 9 | req1 = function() 10 | return wrk.format("GET", "/books/1") 11 | end 12 | 13 | req2 = function() 14 | return wrk.format("GET", "/books/2") 15 | end 16 | 17 | req3 = function() 18 | return wrk.format("GET", "/carts/1") 19 | end 20 | 21 | req4 = function() 22 | return wrk.format("GET", "/carts/2/items") 23 | end 24 | 25 | requests = {} 26 | requests[0] = req0 27 | requests[1] = req1 28 | requests[2] = req2 29 | requests[3] = req3 30 | requests[4] = req4 31 | 32 | request = function() 33 | return requests[math.random(0, 4)]() 34 | end 35 | 36 | function fsize (file) 37 | local current = file:seek() -- get current position 38 | local size = file:seek("end") -- get file size 39 | file:seek("set", current) -- restore position 40 | return size 41 | end 42 | 43 | -- Write latency performance profiles to CSV 44 | done = function(summary, latency, requests) 45 | -- open output file 46 | f = io.open("test-result.csv", "a+") 47 | 48 | local fSize = fsize(f) 49 | 50 | if fSize == 0 then 51 | f:write("Percentile,Value (ms),Requests Per Second\n") 52 | end 53 | 54 | local duration_in_sec = (summary["duration"] / 1000000) 55 | local rps = (summary["requests"] / duration_in_sec) 56 | local _rps = string.format("%.2f RPS", rps) 57 | 58 | for _, p in pairs({ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9, 99.999, 100 }) do 59 | f:write(string.format("%f,%f,%s\n", 60 | p, 61 | -- Dived all values by 1000 to get to ms 62 | (latency:percentile(p) / 1000), -- percentile latency 63 | _rps 64 | ) 65 | ) 66 | end 67 | 68 | f:close() 69 | end 70 | -------------------------------------------------------------------------------- /scripts/sut_1: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # Java compiler and runtime version 11 | sudo amazon-linux-extras enable corretto8 12 | sudo yum clean metadata 13 | sudo yum install java-1.8.0-amazon-corretto-devel -y 14 | sudo yum install java-11-amazon-corretto-headless -y 15 | 16 | # Switch between Java versions 17 | sudo update-alternatives --set java /usr/lib/jvm/java-1.8.0-amazon-corretto.aarch64/jre/bin/java 18 | # sudo update-alternatives --set java /usr/lib/jvm/java-11-amazon-corretto.aarch64/bin/java 19 | 20 | # Build system 21 | sudo yum install maven -y 22 | 23 | # Clone repo 24 | # git clone https://github.com/vert-x3/vertx-examples.git 25 | 26 | # Install and configure CW agent 27 | sudo yum install amazon-cloudwatch-agent -y 28 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 29 | { 30 | "metrics": { 31 | "metrics_collected": { 32 | "mem": { 33 | "measurement": [ 34 | "mem_used_percent" 35 | ], 36 | "metrics_collection_interval":30 37 | }, 38 | "swap": { 39 | "measurement": [ 40 | "swap_used_percent" 41 | ], 42 | "metrics_collection_interval":30 43 | } 44 | } 45 | } 46 | } 47 | EOF 48 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 49 | -a fetch-config \ 50 | -m ec2 \ 51 | -s \ 52 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 53 | 54 | # Configure Vertx access logging (optional) 55 | # aws s3 cp s3://graviton-perf-module/SimpleREST.java \ 56 | # vertx-examples/web-examples/src/main/java/io/vertx/example/web/rest/SimpleREST.java 57 | 58 | # Maven build 59 | # aws s3 cp s3://graviton-perf-module/pom-java8.xml vertx-examples/web-examples/pom.xml 60 | # mvn package -f vertx-examples/web-examples/pom.xml 61 | 62 | # Launch verticle 63 | # nohup java -XX:+PerfDisableSharedMem -XX:+UseG1GC -jar vertx-examples/web-examples/target/web-examples-4.1.2-fat.jar & 64 | 65 | # Test verticle 66 | # curl http://localhost:8080/products 67 | -------------------------------------------------------------------------------- /scripts/sut_2: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # Java compiler and runtime version 11 | sudo amazon-linux-extras enable corretto8 12 | sudo yum clean metadata 13 | sudo yum install java-1.8.0-amazon-corretto-devel -y 14 | sudo yum install java-11-amazon-corretto-headless -y 15 | 16 | # Switch between Java versions 17 | # sudo update-alternatives --set java /usr/lib/jvm/java-1.8.0-amazon-corretto.aarch64/jre/bin/java 18 | sudo update-alternatives --set java /usr/lib/jvm/java-11-amazon-corretto.aarch64/bin/java 19 | 20 | # Build system 21 | sudo yum install maven -y 22 | 23 | # Hot fix: using `alternatives` does not fix the version used by Maven!! 24 | # export JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto.aarch64 25 | # export PATH=$JAVA_HOME/bin:$PATH 26 | # 27 | # Clone repo 28 | # git clone https://github.com/vert-x3/vertx-examples.git 29 | 30 | # Install and configure CW agent (optional) 31 | sudo yum install amazon-cloudwatch-agent -y 32 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 33 | { 34 | "metrics": { 35 | "metrics_collected": { 36 | "mem": { 37 | "measurement": [ 38 | "mem_used_percent" 39 | ], 40 | "metrics_collection_interval":30 41 | }, 42 | "swap": { 43 | "measurement": [ 44 | "swap_used_percent" 45 | ], 46 | "metrics_collection_interval":30 47 | } 48 | } 49 | } 50 | } 51 | EOF 52 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 53 | -a fetch-config \ 54 | -m ec2 \ 55 | -s \ 56 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 57 | 58 | # Configure Vertx access logging (optional) 59 | # aws s3 cp s3://graviton-perf-module/SimpleREST.java \ 60 | # vertx-examples/web-examples/src/main/java/io/vertx/example/web/rest/SimpleREST.java 61 | 62 | # Maven build 63 | # aws s3 cp s3://graviton-perf-module/pom-java11.xml vertx-examples/web-examples/pom.xml 64 | # mvn package -f vertx-examples/web-examples/pom.xml 65 | 66 | # Launch verticle 67 | # nohup java -XX:+PerfDisableSharedMem -XX:+UseG1GC -jar vertx-examples/web-examples/target/web-examples-4.1.2-fat.jar & 68 | 69 | # Test verticle 70 | # curl http://localhost:8080/products 71 | -------------------------------------------------------------------------------- /scripts/ubuntu-prereqs.sh: -------------------------------------------------------------------------------- 1 | sudo systemctl stop apt-daily.timer 2 | sudo apt-get update 3 | sudo apt-get install -y jq gettext bash-completion moreutils postgresql-client-14 postgresql-client-common 4 | curl -sSL -o /tmp/kubectl https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.0/2024-01-04/bin/linux/amd64/kubectl 5 | chmod +x /tmp/kubectl 6 | sudo mv /tmp/kubectl /usr/local/bin/kubectl 7 | pip3 install --upgrade awscli 8 | hash -r 9 | export TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 10 | export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account) 11 | export AWS_REGION=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region') 12 | echo "export ACCOUNT_ID=${ACCOUNT_ID}" | tee -a ~/.bash_profile 13 | echo "export AWS_REGION=${AWS_REGION}" | tee -a ~/.bash_profile 14 | aws configure set default.region ${AWS_REGION} 15 | aws configure set default.account ${ACCOUNT_ID} 16 | aws configure get default.region 17 | aws configure get default.account 18 | aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" || aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com" 19 | aws iam get-role --role-name "AWSServiceRoleForAmazonOpenSearchService" || aws iam create-service-linked-role --aws-service-name "opensearchservice.amazonaws.com" 20 | sudo systemctl start apt-daily.timer 21 | cd ~/environment/graviton-workshop && python3 -m venv .venv && source .venv/bin/activate && pip install -r requirements.txt 22 | npm -g uninstall cdk && npm install -g aws-cdk@2.133.0 23 | cdk bootstrap 24 | cdk synth 25 | -------------------------------------------------------------------------------- /scripts/user-script-client.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | 5 | # Update the system 6 | sudo yum update -y 7 | 8 | sudo yum install git -y 9 | sudo yum groupinstall "Development Tools" -y 10 | sudo yum install openssl-devel -y 11 | git clone https://github.com/giltene/wrk2.git 12 | make -C wrk2/ 13 | sudo cp wrk2/wrk /usr/local/bin/wrk 14 | 15 | # Get latest python 3.8.x and make it the default for python3 (AL2 comes with python 3.7.x) 16 | sudo amazon-linux-extras install -y python3.8 17 | sudo alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1 18 | 19 | # Install pip 20 | python3 -m ensurepip --upgrade 21 | 22 | # Install dependencies for plotting results 23 | python3 -m pip install matplotlib 24 | -------------------------------------------------------------------------------- /scripts/user-script-java11.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # Java compiler and runtime version 11 | sudo amazon-linux-extras enable corretto8 12 | sudo yum clean metadata 13 | sudo yum install java-1.8.0-amazon-corretto-devel -y 14 | sudo yum install java-11-amazon-corretto-headless -y 15 | 16 | # Switch between Java versions 17 | # sudo update-alternatives --set java /usr/lib/jvm/java-1.8.0-amazon-corretto.aarch64/jre/bin/java 18 | sudo update-alternatives --set java /usr/lib/jvm/java-11-amazon-corretto.aarch64/bin/java 19 | 20 | # Build system 21 | sudo yum install maven -y 22 | 23 | # Hot fix: using `alternatives` does not fix the version used by Maven!! 24 | # export JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto.aarch64 25 | # export PATH=$JAVA_HOME/bin:$PATH 26 | # 27 | # Clone repo 28 | # git clone https://github.com/vert-x3/vertx-examples.git 29 | 30 | # Install and configure CW agent (optional) 31 | sudo yum install amazon-cloudwatch-agent -y 32 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 33 | { 34 | "metrics": { 35 | "metrics_collected": { 36 | "mem": { 37 | "measurement": [ 38 | "mem_used_percent" 39 | ], 40 | "metrics_collection_interval":30 41 | }, 42 | "swap": { 43 | "measurement": [ 44 | "swap_used_percent" 45 | ], 46 | "metrics_collection_interval":30 47 | } 48 | } 49 | } 50 | } 51 | EOF 52 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 53 | -a fetch-config \ 54 | -m ec2 \ 55 | -s \ 56 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 57 | 58 | # Configure Vertx access logging (optional) 59 | # aws s3 cp s3://graviton-perf-module/SimpleREST.java \ 60 | # vertx-examples/web-examples/src/main/java/io/vertx/example/web/rest/SimpleREST.java 61 | 62 | # Maven build 63 | # aws s3 cp s3://graviton-perf-module/pom-java11.xml vertx-examples/web-examples/pom.xml 64 | # mvn package -f vertx-examples/web-examples/pom.xml 65 | 66 | # Launch verticle 67 | # nohup java -XX:+PerfDisableSharedMem -XX:+UseG1GC -jar vertx-examples/web-examples/target/web-examples-4.1.2-fat.jar & 68 | 69 | # Test verticle 70 | # curl http://localhost:8080/products 71 | -------------------------------------------------------------------------------- /scripts/user-script-java8.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | # Update the system 5 | sudo yum update -y 6 | 7 | # Prerequisites 8 | sudo yum install git -y 9 | 10 | # Java compiler and runtime version 11 | sudo amazon-linux-extras enable corretto8 12 | sudo yum clean metadata 13 | sudo yum install java-1.8.0-amazon-corretto-devel -y 14 | sudo yum install java-11-amazon-corretto-headless -y 15 | 16 | # Switch between Java versions 17 | sudo update-alternatives --set java /usr/lib/jvm/java-1.8.0-amazon-corretto.aarch64/jre/bin/java 18 | # sudo update-alternatives --set java /usr/lib/jvm/java-11-amazon-corretto.aarch64/bin/java 19 | 20 | # Build system 21 | sudo yum install maven -y 22 | 23 | # Clone repo 24 | # git clone https://github.com/vert-x3/vertx-examples.git 25 | 26 | # Install and configure CW agent 27 | sudo yum install amazon-cloudwatch-agent -y 28 | cat </opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 29 | { 30 | "metrics": { 31 | "metrics_collected": { 32 | "mem": { 33 | "measurement": [ 34 | "mem_used_percent" 35 | ], 36 | "metrics_collection_interval":30 37 | }, 38 | "swap": { 39 | "measurement": [ 40 | "swap_used_percent" 41 | ], 42 | "metrics_collection_interval":30 43 | } 44 | } 45 | } 46 | } 47 | EOF 48 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \ 49 | -a fetch-config \ 50 | -m ec2 \ 51 | -s \ 52 | -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json 53 | 54 | # Configure Vertx access logging (optional) 55 | # aws s3 cp s3://graviton-perf-module/SimpleREST.java \ 56 | # vertx-examples/web-examples/src/main/java/io/vertx/example/web/rest/SimpleREST.java 57 | 58 | # Maven build 59 | # aws s3 cp s3://graviton-perf-module/pom-java8.xml vertx-examples/web-examples/pom.xml 60 | # mvn package -f vertx-examples/web-examples/pom.xml 61 | 62 | # Launch verticle 63 | # nohup java -XX:+PerfDisableSharedMem -XX:+UseG1GC -jar vertx-examples/web-examples/target/web-examples-4.1.2-fat.jar & 64 | 65 | # Test verticle 66 | # curl http://localhost:8080/products 67 | -------------------------------------------------------------------------------- /scripts/vpc-env.sh: -------------------------------------------------------------------------------- 1 | #Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | metadata_service="http://169.254.169.254/latest/meta-data" 4 | TOKEN=$(curl -X PUT 'http://169.254.169.254/latest/api/token' -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600') 5 | interface=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s $metadata_service/network/interfaces/macs/ | head -n1 | tr -d '/') 6 | export DefaultRouteCidr=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s $metadata_service/network/interfaces/macs/$interface/vpc-ipv4-cidr-block/) 7 | echo "export DefaultRouteCidr=${DefaultRouteCidr}" | tee -a ~/.bash_profile 8 | -------------------------------------------------------------------------------- /scripts/wrk2_wrapper.sh: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 2 | #!/bin/bash 3 | 4 | 5 | RUN_RPS=() 6 | EXEC_DURATION=60 7 | EXEC_THREADS=4 8 | EXEC_CONNECTIONS=8 9 | EXEC_HOST="" 10 | 11 | function iterate_and_execute() { 12 | local NUM_EXECUTIONS=$(echo "${#RUN_RPS[@]}") 13 | 14 | NODE_NO_HTTP=${EXEC_HOST#http://*} 15 | NODE_NO_PORT=${NODE_NO_HTTP%:*} 16 | 17 | for RPS in "${RUN_RPS[@]}"; do 18 | run_wrk2 ${RPS} ${EXEC_THREADS} ${EXEC_CONNECTIONS} ${EXEC_DURATION} ${EXEC_HOST} 19 | done 20 | 21 | mv test-result.csv test-result-${NODE_NO_PORT}.csv 22 | } 23 | 24 | function run_wrk2() { 25 | local RPS=$1 26 | local THREADS=$2 27 | local CONNECTIONS=$3 28 | local DURATION=$4 29 | local EXEC_HOST=$5 30 | 31 | # Run wrk2 32 | wrk -t${THREADS} -c${CONNECTIONS} -d${DURATION}s -R${RPS} --latency -s runner.lua ${EXEC_HOST} 33 | } 34 | 35 | # Collect all run rate arguments 36 | while getopts ":r:d:t:c:h:" opt; do 37 | case $opt in 38 | r) 39 | RPS="$OPTARG" 40 | RUN_RPS+=(${RPS}) 41 | ;; 42 | d) 43 | # Optional 44 | EXEC_DURATION="$OPTARG" 45 | ;; 46 | t) 47 | # Optional 48 | EXEC_THREADS="$OPTARG" 49 | ;; 50 | c) 51 | # Optional 52 | EXEC_CONNECTIONS="$OPTARG" 53 | ;; 54 | h) 55 | EXEC_HOST="$OPTARG" 56 | ;; 57 | *) 58 | echo $"Usage: $0 {r: rate as requests per second|d: test duration in seconds (default: 60)|t: threads for the test (default: 4)|c: connections to open (defaults to 8)}" 59 | exit 1 60 | esac 61 | done 62 | 63 | iterate_and_execute 64 | -------------------------------------------------------------------------------- /src/main/java/com/company/example/web/bookshop/VertxBookShopREST.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 3 | */ 4 | 5 | package com.company.example.web.bookshop; 6 | 7 | import io.vertx.core.AbstractVerticle; 8 | import io.vertx.core.http.HttpServerResponse; 9 | import io.vertx.core.json.JsonArray; 10 | import io.vertx.core.json.JsonObject; 11 | import io.vertx.ext.web.Router; 12 | import io.vertx.ext.web.RoutingContext; 13 | import io.vertx.ext.web.handler.BodyHandler; 14 | 15 | // Import access log handler 16 | // import io.vertx.ext.web.handler.LoggerHandler; 17 | 18 | import java.util.HashMap; 19 | import java.util.Map; 20 | 21 | public class VertxBookShopREST extends AbstractVerticle { 22 | 23 | private Map books = new HashMap<>(); 24 | private Map carts = new HashMap<>(); 25 | 26 | @Override 27 | public void start() { 28 | 29 | initializeData(); 30 | 31 | Router router = Router.router(vertx); 32 | 33 | // Capture access logs 34 | // router.route().handler(LoggerHandler.create()); 35 | 36 | router.route().handler(BodyHandler.create()); 37 | 38 | // Books API 39 | router.get("/books/:bookId").handler(this::handleGetBook); 40 | router.put("/books/:bookId").handler(this::handleAddBook); 41 | router.get("/books").handler(this::handleListBooks); 42 | 43 | // Cart API 44 | router.get("/carts").handler(this::handleListCarts); 45 | router.get("/carts/:userId").handler(this::handleGetCart); 46 | router.put("/carts/:userId").handler(this::handleAddCart); 47 | router.get("/carts/:userId/items").handler(this::handleListCartItems); 48 | 49 | vertx.createHttpServer().requestHandler(router).listen(8080); 50 | } 51 | 52 | private void handleGetBook(RoutingContext routingContext) { 53 | String bookId = routingContext.request().getParam("bookId"); 54 | HttpServerResponse response = routingContext.response(); 55 | 56 | if (bookId == null) { 57 | response.setStatusCode(400).end(); 58 | } else { 59 | JsonObject book = books.get(bookId); 60 | if (book == null) { 61 | response.setStatusCode(404).end(); 62 | } else { 63 | response.putHeader("content-type", "application/json").end(book.encodePrettily()); 64 | } 65 | } 66 | } 67 | 68 | private void handleGetCart(RoutingContext routingContext) { 69 | String userId = routingContext.request().getParam("userId"); 70 | HttpServerResponse response = routingContext.response(); 71 | 72 | if (userId == null) { 73 | response.setStatusCode(400).end(); 74 | } else { 75 | JsonObject cart = carts.get(userId); 76 | if (cart == null) { 77 | response.setStatusCode(404).end(); 78 | } else { 79 | response.putHeader("content-type", "application/json").end(cart.encodePrettily()); 80 | } 81 | } 82 | } 83 | 84 | private void handleAddBook(RoutingContext routingContext) { 85 | String bookId = routingContext.request().getParam("bookId"); 86 | HttpServerResponse response = routingContext.response(); 87 | 88 | if (bookId == null) { 89 | response.setStatusCode(400).end(); 90 | } else { 91 | JsonObject book = routingContext.getBodyAsJson(); 92 | if (book == null) { 93 | response.setStatusCode(400).end(); 94 | } else { 95 | addBook(book); 96 | response.end(); 97 | } 98 | } 99 | } 100 | 101 | private void handleAddCart(RoutingContext routingContext) { 102 | String userId = routingContext.request().getParam("userId"); 103 | HttpServerResponse response = routingContext.response(); 104 | 105 | if (userId == null) { 106 | response.setStatusCode(400).end(); 107 | } else { 108 | JsonObject cart = routingContext.getBodyAsJson(); 109 | if (cart == null) { 110 | response.setStatusCode(400).end(); 111 | } else { 112 | addCart(cart); 113 | response.end(); 114 | } 115 | } 116 | } 117 | 118 | private void handleListBooks(RoutingContext routingContext) { 119 | JsonArray arr = new JsonArray(); 120 | books.forEach((k, v) -> arr.add(v)); 121 | routingContext.response().putHeader("content-type", "application/json").end(arr.encodePrettily()); 122 | } 123 | 124 | private void handleListCarts(RoutingContext routingContext) { 125 | JsonArray arr = new JsonArray(); 126 | carts.forEach((k, v) -> arr.add(v)); 127 | routingContext.response().putHeader("content-type", "application/json").end(arr.encodePrettily()); 128 | } 129 | 130 | private void handleListCartItems(RoutingContext routingContext) { 131 | String userId = routingContext.request().getParam("userId"); 132 | HttpServerResponse response = routingContext.response(); 133 | if (userId == null) { 134 | response.setStatusCode(400).end(); 135 | } else { 136 | JsonObject cart = carts.get(userId); 137 | if (cart == null) { 138 | response.setStatusCode(400).end(); 139 | } else { 140 | JsonArray items = cart.getJsonArray("items"); 141 | if (items == null) { 142 | response.setStatusCode(400).end(); 143 | } else { 144 | response.putHeader("content-type", "application/json").end(items.encodePrettily()); 145 | } 146 | } 147 | } 148 | } 149 | 150 | private void addBook(JsonObject book) { 151 | books.put(book.getString("book_id"), book); 152 | } 153 | 154 | private void addCart(JsonObject cart) { 155 | carts.put(cart.getString("user_id"), cart); 156 | } 157 | 158 | private void initializeData() { 159 | // Initialize sample books 160 | addBook(new JsonObject().put("book_id", "1").put("title", "Anna Karenina").put("author", "Leo Tolstoy").put("genre", "Classics").put("rating", 5.0)); 161 | addBook(new JsonObject().put("book_id", "2").put("title", "Madame Bovary").put("author", "Gustave Flaubert").put("genre", "Classics").put("rating", 5.0)); 162 | addBook(new JsonObject().put("book_id", "3").put("title", "War and Peace").put("author", "Leo Tolstoy").put("genre", "Classics").put("rating", 5.0)); 163 | addBook(new JsonObject().put("book_id", "4").put("title", "The Great Gatsby").put("author", "F. Scott Fitzgerald").put("genre", "Classics").put("rating", 5.0)); 164 | addBook(new JsonObject().put("book_id", "5").put("title", "Lolita").put("author", "Vladimir Nabokov").put("genre", "Classics").put("rating", 5.0)); 165 | addBook(new JsonObject().put("book_id", "6").put("title", "Middlemarch").put("author", "George Eliot").put("genre", "Classics").put("rating", 5.0)); 166 | addBook(new JsonObject().put("book_id", "7").put("title", "The Adventures of Huckleberry Finn").put("author", "Mark Twain").put("genre", "Classics").put("rating", 5.0)); 167 | addBook(new JsonObject().put("book_id", "8").put("title", "The Stories of Anton Chekhov").put("author", "Anton Chekhov").put("genre", "Classics").put("rating", 5.0)); 168 | addBook(new JsonObject().put("book_id", "9").put("title", "In Search of Lost Time").put("author", "Marcel Proust").put("genre", "Classics").put("rating", 5.0)); 169 | addBook(new JsonObject().put("book_id", "10").put("title", "Hamlet").put("author", "William Shakespeare").put("genre", "Classics").put("rating", 5.0)); 170 | 171 | // Initialize sample carts 172 | addCart(new JsonObject() 173 | .put("user_id", "1") 174 | .put("items", new JsonArray() 175 | .add(new JsonObject().put("item_id", "1").put("item_count", 2).put("price", 10.99)) 176 | .add(new JsonObject().put("item_id", "2").put("item_count", 4).put("price", 12.99))) 177 | .put("total", 73.94)); 178 | 179 | addCart(new JsonObject() 180 | .put("user_id", "2") 181 | .put("items", new JsonArray() 182 | .add(new JsonObject().put("item_id", "7").put("item_count", 3).put("price", 9.99)) 183 | .add(new JsonObject().put("item_id", "9").put("item_count", 7).put("price", 13.99))) 184 | .put("total", 127.90)); 185 | } 186 | 187 | } 188 | --------------------------------------------------------------------------------