├── .npmignore
├── .gitignore
├── lib
│   ├── index.ts
│   ├── eks-cluster.ts
│   └── eks-node-group.ts
├── aws-auth-cm.yaml
├── cdk.json
├── tsconfig.json
├── package.json
├── bin
│   └── eks-example.ts
└── README.md

/.npmignore:
--------------------------------------------------------------------------------
*.ts
!*.d.ts
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.js
*.d.ts
aws-auth-cm.yaml
node_modules
--------------------------------------------------------------------------------
/lib/index.ts:
--------------------------------------------------------------------------------
export * from './eks-cluster';
export * from './eks-node-group';
--------------------------------------------------------------------------------
/aws-auth-cm.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ''
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
--------------------------------------------------------------------------------
/cdk.json:
--------------------------------------------------------------------------------
{
  "app": "node bin/eks-example.js",
  "context": {
    "cluster-name": "EksExample",
    "key-name": null,
    "node-group-max-size": 5,
    "node-group-min-size": 1,
    "node-group-desired-size": 3,
    "node-group-instance-type": "t3.medium",
    "bastion": false,
    "ssh-allowed-cidr": []
  }
}
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "target": "ES2018",
    "module": "commonjs",
    "lib": ["es2016", "es2017.object", "es2017.string"],
    "declaration": true,
    "strict": true,
    "noImplicitAny": true,
    "strictNullChecks": true,
    "noImplicitThis": true,
    "alwaysStrict": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noImplicitReturns": true,
    "noFallthroughCasesInSwitch": false,
    "inlineSourceMap": true,
    "inlineSources": true,
    "experimentalDecorators": true,
    "strictPropertyInitialization": false
  }
}
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "eks-example",
  "version": "0.1.0",
  "main": "lib/index.js",
  "types": "lib/index.d.ts",
  "bin": {
    "eks-example": "bin/eks-example.js"
  },
  "scripts": {
    "build": "tsc",
    "watch": "tsc -w",
    "cdk": "cdk"
  },
  "devDependencies": {
    "@types/node": "^8.9.4",
    "typescript": "^3.1.2",
    "aws-cdk": "^0.19.0"
  },
  "dependencies": {
    "@aws-cdk/aws-ec2": "^0.19.0",
    "@aws-cdk/aws-iam": "^0.19.0",
    "@aws-cdk/aws-autoscaling": "^0.19.0",
    "@aws-cdk/aws-eks": "^0.19.0",
    "@aws-cdk/cdk": "^0.19.0"
  }
}
--------------------------------------------------------------------------------
/bin/eks-example.ts:
--------------------------------------------------------------------------------
#!/usr/bin/env node
import { EksClusterStack } from '../lib/eks-cluster';
import { EksNodeGroupStack } from '../lib/eks-node-group';
import cdk = require('@aws-cdk/cdk');

const app = new cdk.App();

const clusterName = app.getContext('cluster-name');
const cluster = new EksClusterStack(app, 'EksCluster', { clusterName });

/* worker node configuration properties */
const bastion: boolean = !!app.getContext('bastion');
const nodeGroupMaxSize = app.getContext('node-group-max-size');
const nodeGroupMinSize = app.getContext('node-group-min-size');
const nodeGroupDesiredSize = app.getContext('node-group-desired-size');
const keyFromContext = app.getContext('key-name');
const keyName = (keyFromContext === null) ? undefined : keyFromContext;
const sshAllowedCidr = app.getContext('ssh-allowed-cidr');
const nodeGroupInstanceType = app.getContext('node-group-instance-type');

new EksNodeGroupStack(app, 'EksWorkers', {
  controlPlaneSG: cluster.controlPlaneSG,
  vpc: cluster.vpc,
  clusterName,
  bastion,
  keyName,
  sshAllowedCidr,
  nodeGroupMaxSize,
  nodeGroupMinSize,
  nodeGroupDesiredSize,
  nodeGroupInstanceType,
});

app.run();
--------------------------------------------------------------------------------
/lib/eks-cluster.ts:
--------------------------------------------------------------------------------
import ec2 = require('@aws-cdk/aws-ec2');
import eks = require('@aws-cdk/aws-eks');
import iam = require('@aws-cdk/aws-iam');
import cdk = require('@aws-cdk/cdk');

export interface ClusterProps extends cdk.StackProps {
  clusterName: string;
  vpcProps?: ec2.VpcNetworkProps;
}

const EKS_POLICIES: string[] = [
  "arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
  "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
];

export class EksClusterStack extends cdk.Stack {
  public readonly vpc: ec2.VpcNetworkRefProps;
  public readonly controlPlaneSG: ec2.SecurityGroupRefProps;
  public readonly cluster: eks.cloudformation.ClusterResource;
  constructor(parent: cdk.App, name: string, props: ClusterProps) {
    super(parent, name, props);
    const vpc = this.createVpc(props);
    this.vpc = vpc.export();
    const controlPlaneSG = new ec2.SecurityGroup(this, `${props.clusterName}ControlPlaneSG`, {
      vpc,
    });
    this.controlPlaneSG = controlPlaneSG.export();
    const eksRole = new iam.Role(this, 'EksServiceRole', {
      assumedBy: new iam.ServicePrincipal('eks.amazonaws.com'),
      managedPolicyArns: EKS_POLICIES,
    });
    eksRole.addToPolicy(
      new iam.PolicyStatement().
        addAction("elasticloadbalancing:*").
        addAction("ec2:CreateSecurityGroup").
        addAction("ec2:Describe*").
        addAllResources()
    );

    const publicSubnetIds = vpc.publicSubnets.map(s => s.subnetId);
    const privateSubnetIds = vpc.privateSubnets.map(s => s.subnetId);
    this.cluster = new eks.cloudformation.ClusterResource(this, props.clusterName, {
      name: props.clusterName,
      resourcesVpcConfig: {
        subnetIds: publicSubnetIds.concat(privateSubnetIds),
        securityGroupIds: [controlPlaneSG.securityGroupId],
      },
      roleArn: eksRole.roleArn,
    });
  }

  private createVpc(props: ClusterProps): ec2.VpcNetworkRef {
    const vpcProps = props.vpcProps || this.defaultVpcProps(props.clusterName);

    return new ec2.VpcNetwork(this, 'EksVpc', vpcProps);
  }

  private defaultVpcProps(clusterName: string): ec2.VpcNetworkProps {
    const tags: {[key: string]: string} = {};
    tags[`kubernetes.io/cluster/${clusterName}`] = 'shared';
    const privateSubnetTags: {[key: string]: string} = {};
    privateSubnetTags['kubernetes.io/role/internal-elb'] = '1';

    return {
      cidr: '10.0.0.0/16',
      natGateways: 1,
      subnetConfiguration: [
        {
          subnetType: ec2.SubnetType.Public,
          name: 'EksPublic',
          cidrMask: 24,
        },
        {
          subnetType: ec2.SubnetType.Private,
          name: 'EksPrivate',
          cidrMask: 21,
          tags: privateSubnetTags,
        },
      ],
      tags,
    };
  }
}
--------------------------------------------------------------------------------
/lib/eks-node-group.ts:
--------------------------------------------------------------------------------
import ec2 = require('@aws-cdk/aws-ec2');
import iam = require('@aws-cdk/aws-iam');
import asg = require('@aws-cdk/aws-autoscaling');
import cdk = require('@aws-cdk/cdk');

export interface NodeGroupProps extends cdk.StackProps {
  controlPlaneSG: ec2.SecurityGroupRefProps;
  vpc: ec2.VpcNetworkRefProps;
  clusterName: string;
  bastion: boolean;
  sshAllowedCidr: string[];
  keyName?: string;
  nodeGroupMaxSize: number;
  nodeGroupMinSize: number;
  nodeGroupDesiredSize: number;
  nodeGroupInstanceType: string;
}

const CP_WORKER_PORTS = new ec2.TcpPortRange(1025, 65535);
const API_PORTS = new ec2.TcpPort(443);
const WORKER_NODE_POLICIES: string[] = [
  "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
  "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
  "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
];

export class EksNodeGroupStack extends cdk.Stack {

  public readonly workerNodeASG: asg.AutoScalingGroup;
  private bastionASG: asg.AutoScalingGroup;

  constructor(parent: cdk.App, name: string, props: NodeGroupProps) {
    super(parent, name, props);

    const vpc = ec2.VpcNetworkRef.import(this, 'ClusterVpc', props.vpc);
    const controlPlaneSG = ec2.SecurityGroupRef.import(this, 'ControlPlaneSG', props.controlPlaneSG);

    // have to periodically update this constant
    const amiMap: {[region: string]: string;} = {
      'us-west-2': 'ami-0f54a2f7d2e9c88b3',
      'us-east-1': 'ami-0a0b913ef3249b655',
      'us-east-2': 'ami-0958a76db2d150238',
      'eu-west-1': 'ami-00c3b2d35bddd4f5c',
    };
    this.workerNodeASG = new asg.AutoScalingGroup(this, 'Workers', {
      instanceType: new ec2.InstanceType(props.nodeGroupInstanceType),
      machineImage: new ec2.GenericLinuxImage(amiMap),
      vpc,
      allowAllOutbound: true,
      minSize: props.nodeGroupMinSize,
      maxSize: props.nodeGroupMaxSize,
      desiredCapacity: props.nodeGroupDesiredSize,
      keyName: props.keyName,
      vpcPlacement: {subnetsToUse: ec2.SubnetType.Private},
      updateType: asg.UpdateType.RollingUpdate,
      rollingUpdateConfiguration: {
        maxBatchSize: 1,
        minInstancesInService: 1,
        pauseTimeSec: 300,
        waitOnResourceSignals: true,
      },
    });
    this.workerNodeASG.tags.setTag(`kubernetes.io/cluster/${props.clusterName}`, 'owned');
    this.workerNodeASG.tags.setTag('NodeType', 'Worker');
    for (const policy of WORKER_NODE_POLICIES) {
      this.workerNodeASG.role.attachManagedPolicy(policy);
    }

    this.workerNodeASG.role.
      addToPolicy(new iam.PolicyStatement().
        addAction('cloudformation:SignalResource').
        addResource(`arn:aws:cloudformation:${new cdk.AwsRegion()}:${new cdk.AwsAccountId()}:stack/${new cdk.AwsStackName()}/*`));

    this.workerNodeASG.role.
      addToPolicy(new iam.PolicyStatement().
        addAction('ec2:DescribeTags').addAllResources());

    // this issue is being tracked: https://github.com/awslabs/aws-cdk/issues/623
    const asgResource = this.workerNodeASG.children.find(c => (c as cdk.Resource).resourceType === 'AWS::AutoScaling::AutoScalingGroup') as asg.cloudformation.AutoScalingGroupResource;

    this.workerNodeASG.addUserData(
      'set -o xtrace',
      `/etc/eks/bootstrap.sh ${props.clusterName}`,
      `/opt/aws/bin/cfn-signal --exit-code $? \\`,
      ` --stack ${new cdk.AwsStackName()} \\`,
      ` --resource ${asgResource.logicalId} \\`,
      ` --region ${new cdk.AwsRegion()}`
    );

    this.workerNodeASG.connections.allowFrom(controlPlaneSG, CP_WORKER_PORTS);
    this.workerNodeASG.connections.allowFrom(controlPlaneSG, API_PORTS);
    this.workerNodeASG.connections.allowInternally(new ec2.AllTraffic());
    const cpConnection = controlPlaneSG.connections;
    cpConnection.allowTo(this.workerNodeASG, CP_WORKER_PORTS);
    cpConnection.allowTo(this.workerNodeASG, API_PORTS);
    cpConnection.allowFrom(this.workerNodeASG, CP_WORKER_PORTS);

    new cdk.Output(this, 'WorkerRoleArn', {
      value: this.workerNodeASG.role.roleArn,
    });

    // add variable constructs at the end because if they are in the middle they
    // will force a destruction of any resources added after them
    // see: https://awslabs.github.io/aws-cdk/logical-ids.html
    if (props.bastion) {
      this.bastionASG = new asg.AutoScalingGroup(this, 'Bastion', {
        instanceType: new ec2.InstanceTypePair(ec2.InstanceClass.T3, ec2.InstanceSize.Micro),
        machineImage: new ec2.GenericLinuxImage(amiMap),
        vpc,
        minSize: 1,
        maxSize: 1,
        desiredCapacity: 1,
        keyName: props.keyName,
        vpcPlacement: {subnetsToUse: ec2.SubnetType.Public},
      });
      for (const cidr of props.sshAllowedCidr) {
        this.bastionASG.connections.allowFrom(new ec2.CidrIPv4(cidr), new ec2.TcpPort(22));
      }
      this.workerNodeASG.connections.allowFrom(this.bastionASG, new ec2.TcpPort(22));
    }
  }
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# CDK Example of EKS

First, this is an *example*. This is not production ready. The point of this
repository is to demonstrate some of the features available with the CDK. The
use of EKS is to ensure there is enough complexity to make this valuable.
This example makes many assumptions in order to keep the solution easy to
follow. For example, the rolling update configuration is not exposed, nor does
this address worker node draining during upgrades.

**If you choose to run this stack you are responsible for any AWS costs that
are incurred. The default values are designed to be cost conscious.**

## Building a Getting Started EKS Cluster

This repository has reasonable quick start defaults for two AWS CloudFormation
stacks that result in a working EKS cluster. In order to use `kubectl` with this
you will still need to ensure you have the [prerequisites AWS
requires](https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html).
I have chosen to use the `cdk.json` file to pass in and configure parameters.
There are multiple options for [passing parameters into CDK](https://awslabs.github.io/aws-cdk/passing-in-data.html).
We will cover the supported options in each stack.

#### CDK Setup

If you don't already have the CDK installed please follow the
[guide](https://awslabs.github.io/aws-cdk/getting-started.html).

We will be using TypeScript for these examples.

Before going any further, clone this repository and run the following commands:

```
# from root of this repo
npm install
npm run build
```

#### EKS Cluster Control Plane - Stack 1

The first stack we will be creating is the EKS cluster and control plane. This
stack is functionally very similar to the [AWS Getting Started Step 1](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#eks-create-cluster).

The context allows you to set your desired EKS cluster name, but if you do not
alter `cdk.json` or pass in a command line argument the default will be used.
The stack will also create a VPC and a NAT Gateway.

Using the defaults, the command would be:

```
# from root of this repo
cdk deploy EksCluster
# output will be similar to
⏳  Starting deployment of stack EksCluster...
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::EC2::VPC] EksVpc4BB427FA
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::IAM::Role] EksServiceRole2C9FD210
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::EC2::EIP] EksVpcEksPublicSubnet2EIP6E00FE76
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::EC2::InternetGateway] EksVpcIGWF47619EF
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::EC2::EIP] EksVpcEksPublicSubnet3EIP7AA2ED70
[ 0/39] Mon Oct 22 2018 16:27:52 GMT-0700 (Pacific Daylight Time)  CREATE_IN_PROGRESS  [AWS::CDK::Metadata] CDKMetadata
# ... snip ...
✅  Deployment of stack EksCluster completed successfully, it has ARN arn:aws:cloudformation:us-west-2:12345678901:stack/EksCluster/00000000-aaaa-bbbb-cccc-dddddddddddd
EksCluster.EksVpcPublicSubnetIDs00000000 = subnet-11111111111111111,subnet-22222222222222222,subnet-33333333333333333
EksCluster.EksVpcVpcId11111111 = vpc-00000000000000000
EksCluster.EksExampleControlPlaneSGSecurityGroupIdeeeeeeee = sg-00000000000000000
EksCluster.EksVpcPrivateSubnetIDsffffffff = subnet-44444444444444444,subnet-55555555555555555,subnet-66666666666666666
```
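
If you would rather not edit `cdk.json`, the CDK CLI can also take context
values on the command line. This is just a sketch, assuming your version of the
CDK CLI supports the `--context`/`-c` option and using a made-up cluster name;
if you override `cluster-name` here, pass the same value again when deploying
the worker stack below.

```
# hypothetical command line override of the cluster-name context value
cdk deploy EksCluster -c cluster-name=MyEksCluster
```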

The creation of the EKS cluster can take up to 15 minutes. After the stack
completes we can verify we have the credentials necessary to use `kubectl`.

```
# With AWS credentials available to the awscli
# if you changed the default name use it here
aws eks update-kubeconfig --name EksExample
kubectl get svc
# expected output
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
kube-dns   ClusterIP   172.20.0.10   <none>        53/UDP,53/TCP   1d
```

#### EKS Worker Nodes - Stack 2

Now that AWS is running our Kubernetes API server and required components for
us, we need to create worker nodes. There are configuration options for the
workers, but for now if you just want to deploy some nodes the defaults will
work.

```
# from root of this repo
cdk deploy EksWorkers
# this outputs a similar success message at the end
```

The defaults for the workers can be found in [cdk.json](cdk.json). The only
aspect that might be confusing is the optional [bastion](https://en.wikipedia.org/wiki/Bastion_host) configuration.
If you want a bastion host, the best option is to edit the [cdk.json](cdk.json)
file and set the values for your configuration. The edits will be made to the
`bastion`, `key-name`, and `ssh-allowed-cidr` JSON keys.

`key-name` is an [AWS EC2 Keypair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).

`ssh-allowed-cidr` is a list of IP addresses that will be allowed to SSH to the
bastion host. You can look up your external IP via [AWS](http://checkip.amazonaws.com/). At a minimum you will want to add that IP as a `/32` below.

Your file might look similar to this:

```json
{
  "app": "node bin/eks-example.js",
  "context": {
    "cluster-name": "EksExample",
    "key-name": "MyKeyPair",
    "node-group-max-size": 5,
    "node-group-min-size": 1,
    "node-group-desired-size": 3,
    "node-group-instance-type": "t3.medium",
    "bastion": true,
    "ssh-allowed-cidr": ["1.2.3.4/32"]
  }
}
```

If you change these values after deploying you will need to re-deploy the stack
in order to apply the updates. That can be done with:

```
npm run build
cdk diff EksWorkers
# make sure the diff matches what you think is happening
cdk deploy EksWorkers
# example success
Outputs:
EksWorkers.WorkerRoleArn = arn:aws:iam::667237269012:role/EksWorkers-WorkersInstanceRole510CB30C-QFC0D1PV61B
# note this ARN for the next step
```
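
If you did not note the ARN, you can pull the stack outputs again at any time
with the AWS CLI. A minimal sketch, assuming you kept the default stack name of
`EksWorkers`:

```
# list the outputs of the EksWorkers stack, including WorkerRoleArn
aws cloudformation describe-stacks --stack-name EksWorkers \
  --query "Stacks[0].Outputs" --output table
```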

Once you have your workers deployed we need to join them to the cluster. This
currently must be done using `kubectl`. To do this, update the file in this
repo called [aws-auth-cm.yaml](aws-auth-cm.yaml) with the Role ARN from the
`EksWorkers` stack output. Specifically, replace this line with your value:

```
- rolearn: ''
```

This file gives the role attached to these nodes permission to join the
Kubernetes cluster.

```
kubectl apply -f aws-auth-cm.yaml
kubectl get nodes --watch # this will follow the k8s events, CTRL-C to break
# example output
NAME                                        STATUS     ROLES    AGE   VERSION
ip-10-0-15-168.us-west-2.compute.internal   NotReady   <none>   0s    v1.10.3
ip-10-0-28-14.us-west-2.compute.internal    NotReady   <none>   0s    v1.10.3
ip-10-0-28-14.us-west-2.compute.internal    NotReady   <none>   0s    v1.10.3
ip-10-0-19-99.us-west-2.compute.internal    NotReady   <none>   1s    v1.10.3
ip-10-0-19-99.us-west-2.compute.internal    NotReady   <none>   1s    v1.10.3
ip-10-0-31-23.us-west-2.compute.internal    NotReady   <none>   1s    v1.10.3
ip-10-0-31-23.us-west-2.compute.internal    NotReady   <none>   1s    v1.10.3
ip-10-0-17-255.us-west-2.compute.internal   NotReady   <none>   0s    v1.10.3
ip-10-0-17-255.us-west-2.compute.internal   NotReady   <none>   0s    v1.10.3
ip-10-0-15-168.us-west-2.compute.internal   NotReady   <none>   10s   v1.10.3
ip-10-0-28-14.us-west-2.compute.internal    NotReady   <none>   10s   v1.10.3
ip-10-0-19-99.us-west-2.compute.internal    NotReady   <none>   11s   v1.10.3
ip-10-0-31-23.us-west-2.compute.internal    NotReady   <none>   11s   v1.10.3
ip-10-0-17-255.us-west-2.compute.internal   NotReady   <none>   11s   v1.10.3
ip-10-0-15-168.us-west-2.compute.internal   Ready      <none>   20s   v1.10.3
ip-10-0-28-14.us-west-2.compute.internal    NotReady   <none>   20s   v1.10.3
ip-10-0-19-99.us-west-2.compute.internal    Ready      <none>   21s   v1.10.3
ip-10-0-31-23.us-west-2.compute.internal    Ready      <none>   21s   v1.10.3
ip-10-0-17-255.us-west-2.compute.internal   Ready      <none>   21s   v1.10.3
```

At this point you have a working EKS cluster to experiment with, but remember
you are being charged for these resources, so you might want to clean up.

#### Cleaning Up the Example

The CDK comes equipped with destroy commands:

```
cdk destroy EksWorkers
# follow the prompts
cdk destroy EksCluster
```

That should delete all the resources we created in this example.

#### CDK Issues

During the development of this example I noted a couple of issues with the CDK.
The issues are noted in code comments, but for simple tracking you can check out
these links for the issues I worked around in making this example work.

* https://github.com/awslabs/aws-cdk/issues/623

--------------------------------------------------------------------------------