├── .gitignore
├── ECS-PyPackage
│   ├── Makefile
│   ├── README.md
│   ├── infrastructure
│   │   ├── aws-unused-resources.yaml
│   │   └── pre-requistes
│   │       └── ecr-stack.yaml
│   └── src
│       ├── Dockerfile
│       ├── aws_resources.py
│       └── requirements.txt
├── ECS
│   ├── Makefile
│   ├── README.md
│   ├── infrastructure
│   │   ├── aws-unused-resources.yaml
│   │   └── pre-requistes
│   │       └── ecr-stack.yaml
│   └── src
│       ├── Dockerfile
│       ├── aws_resources.py
│       └── requirements.txt
├── Images
│   └── Unused.png
├── Lambda-PyPacakge
│   ├── Makefile
│   ├── README.md
│   ├── infrastructure
│   │   ├── aws-unused-resources.yaml
│   │   └── pre-requistes
│   │       └── artifact-store-stack.yaml
│   └── src
│       ├── aws_resources.py
│       └── requirements.txt
├── Lambda
│   ├── Makefile
│   ├── README.md
│   ├── infrastructure
│   │   ├── aws-unused-resources.yaml
│   │   └── pre-requistes
│   │       └── artifact-store-stack.yaml
│   └── src
│       ├── aws_resources.py
│       ├── lambda.zip
│       └── requirements.txt
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/ECS-PyPackage/Makefile:
--------------------------------------------------------------------------------
1 | PROFILE =
2 | ENVIRONMENT = sbx
3 | PREFIX =
4 | Sender =
5 | Receiver =
6 | Days = 14
7 | application =
8 | ECRImageURI =
9 | ECRRepoName =
10 | VPCID =
11 |
12 |
13 |
14 | .PHONY: explain
15 | explain:
16 | #
17 | #
18 | # ___ _ _______ __ __ __ ____
19 | # / | | / / ___/ / / / /___ __ __________ ____/ / / __ \___ _________ __ _______________ _____
20 | # / /| | | /| / /\__ \ / / / / __ \/ / / / ___/ _ \/ __ / / /_/ / _ \/ ___/ __ \/ / / / ___/ ___/ _ \/ ___/
21 | # / ___ | |/ |/ /___/ / / /_/ / / / / /_/ (__ ) __/ /_/ / / _, _/ __(__ ) /_/ / /_/ / / / /__/ __(__ )
22 | # /_/ |_|__/|__//____/ \____/_/ /_/\__,_/____/\___/\__,_/ /_/ |_|\___/____/\____/\__,_/_/ \___/\___/____/
23 |
24 |
25 | #
26 | ### Targets
27 | #
28 | @cat Makefile* | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
29 |
30 |
31 |
32 | .PHONY: getecruri
 33 | getecruri: ## Gets the ECR repo URI
 34 | #aws ecr describe-repositories --repository-names ${ECRRepoName} --profile $(PROFILE) | grep \"repositoryUri\" | cut -f4 -d\"
 35 | aws ecr describe-repositories --repository-names $(ENVIRONMENT)-unused-ecr --profile $(PROFILE)
36 |
37 | .PHONY: ecrlogin
 38 | ecrlogin: ## Logs in to ECR
 39 | aws ecr get-login --region eu-west-2 --no-include-email --profile $(PROFILE)
 40 | # copy the docker login command this prints and run it in your shell (AWS CLI v1; on CLI v2 use `aws ecr get-login-password`)
41 |
 42 | .PHONY: build
 43 | build: ## Builds the Docker image and pushes it to ECR
 44 | cd src && \
 45 | docker build -t awsunused:latest .
 46 | docker tag awsunused:latest $(ECRImageURI):latest
 47 | docker push $(ECRImageURI):latest
48 |
49 |
50 | .PHONY: create_stack
51 | create_stack: ## Creates a cloudformation stack in AWS
52 | make deploy ACTION=create
53 |
54 | .PHONY: update_stack
55 | update_stack: ## Updates an existing cloudformation stack in AWS
56 | make deploy ACTION=update
57 |
58 |
59 | .PHONY: deploy
60 | deploy: ## deploy the cloudformation stack in AWS
61 | aws cloudformation $(ACTION)-stack \
62 | --stack-name ${PREFIX}-$(ENVIRONMENT)-awsunusedresources \
63 | --template-body file://infrastructure/aws-unused-resources.yaml \
64 | --profile $(PROFILE) \
65 | --capabilities CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND \
66 | --parameters \
67 | ParameterKey=sender,ParameterValue=$(Sender) \
68 | ParameterKey=receiver,ParameterValue=$(Receiver) \
69 | ParameterKey=env,ParameterValue=$(ENVIRONMENT) \
70 | ParameterKey=days,ParameterValue=$(Days) \
71 | ParameterKey=application,ParameterValue=$(application) \
72 | ParameterKey=VpcId,ParameterValue=$(VPCID) \
73 | ParameterKey=AWSUnusedImage,ParameterValue=$(ECRImageURI) \
74 | ParameterKey=Prefix,ParameterValue=$(PREFIX) \
75 |
76 | .PHONY: create_cluster_ecr
77 | create_cluster_ecr: ## Creates a new ecr cloudformation stack in AWS
78 | make create_update_ecr ACTION=create
79 |
80 | .PHONY: update_cluster_ecr
81 | update_cluster_ecr: ## Updates an existing ecr cloudformation stack in AWS
82 | make create_update_ecr ACTION=update
83 |
84 | .PHONY: create_update_ecr
85 | create_update_ecr: ## Creates or updates the ecr cloudformation stack based on the action
86 | aws cloudformation $(ACTION)-stack \
87 | --stack-name ${PREFIX}-$(ENVIRONMENT)-ecr \
88 | --template-body file://infrastructure/pre-requistes/ecr-stack.yaml \
89 | --profile $(PROFILE) \
90 | --capabilities CAPABILITY_NAMED_IAM \
91 | --parameters \
92 | ParameterKey=Environment,ParameterValue=$(ENVIRONMENT) \
93 |
--------------------------------------------------------------------------------
/ECS-PyPackage/README.md:
--------------------------------------------------------------------------------
 1 | # Unused AWS Resources in AWS ECS with Python 3.6
 2 |
 3 | This Python deployment package identifies unused AWS resources using a scheduled ECS task (triggered by a CloudWatch Events rule) with a Python 3.6 runtime.
 4 |
 5 | Clone the repo, fill in your details in the `Makefile`, and then run the following commands (a worked example follows the list):
 6 |
 7 | Note: a VPC and subnet are required to run the ECS task.
8 |
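To find the VPC and subnet IDs to use (the VPC ID goes into `VPCID` in the `Makefile`, the subnet ID into the `Subnets` placeholder in `infrastructure/aws-unused-resources.yaml`), something like the following works; the profile name and VPC ID are placeholders:

```
aws ec2 describe-vpcs --query 'Vpcs[].VpcId' --profile myprofile
aws ec2 describe-subnets --filters Name=vpc-id,Values=<your-vpc-id> \
    --query 'Subnets[].SubnetId' --profile myprofile
```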
 9 | To create the ECS cluster and ECR repository
 10 |
 11 | ```make create_cluster_ecr```
 12 |
 13 | To get the ECR image URI
 14 |
 15 | ```make getecruri```
 16 |
 17 | To push the image, you first need to log in to ECR
 18 |
 19 | ```make ecrlogin```
 20 |
 21 | To build the image and push it to ECR
 22 |
 23 | ```make build```
 24 |
 25 | To create the task definition and CloudWatch Events rule
 26 |
 27 | ```make create_stack```
28 |
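Variables can be edited in the `Makefile` or overridden on the command line. A minimal end-to-end run might look like the sketch below; the profile, prefix, account ID, VPC ID and mail addresses are placeholders for your own values:

```
make create_cluster_ecr PROFILE=myprofile PREFIX=myteam ENVIRONMENT=sbx
make getecruri PROFILE=myprofile
make ecrlogin PROFILE=myprofile   # run the docker login command it prints
make build ECRImageURI=123456789012.dkr.ecr.eu-west-2.amazonaws.com/sbx-unused-ecr
make create_stack PROFILE=myprofile PREFIX=myteam ENVIRONMENT=sbx \
    Sender=ops@example.com Receiver=ops@example.com Days=14 application=myapp \
    VPCID=vpc-0123456789abcdef0 \
    ECRImageURI=123456789012.dkr.ecr.eu-west-2.amazonaws.com/sbx-unused-ecr
```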
 29 | For sending mail, I have configured the sender address in AWS SES; if you have another mail option, you can use that instead.
30 |
31 | Enjoy!
--------------------------------------------------------------------------------
/ECS-PyPackage/infrastructure/aws-unused-resources.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Transform: "AWS::Serverless-2016-10-31"
3 |
4 | Description: >
 5 | Scheduled ECS task that checks for unused AWS resources over a configurable time window.
6 |
7 | Parameters:
8 | AWSUnusedImage:
9 | Description: Docker image to use
10 | Type: String
11 |
12 | sender:
13 | Type: String
14 | Default: "xyz@xyz.com"
15 | Description: mail id of the sender.
16 |
17 | receiver:
18 | Type: String
19 | Default: "xyz@xyz.com"
20 | Description: mail id of the receiver.
21 |
22 | env:
23 | Type: String
24 | Default: sbx
25 | Description: environment.
26 | AllowedValues:
27 | - prd
28 | - tst
29 | - dev
30 | - stg
31 | - sbx
32 |
33 | days:
34 | Type: String
35 | Default: "14"
 36 | Description: number of days to look back when checking resources.
37 |
38 | application:
39 | Type: String
40 | Default: "test"
41 | Description: name of the application.
42 |
43 | VpcId:
44 | Type: String
45 | Default: "vpc-12345678901"
46 | Description: id of the vpc.
47 |
48 | Prefix:
49 | Type: String
50 | Description: The unique prefix for resources
51 |
52 | Resources:
53 | ExecutionRole:
54 | Type: AWS::IAM::Role
55 | Properties:
56 | RoleName: unusedres-ecs-execution-role
57 | AssumeRolePolicyDocument:
58 | Version: "2012-10-17"
59 | Statement:
60 | - Effect: Allow
61 | Principal:
62 | Service:
63 | - ecs-tasks.amazonaws.com
64 | Action:
65 | - sts:AssumeRole
66 | Path: "/"
67 | Policies:
68 | - PolicyName: unusedres-ecs-execution-policy
69 | PolicyDocument:
70 | Version: "2012-10-17"
71 | Statement:
72 | - Effect: Allow
73 | Action: "*"
74 | Resource: "*"
75 |
76 | EventRole:
77 | Type: AWS::IAM::Role
78 | Properties:
79 | RoleName: unusedres-event-execution-role
80 | AssumeRolePolicyDocument:
81 | Version: "2012-10-17"
82 | Statement:
83 | - Effect: Allow
84 | Principal:
85 | Service:
86 | - events.amazonaws.com
87 | Action:
88 | - sts:AssumeRole
89 | Path: "/"
90 | Policies:
91 | - PolicyName: unusedres-events-execution-policy
92 | PolicyDocument:
93 | Version: "2012-10-17"
94 | Statement:
95 | - Effect: Allow
96 | Action: "*"
97 | Resource: "*"
98 |
99 | UnusedLogsGroup:
100 | Type: AWS::Logs::LogGroup
101 | Properties:
102 | LogGroupName: !Sub ${Prefix}-${env}-unused
103 | RetentionInDays: 60
104 |
105 | UnusedTaskDefinition:
106 | Type: AWS::ECS::TaskDefinition
107 | Properties:
108 | Family: !Sub ${Prefix}-${env}-unused
109 | NetworkMode: awsvpc
110 | RequiresCompatibilities:
111 | - "FARGATE"
112 | Cpu: "256"
113 | Memory: "512"
114 | ExecutionRoleArn: !GetAtt ExecutionRole.Arn
115 | TaskRoleArn: !GetAtt ExecutionRole.Arn
116 | ContainerDefinitions:
117 | - Name: UnusedAWS
118 | Essential: true
119 | Image: !Ref AWSUnusedImage
120 | Environment:
121 | - Name: days
122 | Value: !Ref days
123 | - Name: env
124 | Value: !Ref env
125 | - Name: receiver
126 | Value: !Ref receiver
127 | - Name: sender
128 | Value: !Ref sender
129 | - Name: app
130 | Value: !Ref application
131 | LogConfiguration:
132 | LogDriver: awslogs
133 | Options:
134 | awslogs-group: !Ref "UnusedLogsGroup"
135 | awslogs-region: !Ref "AWS::Region"
136 | awslogs-stream-prefix: Unused
137 |
138 | EcsSecurityGroup:
139 | Type: AWS::EC2::SecurityGroup
140 | Properties:
141 | GroupDescription: "ECS Security Group"
142 | VpcId: !Ref VpcId
143 |
144 | EcsSecurityGroupHTTPinbound:
145 | Type: AWS::EC2::SecurityGroupIngress
146 | Properties:
147 | GroupId: !Ref EcsSecurityGroup
148 | IpProtocol: "tcp"
149 | FromPort: 80
150 | ToPort: 80
151 | CidrIp: 0.0.0.0/0
152 |
153 | UnusedResourcesEventRule:
154 | Type: AWS::Events::Rule
155 | DependsOn: UnusedTaskDefinition
156 | Properties:
 157 | Description: "Triggers once a week to check for unused resources"
158 | ScheduleExpression: "cron(30 5 ? * FRI *)"
159 | State: "ENABLED"
160 | Targets:
161 | - Arn: !Sub "arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:cluster/${env}-unused-ecs"
162 | Id: "ECSTask"
163 | RoleArn: !GetAtt EventRole.Arn
164 | EcsParameters:
165 | TaskDefinitionArn: !Ref UnusedTaskDefinition
166 | TaskCount: 1
167 | LaunchType: "FARGATE"
168 | NetworkConfiguration:
169 | AwsVpcConfiguration:
170 | AssignPublicIp: ENABLED
171 | SecurityGroups:
172 | - !Ref EcsSecurityGroup
173 | Subnets:
174 | - #subnet-id
175 |
--------------------------------------------------------------------------------
/ECS-PyPackage/infrastructure/pre-requistes/ecr-stack.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
 2 | Description: Creates the ECR repository that stores the image and the ECS cluster that runs the task
3 |
4 | Parameters:
5 | Environment:
6 | Type: String
7 | Description: The environment that the ECR is deployed to
8 | Default: sbx
9 | AllowedValues:
10 | - prd
11 | - tst
12 | - dev
13 | - stg
14 | - sbx
15 |
16 | Resources:
17 | UnusedECR:
18 | Type: AWS::ECR::Repository
19 | Properties:
20 | RepositoryName: !Sub ${Environment}-unused-ecr
21 | RepositoryPolicyText: |
22 | {
23 | "Version": "2008-10-17",
24 | "Statement": [
25 | {
26 | "Effect": "Allow",
27 | "Principal": "*",
28 | "Action": [
29 | "ecr:GetDownloadUrlForLayer",
30 | "ecr:BatchGetImage",
31 | "ecr:BatchCheckLayerAvailability",
32 | "ecr:GetAuthorizationToken",
33 | "ecr:PutImage",
34 | "ecr:InitiateLayerUpload",
35 | "ecr:UploadLayerPart",
36 | "ecr:CompleteLayerUpload"
37 | ]
38 | }
39 | ]
40 | }
41 |
42 | ECSCluster:
43 | Type: AWS::ECS::Cluster
44 | Properties:
45 | ClusterName: !Sub ${Environment}-unused-ecs
46 |
47 | Outputs:
48 | UnusedECRRepo:
49 | Description: Unused ECR Repo
50 | Value: !GetAtt UnusedECR.Arn
51 | Export:
52 | Name: !Sub ${Environment}-Unused-ECR-Repo
53 |
54 | ECSClusterName:
55 | Description: Name for ECSCluster
56 | Value: !Ref ECSCluster
57 | Export:
58 | Name: !Sub ${Environment}-unused-ecs
59 |
--------------------------------------------------------------------------------
/ECS-PyPackage/src/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3
2 |
3 |
4 | COPY requirements.txt /aws-unused/
5 | RUN pip3 install -r /aws-unused/requirements.txt
6 |
7 | COPY *.py /aws-unused/
8 | RUN chmod +x /aws-unused/aws_resources.py
9 |
10 | CMD ["python", "/aws-unused/aws_resources.py" ]
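To try the container locally before scheduling it on ECS, you can pass the same environment variables that the task definition sets and mount local AWS credentials; a rough sketch (all values are placeholders):

```
docker build -t awsunused:latest .
docker run --rm \
    -e days=14 -e env=sbx -e app=myapp \
    -e sender=ops@example.com -e receiver=ops@example.com \
    -v ~/.aws:/root/.aws:ro -e AWS_PROFILE=myprofile \
    awsunused:latest
```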
--------------------------------------------------------------------------------
/ECS-PyPackage/src/aws_resources.py:
--------------------------------------------------------------------------------
1 | import json
2 | import boto3
3 | import os
4 | #from datetime import date, timedelta
5 | import datetime
 6 | from datetime import timedelta
7 | from botocore.exceptions import ClientError
8 | import re
9 | from awsunusedresources import unused_res
10 | import sys
11 |
12 |
13 | def main():
14 | print('Finding Unused Resources in AWS:')
15 | try:
16 | unused_res(os.environ['days'], os.environ['sender'],
17 | os.environ['receiver'], os.environ['app'], os.environ['env'])
 18 | except Exception as e:
 19 | print(f"error in execution: {e}")
20 |
21 | return {
22 | 'statusCode': 200,
23 | 'body': json.dumps("success")
24 | }
25 |
26 |
27 | if __name__ == '__main__':
28 | sys.exit(main())
29 |
--------------------------------------------------------------------------------
/ECS-PyPackage/src/requirements.txt:
--------------------------------------------------------------------------------
1 | awsunusedresources==0.1.6
--------------------------------------------------------------------------------
/ECS/Makefile:
--------------------------------------------------------------------------------
1 | PROFILE =
2 | ENVIRONMENT = sbx
3 | PREFIX =
4 | Sender =
5 | Receiver =
6 | Days = 14
7 | application =
8 | ECRImageURI =
9 | ECRRepoName =
10 | VPCID =
11 |
12 |
13 |
14 | .PHONY: explain
15 | explain:
16 | #
17 | #
18 | # ___ _ _______ __ __ __ ____
19 | # / | | / / ___/ / / / /___ __ __________ ____/ / / __ \___ _________ __ _______________ _____
20 | # / /| | | /| / /\__ \ / / / / __ \/ / / / ___/ _ \/ __ / / /_/ / _ \/ ___/ __ \/ / / / ___/ ___/ _ \/ ___/
21 | # / ___ | |/ |/ /___/ / / /_/ / / / / /_/ (__ ) __/ /_/ / / _, _/ __(__ ) /_/ / /_/ / / / /__/ __(__ )
22 | # /_/ |_|__/|__//____/ \____/_/ /_/\__,_/____/\___/\__,_/ /_/ |_|\___/____/\____/\__,_/_/ \___/\___/____/
23 |
24 |
25 | #
26 | ### Targets
27 | #
28 | @cat Makefile* | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
29 |
30 |
31 |
32 | .PHONY: getecruri
 33 | getecruri: ## Gets the ECR repo URI
 34 | #aws ecr describe-repositories --repository-names ${ECRRepoName} --profile $(PROFILE) | grep \"repositoryUri\" | cut -f4 -d\"
 35 | aws ecr describe-repositories --repository-names $(ENVIRONMENT)-unused-ecr --profile $(PROFILE)
36 |
37 | .PHONY: ecrlogin
 38 | ecrlogin: ## Logs in to ECR
 39 | aws ecr get-login --region eu-west-2 --no-include-email --profile $(PROFILE)
 40 | # copy the docker login command this prints and run it in your shell (AWS CLI v1; on CLI v2 use `aws ecr get-login-password`)
41 |
 42 | .PHONY: build
 43 | build: ## Builds the Docker image and pushes it to ECR
 44 | cd src && \
 45 | docker build -t awsunused:latest .
 46 | docker tag awsunused:latest $(ECRImageURI):latest
 47 | docker push $(ECRImageURI):latest
48 |
49 |
50 | .PHONY: create_stack
51 | create_stack: ## Creates a cloudformation stack in AWS
52 | make deploy ACTION=create
53 |
54 | .PHONY: update_stack
55 | update_stack: ## Updates an existing cloudformation stack in AWS
56 | make deploy ACTION=update
57 |
58 |
59 | .PHONY: deploy
60 | deploy: ## deploy the cloudformation stack in AWS
61 | aws cloudformation $(ACTION)-stack \
62 | --stack-name ${PREFIX}-$(ENVIRONMENT)-awsunusedresources \
63 | --template-body file://infrastructure/aws-unused-resources.yaml \
64 | --profile $(PROFILE) \
65 | --capabilities CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND \
66 | --parameters \
67 | ParameterKey=sender,ParameterValue=$(Sender) \
68 | ParameterKey=receiver,ParameterValue=$(Receiver) \
69 | ParameterKey=env,ParameterValue=$(ENVIRONMENT) \
70 | ParameterKey=days,ParameterValue=$(Days) \
71 | ParameterKey=application,ParameterValue=$(application) \
72 | ParameterKey=VpcId,ParameterValue=$(VPCID) \
73 | ParameterKey=AWSUnusedImage,ParameterValue=$(ECRImageURI) \
74 | ParameterKey=Prefix,ParameterValue=$(PREFIX) \
75 |
76 | .PHONY: create_cluster_ecr
77 | create_cluster_ecr: ## Creates a new ecr cloudformation stack in AWS
78 | make create_update_ecr ACTION=create
79 |
80 | .PHONY: update_cluster_ecr
81 | update_cluster_ecr: ## Updates an existing ecr cloudformation stack in AWS
82 | make create_update_ecr ACTION=update
83 |
84 | .PHONY: create_update_ecr
85 | create_update_ecr: ## Creates or updates the ecr cloudformation stack based on the action
86 | aws cloudformation $(ACTION)-stack \
87 | --stack-name ${PREFIX}-$(ENVIRONMENT)-ecr \
88 | --template-body file://infrastructure/pre-requistes/ecr-stack.yaml \
89 | --profile $(PROFILE) \
90 | --capabilities CAPABILITY_NAMED_IAM \
91 | --parameters \
92 | ParameterKey=Environment,ParameterValue=$(ENVIRONMENT) \
93 |
--------------------------------------------------------------------------------
/ECS/README.md:
--------------------------------------------------------------------------------
 1 | # Unused AWS Resources in AWS ECS with Python 3.6
 2 |
 3 | This Python deployment package identifies unused AWS resources using a scheduled ECS task (triggered by a CloudWatch Events rule) with a Python 3.6 runtime.
 4 |
 5 | Clone the repo, fill in your details in the `Makefile`, and then run the following commands (the manual Docker equivalent of the image steps is shown after the list):
 6 |
 7 | Note: a VPC and subnet are required to run the ECS task.
8 |
 9 | To create the ECS cluster and ECR repository
 10 |
 11 | ```make create_cluster_ecr```
 12 |
 13 | To get the ECR image URI
 14 |
 15 | ```make getecruri```
 16 |
 17 | To push the image, you first need to log in to ECR
 18 |
 19 | ```make ecrlogin```
 20 |
 21 | To build the image and push it to ECR
 22 |
 23 | ```make build```
 24 |
 25 | To create the task definition and CloudWatch Events rule
 26 |
 27 | ```make create_stack```
28 |
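If you prefer to run the Docker steps by hand rather than through `make ecrlogin` and `make build`, the equivalent commands look roughly like this (account ID, region and repository name are placeholders; `aws ecr get-login-password` is the AWS CLI v2 form of the login step):

```
aws ecr get-login-password --region eu-west-2 --profile myprofile \
    | docker login --username AWS --password-stdin 123456789012.dkr.ecr.eu-west-2.amazonaws.com
cd src
docker build -t awsunused:latest .
docker tag awsunused:latest 123456789012.dkr.ecr.eu-west-2.amazonaws.com/sbx-unused-ecr:latest
docker push 123456789012.dkr.ecr.eu-west-2.amazonaws.com/sbx-unused-ecr:latest
```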
 29 | For sending mail, I have configured the sender address in AWS SES; if you have another mail option, you can use that instead.
30 |
31 | Enjoy!
--------------------------------------------------------------------------------
/ECS/infrastructure/aws-unused-resources.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Transform: "AWS::Serverless-2016-10-31"
3 |
4 | Description: >
 5 | Scheduled ECS task that checks for unused AWS resources over a configurable time window.
6 |
7 | Parameters:
8 | AWSUnusedImage:
9 | Description: Docker image to use
10 | Type: String
11 |
12 | sender:
13 | Type: String
14 | Default: "xyz@xyz.com"
15 | Description: mail id of the sender.
16 |
17 | receiver:
18 | Type: String
19 | Default: "xyz@xyz.com"
20 | Description: mail id of the receiver.
21 |
22 | env:
23 | Type: String
24 | Default: sbx
25 | Description: environment.
26 | AllowedValues:
27 | - prd
28 | - tst
29 | - dev
30 | - stg
31 | - sbx
32 |
33 | days:
34 | Type: String
35 | Default: "14"
 36 | Description: number of days to look back when checking resources.
37 |
38 | application:
39 | Type: String
40 | Default: "test"
41 | Description: name of the application.
42 |
43 | VpcId:
44 | Type: String
45 | Default: "vpc-12345678901"
46 | Description: id of the vpc.
47 |
48 | Prefix:
49 | Type: String
50 | Description: The unique prefix for resources
51 |
52 | Resources:
53 | ExecutionRole:
54 | Type: AWS::IAM::Role
55 | Properties:
56 | RoleName: unusedres-ecs-execution-role
57 | AssumeRolePolicyDocument:
58 | Version: "2012-10-17"
59 | Statement:
60 | - Effect: Allow
61 | Principal:
62 | Service:
63 | - ecs-tasks.amazonaws.com
64 | Action:
65 | - sts:AssumeRole
66 | Path: "/"
67 | Policies:
68 | - PolicyName: unusedres-ecs-execution-policy
69 | PolicyDocument:
70 | Version: "2012-10-17"
71 | Statement:
72 | - Effect: Allow
73 | Action: "*"
74 | Resource: "*"
75 |
76 | EventRole:
77 | Type: AWS::IAM::Role
78 | Properties:
79 | RoleName: unusedres-event-execution-role
80 | AssumeRolePolicyDocument:
81 | Version: "2012-10-17"
82 | Statement:
83 | - Effect: Allow
84 | Principal:
85 | Service:
86 | - events.amazonaws.com
87 | Action:
88 | - sts:AssumeRole
89 | Path: "/"
90 | Policies:
91 | - PolicyName: unusedres-events-execution-policy
92 | PolicyDocument:
93 | Version: "2012-10-17"
94 | Statement:
95 | - Effect: Allow
96 | Action: "*"
97 | Resource: "*"
98 |
99 | UnusedLogsGroup:
100 | Type: AWS::Logs::LogGroup
101 | Properties:
102 | LogGroupName: !Sub ${Prefix}-${env}-unused
103 | RetentionInDays: 60
104 |
105 | UnusedTaskDefinition:
106 | Type: AWS::ECS::TaskDefinition
107 | Properties:
108 | Family: !Sub ${Prefix}-${env}-unused
109 | NetworkMode: awsvpc
110 | RequiresCompatibilities:
111 | - "FARGATE"
112 | Cpu: "256"
113 | Memory: "512"
114 | ExecutionRoleArn: !GetAtt ExecutionRole.Arn
115 | TaskRoleArn: !GetAtt ExecutionRole.Arn
116 | ContainerDefinitions:
117 | - Name: UnusedAWS
118 | Essential: true
119 | Image: !Ref AWSUnusedImage
120 | Environment:
121 | - Name: days
122 | Value: !Ref days
123 | - Name: env
124 | Value: !Ref env
125 | - Name: receiver
126 | Value: !Ref receiver
127 | - Name: sender
128 | Value: !Ref sender
129 | - Name: app
130 | Value: !Ref application
131 | LogConfiguration:
132 | LogDriver: awslogs
133 | Options:
134 | awslogs-group: !Ref "UnusedLogsGroup"
135 | awslogs-region: !Ref "AWS::Region"
136 | awslogs-stream-prefix: Unused
137 |
138 | EcsSecurityGroup:
139 | Type: AWS::EC2::SecurityGroup
140 | Properties:
141 | GroupDescription: "ECS Security Group"
142 | VpcId: !Ref VpcId
143 |
144 | EcsSecurityGroupHTTPinbound:
145 | Type: AWS::EC2::SecurityGroupIngress
146 | Properties:
147 | GroupId: !Ref EcsSecurityGroup
148 | IpProtocol: "tcp"
149 | FromPort: 80
150 | ToPort: 80
151 | CidrIp: 0.0.0.0/0
152 |
153 | UnusedResourcesEventRule:
154 | Type: AWS::Events::Rule
155 | DependsOn: UnusedTaskDefinition
156 | Properties:
 157 | Description: "Triggers once a week to check for unused resources"
158 | ScheduleExpression: "cron(30 5 ? * FRI *)"
159 | State: "ENABLED"
160 | Targets:
161 | - Arn: !Sub "arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:cluster/${env}-unused-ecs"
162 | Id: "ECSTask"
163 | RoleArn: !GetAtt EventRole.Arn
164 | EcsParameters:
165 | TaskDefinitionArn: !Ref UnusedTaskDefinition
166 | TaskCount: 1
167 | LaunchType: "FARGATE"
168 | NetworkConfiguration:
169 | AwsVpcConfiguration:
170 | AssignPublicIp: ENABLED
171 | SecurityGroups:
172 | - !Ref EcsSecurityGroup
173 | Subnets:
174 | - #subnet-id
175 |
--------------------------------------------------------------------------------
/ECS/infrastructure/pre-requistes/ecr-stack.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
 2 | Description: Creates the ECR repository that stores the image and the ECS cluster that runs the task
3 |
4 | Parameters:
5 | Environment:
6 | Type: String
7 | Description: The environment that the ECR is deployed to
8 | Default: sbx
9 | AllowedValues:
10 | - prd
11 | - tst
12 | - dev
13 | - stg
14 | - sbx
15 |
16 | Resources:
17 | UnusedECR:
18 | Type: AWS::ECR::Repository
19 | Properties:
20 | RepositoryName: !Sub ${Environment}-unused-ecr
21 | RepositoryPolicyText: |
22 | {
23 | "Version": "2008-10-17",
24 | "Statement": [
25 | {
26 | "Effect": "Allow",
27 | "Principal": "*",
28 | "Action": [
29 | "ecr:GetDownloadUrlForLayer",
30 | "ecr:BatchGetImage",
31 | "ecr:BatchCheckLayerAvailability",
32 | "ecr:GetAuthorizationToken",
33 | "ecr:PutImage",
34 | "ecr:InitiateLayerUpload",
35 | "ecr:UploadLayerPart",
36 | "ecr:CompleteLayerUpload"
37 | ]
38 | }
39 | ]
40 | }
41 |
42 | ECSCluster:
43 | Type: AWS::ECS::Cluster
44 | Properties:
45 | ClusterName: !Sub ${Environment}-unused-ecs
46 |
47 | Outputs:
48 | UnusedECRRepo:
49 | Description: Unused ECR Repo
50 | Value: !GetAtt UnusedECR.Arn
51 | Export:
52 | Name: !Sub ${Environment}-Unused-ECR-Repo
53 |
54 | ECSClusterName:
55 | Description: Name for ECSCluster
56 | Value: !Ref ECSCluster
57 | Export:
58 | Name: !Sub ${Environment}-unused-ecs
59 |
--------------------------------------------------------------------------------
/ECS/src/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3
2 |
3 |
4 | COPY requirements.txt /aws-unused/
5 | RUN pip3 install -r /aws-unused/requirements.txt
6 |
7 | COPY *.py /aws-unused/
8 | RUN chmod +x /aws-unused/aws_resources.py
9 |
10 | CMD ["python", "/aws-unused/aws_resources.py" ]
--------------------------------------------------------------------------------
/ECS/src/aws_resources.py:
--------------------------------------------------------------------------------
1 | import json
2 | import boto3
3 | import os
4 | #from datetime import date, timedelta
5 | import datetime
 6 | from datetime import timedelta
7 | import pandas as pd
8 | from botocore.exceptions import ClientError
9 | import re
10 | import numpy as np
11 | import sys
12 |
13 |
 14 | def data_table(df,column):
 15 | result_table = '<table><tr><th>Resource Type</th><th>Resource Name / Id</th><th>{}</th></tr>'  # HTML markup reconstructed (lost in extraction)
 16 | result_table = result_table.format(column)
 17 | #print(column)
 18 | #print(result_table)
 19 | for index, row in df.iterrows():
 20 | result_table += '<tr><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
 21 | row['resourceType'], row['resourceName'], row['reason'])
 22 | result_table += '</table>'
 23 | return result_table
24 |
25 |
26 | def getAvailableVolumes():
27 | # returns list of volumes in 'available' state
28 | ec2 = boto3.client('ec2')
29 | availableVolList = []
30 | filterList = [{'Name': 'status', 'Values': ['available']}]
31 | response = ec2.describe_volumes(Filters=filterList, MaxResults=500)
32 | if(len(response['Volumes'])> 0):
33 | for v in response['Volumes']:
34 | if(len(v['Attachments'])) == 0:
35 | availableVolList.append(v['VolumeId'])
36 | while('NextToken' in response):
37 | response = ec2.describe_volumes(
38 | Filters=filterList, MaxResults=500, NextToken=response['NextToken'])
39 | for v in response['Volumes']:
40 | if(len(v['Attachments'])) == 0:
41 | availableVolList.append(v['VolumeId'])
42 | return availableVolList
43 |
44 | def getLogsWithNoRetention():
45 | # returns list of log groups with no retention days
46 | cw = boto3.client('logs')
47 | loggroups = ['/aws','API-Gateway','RDSOSMetrics','test',os.environ['env'],'/ecs']
48 | logswithNoRetention = []
49 | for groupname in loggroups:
50 | cwresponse = cw.describe_log_groups(logGroupNamePrefix=groupname)
51 | if(len(cwresponse['logGroups']) > 0):
52 | for v in cwresponse['logGroups']:
53 | if "retentionInDays" not in v:
54 | logswithNoRetention.append(v['logGroupName'])
55 | return logswithNoRetention
56 |
57 | def getNotAssociatedEIP():
58 | # returns list of EIP in 'not used' state
59 | ec2 = boto3.client('ec2')
60 | availableEIPList = []
61 | eipresponse = ec2.describe_addresses()
62 | if(len(eipresponse['Addresses']) > 0):
63 | for address in eipresponse['Addresses']:
64 | if "AssociationId" not in address:
65 | availableEIPList.append(address['AllocationId'])
66 | return availableEIPList
67 |
68 | def getUnusedRDSSnapshot(startdate):
69 | # returns list of snapshots in 'not used' state
70 | rds = boto3.client('rds')
71 | unUsedRDSsnapshotlList = []
72 | rdsresponse = rds.describe_db_cluster_snapshots()
73 | if(len(rdsresponse['DBClusterSnapshots']) > 0):
74 | for snapshot in rdsresponse['DBClusterSnapshots']:
75 | if(snapshot['SnapshotCreateTime'].replace(tzinfo=None) < startdate):
76 | unUsedRDSsnapshotlList.append(snapshot['DBClusterSnapshotIdentifier'])
 77 | while('Marker' in rdsresponse):
 78 | rdsresponse = rds.describe_db_cluster_snapshots(Marker = rdsresponse['Marker'])
 79 | for snapshot in rdsresponse['DBClusterSnapshots']:
 80 | if(snapshot['SnapshotCreateTime'].replace(tzinfo=None) < startdate): unUsedRDSsnapshotlList.append(snapshot['DBClusterSnapshotIdentifier'])
81 | return unUsedRDSsnapshotlList
82 |
83 | def getUnusedEBSSnapshot(startdate):
84 | # returns list of snapshots in 'not used' state
85 | ebs = boto3.client('ec2')
86 | unUsedEBSsnapshotlList = []
87 | ebsresponse = ebs.describe_snapshots()
88 | if(len(ebsresponse['Snapshots']) > 0):
89 | for snapshot in ebsresponse['Snapshots']:
90 | if(snapshot['StartTime'].replace(tzinfo=None) < startdate):
91 | unUsedEBSsnapshotlList.append(snapshot['VolumeId'])
 92 | while('NextToken' in ebsresponse):
 93 | ebsresponse = ebs.describe_snapshots(NextToken = ebsresponse['NextToken'])
 94 | for snapshot in ebsresponse['Snapshots']:
 95 | if(snapshot['StartTime'].replace(tzinfo=None) < startdate): unUsedEBSsnapshotlList.append(snapshot['VolumeId'])
96 | return unUsedEBSsnapshotlList
97 |
98 | def getUnusedES():
 99 | # returns list of Elasticsearch domains that appear underutilized (low average CPU)
100 | es = boto3.client('es')
101 | escw = boto3.client('cloudwatch')
102 | availableDomainNameList = []
103 | unUsedDomainNameList = []
104 | esresponse = es.list_domain_names()
105 | if(len(esresponse['DomainNames']) > 0):
106 | for data in esresponse['DomainNames']:
107 | if "DomainName" in data:
108 | availableDomainNameList.append(data['DomainName'])
109 |
110 | if(len(availableDomainNameList) > 0 ):
111 | for domainname in availableDomainNameList:
112 | MetricName = ["CPUUtilization"]
113 | for metric in MetricName:
114 | instancemetricresponse = escw.get_metric_statistics(
115 | Namespace="AWS/ES",
116 | MetricName=metric,
117 | Dimensions=[
118 | {'Name': 'DomainName',
119 | 'Value': domainname}
120 | ],
121 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
122 | EndTime=datetime.datetime.utcnow(),
123 | Statistics=["Average"],
124 | Period=3600 #604800
125 | )
126 | # print(instancemetricresponse)
127 | # metricdata.append(instancemetricresponse)
128 | average = 0
129 | #print(len(instancemetricresponse['Datapoints']))
130 | for r in instancemetricresponse['Datapoints']:
131 | average = average + r['Average']
132 | #print("average: " ,average)
133 | # print(average)
134 | if (round(average,2)) < 60:
135 | unUsedDomainNameList.append(domainname)
136 | return unUsedDomainNameList
137 |
138 | def getUnusedECS():
 139 | # returns list of ECS services that appear underutilized (low average CPU)
140 | ecs = boto3.client('ecs')
141 | ecscw = boto3.client('cloudwatch')
142 | availableserviceList = []
143 | unUsedECSServiceList = []
144 | ecsresponse = ecs.list_clusters()
145 | if(len(ecsresponse['clusterArns']) > 0):
146 | for cluster in ecsresponse['clusterArns']:
147 | ecsserviceresponse = ecs.list_services(cluster=cluster.split(":")[5].split("/",1)[1])
148 | if (len(ecsserviceresponse['serviceArns'])) > 0:
149 | for service in ecsserviceresponse['serviceArns']:
150 | availableserviceList.append(service.split(":")[5].split("/",1)[1])
151 |
152 | if(len(availableserviceList) > 0 ):
153 | for servicename in availableserviceList:
154 | MetricName = ["CPUUtilization"]
155 | for metric in MetricName:
156 | instancemetricresponse = ecscw.get_metric_statistics(
157 | Namespace="AWS/ECS",
158 | MetricName=metric,
159 | Dimensions=[
160 | {'Name': 'ServiceName',
161 | 'Value': servicename}
162 | ],
163 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
164 | EndTime=datetime.datetime.utcnow(),
165 | Statistics=["Average"],
166 | Period=3600 #604800
167 | )
168 | # print(instancemetricresponse)
169 | # metricdata.append(instancemetricresponse)
170 | average = 0
171 | #print(len(instancemetricresponse['Datapoints']))
172 | for r in instancemetricresponse['Datapoints']:
173 | average = average + r['Average']
174 | #print("average: " ,average)
175 | # print(average)
176 | if (round(average,2)) < 60:
177 | unUsedECSServiceList.append(servicename)
178 | return unUsedECSServiceList
179 |
180 |
181 | def getNotUsedSG():
182 | # returns list of SG in 'not used' state
183 | ec2 = boto3.client('ec2')
184 | elbclient = boto3.client('elbv2')
185 | rdsclient = boto3.client('rds')
186 | allgroups = []
187 | groups = ec2.describe_security_groups()
188 | for groupobj in groups['SecurityGroups']:
189 | allgroups.append(groupobj['GroupName'])
190 |
191 | # Get all instances security groups (EC2)
192 | groups_in_use = []
193 | reservations = ec2.describe_instances()
 194 | for r in reservations['Reservations']:
 195 | for ec2_group_list in r['Groups']:
 196 | # print(ec2_group_list)
 197 | # each entry is a dict holding GroupName and GroupId
 198 | if ec2_group_list['GroupName'] not in groups_in_use:
 199 | groups_in_use.append(ec2_group_list['GroupName'])
200 |
201 | # Get all security groups from ELB
202 | load_balancers = elbclient.describe_load_balancers()
 203 | for load_balancer in load_balancers['LoadBalancers']:
204 | if 'SecurityGroups' in load_balancer:
205 | for elb_group_list in load_balancer['SecurityGroups']:
206 | # print(elb_group_list)
207 | security_group = ec2.describe_security_groups(
208 | GroupIds=[elb_group_list])
209 | for groupobj in security_group['SecurityGroups']:
210 | if groupobj['GroupName'] not in groups_in_use:
211 | groups_in_use.append(groupobj['GroupName'])
212 |
 213 | # Get all security groups from Network Interfaces
214 | niresponse = ec2.describe_network_interfaces()
215 | for network_interface in niresponse['NetworkInterfaces']:
216 | # print(network_interface)
217 | if 'Groups' in network_interface:
218 | for ni_group_list in network_interface['Groups']:
219 | # print(ni_group_list['GroupName'])
220 | if ni_group_list['GroupName'] not in groups_in_use:
221 | groups_in_use.append(ni_group_list['GroupName'])
222 |
223 | # Get all security groups from RDS
224 | dbresponse = rdsclient.describe_db_instances()
225 | for db in dbresponse['DBInstances']:
226 | if 'VpcSecurityGroups' in db:
227 | for db_group_list in db['VpcSecurityGroups']:
228 | # print(db_group_list)
229 | db_security_group = ec2.describe_security_groups(
230 | GroupIds=[db_group_list['VpcSecurityGroupId']])
231 | # print(db_security_group)
232 | for groupobj in db_security_group['SecurityGroups']:
233 | # print(groupobj['GroupName'])
234 | if groupobj['GroupName'] not in groups_in_use:
235 | groups_in_use.append(groupobj['GroupName'])
236 |
237 | unnused_SG = []
238 | for group in allgroups:
239 | if group not in groups_in_use:
240 | unnused_SG.append(group)
241 | return unnused_SG
242 |
243 |
244 | def main():
245 | print('Enumerating all resources in the following services:')
246 | startTime = datetime.datetime.utcnow(
247 | ) - timedelta(days=int(os.environ['days']))
248 | endTime = datetime.datetime.utcnow()
 249 | seconds_in_one_day = 1209600 # 14 days in seconds (single datapoint per query window); 86400 would be one day
250 |
251 | configclient = boto3.client('config')
252 | resources = ['AWS::EC2::EIP', 'AWS::EC2::Host', 'AWS::EC2::Instance',
253 | 'AWS::EC2::Volume',
254 | 'AWS::EC2::VPC',
255 | 'AWS::EC2::NatGateway', 'AWS::ElasticLoadBalancingV2::LoadBalancer', 'AWS::ACM::Certificate',
256 | 'AWS::RDS::DBInstance', 'AWS::RDS::DBSnapshot',
257 | 'AWS::RDS::DBCluster', 'AWS::RDS::DBClusterSnapshot', 'AWS::S3::Bucket',
258 | 'AWS::CloudWatch::Alarm',
259 | 'AWS::CloudFormation::Stack', 'AWS::ElasticLoadBalancing::LoadBalancer', 'AWS::AutoScaling::AutoScalingGroup',
260 | 'AWS::AutoScaling::LaunchConfiguration', 'AWS::AutoScaling::ScalingPolicy',
261 | 'AWS::DynamoDB::Table', 'AWS::CodeBuild::Project', 'AWS::WAF::RateBasedRule', 'AWS::WAF::Rule', 'AWS::WAF::RuleGroup',
262 | 'AWS::WAF::WebACL', 'AWS::WAFRegional::RateBasedRule', 'AWS::WAFRegional::Rule', 'AWS::WAFRegional::RuleGroup',
263 | 'AWS::WAFRegional::WebACL', 'AWS::CloudFront::Distribution', 'AWS::CloudFront::StreamingDistribution',
264 | 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage',
265 | 'AWS::ApiGateway::RestApi', 'AWS::ApiGatewayV2::Stage', 'AWS::ApiGatewayV2::Api',
266 | 'AWS::CodePipeline::Pipeline',
267 | 'AWS::SQS::Queue', 'AWS::KMS::Key', 'AWS::SecretsManager::Secret',
268 | 'AWS::SNS::Topic', ]
269 | datas = []
270 | for resource in resources:
271 | response = configclient.list_discovered_resources(
272 | resourceType=resource
273 | )
274 | datas.append(response['resourceIdentifiers'])
275 |
276 | cloudwatchclient = boto3.client('cloudwatch')
277 |
278 | resourceType = []
279 | resourceName = []
280 | reason = []
281 | count = []
282 |
283 | cwresourceType = []
284 | cwresourceName = []
285 | cwreason = []
286 |
287 | lmdresourceType = []
288 | lmdresourceName = []
289 | lmdpackagesize=[]
290 |
291 | s3resourceType = []
292 | s3resourceName = []
293 | s3size=[]
294 |
295 | # EBS
296 | ebsVolumes = getAvailableVolumes()
297 | # print(ebsVolumes)
298 | if(len(ebsVolumes) > 0):
299 | for volumes in ebsVolumes:
300 | # print(volumes)
301 | resourceType.append("AWS::EC2::Volume")
302 | resourceName.append(volumes)
303 | reason.append("EC2 Volume is Not Used")
304 | count.append(1)
305 |
306 | # EIP
307 | eipData = getNotAssociatedEIP()
308 | # print(eipData)
309 | if(len(eipData) > 0):
310 | for address in eipData:
311 | # print(volumes)
312 | resourceType.append("AWS::EC2::EIP")
313 | resourceName.append(address)
314 | reason.append("EIP is Not Used")
315 | count.append(1)
316 |
317 | # RDS Snapshots
318 | rdsData = getUnusedRDSSnapshot(startTime)
319 | # print(eipData)
320 | if(len(rdsData) > 0):
321 | for data in rdsData:
322 | # print(volumes)
323 | resourceType.append("AWS::RDS::SNAPSHOT")
324 | resourceName.append(data)
325 | reason.append("Long Back created RDS Cluster SnapShot is still available")
326 | count.append(1)
327 |
328 | # Elastic Search
329 | esData = getUnusedES()
330 | # print(eipData)
331 | if(len(esData) > 0):
332 | for data in esData:
333 | # print(volumes)
334 | resourceType.append("AWS::Elasticsearch::Domain")
335 | resourceName.append(data)
336 | reason.append("Elastic Search domain is underutilized")
337 | count.append(1)
338 |
339 | # Elastic Container Service
340 | ecsData = getUnusedECS()
341 | # print(eipData)
342 | if(len(ecsData) > 0):
343 | for data in ecsData:
344 | # print(volumes)
345 | resourceType.append("AWS::ECS::Service")
346 | resourceName.append(data)
347 | reason.append("Elastic Container Service is underutilized")
348 | count.append(1)
349 |
350 | # SG
351 | sgData = getNotUsedSG()
352 | # print(sgData)
353 | if(len(sgData) > 0):
354 | for sggroup in sgData:
355 | # print(sggroup)
356 | resourceType.append("AWS::EC2::SecurityGroup")
357 | resourceName.append(sggroup)
358 | reason.append("Security Group is Not Used")
359 | count.append(1)
360 |
361 | # CloudWatch Log Groups
362 | cwData = getLogsWithNoRetention()
363 | # print(sgData)
364 | if(len(cwData) > 0):
365 | for cwgroup in cwData:
366 | # print(sggroup)
367 | cwresourceType.append("AWS::Logs::LogGroup")
368 | cwresourceName.append(cwgroup)
369 | cwreason.append("Retention Days is not specified")
370 |
371 | for data in datas:
372 | for getvalue in data:
373 | if getvalue["resourceType"] == "AWS::DynamoDB::Table":
374 | # print(getvalue["resourceId"])
375 | MetricName = ["ConsumedReadCapacityUnits",
376 | "ConsumedWriteCapacityUnits"]
377 | for metric in MetricName:
378 | metricresponse = cloudwatchclient.get_metric_statistics(
379 | Namespace="AWS/DynamoDB",
380 | MetricName=metric,
381 | Dimensions=[
382 | {'Name': 'TableName',
383 | 'Value': getvalue["resourceId"]}
384 | ],
385 | StartTime=startTime,
386 | EndTime=endTime,
387 | Statistics=["Sum"],
388 | Period=seconds_in_one_day
389 | )
390 | # print(metricresponse)
391 | # metricdata.append(metricresponse)
392 | for r in metricresponse['Datapoints']:
393 | if (r['Sum']) == 0:
394 | #print("Not usable")
395 | resourceType.append(getvalue["resourceType"])
396 | resourceName.append(getvalue["resourceId"])
397 | count.append(1)
398 | if metric == "ConsumedReadCapacityUnits":
399 | reason.append("Read capacity is not used")
400 | else:
401 | reason.append("Write capacity is not used")
402 |
403 | if getvalue["resourceType"] == "AWS::ElasticLoadBalancingV2::LoadBalancer" and getvalue["resourceId"].split(":")[5].split("/", 1)[1].split("/")[0] == "net":
404 | # print(getvalue["resourceId"])
405 | # ActiveFlowCount
406 | MetricName = ["NewFlowCount", "ActiveFlowCount"]
407 | for metric in MetricName:
408 | lbmetricresponse = cloudwatchclient.get_metric_statistics(
409 | Namespace="AWS/NetworkELB",
410 | MetricName=metric,
411 | Dimensions=[
412 | {'Name': 'LoadBalancer',
413 | 'Value': getvalue["resourceId"].split(":")[5].split("/", 1)[1]}
414 | ],
415 | StartTime=startTime,
416 | EndTime=endTime,
417 | Statistics=["Sum"],
418 | Period=seconds_in_one_day
419 | )
420 | # print(lbmetricresponse)
421 | for r in lbmetricresponse['Datapoints']:
422 | if (r['Sum']) == 0:
423 | #print("Not usable")
424 | resourceType.append(getvalue["resourceType"])
425 | resourceName.append(getvalue["resourceId"].split(
426 | ":")[5].split("/", 1)[1].split("/")[1])
427 | reason.append("Network LoadBalancer is not used")
428 | count.append(1)
429 |
430 | if getvalue["resourceType"] == "AWS::ElasticLoadBalancingV2::LoadBalancer" and getvalue["resourceId"].split(":")[5].split("/", 1)[1].split("/")[0] == "app":
431 | # print(getvalue["resourceId"])
432 | MetricName = ["RequestCount", "ConsumedLCUs"]
433 | for metric in MetricName:
434 | albmetricresponse = cloudwatchclient.get_metric_statistics(
435 | Namespace="AWS/ApplicationELB",
436 | MetricName=metric,
437 | Dimensions=[
438 | {'Name': 'LoadBalancer',
439 | 'Value': getvalue["resourceId"].split(":")[5].split("/", 1)[1]}
440 | ],
441 | StartTime=startTime,
442 | EndTime=endTime,
443 | Statistics=["Sum"],
444 | Period=seconds_in_one_day
445 | )
446 | # print(albmetricresponse)
447 | for r in albmetricresponse['Datapoints']:
448 | if (r['Sum']) == 0:
449 | #print("Not usable")
450 | resourceType.append(getvalue["resourceType"])
451 | resourceName.append(getvalue["resourceId"].split(
452 | ":")[5].split("/", 1)[1].split("/")[1])
453 | reason.append(
454 | "Application LoadBalancer is not used")
455 | count.append(1)
456 |
457 | if getvalue["resourceType"] == "AWS::ACM::Certificate":
458 | # print(getvalue["resourceId"])
459 | certclient = boto3.client('acm')
460 | try:
461 | certresponse = certclient.describe_certificate(
462 | CertificateArn=getvalue["resourceId"])
463 | if (len(certresponse['Certificate']["InUseBy"])) == 0:
464 | resourceType.append(getvalue["resourceType"])
465 | resourceName.append(getvalue["resourceId"].split(":")[5])
466 | count.append(1)
467 | reason.append("Certificate is not used")
468 | except:
469 | print("No data in certificates")
470 |
471 |
472 | if getvalue["resourceType"] == "AWS::SecretsManager::Secret":
473 | print(getvalue["resourceId"])
474 | secreclient = boto3.client('secretsmanager')
475 | try:
476 | secrtresponse = secreclient.describe_secret(
477 | SecretId=getvalue["resourceId"])
478 | if 'LastAccessedDate' in secrtresponse:
479 | delta = endTime.replace(
480 | tzinfo=None) - secrtresponse['LastAccessedDate'].replace(tzinfo=None)
481 | if (delta.days) > 14:
482 | resourceType.append(getvalue["resourceType"])
483 | resourceName.append(getvalue["resourceId"].split(":")[6])
484 | count.append(1)
485 | reason.append("Secret Manager Value is not used")
486 | else:
487 | resourceType.append(getvalue["resourceType"])
488 | resourceName.append(getvalue["resourceId"].split(":")[6])
489 | count.append(1)
490 | reason.append("Secret Manager Value is not used")
491 | except:
492 | print("No data in secret Manager")
493 |
494 |
495 | if getvalue["resourceType"] == "AWS::EC2::NatGateway":
496 | # print(getvalue["resourceId"])
497 | MetricName = ["ConnectionEstablishedCount"]
498 | for metric in MetricName:
499 | natmetricresponse = cloudwatchclient.get_metric_statistics(
500 | Namespace="AWS/NATGateway",
501 | MetricName=metric,
502 | Dimensions=[
503 | {'Name': 'NatGatewayId',
504 | 'Value': getvalue["resourceId"]}
505 | ],
506 | StartTime=datetime.datetime.utcnow() - timedelta(days=30),
507 | EndTime=datetime.datetime.utcnow(),
508 | Statistics=["Sum"],
509 | Period=2592000
510 | )
511 | # print(natmetricresponse)
512 | # metricdata.append(natmetricresponse)
513 | for r in natmetricresponse['Datapoints']:
514 | if (r['Sum']) == 0:
515 | # print("Not usable natgateway")
516 | resourceType.append(getvalue["resourceType"])
517 | resourceName.append(getvalue["resourceId"])
518 | count.append(1)
519 | reason.append("NAT Gateway is not used")
520 |
521 | if getvalue["resourceType"] == "AWS::SNS::Topic":
522 | #print(getvalue["resourceId"])
523 | #print(getvalue["resourceId"].split(":")[5])
524 | MetricName = ["NumberOfMessagesPublished"]
525 | for metric in MetricName:
526 | snsmetricresponse = cloudwatchclient.get_metric_statistics(
527 | Namespace="AWS/SNS",
528 | MetricName=metric,
529 | Dimensions=[
530 | {'Name': 'TopicName',
531 | 'Value': getvalue["resourceId"].split(":")[5]}
532 | ],
533 | StartTime=startTime,
534 | EndTime=endTime,
535 | Statistics=["Sum"],
536 | Period=seconds_in_one_day
537 | )
538 | # print(snsmetricresponse)
539 | # metricdata.append(snsmetricresponse)
540 | for r in snsmetricresponse['Datapoints']:
541 | if (r['Sum']) == 0:
542 | # print("Not usable natgateway")
543 | resourceType.append(getvalue["resourceType"])
544 | resourceName.append(
545 | getvalue["resourceId"].split(":")[5])
546 | count.append(1)
547 | reason.append("SNS is not used")
548 |
549 | if getvalue["resourceType"] == "AWS::SQS::Queue":
550 | #print(getvalue["resourceId"])
551 | #print(getvalue["resourceName"])
552 | MetricName = ["NumberOfMessagesReceived"]
553 | for metric in MetricName:
554 | sqsmetricresponse = cloudwatchclient.get_metric_statistics(
555 | Namespace="AWS/SQS",
556 | MetricName=metric,
557 | Dimensions=[
558 | {'Name': 'QueueName',
559 | 'Value': getvalue["resourceName"]}
560 | ],
561 | StartTime=startTime,
562 | EndTime=endTime,
563 | Statistics=["Sum"],
564 | Period=seconds_in_one_day
565 | )
566 | # print(sqsmetricresponse)
567 | # metricdata.append(sqsmetricresponse)
568 | for r in sqsmetricresponse['Datapoints']:
569 | if (r['Sum']) == 0:
570 | # print("Not usable natgateway")
571 | resourceType.append(getvalue["resourceType"])
572 | resourceName.append(
573 | getvalue["resourceName"])
574 | count.append(1)
575 | reason.append("SQS is not used")
576 |
577 | if getvalue["resourceType"] == "AWS::CodePipeline::Pipeline":
578 | # print(getvalue["resourceId"])
579 | pipelineclient = boto3.client('codepipeline')
580 | try:
581 | pipelineresponse = pipelineclient.list_pipeline_executions(
582 | pipelineName=getvalue["resourceId"])
583 | if 'pipelineExecutionSummaries' in pipelineresponse:
584 | cpdelta = endTime.replace(
585 | tzinfo=None) - pipelineresponse["pipelineExecutionSummaries"][0]["lastUpdateTime"].replace(tzinfo=None)
586 | if (cpdelta.days) > 14:
587 | resourceType.append(getvalue["resourceType"])
588 | resourceName.append(getvalue["resourceId"])
589 | count.append(1)
590 | reason.append("Pipeline is not used")
591 | else:
592 | print("No data in pipeline")
593 | resourceType.append(getvalue["resourceType"])
594 | resourceName.append(getvalue["resourceId"])
595 | count.append(1)
596 | reason.append("Pipeline is not used")
597 | except:
598 | print("No data in pipeline")
599 |
600 |
601 | if getvalue["resourceType"] == "AWS::CodeBuild::Project":
602 | # print(getvalue["resourceId"])
603 | cbclient = boto3.client('codebuild')
604 | try:
605 | cbresponse = cbclient.list_builds_for_project(
606 | projectName=getvalue["resourceName"], sortOrder='DESCENDING')
607 | cbbuildresponse = cbclient.batch_get_builds(
608 | ids=[cbresponse["ids"][0]])
609 | cbdelta = endTime.replace(
610 | tzinfo=None) - cbbuildresponse["builds"][0]["startTime"].replace(tzinfo=None)
611 | if (cbdelta.days) > 14:
612 | resourceType.append(getvalue["resourceType"])
613 | resourceName.append(getvalue["resourceName"])
614 | count.append(1)
615 | reason.append("Code Build is not used")
616 | except:
617 | print("No data in code build")
618 |
619 |
620 | if getvalue["resourceType"] == "AWS::EC2::Instance":
621 | #print("Instance")
622 | #print(getvalue["resourceId"])
623 | MetricName = ["CPUUtilization"]
624 | for metric in MetricName:
625 | instancemetricresponse = cloudwatchclient.get_metric_statistics(
626 | Namespace="AWS/EC2",
627 | MetricName=metric,
628 | Dimensions=[
629 | {'Name': 'InstanceId',
630 | 'Value': getvalue["resourceId"]}
631 | ],
632 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
633 | EndTime=datetime.datetime.utcnow(),
634 | Statistics=["Average"],
635 | Period=3600 #604800
636 | )
637 | # print(instancemetricresponse)
638 | # metricdata.append(instancemetricresponse)
639 | average = 0
640 | #print(len(instancemetricresponse['Datapoints']))
641 | for r in instancemetricresponse['Datapoints']:
642 | average = average + r['Average']
643 | #print("average: " ,average)
644 | # print(average)
645 | if (round(average,2)) < 60:
646 | resourceType.append(getvalue["resourceType"])
647 | resourceName.append(getvalue["resourceId"])
648 | count.append(1)
649 | reason.append("EC2 Instance is underutilized")
650 |
651 | if getvalue["resourceType"] == "AWS::Lambda::Function":
652 | # print(getvalue["resourceId"])
653 | MetricName = ["Invocations"]
654 | for metric in MetricName:
655 | lambdametricresponse = cloudwatchclient.get_metric_statistics(
656 | Namespace="AWS/Lambda",
657 | MetricName=metric,
658 | Dimensions=[
659 | {'Name': 'FunctionName',
660 | 'Value': getvalue["resourceName"]}
661 | ],
662 | StartTime=startTime,
663 | EndTime=endTime,
664 | Statistics=["Average"],
665 | Period=seconds_in_one_day
666 | )
667 | # print(lambdametricresponse)
668 | if len(lambdametricresponse['Datapoints']) == 0:
669 | resourceType.append(getvalue["resourceType"])
670 | resourceName.append(getvalue["resourceName"])
671 | count.append(1)
672 | reason.append("Lambda is not used")
673 | lmdclient = boto3.client('lambda')
674 | lmdresponse = lmdclient.get_function(FunctionName=getvalue["resourceName"])
675 | lmdresourceType.append(getvalue["resourceType"])
676 | lmdresourceName.append(getvalue["resourceName"])
677 | lmdpackagesize.append(convert_bytes(lmdresponse['Configuration']['CodeSize']))
678 |
679 | if getvalue["resourceType"] == "AWS::RDS::DBCluster":
680 | # print(getvalue["resourceId"])
681 | MetricName = ["DatabaseConnections"]
682 | for metric in MetricName:
683 | rdsmetricresponse = cloudwatchclient.get_metric_statistics(
684 | Namespace="AWS/RDS",
685 | MetricName=metric,
686 | Dimensions=[
687 | {'Name': 'DBClusterIdentifier',
688 | 'Value': getvalue["resourceName"]}
689 | ],
690 | StartTime=startTime,
691 | EndTime=endTime,
692 | Statistics=["Average"],
693 | Period=seconds_in_one_day
694 | )
695 | # print(rdsmetricresponse)
696 | for r in rdsmetricresponse['Datapoints']:
697 | if (r['Average']) == 0:
698 | # print("Not usable natgateway")
699 | resourceType.append(getvalue["resourceType"])
700 | resourceName.append(
701 | getvalue["resourceName"])
702 | count.append(1)
703 | reason.append("DB Cluster is not used")
704 |
705 | if getvalue["resourceType"] == "AWS::ApiGateway::RestApi" or getvalue["resourceType"] == "AWS::ApiGatewayV2::Api" :
706 | # print(getvalue["resourceId"])
707 | MetricName = ["Count"]
708 | for metric in MetricName:
709 | apimetricresponse = cloudwatchclient.get_metric_statistics(
710 | Namespace="AWS/ApiGateway",
711 | MetricName=metric,
712 | Dimensions=[
713 | {'Name': 'ApiName',
714 | 'Value': getvalue["resourceName"]}
715 | ],
716 | StartTime=startTime,
717 | EndTime=endTime,
718 | Statistics=["Average"],
719 | Period=seconds_in_one_day
720 | )
721 | # print(apimetricresponse)
722 | if len(apimetricresponse['Datapoints']) == 0:
723 | resourceType.append(getvalue["resourceType"])
724 | resourceName.append(getvalue["resourceName"])
725 | count.append(1)
726 | reason.append("Api Gateway is not used")
727 |
728 | if getvalue["resourceType"] == "AWS::S3::Bucket":
729 | # print(getvalue["resourceId"])
730 | s3client = boto3.client('s3')
731 | s3objects = []
732 | size = 0
733 | try:
734 | s3response = s3client.list_objects(
735 | Bucket=getvalue["resourceName"])
736 | if 'Contents' in s3response:
737 | for data in s3response['Contents']:
738 | s3objects.append(data['LastModified'])
739 | size = size + data['Size']
740 | #if s3response['IsTruncated'] == True:
741 | # while('IsTruncated' == True in s3response):
742 | # s3response = s3client.list_objects(
743 | # Bucket=getvalue["resourceName"] ,Marker=s3response['Key'])
744 | # for data in s3response['Contents']:
745 | # s3objects.append(data['LastModified'])
746 | s3delta = endTime.replace(
747 | tzinfo=None) - sorted(s3objects,reverse=True)[0].replace(tzinfo=None)
748 | if (s3delta.days) > 14:
749 | resourceType.append(getvalue["resourceType"])
750 | resourceName.append(getvalue["resourceName"])
751 | count.append(1)
752 | reason.append("S3 is not used")
753 | s3resourceType.append(getvalue["resourceType"])
754 | s3resourceName.append(getvalue["resourceName"])
755 | s3size.append(convert_bytes(size))
756 | # s3metricresponse = cloudwatchclient.get_metric_statistics(
757 | # Namespace="AWS/S3",
758 | # MetricName='BucketSizeBytes',
759 | # Dimensions=[
760 | # {'Name': 'BucketName','Value': getvalue["resourceName"]},
761 | # {'Name':'StorageType','Value': s3response['Contents'][0]['StorageClass'] + 'Storage'}
762 | # ],
763 | # StartTime=startTime,
764 | # EndTime=endTime,
765 | # Statistics=["Average"],
766 | # Period=seconds_in_one_day
767 | # )
768 | # for r in s3metricresponse['Datapoints']:
769 | # s3resourceType.append(getvalue["resourceType"])
770 | # s3resourceName.append(getvalue["resourceName"])
771 | # s3size.append(convert_bytes(r['Average']))
772 | else:
773 | resourceType.append(getvalue["resourceType"])
774 | resourceName.append(getvalue["resourceName"])
775 | count.append(1)
776 | reason.append("S3 is not used")
777 | s3resourceType.append(getvalue["resourceType"])
778 | s3resourceName.append(getvalue["resourceName"])
779 | s3size.append("0 B")
780 | except:
781 | print("No data in S3 Bucket")
782 |
783 | # print(resources)
784 |
785 | dataset = {
786 | 'resourceType': resourceType,
787 | 'resourceName': resourceName,
788 | 'reason': reason,
789 | 'count': count
790 | }
791 |
792 | cwdataset = {
793 | 'resourceType': cwresourceType,
794 | 'resourceName': cwresourceName,
795 | 'reason': cwreason
796 | }
797 |
798 | lmddataset = {
799 | 'resourceType': lmdresourceType,
800 | 'resourceName': lmdresourceName,
801 | 'reason': lmdpackagesize
802 | }
803 |
804 | s3dataset = {
805 | 'resourceType': s3resourceType,
806 | 'resourceName': s3resourceName,
807 | 'reason': s3size
808 | }
809 |
810 | cwdf = pd.DataFrame.from_dict(cwdataset)
811 | cw_result_table = data_table(cwdf,'Reason')
812 |
813 | lmddf = pd.DataFrame.from_dict(lmddataset)
814 | lmd_result_table = data_table(lmddf,'Size')
815 |
816 | s3df = pd.DataFrame.from_dict(s3dataset)
817 | s3_result_table = data_table(s3df,'Size')
818 |
819 | df = pd.DataFrame.from_dict(dataset)
820 | result_table = data_table(df,'Reason')
821 | bar_resources = bar_totals(df, 'resourceType')
822 |
823 |     BODY_HTML = "Breakdown by AWS service<br>{}<br>"  # HTML tags in these strings were stripped when this listing was extracted; reconstructed here
824 |     BODY_HTML = BODY_HTML.format(bar_resources)
825 |     BODY_HTML += "All Resources:<br>{}<br>"
826 |     BODY_HTML = BODY_HTML.format(result_table)
827 |     # BODY_HTML += result_table
828 |
829 |     #print(lmd_result_table)
830 |     BODY_HTML += "Lambda Unused Code Size:<br>{}<br>"
831 |     BODY_HTML = BODY_HTML.format(lmd_result_table)
832 |
833 |     BODY_HTML += "S3 Unused Bucket Size:<br>{}<br>"
834 |     BODY_HTML = BODY_HTML.format(s3_result_table)
835 |
836 |     BODY_HTML += "CloudWatch Log Groups:<br>{}<br>"
837 |     BODY_HTML = BODY_HTML.format(cw_result_table)
838 |
839 | res = send_mail(BODY_HTML, startTime, endTime)
840 | # res = send_mail(result_table, startTime, endTime)
841 |
842 | return {
843 | 'statusCode': 200,
844 | 'body': json.dumps(res)
845 | }
846 |
847 |
848 | def send_mail(table, startTime, endTime):
849 | SENDER = os.environ['sender']
850 | #print(SENDER)
851 | RECIPIENT = os.environ['receiver']
852 | #print(RECIPIENT)
853 | CONFIGURATION_SET = "ConfigSet"
854 | SUBJECT = "Un-used AWS Resources in "+ os.environ['app'] +" "+ \
855 | os.environ['env'] + " environment"
856 | BODY_TEXT = ("Amazon SES Test (Python)\r\n"
857 | "This email was sent with Amazon SES using the "
858 | "AWS SDK for Python (Boto).")
859 |
860 | CHARSET = "UTF-8"
861 | BODY_HTML = "AWS Un-used Resources in " + \
862 | os.environ['env'] + \
863 |         " environment from {} to {}<br>"  # trailing HTML markup reconstructed; the tags were stripped from this listing
864 | BODY_HTML += table
865 | BODY_HTML = BODY_HTML.format(startTime.strftime(
866 | '%d-%m-%Y'), endTime.strftime('%d-%m-%Y'))
867 | sesclient = boto3.client('ses')
868 |
869 | # Try to send the email.
870 | try:
871 | # Provide the contents of the email.
872 | response = sesclient.send_email(
873 | Destination={
874 | 'ToAddresses': [
875 | RECIPIENT,
876 | ],
877 | },
878 | Message={
879 | 'Body': {
880 | 'Html': {
881 | 'Charset': CHARSET,
882 | 'Data': BODY_HTML,
883 | },
884 | 'Text': {
885 | 'Charset': CHARSET,
886 | 'Data': BODY_TEXT,
887 | },
888 | },
889 | 'Subject': {
890 | 'Charset': CHARSET,
891 | 'Data': SUBJECT,
892 | },
893 | },
894 | Source=SENDER,
895 | # If you are not using a configuration set, comment or delete the
896 | # following line
897 | # ConfigurationSetName=CONFIGURATION_SET,
898 | )
899 | # Display an error if something goes wrong.
900 | except ClientError as e:
901 | print(e.response['Error']['Message'])
902 | return "Error"
903 | else:
904 |         print("Email sent! Message ID:")
905 | print(response['MessageId'])
906 | return "Success"
907 |
908 |
909 | def bar_totals(df, grouping_col):
910 | df_grouped = df.groupby([grouping_col])['count'].sum().reset_index(
911 | name='total_count').sort_values(by=['total_count'], ascending=False)
912 | df_grouped['total_count'] = df_grouped['total_count']
913 | chli = return_total_count(df)
914 | chd = ''
915 | chl = ''
916 | chdl = ''
917 | for index, row in df_grouped.iterrows():
918 | chd += '{}|'.format(row['total_count'])
919 | chdl += '{}({})|'.format(row[grouping_col], row['total_count'])
920 |     bar_chart = '<img src="https://chart.googleapis.com/chart?cht=bvg&chs=500x200&chd=t:{}&chl={}">'.format(  # the original chart <img> URL was stripped from this listing; this is a best-guess reconstruction
921 |         chd[:-3], chdl[:-1])
922 | return bar_chart
923 |
924 |
925 | def return_total_count(df):
926 | df_total = df['count'].sum().round(3)
927 | result = '{}'.format(df_total)
928 | return result
929 |
930 | def convert_bytes(num):
931 |     step_unit = 1000.0  # decimal units; use 1024.0 for binary (KiB/MiB) sizes
932 | for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
933 | if num < step_unit:
934 | return "%3.1f %s" % (num, x)
935 | num /= step_unit
936 |
937 | if __name__ == '__main__':
938 | sys.exit(main())
--------------------------------------------------------------------------------
/ECS/src/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.14.60
2 | psycopg2==2.8.6
3 | botocore==1.17.60
4 | requests==2.24.0
5 | pandas==1.1.2
--------------------------------------------------------------------------------
/Images/Unused.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/karthickcse05/aws_unused_resources/23a170e7e005f35818950583e83a3dc5ab51526f/Images/Unused.png
--------------------------------------------------------------------------------
/Lambda-PyPacakge/Makefile:
--------------------------------------------------------------------------------
1 | PROFILE =
2 | ENVIRONMENT = sbx
3 | PREFIX =
4 | Sender =
5 | Receiver =
6 | Days = 14
7 | BucketName =
8 | application =
9 |
10 |
11 |
12 | .PHONY: explain
13 | explain:
14 | #
15 | #
16 | # ___ _ _______ __ __ __ ____
17 | # / | | / / ___/ / / / /___ __ __________ ____/ / / __ \___ _________ __ _______________ _____
18 | # / /| | | /| / /\__ \ / / / / __ \/ / / / ___/ _ \/ __ / / /_/ / _ \/ ___/ __ \/ / / / ___/ ___/ _ \/ ___/
19 | # / ___ | |/ |/ /___/ / / /_/ / / / / /_/ (__ ) __/ /_/ / / _, _/ __(__ ) /_/ / /_/ / / / /__/ __(__ )
20 | # /_/ |_|__/|__//____/ \____/_/ /_/\__,_/____/\___/\__,_/ /_/ |_|\___/____/\____/\__,_/_/ \___/\___/____/
21 |
22 |
23 | #
24 | ### Targets
25 | #
26 | @cat Makefile* | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
27 |
28 |
29 | .PHONY: build
30 | build: ## builds src/lambda.zip (the deployment package referenced by the template) # requires WSL or another Linux shell
31 | 	cd src && \
32 | 	apt update && \
33 | 	apt-get install -y libsasl2-dev python3-dev libldap2-dev libssl-dev python3-pip && \
34 | 	pip3 install -r requirements.txt --target . && \
35 | 	zip -qr lambda.zip ./*
36 |
37 |
38 | .PHONY: package
39 | package: ## packages the file in s3
40 | aws cloudformation package \
41 | --template-file "infrastructure\aws-unused-resources.yaml" \
42 | --s3-bucket $(BucketName) \
43 | --output-template-file "aws-unused-resources-release.yaml" \
44 | --profile $(PROFILE) \
45 |
46 |
47 | .PHONY: create_stack
48 | create_stack: ## Creates a cloudformation stack in AWS
49 | make deploy ACTION=create
50 |
51 | .PHONY: update_stack
52 | update_stack: ## Updates an existing cloudformation stack in AWS
53 | make deploy ACTION=update
54 |
55 |
56 | .PHONY: deploy
57 | deploy: ## deploy the cloudformation stack in AWS
58 | aws cloudformation $(ACTION)-stack \
59 | --stack-name ${PREFIX}-$(ENVIRONMENT)-awsunusedresources \
60 | --template-body file://aws-unused-resources-release.yaml \
61 | --profile $(PROFILE) \
62 | --capabilities CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND \
63 | --parameters \
64 | ParameterKey=sender,ParameterValue=$(Sender) \
65 | ParameterKey=receiver,ParameterValue=$(Receiver) \
66 | ParameterKey=env,ParameterValue=$(ENVIRONMENT) \
67 | ParameterKey=days,ParameterValue=$(Days) \
68 | ParameterKey=application,ParameterValue=$(application) \
69 |
70 | .PHONY: create_bucket
71 | create_bucket: ## Creates a new bucket cloudformation stack in AWS
72 | make create_update_bucket ACTION=create
73 |
74 | .PHONY: update_bucket
75 | update_bucket: ## Updates an existing bucket cloudformation stack in AWS
76 | make create_update_bucket ACTION=update
77 |
78 | .PHONY: create_update_bucket
79 | create_update_bucket: ## Creates or updates the bucket cloudformation stack based on the action
80 | aws cloudformation $(ACTION)-stack \
81 | --stack-name ${PREFIX}-$(ENVIRONMENT)-buckets \
82 | --template-body file://infrastructure/pre-requistes/artifact-store-stack.yaml \
83 | --profile $(PROFILE) \
84 | --capabilities CAPABILITY_NAMED_IAM \
85 | --parameters \
86 | ParameterKey=StackPrefix,ParameterValue=$(PREFIX) \
87 | ParameterKey=Environment,ParameterValue=$(ENVIRONMENT) \
88 |
--------------------------------------------------------------------------------
/Lambda-PyPacakge/README.md:
--------------------------------------------------------------------------------
1 | # Unused AWS Resources in AWS Lambda with python 3.7
2 |
3 | This deployment uses the `awsunusedresources` Python package to identify unused AWS resources from an AWS Lambda function that is triggered by a CloudWatch Events rule (Python 3.6 runtime).
4 |
5 | Clone the repo, fill in your details in the `Makefile`, and then run the following commands:
6 |
7 | ```make package```
8 |
9 | ```make create_stack```
10 |
11 | If you don't have an S3 bucket for the packaged artifacts yet, first run:
12 |
13 | ```make create_bucket```
14 |
15 | For sending mail, I have configured the mail IDs in AWS SES. If you prefer another mailing option, you can use that instead.
16 |
17 | Enjoy!
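18 |
19 | For reference, the deployed function is essentially a thin wrapper around the package's `unused_res` entry point, with the stack parameters exposed as environment variables (a minimal sketch mirroring `src/aws_resources.py`):
20 |
21 | ```python
22 | import json
23 | import os
24 |
25 | from awsunusedresources import unused_res
26 |
27 |
28 | def lambda_handler(event, context):
29 |     # days/sender/receiver/app/env are injected by the CloudFormation template.
30 |     unused_res(os.environ['days'], os.environ['sender'],
31 |                os.environ['receiver'], os.environ['app'], os.environ['env'])
32 |     return {'statusCode': 200, 'body': json.dumps("success")}
33 | ```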
--------------------------------------------------------------------------------
/Lambda-PyPacakge/infrastructure/aws-unused-resources.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Transform: "AWS::Serverless-2016-10-31"
3 |
4 | Description: >
5 |   Scheduled Lambda function that checks for unused AWS resources over a configurable time window.
6 |
7 | Parameters:
8 | sender:
9 | Type: String
10 | Default: "xyz@xyz.com"
11 | Description: mail id of the sender.
12 |
13 | receiver:
14 | Type: String
15 | Default: "xyz@xyz.com"
16 | Description: mail id of the receiver.
17 |
18 | env:
19 | Type: String
20 | Default: sbx
21 | Description: environment.
22 |
23 | days:
24 | Type: String
25 | Default: "14"
26 |     Description: look-back window in days used to decide whether a resource is unused.
27 |
28 | application:
29 | Type: String
30 | Default: "test"
31 | Description: name of the application.
32 |
33 | Resources:
34 | LambdaExecutionRole:
35 | Type: AWS::IAM::Role
36 | Properties:
37 | RoleName: unusedres-lambda-execution-role
38 | AssumeRolePolicyDocument:
39 | Version: "2012-10-17"
40 | Statement:
41 | - Effect: Allow
42 | Principal:
43 | Service:
44 | - lambda.amazonaws.com
45 | Action:
46 | - sts:AssumeRole
47 | Path: "/"
48 | Policies:
49 | - PolicyName: unusedres-lambda-execution-policy
50 | PolicyDocument:
51 | Version: "2012-10-17"
52 | Statement:
53 | - Effect: Allow
54 | Action: "*"
55 | Resource: "*"
56 |
57 | UnUsedResFunction:
58 | Type: "AWS::Serverless::Function"
59 | Properties:
60 | Description: "Lambda to find the unused aws resources"
61 | FunctionName: "Lambda_Unused_AWS_Resources"
62 | Handler: aws_resources.lambda_handler
63 | Runtime: python3.6
64 | Role: !GetAtt LambdaExecutionRole.Arn
65 | CodeUri: ../src/lambda.zip
66 | Environment:
67 | Variables:
68 | days: !Ref days
69 | env: !Ref env
70 | receiver: !Ref receiver
71 | sender: !Ref sender
72 | app: !Ref application
73 | MemorySize: 512
74 | Timeout: 60
75 |
76 | UnusedResourcesEventRule:
77 | Type: AWS::Events::Rule
78 | DependsOn: UnUsedResFunction
79 | Properties:
80 |       Description: "Triggers once a week to check for unused resources"
81 |       ScheduleExpression: "cron(30 5 ? * FRI *)" # every Friday at 05:30 UTC
82 | State: "ENABLED"
83 | Targets:
84 | - Arn: !GetAtt "UnUsedResFunction.Arn"
85 | Id: "StartFunction"
86 |
87 | LambdaStartPermission:
88 | Type: "AWS::Lambda::Permission"
89 | Properties:
90 | Action: "lambda:InvokeFunction"
91 | FunctionName: !GetAtt "UnUsedResFunction.Arn"
92 | Principal: "events.amazonaws.com"
93 | SourceArn: !GetAtt UnusedResourcesEventRule.Arn
94 |
--------------------------------------------------------------------------------
/Lambda-PyPacakge/infrastructure/pre-requistes/artifact-store-stack.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Description: Creates the bucket to store build artifacts
3 |
4 | Parameters:
5 | StackPrefix:
6 | Type: String
7 | Description: The unique prefix for bucket
8 |
9 | Environment:
10 | Type: String
11 | Description: The environment that the bucket is deployed to
12 | Default: sbx
13 | AllowedValues:
14 | - prd
15 | - tst
16 | - dev
17 | - stg
18 | - sbx
19 |
20 | Resources:
21 | ArtifactBucket:
22 | Type: AWS::S3::Bucket
23 | Properties:
24 | AccessControl: Private
25 | BucketName: !Sub ${StackPrefix}-${Environment}-artifacts
26 | PublicAccessBlockConfiguration:
27 | BlockPublicAcls: true
28 | BlockPublicPolicy: true
29 | IgnorePublicAcls: true
30 | RestrictPublicBuckets: true
31 | VersioningConfiguration:
32 | Status: Enabled
33 | Tags:
34 | - Key: Name
35 | Value: !Sub ${StackPrefix}-${Environment}-artifacts-s3
36 |
37 | Outputs:
38 | ArtifactStore:
39 | Value: !Ref ArtifactBucket
40 | Export:
41 | Name: !Sub ${StackPrefix}-${Environment}-artifacts
42 |
--------------------------------------------------------------------------------
/Lambda-PyPacakge/src/aws_resources.py:
--------------------------------------------------------------------------------
1 | import json
2 | import boto3
3 | import os
4 | #from datetime import date, timedelta
5 | import datetime
6 | from _datetime import timedelta
7 | import pandas as pd
8 | from botocore.exceptions import ClientError
9 | import re
10 | import numpy as np
11 | from awsunusedresources import unused_res
12 |
13 |
14 | def lambda_handler(event, context):
15 | print('Finding Unused Resources in AWS:')
16 |
17 | try:
18 | unused_res(os.environ['days'], os.environ['sender'],
19 | os.environ['receiver'], os.environ['app'], os.environ['env'])
20 |     except Exception as e:
21 |         print("error in execution:", e)
22 |
23 | return {
24 | 'statusCode': 200,
25 | 'body': json.dumps("success")
26 | }
27 |
--------------------------------------------------------------------------------
/Lambda-PyPacakge/src/requirements.txt:
--------------------------------------------------------------------------------
1 | awsunusedresources==0.1.6
--------------------------------------------------------------------------------
/Lambda/Makefile:
--------------------------------------------------------------------------------
1 | PROFILE =
2 | ENVIRONMENT = sbx
3 | PREFIX =
4 | Sender =
5 | Receiver =
6 | Days = 14
7 | BucketName =
8 | application =
9 |
10 |
11 |
12 | .PHONY: explain
13 | explain:
14 | #
15 | #
16 | # ___ _ _______ __ __ __ ____
17 | # / | | / / ___/ / / / /___ __ __________ ____/ / / __ \___ _________ __ _______________ _____
18 | # / /| | | /| / /\__ \ / / / / __ \/ / / / ___/ _ \/ __ / / /_/ / _ \/ ___/ __ \/ / / / ___/ ___/ _ \/ ___/
19 | # / ___ | |/ |/ /___/ / / /_/ / / / / /_/ (__ ) __/ /_/ / / _, _/ __(__ ) /_/ / /_/ / / / /__/ __(__ )
20 | # /_/ |_|__/|__//____/ \____/_/ /_/\__,_/____/\___/\__,_/ /_/ |_|\___/____/\____/\__,_/_/ \___/\___/____/
21 |
22 |
23 | #
24 | ### Targets
25 | #
26 | @cat Makefile* | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
27 |
28 | .PHONY: build
29 | build: ## Zips the code to the lambda.zip
30 | zip -ur ./src/lambda.zip ./src/aws_resources.py
31 |
32 | .PHONY: Alternativebuild
33 | Alternativebuild: ## alternative full build of src/lambda.zip with dependencies # requires WSL or another Linux shell
34 | 	cd src && \
35 | 	apt update && \
36 | 	apt-get install -y libsasl2-dev python3-dev libldap2-dev libssl-dev python3-pip && \
37 | 	pip3 install -r requirements.txt --target . && \
38 | 	zip -qr lambda.zip ./*
39 |
40 |
41 | .PHONY: package
42 | package: ## packages the file in s3
43 | aws cloudformation package \
44 | --template-file "infrastructure\aws-unused-resources.yaml" \
45 | --s3-bucket $(BucketName) \
46 | --output-template-file "aws-unused-resources-release.yaml" \
47 | --profile $(PROFILE) \
48 |
49 |
50 | .PHONY: create_stack
51 | create_stack: ## Creates a cloudformation stack in AWS
52 | make deploy ACTION=create
53 |
54 | .PHONY: update_stack
55 | update_stack: ## Updates an existing cloudformation stack in AWS
56 | make deploy ACTION=update
57 |
58 |
59 | .PHONY: deploy
60 | deploy: ## deploy the cloudformation stack in AWS
61 | aws cloudformation $(ACTION)-stack \
62 | --stack-name ${PREFIX}-$(ENVIRONMENT)-awsunusedresources \
63 | --template-body file://aws-unused-resources-release.yaml \
64 | --profile $(PROFILE) \
65 | --capabilities CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND \
66 | --parameters \
67 | ParameterKey=sender,ParameterValue=$(Sender) \
68 | ParameterKey=receiver,ParameterValue=$(Receiver) \
69 | ParameterKey=env,ParameterValue=$(ENVIRONMENT) \
70 | ParameterKey=days,ParameterValue=$(Days) \
71 | ParameterKey=application,ParameterValue=$(application) \
72 |
73 | .PHONY: create_bucket
74 | create_bucket: ## Creates a new bucket cloudformation stack in AWS
75 | make create_update_bucket ACTION=create
76 |
77 | .PHONY: update_bucket
78 | update_bucket: ## Updates an existing bucket cloudformation stack in AWS
79 | make create_update_bucket ACTION=update
80 |
81 | .PHONY: create_update_bucket
82 | create_update_bucket: ## Creates or updates the bucket cloudformation stack based on the action
83 | aws cloudformation $(ACTION)-stack \
84 | --stack-name ${PREFIX}-$(ENVIRONMENT)-buckets \
85 | --template-body file://infrastructure/pre-requistes/artifact-store-stack.yaml \
86 | --profile $(PROFILE) \
87 | --capabilities CAPABILITY_NAMED_IAM \
88 | --parameters \
89 | ParameterKey=StackPrefix,ParameterValue=$(PREFIX) \
90 | ParameterKey=Environment,ParameterValue=$(ENVIRONMENT) \
91 |
--------------------------------------------------------------------------------
/Lambda/README.md:
--------------------------------------------------------------------------------
1 | # Unused AWS Resources in AWS Lambda with python 3.6
2 |
3 | This Python deployment package lets you identify unused AWS resources from an AWS Lambda function that is triggered by a CloudWatch Events rule (Python 3.6 runtime).
4 |
5 | Clone the repo, fill in your details in the `Makefile`, and then run the following commands:
6 |
7 | ```make package```
8 |
9 | ```make create_stack```
10 |
11 | If you don't have an S3 bucket for the packaged artifacts yet, first run:
12 |
13 | ```make create_bucket```
14 |
15 | For sending mail, I have configured the mail IDs in AWS SES. If you prefer another mailing option, you can use that instead.
16 |
17 | Enjoy!
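18 |
19 | If you want to smoke-test the function locally before deploying, one option is to export the same environment variables the template sets and call the handler directly. A rough sketch with placeholder values (this assumes AWS credentials with the required read permissions and an SES-verified sender):
20 |
21 | ```python
22 | import os
23 |
24 | # Placeholder values matching the template's parameter defaults.
25 | os.environ.update({
26 |     "days": "14",
27 |     "env": "sbx",
28 |     "sender": "xyz@xyz.com",
29 |     "receiver": "xyz@xyz.com",
30 |     "app": "test",
31 | })
32 |
33 | # Run from Lambda/src so that aws_resources.py is importable.
34 | from aws_resources import lambda_handler
35 |
36 | print(lambda_handler({}, None))
37 | ```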
--------------------------------------------------------------------------------
/Lambda/infrastructure/aws-unused-resources.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Transform: "AWS::Serverless-2016-10-31"
3 |
4 | Description: >
5 |   Scheduled Lambda function that checks for unused AWS resources over a configurable time window.
6 |
7 | Parameters:
8 | sender:
9 | Type: String
10 | Default: "xyz@xyz.com"
11 | Description: mail id of the sender.
12 |
13 | receiver:
14 | Type: String
15 | Default: "xyz@xyz.com"
16 | Description: mail id of the receiver.
17 |
18 | env:
19 | Type: String
20 | Default: sbx
21 | Description: environment.
22 |
23 | days:
24 | Type: String
25 | Default: "14"
26 |     Description: look-back window in days used to decide whether a resource is unused.
27 |
28 | application:
29 | Type: String
30 | Default: "test"
31 | Description: name of the application.
32 |
33 | Resources:
34 | LambdaExecutionRole:
35 | Type: AWS::IAM::Role
36 | Properties:
37 | RoleName: unusedres-lambda-execution-role
38 | AssumeRolePolicyDocument:
39 | Version: "2012-10-17"
40 | Statement:
41 | - Effect: Allow
42 | Principal:
43 | Service:
44 | - lambda.amazonaws.com
45 | Action:
46 | - sts:AssumeRole
47 | Path: "/"
48 | Policies:
49 | - PolicyName: unusedres-lambda-execution-policy
50 | PolicyDocument:
51 | Version: "2012-10-17"
52 | Statement:
53 | - Effect: Allow
54 | Action: "*"
55 | Resource: "*"
56 |
57 | UnUsedResFunction:
58 | Type: "AWS::Serverless::Function"
59 | Properties:
60 | Description: "Lambda to find the unused aws resources"
61 | FunctionName: "Lambda_Unused_AWS_Resources"
62 | Handler: aws_resources.lambda_handler
63 | Runtime: python3.6
64 | Role: !GetAtt LambdaExecutionRole.Arn
65 | CodeUri: ../src/lambda.zip
66 | Environment:
67 | Variables:
68 | days: !Ref days
69 | env: !Ref env
70 | receiver: !Ref receiver
71 | sender: !Ref sender
72 | app: !Ref application
73 | MemorySize: 512
74 | Timeout: 60
75 |
76 | UnusedResourcesEventRule:
77 | Type: AWS::Events::Rule
78 | DependsOn: UnUsedResFunction
79 | Properties:
80 |       Description: "Triggers once a week to check for unused resources"
81 |       ScheduleExpression: "cron(30 5 ? * FRI *)" # every Friday at 05:30 UTC
82 | State: "ENABLED"
83 | Targets:
84 | - Arn: !GetAtt "UnUsedResFunction.Arn"
85 | Id: "StartFunction"
86 |
87 | LambdaStartPermission:
88 | Type: "AWS::Lambda::Permission"
89 | Properties:
90 | Action: "lambda:InvokeFunction"
91 | FunctionName: !GetAtt "UnUsedResFunction.Arn"
92 | Principal: "events.amazonaws.com"
93 | SourceArn: !GetAtt UnusedResourcesEventRule.Arn
94 |
--------------------------------------------------------------------------------
/Lambda/infrastructure/pre-requistes/artifact-store-stack.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Description: Creates the bucket to store build artifacts
3 |
4 | Parameters:
5 | StackPrefix:
6 | Type: String
7 | Description: The unique prefix for bucket
8 |
9 | Environment:
10 | Type: String
11 | Description: The environment that the bucket is deployed to
12 | Default: sbx
13 | AllowedValues:
14 | - prd
15 | - tst
16 | - dev
17 | - stg
18 | - sbx
19 |
20 | Resources:
21 | ArtifactBucket:
22 | Type: AWS::S3::Bucket
23 | Properties:
24 | AccessControl: Private
25 | BucketName: !Sub ${StackPrefix}-${Environment}-artifacts
26 | PublicAccessBlockConfiguration:
27 | BlockPublicAcls: true
28 | BlockPublicPolicy: true
29 | IgnorePublicAcls: true
30 | RestrictPublicBuckets: true
31 | VersioningConfiguration:
32 | Status: Enabled
33 | Tags:
34 | - Key: Name
35 | Value: !Sub ${StackPrefix}-${Environment}-artifacts-s3
36 |
37 | Outputs:
38 | ArtifactStore:
39 | Value: !Ref ArtifactBucket
40 | Export:
41 | Name: !Sub ${StackPrefix}-${Environment}-artifacts
42 |
--------------------------------------------------------------------------------
/Lambda/src/aws_resources.py:
--------------------------------------------------------------------------------
1 | import json
2 | import boto3
3 | import os
4 | #from datetime import date, timedelta
5 | import datetime
6 | from _datetime import timedelta
7 | import pandas as pd
8 | from botocore.exceptions import ClientError
9 | import re
10 | import numpy as np
11 |
12 |
13 | def data_table(df,column):
14 |     result_table = '<table><tr><th>Resource Type</th><th>Resource Name / Id</th><th>{}</th></tr>'  # HTML table tags reconstructed; they were stripped from this listing
15 | result_table = result_table.format(column)
16 | #print(column)
17 | #print(result_table)
18 | for index, row in df.iterrows():
19 |         result_table += '<tr><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
20 |             row['resourceType'], row['resourceName'], row['reason'])
21 |     result_table += '</table>'
22 | return result_table
23 |
24 |
25 | def getAvailableVolumes():
26 | # returns list of volumes in 'available' state
27 | ec2 = boto3.client('ec2')
28 | availableVolList = []
29 | filterList = [{'Name': 'status', 'Values': ['available']}]
30 | response = ec2.describe_volumes(Filters=filterList, MaxResults=500)
31 | if(len(response['Volumes'])> 0):
32 | for v in response['Volumes']:
33 | if(len(v['Attachments'])) == 0:
34 | availableVolList.append(v['VolumeId'])
35 | while('NextToken' in response):
36 | response = ec2.describe_volumes(
37 | Filters=filterList, MaxResults=500, NextToken=response['NextToken'])
38 | for v in response['Volumes']:
39 | if(len(v['Attachments'])) == 0:
40 | availableVolList.append(v['VolumeId'])
41 | return availableVolList
42 |
43 | def getLogsWithNoRetention():
44 | # returns list of log groups with no retention days
45 | cw = boto3.client('logs')
46 | loggroups = ['/aws','API-Gateway','RDSOSMetrics','test',os.environ['env'],'/ecs']
47 | logswithNoRetention = []
48 | for groupname in loggroups:
49 | cwresponse = cw.describe_log_groups(logGroupNamePrefix=groupname)
50 | if(len(cwresponse['logGroups']) > 0):
51 | for v in cwresponse['logGroups']:
52 | if "retentionInDays" not in v:
53 | logswithNoRetention.append(v['logGroupName'])
54 | return logswithNoRetention
55 |
56 | def getNotAssociatedEIP():
57 | # returns list of EIP in 'not used' state
58 | ec2 = boto3.client('ec2')
59 | availableEIPList = []
60 | eipresponse = ec2.describe_addresses()
61 | if(len(eipresponse['Addresses']) > 0):
62 | for address in eipresponse['Addresses']:
63 | if "AssociationId" not in address:
64 | availableEIPList.append(address['AllocationId'])
65 | return availableEIPList
66 |
67 | def getUnusedRDSSnapshot(startdate):
68 | # returns list of snapshots in 'not used' state
69 | rds = boto3.client('rds')
70 | unUsedRDSsnapshotlList = []
71 | rdsresponse = rds.describe_db_cluster_snapshots()
72 | if(len(rdsresponse['DBClusterSnapshots']) > 0):
73 | for snapshot in rdsresponse['DBClusterSnapshots']:
74 | if(snapshot['SnapshotCreateTime'].replace(tzinfo=None) < startdate):
75 | unUsedRDSsnapshotlList.append(snapshot['DBClusterSnapshotIdentifier'])
76 |     while('Marker' in rdsresponse):
77 |         rdsresponse = rds.describe_db_cluster_snapshots(Marker=rdsresponse['Marker'])
78 |         for snapshot in rdsresponse['DBClusterSnapshots']:  # fixed: iterate the snapshots on each page
79 |             if(snapshot['SnapshotCreateTime'].replace(tzinfo=None) < startdate): unUsedRDSsnapshotlList.append(snapshot['DBClusterSnapshotIdentifier'])
80 |     return unUsedRDSsnapshotlList
81 |
82 | def getUnusedEBSSnapshot(startdate):
83 | # returns list of snapshots in 'not used' state
84 | ebs = boto3.client('ec2')
85 | unUsedEBSsnapshotlList = []
86 | ebsresponse = ebs.describe_snapshots()
87 | if(len(ebsresponse['Snapshots']) > 0):
88 | for snapshot in ebsresponse['Snapshots']:
89 | if(snapshot['StartTime'].replace(tzinfo=None) < startdate):
90 | unUsedEBSsnapshotlList.append(snapshot['VolumeId'])
91 |     while('NextToken' in ebsresponse):
92 |         ebsresponse = ebs.describe_snapshots(NextToken=ebsresponse['NextToken'])  # fixed: was calling describe_db_cluster_snapshots on the EC2 client
93 |         for snapshot in ebsresponse['Snapshots']:  # fixed: iterate the snapshots on each page
94 |             if(snapshot['StartTime'].replace(tzinfo=None) < startdate): unUsedEBSsnapshotlList.append(snapshot['VolumeId'])
95 |     return unUsedEBSsnapshotlList
96 |
97 | def getUnusedES():
98 |     # returns list of underutilized Elasticsearch domains
99 | es = boto3.client('es')
100 | escw = boto3.client('cloudwatch')
101 | availableDomainNameList = []
102 | unUsedDomainNameList = []
103 | esresponse = es.list_domain_names()
104 | if(len(esresponse['DomainNames']) > 0):
105 | for data in esresponse['DomainNames']:
106 | if "DomainName" in data:
107 | availableDomainNameList.append(data['DomainName'])
108 |
109 | if(len(availableDomainNameList) > 0 ):
110 | for domainname in availableDomainNameList:
111 | MetricName = ["CPUUtilization"]
112 | for metric in MetricName:
113 | instancemetricresponse = escw.get_metric_statistics(
114 | Namespace="AWS/ES",
115 | MetricName=metric,
116 | Dimensions=[
117 | {'Name': 'DomainName',
118 | 'Value': domainname}
119 | ],
120 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
121 | EndTime=datetime.datetime.utcnow(),
122 | Statistics=["Average"],
123 | Period=3600 #604800
124 | )
125 | # print(instancemetricresponse)
126 | # metricdata.append(instancemetricresponse)
127 | average = 0
128 | #print(len(instancemetricresponse['Datapoints']))
129 | for r in instancemetricresponse['Datapoints']:
130 | average = average + r['Average']
131 | #print("average: " ,average)
132 | # print(average)
133 | if (round(average,2)) < 60:
134 | unUsedDomainNameList.append(domainname)
135 | return unUsedDomainNameList
136 |
137 | def getUnusedECS():
138 |     # returns list of underutilized ECS services
139 | ecs = boto3.client('ecs')
140 | ecscw = boto3.client('cloudwatch')
141 | availableserviceList = []
142 | unUsedECSServiceList = []
143 | ecsresponse = ecs.list_clusters()
144 | if(len(ecsresponse['clusterArns']) > 0):
145 | for cluster in ecsresponse['clusterArns']:
146 | ecsserviceresponse = ecs.list_services(cluster=cluster.split(":")[5].split("/",1)[1])
147 | if (len(ecsserviceresponse['serviceArns'])) > 0:
148 | for service in ecsserviceresponse['serviceArns']:
149 | availableserviceList.append(service.split(":")[5].split("/",1)[1])
150 |
151 | if(len(availableserviceList) > 0 ):
152 | for servicename in availableserviceList:
153 | MetricName = ["CPUUtilization"]
154 | for metric in MetricName:
155 | instancemetricresponse = ecscw.get_metric_statistics(
156 | Namespace="AWS/ECS",
157 | MetricName=metric,
158 | Dimensions=[
159 | {'Name': 'ServiceName',
160 | 'Value': servicename}
161 | ],
162 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
163 | EndTime=datetime.datetime.utcnow(),
164 | Statistics=["Average"],
165 | Period=3600 #604800
166 | )
167 | # print(instancemetricresponse)
168 | # metricdata.append(instancemetricresponse)
169 | average = 0
170 | #print(len(instancemetricresponse['Datapoints']))
171 | for r in instancemetricresponse['Datapoints']:
172 | average = average + r['Average']
173 | #print("average: " ,average)
174 | # print(average)
175 | if (round(average,2)) < 60:
176 | unUsedECSServiceList.append(servicename)
177 | return unUsedECSServiceList
178 |
179 |
180 | def getNotUsedSG():
181 | # returns list of SG in 'not used' state
182 | ec2 = boto3.client('ec2')
183 | elbclient = boto3.client('elbv2')
184 | rdsclient = boto3.client('rds')
185 | allgroups = []
186 | groups = ec2.describe_security_groups()
187 | for groupobj in groups['SecurityGroups']:
188 | allgroups.append(groupobj['GroupName'])
189 |
190 |     # Get security groups attached to EC2 instances
191 |     groups_in_use = []
192 |     reservations = ec2.describe_instances()
193 |     for r in reservations['Reservations']:
194 |         for instance in r['Instances']:
195 |             # print(instance.get('SecurityGroups'))
196 |             for group in instance.get('SecurityGroups', []):  # fixed: read the instance's SecurityGroups instead of the reservation's Groups
197 |                 if group['GroupName'] not in groups_in_use:
198 |                     groups_in_use.append(group['GroupName'])
199 |
200 | # Get all security groups from ELB
201 | load_balancers = elbclient.describe_load_balancers()
202 |     for load_balancer in load_balancers['LoadBalancers']:  # fixed: iterate the LoadBalancers list in the response
203 | if 'SecurityGroups' in load_balancer:
204 | for elb_group_list in load_balancer['SecurityGroups']:
205 | # print(elb_group_list)
206 | security_group = ec2.describe_security_groups(
207 | GroupIds=[elb_group_list])
208 | for groupobj in security_group['SecurityGroups']:
209 | if groupobj['GroupName'] not in groups_in_use:
210 | groups_in_use.append(groupobj['GroupName'])
211 |
212 |     # Get all security groups from Network Interfaces
213 | niresponse = ec2.describe_network_interfaces()
214 | for network_interface in niresponse['NetworkInterfaces']:
215 | # print(network_interface)
216 | if 'Groups' in network_interface:
217 | for ni_group_list in network_interface['Groups']:
218 | # print(ni_group_list['GroupName'])
219 | if ni_group_list['GroupName'] not in groups_in_use:
220 | groups_in_use.append(ni_group_list['GroupName'])
221 |
222 | # Get all security groups from RDS
223 | dbresponse = rdsclient.describe_db_instances()
224 | for db in dbresponse['DBInstances']:
225 | if 'VpcSecurityGroups' in db:
226 | for db_group_list in db['VpcSecurityGroups']:
227 | # print(db_group_list)
228 | db_security_group = ec2.describe_security_groups(
229 | GroupIds=[db_group_list['VpcSecurityGroupId']])
230 | # print(db_security_group)
231 | for groupobj in db_security_group['SecurityGroups']:
232 | # print(groupobj['GroupName'])
233 | if groupobj['GroupName'] not in groups_in_use:
234 | groups_in_use.append(groupobj['GroupName'])
235 |
236 | unnused_SG = []
237 | for group in allgroups:
238 | if group not in groups_in_use:
239 | unnused_SG.append(group)
240 | return unnused_SG
241 |
242 |
243 | def lambda_handler(event, context):
244 | print('Enumerating all resources in the following services:')
245 | startTime = datetime.datetime.utcnow(
246 | ) - timedelta(days=int(os.environ['days']))
247 | endTime = datetime.datetime.utcnow()
248 |     seconds_in_one_day = 1209600  # 14 days in seconds (86400 * 14); used as the CloudWatch metric period
249 |
250 | configclient = boto3.client('config')
251 | resources = ['AWS::EC2::EIP', 'AWS::EC2::Host', 'AWS::EC2::Instance',
252 | 'AWS::EC2::Volume',
253 | 'AWS::EC2::VPC',
254 | 'AWS::EC2::NatGateway', 'AWS::ElasticLoadBalancingV2::LoadBalancer', 'AWS::ACM::Certificate',
255 | 'AWS::RDS::DBInstance', 'AWS::RDS::DBSnapshot',
256 | 'AWS::RDS::DBCluster', 'AWS::RDS::DBClusterSnapshot', 'AWS::S3::Bucket',
257 | 'AWS::CloudWatch::Alarm',
258 | 'AWS::CloudFormation::Stack', 'AWS::ElasticLoadBalancing::LoadBalancer', 'AWS::AutoScaling::AutoScalingGroup',
259 | 'AWS::AutoScaling::LaunchConfiguration', 'AWS::AutoScaling::ScalingPolicy',
260 | 'AWS::DynamoDB::Table', 'AWS::CodeBuild::Project', 'AWS::WAF::RateBasedRule', 'AWS::WAF::Rule', 'AWS::WAF::RuleGroup',
261 | 'AWS::WAF::WebACL', 'AWS::WAFRegional::RateBasedRule', 'AWS::WAFRegional::Rule', 'AWS::WAFRegional::RuleGroup',
262 | 'AWS::WAFRegional::WebACL', 'AWS::CloudFront::Distribution', 'AWS::CloudFront::StreamingDistribution',
263 | 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage',
264 | 'AWS::ApiGateway::RestApi', 'AWS::ApiGatewayV2::Stage', 'AWS::ApiGatewayV2::Api',
265 | 'AWS::CodePipeline::Pipeline',
266 | 'AWS::SQS::Queue', 'AWS::KMS::Key', 'AWS::SecretsManager::Secret',
267 | 'AWS::SNS::Topic', ]
268 | datas = []
269 | for resource in resources:
270 | response = configclient.list_discovered_resources(
271 | resourceType=resource
272 | )
273 | datas.append(response['resourceIdentifiers'])
274 |
275 | cloudwatchclient = boto3.client('cloudwatch')
276 |
277 | resourceType = []
278 | resourceName = []
279 | reason = []
280 | count = []
281 |
282 | cwresourceType = []
283 | cwresourceName = []
284 | cwreason = []
285 |
286 | lmdresourceType = []
287 | lmdresourceName = []
288 | lmdpackagesize=[]
289 |
290 | s3resourceType = []
291 | s3resourceName = []
292 | s3size=[]
293 |
294 | # EBS
295 | ebsVolumes = getAvailableVolumes()
296 | # print(ebsVolumes)
297 | if(len(ebsVolumes) > 0):
298 | for volumes in ebsVolumes:
299 | # print(volumes)
300 | resourceType.append("AWS::EC2::Volume")
301 | resourceName.append(volumes)
302 | reason.append("EC2 Volume is Not Used")
303 | count.append(1)
304 |
305 | # EIP
306 | eipData = getNotAssociatedEIP()
307 | # print(eipData)
308 | if(len(eipData) > 0):
309 | for address in eipData:
310 | # print(volumes)
311 | resourceType.append("AWS::EC2::EIP")
312 | resourceName.append(address)
313 | reason.append("EIP is Not Used")
314 | count.append(1)
315 |
316 | # RDS Snapshots
317 | rdsData = getUnusedRDSSnapshot(startTime)
318 | # print(eipData)
319 | if(len(rdsData) > 0):
320 | for data in rdsData:
321 | # print(volumes)
322 | resourceType.append("AWS::RDS::SNAPSHOT")
323 | resourceName.append(data)
324 | reason.append("Long Back created RDS Cluster SnapShot is still available")
325 | count.append(1)
326 |
327 | # Elastic Search
328 | esData = getUnusedES()
329 | # print(eipData)
330 | if(len(esData) > 0):
331 | for data in esData:
332 | # print(volumes)
333 | resourceType.append("AWS::Elasticsearch::Domain")
334 | resourceName.append(data)
335 | reason.append("Elastic Search domain is underutilized")
336 | count.append(1)
337 |
338 | # Elastic Container Service
339 | ecsData = getUnusedECS()
340 | # print(eipData)
341 | if(len(ecsData) > 0):
342 | for data in ecsData:
343 | # print(volumes)
344 | resourceType.append("AWS::ECS::Service")
345 | resourceName.append(data)
346 | reason.append("Elastic Container Service is underutilized")
347 | count.append(1)
348 |
349 | # SG
350 | sgData = getNotUsedSG()
351 | # print(sgData)
352 | if(len(sgData) > 0):
353 | for sggroup in sgData:
354 | # print(sggroup)
355 | resourceType.append("AWS::EC2::SecurityGroup")
356 | resourceName.append(sggroup)
357 | reason.append("Security Group is Not Used")
358 | count.append(1)
359 |
360 | # CloudWatch Log Groups
361 | cwData = getLogsWithNoRetention()
362 | # print(sgData)
363 | if(len(cwData) > 0):
364 | for cwgroup in cwData:
365 | # print(sggroup)
366 | cwresourceType.append("AWS::Logs::LogGroup")
367 | cwresourceName.append(cwgroup)
368 | cwreason.append("Retention Days is not specified")
369 |
370 | for data in datas:
371 | for getvalue in data:
372 | if getvalue["resourceType"] == "AWS::DynamoDB::Table":
373 | # print(getvalue["resourceId"])
374 | MetricName = ["ConsumedReadCapacityUnits",
375 | "ConsumedWriteCapacityUnits"]
376 | for metric in MetricName:
377 | metricresponse = cloudwatchclient.get_metric_statistics(
378 | Namespace="AWS/DynamoDB",
379 | MetricName=metric,
380 | Dimensions=[
381 | {'Name': 'TableName',
382 | 'Value': getvalue["resourceId"]}
383 | ],
384 | StartTime=startTime,
385 | EndTime=endTime,
386 | Statistics=["Sum"],
387 | Period=seconds_in_one_day
388 | )
389 | # print(metricresponse)
390 | # metricdata.append(metricresponse)
391 | for r in metricresponse['Datapoints']:
392 | if (r['Sum']) == 0:
393 | #print("Not usable")
394 | resourceType.append(getvalue["resourceType"])
395 | resourceName.append(getvalue["resourceId"])
396 | count.append(1)
397 | if metric == "ConsumedReadCapacityUnits":
398 | reason.append("Read capacity is not used")
399 | else:
400 | reason.append("Write capacity is not used")
401 |
402 | if getvalue["resourceType"] == "AWS::ElasticLoadBalancingV2::LoadBalancer" and getvalue["resourceId"].split(":")[5].split("/", 1)[1].split("/")[0] == "net":
403 | # print(getvalue["resourceId"])
404 | # ActiveFlowCount
405 | MetricName = ["NewFlowCount", "ActiveFlowCount"]
406 | for metric in MetricName:
407 | lbmetricresponse = cloudwatchclient.get_metric_statistics(
408 | Namespace="AWS/NetworkELB",
409 | MetricName=metric,
410 | Dimensions=[
411 | {'Name': 'LoadBalancer',
412 | 'Value': getvalue["resourceId"].split(":")[5].split("/", 1)[1]}
413 | ],
414 | StartTime=startTime,
415 | EndTime=endTime,
416 | Statistics=["Sum"],
417 | Period=seconds_in_one_day
418 | )
419 | # print(lbmetricresponse)
420 | for r in lbmetricresponse['Datapoints']:
421 | if (r['Sum']) == 0:
422 | #print("Not usable")
423 | resourceType.append(getvalue["resourceType"])
424 | resourceName.append(getvalue["resourceId"].split(
425 | ":")[5].split("/", 1)[1].split("/")[1])
426 | reason.append("Network LoadBalancer is not used")
427 | count.append(1)
428 |
429 | if getvalue["resourceType"] == "AWS::ElasticLoadBalancingV2::LoadBalancer" and getvalue["resourceId"].split(":")[5].split("/", 1)[1].split("/")[0] == "app":
430 | # print(getvalue["resourceId"])
431 | MetricName = ["RequestCount", "ConsumedLCUs"]
432 | for metric in MetricName:
433 | albmetricresponse = cloudwatchclient.get_metric_statistics(
434 | Namespace="AWS/ApplicationELB",
435 | MetricName=metric,
436 | Dimensions=[
437 | {'Name': 'LoadBalancer',
438 | 'Value': getvalue["resourceId"].split(":")[5].split("/", 1)[1]}
439 | ],
440 | StartTime=startTime,
441 | EndTime=endTime,
442 | Statistics=["Sum"],
443 | Period=seconds_in_one_day
444 | )
445 | # print(albmetricresponse)
446 | for r in albmetricresponse['Datapoints']:
447 | if (r['Sum']) == 0:
448 | #print("Not usable")
449 | resourceType.append(getvalue["resourceType"])
450 | resourceName.append(getvalue["resourceId"].split(
451 | ":")[5].split("/", 1)[1].split("/")[1])
452 | reason.append(
453 | "Application LoadBalancer is not used")
454 | count.append(1)
455 |
456 | if getvalue["resourceType"] == "AWS::ACM::Certificate":
457 | # print(getvalue["resourceId"])
458 | certclient = boto3.client('acm')
459 | try:
460 | certresponse = certclient.describe_certificate(
461 | CertificateArn=getvalue["resourceId"])
462 | if (len(certresponse['Certificate']["InUseBy"])) == 0:
463 | resourceType.append(getvalue["resourceType"])
464 | resourceName.append(getvalue["resourceId"].split(":")[5])
465 | count.append(1)
466 | reason.append("Certificate is not used")
467 | except:
468 | print("No data in certificates")
469 |
470 |
471 | if getvalue["resourceType"] == "AWS::SecretsManager::Secret":
472 | print(getvalue["resourceId"])
473 | secreclient = boto3.client('secretsmanager')
474 | try:
475 | secrtresponse = secreclient.describe_secret(
476 | SecretId=getvalue["resourceId"])
477 | if 'LastAccessedDate' in secrtresponse:
478 | delta = endTime.replace(
479 | tzinfo=None) - secrtresponse['LastAccessedDate'].replace(tzinfo=None)
480 | if (delta.days) > 14:
481 | resourceType.append(getvalue["resourceType"])
482 | resourceName.append(getvalue["resourceId"].split(":")[6])
483 | count.append(1)
484 | reason.append("Secret Manager Value is not used")
485 | else:
486 | resourceType.append(getvalue["resourceType"])
487 | resourceName.append(getvalue["resourceId"].split(":")[6])
488 | count.append(1)
489 | reason.append("Secret Manager Value is not used")
490 | except:
491 | print("No data in secret Manager")
492 |
493 |
494 | if getvalue["resourceType"] == "AWS::EC2::NatGateway":
495 | # print(getvalue["resourceId"])
496 | MetricName = ["ConnectionEstablishedCount"]
497 | for metric in MetricName:
498 | natmetricresponse = cloudwatchclient.get_metric_statistics(
499 | Namespace="AWS/NATGateway",
500 | MetricName=metric,
501 | Dimensions=[
502 | {'Name': 'NatGatewayId',
503 | 'Value': getvalue["resourceId"]}
504 | ],
505 | StartTime=datetime.datetime.utcnow() - timedelta(days=30),
506 | EndTime=datetime.datetime.utcnow(),
507 | Statistics=["Sum"],
508 | Period=2592000
509 | )
510 | # print(natmetricresponse)
511 | # metricdata.append(natmetricresponse)
512 | for r in natmetricresponse['Datapoints']:
513 | if (r['Sum']) == 0:
514 | # print("Not usable natgateway")
515 | resourceType.append(getvalue["resourceType"])
516 | resourceName.append(getvalue["resourceId"])
517 | count.append(1)
518 | reason.append("NAT Gateway is not used")
519 |
520 | if getvalue["resourceType"] == "AWS::SNS::Topic":
521 | #print(getvalue["resourceId"])
522 | #print(getvalue["resourceId"].split(":")[5])
523 | MetricName = ["NumberOfMessagesPublished"]
524 | for metric in MetricName:
525 | snsmetricresponse = cloudwatchclient.get_metric_statistics(
526 | Namespace="AWS/SNS",
527 | MetricName=metric,
528 | Dimensions=[
529 | {'Name': 'TopicName',
530 | 'Value': getvalue["resourceId"].split(":")[5]}
531 | ],
532 | StartTime=startTime,
533 | EndTime=endTime,
534 | Statistics=["Sum"],
535 | Period=seconds_in_one_day
536 | )
537 | # print(snsmetricresponse)
538 | # metricdata.append(snsmetricresponse)
539 | for r in snsmetricresponse['Datapoints']:
540 | if (r['Sum']) == 0:
541 | # print("Not usable natgateway")
542 | resourceType.append(getvalue["resourceType"])
543 | resourceName.append(
544 | getvalue["resourceId"].split(":")[5])
545 | count.append(1)
546 | reason.append("SNS is not used")
547 |
548 | if getvalue["resourceType"] == "AWS::SQS::Queue":
549 | #print(getvalue["resourceId"])
550 | #print(getvalue["resourceName"])
551 | MetricName = ["NumberOfMessagesReceived"]
552 | for metric in MetricName:
553 | sqsmetricresponse = cloudwatchclient.get_metric_statistics(
554 | Namespace="AWS/SQS",
555 | MetricName=metric,
556 | Dimensions=[
557 | {'Name': 'QueueName',
558 | 'Value': getvalue["resourceName"]}
559 | ],
560 | StartTime=startTime,
561 | EndTime=endTime,
562 | Statistics=["Sum"],
563 | Period=seconds_in_one_day
564 | )
565 | # print(sqsmetricresponse)
566 | # metricdata.append(sqsmetricresponse)
567 | for r in sqsmetricresponse['Datapoints']:
568 | if (r['Sum']) == 0:
569 | # print("Not usable natgateway")
570 | resourceType.append(getvalue["resourceType"])
571 | resourceName.append(
572 | getvalue["resourceName"])
573 | count.append(1)
574 | reason.append("SQS is not used")
575 |
576 | if getvalue["resourceType"] == "AWS::CodePipeline::Pipeline":
577 | # print(getvalue["resourceId"])
578 | pipelineclient = boto3.client('codepipeline')
579 | try:
580 | pipelineresponse = pipelineclient.list_pipeline_executions(
581 | pipelineName=getvalue["resourceId"])
582 | if 'pipelineExecutionSummaries' in pipelineresponse:
583 | cpdelta = endTime.replace(
584 | tzinfo=None) - pipelineresponse["pipelineExecutionSummaries"][0]["lastUpdateTime"].replace(tzinfo=None)
585 | if (cpdelta.days) > 14:
586 | resourceType.append(getvalue["resourceType"])
587 | resourceName.append(getvalue["resourceId"])
588 | count.append(1)
589 | reason.append("Pipeline is not used")
590 | else:
591 | print("No data in pipeline")
592 | resourceType.append(getvalue["resourceType"])
593 | resourceName.append(getvalue["resourceId"])
594 | count.append(1)
595 | reason.append("Pipeline is not used")
596 | except:
597 | print("No data in pipeline")
598 |
599 |
600 | if getvalue["resourceType"] == "AWS::CodeBuild::Project":
601 | # print(getvalue["resourceId"])
602 | cbclient = boto3.client('codebuild')
603 | try:
604 | cbresponse = cbclient.list_builds_for_project(
605 | projectName=getvalue["resourceName"], sortOrder='DESCENDING')
606 | cbbuildresponse = cbclient.batch_get_builds(
607 | ids=[cbresponse["ids"][0]])
608 | cbdelta = endTime.replace(
609 | tzinfo=None) - cbbuildresponse["builds"][0]["startTime"].replace(tzinfo=None)
610 | if (cbdelta.days) > 14:
611 | resourceType.append(getvalue["resourceType"])
612 | resourceName.append(getvalue["resourceName"])
613 | count.append(1)
614 | reason.append("Code Build is not used")
615 | except:
616 | print("No data in code build")
617 |
618 |
619 | if getvalue["resourceType"] == "AWS::EC2::Instance":
620 | #print("Instance")
621 | #print(getvalue["resourceId"])
622 | MetricName = ["CPUUtilization"]
623 | for metric in MetricName:
624 | instancemetricresponse = cloudwatchclient.get_metric_statistics(
625 | Namespace="AWS/EC2",
626 | MetricName=metric,
627 | Dimensions=[
628 | {'Name': 'InstanceId',
629 | 'Value': getvalue["resourceId"]}
630 | ],
631 | StartTime=datetime.datetime.utcnow() - timedelta(days=7),
632 | EndTime=datetime.datetime.utcnow(),
633 | Statistics=["Average"],
634 | Period=3600 #604800
635 | )
636 | # print(instancemetricresponse)
637 | # metricdata.append(instancemetricresponse)
638 | average = 0
639 | #print(len(instancemetricresponse['Datapoints']))
640 | for r in instancemetricresponse['Datapoints']:
641 | average = average + r['Average']
642 | #print("average: " ,average)
643 | # print(average)
644 | if (round(average,2)) < 60:
645 | resourceType.append(getvalue["resourceType"])
646 | resourceName.append(getvalue["resourceId"])
647 | count.append(1)
648 | reason.append("EC2 Instance is underutilized")
649 |
650 | if getvalue["resourceType"] == "AWS::Lambda::Function":
651 | # print(getvalue["resourceId"])
652 | MetricName = ["Invocations"]
653 | for metric in MetricName:
654 | lambdametricresponse = cloudwatchclient.get_metric_statistics(
655 | Namespace="AWS/Lambda",
656 | MetricName=metric,
657 | Dimensions=[
658 | {'Name': 'FunctionName',
659 | 'Value': getvalue["resourceName"]}
660 | ],
661 | StartTime=startTime,
662 | EndTime=endTime,
663 | Statistics=["Average"],
664 | Period=seconds_in_one_day
665 | )
666 | # print(lambdametricresponse)
667 | if len(lambdametricresponse['Datapoints']) == 0:
668 | resourceType.append(getvalue["resourceType"])
669 | resourceName.append(getvalue["resourceName"])
670 | count.append(1)
671 | reason.append("Lambda is not used")
672 | lmdclient = boto3.client('lambda')
673 | lmdresponse = lmdclient.get_function(FunctionName=getvalue["resourceName"])
674 | lmdresourceType.append(getvalue["resourceType"])
675 | lmdresourceName.append(getvalue["resourceName"])
676 | lmdpackagesize.append(convert_bytes(lmdresponse['Configuration']['CodeSize']))
677 |
678 | if getvalue["resourceType"] == "AWS::RDS::DBCluster":
679 | # print(getvalue["resourceId"])
680 | MetricName = ["DatabaseConnections"]
681 | for metric in MetricName:
682 | rdsmetricresponse = cloudwatchclient.get_metric_statistics(
683 | Namespace="AWS/RDS",
684 | MetricName=metric,
685 | Dimensions=[
686 | {'Name': 'DBClusterIdentifier',
687 | 'Value': getvalue["resourceName"]}
688 | ],
689 | StartTime=startTime,
690 | EndTime=endTime,
691 | Statistics=["Average"],
692 | Period=seconds_in_one_day
693 | )
694 | # print(rdsmetricresponse)
695 | for r in rdsmetricresponse['Datapoints']:
696 | if (r['Average']) == 0:
697 | # print("Not usable natgateway")
698 | resourceType.append(getvalue["resourceType"])
699 | resourceName.append(
700 | getvalue["resourceName"])
701 | count.append(1)
702 | reason.append("DB Cluster is not used")
703 |
704 | if getvalue["resourceType"] == "AWS::ApiGateway::RestApi" or getvalue["resourceType"] == "AWS::ApiGatewayV2::Api" :
705 | # print(getvalue["resourceId"])
706 | MetricName = ["Count"]
707 | for metric in MetricName:
708 | apimetricresponse = cloudwatchclient.get_metric_statistics(
709 | Namespace="AWS/ApiGateway",
710 | MetricName=metric,
711 | Dimensions=[
712 | {'Name': 'ApiName',
713 | 'Value': getvalue["resourceName"]}
714 | ],
715 | StartTime=startTime,
716 | EndTime=endTime,
717 | Statistics=["Average"],
718 | Period=seconds_in_one_day
719 | )
720 | # print(apimetricresponse)
721 | if len(apimetricresponse['Datapoints']) == 0:
722 | resourceType.append(getvalue["resourceType"])
723 | resourceName.append(getvalue["resourceName"])
724 | count.append(1)
725 | reason.append("Api Gateway is not used")
726 |
727 | if getvalue["resourceType"] == "AWS::S3::Bucket":
728 | # print(getvalue["resourceId"])
729 | s3client = boto3.client('s3')
730 | s3objects = []
731 | size = 0
732 | try:
733 | s3response = s3client.list_objects(
734 | Bucket=getvalue["resourceName"])
735 | if 'Contents' in s3response:
736 | for data in s3response['Contents']:
737 | s3objects.append(data['LastModified'])
738 | size = size + data['Size']
739 | #if s3response['IsTruncated'] == True:
740 | # while('IsTruncated' == True in s3response):
741 | # s3response = s3client.list_objects(
742 | # Bucket=getvalue["resourceName"] ,Marker=s3response['Key'])
743 | # for data in s3response['Contents']:
744 | # s3objects.append(data['LastModified'])
745 | s3delta = endTime.replace(
746 | tzinfo=None) - sorted(s3objects,reverse=True)[0].replace(tzinfo=None)
747 | if (s3delta.days) > 14:
748 | resourceType.append(getvalue["resourceType"])
749 | resourceName.append(getvalue["resourceName"])
750 | count.append(1)
751 | reason.append("S3 is not used")
752 | s3resourceType.append(getvalue["resourceType"])
753 | s3resourceName.append(getvalue["resourceName"])
754 | s3size.append(convert_bytes(size))
755 | # s3metricresponse = cloudwatchclient.get_metric_statistics(
756 | # Namespace="AWS/S3",
757 | # MetricName='BucketSizeBytes',
758 | # Dimensions=[
759 | # {'Name': 'BucketName','Value': getvalue["resourceName"]},
760 | # {'Name':'StorageType','Value': s3response['Contents'][0]['StorageClass'] + 'Storage'}
761 | # ],
762 | # StartTime=startTime,
763 | # EndTime=endTime,
764 | # Statistics=["Average"],
765 | # Period=seconds_in_one_day
766 | # )
767 | # for r in s3metricresponse['Datapoints']:
768 | # s3resourceType.append(getvalue["resourceType"])
769 | # s3resourceName.append(getvalue["resourceName"])
770 | # s3size.append(convert_bytes(r['Average']))
771 | else:
772 | resourceType.append(getvalue["resourceType"])
773 | resourceName.append(getvalue["resourceName"])
774 | count.append(1)
775 | reason.append("S3 is not used")
776 | s3resourceType.append(getvalue["resourceType"])
777 | s3resourceName.append(getvalue["resourceName"])
778 | s3size.append("0 B")
779 | except:
780 | print("No data in S3 Bucket")
781 | # print(resources)
782 |
783 | dataset = {
784 | 'resourceType': resourceType,
785 | 'resourceName': resourceName,
786 | 'reason': reason,
787 | 'count': count
788 | }
789 |
790 | cwdataset = {
791 | 'resourceType': cwresourceType,
792 | 'resourceName': cwresourceName,
793 | 'reason': cwreason
794 | }
795 |
796 | lmddataset = {
797 | 'resourceType': lmdresourceType,
798 | 'resourceName': lmdresourceName,
799 | 'reason': lmdpackagesize
800 | }
801 |
802 | s3dataset = {
803 | 'resourceType': s3resourceType,
804 | 'resourceName': s3resourceName,
805 | 'reason': s3size
806 | }
807 |
808 | cwdf = pd.DataFrame.from_dict(cwdataset)
809 | cw_result_table = data_table(cwdf,'Reason')
810 |
811 | lmddf = pd.DataFrame.from_dict(lmddataset)
812 | lmd_result_table = data_table(lmddf,'Size')
813 |
814 | s3df = pd.DataFrame.from_dict(s3dataset)
815 | s3_result_table = data_table(s3df,'Size')
816 |
817 | df = pd.DataFrame.from_dict(dataset)
818 | result_table = data_table(df,'Reason')
819 | bar_resources = bar_totals(df, 'resourceType')
820 |
821 |     BODY_HTML = "Breakdown by AWS service<br>{}<br>"  # HTML tags in these strings were stripped when this listing was extracted; reconstructed here
822 |     BODY_HTML = BODY_HTML.format(bar_resources)
823 |     BODY_HTML += "All Resources:<br>{}<br>"
824 |     BODY_HTML = BODY_HTML.format(result_table)
825 |     # BODY_HTML += result_table
826 |
827 |     #print(lmd_result_table)
828 |     BODY_HTML += "Lambda Unused Code Size:<br>{}<br>"
829 |     BODY_HTML = BODY_HTML.format(lmd_result_table)
830 |
831 |     BODY_HTML += "S3 Unused Bucket Size:<br>{}<br>"
832 |     BODY_HTML = BODY_HTML.format(s3_result_table)
833 |
834 |     BODY_HTML += "CloudWatch Log Groups:<br>{}<br>"
835 |     BODY_HTML = BODY_HTML.format(cw_result_table)
836 |
837 | res = send_mail(BODY_HTML, startTime, endTime)
838 | # res = send_mail(result_table, startTime, endTime)
839 |
840 | return {
841 | 'statusCode': 200,
842 | 'body': json.dumps(res)
843 | }
844 |
845 |
846 | def send_mail(table, startTime, endTime):
847 | SENDER = os.environ['sender']
848 | #print(SENDER)
849 | RECIPIENT = os.environ['receiver']
850 | #print(RECIPIENT)
851 | CONFIGURATION_SET = "ConfigSet"
852 | SUBJECT = "Un-used AWS Resources in "+ os.environ['app'] +" "+ \
853 | os.environ['env'] + " environment"
854 | BODY_TEXT = ("Amazon SES Test (Python)\r\n"
855 | "This email was sent with Amazon SES using the "
856 | "AWS SDK for Python (Boto).")
857 |
858 | CHARSET = "UTF-8"
859 | BODY_HTML = "AWS Un-used Resources in " + \
860 | os.environ['env'] + \
861 |         " environment from {} to {}<br>"  # trailing HTML markup reconstructed; the tags were stripped from this listing
862 | BODY_HTML += table
863 | BODY_HTML = BODY_HTML.format(startTime.strftime(
864 | '%d-%m-%Y'), endTime.strftime('%d-%m-%Y'))
865 | sesclient = boto3.client('ses')
866 |
867 | # Try to send the email.
868 | try:
869 | # Provide the contents of the email.
870 | response = sesclient.send_email(
871 | Destination={
872 | 'ToAddresses': [
873 | RECIPIENT,
874 | ],
875 | },
876 | Message={
877 | 'Body': {
878 | 'Html': {
879 | 'Charset': CHARSET,
880 | 'Data': BODY_HTML,
881 | },
882 | 'Text': {
883 | 'Charset': CHARSET,
884 | 'Data': BODY_TEXT,
885 | },
886 | },
887 | 'Subject': {
888 | 'Charset': CHARSET,
889 | 'Data': SUBJECT,
890 | },
891 | },
892 | Source=SENDER,
893 | # If you are not using a configuration set, comment or delete the
894 | # following line
895 | # ConfigurationSetName=CONFIGURATION_SET,
896 | )
897 | # Display an error if something goes wrong.
898 | except ClientError as e:
899 | print(e.response['Error']['Message'])
900 | return "Error"
901 | else:
902 |         print("Email sent! Message ID:")
903 | print(response['MessageId'])
904 | return "Success"
905 |
906 |
907 | def bar_totals(df, grouping_col):
908 | df_grouped = df.groupby([grouping_col])['count'].sum().reset_index(
909 | name='total_count').sort_values(by=['total_count'], ascending=False)
910 | df_grouped['total_count'] = df_grouped['total_count']
911 | chli = return_total_count(df)
912 | chd = ''
913 | chl = ''
914 | chdl = ''
915 | for index, row in df_grouped.iterrows():
916 | chd += '{}|'.format(row['total_count'])
917 | chdl += '{}({})|'.format(row[grouping_col], row['total_count'])
918 |     bar_chart = '<img src="https://chart.googleapis.com/chart?cht=bvg&chs=500x200&chd=t:{}&chl={}">'.format(  # the original chart <img> URL was stripped from this listing; this is a best-guess reconstruction
919 |         chd[:-3], chdl[:-1])
920 | return bar_chart
921 |
922 |
923 | def return_total_count(df):
924 | df_total = df['count'].sum().round(3)
925 | result = '{}'.format(df_total)
926 | return result
927 |
928 | def convert_bytes(num):
929 |     step_unit = 1000.0  # decimal units; use 1024.0 for binary (KiB/MiB) sizes
930 | for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
931 | if num < step_unit:
932 | return "%3.1f %s" % (num, x)
933 | num /= step_unit
--------------------------------------------------------------------------------
/Lambda/src/lambda.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/karthickcse05/aws_unused_resources/23a170e7e005f35818950583e83a3dc5ab51526f/Lambda/src/lambda.zip
--------------------------------------------------------------------------------
/Lambda/src/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.14.60
2 | psycopg2==2.8.6
3 | botocore==1.17.60
4 | requests==2.24.0
5 | pandas==1.1.2
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Un-used AWS Resources
2 |
3 | ## If you are using the code directly, it can be run in two ways: as a Lambda function or as an ECS task
4 |
5 | [[Lambda]](Lambda/README.md)
6 |
7 | [[ECS Task]](ECS/README.md)
8 |
9 |
10 |
11 | ## If you are using the [Python Package](https://pypi.org/project/awsunusedresources/), it can likewise be run in two ways: as a Lambda function or as an ECS task
12 |
13 | [[Lambda]](Lambda-PyPacakge/README.md)
14 |
15 | [[ECS Task]](ECS-PyPackage/README.md)
16 |
17 | ## Screenshot
18 |
19 | 
--------------------------------------------------------------------------------