├── README.md ├── cleanup_old_ami ├── requirements.txt ├── README.md └── cleanup_old_ami_v1.py ├── cleanup_unused_ebs_vol ├── requirements.txt ├── README.md ├── cleanup_unused_ebs_vol_v1.py └── cleanup_unused_ebs_vol_v2.py ├── check_vpc_flow_log_enabled ├── trust-policy.json ├── check_vpc_flow_logs.json ├── check_vpc_flow_log_enabled_v2.py └── check_vpc_flow_log_enabled_v1.py ├── check_if_process_is_running ├── script_to_check_if_process_is_running_v1.sh ├── script_to_check_if_process_is_running_v2.sh ├── test_check_process.sh ├── script_to_check_if_process_is_running_v3.sh ├── script_to_check_if_process_is_running_v4.sh └── README.md ├── PULL_REQUEST_TEMPLATE.md ├── Creating-IAM-user ├── creating_iam_user_v1.py ├── creating_iam_user_v2.py ├── README.md ├── creating_iam_user_v3.py └── creating_iam_user_v4.py ├── script_to_setup_dev_environment ├── Dockerfile ├── README.md ├── script_to_setup_dev_env_v2.sh └── script_to_setup_dev_env_v1.sh ├── check_system_utilization_metrics ├── README.md └── check_memory_usage.sh ├── create_snapshots ├── README.md └── create_snapshot_v1.py ├── finding_and_deleting_files_greater_than_X_days ├── README.md ├── finding_and_deleting_files_greater_than_X_days_v1.sh └── finding_and_deleting_files_greater_than_X_days_v2.sh ├── remote_backup_script ├── README.md └── backup_to_remote_server_v1.sh ├── automatically_update_system ├── README.md └── automatically_update_system_v1.sh ├── check_if_server_is_up ├── check_if_server_is_up_v1.sh ├── check_if_server_is_up_v2.sh ├── check_if_server_is_up_v3.sh └── README.md ├── get_s3_bucket_size ├── README.md └── get_s3_bucket_size_v1.py ├── stop_start_ec2_instance ├── README.md └── script_to_stop_start_ec2_instance_v1.py ├── check_if_file_directory_present ├── check_if_file_directory_present_v1.sh ├── check_if_file_directory_present_v2.sh ├── README.md ├── check_if_file_directory_present_v3.sh └── test_check_if_file_directory_present.sh ├── get_instance_details ├── README.md └── 
get_instance_details_v1.py ├── rotate_iam_keys ├── README.md └── rotate_iam_keys_v1.py ├── deleting_iam_user └── deleting_iam_user.py ├── ssh_authentication_failure_block ├── README.md └── ssh_authentication_failure_block_v1.sh └── Setting-up-dev-environment ├── environment_setup_v2.sh └── environment_setup_v1.sh /README.md: -------------------------------------------------------------------------------- 1 | # N-days-of-automation 2 | -------------------------------------------------------------------------------- /cleanup_old_ami/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.26.96 2 | botocore==1.29.96 3 | jmespath==1.0.1 4 | python-dateutil==2.8.2 5 | s3transfer==0.6.0 6 | six==1.16.0 7 | urllib3==1.26.15 8 | -------------------------------------------------------------------------------- /cleanup_unused_ebs_vol/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.26.95 2 | botocore==1.29.95 3 | jmespath==1.0.1 4 | python-dateutil==2.8.2 5 | s3transfer==0.6.0 6 | six==1.16.0 7 | urllib3==1.26.15 8 | -------------------------------------------------------------------------------- /check_vpc_flow_log_enabled/trust-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [{ 4 | "Effect": "Allow", 5 | "Principal": { 6 | "Service": [ 7 | "vpc-flow-logs.amazonaws.com" 8 | ] 9 | }, 10 | "Action": "sts:AssumeRole" 11 | }] 12 | } 13 | -------------------------------------------------------------------------------- /check_if_process_is_running/script_to_check_if_process_is_running_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the process name was provided 4 | if [ -z "$1" ]; then 5 | echo "Please provide the process name." 
6 | exit 1 7 | fi 8 | 9 | # Check if the process is running 10 | if pgrep -x "$1" >/dev/null; then 11 | echo "Process '$1' is running." 12 | else 13 | echo "Process '$1' is not running." 14 | fi -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Changes proposed in this pull request: 2 | 3 | - [ ] Describe the changes you are proposing in detail. 4 | - [ ] Explain the rationale for the changes. 5 | - [ ] If applicable, include screenshots or other visual aids to help explain the changes. 6 | - [ ] If applicable, provide instructions for testing the changes. 7 | 8 | ## Related issues: 9 | 10 | - [ ] List any related issues or pull requests that this pull request addresses or fixes. 11 | -------------------------------------------------------------------------------- /check_if_process_is_running/script_to_check_if_process_is_running_v2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function check_process { 4 | # Check if the process name was provided 5 | if [ -z "$1" ]; then 6 | echo "Please provide the process name." 7 | return 1 8 | fi 9 | 10 | # Check if the process is running 11 | if pgrep -x "$1" >/dev/null; then 12 | echo "Process '$1' is running." 13 | return 0 14 | else 15 | echo "Process '$1' is not running." 
16 | return 1 17 | fi 18 | } 19 | 20 | check_process "$1" 21 | -------------------------------------------------------------------------------- /Creating-IAM-user/creating_iam_user_v1.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | 3 | def create_iam_user(username): 4 | iam = boto3.client("iam") 5 | 6 | 7 | try: 8 | iam.get_user(UserName=username) 9 | print(f'User {username} already exists') 10 | except iam.exceptions.NoSuchEntityException: 11 | try: 12 | iam.create_user(UserName=username) 13 | print(f'User {username} created sucessfully') 14 | except Exception as e: 15 | print(f"Error creating {username}: {e}") 16 | 17 | 18 | username=input("Please enter a IAM user you want to create: ") 19 | create_iam_user(username) 20 | 21 | -------------------------------------------------------------------------------- /script_to_setup_dev_environment/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | # Define Terraform version 4 | ENV TF_VERSION=1.3.9 5 | 6 | # Install necessary dependencies 7 | RUN apk update && apk add --no-cache \ 8 | wget \ 9 | unzip 10 | 11 | # Install Terraform 12 | RUN wget https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip \ 13 | && unzip terraform_${TF_VERSION}_linux_amd64.zip \ 14 | && mv terraform /usr/local/bin/ \ 15 | && chmod +x /usr/local/bin/terraform \ 16 | && rm terraform_${TF_VERSION}_linux_amd64.zip 17 | 18 | # Install Python packages 19 | RUN pip install --no-cache-dir \ 20 | boto3 \ 21 | awscli 22 | -------------------------------------------------------------------------------- /check_system_utilization_metrics/README.md: -------------------------------------------------------------------------------- 1 | # Memory Utilization Check Shell Script 2 | 3 | This shell script calculates the memory utilization of a Linux system by subtracting the free, shared, and 
buffer cache memory from the total memory. If the memory utilization exceeds the threshold (default: 70%), the script will also send an email alert to the specified recipient. 4 | 5 | ## Requirements 6 | ``` 7 | 1. Bash shell 8 | 2. mail command (for email alerts) 9 | ``` 10 | ## Usage 11 | ``` 12 | bash check_memory_usage.sh 13 | 14 | ``` 15 | ## Contributing 16 | 17 | Contributions are welcome! If you find any issues or have any suggestions for improvements, please submit an issue or pull request on GitHub. 18 | -------------------------------------------------------------------------------- /create_snapshots/README.md: -------------------------------------------------------------------------------- 1 | # AWS EC2 Snapshot Creator 2 | This Python script uses the AWS SDK for Python (Boto3) to create snapshots of all EBS volumes attached to all EC2 instances in a user's AWS account. The script is easy to use and helps automate the process of creating snapshots for data backup and disaster recovery purposes. 3 | ## Prerequisites 4 | * Python 3.6 or later 5 | * Boto3 library installed: pip install boto3 6 | * AWS CLI configured with appropriate credentials and default region 7 | ## Usage 8 | ``` 9 | python create_snapshot_v1.py 10 | ``` 11 | ## Contributing 12 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 13 | -------------------------------------------------------------------------------- /finding_and_deleting_files_greater_than_X_days/README.md: -------------------------------------------------------------------------------- 1 | # Delete Files older than X days 2 | 3 | This shell script finds all files in a specified directory that are older than a certain number of days and prompts the user for confirmation before deleting them. 4 | 5 | ## Usage 6 | ``` 7 | bash finding_and_deleting_files_greater_than_X_days_v1.sh 8 | ``` 9 | 10 | ## Notes 11 | 1. 
This script will prompt the user for confirmation before deleting any files. 12 | 2. This script will send an email notification to recipient@example.com when old files are deleted. 13 | 14 | ## Contributing 15 | 16 | Pull requests are welcome. For major changes, please open an issue first 17 | to discuss what you would like to change. -------------------------------------------------------------------------------- /remote_backup_script/README.md: -------------------------------------------------------------------------------- 1 | # Remote Backup Script 2 | This script is used to backup a local directory to a remote server using rsync. 3 | 4 | 5 | ## Prerequisites 6 | This script requires the following: 7 | 8 | ``` 9 | Bash 10 | rsync 11 | nc or netcat-openbsd 12 | SSH access to the remote server 13 | Setup password less authentication between two servers 14 | ``` 15 | 16 | ## Usage 17 | ``` 18 | ./check_remote_server_up_v1.sh 19 | 20 | ./check_remote_server_up_v1.sh /path/to/local/directory myuser example.com 21 | 22 | 23 | ``` 24 | ## Contributing 25 | Contributions are welcome! If you find any issues or have any suggestions for improvements, please submit an issue or pull request on GitHub. 
-------------------------------------------------------------------------------- /check_vpc_flow_log_enabled/check_vpc_flow_logs.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "ec2:CreateFlowLogs", 8 | "ec2:DescribeFlowLogs", 9 | "ec2:DescribeVpcs", 10 | "logs:CreateLogGroup", 11 | "logs:DescribeLogGroups" 12 | ], 13 | "Resource": "*" 14 | }, 15 | { 16 | "Effect": "Allow", 17 | "Action": [ 18 | "logs:CreateLogStream", 19 | "logs:PutLogEvents", 20 | "logs:DescribeLogStreams" 21 | ], 22 | "Resource": "*" 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /cleanup_old_ami/README.md: -------------------------------------------------------------------------------- 1 | # AWS AMI Cleanup 2 | 3 | This Python script helps you clean up old Amazon Machine Images (AMIs) in your AWS account by deregistering them based on their age. 4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * Python 3.6 or later 10 | * boto3 library installed 11 | * AWS credentials configured (either as environment variables or in the AWS credentials file) 12 | 13 | ## Usage 14 | 15 | To use this script, follow these steps: 16 | 17 | ``` 18 | python cleanup_old_ami.py 19 | ``` 20 | 21 | ## Contributing 22 | 23 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 24 | -------------------------------------------------------------------------------- /automatically_update_system/README.md: -------------------------------------------------------------------------------- 1 | # System Update Script 2 | 3 | This script automates the process of updating Linux systems based on Ubuntu/Debian and CentOS/RHEL/Fedora distributions. 
The script detects the distribution type and applies the latest security patches and software updates. 4 | 5 | ## Usage 6 | ``` 7 | bash automatically_update_system_v1.sh 8 | ``` 9 | 10 | ## Features 11 | * Detects distribution type (Ubuntu/Debian or CentOS/RHEL/Fedora) 12 | * Updates package list 13 | * Upgrades installed packages 14 | * Applies security updates 15 | * Removes unused packages 16 | * Cleans up package cache 17 | 18 | ## Contributing 19 | 20 | Contributions are welcome! If you find any issues or have any suggestions for improvements, please submit an issue or pull request on GitHub. -------------------------------------------------------------------------------- /check_if_server_is_up/check_if_server_is_up_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_server_up(){ 4 | # Perform ping 5 time to check if server is up 5 | if ping -c 5 "$1" > /dev/null 2>&1 6 | then 7 | echo "Server $1 is up and running" 8 | return 0 9 | else 10 | echo "Server $1 is down or unreachable" 11 | return 1 12 | fi 13 | } 14 | 15 | # Prompt user to enter server name as input 16 | read -p "Enter the hostname or IP address of the server: " server_name 17 | 18 | # Call the function with the user input 19 | check_server_up "$server_name" 20 | 21 | # Safety check to check the return value of function 22 | if [ $? -eq 0 ] 23 | then 24 | echo "Server $server_name is up and running" 25 | else 26 | echo "There is an error checking server $server_name " 27 | fi -------------------------------------------------------------------------------- /get_s3_bucket_size/README.md: -------------------------------------------------------------------------------- 1 | # S3 Bucket Size Calculator 2 | 3 | This Python script calculates the total size of an Amazon S3 bucket using the boto3 library and the list_objects_v2 operation. 
4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * Python 3.6 or later 10 | * boto3 library installed 11 | * AWS credentials configured (either as environment variables or in the AWS credentials file) 12 | 13 | ## Usage 14 | 15 | To use this script, follow these steps: 16 | 17 | ``` 18 | python get_s3_bucket_size_v1.py 19 | ``` 20 | 21 | ## Contributing 22 | 23 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 24 | -------------------------------------------------------------------------------- /script_to_setup_dev_environment/README.md: -------------------------------------------------------------------------------- 1 | # Bash Script for Installing Python Modules and Terraform 2 | 3 | This shell script checks if the required Python modules (boto3, and awscli),pip3 and Terraform are installed, and installs them if they are missing. It works on both CentOS, Ubuntu, and macOS. 4 | 5 | 6 | ## Installation 7 | 8 | Clone the repository: 9 | 10 | ```bash 11 | git clone git@github.com:/N-days-of-automation.git 12 | ``` 13 | 14 | ## Usage 15 | 16 | ``` 17 | cd N-days-of-automation/script_to_setup_dev_environment 18 | bash script_to_setup_dev_env_v2.sh 19 | ``` 20 | 21 | ## Contributing 22 | 23 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 24 | 25 | ## License 26 | 27 | -------------------------------------------------------------------------------- /stop_start_ec2_instance/README.md: -------------------------------------------------------------------------------- 1 | # EC2 Instance Stop/Start Script 2 | 3 | This script allows you to stop or start an Amazon EC2 instance using the AWS SDK for Python (Boto3). It takes the instance ID and the desired action (stop or start) as command line arguments. 
4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * Python 3.6 or later 10 | * boto3 library installed 11 | * AWS credentials configured (either as environment variables or in the AWS credentials file) 12 | 13 | ## Usage 14 | 15 | To use this script, follow these steps: 16 | 17 | ``` 18 | python ec2_stop_start.py 19 | ``` 20 | 21 | ## Contributing 22 | 23 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 24 | -------------------------------------------------------------------------------- /get_s3_bucket_size/get_s3_bucket_size_v1.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import argparse 3 | 4 | def get_bucket_size(bucket_name): 5 | try: 6 | s3 = boto3.client('s3') 7 | paginator = s3.get_paginator('list_objects_v2') 8 | 9 | size = 0 10 | for page in paginator.paginate(Bucket=bucket_name): 11 | for obj in page.get('Contents', []): 12 | size += obj['Size'] 13 | 14 | return size 15 | 16 | except Exception as e: 17 | print(f"An error occured: {e}") 18 | return None 19 | 20 | if __name__ == "__main__": 21 | parser = argparse.ArgumentParser(description="Get the size of S3 bucket") 22 | parser.add_argument('bucket_name', type=str, help="The name of S3 bucket") 23 | args = parser.parse_args() 24 | 25 | bucket_size = get_bucket_size(args.bucket_name) 26 | if bucket_size is not None: 27 | print(f"Size of {args.bucket_name} with bucket size in {bucket_size} bytes") -------------------------------------------------------------------------------- /Creating-IAM-user/creating_iam_user_v2.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import argparse 3 | import sys 4 | 5 | def create_iam_user(username): 6 | iam = boto3.client("iam") 7 | 8 | 9 | try: 10 | iam.get_user(UserName=username) 11 | 
print(f'User {username} already exists') 12 | except iam.exceptions.NoSuchEntityException: 13 | try: 14 | iam.create_user(UserName=username) 15 | print(f'User {username} created sucessfully') 16 | except Exception as e: 17 | print(f"Error creating {username}: {e}") 18 | 19 | 20 | if __name__ == '__main__': 21 | parser = argparse.ArgumentParser(description="Creating an IAM user") 22 | parser.add_argument('--username', type=str, help="The name of the IAM user, user want to create") 23 | args = parser.parse_args() 24 | if not any(vars(args).values()): 25 | parser.print_help() 26 | sys.exit() 27 | 28 | create_iam_user(args.username) 29 | 30 | -------------------------------------------------------------------------------- /check_if_file_directory_present/check_if_file_directory_present_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_file_or_dir(){ 4 | if [ ! -e "$1" ] 5 | then 6 | echo "File or directory $1 not present" 7 | return 1 8 | fi 9 | 10 | if [ -d "$1" ] 11 | then 12 | echo "The entered input $1 is a directory" 13 | elif [ -f "$1" ] 14 | then 15 | echo "The entered input $1 is a file" 16 | else 17 | echo "The entered input $1 is neither a file nor a directory" 18 | return 2 19 | fi 20 | } 21 | 22 | # Ask user for the input 23 | read -p "Enter the name of file or directory: " input_file_or_dir 24 | 25 | # Call the function with the user input 26 | check_file_or_dir "$input_file_or_dir" 27 | 28 | # Safety check to check the return value of function 29 | if [ $? 
-eq 0 ] 30 | then 31 | echo "File or directory $input_file_or_dir exists" 32 | else 33 | echo "There is an error checking $input_file_or_dir" 34 | fi -------------------------------------------------------------------------------- /check_if_server_is_up/check_if_server_is_up_v2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_server_up(){ 4 | # Perform ping 5 time to check if server is up 5 | if ping -c 5 "$1" > /dev/null 2>&1 6 | then 7 | echo "Server $1 is up and running" 8 | return 0 9 | else 10 | echo "Server $1 is down or unreachable" 11 | return 1 12 | fi 13 | } 14 | 15 | # Prompt user to enter server name as input 16 | read -p "Enter the name of the file containing server name: " server_name_file 17 | 18 | # Check if the file exists 19 | 20 | if [ ! -f "$server_name_file" ] 21 | then 22 | echo "File $server_name_file not found" 23 | exit 1 24 | fi 25 | 26 | # Read each server name from the file and verify there status 27 | 28 | while read -r server_name 29 | do 30 | check_server_up "$server_name" 31 | if [ $? -eq 0 ] 32 | then 33 | echo "Server $server_name is up and running" 34 | else 35 | echo "There is an error checking server $server_name " 36 | fi 37 | done < "$server_name_file" -------------------------------------------------------------------------------- /check_if_server_is_up/check_if_server_is_up_v3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_server_up(){ 4 | # Perform a check to see if the server is reachable on the specified port 5 | if nc -z -w1 "$1" "$2" >/dev/null 2>&1 6 | then 7 | return 0 8 | else 9 | return 1 10 | fi 11 | } 12 | 13 | # Prompt user to enter server name and port as input 14 | read -p "Enter the name of the file containing server name and port: " server_info_file 15 | 16 | # Check if the file exists 17 | 18 | if [ ! 
-f "$server_info_file" ] 19 | then 20 | echo "File $server_info_file not found" 21 | exit 1 22 | fi 23 | 24 | # Read each server name and port from the file and verify their status 25 | 26 | while read -r server_name server_port 27 | do 28 | check_server_up "$server_name" "$server_port" 29 | if [ $? -eq 0 ] 30 | then 31 | echo "Server $server_name is up and running" 32 | else 33 | echo "There is an error checking server $server_name " 34 | fi 35 | done < "$server_info_file" 36 | -------------------------------------------------------------------------------- /check_if_file_directory_present/check_if_file_directory_present_v2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_file_or_dir(){ 4 | if [ ! -e "$1" ] 5 | then 6 | echo "File or directory $1 not present" 7 | return 1 8 | fi 9 | 10 | if [ -d "$1" ] 11 | then 12 | echo "The entered input $1 is a directory" 13 | elif [ -f "$1" ] 14 | then 15 | echo "The entered input $1 is a file" 16 | else 17 | echo "The entered input $1 is neither a file nor a directory" 18 | return 2 19 | fi 20 | 21 | if [ ! -r "$1" ] 22 | then 23 | echo "You don't have a permission to read $1" 24 | fi 25 | } 26 | 27 | # Ask user for the input 28 | read -p "Enter the name of file or directory: " input_file_or_dir 29 | 30 | # Call the function with the user input 31 | check_file_or_dir "$input_file_or_dir" 32 | 33 | # Safety check to check the return value of function 34 | if [ $? -eq 0 ] 35 | then 36 | echo "File or directory $input_file_or_dir exists" 37 | else 38 | echo "There is an error checking $input_file_or_dir" 39 | fi 40 | -------------------------------------------------------------------------------- /cleanup_unused_ebs_vol/README.md: -------------------------------------------------------------------------------- 1 | # AWS EBS Volume Cleanup Script 2 | 3 | This Python script helps you clean up specified unused AWS EBS (Elastic Block Store) volumes. 
It takes an EBS volume ID as an argument and checks if the volume is available for deletion. If the volume is available, the script prompts the user for confirmation before deleting the volume. 4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * Python 3.6 or later 10 | * boto3 library installed 11 | * AWS credentials configured (either as environment variables or in the AWS credentials file) 12 | 13 | To install the required dependencies, run the following command: 14 | 15 | ``` 16 | pip install -r requirements.txt 17 | 18 | ``` 19 | 20 | ## Usage 21 | 22 | Run the script by passing an EBS volume ID as an argument: 23 | 24 | ``` 25 | python cleanup_unused_ebs_vol_v2.py vol-1234567890abcdef0 26 | ``` 27 | 28 | ## Contributing 29 | 30 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 31 | -------------------------------------------------------------------------------- /check_if_process_is_running/test_check_process.sh: -------------------------------------------------------------------------------- 1 | # Test case 1 2 | output=$(check_process nginx) 3 | if [ "$output" = "Process 'nginx' is running." ]; then 4 | echo "Test case 1 passed" 5 | else 6 | echo "Test case 1 failed" 7 | fi 8 | exit_code=$? 9 | if [ "$exit_code" -eq 0 ]; then 10 | echo "Exit code 0 for Test case 1" 11 | else 12 | echo "Exit code non-zero for Test case 1" 13 | fi 14 | 15 | # Test case 2 16 | output=$(check_process fake_process_name) 17 | if [ "$output" = "Process 'fake_process_name' is not running." ]; then 18 | echo "Test case 2 passed" 19 | else 20 | echo "Test case 2 failed" 21 | fi 22 | exit_code=$? 
23 | if [ "$exit_code" -eq 1 ]; then 24 | echo "Exit code 1 for Test case 2" 25 | else 26 | echo "Exit code non-zero for Test case 2" 27 | fi 28 | 29 | # Test case 3 30 | output=$(check_process) 31 | if [ "$output" = "Please provide the process name." ]; then 32 | echo "Test case 3 passed" 33 | else 34 | echo "Test case 3 failed" 35 | fi 36 | exit_code=$? 37 | if [ "$exit_code" -eq 1 ]; then 38 | echo "Exit code 1 for Test case 3" 39 | else 40 | echo "Exit code non-zero for Test case 3" 41 | fi 42 | -------------------------------------------------------------------------------- /check_if_server_is_up/README.md: -------------------------------------------------------------------------------- 1 | # Check Server Status 2 | 3 | This script checks the status of one or more remote servers using the ping command. The script takes the name of a file containing the server names as input, and outputs the status of each server to the console. 4 | 5 | ## Getting Started 6 | To use the script, simply clone or download the repository. 7 | ``` 8 | git clone git@github.com:100daysofdevops/N-days-of-automation.git 9 | ``` 10 | ## Prerequisites 11 | The script requires Bash to be installed on your Linux system. 12 | 13 | ## Usage 14 | To use the script, simply run the command : 15 | 16 | ``` 17 | bash check_if_server_is_up_v2.sh 18 | ``` 19 | 20 | ## Input File Format 21 | The input file should be a text file containing one server name or IP address per line. For example: 22 | 23 | ``` 24 | 192.168.1.1 25 | google.com 26 | ``` 27 | 28 | ## Shell one-liner 29 | 30 | ``` 31 | ping -c 1 SERVER_IP_ADDRESS >/dev/null && echo "Server is up" || echo "Server is down" 32 | 33 | ``` 34 | 35 | ## Contributing 36 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 
37 | -------------------------------------------------------------------------------- /get_instance_details/README.md: -------------------------------------------------------------------------------- 1 | # AWS EC2 Instance Comparison 2 | 3 | This script allows you to fetch and compare the details of two AWS EC2 instance types using the boto3 library. It displays the instance type, number of vCPUs, and memory size side by side in a tabular format. 4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * Python 3.6 or later 10 | * boto3 library installed 11 | * AWS credentials configured (either as environment variables or in the AWS credentials file) 12 | 13 | ## Usage 14 | 15 | To use this script, follow these steps: 16 | 17 | ``` 18 | python instance_comparison.py 19 | ``` 20 | 21 | ## Output 22 | ``` 23 | Instance Comparison: 24 | -------------------- 25 | Attribute Instance 1 Instance 2 26 | --------------------------------------------- 27 | Instance Type r6i.32xlarge r6i.24xlarge 28 | vCPUs 128 96 29 | Memory (MiB) 1048576 786432 30 | 31 | ``` 32 | 33 | ## Contributing 34 | 35 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 36 | -------------------------------------------------------------------------------- /rotate_iam_keys/README.md: -------------------------------------------------------------------------------- 1 | # AWS IAM Key Rotation with Python and Boto3 2 | 3 | This is a Python script that allows you to rotate access keys for IAM users in your AWS account using the Boto3 library. Access keys are rotated based on the maximum age you specify. 
4 | 5 | ## Getting Started 6 | 7 | Before running the script, you need to make sure you have the following: 8 | 9 | * An AWS account 10 | * AWS credentials with permission to rotate access keys 11 | * Python 3 installed 12 | 13 | To install the required dependencies, run the following command: 14 | 15 | ``` 16 | pip install boto3 argparse 17 | ``` 18 | 19 | ## Usage 20 | 21 | To rotate access keys for IAM users, run the script with the --max-key-age option: 22 | 23 | ``` 24 | python rotate_iam_keys.py --max-key-age 60 25 | ``` 26 | 27 | This will rotate any access keys that are older than 60 days for all IAM users in your account. 28 | 29 | ## Options 30 | 31 | ```--max-key-age```:The maximum number of days an access key can be active before it needs to be rotated. 32 | 33 | 34 | 35 | ## Contributing 36 | 37 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 38 | -------------------------------------------------------------------------------- /check_if_process_is_running/script_to_check_if_process_is_running_v3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Email configuration 4 | EMAIL_TO="reciever@example.com" 5 | EMAIL_FROM="sender@example.com" 6 | EMAIL_SUBJECT="process is not running" 7 | 8 | function send_email { 9 | echo "$1" | mail -s "EMAIL_SUBJECT" -r "EMAIL_FROM" "EMAIL_TO" 10 | } 11 | 12 | function check_process { 13 | # Check if the process name was provided 14 | if [ -z "$1" ]; then 15 | echo "Please provide the process name." 16 | return 1 17 | fi 18 | 19 | # Check if the process is running 20 | if pgrep -x "$1" >/dev/null; then 21 | echo "Process '$1' is running." 22 | return 0 23 | else 24 | echo "Process '$1' is not running. Attempting to restart the process..." 
25 | sudo systemctl restart $1 26 | sleep 5 27 | 28 | # Check if process restarted sucesfully 29 | 30 | if pgrep -x "$1" >/dev/null; then 31 | echo "Process '$1' restarted sucesfully" 32 | else 33 | echo "Failed to restart process '$1'. Sending email notification.." 34 | send_email "Failed to restart the process '$1'. Please investigate..." 35 | return 1 36 | fi 37 | 38 | fi 39 | } 40 | 41 | check_process "$1" -------------------------------------------------------------------------------- /cleanup_unused_ebs_vol/cleanup_unused_ebs_vol_v1.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | 4 | 5 | def cleanup_unused_ebs_vol(): 6 | ec2 = boto3.client('ec2') 7 | 8 | try: 9 | response = ec2.describe_volumes(Filters=[{'Name': 'status', 'Values': ['available']}]) 10 | unused_volumes = response['Volumes'] 11 | except Exception as e: 12 | print(f"Error getting the list of unused EBS volumes: {e}") 13 | return 14 | 15 | 16 | for volume in unused_volumes: 17 | volume_id = volume["VolumeId"] 18 | print(f"These are the unused volumes: {volume_id} ") 19 | 20 | confirm = input(f"Are you sure want to delete these volumes {volume_id}? 
(y/n) ") 21 | 22 | if confirm.lower() == 'y': 23 | try: 24 | ec2.delete_volume(VolumeId=volume_id) 25 | print(f"The following Volume is deleted {volume_id}") 26 | except ClientError as e: 27 | print(f"Error deleting the ebs volume with volume id {volume_id}: {e}") 28 | else: 29 | print(f"Skipping deleting volume: {volume_id}") 30 | 31 | 32 | if __name__ == '__main__': 33 | cleanup_unused_ebs_vol() 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /check_if_file_directory_present/README.md: -------------------------------------------------------------------------------- 1 | # Check if File or Directory is Present 2 | 3 | A simple Bash script to check if a file or directory exists, and whether it is a file or directory. The script also handles errors if the file or directory does not exist or if the user does not have permission to access it.. 4 | 5 | ## Getting Started 6 | To use the script, simply clone or download the repository. 7 | ``` 8 | git clone git@github.com:100daysofdevops/N-days-of-automation.git 9 | ``` 10 | 11 | You can then run the script by executing the following command: 12 | 13 | ``` 14 | bash check_if_file_directory_present_2.sh 15 | ``` 16 | 17 | 18 | ## Prerequisites 19 | The script requires Bash to be installed on your Linux system. 20 | 21 | ## Usage 22 | To use the script, simply run the command : 23 | 24 | ``` 25 | bash check_if_file_directory_present_2.sh 26 | ``` 27 | 28 | ## Shell script one liner 29 | ``` 30 | [ -e "$path" ] && [ -f "$path" ] && echo "$path is a file." || [ -d "$path" ] && echo "$path is a directory." || echo "$path does not exist." 31 | ``` 32 | 33 | ## Contributing 34 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 
35 | -------------------------------------------------------------------------------- /check_if_file_directory_present/check_if_file_directory_present_v3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_file_or_dir(){ 4 | if [ ! -e "$1" ] 5 | then 6 | echo "File or directory $1 not present" 7 | return 1 8 | fi 9 | 10 | if [ -d "$1" ] 11 | then 12 | echo "The entered input $1 is a directory" 13 | elif [ -f "$1" ] 14 | then 15 | echo "The entered input $1 is a file" 16 | else 17 | echo "The entered input $1 is neither a file nor a directory" 18 | return 2 19 | fi 20 | 21 | if [ ! -r "$1" ] 22 | then 23 | echo "You don't have a permission to read $1" 24 | fi 25 | } 26 | 27 | # Ask user for the input 28 | read -p "Enter the name of file or directory (press enter for current directory): " input_file_or_dir 29 | 30 | # Use current directory if user input is empty 31 | if [ -z "$input_file_or_dir" ] 32 | then 33 | input_file_or_dir=$(pwd) 34 | fi 35 | 36 | # Call the function with the user input 37 | check_file_or_dir "$input_file_or_dir" 38 | 39 | # Safety check to check the return value of function 40 | if [ $? 
import boto3
import datetime
from dateutil import parser
import argparse


def cleanup_old_ami(ami_age):
    """Deregister AMIs owned by this account that are older than `ami_age` days.

    Args:
        ami_age: Maximum allowed AMI age in days; anything strictly older
            is deregistered.
    """
    try:
        ec2 = boto3.client('ec2')

        # Only consider images owned by this account
        response = ec2.describe_images(Owners=['self'])

        current_date = datetime.datetime.now(datetime.timezone.utc)

        for image in response['Images']:
            # CreationDate is an ISO-8601 string; dateutil parses it tz-aware
            ami_creation_date = parser.parse(image['CreationDate'])
            ami_age_in_days = (current_date - ami_creation_date).days

            if ami_age_in_days > ami_age:
                try:
                    print(f"Deleting AMI ID {image['ImageId']} created {ami_age_in_days} day ago ")
                    ec2.deregister_image(ImageId=image['ImageId'])
                except Exception as e:
                    print(f"Error deleting AMI ID {image['ImageId']}: {e} ")
    except Exception as e:
        print(f"Error occured while describing ami: {e}")


if __name__ == '__main__':
    # Bug fix: the original named this variable `parser`, shadowing the
    # module-level `from dateutil import parser`; cleanup_old_ami() then
    # crashed with AttributeError because argparse.ArgumentParser has no
    # .parse() method.
    arg_parser = argparse.ArgumentParser(description="Delete AWS AMIs older than specified days")
    arg_parser.add_argument("ami_age", type=int, help="Age of AMIs in days")
    args = arg_parser.parse_args()

    cleanup_old_ami(args.ami_age)
import boto3
import argparse

ec2 = boto3.client('ec2')

# Past-tense labels for the final status message.
# Bug fix: the original printed f"{action.capitalize()}ped ...", which
# produced "Startped" for the start action.
_PAST_TENSE = {'stop': 'Stopped', 'start': 'Started'}


def ec2_stop_start(instance_id, action):
    """Stop or start an EC2 instance and wait until it reaches that state.

    Args:
        instance_id: ID of the EC2 instance (e.g. "i-0123456789abcdef0").
        action: Either "stop" or "start"; anything else prints a usage hint.
    """
    try:
        if action == 'stop':
            print(f"Stopping Instance with instance id {instance_id} ....")
            ec2.stop_instances(InstanceIds=[instance_id])
            # Block until AWS reports the instance fully stopped
            waiter = ec2.get_waiter('instance_stopped')
            waiter.wait(InstanceIds=[instance_id])
        elif action == 'start':
            print(f"Starting Instance with instance id {instance_id} ...")
            ec2.start_instances(InstanceIds=[instance_id])
            # Block until AWS reports the instance running
            waiter = ec2.get_waiter('instance_running')
            waiter.wait(InstanceIds=[instance_id])
        else:
            print("Please enter a valid action: stop/start")
            return

        print(f"{_PAST_TENSE[action]} Instance with instance id {instance_id}")
    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Stop or start an EC2 instance')
    parser.add_argument('instance_id', type=str, help='The ID of the EC2 instance')
    parser.add_argument('action', type=str, choices=['stop', 'start'], help='Action to perform (stop or start)')

    args = parser.parse_args()
    ec2_stop_start(args.instance_id, args.action)
import boto3

def delete_iam_users(except_user):
    """Delete every IAM user in the account except `except_user`.

    For each user this detaches managed policies, deletes inline policies,
    deletes access keys, removes group memberships and the login profile —
    all of which must be gone before iam.delete_user() will succeed.

    Args:
        except_user: Username to preserve (typically the caller's own user).
    """
    iam = boto3.client('iam')
    response = iam.list_users()
    for user in response['Users']:
        if user['UserName'] != except_user:
            user_name = user['UserName']
            print(f"Deleting IAM user: {user_name}")
            # Detach all managed policies from the user
            response = iam.list_attached_user_policies(UserName=user_name)
            for policy in response['AttachedPolicies']:
                iam.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn'])
            # Delete all inline policies (delete_user fails if any remain)
            response = iam.list_user_policies(UserName=user_name)
            for policy_name in response['PolicyNames']:
                iam.delete_user_policy(UserName=user_name, PolicyName=policy_name)
            # Delete all access keys for the user
            response = iam.list_access_keys(UserName=user_name)
            for access_key in response['AccessKeyMetadata']:
                iam.delete_access_key(UserName=user_name, AccessKeyId=access_key['AccessKeyId'])
            # Remove the user from all groups (delete_user fails otherwise)
            response = iam.list_groups_for_user(UserName=user_name)
            for group in response['Groups']:
                iam.remove_user_from_group(GroupName=group['GroupName'], UserName=user_name)
            # Delete the login profile, if one exists
            try:
                response = iam.get_login_profile(UserName=user_name)
                iam.delete_login_profile(UserName=user_name)
            except iam.exceptions.NoSuchEntityException:
                pass
            # Delete the user
            iam.delete_user(UserName=user_name)
            print(f"IAM user {user_name} deleted successfully")

if __name__ == '__main__':
    except_user = input("Enter the username that should not be deleted: ")
    delete_iam_users(except_user)
import argparse
import boto3

def get_instance_details(instance_type):
    """Return the EC2 instance-type description dict for `instance_type`."""
    client = boto3.client('ec2')

    # describe_instance_types returns a list; we asked for exactly one type
    reply = client.describe_instance_types(InstanceTypes=[instance_type])
    return reply['InstanceTypes'][0]

def print_comparison(instance_details1, instance_details2):
    """Print a side-by-side table comparing two instance-type descriptions."""
    def emit_row(label, left, right):
        print(f"{label:<15} {left:<15} {right:<15}")

    emit_row('Attribute', 'Instance 1', 'Instance 2')
    print("-" * 45)
    emit_row('Instance Type',
             instance_details1['InstanceType'],
             instance_details2['InstanceType'])
    emit_row('vCPUs',
             instance_details1['VCpuInfo']['DefaultVCpus'],
             instance_details2['VCpuInfo']['DefaultVCpus'])
    emit_row('Memory (MiB)',
             instance_details1['MemoryInfo']['SizeInMiB'],
             instance_details2['MemoryInfo']['SizeInMiB'])
    print()

if __name__ == '__main__':
    cli = argparse.ArgumentParser(description='Fetch and compare instance details')
    cli.add_argument('instance_type1', type=str, help='First instance type')
    cli.add_argument('instance_type2', type=str, help='Second instance type')
    args = cli.parse_args()

    try:
        first = get_instance_details(args.instance_type1)
        second = get_instance_details(args.instance_type2)

        print("Instance Comparison:")
        print("--------------------")
        print_comparison(first, second)
    except Exception as e:
        print(f"Error: {e}")
-------------------------------------------------------------------------------- 1 | # Check If a Process Is Running Bash Script 2 | 3 | This Bash script provides a quick and easy way to check if a process is running on a Linux system. The script checks whether a specified process is currently running and returns a success message if the process is running, or an error message if it is not. 4 | 5 | ## Getting Started 6 | To use the script, simply clone or download the script_to_check_if_process_is_running_v1.sh or script_to_check_if_process_is_running_v2.sh(updated version) file to your Linux system. You can then run the script by executing the following command: 7 | 8 | ``` 9 | bash script_to_check_if_process_is_running_v2.sh PROCESS_NAME 10 | ``` 11 | Replace PROCESS_NAME with the name of the process you want to check. 12 | 13 | ## Prerequisites 14 | The script requires Bash to be installed on your Linux system. 15 | 16 | ## Usage 17 | To use the script, simply run the command with the process name you want to check as an argument, like so: 18 | 19 | ``` 20 | bash script_to_check_if_process_is_running_v2.sh nginx 21 | ``` 22 | 23 | If the process is running, the script will output a success message indicating that the process is running. If the process is not running, the script will output an error message indicating that the process is not running. 24 | 25 | ## Shell one-liner 26 | 27 | ``` 28 | systemctl is-active --quiet process_name || (systemctl restart process_name && systemctl is-active process_name || echo "Failed to start process_name" | mail -s "Process Restart Failed" reciever@example.com) 29 | ``` 30 | 31 | ## Contributing 32 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 
#!/usr/bin/env bash

# Find files in a directory older than N days and, after interactive
# confirmation, delete them and send a notification email.
delete_old_files() {
    # Verify for required arguments
    if [ "$#" -ne 2 ]; then
        echo "Error: incorrect number of arguments"
        echo "Usage: $0 <directory> <days>"
        exit 1
    fi

    # Directory and age threshold come from the command line arguments
    directory="$1"
    days="$2"

    # Verify that directory is not empty
    if [ -z "$directory" ]; then
        echo "Error: directory path cannot be empty"
        exit 1
    fi

    # Bug fix: validate that days is a whole number; the original fed
    # arbitrary text straight into shell arithmetic and find -mtime
    if ! [[ "$days" =~ ^[0-9]+$ ]]; then
        echo "Error: days must be a non-negative integer"
        exit 1
    fi

    # find files older than specified number of days
    # (-mtime +(days-1) means "age strictly greater than `days` days")
    old_files=$(find "$directory" -type f -mtime +"$((days - 1))")

    if [ -z "$old_files" ]; then
        echo "No files older than $days days found in $directory"
        exit 0
    fi

    # Confirm with user before deleting the files
    echo "The following files are older than $days days:"
    echo "$old_files"
    read -p "Are you sure you want to delete these files? [y/N]: " confirm_delete

    if [ "$confirm_delete" = "y" ]; then
        # Bug fix: -d '\n' treats each line as one argument, so filenames
        # containing spaces are no longer split into pieces by xargs
        echo "$old_files" | xargs -d '\n' rm -vf

        # Notify users via email
        echo "Old files successfully deleted" | mail -s "Old Files Deleted" recipient@example.com
    else
        echo "Aborting deletion of old files"
        exit 0
    fi
}

# Verify required arguments before calling the function
if [ "$#" -ne 2 ]; then
    echo "Error: incorrect number of arguments"
    echo "Usage: $0 <directory> <days>"
    exit 1
fi

# Call the function with command line arguments directory and number of days
delete_old_files "$1" "$2"
#!/bin/bash
# Calculate memory utilization after subtracting the free, shared, and
# buffer-cache memory from the total memory, and email an alert when it
# crosses the configured threshold.

# Set the memory utilization threshold (percent)
THRESHOLD=70

# Read raw values (in kB) from /proc/meminfo
total_memory=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
free_memory=$(awk '/MemFree/ {print $2}' /proc/meminfo)
shared_memory=$(awk '/Shmem/ {print $2}' /proc/meminfo)
buffer_cache_memory=$(awk '/Buffers/ {print $2}' /proc/meminfo)

# Check that every memory value is numeric before doing arithmetic
for val in $total_memory $free_memory $shared_memory $buffer_cache_memory; do
    if ! [[ $val =~ ^[0-9]+$ ]]; then
        echo "Error: Invalid memory value"
        exit 1
    fi
done

# Calculate memory utilization
used_memory=$((total_memory - free_memory - shared_memory - buffer_cache_memory))
memory_utilization=$((used_memory * 100 / total_memory))

# Make sure the mail command is available before we may need it
if ! type "mail" > /dev/null
then
    echo "mail command is not installed. Installing now..."
    # NOTE(review): yum-only — assumes a RHEL/CentOS-family host
    sudo yum -y install mailx
else
    echo "mail command is already installed"
fi

# Check memory utilization against threshold.
# Bug fix: the original messages interpolated the undefined lowercase
# variable $threshold instead of $THRESHOLD, printing an empty value.
if [ "$memory_utilization" -gt "$THRESHOLD" ]
then
    echo "Memory utilization is above the threshold of $THRESHOLD%"

    # Send email alert
    email_subject="Memory utilization is above threshold"
    email_body="Memory utilization is currently at $memory_utilization%. Please check the server immediately."
    recipient_email="recipient@example.com"
    echo "$email_body" | mail -s "$email_subject" "$recipient_email"
else
    echo "Memory utilization is below the threshold of $THRESHOLD%"
fi
#!/usr/bin/env bash

# v2: find files older than N days and delete them after confirmation.
delete_old_files() {
    # Verify for required arguments
    if [ "$#" -ne 2 ]; then
        echo "Error: incorrect number of arguments"
        echo "Usage: $0 <directory> <days>"
        exit 1
    fi

    # Directory and age threshold come from the command line arguments
    directory="$1"
    days="$2"

    # Verify that directory is not empty
    if [ -z "$directory" ]; then
        echo "Error: directory path cannot be empty"
        exit 1
    fi

    # Bug fix: the original used `locate` plus parsed `ls -l` output, which
    # (a) relies on a possibly stale updatedb database, (b) only matched
    # direct children of $directory, and (c) broke filenames containing
    # spaces because awk printed only the last whitespace-separated token.
    # find(1) walks the live filesystem recursively and needs no parsing.
    old_files=$(find "$directory" -type f -mtime +"$((days - 1))")

    if [ -z "$old_files" ]; then
        echo "No files older than $days days found in $directory"
        exit 0
    fi

    # Confirm with user before deleting the files
    echo "The following files are older than $days days:"
    echo "$old_files"
    read -p "Are you sure you want to delete these files? [y/N]: " confirm_delete

    if [ "$confirm_delete" = "y" ]; then
        # -d '\n' keeps each line (filename) intact as a single argument
        echo "$old_files" | xargs -d '\n' rm -vf

        # Notify users via email
        echo "Old files successfully deleted" | mail -s "Old Files Deleted" recipient@example.com
    else
        echo "Aborting deletion of old files"
        exit 0
    fi
}

# Verify required arguments before calling the function
if [ "$#" -ne 2 ]; then
    echo "Error: incorrect number of arguments"
    echo "Usage: $0 <directory> <days>"
    exit 1
fi

# Call the function with command line arguments directory and number of days
delete_old_files "$1" "$2"
8 | 9 | To install the required dependencies, run the following command: 10 | 11 | ``` 12 | pip install boto3 argparse 13 | ``` 14 | 15 | ## Usage 16 | 17 | To create a single user, run the script with the --username option: 18 | 19 | ``` 20 | python create_iam_user.py --username prashant --password secretpassword --attach_policy 21 | ``` 22 | 23 | This will create a new IAM user with the username ```prashant```, set their password to ```secretpassword```, and attach the S3 Readonly Policy to their account. 24 | 25 | If you don't provide a password, the script will generate a random password for you. 26 | 27 | To create multiple users at once, you can provide a filename with a list of usernames: 28 | 29 | ``` 30 | python create_iam_user.py --filename users.txt --password secretpassword --attach_policy 31 | ``` 32 | 33 | This will create one IAM user for each line in the "users.txt" file. 34 | 35 | ## Options 36 | 37 | ```--username```: The name of the IAM user to create. 38 | 39 | ```--password```: The password for the IAM user. If not provided, a random password will be generated. 40 | 41 | ```--attach_policy```: Attach an IAM policy to the user. If not provided, the script will attach the S3 Readonly Policy by default. 42 | 43 | ```--filename```: A filename that contains a list of IAM usernames to create, one per line. 44 | 45 | ## Contributing 46 | 47 | Contributions to this script are welcome. If you find any bugs or have suggestions for improvement, feel free to open an issue or submit a pull request. 
import argparse
import sys
import secrets
import string

def random_password(length=12):
    """Generate a cryptographically secure random password.

    Security fix: uses secrets.choice instead of random.choice — the value
    is a credential, and the random module is not suitable for security
    purposes.

    Args:
        length: Number of characters in the password (default 12).

    Returns:
        A string of letters, digits and '!@#$%^&*' characters.
    """
    chars = string.ascii_letters + string.digits + '!@#$%^&*'
    return ''.join(secrets.choice(chars) for _ in range(length))


def create_iam_user(username, password=None, attach_policy=None):
    """Create an IAM user with a console login profile.

    Args:
        username: Name of the IAM user to create.
        password: Initial console password; a random one is generated if None.
        attach_policy: When provided (truthy), attach the AdministratorAccess
            managed policy to the new user.
    """
    # Imported lazily so the password helper is usable without the AWS SDK
    import boto3
    iam = boto3.client("iam")

    # Check if the user already exists
    try:
        iam.get_user(UserName=username)
        print(f'User {username} already exists')
    except iam.exceptions.NoSuchEntityException:
        try:
            # If no password is provided, generate a random password
            if password is None:
                password = random_password()
            iam.create_user(UserName=username)
            iam.create_login_profile(UserName=username, Password=password, PasswordResetRequired=True)
            print(f'User {username} created sucessfully with password {password}')
            # Bug fix: the original tested `attach_policy is None`, attaching
            # the admin policy exactly when --attach_policy was OMITTED —
            # the inverse of what the flag's help text describes.
            if attach_policy is not None:
                iam.attach_user_policy(UserName=username, PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess')
                print(f"Administrator Policy attached to the user: {username} ")
        except Exception as e:
            print(f"Error creating {username}: {e}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Creating an IAM user")
    parser.add_argument('--username', type=str, help="The name of the IAM user, user want to create")
    parser.add_argument('--password', type=str, help="The password for the IAM user(default is to generate the random password")
    parser.add_argument('--attach_policy', help="Attach an IAM Admin policy to the user")

    args = parser.parse_args()
    # With no arguments at all, show usage instead of doing nothing
    if not any(vars(args).values()):
        parser.print_help()
        sys.exit()

    create_iam_user(args.username, password=args.password, attach_policy=args.attach_policy)
#!/usr/bin/env bash

set -euo pipefail

# Rsync a directory to the same path on a remote server, after checking
# that the server is reachable and the target directory is writable.
# Failures are appended to $LOG_FILE.
backup_to_remote_server() {
    if [[ $# -ne 3 ]]; then
        echo "Usage: $0 <directory_to_backup> <remote_user> <remote_server>"
        return 1
    fi

    # Bug fix: `local readonly VAR=...` does not make VAR read-only — it
    # declares an extra variable literally named "readonly". `local -r`
    # is the correct form.
    local -r DIRECTORY_TO_BACKUP="$1"
    local -r REMOTE_SERVER_USER="$2"
    local -r REMOTE_SERVER="$3"
    local -r LOG_FILE="/var/log/backup.log"

    # Install nc if missing so we can probe the remote SSH port
    if ! command -v nc &>/dev/null; then
        if [[ -f /etc/redhat-release ]]; then
            if ! rpm -q nc &>/dev/null; then
                sudo yum -y install nc
            fi
        elif [[ -f /etc/lsb-release ]]; then
            sudo apt-get -y install netcat-openbsd
        else
            echo "Error: Unsupported operating system. Unable to install nc"
            return 1
        fi
    fi

    if ! nc -z -w 1 "${REMOTE_SERVER}" 22 &>/dev/null; then
        echo "Error: Remote Server ${REMOTE_SERVER} is not reachable"
        echo "$(date) Backup to Remote server ${REMOTE_SERVER} failed: server is not reachable" >>"$LOG_FILE"
        return 1
    fi

    # Verify the target directory exists remotely and is readable/writable
    if ! ssh "${REMOTE_SERVER_USER}@${REMOTE_SERVER}" "[ -d ${DIRECTORY_TO_BACKUP} ] && [ -r ${DIRECTORY_TO_BACKUP} ] && [ -w ${DIRECTORY_TO_BACKUP} ]" &>/dev/null; then
        echo "Error: Backup directory ${DIRECTORY_TO_BACKUP} does not exist or user ${REMOTE_SERVER_USER} does not have read/write permission"
        echo "$(date) Backup to Remote server ${REMOTE_SERVER} failed: backup directory ${DIRECTORY_TO_BACKUP} does not exist or user ${REMOTE_SERVER_USER} does not have permission" >>"$LOG_FILE"
        return 1
    fi

    # Take the backup using rsync; quoting protects paths containing spaces.
    # Bug fix: under `set -e` the old `rsync; if [ $? -ne 0 ]` pattern was
    # dead code — a failing rsync aborted the script before the test ran.
    # Testing the command directly in `if !` keeps the error path reachable.
    if ! rsync -avz --delete --exclude '*.log' --exclude '*.tmp' "${DIRECTORY_TO_BACKUP}/" "${REMOTE_SERVER_USER}@${REMOTE_SERVER}:${DIRECTORY_TO_BACKUP}/"; then
        echo "Error: There is an issue in performing backup to remote system using rsync"
        return 1
    fi

    echo "Backup to ${REMOTE_SERVER}:${DIRECTORY_TO_BACKUP} completed successfully"
    return 0
}

if [[ $# -ne 3 ]]; then
    echo "Usage: $0 <directory_to_backup> <remote_user> <remote_server>"
    exit 1
fi

backup_to_remote_server "$@"
#!/bin/bash
# This shell script checks if the required python modules (pip3, boto3, and awscli)
# and the terraform package are installed, and installs them if they are missing.
# This script should work on CentOS, Ubuntu and macOS.
# Check the terraform release page to use the latest terraform package https://releases.hashicorp.com/terraform/

# Define Terraform version
TF_VERSION=1.3.9

# Download and install the pinned Terraform release (Linux only).
install_terraform() {
    # Pick the package manager explicitly: the previous 'apt-get || yum'
    # fallback skipped wget on yum-based systems.
    if command -v apt-get &> /dev/null; then
        sudo apt-get -y install wget unzip
    else
        sudo yum install -y wget unzip
    fi
    sudo wget https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip
    sudo unzip terraform_${TF_VERSION}_linux_amd64.zip
    sudo mv terraform /usr/local/bin/
    sudo chmod +x /usr/local/bin/terraform
    sudo rm terraform_${TF_VERSION}_linux_amd64.zip
}

# Check if pip3 is installed ('command -v' for consistency; 'type' was used before)
if ! command -v pip3 &> /dev/null
then
    echo "pip3 is not installed. Installing now ..."
    # Install pip3
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install python3
    # Checking for Centos
    elif [ -f /etc/centos-release ]
    then
        sudo yum -y update
        sudo yum -y install python3-pip
    # Checking for Ubuntu
    elif [ -f /etc/lsb-release ]
    then
        sudo apt-get -y update
        sudo apt-get -y install python3-pip
    fi
fi

# Check if boto3 is installed
if python3 -c "import boto3" &> /dev/null; then
    echo "Boto3 is already installed"
else
    echo "Boto3 is not installed. Installing now...."
    pip3 install boto3 --user
fi

# Check if awscli is installed
if ! command -v aws &> /dev/null
then
    echo "awscli is not installed. Installing now..."
    pip3 install awscli --user
else
    echo "awscli is already installed"
fi

# Check if terraform is installed
if ! command -v terraform &> /dev/null
then
    echo "terraform is not installed installing now..."
    # Install terraform
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install terraform
    # Checking for Centos or Ubuntu
    elif [ -f /etc/centos-release ] || [ -f /etc/lsb-release ]
    then
        install_terraform
    fi
else
    echo "Terraform is already installed"
fi
-r "${1}" ]; then 25 | OUTPUT+="; You don't have permission to read ${1}" 26 | fi 27 | fi 28 | echo "$OUTPUT" 29 | return 0 30 | } 31 | 32 | # Set test report function 33 | test_report() { 34 | OUTPUT=$(check_file_or_dir "${1}") 35 | EXPECTED="${2}" 36 | if [[ "$OUTPUT" != "$EXPECTED" ]]; then 37 | printf "%b Test failed: expected '%s' but got '%s'%b\n" \ 38 | "${RED_COLOR}" "${EXPECTED}" "${OUTPUT}" "${RST_COLOR}" 39 | return 1 40 | else 41 | printf '%bTest passed%b\n' "${GRN_COLOR}" "${RST_COLOR}" 42 | return 0 43 | fi 44 | } 45 | 46 | # Set tests 47 | test1() { 48 | EXPECTED="The entered input test.txt is a file" 49 | INPUT="test.txt" 50 | test_report "${INPUT}" "${EXPECTED}" 51 | } 52 | 53 | test2() { 54 | EXPECTED="The entered input /proc/1 is a directory" 55 | INPUT="/proc/1" 56 | test_report "${INPUT}" "${EXPECTED}" 57 | } 58 | 59 | test3() { 60 | EXPECTED="The entered input /etc/shadow is a file" 61 | EXPECTED+="; You don't have permission to read /etc/shadow" 62 | INPUT="/etc/shadow" 63 | test_report "${INPUT}" "${EXPECTED}" 64 | } 65 | 66 | test4() { 67 | EXPECTED="File or directory xyz.txt not present" 68 | INPUT="xyz.txt" 69 | test_report "${INPUT}" "${EXPECTED}" 70 | } 71 | 72 | # Declare tests array 73 | TESTS=( test1 74 | test2 75 | test3 76 | test4 ) 77 | 78 | # Run the tests 79 | for TEST in ${TESTS[@]}; do 80 | $TEST 81 | TEST_RESULTS+="$?" 
import argparse
import secrets
import string
import sys


def random_password(length=12):
    """Return a random password of *length* characters.

    Drawn from letters, digits and '!@#$%^&*'.  Uses the secrets module
    (CSPRNG) -- the random module is predictable and unsuitable for
    generating credentials.
    """
    chars = string.ascii_letters + string.digits + '!@#$%^&*'
    return ''.join(secrets.choice(chars) for _ in range(length))


def create_iam_user(username, password=None, attach_policy=None):
    """Create an IAM user with a console login profile.

    password: initial console password; a random one is generated when None.
    attach_policy: policy ARN to attach; AmazonS3ReadOnlyAccess when None.
    Existing users are reported and left untouched.
    """
    import boto3  # deferred so the pure helpers stay importable without boto3

    iam = boto3.client("iam")

    try:
        # Check if the user already exists
        iam.get_user(UserName=username)
        print(f'User {username} already exists')
    except iam.exceptions.NoSuchEntityException:
        try:
            # If no password is provided, generate a random password
            if password is None:
                password = random_password()
            iam.create_user(UserName=username)
            iam.create_login_profile(UserName=username, Password=password, PasswordResetRequired=True)
            print(f'User {username} created sucessfully with password {password}')
            if attach_policy is None:
                # Default: S3 read-only
                iam.attach_user_policy(UserName=username, PolicyArn='arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess')
                print(f"S3 Readonly Policy attached to the user: {username} ")
            else:
                # BUG FIX: a caller-supplied --attach_policy ARN was
                # previously accepted but silently ignored.
                iam.attach_user_policy(UserName=username, PolicyArn=attach_policy)
                print(f"Policy {attach_policy} attached to the user: {username}")

        except Exception as e:
            print(f"Error creating {username}: {e}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Creating an IAM user")
    parser.add_argument('--username', type=str, help="The name of the IAM user, user want to create")
    parser.add_argument('--password', type=str, help="The password for the IAM user(default is to generate the random password")
    parser.add_argument('--attach_policy', help="Attach an IAM policy to the user")
    parser.add_argument('--filename', type=str, help="Filename that contain IAM user, seperated by line")

    args = parser.parse_args()
    # If no argument is provided, print help and exit
    if not any(vars(args).values()):
        parser.print_help()
        sys.exit()

    if args.filename:
        with open(args.filename, 'r') as file:
            usernames = file.read().splitlines()
        for username in usernames:
            create_iam_user(username, password=args.password, attach_policy=args.attach_policy)
    else:
        create_iam_user(args.username, password=args.password, attach_policy=args.attach_policy)
16 | # Install pip3 17 | # Checking for Mac 18 | if [ "$(uname)" == "Darwin" ] 19 | then 20 | brew install python3 21 | # Checking for Centos 22 | elif [ -f /etc/centos-release ] 23 | then 24 | sudo yum -y update 25 | sudo yum -y install python3-pip 26 | # Checking for Ubuntu 27 | elif [ -f /etc/lsb-release ] 28 | then 29 | sudo apt-get -y update 30 | sudo apt-get -y install python3-pip 31 | fi 32 | fi 33 | 34 | # Check if boto3 is installed 35 | if python3 -c "import boto3" &> /dev/null; then 36 | echo "Boto3 is already installed" 37 | else 38 | echo "Boto3 is not installed. Installing now...." 39 | pip3 install boto3 --user 40 | fi 41 | 42 | # Check if awscli is installed 43 | if ! command -v aws &> /dev/null 44 | then 45 | echo "awscli is not installed. Installing now..." 46 | # --user option allows the packages to be installed in the user's home directory, rather than globally. 47 | pip3 install awscli --user 48 | else 49 | echo "awscli is already installed" 50 | fi 51 | 52 | # Creating function to install terraform 53 | 54 | install_terraform () { 55 | sudo yum install -y unzip 56 | sudo wget https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip 57 | sudo unzip terraform_${TF_VERSION}_linux_amd64.zip 58 | sudo mv terraform /usr/local/bin/ 59 | sudo chmod +x /usr/local/bin/terraform 60 | sudo rm terraform_${TF_VERSION}_linux_amd64.zip 61 | } 62 | 63 | 64 | # Check if terraform is installed 65 | if ! command -v terraform &> /dev/null 66 | then 67 | echo "terraform is not installed installing now..." 
#!/bin/bash
# This shell script checks if the required python modules (pip3, boto3, and awscli)
# and the terraform package are installed, and installs them if they are missing.
# This script should work on CentOS, Ubuntu and macOS.
# Check the terraform release page to use the latest terraform package https://releases.hashicorp.com/terraform/

# Define Terraform version
TF_VERSION=1.3.9

# Download and install the pinned Terraform release; shared helper for the
# CentOS and Ubuntu branches, which previously duplicated these commands.
install_terraform() {
    if command -v apt-get &> /dev/null; then
        sudo apt-get -y install wget unzip
    else
        sudo yum install -y wget unzip   # CentOS also needs wget for the download
    fi
    sudo wget https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip
    sudo unzip terraform_${TF_VERSION}_linux_amd64.zip
    sudo mv terraform /usr/local/bin/
    sudo chmod +x /usr/local/bin/terraform
    sudo rm terraform_${TF_VERSION}_linux_amd64.zip
}

# Check if pip3 is installed
if ! command -v pip3 &> /dev/null
then
    echo "pip3 is not installed. Installing now ..."
    # Install pip3
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install python3
    # Checking for Centos
    elif [ -f /etc/centos-release ]
    then
        sudo yum -y update
        sudo yum -y install python3-pip
    # Checking for Ubuntu
    elif [ -f /etc/lsb-release ]
    then
        sudo apt-get -y update
        sudo apt-get -y install python3-pip
    fi
fi

# Check if boto3 is installed
if python3 -c "import boto3" &> /dev/null; then
    echo "Boto3 is already installed"
else
    echo "Boto3 is not installed. Installing now...."
    pip3 install boto3 --user
fi

# Check if awscli is installed
if ! command -v aws &> /dev/null
then
    echo "awscli is not installed. Installing now..."
    # --user installs into the user's home directory rather than globally.
    pip3 install awscli --user
else
    echo "awscli is already installed"
fi

# Check if terraform is installed
if ! command -v terraform &> /dev/null
then
    echo "terraform is not installed installing now..."
    # Install terraform
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install terraform
    # Checking for Centos or Ubuntu
    elif [ -f /etc/centos-release ] || [ -f /etc/lsb-release ]
    then
        install_terraform
    fi
else
    echo "Terraform is already installed"
fi
import boto3
import argparse
from botocore.exceptions import ClientError, NoCredentialsError

DELIVER_LOGS_PERMISSION_ARN = 'REPLACE IT WITH YOUR ROLE ARN'

def enable_vpc_flowlogs(vpc_id):
    """Ensure a CloudWatch-backed flow log exists for *vpc_id*.

    Creates the log group "<vpc_id>-cloudwatch-lg" if needed, then creates a
    flow log targeting it unless one already points at that group.
    """
    try:
        ec2 = boto3.client('ec2')
        logs = boto3.client('logs')

        log_group_name = f"{vpc_id}-cloudwatch-lg"

        # Create the destination log group up front; an existing group is fine.
        try:
            logs.create_log_group(logGroupName=log_group_name)
            print(f"Created log group: {log_group_name}")
        except logs.exceptions.ResourceAlreadyExistsException:
            print(f"Log group with name: {log_group_name} already exists")

        existing = ec2.describe_flow_logs(
            Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}]
        )

        # Already enabled when any flow log for this VPC uses our log group.
        vpc_flow_logs_enabled = any(
            entry.get('LogGroupName') == log_group_name
            for entry in existing['FlowLogs']
        )

        if vpc_flow_logs_enabled:
            print(f"VPC Flow log enabled for VPC {vpc_id}")
            return

        print(f"Enabling VPC Flow logs for VPC {vpc_id}")
        try:
            response = ec2.create_flow_logs(
                ResourceIds=[vpc_id],
                ResourceType='VPC',
                TrafficType='ALL',
                LogDestinationType='cloud-watch-logs',
                LogGroupName=log_group_name,
                DeliverLogsPermissionArn=DELIVER_LOGS_PERMISSION_ARN
            )
            if response.get('Unsuccessful'):
                error_msg = response['Unsuccessful'][0]['Error']['Message']
                print(f"Failed to enable VPC Flow Logs for VPC {vpc_id}: {error_msg}")
            else:
                print(f"Successfully enabled VPC Flow logs for VPC: {vpc_id}")
        except ClientError as e:
            if e.response['Error']['Code'] == 'FlowLogAlreadyExists':
                print(f"Flow Log with the same configuration and log destination already exists for VPC {vpc_id}")
            else:
                print(f"Unexpected error: {e}")

    except NoCredentialsError:
        print("Credentials not found. Please ensure your AWS credentials are configured correctly.")
    except ClientError as e:
        print(f"Unexpected error: {e}")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Enable VPC Flow Logs for the specified VPC ID')
    parser.add_argument('vpc_id', help='VPC ID to enable flow logs for')
    args = parser.parse_args()

    enable_vpc_flowlogs(args.vpc_id)
DeliverLogsPermissionArn=DELIVER_LOGS_PERMISSION_ARN 48 | ) 49 | if response.get('Unsuccessful'): 50 | error_msg = response['Unsuccessful'][0]['Error']['Message'] 51 | print(f"Failed to enable VPC Flow Logs for VPC {vpc_id}: {error_msg}") 52 | else: 53 | print(f"Successfully enabled VPC Flow logs for VPC: {vpc_id}") 54 | except ClientError as e: 55 | if e.response['Error']['Code'] == 'FlowLogAlreadyExists': 56 | print(f"Flow Log with the same configuration and log destination already exists for VPC {vpc_id}") 57 | else: 58 | print(f"Unexpected error: {e}") 59 | 60 | except NoCredentialsError: 61 | print("Credentials not found. Please ensure your AWS credentials are configured correctly.") 62 | except ClientError as e: 63 | print(f"Unexpected error: {e}") 64 | 65 | 66 | 67 | if __name__ == '__main__': 68 | enable_vpc_flowlogs() 69 | -------------------------------------------------------------------------------- /Setting-up-dev-environment/environment_setup_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This shell script that checks if the required python modules (pip3, boto3, and awscli) 3 | # Check terraform package are installed, and installs them if they are missing. 4 | # This script should work on both CentOS, Ubuntu and macOS 5 | # Check the terraform release page to use the latest terraform package https://releases.hashicorp.com/terraform/ 6 | # Author: Prashant Lakhera(laprashant@gmail.com) 7 | # Date: Feb 5, 2023 8 | 9 | # Define Terraform version 10 | TF_VERSION=1.3.7 11 | 12 | # Check if pip3 is installed 13 | if ! command -v pip3 &> /dev/null 14 | then 15 | echo "pip3 is not installed. Installing now ..." 
#!/bin/bash
# This shell script checks if the required python modules (pip3, boto3, and awscli)
# and the terraform package are installed, and installs them if they are missing.
# This script should work on CentOS, Ubuntu and macOS.
# Check the terraform release page to use the latest terraform package https://releases.hashicorp.com/terraform/
# Author: Prashant Lakhera(laprashant@gmail.com)
# Date: Feb 5, 2023

# Define Terraform version
TF_VERSION=1.3.7

# Download and install the pinned Terraform release; shared helper for the
# CentOS and Ubuntu branches, which previously duplicated these commands.
install_terraform() {
    if command -v apt-get &> /dev/null; then
        sudo apt-get -y install wget unzip
    else
        sudo yum install -y wget unzip   # CentOS also needs wget for the download
    fi
    sudo wget https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip
    sudo unzip terraform_${TF_VERSION}_linux_amd64.zip
    sudo mv terraform /usr/local/bin/
    sudo chmod +x /usr/local/bin/terraform
    sudo rm terraform_${TF_VERSION}_linux_amd64.zip
}

# Check if pip3 is installed
if ! command -v pip3 &> /dev/null
then
    echo "pip3 is not installed. Installing now ..."
    # Install pip3
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install python3
    # Checking for Centos
    elif [ -f /etc/centos-release ]
    then
        sudo yum -y update
        sudo yum -y install python3-pip
    # Checking for Ubuntu
    elif [ -f /etc/lsb-release ]
    then
        sudo apt-get -y update
        sudo apt-get -y install python3-pip
    fi
fi

# Check if boto3 is installed
if python3 -c "import boto3" &> /dev/null; then
    echo "Boto3 is already installed"
else
    echo "Boto3 is not installed. Installing now...."
    pip3 install boto3 --user
fi

# Check if awscli is installed
if ! command -v aws &> /dev/null
then
    echo "awscli is not installed. Installing now..."
    # --user installs into the user's home directory rather than globally.
    pip3 install awscli --user
else
    echo "awscli is already installed"
fi

# Check if terraform is installed
if ! command -v terraform &> /dev/null
then
    echo "terraform is not installed installing now..."
    # Install terraform
    # Checking for Mac
    if [ "$(uname)" == "Darwin" ]
    then
        brew install terraform
    # Checking for Centos or Ubuntu
    elif [ -f /etc/centos-release ] || [ -f /etc/lsb-release ]
    then
        install_terraform
    fi
else
    echo "Terraform is already installed"
fi
#!/usr/bin/env bash

# Install firewalld and mailx if missing.  The original mixed yum and apt-get
# in one condition ('A || B && C'), where '&&' and '||' bind with equal
# precedence, so the apt-get install always ran; the package manager is now
# selected explicitly.
install_firewall_mailx() {
    # Create an array with a list of packages
    packages=("firewalld" "mailx")

    for package in "${packages[@]}"; do
        if ! command -v "$package" >/dev/null 2>&1; then
            if command -v yum >/dev/null 2>&1; then
                if ! sudo yum -y install "$package"; then
                    echo "Failed to install $package"
                    exit 1
                fi
            elif command -v apt-get >/dev/null 2>&1; then
                if ! { sudo apt-get update && sudo apt-get -y install "$package"; }; then
                    echo "Failed to install $package"
                    exit 1
                fi
            else
                echo "Failed to install $package"
                exit 1
            fi
        fi
    done

    echo "All dependencies are installed now"
}

# Scan /var/log/secure for SSH authentication failures, mail a report, and
# block offending IPs with firewalld rich rules.
check_unauthenticated_attempt() {
    # Set the threshold for blocking IP addresses
    THRESHOLD=5

    # Get the current date and time
    current_date=$(date +"%Y-%m-%d %H:%M:%S")

    # Count failures.  BUG FIX: 'grep -c ... | grep -v' piped the count line
    # through grep -v (which filtered nothing); count after filtering instead.
    # grep -i so the count and the IP extraction below match the same lines
    # (they previously used different capitalisations of the pattern).
    unauthenticated_attempts=$(grep -i "authentication failure" /var/log/secure | grep -vc "invalid user")

    # If there is no authentication failure, exit the function
    if [[ $unauthenticated_attempts -eq 0 ]]; then
        return 0
    fi

    # Get the IP addresses of the failed attempts that exceed the threshold
    failed_ips=$(grep -i "authentication failure" /var/log/secure | grep -v "invalid user" | awk '{match($0, /[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/, ip); print ip[0]}' | sort | uniq -c | awk '$1 >= '"$THRESHOLD"' {print $2}')

    # If there are no IP addresses that exceed the threshold, exit the function
    if [[ -z $failed_ips ]]; then
        return 0
    fi

    # Send an email notification with the number of unauthenticated attempts
    email_subject="Unauthenticated attempt detected on $(hostname) at $current_date"
    email_body="There were $unauthenticated_attempts unauthenticated attempts detected on $(hostname) at $current_date. The following IP addresses have exceeded the threshold: $failed_ips"
    recipient_email="recipient@mail.com"
    echo "$email_body" | mailx -s "$email_subject" "$recipient_email"

    # Block the SSH port for the IP addresses.  BUG FIX: rules are added with
    # --permanent; runtime-only rules would be discarded by the --reload below.
    for ip in $failed_ips; do
        if ! firewall-cmd --permanent --zone=public --query-rich-rule="rule family='ipv4' source address='$ip' service name='ssh' reject"; then
            if ! firewall-cmd --permanent --zone=public --add-rich-rule="rule family='ipv4' source address='$ip' service name='ssh' reject"; then
                echo "Failed to add firewall rule for IP address $ip"
                exit 1
            else
                echo "Added firewall rule to block SSH port for IP address $ip"
            fi
        else
            echo "Firewall rule for IP address $ip already exists"
        fi
    done

    # Reload so the permanent rules take effect in the runtime configuration
    if ! firewall-cmd --reload; then
        echo "Failed to reload firewall rules"
        exit 1
    else
        echo "Firewall rules reloaded"
    fi
}

# Call the functions
install_firewall_mailx
check_unauthenticated_attempt