├── .gitignore ├── Project-1 ├── MyTest.json ├── README.md ├── SecurityGroupCFstack.yaml ├── error.json └── lambda_function.py ├── Project-2 ├── README.md ├── Sample_Images │ ├── architecture.jpg │ └── nature.jpg ├── src │ └── lambda_function.py ├── test │ └── test.py └── tf-aws-infra │ ├── .terraformignore │ ├── alarms.tf │ ├── lambda │ └── lambda_function.zip │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ └── dev.tfvars ├── Project-3 ├── README.md ├── back-end │ └── lambda_function.py ├── errors_encountered.md ├── front-end │ ├── app.js │ ├── index.html │ └── style.css ├── test │ ├── architecture.jpg │ └── file.txt └── tf-aws-infra │ ├── lambda │ └── lambda_function.zip │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ └── dev.tfvars ├── Project-4 ├── README.md ├── errors_encountered.md ├── front-end │ ├── apigClient.js │ ├── app.js │ ├── index.html │ ├── lib │ │ ├── CryptoJS │ │ │ ├── components │ │ │ │ ├── enc-base64.js │ │ │ │ └── hmac.js │ │ │ └── rollups │ │ │ │ ├── hmac-sha256.js │ │ │ │ └── sha256.js │ │ ├── apiGatewayCore │ │ │ ├── apiGatewayClient.js │ │ │ ├── sigV4Client.js │ │ │ ├── simpleHttpClient.js │ │ │ └── utils.js │ │ ├── axios │ │ │ └── dist │ │ │ │ └── axios.standalone.js │ │ └── url-template │ │ │ └── url-template.js │ └── style.css ├── image-1.png ├── image.png ├── test_files │ ├── architecture.jpg │ └── file.txt └── tf-aws-infra │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ └── dev.tfvars ├── Project-5 ├── README.md ├── front-end │ ├── app.js │ ├── index.html │ └── style.css └── tf-aws-infra │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ └── dev.tfvars ├── Project-6 ├── README.md ├── blog-app │ ├── Dockerfile │ ├── app.py │ ├── buildspec.yaml │ ├── requirements.txt │ ├── static │ │ └── css │ │ │ └── style.css │ └── templates │ │ ├── base.html │ │ ├── index.html │ │ └── posts │ │ ├── edit.html │ │ ├── index.html │ │ ├── new.html │ │ └── view.html ├── tf-aws-base-infra │ ├── codebuild.tf │ ├── dynamodb.tf │ ├── ecr.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ │ └── dev.tfvars └── tf-aws-ecs-alb │ ├── alb.tf │ ├── ecs.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── variables │ └── dev.tfvars ├── Project-7 ├── Jenkinsfile ├── README.md ├── jenkins_server │ ├── scripts │ │ └── install_build_tools.sh │ └── tf-aws-ec2 │ │ ├── backend.tf │ │ ├── data.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── provider.tf │ │ ├── variables.tf │ │ └── variables │ │ ├── dev.tfvars │ │ ├── prod.tfvars │ │ └── test.tfvars ├── manifest │ ├── deployment.yaml │ ├── kubectl_commands.md │ └── service.yaml └── tf-aws-eks │ ├── backend.tf │ ├── data.tf │ ├── eks.tf │ ├── provider.tf │ ├── variables.tf │ ├── variables │ ├── dev.tfvars │ ├── prod.tfvars │ └── test.tfvars │ └── vpc.tf └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | /.DS_Store 2 | .DS_Store 3 | __pycache__ 4 | 5 | # Compiled files 6 | *.tfstate 7 | *.tfstate.*.backup 8 | *.tfstate.backup 9 | 10 | # Module directory 11 | .terraform/ 12 | 13 | *.plan 14 | plan/ 15 | *.tfplan 16 | *.terraform.lock* 17 | 18 | terraform.*/ -------------------------------------------------------------------------------- /Project-1/MyTest.json: -------------------------------------------------------------------------------- 1 | { 2 | "STACK_NAME": "SecurityGroupCFStack", 3 | "RESOURCE_TYPE": 
"AWS::EC2::SecurityGroup", 4 | "SECURITY_GROUP_NAME": "SSHSecurityGroup" 5 | } -------------------------------------------------------------------------------- /Project-1/README.md: -------------------------------------------------------------------------------- 1 | ## Automating CloudFormation Stack Drift Remediation with AWS Eventbridge and Lambda -------------------------------------------------------------------------------- /Project-1/SecurityGroupCFstack.yaml: -------------------------------------------------------------------------------- 1 | Parameters: 2 | VPCId: 3 | Description: Default VPC that SG is deployed into 4 | Type: AWS::EC2::VPC::Id 5 | 6 | Resources: 7 | SSHSecurityGroup: 8 | Type: "AWS::EC2::SecurityGroup" 9 | Properties: 10 | GroupDescription: SSH Only Ingress Security Group 11 | SecurityGroupIngress: 12 | - CidrIp: "10.0.0.0/20" 13 | FromPort: 22 14 | ToPort: 22 15 | IpProtocol: tcp 16 | VpcId: !Ref VPCId -------------------------------------------------------------------------------- /Project-1/error.json: -------------------------------------------------------------------------------- 1 | Test Event Name 2 | Mytest 3 | 4 | Response 5 | { 6 | "errorMessage": "An error occurred (AccessDenied) when calling the DetectStackDrift operation: User: arn:aws:sts::503382476502:assumed-role/DetectDrfitFunction-role-evsmx98r/DetectDrfitFunction is not authorized to perform: cloudformation:DetectStackResourceDrift on resource: arn:aws:cloudformation:us-east-1:503382476502:stack/SecurityGroupCFStack/1db63cc0-7eca-11ee-8cb6-12d1774c641b because no identity-based policy allows the cloudformation:DetectStackResourceDrift action", 7 | "errorType": "ClientError", 8 | "requestId": "53f3b006-3fdf-45d9-ba51-96cbfa18f093", 9 | "stackTrace": [ 10 | " File \"/var/task/lambda_function.py\", line 41, in lambda_handler\n stack_drift_detection = CF_CLIENT.detect_stack_drift( StackName=STACK_NAME )\n", 11 | " File \"/var/runtime/botocore/client.py\", line 530, in _api_call\n return self._make_api_call(operation_name, kwargs)\n", 12 | " File \"/var/runtime/botocore/client.py\", line 960, in _make_api_call\n raise error_class(parsed_response, operation_name)\n" 13 | ] 14 | } 15 | 16 | Test Event Name 17 | Mytest 18 | 19 | Response 20 | { 21 | "errorMessage": "An error occurred (UnauthorizedOperation) when calling the DescribeSecurityGroupRules operation: You are not authorized to perform this operation. 
User: arn:aws:sts::503382476502:assumed-role/DetectDrfitFunction-role-evsmx98r/DetectDrfitFunction is not authorized to perform: ec2:DescribeSecurityGroupRules because no identity-based policy allows the ec2:DescribeSecurityGroupRules action", 22 | "errorType": "ClientError", 23 | "requestId": "650f2c71-8ca4-4272-8def-7b53b5dc17b3", 24 | "stackTrace": [ 25 | " File \"/var/task/lambda_function.py\", line 77, in lambda_handler\n restore_ssh_security_group(resource_id, expected_properties)\n", 26 | " File \"/var/task/lambda_function.py\", line 13, in restore_ssh_security_group\n rules = EC2_CLIENT.describe_security_group_rules(Filters=[{'Name': 'group-id', 'Values': [resource_id]}])\n", 27 | " File \"/var/runtime/botocore/client.py\", line 530, in _api_call\n return self._make_api_call(operation_name, kwargs)\n", 28 | " File \"/var/runtime/botocore/client.py\", line 960, in _make_api_call\n raise error_class(parsed_response, operation_name)\n" 29 | ] 30 | } 31 | 32 | Test Event Name 33 | Mytest 34 | 35 | Response 36 | { 37 | "errorMessage": "An error occurred (UnauthorizedOperation) when calling the AuthorizeSecurityGroupIngress operation: You are not authorized to perform this operation. User: arn:aws:sts::503382476502:assumed-role/DetectDrfitFunction-role-evsmx98r/DetectDrfitFunction is not authorized to perform: ec2:AuthorizeSecurityGroupIngress on resource: arn:aws:ec2:us-east-1:503382476502:security-group/sg-06925a8d4560ed77e because no identity-based policy allows the ec2:AuthorizeSecurityGroupIngress action. Encoded authorization failure message: 4l6DlKSlXm2aZJy8Z084pOfwdiT1XkREjCePPvkujVdGKSabslM1UpgIWEqe9grmKPAlCbwaMyKk60M1JwEn2fgX0WAeQkZgJH2BshGeHlvKee0LzVdfWNaqe64b0BZedCHgJxchdCJuVyGg5SRFWbfgE_fTAoN1dTctB0sfBNY1_KJ2F7ekNgzJCFxabNFaMVYl2Ar3b-GVZ6ONV58-UawJoJZLfMNCf5BVPC3YrhC_gSIlKkbwm8lMKmRsBeggOuwmNnzFIe2O8qfmf3bOOSvU2va5aSYwElsHJDJh-cmJhjWIqtJgW_vqWTqEXhaAyIwVBa673cQJZNowKUQVWBDBV9hp7eswgQv45_Jc-ObysCYuzuspZw75gT8DRp5PXjSxQ4kirlIySr6zXGUD6IcDespPNTP1IzNCDP6rxEXDve_5ayZF4hqesLfbaJh4l3NhqTsSgY3BmQgCMRz8ihYruV81yVMsghHT-oO70mqImj9o6ewJq70r_RBMlftVnJek1XImlY2PHPi_UNXTVRBF6Vr_LvuqD_dp-COxbjboqvGhfaO6yHCFlm5Of_N_L4HXJ6DnfpPPiCVZ4OirwX9J7f5b3j4FeKttO0LwrK06_ZrMee_bfu5Bt0F5Kb9Fjzqb1Xy_7cf4id-aunlK5mycjV-j4Gn1_SQrcv-D36kktYF8YfYiG_0fF3Kl_xdQYP3hrmOKAYOIh1UM9QFix7UKoCFpxv-6CMNa6CRePAZLFrXlvJEcBTv2D8B1r7Qd1CTYrUp9TOzUQoOz8OKTMig", 38 | "errorType": "ClientError", 39 | "requestId": "550a4429-fa81-4cdd-b77a-096058571467", 40 | "stackTrace": [ 41 | " File \"/var/task/lambda_function.py\", line 77, in lambda_handler\n restore_ssh_security_group(resource_id, expected_properties)\n", 42 | " File \"/var/task/lambda_function.py\", line 27, in restore_ssh_security_group\n security_group.authorize_ingress(\n", 43 | " File \"/var/runtime/boto3/resources/factory.py\", line 580, in do_action\n response = action(self, *args, **kwargs)\n", 44 | " File \"/var/runtime/boto3/resources/action.py\", line 88, in __call__\n response = getattr(parent.meta.client, operation_name)(*args, **params)\n", 45 | " File \"/var/runtime/botocore/client.py\", line 530, in _api_call\n return self._make_api_call(operation_name, kwargs)\n", 46 | " File \"/var/runtime/botocore/client.py\", line 960, in _make_api_call\n raise error_class(parsed_response, operation_name)\n" 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /Project-1/lambda_function.py: -------------------------------------------------------------------------------- 1 | import json 2 | import boto3 3 | import time 4 
| 5 | 6 | CF_CLIENT = boto3.client('cloudformation') 7 | EC2_CLIENT = boto3.client('ec2') 8 | 9 | def restore_ssh_security_group(resource_id, expected_properties): 10 | # Instantiate Security Group resource 11 | security_group = boto3.resource('ec2').SecurityGroup(resource_id) 12 | # Retrieve all security group rules 13 | rules = EC2_CLIENT.describe_security_group_rules(Filters=[{'Name': 'group-id', 'Values': [resource_id]}]) 14 | 15 | # Revoke ALL ingress security group rules 16 | # Skip egress rules 17 | if len(rules['SecurityGroupRules']) > 0: 18 | revoked_rules = [] 19 | for rule in rules['SecurityGroupRules']: 20 | if rule['IsEgress'] == True: 21 | continue 22 | revoked_rules.append(rule['SecurityGroupRuleId']) 23 | if len(revoked_rules) > 0: 24 | security_group.revoke_ingress(SecurityGroupRuleIds = revoked_rules) 25 | # In the event of a deleted expected security group rule, 26 | # authorize ingress security rule using expected properties 27 | security_group.authorize_ingress( 28 | CidrIp=expected_properties['CidrIp'], 29 | FromPort=expected_properties['FromPort'], 30 | ToPort=expected_properties['ToPort'], 31 | IpProtocol=expected_properties['IpProtocol'] 32 | ) 33 | print("Restored SSH Security Group Successfully") 34 | return 35 | 36 | def lambda_handler(event, context): 37 | STACK_NAME = event['STACK_NAME'] #"SecurityGroupCFStack" 38 | RESOURCE_TYPE = event['RESOURCE_TYPE'] #"AWS::EC2::SecurityGroup" 39 | SECURITY_GROUP_NAME = event['SECURITY_GROUP_NAME'] #"SSHSecurityGroup" 40 | # Initiate a stack drift detection 41 | stack_drift_detection = CF_CLIENT.detect_stack_drift( StackName=STACK_NAME ) 42 | stack_drift_detection_id = stack_drift_detection["StackDriftDetectionId"] 43 | print(f"Stack Drift Detection ID: {stack_drift_detection_id}") 44 | drift_detection_status = "" 45 | 46 | while drift_detection_status not in ["DETECTION_COMPLETE", "DETECTION_FAILED"]: 47 | check_stack_drift_detection_status = CF_CLIENT.describe_stack_drift_detection_status( 48 | StackDriftDetectionId=stack_drift_detection_id 49 | ) 50 | drift_detection_status = check_stack_drift_detection_status["DetectionStatus"] 51 | # Delay status check for 1 second to avoid CloudFormation API throttling 52 | time.sleep(1) 53 | print(f"Completed. 
Detection Status: {drift_detection_status}")
54 | 
55 |     # Alert if detection fails then continue with successfully reported resources
56 |     if drift_detection_status == "DETECTION_FAILED":
57 |         print("The stack drift detection did not complete successfully")
58 | 
59 |     # Check if the stack has drifted
60 |     if check_stack_drift_detection_status["StackDriftStatus"] == "DRIFTED":
61 | 
62 |         # Retrieve the resources that have drifted in the stack
63 |         stack_resource_drift = CF_CLIENT.describe_stack_resource_drifts(
64 |             StackName=STACK_NAME
65 |         )
66 | 
67 |         # Iterate over drifted resources
68 |         for drifted_stack_resource in stack_resource_drift["StackResourceDrifts"]:
69 |             resource_type = drifted_stack_resource["ResourceType"]
70 |             security_group_name = drifted_stack_resource["LogicalResourceId"]
71 |             resource_id = drifted_stack_resource["PhysicalResourceId"]
72 | 
73 |             # If the drifted resource is the SSH Security Group resource,
74 |             # restore security group rules using expected resource properties
75 |             if resource_type == RESOURCE_TYPE and security_group_name == SECURITY_GROUP_NAME:
76 |                 expected_properties = json.loads(drifted_stack_resource["ExpectedProperties"])["SecurityGroupIngress"][0]
77 |                 restore_ssh_security_group(resource_id, expected_properties)
78 |     else:
79 |         print("No Drift Detected")
-------------------------------------------------------------------------------- /Project-2/README.md: --------------------------------------------------------------------------------
1 | ## Monitoring and Logging with AWS CloudWatch for Serverless Application
2 | 
3 | ### Project: Serverless Image Processing with Monitoring and Logging
4 | 
5 | #### AWS Console - 
6 | 
7 | Step 1: Set Up AWS Resources
8 | 
9 | Step 2: Develop the Lambda Function
10 | 
11 | Step 3: Set Up Custom Metrics
12 | 
13 | Step 4: Set Up CloudWatch Alarms
14 | 
15 | Step 5: Set Up CloudWatch Logs
16 | 
17 | Step 6: Test the Application and Monitor
18 | 
19 | Step 7: Clean Up
20 | 
21 | OR
22 | 
23 | #### Terraform Automated Deployment - 
24 | Step 1: Set Up the AWS CLI and Terraform CLI or Terraform Cloud
25 | 
26 | - Make sure Terraform is configured with appropriate AWS credentials.
27 | 
28 | Step 2: Develop the Lambda Function in Python and create a zip file
29 | 
30 | Step 3: Create Terraform configuration files for the S3 Buckets, IAM Role, Lambda Function, and CloudWatch Alarms
31 | 
32 | Step 4: `terraform init` and `terraform apply`
33 | 
34 | Step 5: Test the application with the AWS CLI, for example with the commands sketched below.
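A minimal smoke test, assuming the values in `variables/dev.tfvars` (bucket names `image-processing-src-901` / `image-processing-tgt-902` and function name `ImageProcessingLambda`):

```bash
# Upload a sample image to the source bucket; the S3 event notification invokes the Lambda
aws s3 cp Project-2/Sample_Images/nature.jpg s3://image-processing-src-901/nature.jpg

# Watch the function's logs (requires AWS CLI v2)
aws logs tail /aws/lambda/ImageProcessingLambda --follow

# A 300x300 resized copy should appear in the target bucket under the same key
aws s3 ls s3://image-processing-tgt-902/
```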
35 | 
36 | Step 6: `terraform destroy`
-------------------------------------------------------------------------------- /Project-2/Sample_Images/architecture.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/vishal2505/AWSDevOpsProjects/7e33c298aea8f3054f1be18f6e2294d9ca564260/Project-2/Sample_Images/architecture.jpg
-------------------------------------------------------------------------------- /Project-2/Sample_Images/nature.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/vishal2505/AWSDevOpsProjects/7e33c298aea8f3054f1be18f6e2294d9ca564260/Project-2/Sample_Images/nature.jpg
-------------------------------------------------------------------------------- /Project-2/src/lambda_function.py: --------------------------------------------------------------------------------
1 | import boto3
2 | from PIL import Image
3 | import os
4 | import time
5 | import logging
6 | 
7 | LOGGER = logging.getLogger()
8 | LOGGER.setLevel(logging.INFO)
9 | 
10 | SRC_BUCKET = os.environ.get('SRC_BUCKET')
11 | TGT_BUCKET = os.environ.get('TGT_BUCKET')
12 | 
13 | s3 = boto3.client('s3')
14 | cloudwatch = boto3.client('cloudwatch')
15 | 
16 | def publish_custom_metric(value):
17 |     cloudwatch.put_metric_data(
18 |         Namespace='ImageProcessing',
19 |         MetricData=[
20 |             {
21 |                 'MetricName': 'ExecutionTime',
22 |                 'Value': value,
23 |                 'Unit': 'Milliseconds',
24 |             },
25 |         ]
26 |     )
27 | 
28 | def lambda_handler(event, context):
29 |     LOGGER.info('Source Bucket: %s', SRC_BUCKET)
30 |     LOGGER.info('Target Bucket: %s', TGT_BUCKET)
31 |     start_time = time.time()
32 | 
33 |     for record in event['Records']:
34 |         bucket = record['s3']['bucket']['name']
35 |         key = record['s3']['object']['key']
36 |         LOGGER.info('Processing file: %s', key)
37 |         download_path = '/tmp/{}'.format(key)
38 |         upload_path = '/tmp/resized-{}'.format(key)
39 | 
40 |         s3.download_file(bucket, key, download_path)
41 | 
42 |         with Image.open(download_path) as image:
43 |             resized_image = image.resize((300, 300))
44 | 
45 |         resized_image.save(upload_path)
46 | 
47 |         s3.upload_file(upload_path, TGT_BUCKET, key)
48 |         os.remove(download_path)
49 |         os.remove(upload_path)
50 | 
51 |     end_time = time.time()
52 |     execution_time = (end_time - start_time) * 1000  # in milliseconds
53 |     LOGGER.info('Total Execution Time: %s ms', execution_time)
54 | 
55 |     publish_custom_metric(execution_time)
56 | 
-------------------------------------------------------------------------------- /Project-2/test/test.py: --------------------------------------------------------------------------------
1 | import os
2 | from PIL import Image
3 | 
4 | download_path = "../Sample_Images/nature.jpg"
5 | upload_path = "../Sample_Images/nature_bw_resized.jpg"
6 | with Image.open(download_path) as image:
7 |     resized_image = image.resize((300, 300))
8 |     resized_image.save(upload_path)
9 | 
-------------------------------------------------------------------------------- /Project-2/tf-aws-infra/.terraformignore: --------------------------------------------------------------------------------
1 | .terraform*
2 | .terraform/*
-------------------------------------------------------------------------------- /Project-2/tf-aws-infra/alarms.tf: --------------------------------------------------------------------------------
1 | # Create CloudWatch Alarms for Lambda execution time
2 | resource "aws_cloudwatch_metric_alarm" "high_execution_time_alarm" {
3 |   alarm_name = 
"HighExecutionTimeAlarm" 4 | comparison_operator = "GreaterThanThreshold" 5 | evaluation_periods = 1 6 | metric_name = "ExecutionTime" 7 | namespace = "ImageProcessing" 8 | period = 300 9 | statistic = "Average" 10 | threshold = 1000 # Adjust the threshold as needed 11 | alarm_description = "Alarm triggered when Lambda execution time exceeds 1000 milliseconds." 12 | 13 | actions_enabled = true 14 | alarm_actions = ["arn:aws:sns:${var.aws_region}:${var.aws_account_id}:${var.sns_topic_name}"] 15 | ok_actions = ["arn:aws:sns:${var.aws_region}:${var.aws_account_id}:${var.sns_topic_name}"] 16 | insufficient_data_actions = [] 17 | } 18 | 19 | resource "aws_cloudwatch_metric_alarm" "lambda_error_alarm" { 20 | alarm_name = "LambdaInvocationErrorAlarm" 21 | comparison_operator = "GreaterThanOrEqualToThreshold" 22 | evaluation_periods = "1" 23 | metric_name = "Errors" 24 | namespace = "AWS/Lambda" 25 | period = "300" 26 | statistic = "Sum" 27 | threshold = "1" 28 | 29 | dimensions = { 30 | FunctionName = var.lambda_function_name 31 | } 32 | 33 | alarm_actions = ["arn:aws:sns:${var.aws_region}:${var.aws_account_id}:${var.sns_topic_name}"] 34 | ok_actions = ["arn:aws:sns:${var.aws_region}:${var.aws_account_id}:${var.sns_topic_name}"] 35 | insufficient_data_actions = [] 36 | 37 | } -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/lambda/lambda_function.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishal2505/AWSDevOpsProjects/7e33c298aea8f3054f1be18f6e2294d9ca564260/Project-2/tf-aws-infra/lambda/lambda_function.zip -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/main.tf: -------------------------------------------------------------------------------- 1 | #define variables 2 | locals { 3 | layer_zip_path = "lambda_layer.zip" 4 | layer_name = "lambda_requirements_layer" 5 | requirements_path = "${path.module}/../dependencies/requirements.txt" 6 | lambda_src_dir = "${path.module}/../src/" 7 | lambda_function_zip_path = "${path.module}/lambda/lambda_function.zip" 8 | } 9 | 10 | # Create S3 buckets - src and tgt 11 | resource "aws_s3_bucket" "image_bucket_src" { 12 | bucket = var.src_s3_bucket 13 | force_destroy = true 14 | } 15 | 16 | resource "aws_s3_bucket_ownership_controls" "image_bucket_src" { 17 | bucket = aws_s3_bucket.image_bucket_src.id 18 | rule { 19 | object_ownership = "BucketOwnerPreferred" 20 | } 21 | } 22 | 23 | resource "aws_s3_bucket_acl" "image_bucket_src" { 24 | depends_on = [aws_s3_bucket_ownership_controls.image_bucket_src] 25 | 26 | bucket = aws_s3_bucket.image_bucket_src.id 27 | acl = "private" 28 | } 29 | 30 | resource "aws_s3_bucket" "image_bucket_tgt" { 31 | bucket = var.tgt_s3_bucket 32 | force_destroy = true 33 | } 34 | 35 | resource "aws_s3_bucket_ownership_controls" "image_bucket_tgt" { 36 | bucket = aws_s3_bucket.image_bucket_tgt.id 37 | rule { 38 | object_ownership = "BucketOwnerPreferred" 39 | } 40 | } 41 | 42 | resource "aws_s3_bucket_acl" "image_bucket_tgt" { 43 | depends_on = [aws_s3_bucket_ownership_controls.image_bucket_tgt] 44 | 45 | bucket = aws_s3_bucket.image_bucket_tgt.id 46 | acl = "private" 47 | } 48 | 49 | # Create an IAM role for Lambda 50 | resource "aws_iam_role" "lambda_role" { 51 | name = "LambdaRole" 52 | 53 | assume_role_policy = jsonencode({ 54 | Version = "2012-10-17", 55 | Statement = [{ 56 | Action = "sts:AssumeRole", 57 | Effect = "Allow", 58 | Principal = { 59 | 
Service = "lambda.amazonaws.com" 60 | } 61 | }] 62 | }) 63 | } 64 | 65 | # Create S3 policy for Lambda functiion role to get and put objects to S3 bucket 66 | data "aws_iam_policy_document" "policy" { 67 | statement { 68 | effect = "Allow" 69 | actions = ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:CopyObject", "s3:HeadObject", 70 | "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", "cloudwatch:PutMetricData"] 71 | resources = ["*"] 72 | } 73 | } 74 | resource "aws_iam_policy" "policy" { 75 | name = "lambda-policy" 76 | policy = data.aws_iam_policy_document.policy.json 77 | } 78 | 79 | # Attach the AWSLambdaBasicExecutionRole policy to the IAM role 80 | resource "aws_iam_role_policy_attachment" "lambda_role_policy" { 81 | policy_arn = aws_iam_policy.policy.arn 82 | role = aws_iam_role.lambda_role.name 83 | } 84 | 85 | # Create the Lambda function using data resource 86 | data "archive_file" "lambda" { 87 | source_dir = local.lambda_src_dir 88 | output_path = local.lambda_function_zip_path 89 | type = "zip" 90 | } 91 | resource "aws_lambda_function" "image_processing_lambda" { 92 | filename = local.lambda_function_zip_path 93 | function_name = var.lambda_function_name 94 | role = aws_iam_role.lambda_role.arn 95 | handler = "lambda_function.lambda_handler" 96 | runtime = var.lambda_runtime 97 | timeout = 10 98 | memory_size = 128 99 | source_code_hash = data.archive_file.lambda.output_base64sha256 100 | # Use the Lambda Layer 101 | layers = ["arn:aws:lambda:us-east-1:770693421928:layer:Klayers-p38-Pillow:10"] 102 | 103 | environment { 104 | variables = { 105 | SRC_BUCKET = var.src_s3_bucket, 106 | TGT_BUCKET = var.tgt_s3_bucket 107 | } 108 | } 109 | 110 | } 111 | 112 | #Grant the source S3 bucket the permission to trigger our Lambda function 113 | resource "aws_lambda_permission" "allow_image_processing_bucket" { 114 | statement_id = "AllowExecutionFromS3Bucket" 115 | action = "lambda:InvokeFunction" 116 | function_name = "${aws_lambda_function.image_processing_lambda.arn}" 117 | principal = "s3.amazonaws.com" 118 | source_arn = "${aws_s3_bucket.image_bucket_src.arn}" 119 | } 120 | 121 | # We will use s3:ObjectCreated:* so we can get a notification when a file is added to our S3 bucket. 
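# (The lambda_function block below can additionally be scoped with filter_prefix /
# filter_suffix, e.g. filter_suffix = ".jpg", if only image uploads should invoke
# the function; left unfiltered, every created object triggers it.)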
122 | resource "aws_s3_bucket_notification" "bucket_image_processing_notification" { 123 | bucket = "${aws_s3_bucket.image_bucket_src.id}" 124 | lambda_function { 125 | lambda_function_arn = "${aws_lambda_function.image_processing_lambda.arn}" 126 | events = ["s3:ObjectCreated:*"] 127 | } 128 | depends_on = [ aws_lambda_permission.allow_image_processing_bucket ] 129 | } 130 | 131 | #SNS Topic 132 | resource "aws_sns_topic" "app_notification" { 133 | name = var.sns_topic_name 134 | } 135 | 136 | #SNS Subscription 137 | resource "aws_sns_topic_subscription" "app_notification_recipient" { 138 | topic_arn = aws_sns_topic.app_notification.arn 139 | protocol = "email" 140 | endpoint = var.alarm_receipient 141 | } 142 | 143 | -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/outputs.tf: -------------------------------------------------------------------------------- 1 | output "Source-S3-bucket" { 2 | value = "${aws_s3_bucket.image_bucket_src.id}" 3 | } 4 | output "Destination-S3-bucket" { 5 | value = "${aws_s3_bucket.image_bucket_tgt.id}" 6 | } -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "5.25.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = var.aws_region 12 | } -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "The region where the infrastructure should be deployed to" 3 | type = string 4 | } 5 | 6 | variable "aws_account_id" { 7 | description = "AWS Account ID" 8 | type = string 9 | } 10 | 11 | variable "src_s3_bucket" { 12 | description = "Source S3 bucket where files will be uploded" 13 | type = string 14 | } 15 | 16 | variable "tgt_s3_bucket" { 17 | description = "Target S3 bucket where processed files will be stored" 18 | type = string 19 | } 20 | 21 | variable "lambda_function_name" { 22 | description = "Lambda Function Name" 23 | type = string 24 | } 25 | 26 | variable "lambda_runtime" { 27 | description = "Lambda runtime" 28 | type = string 29 | } 30 | 31 | variable "sns_topic_name" { 32 | description = "SNS Topic Name" 33 | type = string 34 | } 35 | 36 | variable "alarm_receipient" { 37 | description = "Email Id for the Alarm receipient" 38 | type = string 39 | } -------------------------------------------------------------------------------- /Project-2/tf-aws-infra/variables/dev.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "us-east-1" 2 | aws_account_id = "503382476502" 3 | src_s3_bucket = "image-processing-src-901" 4 | tgt_s3_bucket = "image-processing-tgt-902" 5 | lambda_function_name = "ImageProcessingLambda" 6 | lambda_runtime = "python3.8" 7 | sns_topic_name = "image-processing-topic" 8 | alarm_receipient = "vishal.mishra2505@gmail.com" -------------------------------------------------------------------------------- /Project-3/README.md: -------------------------------------------------------------------------------- 1 | ## Implementing a file storage service for user content using API Gateway, Lambda, S3, and Cognito 2 | 3 | ### Create a Serverless file upload service where **authenticated** users can securely 
upload files to an S3 bucket through an API Gateway endpoint and process the uploaded files using Lambda functions.
4 | 
5 | ##### Part - 1 : File Storage Service Web app hosted in S3 and API Gateway & Lambda for the backend - *Completed*
6 | 
7 | 
8 | ##### Part - 2 : Web Identity Federation using Cognito - *Work in Progress*
-------------------------------------------------------------------------------- /Project-3/back-end/lambda_function.py: --------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import boto3
4 | from botocore.exceptions import ClientError
5 | import logging
6 | 
7 | LOGGER = logging.getLogger()
8 | LOGGER.setLevel(logging.INFO)
9 | 
10 | SRC_BUCKET = os.environ.get('USER_BUCKET')
11 | 
12 | s3 = boto3.client('s3')
13 | 
14 | def lambda_handler(event, context):
15 |     # Get the file content from the POST request
16 |     print(event)
17 |     file_content = event['body']
18 | 
19 |     # Define S3 bucket and key (file path) to store the uploaded file
20 |     file_key = 'uploads/' + event['queryStringParameters']['filename']  # Define your S3 file path
21 | 
22 |     try:
23 |         # Upload the file to S3 bucket
24 |         s3.put_object(Body=file_content, Bucket=SRC_BUCKET, Key=file_key)
25 | 
26 |         # Return a success response
27 |         return {
28 |             'statusCode': 200,
29 |             'body': json.dumps('File uploaded successfully to S3'),
30 |             "headers": {
31 |                 "Access-Control-Allow-Origin": "*"
32 |             }
33 |         }
34 |     except ClientError as e:
35 |         # If upload fails, return an error response
36 |         return {
37 |             'statusCode': 500,
38 |             'body': json.dumps('Failed to upload file to S3: {}'.format(str(e)))
39 |         }
40 | 
-------------------------------------------------------------------------------- /Project-3/errors_encountered.md: --------------------------------------------------------------------------------
1 | ### Errors Encountered and Resolutions
2 | 
3 | 1. Getting the error below even though CORS is enabled in API Gateway and the file is uploaded to S3
4 | 
5 | ```
6 | Access to fetch at 'https://xxxxxx.execute-api.us-east-1.amazonaws.com/dev/upload?filename=file.txt' from origin 'http://127.0.0.1:5500' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource. If an opaque response serves your needs, set the request's mode to 'no-cors' to fetch the resource with CORS disabled.
7 | 
8 | POST https://xxxxxx.execute-api.us-east-1.amazonaws.com/dev/upload?filename=file.txt net::ERR_FAILED 200 (OK)
9 | 
10 | app.js:35 There was a problem with the file upload: TypeError: Failed to fetch
11 |     at uploadFile (app.js:20:3)
12 |     at HTMLInputElement.handleFileUpload (app.js:7:5)
13 | ```
14 | 
15 | 
16 | *Solution* - Since I was using proxy integration, I had to send the "Access-Control-Allow-Origin" response header myself. I modified the Lambda function to include the response header, and after that this error got resolved.
17 | 
18 | 
19 | 2. Getting an *Access Denied* error while accessing the CloudFront distribution.
20 | 
21 | Error - 
22 | ```
23 | <Error>
24 |   <Code>AccessDenied</Code>
25 |   <Message>Access Denied</Message>
26 |   <RequestId>GT6MJX8YVDJW7N5F</RequestId>
27 |   <HostId>QKDWhlO6IU0nnk32nOpaJCRVK3Q4Ehrs/5ap9yTnaa2vRy2AhD7GYpkHx3tocfaB+QtmSrYrlxQ=</HostId>
28 | </Error>
29 | ```
30 | 
31 | Make sure the bucket policy is updated to allow CloudFront access.
32 | 
33 | *Solution*
34 | 
35 | Updated the bucket with the policy below.
36 | 37 | ``` 38 | { 39 | "Version": "2008-10-17", 40 | "Id": "PolicyForCloudFrontPrivateContent", 41 | "Statement": [ 42 | { 43 | "Sid": "AllowCloudFrontServicePrincipal", 44 | "Effect": "Allow", 45 | "Principal": { 46 | "Service": "cloudfront.amazonaws.com" 47 | }, 48 | "Action": "s3:GetObject", 49 | "Resource": "arn:aws:s3:::file-uploader-service-app-9002/*", 50 | "Condition": { 51 | "StringEquals": { 52 | "AWS:SourceArn": "arn:aws:cloudfront::503382476502:distribution/E683SHD2Z443X" 53 | } 54 | } 55 | } 56 | ] 57 | } 58 | ``` -------------------------------------------------------------------------------- /Project-3/front-end/app.js: -------------------------------------------------------------------------------- 1 | document.getElementById('file-input').addEventListener('change', getFileDetails); 2 | 3 | function getFileDetails(event) { 4 | const fileInput = document.getElementById('file-input'); 5 | const fileDetails = document.getElementById('file-details'); 6 | 7 | while (fileDetails.firstChild) { 8 | fileDetails.removeChild(fileDetails.firstChild); 9 | } 10 | 11 | for (const file of fileInput.files) { 12 | const fileInfo = document.createElement('p'); 13 | fileInfo.textContent = `File name: ${file.name}, Size: ${file.size} bytes`; 14 | fileDetails.appendChild(fileInfo); 15 | } 16 | } 17 | 18 | document.getElementById('submit-btn').addEventListener('click', uploadFile); 19 | 20 | function uploadFile(event) { 21 | 22 | const fileInput = document.getElementById('file-input'); 23 | const uploadStatus = document.getElementById('upload-status'); 24 | 25 | if (fileInput.files.length === 0) { 26 | alert('Please select a file before submitting.'); 27 | return; 28 | } 29 | const file = fileInput.files[0]; 30 | 31 | //const apiUrl = 'https://.execute-api.us-east-1.amazonaws.com/dev/upload' // API Gateway 32 | const apiUrl = 'https://deov8gwhrd.execute-api.us-east-1.amazonaws.com/prod/upload' 33 | 34 | const formData = new FormData(); 35 | formData.append('file', file); 36 | 37 | const urlWithParams = `${apiUrl}?filename=${encodeURIComponent(file.name)}`; 38 | 39 | uploadStatus.textContent = 'File upload in progress...'; 40 | 41 | fetch(urlWithParams, { 42 | method: 'POST', 43 | body: formData 44 | }) 45 | .then(response => { 46 | if (!response.ok) { 47 | throw new Error('Network response was not ok.'); 48 | } 49 | return response.json(); 50 | }) 51 | .then(data => { 52 | const uploadStatus = document.getElementById('upload-status'); 53 | uploadStatus.textContent = `File upload successful: ${file.name}`; 54 | console.log('File upload successful:', data); 55 | }) 56 | .catch(error => { 57 | const uploadStatus = document.getElementById('upload-status'); 58 | uploadStatus.textContent = `There was a problem with the file upload: ${error.message}`; 59 | console.error('There was a problem with the file upload:', error); 60 | }); 61 | } 62 | 63 | document.getElementById('reset-btn').addEventListener('click', function () { 64 | const fileDetails = document.getElementById('file-details'); 65 | const uploadStatus = document.getElementById('upload-status'); 66 | 67 | while (fileDetails.firstChild) { 68 | fileDetails.removeChild(fileDetails.firstChild); 69 | } 70 | 71 | uploadStatus.textContent = ''; 72 | }); 73 | -------------------------------------------------------------------------------- /Project-3/front-end/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | File Uploader 6 | 7 | 8 | 9 |
10 | <body>
11 |   <h1>Cloud File Storage Service</h1>
12 |   <h2>Select a File to Upload</h2>
13 |   <div id="upload-form">
14 |     <input type="file" id="file-input" />
15 |     <input type="button" id="choose-btn" value="Choose File" onclick="document.getElementById('file-input').click()" />
16 |     <input type="button" id="submit-btn" value="Submit" />
17 |     <input type="button" id="reset-btn" value="Reset" />
18 |     <div id="file-details"></div>
19 |     <p id="upload-status"></p>
20 |   </div>
21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /Project-3/front-end/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | display: flex; 3 | justify-content: center; 4 | align-items: center; 5 | height: 100vh; 6 | margin: 0; 7 | font-family: Arial, sans-serif; 8 | } 9 | 10 | h1 { 11 | font-size: 26px; 12 | color: #333; 13 | margin-bottom: 20px; 14 | } 15 | 16 | h2 { 17 | font-size: 20px; 18 | color: #333; 19 | margin-bottom: 30px; 20 | } 21 | 22 | #upload-form { 23 | text-align: center; 24 | border: 2px dashed #ccc; 25 | padding: 60px; 26 | } 27 | 28 | #file-input { 29 | display: none; 30 | } 31 | 32 | #file-details { 33 | margin-top: 20px; 34 | text-align: left; 35 | } 36 | 37 | #upload-status { 38 | margin-top: 20px; 39 | } 40 | 41 | #choose-btn, #submit-btn, #reset-btn { 42 | cursor: pointer; 43 | padding: 10px 20px; 44 | background-color: #007bff; 45 | color: #fff; 46 | border-radius: 4px; 47 | } -------------------------------------------------------------------------------- /Project-3/test/architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishal2505/AWSDevOpsProjects/7e33c298aea8f3054f1be18f6e2294d9ca564260/Project-3/test/architecture.jpg -------------------------------------------------------------------------------- /Project-3/test/file.txt: -------------------------------------------------------------------------------- 1 | this is sjust a test file which will be uploaded to s3 bucket via api gateway endpoint -------------------------------------------------------------------------------- /Project-3/tf-aws-infra/lambda/lambda_function.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishal2505/AWSDevOpsProjects/7e33c298aea8f3054f1be18f6e2294d9ca564260/Project-3/tf-aws-infra/lambda/lambda_function.zip -------------------------------------------------------------------------------- /Project-3/tf-aws-infra/main.tf: -------------------------------------------------------------------------------- 1 | #define variables 2 | locals { 3 | lambda_src_dir = "${path.module}/../back-end/" 4 | lambda_function_zip_path = "${path.module}/lambda/lambda_function.zip" 5 | } 6 | 7 | # Create S3 buckets for uploading user content from API Gateway Endpoint 8 | resource "aws_s3_bucket" "user_content_bucket" { 9 | bucket = var.user_bucket 10 | force_destroy = true 11 | } 12 | 13 | resource "aws_s3_bucket_ownership_controls" "user_content_bucket" { 14 | bucket = aws_s3_bucket.user_content_bucket.id 15 | rule { 16 | object_ownership = "BucketOwnerPreferred" 17 | } 18 | } 19 | 20 | resource "aws_s3_bucket_acl" "user_content_bucket" { 21 | depends_on = [aws_s3_bucket_ownership_controls.user_content_bucket] 22 | 23 | bucket = aws_s3_bucket.user_content_bucket.id 24 | acl = "private" 25 | } 26 | 27 | # Create an IAM role for Lambda 28 | resource "aws_iam_role" "lambda_role" { 29 | name = "LambdaRole" 30 | 31 | assume_role_policy = jsonencode({ 32 | Version = "2012-10-17", 33 | Statement = [{ 34 | Action = "sts:AssumeRole", 35 | Effect = "Allow", 36 | Principal = { 37 | Service = "lambda.amazonaws.com" 38 | } 39 | }] 40 | }) 41 | } 42 | 43 | # Create S3 policy for Lambda functiion role to get and put objects to S3 bucket 44 | data "aws_iam_policy_document" "policy" { 45 | statement { 46 | effect = "Allow" 47 | actions = ["s3:ListBucket", "s3:GetObject", 
"s3:PutObject", "s3:CopyObject", "s3:HeadObject", 48 | "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents"] 49 | resources = ["*"] 50 | } 51 | } 52 | resource "aws_iam_policy" "policy" { 53 | name = "lambda-policy" 54 | policy = data.aws_iam_policy_document.policy.json 55 | } 56 | 57 | # Attach the AWSLambdaBasicExecutionRole policy to the IAM role 58 | resource "aws_iam_role_policy_attachment" "lambda_role_policy" { 59 | policy_arn = aws_iam_policy.policy.arn 60 | role = aws_iam_role.lambda_role.name 61 | } 62 | 63 | # Create the Lambda function using data resource 64 | data "archive_file" "lambda" { 65 | source_dir = local.lambda_src_dir 66 | output_path = local.lambda_function_zip_path 67 | type = "zip" 68 | } 69 | resource "aws_lambda_function" "file_uploader_lambda" { 70 | filename = local.lambda_function_zip_path 71 | function_name = var.lambda_function_name 72 | role = aws_iam_role.lambda_role.arn 73 | handler = "lambda_function.lambda_handler" 74 | runtime = var.lambda_runtime 75 | timeout = 20 76 | memory_size = 128 77 | source_code_hash = data.archive_file.lambda.output_base64sha256 78 | 79 | environment { 80 | variables = { 81 | USER_BUCKET = var.user_bucket, 82 | } 83 | } 84 | 85 | } 86 | 87 | # Create API Gateway 88 | 89 | resource "aws_api_gateway_rest_api" "FileUploderService" { 90 | name = "FileUploderService" 91 | } 92 | 93 | resource "aws_api_gateway_resource" "FileUploderService" { 94 | parent_id = aws_api_gateway_rest_api.FileUploderService.root_resource_id 95 | path_part = "upload" 96 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 97 | } 98 | 99 | resource "aws_api_gateway_method" "FileUploderService" { 100 | authorization = "NONE" 101 | http_method = "POST" 102 | resource_id = aws_api_gateway_resource.FileUploderService.id 103 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 104 | } 105 | 106 | resource "aws_api_gateway_integration" "FileUploderService" { 107 | http_method = aws_api_gateway_method.FileUploderService.http_method 108 | resource_id = aws_api_gateway_resource.FileUploderService.id 109 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 110 | integration_http_method = "POST" 111 | type = "AWS_PROXY" 112 | uri = aws_lambda_function.file_uploader_lambda.invoke_arn 113 | } 114 | 115 | # Enabling CORS 116 | 117 | resource "aws_api_gateway_method_response" "FileUploderService" { 118 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 119 | resource_id = aws_api_gateway_resource.FileUploderService.id 120 | http_method = aws_api_gateway_method.FileUploderService.http_method 121 | status_code = "200" 122 | 123 | response_models = { 124 | "application/json" = "Empty" 125 | } 126 | 127 | response_parameters = { 128 | "method.response.header.Access-Control-Allow-Origin" = true, 129 | "method.response.header.Access-Control-Allow-Headers" = true, 130 | "method.response.header.Access-Control-Allow-Methods" = true 131 | } 132 | 133 | } 134 | 135 | resource "aws_api_gateway_deployment" "FileUploderService" { 136 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 137 | 138 | triggers = { 139 | redeployment = sha1(jsonencode([ 140 | aws_api_gateway_resource.FileUploderService.id, 141 | aws_api_gateway_method.FileUploderService.id, 142 | aws_api_gateway_integration.FileUploderService.id, 143 | ])) 144 | } 145 | 146 | lifecycle { 147 | create_before_destroy = true 148 | } 149 | } 150 | 151 | resource "aws_api_gateway_stage" "prod" { 152 | deployment_id = 
aws_api_gateway_deployment.FileUploderService.id 153 | rest_api_id = aws_api_gateway_rest_api.FileUploderService.id 154 | stage_name = "prod" 155 | } 156 | 157 | # Permission for API Gateway to invoke lambda function 158 | resource "aws_lambda_permission" "apigw_lambda" { 159 | statement_id = "AllowExecutionFromAPIGateway" 160 | action = "lambda:InvokeFunction" 161 | function_name = aws_lambda_function.file_uploader_lambda.function_name 162 | principal = "apigateway.amazonaws.com" 163 | source_arn = "arn:aws:execute-api:${var.aws_region}:${var.aws_account_id}:${aws_api_gateway_rest_api.FileUploderService.id}/*/${aws_api_gateway_method.FileUploderService.http_method}${aws_api_gateway_resource.FileUploderService.path}" 164 | } 165 | 166 | 167 | # Creating S3 bucket and cloudFront Distribution for the web app - (front-end) 168 | 169 | resource "aws_s3_bucket" "file_uploader_app_bucket" { 170 | bucket = var.webapp_bucket 171 | force_destroy = true 172 | 173 | tags = { 174 | Name = "File Uploader Service App Bucket" 175 | } 176 | } 177 | 178 | resource "aws_s3_bucket_ownership_controls" "file_uploader_app_bucket_owner" { 179 | bucket = aws_s3_bucket.file_uploader_app_bucket.id 180 | rule { 181 | object_ownership = "BucketOwnerPreferred" 182 | } 183 | } 184 | 185 | resource "aws_s3_bucket_acl" "file_uploader_app_bucket_acl" { 186 | depends_on = [aws_s3_bucket_ownership_controls.file_uploader_app_bucket_owner] 187 | bucket = aws_s3_bucket.file_uploader_app_bucket.id 188 | acl = "private" 189 | } 190 | 191 | locals { 192 | s3_origin_id = "FileUploaderS3Origin" 193 | } 194 | 195 | resource "aws_cloudfront_origin_access_control" "oac" { 196 | name = "fileuploader-oac" 197 | description = "File Uploader Policy" 198 | origin_access_control_origin_type = "s3" 199 | signing_behavior = "always" 200 | signing_protocol = "sigv4" 201 | } 202 | resource "aws_cloudfront_distribution" "s3_distribution" { 203 | origin { 204 | domain_name = aws_s3_bucket.file_uploader_app_bucket.bucket_regional_domain_name 205 | origin_access_control_id = aws_cloudfront_origin_access_control.oac.id 206 | origin_id = local.s3_origin_id 207 | } 208 | 209 | enabled = true 210 | is_ipv6_enabled = true 211 | comment = "Some comment" 212 | default_root_object = "index.html" 213 | 214 | default_cache_behavior { 215 | allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] 216 | cached_methods = ["GET", "HEAD"] 217 | target_origin_id = local.s3_origin_id 218 | 219 | forwarded_values { 220 | query_string = false 221 | 222 | cookies { 223 | forward = "none" 224 | } 225 | } 226 | 227 | viewer_protocol_policy = "redirect-to-https" 228 | min_ttl = 0 229 | default_ttl = 3600 230 | max_ttl = 86400 231 | } 232 | 233 | price_class = "PriceClass_200" 234 | 235 | restrictions { 236 | geo_restriction { 237 | restriction_type = "none" 238 | locations = [] 239 | } 240 | } 241 | 242 | tags = { 243 | Environment = "production" 244 | } 245 | 246 | viewer_certificate { 247 | cloudfront_default_certificate = true 248 | } 249 | } 250 | 251 | # Bucket policy for Cloudfront to access 252 | 253 | data "aws_iam_policy_document" "allow_access_from_cloudfront" { 254 | statement { 255 | principals { 256 | type = "Service" 257 | identifiers = ["cloudfront.amazonaws.com"] 258 | } 259 | 260 | actions = [ 261 | "s3:GetObject", 262 | "s3:ListBucket", 263 | ] 264 | 265 | resources = [ 266 | aws_s3_bucket.file_uploader_app_bucket.arn, 267 | "${aws_s3_bucket.file_uploader_app_bucket.arn}/*", 268 | ] 269 | condition { 270 | test = "StringEquals" 271 
| variable = "AWS:SourceArn"
272 |       values   = ["arn:aws:cloudfront::${var.aws_account_id}:distribution/${aws_cloudfront_distribution.s3_distribution.id}"]
273 | 
274 |     }
275 |   }
276 | }
277 | resource "aws_s3_bucket_policy" "allow_access_from_cloudfront" {
278 |   bucket = aws_s3_bucket.file_uploader_app_bucket.id
279 |   policy = data.aws_iam_policy_document.allow_access_from_cloudfront.json
280 | }
281 | 
282 | 
283 | 
-------------------------------------------------------------------------------- /Project-3/tf-aws-infra/outputs.tf: --------------------------------------------------------------------------------
1 | output "Source-S3-bucket" {
2 |   value = aws_s3_bucket.user_content_bucket.id
3 | }
4 | 
5 | output "File-Uploader-App-bucket" {
6 |   value = aws_s3_bucket.file_uploader_app_bucket.id
7 | }
8 | 
9 | output "fileuploader-api-endpoint" {
10 |   value = aws_api_gateway_rest_api.FileUploderService.id
11 | }
12 | 
13 | output "fileuploader-app-url" {
14 |   value = aws_cloudfront_distribution.s3_distribution.domain_name
15 | }
-------------------------------------------------------------------------------- /Project-3/tf-aws-infra/provider.tf: --------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     aws = {
4 |       source  = "hashicorp/aws"
5 |       version = "5.25.0"
6 |     }
7 |   }
8 | }
9 | 
10 | provider "aws" {
11 |   region = var.aws_region
12 | }
-------------------------------------------------------------------------------- /Project-3/tf-aws-infra/variables.tf: --------------------------------------------------------------------------------
1 | variable "aws_region" {
2 |   description = "The region where the infrastructure should be deployed to"
3 |   type        = string
4 | }
5 | 
6 | variable "aws_account_id" {
7 |   description = "AWS Account ID"
8 |   type        = string
9 | }
10 | 
11 | variable "user_bucket" {
12 |   description = "S3 bucket where files will be uploaded"
13 |   type        = string
14 | }
15 | 
16 | variable "lambda_function_name" {
17 |   description = "Lambda Function Name"
18 |   type        = string
19 | }
20 | 
21 | variable "lambda_runtime" {
22 |   description = "Lambda runtime"
23 |   type        = string
24 | }
25 | 
26 | variable "webapp_bucket" {
27 |   description = "Bucket for hosting the HTML, CSS, and JS for the app"
28 |   type        = string
29 | }
-------------------------------------------------------------------------------- /Project-3/tf-aws-infra/variables/dev.tfvars: --------------------------------------------------------------------------------
1 | aws_region           = "us-east-1"
2 | aws_account_id       = "503382476502"
3 | user_bucket          = "user-content-bucket-9001"
4 | lambda_function_name = "FileUploaderLambda"
5 | lambda_runtime       = "python3.8"
6 | webapp_bucket        = "file-uploader-service-app-9002"
-------------------------------------------------------------------------------- /Project-4/README.md: --------------------------------------------------------------------------------
1 | ## Implementing a file storage service for user content using API Gateway, S3, and Cognito
2 | 
3 | ### Create a serverless file upload service where **authenticated** users can securely upload files to an S3 bucket through an API Gateway endpoint, with the file uploaded to S3 directly.
4 | 
5 | 
6 | ##### In this project we are also going to use Web Identity Federation with Cognito.
7 | 
8 | Steps - 
9 | 
10 | 1. Create the backend infrastructure, for example the API Gateway and the S3 bucket for file uploads. Enable AWS_IAM authentication for the API Gateway.
11 | 
12 | AWS Link - [https://docs.aws.amazon.com/apigateway/latest/developerguide/integrating-api-with-aws-services-s3.html]
13 | 
14 | 2. Create an S3 bucket for website hosting and a CloudFront distribution for content delivery.
15 | 3. Create a Google API project and create a client ID. Provide the CloudFront distribution URL as the authorized JavaScript origin.
16 | 4. Create a Cognito identity pool and provide the Google client ID created in the previous step.
17 | 5. Create the HTML and JavaScript for the front end - 
18 |    - There will be a "Sign in with Google" button which allows users to sign in with their Google account.
19 |    - After signing in, a Google ID token is generated. We exchange this token with Cognito to get temporary credentials.
20 |    - Using these temporary credentials, the front end calls the REST API in API Gateway, and the user can upload files.
21 | 
22 | 
23 | Ref Docs - 
24 | 
25 | 
26 | https://docs.aws.amazon.com/apigateway/latest/developerguide/integrating-api-with-aws-services-s3.html
27 | 
28 | 
29 | Connecting to an API Gateway endpoint secured using AWS IAM can be challenging. You need to sign your requests using Signature Version 4. You can use:
30 | 
31 | Generated API Gateway SDK
32 | AWS Amplify
33 | 
34 | 
35 | 
36 | https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
37 | 
38 | https://sst.dev/archives/connect-to-api-gateway-with-iam-auth.html
39 | 
40 | https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-generate-sdk-javascript.html
41 | 
42 | 
43 | #### Issues and Resolutions - 
44 | 
45 | 1. CORS issue - an OPTIONS method needs to be specified along with PUT, and CORS needs to be enabled on the OPTIONS method.
46 | 
47 | 2. The following multipart header was getting added to the text file (and to image files too, so after downloading from S3 the image would not open).
48 | 
49 | ```
50 | ------WebKitFormBoundaryhtY6Yp8bjHScHEno
51 | Content-Disposition: form-data; name="file"; filename="file.txt"
52 | Content-Type: text/plain
53 | 
54 | ------WebKitFormBoundaryhtY6Yp8bjHScHEno--
55 | ```
56 | 
57 | 
58 | #### This is a failed project. I was not able to complete it because of the formData object passed into the API Gateway call: even though the call succeeded, files were uploaded with 0 bytes. The body parameter I was passing as formData was getting lost somewhere while making the API Gateway call. I did enable the multipart/form-data binary media type in the API Gateway settings and was passing the headers correctly.
59 | 
60 | ##### The API was working fine from Postman, but from the front end I wasn't able to make it work.
61 | 
62 | I tried debugging and changed the following code in *apiGatewayClient.js*, then got the error below - 
63 | 
64 | From - 
65 | ```
66 | if (request.body === undefined || request.body === '' || request.body === null || Object.keys(request.body).length === 0) {
67 | ```
68 | 
69 | TO - 
70 | ```
71 | if (request.body === undefined || request.body === '' || request.body === null) {
72 | ```
73 | 
74 | 
75 | ERROR - 
76 | 
77 | ```
78 | getting below error utils.js:62 Uncaught TypeError: Failed to construct 'FormData': Please use the 'new' operator, this DOM object constructor cannot be called as a function.
in the following statement - apigClient.bucketFilenamePut(params, formData, additionalParams) 79 | ``` 80 | 81 | 82 | ![POSTMAN API](image.png) 83 | 84 | ![Front-end](image-1.png) -------------------------------------------------------------------------------- /Project-4/errors_encountered.md: -------------------------------------------------------------------------------- 1 | ### Errors Encountered and Resolutions 2 | 3 | 1. Getting below error ewhile making cals to STS for getting temporary credentials. 4 | 5 | ``` 6 | 7 | POST https://sts.amazonaws.com/ 403 (Forbidden) 8 | 9 | app.js:48 AccessDenied: User: arn:aws:sts::503382476502:assumed-role/FileStorageAppRole/CognitoIdentityCredentials is not authorized to perform: sts:GetFederationToken on resource: arn:aws:sts::503382476502:federated-user/MyTemporaryCredentials 10 | at constructor.o (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:14:23990) 11 | at constructor.callListeners (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:16753) 12 | at constructor.emit (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:16462) 13 | at constructor.emitEvent (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:4122) 14 | at constructor.e (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:555) 15 | at a.runTo (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:16:14399) 16 | at https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:16:14606 17 | at constructor. (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:765) 18 | at constructor. (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:4177) 19 | at constructor.callListeners (https://sdk.amazonaws.com/js/aws-sdk-2.2.19.min.js:15:16859) 20 | ``` 21 | 22 | 23 | *Solution* Since I was using Proxy integration so I will have to send the following in the response header - "Access-Control-Allow-Origin". I modfied the LAmbda function with the response header and after that this error got resolved. 24 | 25 | 26 | 2. Getting *Access Denied* error whle accessing Cloudfront distribution. 27 | 28 | Error - 29 | ``` 30 | 31 | AccessDenied 32 | Access Denied 33 | GT6MJX8YVDJW7N5F 34 | QKDWhlO6IU0nnk32nOpaJCRVK3Q4Ehrs/5ap9yTnaa2vRy2AhD7GYpkHx3tocfaB+QtmSrYrlxQ= 35 | 36 | ``` 37 | 38 | Make sure bucket policy is updated for Cloudfront to access. 39 | 40 | *Solution* 41 | 42 | Updated below policy in the bucket. 43 | 44 | ``` 45 | { 46 | "Version": "2008-10-17", 47 | "Id": "PolicyForCloudFrontPrivateContent", 48 | "Statement": [ 49 | { 50 | "Sid": "AllowCloudFrontServicePrincipal", 51 | "Effect": "Allow", 52 | "Principal": { 53 | "Service": "cloudfront.amazonaws.com" 54 | }, 55 | "Action": "s3:GetObject", 56 | "Resource": "arn:aws:s3:::file-uploader-service-app-9002/*", 57 | "Condition": { 58 | "StringEquals": { 59 | "AWS:SourceArn": "arn:aws:cloudfront::503382476502:distribution/E683SHD2Z443X" 60 | } 61 | } 62 | } 63 | ] 64 | } 65 | ``` -------------------------------------------------------------------------------- /Project-4/front-end/apigClient.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). 5 | * You may not use this file except in compliance with the License. 6 | * A copy of the License is located at 7 | * 8 | * http://aws.amazon.com/apache2.0 9 | * 10 | * or in the "license" file accompanying this file. 
This file is distributed 11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 12 | * express or implied. See the License for the specific language governing 13 | * permissions and limitations under the License. 14 | */ 15 | 16 | var apigClientFactory = {}; 17 | apigClientFactory.newClient = function (config) { 18 | var apigClient = { }; 19 | if(config === undefined) { 20 | config = { 21 | accessKey: '', 22 | secretKey: '', 23 | sessionToken: '', 24 | region: '', 25 | apiKey: undefined, 26 | defaultContentType: 'application/json', 27 | defaultAcceptType: 'application/json' 28 | }; 29 | } 30 | if(config.accessKey === undefined) { 31 | config.accessKey = ''; 32 | } 33 | if(config.secretKey === undefined) { 34 | config.secretKey = ''; 35 | } 36 | if(config.apiKey === undefined) { 37 | config.apiKey = ''; 38 | } 39 | if(config.sessionToken === undefined) { 40 | config.sessionToken = ''; 41 | } 42 | if(config.region === undefined) { 43 | config.region = 'us-east-1'; 44 | } 45 | //If defaultContentType is not defined then default to application/json 46 | if(config.defaultContentType === undefined) { 47 | config.defaultContentType = 'application/json'; 48 | } 49 | //If defaultAcceptType is not defined then default to application/json 50 | if(config.defaultAcceptType === undefined) { 51 | config.defaultAcceptType = 'application/json'; 52 | } 53 | 54 | 55 | // extract endpoint and path from url 56 | var invokeUrl = 'https://9qxyb7ppvj.execute-api.us-east-1.amazonaws.com/prod'; 57 | var endpoint = /(^https?:\/\/[^\/]+)/g.exec(invokeUrl)[1]; 58 | var pathComponent = invokeUrl.substring(endpoint.length); 59 | 60 | var sigV4ClientConfig = { 61 | accessKey: config.accessKey, 62 | secretKey: config.secretKey, 63 | sessionToken: config.sessionToken, 64 | serviceName: 'execute-api', 65 | region: config.region, 66 | endpoint: endpoint, 67 | defaultContentType: config.defaultContentType, 68 | defaultAcceptType: config.defaultAcceptType 69 | }; 70 | 71 | var authType = 'NONE'; 72 | if (sigV4ClientConfig.accessKey !== undefined && sigV4ClientConfig.accessKey !== '' && sigV4ClientConfig.secretKey !== undefined && sigV4ClientConfig.secretKey !== '') { 73 | authType = 'AWS_IAM'; 74 | } 75 | 76 | var simpleHttpClientConfig = { 77 | endpoint: endpoint, 78 | defaultContentType: config.defaultContentType, 79 | defaultAcceptType: config.defaultAcceptType 80 | }; 81 | 82 | var apiGatewayClient = apiGateway.core.apiGatewayClientFactory.newClient(simpleHttpClientConfig, sigV4ClientConfig); 83 | 84 | 85 | 86 | apigClient.bucketFilenamePut = function (params, body, additionalParams) { 87 | if(additionalParams === undefined) { additionalParams = {}; } 88 | 89 | apiGateway.core.utils.assertParametersDefined(params, ['filename', 'bucket', 'Content-Type', 'Accept'], ['body']); 90 | 91 | var bucketFilenamePutRequest = { 92 | verb: 'put'.toUpperCase(), 93 | path: pathComponent + uritemplate('/{bucket}/{filename}').expand(apiGateway.core.utils.parseParametersToObject(params, ['filename', 'bucket', ])), 94 | headers: apiGateway.core.utils.parseParametersToObject(params, ['Content-Type', 'Accept']), 95 | queryParams: apiGateway.core.utils.parseParametersToObject(params, []), 96 | body: body 97 | }; 98 | 99 | console.log('Just before claling req body:', bucketFilenamePutRequest.body); 100 | 101 | return apiGatewayClient.makeRequest(bucketFilenamePutRequest, authType, additionalParams, config.apiKey); 102 | }; 103 | 104 | 105 | apigClient.bucketFilenameOptions = function (params, body, additionalParams) { 
106 |         if(additionalParams === undefined) { additionalParams = {}; }
107 | 
108 |         apiGateway.core.utils.assertParametersDefined(params, [], ['body']);
109 | 
110 |         var bucketFilenameOptionsRequest = {
111 |             verb: 'options'.toUpperCase(),
112 |             path: pathComponent + uritemplate('/{bucket}/{filename}').expand(apiGateway.core.utils.parseParametersToObject(params, [])),
113 |             headers: apiGateway.core.utils.parseParametersToObject(params, []),
114 |             queryParams: apiGateway.core.utils.parseParametersToObject(params, []),
115 |             body: body
116 |         };
117 | 
118 | 
119 |         return apiGatewayClient.makeRequest(bucketFilenameOptionsRequest, authType, additionalParams, config.apiKey);
120 |     };
121 | 
122 | 
123 |     return apigClient;
124 | };
125 | 
--------------------------------------------------------------------------------
/Project-4/front-end/app.js:
--------------------------------------------------------------------------------
1 | document.getElementById('file-input').addEventListener('change', getFileDetails);
2 | 
3 | function getFileDetails(event) {
4 |     const fileInput = document.getElementById('file-input');
5 |     const fileDetails = document.getElementById('file-details');
6 | 
7 |     while (fileDetails.firstChild) {
8 |         fileDetails.removeChild(fileDetails.firstChild);
9 |     }
10 | 
11 |     for (const file of fileInput.files) {
12 |         const fileInfo = document.createElement('p');
13 |         fileInfo.textContent = `File name: ${file.name}, Size: ${file.size} bytes`;
14 |         fileDetails.appendChild(fileInfo);
15 |     }
16 | }
17 | 
18 | document.getElementById('submit-btn').addEventListener('click', function(event) {
19 | 
20 |     event.preventDefault();
21 | 
22 |     const fileInput = document.getElementById('file-input');
23 |     const uploadStatus = document.getElementById('upload-status');
24 | 
25 |     if (fileInput.files.length === 0) {
26 |         alert('Please select a file before submitting.');
27 |         return;
28 |     }
29 |     const file = fileInput.files[0];
30 |     // Check file type (text or image)
31 |     const fileType = file.type.split('/')[0];
32 | 
33 |     const bucket = 'user-content-bucket-9001';
34 | 
35 |     const formData = new FormData();
36 |     formData.append('file', file);
37 | 
38 |     /* Extracting data from a FormData object */
39 | 
40 |     for (let item of formData.entries()) {
41 |         console.log(item[0]+ ', ' + item[1].length);
42 |     }
43 | 
44 |     const filename = `${encodeURIComponent(file.name)}`;
45 | 
46 |     const apiUrl = `https://9qxyb7ppvj.execute-api.us-east-1.amazonaws.com/prod/${bucket}/${filename}`;
47 |     console.log("API URL: " + apiUrl);
48 | 
49 |     uploadStatus.textContent = 'File upload in progress...';
50 | 
51 | 
52 |     var apigClient = apigClientFactory.newClient();
53 |     var params = {
54 |         bucket: bucket,
55 |         filename: filename,
56 |         'Content-Type': 'multipart/form-data',
57 |         'Accept': '*/*'
58 |     };
59 |     var additionalParams = {};
60 | 
61 |     // fetch(apiUrl, {
62 |     //     method: 'PUT',
63 |     //     body: formData
64 |     // })
65 |     apigClient.bucketFilenamePut(params, formData, additionalParams)
66 |     .then(function(result){
67 |         const uploadStatus = document.getElementById('upload-status');
68 |         uploadStatus.textContent = `File upload successful: ${file.name}`;
69 |         console.log('File upload successful:', result);
70 |     }).catch( function(result){
71 |         const uploadStatus = document.getElementById('upload-status');
72 |         uploadStatus.textContent = `There was a problem with the file upload: ${result.message}`;
73 |         console.error('There was a problem with the file upload:', result);
74 |     });
75 |     // .then(response => {
76 |     //     if (!response.ok) {
77 |     //
console.log(response); 78 | // throw new Error('Network response was not ok.'); 79 | // } 80 | // return response.json(); 81 | // }) 82 | // .then(data => { 83 | // const uploadStatus = document.getElementById('upload-status'); 84 | // uploadStatus.textContent = `File upload successful: ${file.name}`; 85 | // console.log('File upload successful:', data); 86 | // }) 87 | // .catch(error => { 88 | // const uploadStatus = document.getElementById('upload-status'); 89 | // uploadStatus.textContent = `There was a problem with the file upload: ${error.message}`; 90 | // console.error('There was a problem with the file upload:', error); 91 | // }); 92 | }); 93 | 94 | document.getElementById('reset-btn').addEventListener('click', function () { 95 | const fileDetails = document.getElementById('file-details'); 96 | const uploadStatus = document.getElementById('upload-status'); 97 | 98 | while (fileDetails.firstChild) { 99 | fileDetails.removeChild(fileDetails.firstChild); 100 | } 101 | 102 | uploadStatus.textContent = ''; 103 | }); 104 | -------------------------------------------------------------------------------- /Project-4/front-end/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | File Uploader 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 |
21 |

Cloud File Storage Service

22 |

Select a File to Upload

23 | 27 |
28 | 29 | 30 |
31 |
32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/CryptoJS/components/enc-base64.js: -------------------------------------------------------------------------------- 1 | /* 2 | CryptoJS v3.1.2 3 | code.google.com/p/crypto-js 4 | (c) 2009-2013 by Jeff Mott. All rights reserved. 5 | code.google.com/p/crypto-js/wiki/License 6 | */ 7 | (function () { 8 | // Shortcuts 9 | var C = CryptoJS; 10 | var C_lib = C.lib; 11 | var WordArray = C_lib.WordArray; 12 | var C_enc = C.enc; 13 | 14 | /** 15 | * Base64 encoding strategy. 16 | */ 17 | var Base64 = C_enc.Base64 = { 18 | /** 19 | * Converts a word array to a Base64 string. 20 | * 21 | * @param {WordArray} wordArray The word array. 22 | * 23 | * @return {string} The Base64 string. 24 | * 25 | * @static 26 | * 27 | * @example 28 | * 29 | * var base64String = CryptoJS.enc.Base64.stringify(wordArray); 30 | */ 31 | stringify: function (wordArray) { 32 | // Shortcuts 33 | var words = wordArray.words; 34 | var sigBytes = wordArray.sigBytes; 35 | var map = this._map; 36 | 37 | // Clamp excess bits 38 | wordArray.clamp(); 39 | 40 | // Convert 41 | var base64Chars = []; 42 | for (var i = 0; i < sigBytes; i += 3) { 43 | var byte1 = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff; 44 | var byte2 = (words[(i + 1) >>> 2] >>> (24 - ((i + 1) % 4) * 8)) & 0xff; 45 | var byte3 = (words[(i + 2) >>> 2] >>> (24 - ((i + 2) % 4) * 8)) & 0xff; 46 | 47 | var triplet = (byte1 << 16) | (byte2 << 8) | byte3; 48 | 49 | for (var j = 0; (j < 4) && (i + j * 0.75 < sigBytes); j++) { 50 | base64Chars.push(map.charAt((triplet >>> (6 * (3 - j))) & 0x3f)); 51 | } 52 | } 53 | 54 | // Add padding 55 | var paddingChar = map.charAt(64); 56 | if (paddingChar) { 57 | while (base64Chars.length % 4) { 58 | base64Chars.push(paddingChar); 59 | } 60 | } 61 | 62 | return base64Chars.join(''); 63 | }, 64 | 65 | /** 66 | * Converts a Base64 string to a word array. 67 | * 68 | * @param {string} base64Str The Base64 string. 69 | * 70 | * @return {WordArray} The word array. 71 | * 72 | * @static 73 | * 74 | * @example 75 | * 76 | * var wordArray = CryptoJS.enc.Base64.parse(base64String); 77 | */ 78 | parse: function (base64Str) { 79 | // Shortcuts 80 | var base64StrLength = base64Str.length; 81 | var map = this._map; 82 | 83 | // Ignore padding 84 | var paddingChar = map.charAt(64); 85 | if (paddingChar) { 86 | var paddingIndex = base64Str.indexOf(paddingChar); 87 | if (paddingIndex != -1) { 88 | base64StrLength = paddingIndex; 89 | } 90 | } 91 | 92 | // Convert 93 | var words = []; 94 | var nBytes = 0; 95 | for (var i = 0; i < base64StrLength; i++) { 96 | if (i % 4) { 97 | var bits1 = map.indexOf(base64Str.charAt(i - 1)) << ((i % 4) * 2); 98 | var bits2 = map.indexOf(base64Str.charAt(i)) >>> (6 - (i % 4) * 2); 99 | words[nBytes >>> 2] |= (bits1 | bits2) << (24 - (nBytes % 4) * 8); 100 | nBytes++; 101 | } 102 | } 103 | 104 | return WordArray.create(words, nBytes); 105 | }, 106 | 107 | _map: 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=' 108 | }; 109 | }()); 110 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/CryptoJS/components/hmac.js: -------------------------------------------------------------------------------- 1 | /* 2 | CryptoJS v3.1.2 3 | code.google.com/p/crypto-js 4 | (c) 2009-2013 by Jeff Mott. All rights reserved. 
5 | code.google.com/p/crypto-js/wiki/License 6 | */ 7 | (function () { 8 | // Shortcuts 9 | var C = CryptoJS; 10 | var C_lib = C.lib; 11 | var Base = C_lib.Base; 12 | var C_enc = C.enc; 13 | var Utf8 = C_enc.Utf8; 14 | var C_algo = C.algo; 15 | 16 | /** 17 | * HMAC algorithm. 18 | */ 19 | var HMAC = C_algo.HMAC = Base.extend({ 20 | /** 21 | * Initializes a newly created HMAC. 22 | * 23 | * @param {Hasher} hasher The hash algorithm to use. 24 | * @param {WordArray|string} key The secret key. 25 | * 26 | * @example 27 | * 28 | * var hmacHasher = CryptoJS.algo.HMAC.create(CryptoJS.algo.SHA256, key); 29 | */ 30 | init: function (hasher, key) { 31 | // Init hasher 32 | hasher = this._hasher = new hasher.init(); 33 | 34 | // Convert string to WordArray, else assume WordArray already 35 | if (typeof key == 'string') { 36 | key = Utf8.parse(key); 37 | } 38 | 39 | // Shortcuts 40 | var hasherBlockSize = hasher.blockSize; 41 | var hasherBlockSizeBytes = hasherBlockSize * 4; 42 | 43 | // Allow arbitrary length keys 44 | if (key.sigBytes > hasherBlockSizeBytes) { 45 | key = hasher.finalize(key); 46 | } 47 | 48 | // Clamp excess bits 49 | key.clamp(); 50 | 51 | // Clone key for inner and outer pads 52 | var oKey = this._oKey = key.clone(); 53 | var iKey = this._iKey = key.clone(); 54 | 55 | // Shortcuts 56 | var oKeyWords = oKey.words; 57 | var iKeyWords = iKey.words; 58 | 59 | // XOR keys with pad constants 60 | for (var i = 0; i < hasherBlockSize; i++) { 61 | oKeyWords[i] ^= 0x5c5c5c5c; 62 | iKeyWords[i] ^= 0x36363636; 63 | } 64 | oKey.sigBytes = iKey.sigBytes = hasherBlockSizeBytes; 65 | 66 | // Set initial values 67 | this.reset(); 68 | }, 69 | 70 | /** 71 | * Resets this HMAC to its initial state. 72 | * 73 | * @example 74 | * 75 | * hmacHasher.reset(); 76 | */ 77 | reset: function () { 78 | // Shortcut 79 | var hasher = this._hasher; 80 | 81 | // Reset 82 | hasher.reset(); 83 | hasher.update(this._iKey); 84 | }, 85 | 86 | /** 87 | * Updates this HMAC with a message. 88 | * 89 | * @param {WordArray|string} messageUpdate The message to append. 90 | * 91 | * @return {HMAC} This HMAC instance. 92 | * 93 | * @example 94 | * 95 | * hmacHasher.update('message'); 96 | * hmacHasher.update(wordArray); 97 | */ 98 | update: function (messageUpdate) { 99 | this._hasher.update(messageUpdate); 100 | 101 | // Chainable 102 | return this; 103 | }, 104 | 105 | /** 106 | * Finalizes the HMAC computation. 107 | * Note that the finalize operation is effectively a destructive, read-once operation. 108 | * 109 | * @param {WordArray|string} messageUpdate (Optional) A final message update. 110 | * 111 | * @return {WordArray} The HMAC. 112 | * 113 | * @example 114 | * 115 | * var hmac = hmacHasher.finalize(); 116 | * var hmac = hmacHasher.finalize('message'); 117 | * var hmac = hmacHasher.finalize(wordArray); 118 | */ 119 | finalize: function (messageUpdate) { 120 | // Shortcut 121 | var hasher = this._hasher; 122 | 123 | // Compute HMAC 124 | var innerHash = hasher.finalize(messageUpdate); 125 | hasher.reset(); 126 | var hmac = hasher.finalize(this._oKey.clone().concat(innerHash)); 127 | 128 | return hmac; 129 | } 130 | }); 131 | }()); 132 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/CryptoJS/rollups/hmac-sha256.js: -------------------------------------------------------------------------------- 1 | /* 2 | CryptoJS v3.1.2 3 | code.google.com/p/crypto-js 4 | (c) 2009-2013 by Jeff Mott. All rights reserved. 
5 | code.google.com/p/crypto-js/wiki/License 6 | */ 7 | var CryptoJS=CryptoJS||function(h,s){var f={},g=f.lib={},q=function(){},m=g.Base={extend:function(a){q.prototype=this;var c=new q;a&&c.mixIn(a);c.hasOwnProperty("init")||(c.init=function(){c.$super.init.apply(this,arguments)});c.init.prototype=c;c.$super=this;return c},create:function(){var a=this.extend();a.init.apply(a,arguments);return a},init:function(){},mixIn:function(a){for(var c in a)a.hasOwnProperty(c)&&(this[c]=a[c]);a.hasOwnProperty("toString")&&(this.toString=a.toString)},clone:function(){return this.init.prototype.extend(this)}}, 8 | r=g.WordArray=m.extend({init:function(a,c){a=this.words=a||[];this.sigBytes=c!=s?c:4*a.length},toString:function(a){return(a||k).stringify(this)},concat:function(a){var c=this.words,d=a.words,b=this.sigBytes;a=a.sigBytes;this.clamp();if(b%4)for(var e=0;e>>2]|=(d[e>>>2]>>>24-8*(e%4)&255)<<24-8*((b+e)%4);else if(65535>>2]=d[e>>>2];else c.push.apply(c,d);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<< 9 | 32-8*(c%4);a.length=h.ceil(c/4)},clone:function(){var a=m.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],d=0;d>>2]>>>24-8*(b%4)&255;d.push((e>>>4).toString(16));d.push((e&15).toString(16))}return d.join("")},parse:function(a){for(var c=a.length,d=[],b=0;b>>3]|=parseInt(a.substr(b, 10 | 2),16)<<24-4*(b%8);return new r.init(d,c/2)}},n=l.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var d=[],b=0;b>>2]>>>24-8*(b%4)&255));return d.join("")},parse:function(a){for(var c=a.length,d=[],b=0;b>>2]|=(a.charCodeAt(b)&255)<<24-8*(b%4);return new r.init(d,c)}},j=l.Utf8={stringify:function(a){try{return decodeURIComponent(escape(n.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return n.parse(unescape(encodeURIComponent(a)))}}, 11 | u=g.BufferedBlockAlgorithm=m.extend({reset:function(){this._data=new r.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=j.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,d=c.words,b=c.sigBytes,e=this.blockSize,f=b/(4*e),f=a?h.ceil(f):h.max((f|0)-this._minBufferSize,0);a=f*e;b=h.min(4*a,b);if(a){for(var g=0;gn;){var j;a:{j=k;for(var u=h.sqrt(j),t=2;t<=u;t++)if(!(j%t)){j=!1;break a}j=!0}j&&(8>n&&(m[n]=l(h.pow(k,0.5))),r[n]=l(h.pow(k,1/3)),n++);k++}var a=[],f=f.SHA256=q.extend({_doReset:function(){this._hash=new g.init(m.slice(0))},_doProcessBlock:function(c,d){for(var b=this._hash.words,e=b[0],f=b[1],g=b[2],j=b[3],h=b[4],m=b[5],n=b[6],q=b[7],p=0;64>p;p++){if(16>p)a[p]= 15 | c[d+p]|0;else{var k=a[p-15],l=a[p-2];a[p]=((k<<25|k>>>7)^(k<<14|k>>>18)^k>>>3)+a[p-7]+((l<<15|l>>>17)^(l<<13|l>>>19)^l>>>10)+a[p-16]}k=q+((h<<26|h>>>6)^(h<<21|h>>>11)^(h<<7|h>>>25))+(h&m^~h&n)+r[p]+a[p];l=((e<<30|e>>>2)^(e<<19|e>>>13)^(e<<10|e>>>22))+(e&f^e&g^f&g);q=n;n=m;m=h;h=j+k|0;j=g;g=f;f=e;e=k+l|0}b[0]=b[0]+e|0;b[1]=b[1]+f|0;b[2]=b[2]+g|0;b[3]=b[3]+j|0;b[4]=b[4]+h|0;b[5]=b[5]+m|0;b[6]=b[6]+n|0;b[7]=b[7]+q|0},_doFinalize:function(){var a=this._data,d=a.words,b=8*this._nDataBytes,e=8*a.sigBytes; 16 | d[e>>>5]|=128<<24-e%32;d[(e+64>>>9<<4)+14]=h.floor(b/4294967296);d[(e+64>>>9<<4)+15]=b;a.sigBytes=4*d.length;this._process();return this._hash},clone:function(){var a=q.clone.call(this);a._hash=this._hash.clone();return a}});s.SHA256=q._createHelper(f);s.HmacSHA256=q._createHmacHelper(f)})(Math); 17 | (function(){var 
h=CryptoJS,s=h.enc.Utf8;h.algo.HMAC=h.lib.Base.extend({init:function(f,g){f=this._hasher=new f.init;"string"==typeof g&&(g=s.parse(g));var h=f.blockSize,m=4*h;g.sigBytes>m&&(g=f.finalize(g));g.clamp();for(var r=this._oKey=g.clone(),l=this._iKey=g.clone(),k=r.words,n=l.words,j=0;j>>2]|=(d[e>>>2]>>>24-8*(e%4)&255)<<24-8*((b+e)%4);else if(65535>>2]=d[e>>>2];else c.push.apply(c,d);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<< 9 | 32-8*(c%4);a.length=h.ceil(c/4)},clone:function(){var a=j.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],d=0;d>>2]>>>24-8*(b%4)&255;d.push((e>>>4).toString(16));d.push((e&15).toString(16))}return d.join("")},parse:function(a){for(var c=a.length,d=[],b=0;b>>3]|=parseInt(a.substr(b, 10 | 2),16)<<24-4*(b%8);return new q.init(d,c/2)}},k=v.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var d=[],b=0;b>>2]>>>24-8*(b%4)&255));return d.join("")},parse:function(a){for(var c=a.length,d=[],b=0;b>>2]|=(a.charCodeAt(b)&255)<<24-8*(b%4);return new q.init(d,c)}},l=v.Utf8={stringify:function(a){try{return decodeURIComponent(escape(k.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return k.parse(unescape(encodeURIComponent(a)))}}, 11 | x=t.BufferedBlockAlgorithm=j.extend({reset:function(){this._data=new q.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=l.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,d=c.words,b=c.sigBytes,e=this.blockSize,f=b/(4*e),f=a?h.ceil(f):h.max((f|0)-this._minBufferSize,0);a=f*e;b=h.min(4*a,b);if(a){for(var m=0;mk;){var l;a:{l=u;for(var x=h.sqrt(l),w=2;w<=x;w++)if(!(l%w)){l=!1;break a}l=!0}l&&(8>k&&(j[k]=v(h.pow(u,0.5))),q[k]=v(h.pow(u,1/3)),k++);u++}var a=[],f=f.SHA256=g.extend({_doReset:function(){this._hash=new t.init(j.slice(0))},_doProcessBlock:function(c,d){for(var b=this._hash.words,e=b[0],f=b[1],m=b[2],h=b[3],p=b[4],j=b[5],k=b[6],l=b[7],n=0;64>n;n++){if(16>n)a[n]= 15 | c[d+n]|0;else{var r=a[n-15],g=a[n-2];a[n]=((r<<25|r>>>7)^(r<<14|r>>>18)^r>>>3)+a[n-7]+((g<<15|g>>>17)^(g<<13|g>>>19)^g>>>10)+a[n-16]}r=l+((p<<26|p>>>6)^(p<<21|p>>>11)^(p<<7|p>>>25))+(p&j^~p&k)+q[n]+a[n];g=((e<<30|e>>>2)^(e<<19|e>>>13)^(e<<10|e>>>22))+(e&f^e&m^f&m);l=k;k=j;j=p;p=h+r|0;h=m;m=f;f=e;e=r+g|0}b[0]=b[0]+e|0;b[1]=b[1]+f|0;b[2]=b[2]+m|0;b[3]=b[3]+h|0;b[4]=b[4]+p|0;b[5]=b[5]+j|0;b[6]=b[6]+k|0;b[7]=b[7]+l|0},_doFinalize:function(){var a=this._data,d=a.words,b=8*this._nDataBytes,e=8*a.sigBytes; 16 | d[e>>>5]|=128<<24-e%32;d[(e+64>>>9<<4)+14]=h.floor(b/4294967296);d[(e+64>>>9<<4)+15]=b;a.sigBytes=4*d.length;this._process();return this._hash},clone:function(){var a=g.clone.call(this);a._hash=this._hash.clone();return a}});s.SHA256=g._createHelper(f);s.HmacSHA256=g._createHmacHelper(f)})(Math); 17 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/apiGatewayCore/apiGatewayClient.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). 5 | * You may not use this file except in compliance with the License. 6 | * A copy of the License is located at 7 | * 8 | * http://aws.amazon.com/apache2.0 9 | * 10 | * or in the "license" file accompanying this file. 
This file is distributed 11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 12 | * express or implied. See the License for the specific language governing 13 | * permissions and limitations under the License. 14 | */ 15 | 16 | var apiGateway = apiGateway || {}; 17 | apiGateway.core = apiGateway.core || {}; 18 | 19 | apiGateway.core.apiGatewayClientFactory = {}; 20 | apiGateway.core.apiGatewayClientFactory.newClient = function (simpleHttpClientConfig, sigV4ClientConfig) { 21 | var apiGatewayClient = { }; 22 | //Spin up 2 httpClients, one for simple requests, one for SigV4 23 | var sigV4Client = apiGateway.core.sigV4ClientFactory.newClient(sigV4ClientConfig); 24 | var simpleHttpClient = apiGateway.core.simpleHttpClientFactory.newClient(simpleHttpClientConfig); 25 | 26 | apiGatewayClient.makeRequest = function (request, authType, additionalParams, apiKey) { 27 | //Default the request to use the simple http client 28 | var clientToUse = simpleHttpClient; 29 | 30 | //Attach the apiKey to the headers request if one was provided 31 | if (apiKey !== undefined && apiKey !== '' && apiKey !== null) { 32 | request.headers['x-api-key'] = apiKey; 33 | } 34 | 35 | console.log("Obj keys: " + Object.keys(request.body)); 36 | console.log("Req body in apiGatewayClient: " + request.body); 37 | 38 | //if (request.body === undefined || request.body === '' || request.body === null || Object.keys(request.body).length === 0) { 39 | if (request.body === undefined || request.body === '' || request.body === null) { 40 | request.body = undefined; 41 | } 42 | 43 | // If the user specified any additional headers or query params that may not have been modeled 44 | // merge them into the appropriate request properties 45 | request.headers = apiGateway.core.utils.mergeInto(request.headers, additionalParams.headers); 46 | request.queryParams = apiGateway.core.utils.mergeInto(request.queryParams, additionalParams.queryParams); 47 | 48 | //If an auth type was specified inject the appropriate auth client 49 | if (authType === 'AWS_IAM') { 50 | clientToUse = sigV4Client; 51 | } 52 | 53 | //Call the selected http client to make the request, returning a promise once the request is sent 54 | return clientToUse.makeRequest(request); 55 | }; 56 | return apiGatewayClient; 57 | }; 58 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/apiGatewayCore/sigV4Client.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). 5 | * You may not use this file except in compliance with the License. 6 | * A copy of the License is located at 7 | * 8 | * http://aws.amazon.com/apache2.0 9 | * 10 | * or in the "license" file accompanying this file. This file is distributed 11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 12 | * express or implied. See the License for the specific language governing 13 | * permissions and limitations under the License. 
14 | */ 15 | 16 | var apiGateway = apiGateway || {}; 17 | apiGateway.core = apiGateway.core || {}; 18 | 19 | apiGateway.core.sigV4ClientFactory = {}; 20 | apiGateway.core.sigV4ClientFactory.newClient = function (config) { 21 | var AWS_SHA_256 = 'AWS4-HMAC-SHA256'; 22 | var AWS4_REQUEST = 'aws4_request'; 23 | var AWS4 = 'AWS4'; 24 | var X_AMZ_DATE = 'x-amz-date'; 25 | var X_AMZ_SECURITY_TOKEN = 'x-amz-security-token'; 26 | var HOST = 'host'; 27 | var AUTHORIZATION = 'Authorization'; 28 | 29 | function hash(value) { 30 | return CryptoJS.SHA256(value); 31 | } 32 | 33 | function hexEncode(value) { 34 | return value.toString(CryptoJS.enc.Hex); 35 | } 36 | 37 | function hmac(secret, value) { 38 | return CryptoJS.HmacSHA256(value, secret, {asBytes: true}); 39 | } 40 | 41 | function buildCanonicalRequest(method, path, queryParams, headers, payload) { 42 | return method + '\n' + 43 | buildCanonicalUri(path) + '\n' + 44 | buildCanonicalQueryString(queryParams) + '\n' + 45 | buildCanonicalHeaders(headers) + '\n' + 46 | buildCanonicalSignedHeaders(headers) + '\n' + 47 | hexEncode(hash(payload)); 48 | } 49 | 50 | function hashCanonicalRequest(request) { 51 | return hexEncode(hash(request)); 52 | } 53 | 54 | function buildCanonicalUri(uri) { 55 | return encodeURI(uri); 56 | } 57 | 58 | function buildCanonicalQueryString(queryParams) { 59 | if (Object.keys(queryParams).length < 1) { 60 | return ''; 61 | } 62 | 63 | var sortedQueryParams = []; 64 | for (var property in queryParams) { 65 | if (queryParams.hasOwnProperty(property)) { 66 | sortedQueryParams.push(property); 67 | } 68 | } 69 | sortedQueryParams.sort(); 70 | 71 | var canonicalQueryString = ''; 72 | for (var i = 0; i < sortedQueryParams.length; i++) { 73 | canonicalQueryString += sortedQueryParams[i] + '=' + fixedEncodeURIComponent(queryParams[sortedQueryParams[i]]) + '&'; 74 | } 75 | return canonicalQueryString.substr(0, canonicalQueryString.length - 1); 76 | } 77 | 78 | function fixedEncodeURIComponent (str) { 79 | return encodeURIComponent(str).replace(/[!'()*]/g, function(c) { 80 | return '%' + c.charCodeAt(0).toString(16).toUpperCase(); 81 | }); 82 | } 83 | 84 | function buildCanonicalHeaders(headers) { 85 | var canonicalHeaders = ''; 86 | var sortedKeys = []; 87 | for (var property in headers) { 88 | if (headers.hasOwnProperty(property)) { 89 | sortedKeys.push(property); 90 | } 91 | } 92 | sortedKeys.sort(); 93 | 94 | for (var i = 0; i < sortedKeys.length; i++) { 95 | canonicalHeaders += sortedKeys[i].toLowerCase() + ':' + headers[sortedKeys[i]] + '\n'; 96 | } 97 | return canonicalHeaders; 98 | } 99 | 100 | function buildCanonicalSignedHeaders(headers) { 101 | var sortedKeys = []; 102 | for (var property in headers) { 103 | if (headers.hasOwnProperty(property)) { 104 | sortedKeys.push(property.toLowerCase()); 105 | } 106 | } 107 | sortedKeys.sort(); 108 | 109 | return sortedKeys.join(';'); 110 | } 111 | 112 | function buildStringToSign(datetime, credentialScope, hashedCanonicalRequest) { 113 | return AWS_SHA_256 + '\n' + 114 | datetime + '\n' + 115 | credentialScope + '\n' + 116 | hashedCanonicalRequest; 117 | } 118 | 119 | function buildCredentialScope(datetime, region, service) { 120 | return datetime.substr(0, 8) + '/' + region + '/' + service + '/' + AWS4_REQUEST 121 | } 122 | 123 | function calculateSigningKey(secretKey, datetime, region, service) { 124 | return hmac(hmac(hmac(hmac(AWS4 + secretKey, datetime.substr(0, 8)), region), service), AWS4_REQUEST); 125 | } 126 | 127 | function calculateSignature(key, stringToSign) { 128 | 
return hexEncode(hmac(key, stringToSign)); 129 | } 130 | 131 | function buildAuthorizationHeader(accessKey, credentialScope, headers, signature) { 132 | return AWS_SHA_256 + ' Credential=' + accessKey + '/' + credentialScope + ', SignedHeaders=' + buildCanonicalSignedHeaders(headers) + ', Signature=' + signature; 133 | } 134 | 135 | var awsSigV4Client = { }; 136 | if(config.accessKey === undefined || config.secretKey === undefined) { 137 | return awsSigV4Client; 138 | } 139 | awsSigV4Client.accessKey = apiGateway.core.utils.assertDefined(config.accessKey, 'accessKey'); 140 | awsSigV4Client.secretKey = apiGateway.core.utils.assertDefined(config.secretKey, 'secretKey'); 141 | awsSigV4Client.sessionToken = config.sessionToken; 142 | awsSigV4Client.serviceName = apiGateway.core.utils.assertDefined(config.serviceName, 'serviceName'); 143 | awsSigV4Client.region = apiGateway.core.utils.assertDefined(config.region, 'region'); 144 | awsSigV4Client.endpoint = apiGateway.core.utils.assertDefined(config.endpoint, 'endpoint'); 145 | 146 | awsSigV4Client.makeRequest = function (request) { 147 | var verb = apiGateway.core.utils.assertDefined(request.verb, 'verb'); 148 | var path = apiGateway.core.utils.assertDefined(request.path, 'path'); 149 | var queryParams = apiGateway.core.utils.copy(request.queryParams); 150 | if (queryParams === undefined) { 151 | queryParams = {}; 152 | } 153 | var headers = apiGateway.core.utils.copy(request.headers); 154 | if (headers === undefined) { 155 | headers = {}; 156 | } 157 | 158 | //If the user has not specified an override for Content type the use default 159 | if(headers['Content-Type'] === undefined) { 160 | headers['Content-Type'] = config.defaultContentType; 161 | } 162 | 163 | //If the user has not specified an override for Accept type the use default 164 | if(headers['Accept'] === undefined) { 165 | headers['Accept'] = config.defaultAcceptType; 166 | } 167 | 168 | var body = apiGateway.core.utils.copy(request.body); 169 | if (body === undefined || verb === 'GET') { // override request body and set to empty when signing GET requests 170 | body = ''; 171 | } else { 172 | body = JSON.stringify(body); 173 | } 174 | 175 | //If there is no body remove the content-type header so it is not included in SigV4 calculation 176 | if(body === '' || body === undefined || body === null) { 177 | delete headers['Content-Type']; 178 | } 179 | 180 | var datetime = new Date().toISOString().replace(/\.\d{3}Z$/, 'Z').replace(/[:\-]|\.\d{3}/g, ''); 181 | headers[X_AMZ_DATE] = datetime; 182 | var parser = document.createElement('a'); 183 | parser.href = awsSigV4Client.endpoint; 184 | headers[HOST] = parser.hostname; 185 | 186 | var canonicalRequest = buildCanonicalRequest(verb, path, queryParams, headers, body); 187 | var hashedCanonicalRequest = hashCanonicalRequest(canonicalRequest); 188 | var credentialScope = buildCredentialScope(datetime, awsSigV4Client.region, awsSigV4Client.serviceName); 189 | var stringToSign = buildStringToSign(datetime, credentialScope, hashedCanonicalRequest); 190 | var signingKey = calculateSigningKey(awsSigV4Client.secretKey, datetime, awsSigV4Client.region, awsSigV4Client.serviceName); 191 | var signature = calculateSignature(signingKey, stringToSign); 192 | headers[AUTHORIZATION] = buildAuthorizationHeader(awsSigV4Client.accessKey, credentialScope, headers, signature); 193 | if(awsSigV4Client.sessionToken !== undefined && awsSigV4Client.sessionToken !== '') { 194 | headers[X_AMZ_SECURITY_TOKEN] = awsSigV4Client.sessionToken; 195 | } 196 | delete 
headers[HOST]; 197 | 198 | var url = config.endpoint + path; 199 | var queryString = buildCanonicalQueryString(queryParams); 200 | if (queryString != '') { 201 | url += '?' + queryString; 202 | } 203 | 204 | //Need to re-attach Content-Type if it is not specified at this point 205 | if(headers['Content-Type'] === undefined) { 206 | headers['Content-Type'] = config.defaultContentType; 207 | } 208 | 209 | var signedRequest = { 210 | method: verb, 211 | url: url, 212 | headers: headers, 213 | data: body 214 | }; 215 | return axios(signedRequest); 216 | }; 217 | 218 | return awsSigV4Client; 219 | }; 220 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/apiGatewayCore/simpleHttpClient.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). 5 | * You may not use this file except in compliance with the License. 6 | * A copy of the License is located at 7 | * 8 | * http://aws.amazon.com/apache2.0 9 | * 10 | * or in the "license" file accompanying this file. This file is distributed 11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 12 | * express or implied. See the License for the specific language governing 13 | * permissions and limitations under the License. 14 | */ 15 | 16 | var apiGateway = apiGateway || {}; 17 | apiGateway.core = apiGateway.core || {}; 18 | 19 | apiGateway.core.simpleHttpClientFactory = {}; 20 | apiGateway.core.simpleHttpClientFactory.newClient = function (config) { 21 | function buildCanonicalQueryString(queryParams) { 22 | //Build a properly encoded query string from a QueryParam object 23 | if (Object.keys(queryParams).length < 1) { 24 | return ''; 25 | } 26 | 27 | var canonicalQueryString = ''; 28 | for (var property in queryParams) { 29 | if (queryParams.hasOwnProperty(property)) { 30 | canonicalQueryString += encodeURIComponent(property) + '=' + encodeURIComponent(queryParams[property]) + '&'; 31 | } 32 | } 33 | 34 | return canonicalQueryString.substr(0, canonicalQueryString.length - 1); 35 | } 36 | 37 | var simpleHttpClient = { }; 38 | simpleHttpClient.endpoint = apiGateway.core.utils.assertDefined(config.endpoint, 'endpoint'); 39 | 40 | simpleHttpClient.makeRequest = function (request) { 41 | var verb = apiGateway.core.utils.assertDefined(request.verb, 'verb'); 42 | var path = apiGateway.core.utils.assertDefined(request.path, 'path'); 43 | var queryParams = apiGateway.core.utils.copy(request.queryParams); 44 | if (queryParams === undefined) { 45 | queryParams = {}; 46 | } 47 | var headers = apiGateway.core.utils.copy(request.headers); 48 | if (headers === undefined) { 49 | headers = {}; 50 | } 51 | 52 | //If the user has not specified an override for Content type the use default 53 | if(headers['Content-Type'] === undefined) { 54 | headers['Content-Type'] = config.defaultContentType; 55 | } 56 | 57 | //If the user has not specified an override for Accept type the use default 58 | if(headers['Accept'] === undefined) { 59 | headers['Accept'] = config.defaultAcceptType; 60 | } 61 | 62 | var body = apiGateway.core.utils.copy(request.body); 63 | if (body === undefined) { 64 | body = ''; 65 | } 66 | 67 | var url = config.endpoint + path; 68 | var queryString = buildCanonicalQueryString(queryParams); 69 | if (queryString != '') { 70 | url += '?' 
+ queryString; 71 | } 72 | 73 | console.log("Accept : " + headers['Accept']); 74 | console.log("Content-Type : " + headers['Content-Type']); 75 | console.log("body : " + body); 76 | var simpleHttpRequest = { 77 | method: verb, 78 | url: url, 79 | headers: headers, 80 | data: body 81 | }; 82 | return axios(simpleHttpRequest); 83 | }; 84 | return simpleHttpClient; 85 | }; -------------------------------------------------------------------------------- /Project-4/front-end/lib/apiGatewayCore/utils.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). 5 | * You may not use this file except in compliance with the License. 6 | * A copy of the License is located at 7 | * 8 | * http://aws.amazon.com/apache2.0 9 | * 10 | * or in the "license" file accompanying this file. This file is distributed 11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 12 | * express or implied. See the License for the specific language governing 13 | * permissions and limitations under the License. 14 | */ 15 | 16 | var apiGateway = apiGateway || {}; 17 | apiGateway.core = apiGateway.core || {}; 18 | 19 | apiGateway.core.utils = { 20 | assertDefined: function (object, name) { 21 | if (object === undefined) { 22 | throw name + ' must be defined'; 23 | } else { 24 | return object; 25 | } 26 | }, 27 | assertParametersDefined: function (params, keys, ignore) { 28 | if (keys === undefined) { 29 | return; 30 | } 31 | if (keys.length > 0 && params === undefined) { 32 | params = {}; 33 | } 34 | for (var i = 0; i < keys.length; i++) { 35 | if(!apiGateway.core.utils.contains(ignore, keys[i])) { 36 | apiGateway.core.utils.assertDefined(params[keys[i]], keys[i]); 37 | } 38 | } 39 | }, 40 | parseParametersToObject: function (params, keys) { 41 | if (params === undefined) { 42 | return {}; 43 | } 44 | var object = { }; 45 | for (var i = 0; i < keys.length; i++) { 46 | object[keys[i]] = params[keys[i]]; 47 | } 48 | return object; 49 | }, 50 | contains: function(a, obj) { 51 | if(a === undefined) { return false;} 52 | var i = a.length; 53 | while (i--) { 54 | if (a[i] === obj) { 55 | return true; 56 | } 57 | } 58 | return false; 59 | }, 60 | copy: function (obj) { 61 | if (null == obj || "object" != typeof obj) return obj; 62 | var copy = obj.constructor(); 63 | for (var attr in obj) { 64 | if (obj.hasOwnProperty(attr)) copy[attr] = obj[attr]; 65 | } 66 | return copy; 67 | }, 68 | mergeInto: function (baseObj, additionalProps) { 69 | if (null == baseObj || "object" != typeof baseObj) return baseObj; 70 | var merged = baseObj.constructor(); 71 | for (var attr in baseObj) { 72 | if (baseObj.hasOwnProperty(attr)) merged[attr] = baseObj[attr]; 73 | } 74 | if (null == additionalProps || "object" != typeof additionalProps) return baseObj; 75 | for (attr in additionalProps) { 76 | if (additionalProps.hasOwnProperty(attr)) merged[attr] = additionalProps[attr]; 77 | } 78 | return merged; 79 | } 80 | }; 81 | -------------------------------------------------------------------------------- /Project-4/front-end/lib/url-template/url-template.js: -------------------------------------------------------------------------------- 1 | /* 2 | UriTemplates Template Processor - Version: @VERSION - Dated: @DATE 3 | (c) marc.portier@gmail.com - 2011-2012 4 | Licensed under APLv2 (http://opensource.org/licenses/Apache-2.0) 5 | */ 6 | 7 | ; 
8 | var uritemplate = (function() { 9 | 10 | // Below are the functions we originally used from jQuery. 11 | // The implementations below are often more naive then what is inside jquery, but they suffice for our needs. 12 | 13 | function isFunction(fn) { 14 | return typeof fn == 'function'; 15 | } 16 | 17 | function isEmptyObject (obj) { 18 | for(var name in obj){ 19 | return false; 20 | } 21 | return true; 22 | } 23 | 24 | function extend(base, newprops) { 25 | for (var name in newprops) { 26 | base[name] = newprops[name]; 27 | } 28 | return base; 29 | } 30 | 31 | /** 32 | * Create a runtime cache around retrieved values from the context. 33 | * This allows for dynamic (function) results to be kept the same for multiple 34 | * occuring expansions within one template. 35 | * Note: Uses key-value tupples to be able to cache null values as well. 36 | */ 37 | //TODO move this into prep-processing 38 | function CachingContext(context) { 39 | this.raw = context; 40 | this.cache = {}; 41 | } 42 | CachingContext.prototype.get = function(key) { 43 | var val = this.lookupRaw(key); 44 | var result = val; 45 | 46 | if (isFunction(val)) { // check function-result-cache 47 | var tupple = this.cache[key]; 48 | if (tupple !== null && tupple !== undefined) { 49 | result = tupple.val; 50 | } else { 51 | result = val(this.raw); 52 | this.cache[key] = {key: key, val: result}; 53 | // NOTE: by storing tupples we make sure a null return is validly consistent too in expansions 54 | } 55 | } 56 | return result; 57 | }; 58 | 59 | CachingContext.prototype.lookupRaw = function(key) { 60 | return CachingContext.lookup(this, this.raw, key); 61 | }; 62 | 63 | CachingContext.lookup = function(me, context, key) { 64 | var result = context[key]; 65 | if (result !== undefined) { 66 | return result; 67 | } else { 68 | var keyparts = key.split('.'); 69 | var i = 0, keysplits = keyparts.length - 1; 70 | for (i = 0; i { 62 | if (err){ 63 | console.log("Error fetching file list", err) 64 | } else { 65 | data.Contents.forEach((object) => { 66 | var fileRow = document.createElement('tr'); 67 | 68 | // Getting File Name 69 | var fileNameCell = document.createElement('td'); 70 | fileNameCell.textContent = object.Key; 71 | fileRow.appendChild(fileNameCell); 72 | 73 | // Getting File Size 74 | var fileSizeCell = document.createElement('td'); 75 | fileSizeCell.textContent = object.Size; 76 | fileRow.appendChild(fileSizeCell); 77 | 78 | // Getting Presigned URL for Download link 79 | var downloadCell = document.createElement('td'); 80 | var downloadLink = document.createElement('a'); 81 | downloadLink.href = s3.getSignedUrl("getObject", { 82 | Bucket: bucketname, 83 | Key: object.Key 84 | }); 85 | downloadLink.textContent = "Download"; 86 | downloadCell.appendChild(downloadLink); 87 | fileRow.appendChild(downloadCell); 88 | 89 | // Delete button - OnClick call deleteFile() method 90 | var deleteCell = document.createElement('td'); 91 | var deleteButton = document.createElement('button'); 92 | deleteButton.textContent = "Delete"; 93 | deleteButton.addEventListener('click', () => { 94 | deleteFile(bucketname, object.Key); 95 | }) 96 | deleteCell.appendChild(deleteButton); 97 | fileRow.appendChild(deleteCell); 98 | 99 | // Adding row for the object in the table 100 | tableBody.appendChild(fileRow); 101 | }); 102 | } 103 | }) 104 | } 105 | 106 | // Method for uploading files to S3 107 | function uploadFiles(){ 108 | var s3 = new AWS.S3() 109 | let files = document.getElementById('fileInput').files; 110 | console.log(files); 111 | 112 | 
for(var fileIter=0;fileIter<files.length;fileIter++){
113 |         let file = files[fileIter];
114 | 
115 |         var params = {
116 |             Bucket: user_content_bucket,
117 |             Key: file.name,
118 |             Body: file
119 |         }
120 |         s3.upload(params, (err,data) => {
121 |             if (err){
122 |                 console.log("Error uploading the file", err);
123 |             } else {
124 |                 console.log("File Uploaded");
125 | 
126 |                 // Creating Alert for upload-success
127 |                 const uploadAlertEl = document.getElementById('upload-success');
128 |                 uploadAlertEl.innerText = `File "${file.name}" uploaded successfully!`;
129 |                 uploadAlertEl.classList.remove('d-none'); // Show the alert
130 |                 // Adding timeout for automatic dismissal
131 |                 setTimeout(() => uploadAlertEl.classList.add('d-none'), 3000);
132 | 
133 |                 refreshFileList(user_content_bucket);
134 |             }
135 |         })
136 |     }
137 | }
138 | 
139 | // Method for deleting files from S3
140 | function deleteFile(bucketname,key){
141 |     var s3 = new AWS.S3()
142 |     var params = {
143 |         Bucket: bucketname,
144 |         Key: key
145 |     }
146 |     s3.deleteObject(params, (err,data) => {
147 |         if (err){
148 |             console.log("Error deleting the file", err);
149 |         } else {
150 |             console.log("File deleted successfully");
151 | 
152 |             // Creating Alert for delete-success
153 |             const deleteAlertEl = document.getElementById('delete-success');
154 |             deleteAlertEl.innerText = `File "${key}" deleted successfully!`;
155 |             deleteAlertEl.classList.remove('d-none');
156 |             // Adding timeout for automatic dismissal
157 |             setTimeout(() => deleteAlertEl.classList.add('d-none'), 3000);
158 | 
159 |             refreshFileList(bucketname);
160 |         }
161 |     })
162 | }
163 | 
164 | // Logout button - onClick() AWS Temp credentials will be deleted and fileTable will be empty
165 | function logout() {
166 |     const emptyCredentials = {
167 |         accessKeyId: null,
168 |         secretAccessKey: null,
169 |         sessionToken: null
170 |     };
171 |     AWS.config.credentials = new AWS.Credentials(emptyCredentials);
172 | 
173 |     const fileTable = document.getElementById('fileTable');
174 |     fileTable.style.display = 'none';
175 | }
176 | 
177 | function getHtml(template) {
178 |     return template.join('\n');
179 | }
180 | 
181 | // A utility function to decode the Google token
182 | function parseJwt(token) {
183 |     var base64Url = token.split('.')[1];
184 |     var base64 = base64Url.replace(/-/g, '+').replace(/_/g, '/');
185 |     var plain_token = JSON.parse(window.atob(base64));
186 |     return plain_token;
187 | };
--------------------------------------------------------------------------------
/Project-5/front-end/index.html:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | 
6 | Image View, Upload, Delete to S3
7 | 
8 | 
9 | 
10 | 
11 | 
12 | 
13 | 
14 |

15 | AWS S3 File Storage Service 16 |

17 |
18 |
24 |
25 | 26 | 27 | 28 |
29 | 30 | 33 |
34 |
35 | 36 |
37 | 38 | 39 |
40 | 41 |

42 | Upload Files 43 |

44 |
45 | 46 |
47 |

48 |

49 | 52 |
53 |

Uploaded Files

54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 |
NameSize (Bytes)DownloadDelete
66 |
67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /Project-5/front-end/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | display: flex; 3 | justify-content: center; 4 | align-items: center; 5 | height: 100vh; 6 | margin: 0; 7 | font-family: Arial, sans-serif; 8 | } 9 | 10 | h1 { 11 | text-align: center; 12 | margin-bottom: 30px; 13 | } 14 | 15 | h2 { 16 | color: #333; 17 | margin-top: 20px; 18 | margin-bottom: 20px; 19 | } 20 | 21 | h4 { 22 | color: #333; 23 | margin-top: 20px; 24 | margin-bottom: 2%; 25 | } 26 | 27 | .in-container { 28 | display: flex; 29 | } 30 | 31 | .left-div, .right-div { 32 | flex: 1; 33 | } 34 | 35 | #logout { 36 | align-items: right; 37 | text-align: right; 38 | } -------------------------------------------------------------------------------- /Project-5/tf-aws-infra/main.tf: -------------------------------------------------------------------------------- 1 | #define variables 2 | 3 | # Create S3 buckets for uploading image files 4 | resource "aws_s3_bucket" "user_content_bucket" { 5 | bucket = var.user_bucket 6 | force_destroy = true 7 | } 8 | 9 | resource "aws_s3_bucket_ownership_controls" "user_content_bucket" { 10 | bucket = aws_s3_bucket.user_content_bucket.id 11 | rule { 12 | object_ownership = "BucketOwnerPreferred" 13 | } 14 | } 15 | 16 | resource "aws_s3_bucket_acl" "user_content_bucket" { 17 | depends_on = [aws_s3_bucket_ownership_controls.user_content_bucket] 18 | 19 | bucket = aws_s3_bucket.user_content_bucket.id 20 | acl = "private" 21 | } 22 | 23 | # Enabling CORS for the S3 bucket 24 | resource "aws_s3_bucket_cors_configuration" "user_content_bucket_cors" { 25 | bucket = aws_s3_bucket.user_content_bucket.id 26 | depends_on = [aws_cloudfront_distribution.s3_distribution] 27 | 28 | cors_rule { 29 | allowed_headers = ["*"] 30 | allowed_methods = ["GET", "PUT", "POST", "DELETE"] 31 | allowed_origins = ["https://${aws_cloudfront_distribution.s3_distribution.domain_name}"] 32 | expose_headers = [] 33 | } 34 | } 35 | 36 | # Creating S3 bucket and cloudFront Distribution for the web app - (front-end) 37 | 38 | resource "aws_s3_bucket" "file_uploader_app_bucket" { 39 | bucket = var.webapp_bucket 40 | force_destroy = true 41 | } 42 | 43 | resource "aws_s3_bucket_ownership_controls" "file_uploader_app_bucket_owner" { 44 | bucket = aws_s3_bucket.file_uploader_app_bucket.id 45 | rule { 46 | object_ownership = "BucketOwnerPreferred" 47 | } 48 | } 49 | 50 | resource "aws_s3_bucket_acl" "file_uploader_app_bucket_acl" { 51 | depends_on = [aws_s3_bucket_ownership_controls.file_uploader_app_bucket_owner] 52 | bucket = aws_s3_bucket.file_uploader_app_bucket.id 53 | acl = "private" 54 | } 55 | 56 | locals { 57 | s3_origin_id = "FileUploaderS3Origin" 58 | } 59 | 60 | resource "aws_cloudfront_origin_access_control" "oac" { 61 | name = "fileuploader-oac" 62 | description = "File Uploader Policy" 63 | origin_access_control_origin_type = "s3" 64 | signing_behavior = "always" 65 | signing_protocol = "sigv4" 66 | } 67 | resource "aws_cloudfront_distribution" "s3_distribution" { 68 | origin { 69 | domain_name = aws_s3_bucket.file_uploader_app_bucket.bucket_regional_domain_name 70 | origin_access_control_id = aws_cloudfront_origin_access_control.oac.id 71 | origin_id = local.s3_origin_id 72 | } 73 | 74 | enabled = true 75 | is_ipv6_enabled = true 76 | comment = "Distribution for AWS S3 Image app" 77 | default_root_object = "index.html" 78 | 79 | default_cache_behavior 
{
80 |     allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
81 |     cached_methods   = ["GET", "HEAD"]
82 |     target_origin_id = local.s3_origin_id
83 | 
84 |     forwarded_values {
85 |       query_string = false
86 | 
87 |       cookies {
88 |         forward = "none"
89 |       }
90 |     }
91 | 
92 |     viewer_protocol_policy = "redirect-to-https"
93 |     min_ttl                = 0
94 |     default_ttl            = 3600
95 |     max_ttl                = 86400
96 |   }
97 | 
98 |   price_class = "PriceClass_200"
99 | 
100 |   restrictions {
101 |     geo_restriction {
102 |       restriction_type = "none"
103 |       locations        = []
104 |     }
105 |   }
106 | 
107 |   tags = {
108 |     Environment = "production"
109 |   }
110 | 
111 |   viewer_certificate {
112 |     cloudfront_default_certificate = true
113 |   }
114 | }
115 | 
116 | # Bucket policy for CloudFront to access
117 | 
118 | data "aws_iam_policy_document" "allow_access_from_cloudfront" {
119 |   statement {
120 |     principals {
121 |       type        = "Service"
122 |       identifiers = ["cloudfront.amazonaws.com"]
123 |     }
124 | 
125 |     actions = [
126 |       "s3:GetObject",
127 |       "s3:ListBucket",
128 |     ]
129 | 
130 |     resources = [
131 |       aws_s3_bucket.file_uploader_app_bucket.arn,
132 |       "${aws_s3_bucket.file_uploader_app_bucket.arn}/*",
133 |     ]
134 |     condition {
135 |       test     = "StringEquals"
136 |       variable = "AWS:SourceArn"
137 |       values   = ["arn:aws:cloudfront::${var.aws_account_id}:distribution/${aws_cloudfront_distribution.s3_distribution.id}"]
138 | 
139 |     }
140 |   }
141 | }
142 | resource "aws_s3_bucket_policy" "allow_access_from_cloudfront" {
143 |   bucket = aws_s3_bucket.file_uploader_app_bucket.id
144 |   policy = data.aws_iam_policy_document.allow_access_from_cloudfront.json
145 | }
--------------------------------------------------------------------------------
/Project-5/tf-aws-infra/outputs.tf:
--------------------------------------------------------------------------------
1 | output "User-S3-bucket" {
2 |   value = aws_s3_bucket.user_content_bucket.id
3 | }
4 | 
5 | output "web-app-bucket" {
6 |   value = aws_s3_bucket.file_uploader_app_bucket.id
7 | }
8 | 
9 | output "Web-app-cdn" {
10 |   value = aws_cloudfront_distribution.s3_distribution.domain_name
11 | }
--------------------------------------------------------------------------------
/Project-5/tf-aws-infra/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     aws = {
4 |       source  = "hashicorp/aws"
5 |       version = "5.25.0"
6 |     }
7 |   }
8 | }
9 | 
10 | provider "aws" {
11 |   region = var.aws_region
12 | }
--------------------------------------------------------------------------------
/Project-5/tf-aws-infra/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_region" {
2 |   description = "The region where the infrastructure should be deployed to"
3 |   type        = string
4 | }
5 | 
6 | variable "aws_account_id" {
7 |   description = "AWS Account ID"
8 |   type        = string
9 | }
10 | 
11 | variable "user_bucket" {
12 |   description = "S3 bucket where files will be uploaded"
13 |   type        = string
14 | }
15 | 
16 | variable "webapp_bucket" {
17 |   description = "Bucket for hosting html, css and js for the app"
18 |   type        = string
19 | }
--------------------------------------------------------------------------------
/Project-5/tf-aws-infra/variables/dev.tfvars:
--------------------------------------------------------------------------------
1 | aws_region     = "us-east-1"
2 | aws_account_id = "503382476502"
3 | user_bucket    = "image-storage-bucket-7001"
4 | webapp_bucket  = "image-storage-frontend-7002"
--------------------------------------------------------------------------------
/Project-6/README.md:
--------------------------------------------------------------------------------
1 | ## Serverless Blog Web App using ECS, ECR, Docker - Blue Green Deployment
2 | 
3 | ### Automated Infra creation using Terraform
4 | 
5 | #### Steps -
6 | 
7 | 1. Create a new virtual env and install the libraries
8 | ```
9 | conda create -n ecsproject_py310 python=3.10
10 | conda activate ecsproject_py310
11 | pip install flask boto3
12 | ```
13 | 
14 | 2. Create folders for the various files -
15 | 
16 | Explanation:
17 | 
18 | - app.py: The main Python file where you'll create your Flask application, define routes, and handle logic.
19 | - requirements.txt: List of Python libraries required for the application, used for dependency management.
20 | - Dockerfile: Instructions for building the Docker image that will package your application and its dependencies.
21 | - templates/: Folder containing HTML templates used to render the user interface.
22 | - static/: Folder for static assets like CSS, JavaScript, and images.
23 | - tests/: Folder for unit tests to ensure code quality.
24 | 
25 | 3. Start writing the HTML/code in the respective files.
26 | 
27 | 4. Test locally.
28 | 
29 | 5. Create Dockerfile:
30 | 
31 |     - Define instructions to build a Docker image for your application.
32 |     - Include necessary dependencies and configuration.
33 |     - Expose the appropriate port.
34 | 
35 | 6. Create CodeBuild Project:
36 | 
37 |     *I had to create a separate repo for the CodeBuild project, as a CodeBuild project in AWS has to source from a GitHub repository's root directory.*
38 |     *Here is the repo for the CodeBuild project. Basically it has the same code as the `blog-app` folder in this repo.*
39 |     [https://github.com/vishal2505/MyBlogApp]
40 |     - In the AWS Management Console, navigate to CodeBuild and create a new project.
41 |     - Set up source code location (GitHub repository or S3 bucket).
42 |     - Choose a build environment image (compatible with Python and Flask).
43 |     - Define build commands to:
44 |         - Install dependencies.
45 |         - Run tests (optional).
46 |         - Build the Docker image.
47 |         - Push the image to Amazon ECR (Elastic Container Registry).
48 | 
49 | 7. Create ECS Cluster and Task Definition and Service for Blue Service:
50 | 
51 |     - In the ECS console, create a cluster and task definition.
52 |     - Specify container image from ECR.
53 |     - Define CPU and memory requirements.
54 |     - Set container port mappings.
55 | 
56 | 8. Create ALB, target group and HTTP listener which forwards traffic to the Blue Service
57 | 
58 |     - Use CodeBuild to trigger automatic deployments whenever code changes, or manually trigger a build.
59 |     - CodeBuild will build the image, push it to ECR, and update the task definition in ECS.
60 | 
61 | 9. Test the application via the load balancer URL.
62 | 
63 | 10. Make a change in the web app code and commit the changes.
64 | 
65 | 11. Enable an automatic CodeBuild trigger to build the image upon code merge. Tag this image as the new "Green" image. The image will be pushed to ECR.
66 | 
67 | 12. Create another ECS task and service which will pull the "Green" image from ECR.
68 | 
69 | 13. Create a new ALB target group for the ECS Green Service and update the listener rule to a 50-50% split.
70 | 
71 | 14. Gradually shift traffic from Blue to Green using weighted target groups, as shown in the sketch after this list.
72 | 
73 | 15. Once all traffic is on Green, terminate the Blue service.
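74 | 
75 | Below is a minimal sketch of the traffic shifting in steps 13-15, using boto3. The ARNs and the weight schedule are placeholder assumptions, not values from this project - the same shift can also be done from the console or by editing the listener rule in Terraform:
76 | 
77 | ```
78 | import boto3
79 | 
80 | elbv2 = boto3.client('elbv2', region_name='us-east-1')
81 | 
82 | # Placeholder ARNs - substitute the listener and target group ARNs from your ALB
83 | LISTENER_ARN = 'arn:aws:elasticloadbalancing:us-east-1:111122223333:listener/app/my-alb/...'
84 | BLUE_TG_ARN = 'arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/blue-tg/...'
85 | GREEN_TG_ARN = 'arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/green-tg/...'
86 | 
87 | def shift_traffic(green_weight):
88 |     # Send green_weight% of requests to Green and the remainder to Blue
89 |     elbv2.modify_listener(
90 |         ListenerArn=LISTENER_ARN,
91 |         DefaultActions=[{
92 |             'Type': 'forward',
93 |             'ForwardConfig': {
94 |                 'TargetGroups': [
95 |                     {'TargetGroupArn': BLUE_TG_ARN, 'Weight': 100 - green_weight},
96 |                     {'TargetGroupArn': GREEN_TG_ARN, 'Weight': green_weight},
97 |                 ]
98 |             }
99 |         }]
100 |     )
101 | 
102 | # 50-50 split first (step 13), then all traffic to Green (step 15)
103 | for weight in (50, 100):
104 |     shift_traffic(weight)
105 | ```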
106 | 
107 | 
--------------------------------------------------------------------------------
/Project-6/blog-app/Dockerfile:
--------------------------------------------------------------------------------
1 | # Base image with Python and Flask
2 | FROM python:3.10-slim
3 | 
4 | # Set working directory
5 | WORKDIR /app
6 | 
7 | # Copy application code
8 | COPY . /app
9 | 
10 | # Install dependencies
11 | RUN pip install -r requirements.txt
12 | 
13 | # Expose the port Flask listens on
14 | EXPOSE 5000
15 | 
16 | # Run the Flask app
17 | #CMD ["python", "app.py"]
18 | CMD flask run -h 0.0.0.0 -p 5000
--------------------------------------------------------------------------------
/Project-6/blog-app/app.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import uuid
3 | from flask import Flask, render_template, request, redirect, url_for
4 | import boto3 # Import Boto3 for DynamoDB
5 | from boto3.dynamodb.conditions import Key
6 | 
7 | app = Flask(__name__)
8 | 
9 | # Configure DynamoDB
10 | # No need to provide access key and secret access key
11 | # instead we'll be attaching an IAM role with appropriate DynamoDB access permissions to our ECS task.
12 | 
13 | dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
14 | 
15 | # Define routes
16 | # Route for the home page
17 | @app.route('/')
18 | def index():
19 |     # Retrieve posts from DynamoDB
20 |     posts = get_posts_from_dynamodb()
21 |     print(posts)
22 |     return render_template('index.html', posts=posts)
23 | 
24 | # Route for getting the post details based on the post_id
25 | @app.route('/posts/<post_id>')
26 | def view_post(post_id):
27 |     # Retrieve post details from DynamoDB
28 |     post = get_post_by_id(post_id)
29 |     print(post)
30 |     return render_template('posts/view.html', post=post)
31 | 
32 | # Route for creating a new post
33 | @app.route('/posts/new', methods=['GET', 'POST'])
34 | def new_post():
35 |     if request.method == 'GET':
36 |         return render_template('posts/new.html')
37 |     else:
38 |         title = request.form['title']
39 |         author = request.form['author']
40 |         content = request.form['content']
41 |         create_post(title, author, content)
42 |         return redirect(url_for('index'))
43 | 
44 | # Route for updating an existing post
45 | @app.route('/posts/<post_id>/edit', methods=['GET', 'POST'])
46 | def edit_post(post_id):
47 |     post = get_post_by_id(post_id) # Use your existing function
48 |     if request.method == 'GET':
49 |         return render_template('posts/edit.html', post=post)
50 |     else:
51 |         title = request.form['title']
52 |         content = request.form['content']
53 |         author = request.form['author']
54 |         print("Updating for author: " + author)
55 |         update_post(post_id, title, author, content)
56 |         return redirect(url_for('view_post', post_id=post_id))
57 | 
58 | # Route for deleting an existing post
59 | @app.route('/posts/<post_id>/delete', methods=['GET', 'POST'])
60 | def delete_post(post_id):
61 |     author = request.args.get('author')
62 |     print("Author: " + author)
63 |     delete_post_record(post_id, author)
64 |     print("Post deleted successfully.")
65 |     return redirect(url_for('index'))
66 | 
67 | # Functions for interacting with DynamoDB
68 | def get_posts_from_dynamodb():
69 |     table = dynamodb.Table('BlogPosts')
70 | 
71 |     response = table.scan() # Retrieve all items from the table
72 |     posts = response['Items']
73 | 
74 |     while 'LastEvaluatedKey' in response: # Handle pagination
75 |         response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
76 |         posts.extend(response['Items'])
77 | 
78 |     return posts
79 | 
80 | # Function for getting post details by post_id
81 | def get_post_by_id(post_id):
82 |     print("post id: ", post_id)
83 |     table = dynamodb.Table('BlogPosts')
84 | 
85 |     try:
86 |         response = table.query(KeyConditionExpression=Key('post_id').eq(post_id))
87 |         #response = table.get_item(Key={'post_id': post_id}) # Get the specific item
88 |         post = response['Items'][0]
89 |         return post
90 |     except (KeyError, IndexError): # no matching item found
91 |         return None
92 | 
93 | # Function for creating a new post
94 | def create_post(title, author, content):
95 |     table = dynamodb.Table('BlogPosts')
96 | 
97 |     new_post = {
98 |         'post_id': generate_unique_id(),
99 |         'title': title,
100 |         'author': author,
101 |         'content': content,
102 |         'timestamp': datetime.utcnow().isoformat(), # Using UTC timestamp
103 |     }
104 | 
105 |     table.put_item(Item=new_post)
106 | 
107 | # Function for updating an existing post
108 | def update_post(post_id, title, author, content):
109 |     table = dynamodb.Table('BlogPosts')
110 | 
111 |     updated_post = {
112 |         'Key': {'post_id': post_id, 'author': author},
113 |         'UpdateExpression': 'SET title = :title, content = :content',
114 |         'ExpressionAttributeValues': {
115 |             ':title': title,
116 |             ':content': content,
117 |         },
118 |     }
119 |     print(updated_post)
120 |     response = table.update_item(**updated_post)
121 |     print(response)
122 |     print("Table updated successfully")
123 | 
124 | # Function for deleting an existing post by post_id and author - key attributes
125 | # (named distinctly so it does not shadow the delete_post view function above)
126 | def delete_post_record(post_id, author):
127 |     table = dynamodb.Table('BlogPosts')
128 | 
129 |     table.delete_item(Key={'post_id': post_id, 'author': author})
130 | 
131 | def generate_unique_id():
132 |     unique_id = str(uuid.uuid4()) # Generating a universally unique identifier
133 |     return unique_id
134 | 
135 | if __name__ == '__main__':
136 |     app.run(host='0.0.0.0', debug=True)
137 | 
--------------------------------------------------------------------------------
/Project-6/blog-app/buildspec.yaml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 | 
3 | phases:
4 |   install:
5 |     commands:
6 |       - pip install -r requirements.txt
7 | 
8 |   build:
9 |     commands:
10 |       - docker build -t ${ECR_REGISTRY}/${ECR_REPOSITORY}:latest .
--------------------------------------------------------------------------------
/Project-6/blog-app/requirements.txt:
--------------------------------------------------------------------------------
1 | blinker==1.7.0
2 | boto3==1.34.6
3 | botocore==1.34.6
4 | click==8.1.7
5 | Flask==3.0.0
6 | itsdangerous==2.1.2
7 | Jinja2==3.1.2
8 | jmespath==1.0.1
9 | MarkupSafe==2.1.3
10 | python-dateutil==2.8.2
11 | s3transfer==0.10.0
12 | six==1.16.0
13 | urllib3==2.0.7
14 | Werkzeug==3.0.1
15 | 
--------------------------------------------------------------------------------
/Project-6/blog-app/static/css/style.css:
--------------------------------------------------------------------------------
1 | /* General styling */
2 | body {
3 |     font-family: sans-serif;
4 |     margin: 0;
5 |     padding: 20px;
6 |     background-color: #f5f5f5; /* Light grayish-white */
7 | }
8 | 
9 | a {
10 |     color: #337ab7;
11 |     text-decoration: none;
12 | }
13 | 
14 | a:hover {
15 |     text-decoration: underline;
16 | }
17 | 
18 | h1, h2, h3 {
19 |     margin-top: 0;
20 |     padding: 0; /* 'auto' is not a valid padding value */
21 | }
22 | 
23 | /* Header styling */
24 | header {
25 |     padding: 10px;
26 | }
27 | 
28 | .header-title {
29 |     font-size: 24px;
30 |     font-weight: bold;
31 |     margin-bottom: 0;
32 | }
33 | 
34 | /* Navigation styling */
35 | .navbar-brand {
36 |     padding-left: 2rem;
37 | }
38 | 
39 | nav ul {
40 |     list-style: none;
41 |     padding: 0;
42 |     margin: 0;
43 | }
44 | 
45 | nav li {
46 |     display: inline-block;
47 |     margin-right: 20px;
48 | }
49 | 
50 | /* Content styling */
51 | main {
52 |     padding: 20px;
53 | }
54 | 
55 | article {
56 |     margin-bottom: 20px;
57 | }
58 | 
59 | .post-title {
60 |     font-size: 20px;
61 |     margin-bottom: 10px;
62 | }
63 | 
64 | .post-date {
65 |     font-size: 14px;
66 |     color: #999;
67 | }
68 | 
69 | /* Footer styling */
70 | footer {
71 |     text-align: center;
72 |     padding: 10px;
73 |     border-top: 1px solid #ccc;
74 | }
75 | 
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/base.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 | <head>
4 |     <meta charset="utf-8">
5 |     <meta name="viewport" content="width=device-width, initial-scale=1">
6 |     <title>My Serverless Blog</title>
7 |     <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
8 | </head>
9 | 
10 | <body>
11 | <header>
12 |     <nav>
13 |         <a class="navbar-brand" href="{{ url_for('index') }}">My Serverless Blog</a>
14 |         <ul>
15 |             <li><a href="{{ url_for('index') }}">Home</a></li>
16 |             <li><a href="{{ url_for('new_post') }}">New Post</a></li>
17 |         </ul>
18 |     </nav>
19 | </header>
20 | 
21 | <main>
22 |     {% block content %}{% endblock %}
23 | </main>
24 | 
25 | <footer>
26 |     &copy; 2023 My Serverless Blog
27 | </footer>
28 | 
29 | </body>
30 | </html>
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/index.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | 
3 | {% block content %}
4 | <h1>Welcome to my blog!</h1>
5 | 
6 | <h2>Latest Posts</h2>
7 | 
8 | <div>
9 |     {% for post in posts %}
10 |     <article>
11 |         <h3 class="post-title">
12 |             <a href="{{ url_for('view_post', post_id=post.post_id) }}">{{ post.title }}</a>
13 |         </h3>
14 |         <p class="post-date">{{ post.timestamp }}</p>
15 |     </article>
16 |     {% endfor %}
17 | </div>
18 | {% endblock %}
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/posts/edit.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | 
3 | {% block content %}
4 | <h1>Edit Post</h1>
5 | 
6 | <form method="POST" action="{{ url_for('edit_post', post_id=post.post_id) }}">
7 |     <div>
8 |         <label for="title">Title</label>
9 |         <input type="text" id="title" name="title" value="{{ post.title }}" required>
10 |     </div>
11 |     <div>
12 |         <label for="author">Author</label>
13 |         <input type="text" id="author" name="author" value="{{ post.author }}" required>
14 |     </div>
15 |     <div>
16 |         <label for="content">Content</label>
17 |         <textarea id="content" name="content" rows="10" required>{{ post.content }}</textarea>
18 |     </div>
19 | 
20 |     <button type="submit">Update Post</button>
21 | </form>
22 | {% endblock %}
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/posts/index.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | 
3 | {% block content %}
4 | <h1>All Posts</h1>
5 | 
6 | <ul>
7 |     {% for post in posts %}
8 |     <li>
9 |         <h3 class="post-title">
10 |             <a href="{{ url_for('view_post', post_id=post.post_id) }}">{{ post.title }}</a>
11 |         </h3>
12 |         <p>{{ post.author }}</p>
13 |         <p class="post-date">{{ post.timestamp }}</p>
14 |         <p>{{ post.content | truncate(200) }}</p>
15 |     </li>
16 |     {% endfor %}
17 | </ul>
18 | {% endblock %}
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/posts/new.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | 
3 | {% block content %}
4 | <h1>Create a New Post</h1>
5 | 
6 | <form method="POST" action="{{ url_for('new_post') }}">
7 |     <div>
8 |         <label for="title">Title</label>
9 |         <input type="text" id="title" name="title" required>
10 |     </div>
11 |     <div>
12 |         <label for="author">Author</label>
13 |         <input type="text" id="author" name="author" required>
14 |     </div>
15 |     <div>
16 |         <label for="content">Content</label>
17 |         <textarea id="content" name="content" rows="10" required></textarea>
18 |     </div>
19 | 
20 |     <button type="submit">Create Post</button>
21 | </form>
22 | {% endblock %}
--------------------------------------------------------------------------------
/Project-6/blog-app/templates/posts/view.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | 
3 | {% block content %}
4 | <article>
5 |     <h1 class="post-title">{{ post.title }}</h1>
6 | 
7 |     <p class="post-date">
8 |         Written by: {{ post.author }}<br>
9 |         Post ID: {{ post.post_id }}<br>
10 |         Last Modified At: {{ post.timestamp }}
11 |     </p>
12 | 
13 |     <p>
14 |         <a href="{{ url_for('edit_post', post_id=post.post_id) }}">Edit Post</a>
15 |         <a href="{{ url_for('delete_post', post_id=post.post_id, author=post.author) }}">Delete Post</a>
16 |     </p>
17 | 
18 |     <div>
19 |         {{ post.content | safe }}
20 |     </div>
21 | </article>
22 | {% endblock %}
--------------------------------------------------------------------------------
/Project-6/tf-aws-base-infra/codebuild.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "codebuild_role" {
2 |   name = "codebuild-role"
3 | 
4 |   assume_role_policy = < terraform-eks-cicd-7001
5 | 
6 | 2. Create Jenkins Server on EC2 using tools - Jenkins, Git, Terraform and kubectl
7 | 
8 | 3. Configure Jenkins Server
9 | 
10 | 4. Create Terraform configuration files for EKS Cluster in private VPC
11 | 
12 | 5. Add stages in the Jenkins pipeline for terraform init, plan and apply for the EKS cluster
13 | 
14 | 6. Create manifest files - deployment.yaml and service.yaml for a simple Nginx application
15 | 
16 | 7. Add another stage in the Jenkins pipeline to apply these manifest files
17 | 
18 | 8. Run the pipeline
19 | 
20 | Below is the repo which is going to be used in the Jenkins pipeline during `SCM checkout`.
21 | 
22 | https://github.com/vishal2505/terraform-eks-cicd/tree/main
23 | 
24 | 
--------------------------------------------------------------------------------
/Project-7/jenkins_server/scripts/install_build_tools.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ref - https://www.jenkins.io/doc/book/installing/linux/
4 | # Installing jenkins
5 | sudo yum install wget -y
6 | sudo wget -O /etc/yum.repos.d/jenkins.repo \
7 |     https://pkg.jenkins.io/redhat/jenkins.repo
8 | sudo rpm --import https://pkg.jenkins.io/redhat/jenkins.io-2023.key
9 | sudo yum upgrade -y
10 | # Add required dependencies for the jenkins package
11 | sudo yum install java-17-amazon-corretto-devel -y
12 | sudo yum install jenkins -y
13 | sudo systemctl daemon-reload
14 | 
15 | # Starting Jenkins
16 | sudo systemctl enable jenkins
17 | sudo systemctl start jenkins
18 | sudo systemctl status jenkins
19 | 
20 | # Ref - https://www.atlassian.com/git/tutorials/install-git
21 | # Installing git
22 | sudo yum install -y git
23 | git --version
24 | 
25 | # Installing Docker
26 | # Ref - https://www.cyberciti.biz/faq/how-to-install-docker-on-amazon-linux-2/
27 | sudo yum update -y
28 | sudo yum install docker -y
29 | 
30 | sudo usermod -a -G docker ec2-user
31 | sudo usermod -aG docker jenkins
32 | 
33 | # Add group membership for the default ec2-user so you can run all docker commands without using the sudo command:
34 | id ec2-user
35 | newgrp docker
36 | 
37 | sudo systemctl enable docker.service
38 | sudo systemctl start docker.service
39 | sudo systemctl status docker.service
40 | 
41 | sudo chmod 777 /var/run/docker.sock
42 | 
43 | # Run Docker Container of Sonarqube
44 | docker run -d --name sonar -p 9000:9000 sonarqube:lts-community
45 | 
46 | # Installing AWS CLI
47 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
48 | sudo yum install unzip -y
49 | unzip awscliv2.zip
50 | sudo ./aws/install
51 | 
52 | # Ref - https://developer.hashicorp.com/terraform/cli/install/yum
53 | # Installing terraform
54 | sudo yum install -y yum-utils
55 | sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
56 | sudo yum -y install terraform
57 | 
58 | # Ref - https://pwittrock.github.io/docs/tasks/tools/install-kubectl/
59 | # Installing kubectl
60 | sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubectl
61 | sudo chmod +x ./kubectl
62 | sudo mkdir -p $HOME/bin && sudo cp ./kubectl $HOME/bin/kubectl && export PATH=$PATH:$HOME/bin
63 | 
64 | # Installing Trivy
65 | # Ref - https://aquasecurity.github.io/trivy-repo/
66 | sudo tee /etc/yum.repos.d/trivy.repo << 'EOF'
67 | [trivy]
68 | name=Trivy repository
69 | baseurl=https://aquasecurity.github.io/trivy-repo/rpm/releases/$basearch/
70 | gpgcheck=1
71 | enabled=1
72 | gpgkey=https://aquasecurity.github.io/trivy-repo/rpm/public.key
73 | EOF
74 | 
75 | sudo yum -y update
76 | sudo yum -y install trivy
77 | 
78 | # Installing Helm
79 | # Ref - https://helm.sh/docs/intro/install/
80 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
81 | chmod 700 get_helm.sh
82 | ./get_helm.sh
83 | 
--------------------------------------------------------------------------------
/Project-7/jenkins_server/tf-aws-ec2/backend.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   backend "s3" {
3 |     bucket = "terraform-eks-cicd-7001"
4 |     key    = "jenkins/terraform.tfstate"
5 |     region = "us-east-1"
6 |   }
7 | }
--------------------------------------------------------------------------------
/Project-7/jenkins_server/tf-aws-ec2/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_availability_zones" "azs" {}
2 | 
3 | # Get latest Amazon Linux AMI
4 | data "aws_ami" "amazon-linux" {
5 |   most_recent = true
6 |   owners      = ["amazon"]
7 |   filter {
8 |     name   = "name"
9 |     values = ["amzn2-ami-*-x86_64-gp2"]
10 |   }
11 |   filter {
12 |     name   = "virtualization-type"
13 |     values = ["hvm"]
14 |   }
15 | }
--------------------------------------------------------------------------------
/Project-7/jenkins_server/tf-aws-ec2/main.tf:
--------------------------------------------------------------------------------
1 | # We'll be using publicly available modules for creating different services instead of resources
2 | # https://registry.terraform.io/browse/modules?provider=aws
3 | 
4 | # Creating a VPC
5 | module "vpc" {
6 |   source = "terraform-aws-modules/vpc/aws"
7 | 
8 |   name = var.vpc_name
9 |   cidr = var.vpc_cidr
10 | 
11 |   azs                     = data.aws_availability_zones.azs.names
12 |   public_subnets          = var.public_subnets
13 |   map_public_ip_on_launch = true
14 | 
15 |   enable_dns_hostnames = true
16 | 
17 |   tags = {
18 |     Name        = var.vpc_name
19 |     Terraform   = "true"
20 |     Environment = "dev"
21 |   }
22 | 
23 |   public_subnet_tags = {
24 |     Name = "jenkins-subnet"
25 |   }
26 | }
27 | 
28 | # SG
29 | module "sg" {
30 |   source = "terraform-aws-modules/security-group/aws"
31 | 
32 |   name        = var.jenkins_security_group
33 |   description = "Security Group for Jenkins Server"
34 |   vpc_id      = module.vpc.vpc_id
35 | 
36 |   ingress_with_cidr_blocks = [
37 |     {
38 |       from_port   = 8080
39 |       to_port     = 8080
40 |       protocol    = "tcp"
41 |       description = "JenkinsPort"
42 |       cidr_blocks = "0.0.0.0/0"
43 |     },
44 |     {
45 |       from_port   = 443
46 |       to_port     = 443
47 |       protocol    = "tcp"
48 |       description = "HTTPS"
49 |       cidr_blocks = "0.0.0.0/0"
50 |     },
51 |     {
52 |       from_port   = 80
53 |       to_port     = 80
54 |       protocol    = "tcp"
55 |       description = "HTTP"
56 |       cidr_blocks = "0.0.0.0/0"
57 |     },
58 |     {
59 |       from_port   = 22
60 |       to_port     = 22
61 |       protocol    = "tcp"
62 |       description = "SSH"
63 |       cidr_blocks = "0.0.0.0/0"
64 |     },
65 |     {
66 |       from_port   = 9000
67 |       to_port     = 9000
68 |       protocol    = "tcp"
69 |       description = "SonarQubePort"
70 |       cidr_blocks = "0.0.0.0/0"
71 |     }
72 |   ]
73 | 
74 |   egress_with_cidr_blocks = [
75 |     {
76 |       from_port   = 0
77 |       to_port     = 0
78 |       protocol    = "-1"
79 |       cidr_blocks = "0.0.0.0/0"
80 |     }
81 |   ]
82 | 
83 |   tags = {
"jenkins-sg" 85 | } 86 | } 87 | 88 | # EC2 89 | module "ec2_instance" { 90 | source = "terraform-aws-modules/ec2-instance/aws" 91 | 92 | name = var.jenkins_ec2_instance 93 | 94 | instance_type = var.instance_type 95 | ami = "ami-0e8a34246278c21e4" 96 | key_name = "jenkins_server_keypair" 97 | monitoring = true 98 | vpc_security_group_ids = [module.sg.security_group_id] 99 | subnet_id = module.vpc.public_subnets[0] 100 | associate_public_ip_address = true 101 | user_data = file("../scripts/install_build_tools.sh") 102 | availability_zone = data.aws_availability_zones.azs.names[0] 103 | 104 | tags = { 105 | Name = "Jenkins-Server" 106 | Terraform = "true" 107 | Environment = "dev" 108 | } 109 | } -------------------------------------------------------------------------------- /Project-7/jenkins_server/tf-aws-ec2/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ec2_instance_ip" { 2 | value = module.ec2_instance.public_ip 3 | } 4 | 5 | -------------------------------------------------------------------------------- /Project-7/jenkins_server/tf-aws-ec2/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "5.25.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = var.aws_region 12 | } -------------------------------------------------------------------------------- /Project-7/jenkins_server/tf-aws-ec2/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "The region where the infrastructure should be deployed to" 3 | type = string 4 | } 5 | 6 | variable "aws_account_id" { 7 | description = "AWS Account ID" 8 | type = string 9 | } 10 | 11 | variable "backend_jenkins_bucket" { 12 | description = "S3 bucket where jenkins terraform state file will be stored" 13 | type = string 14 | } 15 | 16 | variable "backend_jenkins_bucket_key" { 17 | description = "bucket key for the jenkins terraform state file" 18 | type = string 19 | } 20 | 21 | variable "vpc_name" { 22 | description = "VPC Name for Jenkins Server VPC" 23 | type = string 24 | } 25 | 26 | variable "vpc_cidr" { 27 | description = "VPC CIDR for Jenkins Server VPC" 28 | type = string 29 | } 30 | 31 | variable "public_subnets" { 32 | description = "Subnets CIDR range" 33 | type = list(string) 34 | } 35 | 36 | variable "instance_type" { 37 | description = "Instance Type" 38 | type = string 39 | } 40 | 41 | variable "jenkins_security_group" { 42 | description = "Instance Type" 43 | type = string 44 | } 45 | 46 | variable "jenkins_ec2_instance" { 47 | description = "Instance Type" 48 | type = string 49 | } -------------------------------------------------------------------------------- /Project-7/jenkins_server/tf-aws-ec2/variables/dev.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "us-east-1" 2 | aws_account_id = "503382476502" 3 | backend_jenkins_bucket = "jenkins-terraform-state-7001" 4 | backend_jenkins_bucket_key = "jenkins/terraform.tfstate" 5 | vpc_name = "jenkins-vpc" 6 | vpc_cidr = "10.0.0.0/16" 7 | public_subnets = ["10.0.1.0/24"] 8 | instance_type = "t2.large" 9 | jenkins_ec2_instance = "Jenkins-Build-Server" 10 | jenkins_security_group = "jenkins-sg" -------------------------------------------------------------------------------- /Project-7/jenkins_server/tf-aws-ec2/variables/prod.tfvars: 
--------------------------------------------------------------------------------
1 | aws_region                 = "us-east-1"
2 | aws_account_id             = "503382476502"
3 | backend_jenkins_bucket     = "jenkins-terraform-state-7001"
4 | backend_jenkins_bucket_key = "jenkins/terraform.tfstate"
5 | vpc_name                   = "jenkins-vpc"
6 | vpc_cidr                   = "10.0.0.0/16"
7 | public_subnets             = ["10.0.1.0/24"]
8 | instance_type              = "t2.large"
--------------------------------------------------------------------------------
/Project-7/jenkins_server/tf-aws-ec2/variables/test.tfvars:
--------------------------------------------------------------------------------
1 | aws_region                 = "us-east-1"
2 | aws_account_id             = "503382476502"
3 | backend_jenkins_bucket     = "jenkins-terraform-state-7001"
4 | backend_jenkins_bucket_key = "jenkins/terraform.tfstate"
5 | vpc_name                   = "jenkins-vpc"
6 | vpc_cidr                   = "10.0.0.0/16"
7 | public_subnets             = ["10.0.1.0/24"]
8 | instance_type              = "t2.medium"
--------------------------------------------------------------------------------
/Project-7/manifest/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx
5 | spec:
6 |   selector:
7 |     matchLabels:
8 |       app: nginx
9 |   replicas: 1
10 |   template:
11 |     metadata:
12 |       labels:
13 |         app: nginx
14 |     spec:
15 |       containers:
16 |       - name: nginx
17 |         image: nginx
18 |         ports:
19 |         - containerPort: 80
--------------------------------------------------------------------------------
/Project-7/manifest/kubectl_commands.md:
--------------------------------------------------------------------------------
1 | 
2 | [ec2-user@ip-10-0-1-203 ~]$ aws configure
3 | 
4 | [ec2-user@ip-10-0-1-203 ~]$ aws eks update-kubeconfig --region us-east-1 --name my-eks-cluster
5 | Added new context arn:aws:eks:us-east-1:503382476502:cluster/my-eks-cluster to /home/ec2-user/.kube/config
6 | 
7 | [ec2-user@ip-10-0-1-203 ~]$ kubectl get namespace
8 | NAME              STATUS   AGE
9 | default           Active   3h42m
10 | kube-node-lease   Active   3h42m
11 | kube-public       Active   3h42m
12 | kube-system       Active   3h42m
13 | 
14 | [ec2-user@ip-10-0-1-203 ~]$ 
15 | [ec2-user@ip-10-0-1-203 ~]$ kubectl get namespace
16 | NAME              STATUS   AGE
17 | default           Active   3h43m
18 | eks-nginx-app     Active   11s
19 | kube-node-lease   Active   3h43m
20 | kube-public       Active   3h43m
21 | kube-system       Active   3h43m
22 | [ec2-user@ip-10-0-1-203 ~]$ 
23 | [ec2-user@ip-10-0-1-203 ~]$ 
24 | [ec2-user@ip-10-0-1-203 ~]$ kubectl get all -n eks-nginx-app
25 | NAME                         READY   STATUS    RESTARTS   AGE
26 | pod/nginx-7c5ddbdf54-jz5xh   1/1     Running   0          28s
27 | 
28 | NAME            TYPE           CLUSTER-IP       EXTERNAL-IP                                                               PORT(S)        AGE
29 | service/nginx   LoadBalancer   10.100.146.242   ababb1d14ffe64d6e9ee1d7a7da9c92c-711131747.us-east-1.elb.amazonaws.com   80:30287/TCP   27s
30 | 
31 | NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
32 | deployment.apps/nginx   1/1     1            1           28s
33 | 
34 | NAME                               DESIRED   CURRENT   READY   AGE
35 | replicaset.apps/nginx-7c5ddbdf54   1         1         1       28s
36 | [ec2-user@ip-10-0-1-203 ~]$ 
37 | 
--------------------------------------------------------------------------------
/Project-7/manifest/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: nginx
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   ports:
9 |   - name: http
10 |     port: 80
11 |     protocol: TCP
12 |     targetPort: 80
13 |   selector:
14 |     app: nginx
15 |   type: LoadBalancer
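Once `deployment.yaml` and `service.yaml` are applied (see the kubectl session above), the ELB hostname can also be fetched programmatically instead of via `kubectl get all`. A minimal sketch using the official `kubernetes` Python client, assuming `aws eks update-kubeconfig` has already written the context and the manifests were applied to the `eks-nginx-app` namespace as in the session above:

```python
from kubernetes import client, config

# Assumes ~/.kube/config was populated by `aws eks update-kubeconfig`.
config.load_kube_config()

svc = client.CoreV1Api().read_namespaced_service(
    name="nginx", namespace="eks-nginx-app"
)
ingress = svc.status.load_balancer.ingress
print(ingress[0].hostname if ingress else "ELB not provisioned yet")
```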
--------------------------------------------------------------------------------
/Project-7/tf-aws-eks/backend.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   backend "s3" {
3 |     bucket = "terraform-eks-cicd-7001"
4 |     key    = "eks/terraform.tfstate"
5 |     region = "us-east-1"
6 |   }
7 | }
--------------------------------------------------------------------------------
/Project-7/tf-aws-eks/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_availability_zones" "azs" {}
--------------------------------------------------------------------------------
/Project-7/tf-aws-eks/eks.tf:
--------------------------------------------------------------------------------
1 | # Ref - https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest
2 | 
3 | module "eks" {
4 |   source  = "terraform-aws-modules/eks/aws"
5 |   version = "~> 20.0"
6 | 
7 |   cluster_name    = "my-eks-cluster"
8 |   cluster_version = "1.29"
9 | 
10 |   cluster_endpoint_public_access = true
11 | 
12 |   vpc_id     = module.vpc.vpc_id
13 |   subnet_ids = module.vpc.private_subnets
14 | 
15 | 
16 |   eks_managed_node_groups = {
17 |     nodes = {
18 |       min_size     = 1
19 |       max_size     = 3
20 |       desired_size = 2
21 | 
22 |       instance_types = ["t2.small"]
23 |       capacity_type  = "SPOT"
24 |     }
25 |   }
26 | 
27 |   tags = {
28 |     Environment = "dev"
29 |     Terraform   = "true"
30 |   }
31 | }
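Provisioning the EKS control plane plus the managed node group typically takes 10-15 minutes, so it can be useful to poll the cluster state before wiring up kubectl. A quick status check from Python, assuming the same region and cluster name as in `eks.tf`:

```python
import boto3

# Cluster name and region taken from eks.tf / dev.tfvars.
eks = boto3.client("eks", region_name="us-east-1")

cluster = eks.describe_cluster(name="my-eks-cluster")["cluster"]
print(cluster["status"])          # e.g. "CREATING" or "ACTIVE"
print(cluster.get("endpoint"))    # API server endpoint once active
```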
"jenkins/terraform.tfstate" 5 | vpc_name = "jenkins-vpc" 6 | vpc_cidr = "10.0.0.0/16" 7 | public_subnets = ["10.0.1.0/24"] 8 | instance_type = "t2.large" -------------------------------------------------------------------------------- /Project-7/tf-aws-eks/variables/test.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "us-east-1" 2 | aws_account_id = "503382476502" 3 | backend_jenkins_bucket = "jenkins-terraform-state-7001" 4 | backend_jenkins_bucket_key = "jenkins/terraform.tfstate" 5 | vpc_name = "jenkins-vpc" 6 | vpc_cidr = "10.0.0.0/16" 7 | public_subnets = ["10.0.1.0/24"] 8 | instance_type = "t2.medium" -------------------------------------------------------------------------------- /Project-7/tf-aws-eks/vpc.tf: -------------------------------------------------------------------------------- 1 | # We'll be using publicly available modules for creating different services instead of resources 2 | # https://registry.terraform.io/browse/modules?provider=aws 3 | 4 | # Creating a VPC 5 | module "vpc" { 6 | source = "terraform-aws-modules/vpc/aws" 7 | 8 | name = var.vpc_name 9 | cidr = var.vpc_cidr 10 | 11 | azs = data.aws_availability_zones.azs.names 12 | public_subnets = var.public_subnets 13 | private_subnets = var.private_subnets 14 | 15 | 16 | enable_dns_hostnames = true 17 | enable_nat_gateway = true 18 | single_nat_gateway = true 19 | 20 | tags = { 21 | "kubernetes.io/cluster/my-eks-cluster" = "shared" 22 | Terraform = "true" 23 | Environment = "dev" 24 | } 25 | 26 | public_subnet_tags = { 27 | "kubernetes.io/cluster/my-eks-cluster" = "shared" 28 | "kubernetes.io/role/elb" = 1 29 | } 30 | 31 | private_subnet_tags = { 32 | "kubernetes.io/cluster/my-eks-cluster" = "shared" 33 | "kubernetes.io/role/internal-elb" = 1 34 | } 35 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AWS DevOps projects 2 | 3 | ### Project - 1: Automating CloudFormation Stack Drift Remediation with AWS Eventbridge and Lambda 4 | 5 | SourceCode : https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-1 6 | 7 | Blog : https://medium.com/@vishalmishra_2505/automating-cloudformation-stack-drift-remediation-with-aws-eventbridge-and-lambda-34a1af718628 8 | 9 | 10 | ### Project - 2: Monitoring and Logging with AWS CloudWatch for Serverless Application 11 | 12 | SourceCode : https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-2 13 | 14 | Blog : https://medium.com/@vishalmishra_2505/monitoring-and-logging-with-aws-cloudwatch-65c92ffe9e13 15 | 16 | 17 | ### Project - 3: Implementing a file storage service for user content using API Gateway, Lambda, and S3 18 | 19 | SourceCode : https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-3 20 | 21 | Blog : https://medium.com/@vishalmishra_2505/implementing-a-file-storage-service-for-user-content-using-api-gateway-lambda-and-s3-part-1-2c5b2d1ae67c 22 | 23 | 24 | ### Project - 5: Web Identity Federation using Cognito with S3 Integration 25 | 26 | SourceCode : https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-5 27 | 28 | Blog : https://medium.com/@vishalmishra_2505/one-tap-to-your-files-simplifying-security-with-cognito-and-s3-using-web-identity-federation-e504e98a9653 29 | 30 | 31 | ### Project - 6: Blog App Blue/Green Deployment using ECS, CodeBuild and ALB 32 | 33 | SourceCode : 34 | 35 | 
35 | https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-6
36 | 
37 | https://github.com/vishal2505/MyBlogApp/tree/main
38 | 
39 | Blog : 
40 | 
41 | https://towardsaws.com/say-goodbye-to-downtime-achieving-seamless-updates-with-blue-green-deployments-in-ecs-on-aws-abaff92a96ce
42 | 
43 | https://towardsaws.com/say-goodbye-to-downtime-achieving-seamless-updates-with-blue-green-deployments-in-ecs-on-aws-30e6af5c0b2c
44 | 
45 | 
46 | ### Project - 7: Deploying EKS Cluster and Nginx application via CICD (Jenkins) Pipeline
47 | 
48 | SourceCode: 
49 | 
50 | https://github.com/vishal2505/AWSDevOpsProjects/tree/main/Project-7
51 | 
52 | https://github.com/vishal2505/terraform-eks-cicd/tree/main
53 | 
54 | Blog: 
55 | 
56 | https://medium.com/@vishalmishra_2505/from-scratch-to-production-deploying-eks-clusters-and-applications-with-ci-cd-using-jenkins-and-f27d4686d5fe
57 | 
58 | 
59 | 
--------------------------------------------------------------------------------