├── PortalAccessProtocol
│   └── README.md
└── S3PublicBucketCheck
    ├── README.md
    ├── S3PublicBucketCheck.py
    └── s3pubcheck.tf

--------------------------------------------------------------------------------
/PortalAccessProtocol/README.md:
--------------------------------------------------------------------------------
This was an attempt to create a dedicated portal to "semi-automatically" add IPs to security groups.

It was tested with an R Shiny app hosted on an EC2 instance.

It uses a Google Form as a way for authorized users to request that new IPs be added to the EC2 instance hosting the app.

An authorized user then reviews incoming requests and runs a simple command to add the new IP.

## Server Side Changes
1. Install the Apache mod_rewrite module: `sudo a2enmod rewrite`
2. Disable / comment out these lines in /etc/apache2/sites-enabled/000-default.conf to disable the reverse proxy: `#ProxyPass / http://0.0.0.0:3838/` and `#ProxyPassReverse / http://0.0.0.0:3838/`
3. Create a .htaccess file in the /var/www folder which contains:
   - `Options +FollowSymLinks`
   - `RewriteEngine On`
   - `RewriteRule ^.*$ auth.php`

## AUTH.PHP script
Whenever a user accesses the R Shiny server, Apache rewrites the request to an auth.php script that checks whether their IP is present in an {IP-Whitelist} file.

If the IP exists in the {Whitelist}, auth.php redirects them to the requested URL; otherwise it redirects them to the [CaDC Analytics Portal access request form](https://goo.gl/forms/vpB3JTPSk3oDf4F92).
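The auth.php script itself is not included in this repo. Purely as an illustration of the check (the real gate is a PHP script, and the whitelist path below is an assumed location, not a confirmed one), the logic looks roughly like this in Python:

    # Illustration only: the production gate is auth.php, not this sketch.
    # WHITELIST_PATH is an assumed location for the {IP-Whitelist} file.
    WHITELIST_PATH = "/var/www/whitelist"
    FORM_URL = "https://goo.gl/forms/vpB3JTPSk3oDf4F92"

    def gate(remote_addr, requested_url):
        """Return the URL the visitor should be redirected to."""
        with open(WHITELIST_PATH) as f:
            whitelist = {line.strip() for line in f if line.strip()}
        if remote_addr in whitelist:
            return requested_url  # whitelisted IP: pass through to the app
        return FORM_URL           # unknown IP: send to the access request form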
## GOOGLE APPS NOTIFICATION SCRIPT
I also wrote a custom Google Apps Script on the response spreadsheet of the [Portal Access Request Form](https://goo.gl/forms/vpB3JTPSk3oDf4F92). For any new row, it sends an email to argo@argolabs.org notifying them of the new access request.
https://script.google.com/a/macros/argolabs.org/s/AKfycbzKH_V3Ix6aNUazQzeZiJxBPO672cogWbEKZ8yCecxpUNvLEQ02/exec

## SYNCING w/ AWS security groups
The {IP-Whitelist} file is updated on a schedule by issuing:

    sudo aws ec2 describe-security-groups --group-ids sg-46b9723e | grep CidrIp | awk -F\" '{print $4}' | awk -F/ '{print $1}' >> whitelist

--------------------------------------------------------------------------------
/S3PublicBucketCheck/README.md:
--------------------------------------------------------------------------------
# S3-Public-Bucket-Check

An AWS Lambda function that checks your account for public S3 buckets and emails you whenever a new public bucket is created.

The function is triggered by a CloudWatch event that fires every 60 minutes.

Solves for [S3 shaming](https://www.theregister.co.uk/2018/04/19/48_million_personal_profiles_left_exposed_by_data_firm_localblox/).

--------------------------------------------------------------------------------
/S3PublicBucketCheck/S3PublicBucketCheck.py:
--------------------------------------------------------------------------------
import json

import boto3
from botocore.exceptions import ClientError

# Replace SENDER & RECIPIENT with email addresses.
# If your account is still in the SES "sandbox", both addresses must be verified.
SENDER = ""
RECIPIENT = ""
# Replace AWS_REGION appropriately.
AWS_REGION = ""

# The S3 bucket that stores the public-bucket count between invocations.
STATE_BUCKET = "ENTER S3 BUCKET NAME THAT STORES THIS VALUE"

# Gets the account ID, so this Lambda can be deployed across all your AWS accounts.
account_id = boto3.client('sts').get_caller_identity().get('Account')
SUBJECT = "S3 Public Bucket Notification for Account:" + str(account_id)

# The character encoding for the email.
CHARSET = "UTF-8"

# Create an SES client in the chosen region.
ses_client = boto3.client('ses', region_name=AWS_REGION)


def lambda_handler(event, context):
    s3 = boto3.resource('s3')
    count = 0
    email_str = ("S3 Public Bucket Notification for Account:" + str(account_id) + "\n"
                 "*****************************************" + "\n")

    for bucket in s3.buckets.all():
        bucket_name = bucket.name
        create_date = bucket.creation_date
        bucket_acl = s3.BucketAcl(bucket_name).grants

        # Check the bucket policy for a wildcard Principal combined with Effect:Allow.
        try:
            bucket_policy = json.loads(s3.BucketPolicy(bucket_name).policy)
            for statement in bucket_policy['Statement']:
                principal = statement['Principal']
                effect = statement['Effect']
                # str() also catches principals written as {"AWS": "*"}.
                if "*" in str(principal) and effect == "Allow":
                    count += 1
                    email_str += str(bucket_name) + " " + str(create_date) + " Bucket Policy\n"
                    break
        except ClientError:
            # Buckets without any policy raise NoSuchBucketPolicy; skip them.
            pass

        # Check the bucket ACL for an "AllUsers" grantee.
        if "AllUsers" in str(bucket_acl) and str(bucket_name) not in email_str:
            count += 1
            email_str += str(bucket_name) + " " + str(create_date) + "\n"

    filename = account_id + ".txt"

    # The public-bucket count is preserved across invocations; S3 is used as the store.
    try:
        obj = s3.Object(STATE_BUCKET, filename)
        s3_count = obj.get()['Body'].read().decode('utf-8')
    except ClientError:
        # First invocation: no stored count yet, so treat the previous count as zero.
        s3_count = "0"
    print("S3 count:", s3_count, "\n", "Current count:", count)

    # Send an email only when the current bucket count exceeds the stored count.
    if int(count) == int(s3_count):
        print("Bucket counts equal")
        print(email_str)
    if int(count) > int(s3_count):
        print("New public S3 bucket detected, sending email")
        print(email_str)
        emailResult(email_str, SUBJECT)

    # Write the new public-bucket count back to the state bucket as <account-id>.txt.
    s3.Bucket(STATE_BUCKET).put_object(Key=filename, Body=str(count))


def emailResult(email_result, subject):
    # Provide the contents of the email.
    ses_client.send_email(
        Destination={
            'ToAddresses': [
                RECIPIENT,
            ],
        },
        Message={
            'Body': {
                'Text': {
                    'Charset': CHARSET,
                    'Data': str(email_result),
                },
            },
            'Subject': {
                'Charset': CHARSET,
                'Data': subject,
            },
        },
        Source=SENDER,
    )
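
# ---------------------------------------------------------------------------
# Illustration only (not part of the Lambda): the shape of bucket policy the
# handler above flags. The bucket name and policy below are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::example-bucket/*",
        }],
    }
    for statement in sample_policy["Statement"]:
        # The same test lambda_handler applies to every statement.
        if "*" in str(statement["Principal"]) and statement["Effect"] == "Allow":
            print("public statement found:", statement["Action"])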
--------------------------------------------------------------------------------
/S3PublicBucketCheck/s3pubcheck.tf:
--------------------------------------------------------------------------------
resource "aws_iam_role" "lambda_S3PublicBucketCheck_role" {
  name               = "lambda_S3PublicBucketCheck_role"
  assume_role_policy = "${data.aws_iam_policy_document.lambda_S3PublicBucketCheck_AssumePolicy.json}"
}

data "aws_iam_policy_document" "lambda_S3PublicBucketCheck_AssumePolicy" {
  statement {
    sid     = ""
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["lambda.amazonaws.com"]
    }
  }
}

resource "aws_iam_policy" "lambda_S3PublicBucketCheck_policy" {
  name   = "lambda_S3PublicBucketCheck_policy"
  policy = "${data.aws_iam_policy_document.lambda_S3PublicBucketCheck_policyDoc.json}"
}

data "aws_iam_policy_document" "lambda_S3PublicBucketCheck_policyDoc" {
  statement {
    sid    = ""
    effect = "Allow"

    actions = [
      "logs:CreateLogGroup",
      "logs:CreateLogStream",
      "logs:PutLogEvents",
    ]

    resources = ["arn:aws:logs:*:*:*"]
  }

  statement {
    sid    = ""
    effect = "Allow"

    actions = [
      "ses:SendEmail",
      "ses:SendRawEmail",
    ]

    resources = ["*"]
  }

  statement {
    sid    = ""
    effect = "Allow"

    actions = [
      "s3:Get*",
      "s3:List*",
    ]

    resources = ["*"]
  }

  # Write access is limited to the state bucket; replace BUCKET with its name.
  statement {
    sid    = ""
    effect = "Allow"

    actions = [
      "s3:Put*",
      "s3:List*",
    ]

    resources = [
      "arn:aws:s3:::BUCKET/*",
      "arn:aws:s3:::BUCKET",
    ]
  }
}

resource "aws_iam_role_policy_attachment" "attach-to-role" {
  role       = "${aws_iam_role.lambda_S3PublicBucketCheck_role.name}"
  policy_arn = "${aws_iam_policy.lambda_S3PublicBucketCheck_policy.arn}"
}

resource "aws_lambda_function" "S3PublicBucketCheck" {
  function_name    = "S3PublicBucketCheck"
  handler          = "S3PublicBucketCheck.lambda_handler"
  runtime          = "python3.9"
  filename         = "./S3PublicBucketCheck/S3PublicBucketCheck.py.zip"
  source_code_hash = "${base64sha256(file("./S3PublicBucketCheck/S3PublicBucketCheck.py.zip"))}"
  role             = "${aws_iam_role.lambda_S3PublicBucketCheck_role.arn}"
  timeout          = 300
}

resource "aws_cloudwatch_event_rule" "every_sixty_minutes" {
  name                = "every_sixty_minutes"
  description         = "Fires every sixty minutes"
  schedule_expression = "rate(60 minutes)"
}

resource "aws_cloudwatch_event_target" "S3PublicBucketCheck_lambda" {
  rule      = "${aws_cloudwatch_event_rule.every_sixty_minutes.name}"
  target_id = "S3PublicBucketCheck_lambda"
  arn       = "${aws_lambda_function.S3PublicBucketCheck.arn}"
}

resource "aws_lambda_permission" "allow_cloudwatch_to_call_S3PublicBucketCheck_lambda" {
  statement_id  = "AllowExecutionFromCloudWatch"
  action        = "lambda:InvokeFunction"
  function_name = "${aws_lambda_function.S3PublicBucketCheck.function_name}"
  principal     = "events.amazonaws.com"
  source_arn    = "${aws_cloudwatch_event_rule.every_sixty_minutes.arn}"
}
--------------------------------------------------------------------------------
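For completeness, a sketch of producing the zip that the `aws_lambda_function` resource above expects at `./S3PublicBucketCheck/S3PublicBucketCheck.py.zip` (the paths assume the layout shown in the tree at the top):

    import zipfile

    # Package the handler so it matches the "filename" and "source_code_hash"
    # arguments of the aws_lambda_function resource.
    with zipfile.ZipFile("S3PublicBucketCheck/S3PublicBucketCheck.py.zip", "w",
                         zipfile.ZIP_DEFLATED) as archive:
        archive.write("S3PublicBucketCheck/S3PublicBucketCheck.py",
                      arcname="S3PublicBucketCheck.py")

Terraform hashes the zip via `base64sha256(file(...))`, so re-running this after editing the handler is what triggers a redeploy on the next `terraform apply`.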