├── securityhub-collector
├── src
│ ├── __init__.py
│ └── securityhub_collector.py
├── sam
│ ├── requirements.txt
│ ├── sam_package.sh
│ ├── create_layer.sh
│ ├── packaged.yaml
│ ├── template.yaml
│ └── event.json
└── README.md
├── securityhub-forwarder
├── src
│ ├── __init__.py
│ ├── utils.py
│ └── securityhub_forwarder.py
├── sam
│ ├── requirements.txt
│ ├── sam_package.sh
│ ├── template.yaml
│ └── packaged.yaml
├── test
│ ├── fixtures.json
│ └── test_securityhub_connector.py
└── README.md
├── sumologic-app-utils
├── src
│ ├── __init__.py
│ ├── main.py
│ └── sumologic.py
├── README.md
├── sumo_app_utils.yaml
├── packaged_sumo_app_utils.yaml
└── deploy.sh
├── loggroup-lambda-connector
├── test
│ ├── __init__.py
│ ├── requirements.txt
│ └── loggroup-lambda-cft.json
├── package.json
├── sam
│ ├── sam_package.sh
│ ├── packaged.yaml
│ └── template.yaml
├── Readme.md
└── src
│ └── loggroup-lambda-connector.js
├── cloudwatchlogs-with-dlq
├── requirements.txt
├── sumo-dlq-function-utils
│ ├── lib
│ │ ├── mainindex.js
│ │ ├── dlqutils.js
│ │ ├── utils.js
│ │ └── sumologsclient.js
│ └── package.json
├── package.json
├── DLQProcessor.js
├── Readme.md
├── vpcutils.js
└── cloudwatchlogs_lambda.js
├── LICENSE.txt
├── kinesisfirehose-processor
├── package.json
├── kinesisfirehose-processor.js
├── Readme.md
├── kinesisfirehose-lambda-cft.json
└── test-kinesisfirehose-lambda-cft.json
├── cloudwatchevents
├── guardduty
│ ├── packaged.yaml
│ ├── template.yaml
│ ├── event.json
│ ├── README.md
│ └── cloudwatchevents.json
├── package.json
├── guarddutybenchmark
│ ├── README.md
│ ├── template_v2.yaml
│ └── packaged_v2.yaml
├── README.md
└── src
│ └── cloudwatchevents.js
├── CHANGELOG.md
├── .travis.yml
├── s3
├── README.md
└── node.js
│ └── s3.js
├── inspector
├── Readme.md
└── python
│ └── inspector.py
├── cloudwatchlogs
├── README.md
└── cloudwatchlogs_lambda.js
├── cloudtrail_s3
├── cloudtrail_s3_to_sumo.js
└── README.md
├── deploy_function.py
├── kinesis
└── README.md
└── README.md
/securityhub-collector/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/securityhub-forwarder/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/sumologic-app-utils/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/securityhub-collector/sam/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.9.60
2 |
--------------------------------------------------------------------------------
/securityhub-forwarder/sam/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.9.66
2 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.20.0
2 | boto3==1.5.1
3 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/test/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.20.0
2 | boto3==1.5.1
3 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/mainindex.js:
--------------------------------------------------------------------------------
1 | var SumoLogsClient = require('./sumologsclient.js').SumoLogsClient;
2 | var DLQUtils = require('./dlqutils.js');
3 | var Utils = require('./utils.js');
4 |
5 | module.exports = {
6 | SumoLogsClient: SumoLogsClient,
7 | DLQUtils: DLQUtils,
8 | Utils: Utils
9 | };
10 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "sumo-dlq-function-utils",
3 | "version": "0.0.1",
4 |   "description": "This is a utility package for Sumo Logic Inc.",
5 | "license": "Apache",
6 | "files": ["./lib"],
7 | "main": "./lib/mainindex",
8 | "dependencies": {
9 | "aws-sdk": "^2.160.0"
10 | },
11 | "devDependencies": {},
12 | "author": "Himanshu Pal"
13 | }
14 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2015, Sumo Logic Inc. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/kinesisfirehose-processor/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kinesisfirehose-processor",
3 | "version": "1.0.0",
4 | "description": "Lambda Function for transforming incoming data from kinesis firehose",
5 | "main": "kinesisfirehose-processor.js",
6 | "dependencies": {},
7 | "devDependencies": {},
8 | "scripts": {
9 | "test": "echo \"Error: no test specified\" && exit 1",
10 | "build": "rm -f kinesisfirehose-processor.zip && zip -r kinesisfirehose-processor.zip kinesisfirehose-processor.js package.json"
11 | },
12 | "keywords": [
13 | "AWS",
14 | "Kinesis Firehose"
15 | ],
16 | "author": "Himanshu Pal",
17 | "license": "Apache-2.0"
18 | }
19 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "dlq_processor",
3 | "version": "1.0.0",
4 | "description": "Lambda function for processing messages from CloudWatch with Dead Letter Queue Support",
5 | "main": "DLQProcessor.js",
6 | "dependencies": {
7 | "jmespath": "^0.15.0",
8 | "lodash.find": "^4.6.0"
9 | },
10 | "devDependencies": {},
11 | "scripts": {
12 |     "test": "node -e \"require('./test').test()\"",
13 | "build": "rm -f cloudwatchlogs-with-dlq.zip && npm install && zip -r cloudwatchlogs-with-dlq.zip DLQProcessor.js cloudwatchlogs_lambda.js vpcutils.js package.json sumo-dlq-function-utils/ node_modules/",
14 | "prod_deploy": "python -c 'from test_cwl_lambda import prod_deploy;prod_deploy()'"
15 | },
16 | "author": "Himanshu Pal",
17 | "license": "Apache-2.0"
18 | }
19 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "loggroup-lambda-connector",
3 | "version": "1.0.0",
4 | "description": "Lambda Function for automatic subscription of any Sumo Logic lambda function with loggroups matching an input pattern.",
5 | "main": "loggroup-lambda-connector.js",
6 | "dependencies": {
7 | "aws-sdk": "^2.160.0"
8 | },
9 | "devDependencies": {},
10 | "scripts": {
11 | "test": "echo \"Error: no test specified\" && exit 1",
12 | "build": "echo `pwd` && rm -f test/loggroup-lambda-connector.zip && zip -r test/loggroup-lambda-connector.zip src/loggroup-lambda-connector.js package.json",
13 | "prod_deploy": "python -c 'from test.test_loggroup_lambda_connector import prod_deploy;prod_deploy()'"
14 | },
15 | "keywords": [
16 | "AWS"
17 | ],
18 | "author": "Himanshu Pal",
19 | "license": "Apache-2.0"
20 | }
21 |
--------------------------------------------------------------------------------
/sumologic-app-utils/README.md:
--------------------------------------------------------------------------------
1 | # sumologic-app-utils
2 |
3 | This lambda function is used for creating custom resources. This application is used in conjunction with other SAM applications, such as sumologic-guardduty-benchmark, to automate the creation and deletion of Sumo Logic resources.
4 |
5 |
6 | Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
7 |
8 |
9 |
10 | ## Setup
11 |
12 | 1. Deploying the SAM Application
13 | 1. Go to https://serverlessrepo.aws.amazon.com/applications.
14 | 2. Search for sumologic-app-utils.
15 | 3. Click on the sumologic-app-utils application, and then click Deploy.
16 | 4. Click Deploy.
17 |
18 |
19 | ## License
20 |
21 | Apache License 2.0 (Apache-2.0)
22 |
23 |
24 | ## Support
25 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
26 |
27 |
--------------------------------------------------------------------------------
/sumologic-app-utils/sumo_app_utils.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: 'AWS::Serverless-2016-10-31'
3 | Description: >
4 |     This solution consists of a lambda function which gets triggered when CF stack is deployed. This is used for creating sumologic resources like collector, source and app folders.
5 |
6 | Globals:
7 | Function:
8 | Timeout: 300
9 |
10 | Resources:
11 | SumoAppUtilsFunction:
12 | Type: 'AWS::Serverless::Function'
13 | Properties:
14 | Handler: main.handler
15 | Runtime: python3.7
16 | CodeUri: s3://appdevstore/sumo_app_utils.zip
17 | MemorySize: 128
18 | Timeout: 300
19 |
20 | Outputs:
21 | SumoAppUtilsFunction:
22 | Description: "SumoAppUtils Function ARN"
23 | Value: !GetAtt SumoAppUtilsFunction.Arn
24 | Export:
25 | Name : !Sub "${AWS::StackName}-SumoAppUtilsFunction"
26 |
--------------------------------------------------------------------------------
/sumologic-app-utils/packaged_sumo_app_utils.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'This solution consists of a lambda function which gets triggered
3 | when CF stack is deployed. This is used for creating sumologic resources like collector,
4 | source and app folders.
5 |
6 | '
7 | Globals:
8 | Function:
9 | Timeout: 300
10 | Outputs:
11 | SumoAppUtilsFunction:
12 | Description: SumoAppUtils Function ARN
13 | Export:
14 | Name:
15 | Fn::Sub: ${AWS::StackName}-SumoAppUtilsFunction
16 | Value:
17 | Fn::GetAtt:
18 | - SumoAppUtilsFunction
19 | - Arn
20 | Resources:
21 | SumoAppUtilsFunction:
22 | Properties:
23 | CodeUri: s3://appdevstore/sumo_app_utils.zip
24 | Handler: main.handler
25 | MemorySize: 128
26 | Runtime: python3.7
27 | Timeout: 300
28 | Type: AWS::Serverless::Function
29 | Transform: AWS::Serverless-2016-10-31
30 |
--------------------------------------------------------------------------------
/securityhub-forwarder/sam/sam_package.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Package and deploy the securityhub-forwarder SAM application.
# The "prod" AWS profile publishes to the appdevstore bucket in us-east-1;
# any other profile targets the dev bucket in us-east-2.
if [ "$AWS_PROFILE" == "prod" ]
then
    SAM_S3_BUCKET="appdevstore"
    AWS_REGION="us-east-1"
else
    SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
    AWS_REGION="us-east-2"
fi

# Upload code artifacts to S3 and rewrite template.yaml into packaged.yaml.
# Expansions are double-quoted to survive word splitting/globbing.
sam package --template-file template.yaml --s3-bucket "$SAM_S3_BUCKET" --output-template-file packaged.yaml

sam deploy --template-file packaged.yaml --stack-name testingsecurityhubforwarder --capabilities CAPABILITY_IAM --region "$AWS_REGION"
#aws cloudformation describe-stack-events --stack-name testingsecurityhublambda --region $AWS_REGION
#aws cloudformation get-template --stack-name testingsecurityhublambda --region $AWS_REGION
# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-forwarder --semantic-version 1.0.1 --template-body file://packaged.yaml
--------------------------------------------------------------------------------
/securityhub-collector/sam/sam_package.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Package and deploy the securityhub-collector SAM application.
# The "prod" AWS profile publishes to the appdevstore bucket in us-east-1;
# any other profile targets the dev bucket in us-east-2.
if [ "$AWS_PROFILE" == "prod" ]
then
    SAM_S3_BUCKET="appdevstore"
    AWS_REGION="us-east-1"
else
    SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
    AWS_REGION="us-east-2"
fi
# Expansions are double-quoted to survive word splitting/globbing.
sam package --template-file template.yaml --s3-bucket "$SAM_S3_BUCKET" --output-template-file packaged.yaml

sam deploy --template-file packaged.yaml --stack-name testingsecurityhubcollector --capabilities CAPABILITY_IAM --region "$AWS_REGION" --parameter-overrides S3SourceBucketName=securityhubfindings
#aws cloudformation describe-stack-events --stack-name testingsecurityhublambda --region $AWS_REGION
#aws cloudformation get-template --stack-name testingsecurityhublambda --region $AWS_REGION
# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-connector --semantic-version 1.0.1 --template-body file://packaged.yaml
--------------------------------------------------------------------------------
/loggroup-lambda-connector/sam/sam_package.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Package and deploy the loggroup-lambda-connector SAM application.
# The "prod" AWS profile publishes to the appdevstore bucket in us-east-1;
# any other profile targets the dev bucket in us-east-2.
if [ "$AWS_PROFILE" == "prod" ]
then
    SAM_S3_BUCKET="appdevstore"
    AWS_REGION="us-east-1"
else
    SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
    AWS_REGION="us-east-2"
fi
# Expansions are double-quoted to survive word splitting/globbing.
sam package --template-file template.yaml --s3-bucket "$SAM_S3_BUCKET" --output-template-file packaged.yaml

sam deploy --template-file packaged.yaml --stack-name testingloggrpconnector --capabilities CAPABILITY_IAM --region "$AWS_REGION" --parameter-overrides LambdaARN="arn:aws:lambda:us-east-1:956882708938:function:AccessVPCResourcesLambda"
#aws cloudformation describe-stack-events --stack-name testingloggrpconnector --region $AWS_REGION
#aws cloudformation get-template --stack-name testingloggrpconnector --region $AWS_REGION
# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-connector --semantic-version 1.0.1 --template-body file://packaged.yaml
--------------------------------------------------------------------------------
/cloudwatchevents/guardduty/packaged.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'This function is invoked by AWS CloudWatch events in response to state
3 |   change in your AWS resources which matches an event target definition. The event
4 | payload received is then forwarded to Sumo Logic HTTP source endpoint.
5 |
6 | '
7 | Globals:
8 | Function:
9 | Timeout: 300
10 | Outputs:
11 | CloudWatchEventFunction:
12 | Description: CloudWatchEvent Processor Function ARN
13 | Value:
14 | Fn::GetAtt:
15 | - CloudWatchEventFunction
16 | - Arn
17 | Parameters:
18 | SumoEndpointUrl:
19 | Type: String
20 | Resources:
21 | CloudWatchEventFunction:
22 | Properties:
23 | CodeUri: s3://appdevstore/e62e525a25bb080e521d8bf64909ea41
24 | Environment:
25 | Variables:
26 | SUMO_ENDPOINT:
27 | Ref: SumoEndpointUrl
28 | Events:
29 | CloudWatchEventTrigger:
30 | Properties:
31 | Pattern:
32 | source:
33 | - aws.guardduty
34 | Type: CloudWatchEvent
35 | Handler: cloudwatchevents.handler
36 | Runtime: nodejs8.10
37 | Type: AWS::Serverless::Function
38 | Transform: AWS::Serverless-2016-10-31
39 |
--------------------------------------------------------------------------------
/cloudwatchevents/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "cloudwatchevents-processor",
3 | "version": "1.0.0",
4 | "description": "AWS Lambda function to collect CloudWatch events and post them to SumoLogic.",
5 | "main": "src/cloudwatchevents.js",
6 | "repository": "https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents",
7 | "author": "Himanshu Pal",
8 | "license": "Apache-2.0",
9 | "dependencies": {},
10 | "scripts": {
11 | "test": "cd guardduty && sam local invoke CloudWatchEventFunction -e event.json",
12 | "build_guardduty": "cd guardduty && sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml",
13 | "deploy_guardduty": "cd guardduty && sam deploy --template-file packaged.yaml --stack-name testingguarddutylambda --capabilities CAPABILITY_IAM --parameter-overrides SumoEndpointUrl=$SUMO_ENDPOINT",
14 | "view_deploy_logs": "aws cloudformation describe-stack-events --stack-name testingguarddutylambda",
15 | "build_zip": "rm -f guardduty.zip && cd src && zip ../guardduty.zip cloudwatchevents.js && cd ..",
16 | "build_temp": "aws cloudformation get-template --stack-name testingguarddutylambda --region $AWS_REGION"
17 | },
18 | "keywords": [
19 | "lambda",
20 | "cloudwatch-events"
21 | ]
22 | }
23 |
24 |
25 |
--------------------------------------------------------------------------------
/securityhub-forwarder/test/fixtures.json:
--------------------------------------------------------------------------------
1 | {
2 | "Types": "Software and Configuration Checks/Industry and Regulatory Standards/HIPAA Controls",
3 | "Description": "This search gives top 10 resources which are accessed in last 15 minutes",
4 | "GeneratorID": "InsertFindingsScheduledSearch",
5 | "Severity": 30,
6 | "SourceUrl":"https://service.sumologic.com/ui/#/search/RmC8kAUGZbXrkj2rOFmUxmHtzINUgfJnFplh3QWY",
7 | "ComplianceStatus": "FAILED",
8 | "Rows": "[{\"Timeslice\":1545042427000,\"finding_time\":\"1545042427000\",\"item_name\":\"A nice dashboard.png\",\"title\":\"Vulnerability: Apple iTunes m3u Playlist File Title Parsing Buffer Overflow Vulnerability(34886) found on 207.235.176.3\",\"resource_id\":\"10.178.11.43\",\"resource_type\":\"Other\"},{\"Timeslice\":\"1545042427000\",\"finding_time\":\"1545042427000\",\"item_name\":\"Screen Shot 2014-07-30 at 11.39.29 PM.png\",\"title\":\"PCI Req 01: Traffic to Cardholder Environment: Direct external traffic to secure port on 10.178.11.43\",\"resource_id\":\"10.178.11.42\",\"resource_type\":\"AwsEc2Instance\"},{\"Timeslice\":\"1545042427000\",\"finding_time\":\"1545042427000\",\"item_name\":\"10388049_589057504526630_2031213996_n.jpg\",\"title\":\"Test Check Success for 207.235.176.5\",\"resource_id\":\"10.178.11.41\",\"resource_type\":\"Other\"}]"
9 | }
10 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG for sumologic-aws-lambda functions
2 |
3 | This file lists changes made in each version of the function repo
4 |
5 | ## 1.2.2:
6 | July 11, 2016:
7 | * Merged [PR#12](https://github.com/SumoLogic/sumologic-aws-lambda/pull/12).
8 |
9 | ## 1.2.1:
10 | Apr 05, 2016
11 | * Merged [PR#7](https://github.com/SumoLogic/sumologic-aws-lambda/pull/7).
12 |
13 | ## 1.2.0:
14 | Feb 29, 2016
15 | * Merged [PR#4](https://github.com/SumoLogic/sumologic-aws-lambda/pull/4), add LICENSE file, and a function for reading from S3.
16 |
17 | ## 1.1.1:
18 | Feb 22, 2016
19 | * Merged [PR#3](https://github.com/SumoLogic/sumologic-aws-lambda/pull/3), add strict equality comparison operator in the Lambda function for Lambda logs
20 |
21 | ## 1.1.0:
22 | Feb 3, 2016
23 | * Merged [PR#2](https://github.com/SumoLogic/sumologic-aws-lambda/pull/2), add a function for reading from AWS Kinesis
24 |
25 | ## 1.0.0:
26 | Jan 25, 2016
27 | * Initial release with 2 functions for reading from AWS CloudWatch Logs (VPC and Lambda function logs)
28 |
29 |
30 | - - -
31 | Check the [Markdown Syntax Guide](http://daringfireball.net/projects/markdown/syntax) for help with Markdown.
32 |
33 | The [Github Flavored Markdown page](http://github.github.com/github-flavored-markdown/) describes the differences between markdown on github and standard markdown.
34 |
--------------------------------------------------------------------------------
/securityhub-collector/sam/create_layer.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# NOTE: the original shebang was "#!bash/bin", which is not a valid
# interpreter path; corrected to /bin/bash.
#
# Build a Lambda layer zip containing the securityhub Python dependencies
# and publish it (with public GetLayerVersion permission) to every
# supported region.

# Build the dependency zip once; reuse it on subsequent runs.
if [ ! -f securityhub_deps.zip ]; then
    echo "creating zip file"
    mkdir python
    cd python
    pip install -r ../requirements.txt -t ./
    zip -r ../securityhub_deps.zip .
    cd ..
fi

declare -a regions=("us-east-2" "us-east-1" "us-west-1" "us-west-2" "ap-south-1" "ap-northeast-2" "ap-southeast-1" "ap-southeast-2" "ap-northeast-1" "ca-central-1" "eu-central-1" "eu-west-1" "eu-west-2" "eu-west-3" "sa-east-1")

for i in "${regions[@]}"
do
    echo "Deploying layer in $i"
    bucket_name="appdevzipfiles-$i"
    # Expansions are double-quoted to survive word splitting/globbing.
    aws s3 cp securityhub_deps.zip "s3://$bucket_name/" --region "$i"

    aws lambda publish-layer-version --layer-name securityhub_deps --description "contains securityhub solution dependencies" --license-info "MIT" --content "S3Bucket=$bucket_name,S3Key=securityhub_deps.zip" --compatible-runtimes python3.7 python3.6 --region "$i"

    aws lambda add-layer-version-permission --layer-name securityhub_deps --statement-id securityhub-deps --version-number 1 --principal '*' --action lambda:GetLayerVersion --region "$i"
done

# aws lambda remove-layer-version-permission --layer-name securityhub_deps --version-number 1 --statement-id securityhub-deps --region us-east-1
# aws lambda get-layer-version-policy --layer-name securityhub_deps --region us-east-1
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/dlqutils.js:
--------------------------------------------------------------------------------
1 | var AWS = require("aws-sdk");
2 |
3 | function Messages(env) {
4 | this.sqs = new AWS.SQS({region: env.AWS_REGION});
5 | this.env = env;
6 | }
7 |
8 | Messages.prototype.receiveMessages = function (messageCount, callback) {
9 | var params = {
10 | QueueUrl: this.env.TASK_QUEUE_URL,
11 | MaxNumberOfMessages: messageCount
12 | };
13 | this.sqs.receiveMessage(params, callback);
14 | };
15 |
16 | Messages.prototype.deleteMessage = function (receiptHandle, callback) {
17 | this.sqs.deleteMessage({
18 | ReceiptHandle: receiptHandle,
19 | QueueUrl: this.env.TASK_QUEUE_URL
20 | }, callback);
21 | };
22 |
23 | function invokeLambdas(awsRegion, numOfWorkers, functionName, payload, context) {
24 |
25 | for (var i = 0; i < numOfWorkers; i++) {
26 | var lambda = new AWS.Lambda({
27 | region: awsRegion
28 | });
29 | lambda.invoke({
30 | InvocationType: 'Event',
31 | FunctionName: functionName,
32 | Payload: payload
33 | }, function(err, data) {
34 | if (err) {
35 | context.fail(err);
36 | } else {
37 | context.succeed('success');
38 | }
39 | });
40 | }
41 | }
42 |
43 | module.exports = {
44 | Messages: Messages,
45 | invokeLambdas: invokeLambdas
46 | };
47 |
--------------------------------------------------------------------------------
/cloudwatchevents/guardduty/template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: AWS::Serverless-2016-10-31
3 | Description: >
4 |     This function is invoked by AWS CloudWatch events in response to state change in your AWS resources which matches an event target definition. The event payload received is then forwarded to Sumo Logic HTTP source endpoint.
5 |
6 | # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
7 | Globals:
8 | Function:
9 | Timeout: 300
10 |
11 | Parameters:
12 | SumoEndpointUrl:
13 | Type: String
14 |
15 | Resources:
16 |
17 | CloudWatchEventFunction:
18 | Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction
19 | Properties:
20 | CodeUri: ../src/
21 | Handler: cloudwatchevents.handler
22 | Runtime: nodejs8.10
23 | Environment:
24 | Variables:
25 | SUMO_ENDPOINT: !Ref SumoEndpointUrl
26 | Events:
27 | CloudWatchEventTrigger:
28 | Type: CloudWatchEvent
29 | Properties:
30 | Pattern:
31 | source:
32 | - aws.guardduty
33 | Outputs:
34 |
35 | CloudWatchEventFunction:
36 | Description: "CloudWatchEvent Processor Function ARN"
37 | Value: !GetAtt CloudWatchEventFunction.Arn
38 |
--------------------------------------------------------------------------------
/securityhub-collector/sam/packaged.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: AWS::Serverless-2016-10-31
3 | Description: 'This solution consists of a lambda function which gets triggered
4 | by CloudWatch events with findings as payload which are then ingested to Sumo Logic
5 | via S3 source
6 |
7 | '
8 | Globals:
9 | Function:
10 | Timeout: 300
11 | Parameters:
12 | S3SourceBucketName:
13 | Type: String
14 | Resources:
15 | SecurityHubCollectorFunction:
16 | Type: AWS::Serverless::Function
17 | Properties:
18 | Handler: securityhub_collector.lambda_handler
19 | Runtime: python3.7
20 | CodeUri: s3://appdevstore/3821fd9c5288ebaca71e4ea0b26629ab
21 | MemorySize: 128
22 | Timeout: 300
23 | Policies:
24 | - Statement:
25 | - Sid: SecurityHubS3PutObjectPolicy
26 | Effect: Allow
27 | Action:
28 | - s3:PutObject
29 | Resource:
30 | - Fn::Sub: arn:aws:s3:::${S3SourceBucketName}
31 | - Fn::Sub: arn:aws:s3:::${S3SourceBucketName}/*
32 | Environment:
33 | Variables:
34 | S3_LOG_BUCKET:
35 | Ref: S3SourceBucketName
36 | Events:
37 | CloudWatchEventTrigger:
38 | Type: CloudWatchEvent
39 | Properties:
40 | Pattern:
41 | source:
42 | - aws.securityhub
43 | Outputs:
44 | SecurityHubCollectorFunction:
45 | Description: SecurityHubCollector Function ARN
46 | Value:
47 | Fn::GetAtt:
48 | - SecurityHubCollectorFunction
49 | - Arn
50 |
--------------------------------------------------------------------------------
/securityhub-collector/sam/template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: 'AWS::Serverless-2016-10-31'
3 | Description: >
4 |     This solution consists of a lambda function which gets triggered by CloudWatch events with findings as payload which are then ingested to Sumo Logic via S3 source
5 |
6 | Globals:
7 | Function:
8 | Timeout: 300
9 |
10 | Parameters:
11 | S3SourceBucketName:
12 | Type: String
13 |
14 | Resources:
15 |
16 | SecurityHubCollectorFunction:
17 | Type: 'AWS::Serverless::Function'
18 | Properties:
19 | Handler: securityhub_collector.lambda_handler
20 | Runtime: python3.7
21 | CodeUri: ../src/
22 | MemorySize: 128
23 | Timeout: 300
24 | Policies:
25 | - Statement:
26 | - Sid: SecurityHubS3PutObjectPolicy
27 | Effect: Allow
28 | Action:
29 | - "s3:PutObject"
30 | Resource:
31 | - !Sub 'arn:aws:s3:::${S3SourceBucketName}'
32 | - !Sub 'arn:aws:s3:::${S3SourceBucketName}/*'
33 |
34 | Environment:
35 | Variables:
36 | S3_LOG_BUCKET: !Ref S3SourceBucketName
37 |
38 | Events:
39 | CloudWatchEventTrigger:
40 | Type: CloudWatchEvent
41 | Properties:
42 | Pattern:
43 | source:
44 | - aws.securityhub
45 |
46 | Outputs:
47 | SecurityHubCollectorFunction:
48 | Description: "SecurityHubCollector Function ARN"
49 | Value: !GetAtt SecurityHubCollectorFunction.Arn
50 |
--------------------------------------------------------------------------------
/cloudwatchevents/guardduty/event.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0",
3 | "id": "f81b1e52-fff2-e312-7b6f-66e0a353fee4",
4 | "detail-type": "AWS API Call via CloudTrail",
5 | "source": "aws.guardduty",
6 | "account": "456227676011",
7 | "time": "2018-10-03T00:53:52Z",
8 | "region": "us-east-2",
9 | "resources": [],
10 | "detail": {
11 | "eventVersion": "1.05",
12 | "userIdentity": {
13 | "type": "IAMUser",
14 | "principalId": "AIDAIFSYASDF6O2ZUR4M",
15 | "arn": "arn:aws:iam::45234274376011:user/HELLOWORLD",
16 | "accountId": "4562123123011",
17 | "accessKeyId": "ASIA123123123MU7V",
18 | "userName": "hello",
19 | "sessionContext": {
20 | "attributes": {
21 | "mfaAuthenticated": "true",
22 | "creationDate": "2018-10-02T23:20:50Z"
23 | }
24 | },
25 | "invokedBy": "signin.amazonaws.com"
26 | },
27 | "eventTime": "2018-10-03T00:53:52Z",
28 | "eventSource": "guardduty.amazonaws.com",
29 | "eventName": "CreateSampleFindings",
30 | "awsRegion": "us-east-2",
31 | "sourceIPAddress": "122.177.239.147",
32 | "userAgent": "signin.amazonaws.com",
33 | "requestParameters": {
34 | "detectorId": "d0b31b9d4905e74c121212b12e79f"
35 | },
36 | "responseElements": null,
37 | "requestID": "cbd191aa-1234-11e8-a342-399aed033d0d",
38 | "eventID": "1329e405-1234-4289-adb7-612b503622a5",
39 | "readOnly": false,
40 | "eventType": "AwsApiCall"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/securityhub-collector/README.md:
--------------------------------------------------------------------------------
1 | # sumologic-securityhub-collector
2 |
3 | This solution consists of a lambda function which gets triggered by CloudWatch events with findings as payload which are then ingested to Sumo Logic via S3 source
4 |
5 |
6 | Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
7 |
8 | 
9 |
10 | ## Setup
11 |
12 |
13 | 1. Configure a [Hosted Collector](https://help.sumologic.com/03Send-Data/Hosted-Collectors/Configure-a-Hosted-Collector) and an [AWS S3 Source](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon-Web-Services/AWS-S3-Source#AWS_Sources) to Sumo Logic, and in Advanced Options for Logs, under Timestamp Format, click Specify a format and enter the following:
14 | Specify Format as yyyy-MM-dd'T'HH:mm:ss.SSS'Z'
15 | Specify Timestamp locator as .*"UpdatedAt":"(.*)".*
16 |
17 | 2. Deploying the SAM Application
18 | 1. Open a browser window and enter the following URL: https://serverlessrepo.aws.amazon.com/applications
19 | 2. In the Serverless Application Repository, search for sumologic.
20 | 3. Select Show apps that create custom IAM roles or resource policies check box.
21 |     4. Click the sumologic-securityhub-collector link, and then click Deploy.
22 | 5. In the Configure application parameters panel, enter the name of the S3 bucket configured while creating AWS S3 source.
23 | Click Deploy.
24 |
25 |
26 | ## License
27 |
28 | Apache License 2.0 (Apache-2.0)
29 |
30 |
31 | ## Support
32 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
33 |
34 |
--------------------------------------------------------------------------------
/securityhub-forwarder/src/utils.py:
--------------------------------------------------------------------------------
1 | import time
2 | from functools import wraps
3 |
4 |
5 | def fixed_sleep(fixed_wait_time):
6 | def handler():
7 | return fixed_wait_time
8 | return handler
9 |
10 |
def incrementing_sleep(wait_time_inc, start_wait_time=3):
    """Return a delay generator producing linearly increasing wait times.

    The first call returns ``start_wait_time``; each subsequent call adds
    ``wait_time_inc`` seconds (an arithmetic progression).
    """
    attempt = 1

    def handler():
        nonlocal attempt
        # wait = start + (attempt - 1) * increment
        # (removed leftover debug print that polluted Lambda logs)
        result = start_wait_time + (attempt - 1) * wait_time_inc
        attempt += 1
        return result
    return handler
21 |
22 |
def exponential_sleep(multiplier):
    """Return a delay generator with exponential backoff: multiplier * 2**attempt."""
    attempt = 1

    def handler():
        nonlocal attempt
        # First call yields multiplier*2, then multiplier*4, multiplier*8, ...
        wait = multiplier * (2 ** attempt)
        attempt += 1
        return wait
    return handler
33 |
34 |
def retry_if_exception_of_type(retryable_types):
    """Build a predicate that tells whether an exception instance is retryable.

    ``retryable_types`` may be a single exception class or a tuple of classes.
    """
    def _predicate(exception):
        return isinstance(exception, retryable_types)
    return _predicate
39 |
40 |
def retry(ExceptionToCheck=(Exception,), max_retries=4,
          logger=None, handler_type=exponential_sleep, *hdlrargs, **hdlrkwargs):
    """Decorator that retries the wrapped callable on the given exception types.

    A fresh delay generator (built by ``handler_type(*hdlrargs, **hdlrkwargs)``)
    is created per invocation. Up to ``max_retries`` attempts are made in
    total; the final attempt propagates any exception to the caller.
    """
    def deco_retry(f):

        @wraps(f)
        def f_retry(*args, **kwargs):
            next_delay = handler_type(*hdlrargs, **hdlrkwargs)
            # First delay is drawn eagerly, before any attempt is made.
            wait_time = next_delay()
            for _ in range(max_retries - 1):
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as exc:
                    message = "%s, Retrying in %d seconds..." % (str(exc), wait_time)
                    if logger:
                        logger.warning(message)
                    else:
                        print(message)
                    time.sleep(wait_time)
                    wait_time = next_delay()
            # Last attempt: no exception handling, errors surface to caller.
            return f(*args, **kwargs)

        return f_retry

    return deco_retry
67 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | dist: trusty
3 | language: node_js
4 | python:
5 | - '2.7'
6 | jobs:
7 | include:
8 | - stage: Node 8
9 | node_js: '8.10'
10 | env: FUNCTION_DIR=cloudwatchlogs-with-dlq TEST_DIR=cloudwatchlogs-with-dlq TEST_FILE=test_cwl_lambda.py NODE_VERSION="8.10"
11 | - stage: Node 8
12 | node_js: '8.10'
13 | env: FUNCTION_DIR=loggroup-lambda-connector TEST_DIR=loggroup-lambda-connector/test TEST_FILE=test_loggroup_lambda_connector.py NODE_VERSION="8.10"
14 | before_install:
15 | - sudo apt-get install python-pip
16 | - cd $TEST_DIR && sudo pip install -r requirements.txt && cd -
17 | script:
18 | - cd $FUNCTION_DIR && npm run build && cd -
19 | - cd $TEST_DIR && python $TEST_FILE && cd -
20 | env:
21 | global:
22 | - secure: iGATK/X1PfH8FCJlUpBbJ//mQ57QBQT0jETyDDH2r+GxZxXPnFS46ugPGsWTX0IDDEGjXE+/P6wnm0ORo7aa3yp7DZnfWFDyrgFchgUo1p3unt5sQcHg/9mitkQF7lHFlnAqM1D69AEGP5WU63m/9xQoj0BVYCVb2eUEqatV1AU8lpvRAkFc2juumy1ba4skoDFLOtrsaO2k+SCCBfMKq2SOJkcPnfMZGTOT7niaNnNAZSDYDovAlMYaLDOR10EMUAyklnmmAADyDxNRwSSAG8JKJMmfxSqIe4+s7xjqztjtkApWmEAjevDLpc62v1TWe0O2zCxnb4E4EBN6A31R1XJha4i/IKSeVL99J9X8aV1wIb9feV85qmZmlxVL2EU7/CcFGTdKeAak3qQWHZ1C6X32TBB6x5C0qASSC/x5pwDDJIOyeZr0Z93+dDhnBLNmzz8sA3h7AyzQZfhTqG/f4/SOgxTf7aF13X8BKuoM8BaGfXjy0keaVb0xbTjtDvi9F21EymWPdwNlQHsKca+EcTT2KE3mwFNrHAsTeNhGzMbrzmbZzvNHlmIwjB1C5l9h4GpUkxNb/mqi9SBTx9YfDIgz0bDOds1T92tGIAcBaHfJLTjc2JIgxwgdL13X3cL8GJBiFFwiJqiKJCSz4SxhWqsrrbsGSQBUqN5UoSsyKco=
23 | - secure: pmgNH6sLnwPadB/m4e/DtV/NbblVCa84N56Q45vpDkdP7fSIt8YShkvElrxFNJBWXaPyG/uE2gnIrXREtN/7xox+xv2ej+Gsv+cLYwBIIs1oGgHVlm/JG4OBLhuSn23w/DK/RuuHWWjDJ2DsDaXlXgbPTU01EJC2kpM9YnsmeifDnq/HSNPVx8k6bBKhzED7atf8v8yy8XYAkpL3viNwm3B98xU/AvEcgrNwG0XYQexCBTm9nJTQ2q3sBFQfuvQXFNuQoQWuN0wmSlhFuAnGsm0nugk7YJ8HZTsw1X1OUW61J3c9p0BHKL69nWHoYvSkyzl/9kls3QxYhLumF2DepBSbw/+iKMkxNzd4s7DDKGMqM7Y/9omxj3djrGxn8qGpn7GKNyZJR5EqLS+KY9E7xQ6ql1COdUA1W6aTEzLeEelti4abHEoA7a5sEhRSC/rmR0v+PP3sKc2FJjDOB9/eBVG/8V05EgN3Ji7KEu5vsrvIzu1Ng4a7BUyM06gw1vF92H/uOOBGGh25H8LLIZTpB9z//brZ9RtrzSA585KyJPFFW8JdMl34CE+nz8DhGwXSCDBQz/HMh0h1RJ0+8nJkIuxi96yOPH73c1tngUTnhm7OZh7yyNCr1RLT7yS552stnR0WqSv8gSxWK1+Apmzi6P5s5oqraDhEW9CeQe/qkzI=
24 |
--------------------------------------------------------------------------------
/kinesisfirehose-processor/kinesisfirehose-processor.js:
--------------------------------------------------------------------------------
// Encode a UTF-8 string to base64.
// Uses Buffer.from: the `new Buffer(...)` constructor is deprecated
// (security-hazard API, emits DEP0005 warnings on modern Node).
function encodebase64(data) {
    return Buffer.from(data, 'utf8').toString('base64');
}
4 |
// Decode a base64 string to UTF-8.
// Uses Buffer.from instead of the deprecated `new Buffer(...)` constructor.
function decodebase64(data) {
    return Buffer.from(data, 'base64').toString('utf8');
}
8 |
// Append a record delimiter (newline by default) to a base64-encoded
// payload and return the re-encoded result for Firehose.
function addDelimitertoJSON(data, delimiter) {
    if (typeof delimiter === 'undefined') {
        delimiter = '\n';
    }
    const decoded = decodebase64(data);
    return encodebase64(decoded + delimiter);
}
16 |
// Flatten a base64-encoded JSON object into a single "k1=v1 k2=v2" line
// (e.g. {k1:v1,k2:v2} becomes "k1=v1 k2=v2\n"), re-encoded as base64.
function convertToLine(data) {
    const entryObj = JSON.parse(decodebase64(data));
    const pairs = [];
    for (const key of Object.keys(entryObj)) {
        pairs.push(key + "=" + entryObj[key]);
    }
    return encodebase64(pairs.join(" ") + "\n");
}
30 | exports.handler = (event, context, callback) => {
31 | console.log("invoking transformation lambda");
32 | let success = 0;
33 | let failure = 0;
34 |
35 | const output = event.records.map( function (record) {
36 | try {
37 | // let resultdata = convertToLine(record.data);
38 | let resultdata = addDelimitertoJSON(record.data);
39 | success++;
40 | return {
41 | recordId: record.recordId,
42 | result: 'Ok',
43 | data: resultdata
44 | };
45 | } catch(error) {
46 | console.log("Error in record transformation", error);
47 | failure++;
48 | return {
49 | recordId: record.recordId,
50 | result: 'ProcessingFailed',
51 | data: record.data,
52 | };
53 | }
54 | });
55 | console.log(`Processing completed.Total records ${output.length}. Success ${success} Failed ${failure}`);
56 | callback(null, { records: output });
57 | };
58 |
--------------------------------------------------------------------------------
/securityhub-collector/src/securityhub_collector.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import logging
4 | import sys
5 | sys.path.insert(0, '/opt') # layer packages are in opt directory
6 | import boto3
7 | from collections import defaultdict
8 |
9 |
# Destination S3 bucket (set via the Lambda environment) and its region.
BUCKET_NAME = os.getenv("S3_LOG_BUCKET")
BUCKET_REGION = os.getenv("AWS_REGION")
# Module-level client so warm Lambda invocations reuse the connection.
s3cli = boto3.client('s3', region_name=BUCKET_REGION)


# Root logger; Lambda's runtime attaches its own handler.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
17 |
18 |
def post_to_s3(findings, filename, silent=False):
    """Serialize findings as blank-line-separated JSON and upload to S3.

    :param findings: list of finding dicts to serialize
    :param filename: S3 object key to write
    :param silent: when True, upload failures are logged but not re-raised
    :returns: True when the upload succeeded, else False (reachable only
        when ``silent`` is True)
    """
    findings_data = "\n\n".join(json.dumps(data) for data in findings)
    is_success = False
    try:
        response = s3cli.put_object(Body=findings_data, Bucket=BUCKET_NAME, Key=filename)
        is_success = True
        logger.info("Saved %d findings to s3 %s status_code: %s" % (len(findings), filename, response["ResponseMetadata"].get("HTTPStatusCode")))
    except Exception as e:
        logger.error("Failed to post findings to S3: %s" % str(e))
        if not silent:
            # Bare raise preserves the original traceback ("raise e" rebinds
            # the traceback to this frame).
            raise

    return is_success
33 |
34 |
def send_findings(findings, context):
    """Bucket findings by ProductArn and upload one S3 object per product.

    The object key is "<ProductArn>-<aws_request_id>" so repeated
    invocations never overwrite each other.
    """
    total = 0
    grouped = defaultdict(list)
    for finding in findings:
        grouped[finding['ProductArn']].append(finding)
        total += 1

    for product_arn, product_findings in grouped.items():
        key = "%s-%s" % (product_arn, context.aws_request_id)
        post_to_s3(product_findings, key)

    logger.info("Finished Sending NumFindings: %d" % (total))
49 |
50 |
def lambda_handler(event, context):
    """Entry point: extract Security Hub findings from a CloudWatch event
    and forward them to S3."""
    logger.info("Invoking SecurityHubCollector source %s region %s" % (event['source'], event['region']))
    detail = event['detail']
    send_findings(detail.get('findings', []), context)
55 |
56 |
if __name__ == '__main__':

    # Local smoke test: load a sample CloudWatch event and run the handler
    # against a fixed test bucket. `with` closes the file handle, which the
    # previous json.load(open(...)) form leaked.
    with open('../sam/event.json') as event_file:
        event = json.load(event_file)
    BUCKET_NAME = "securityhubfindings"

    class context:
        # Minimal stand-in for the Lambda context object; only
        # aws_request_id is read by send_findings.
        aws_request_id = "testid12323"

    lambda_handler(event, context)
66 |
--------------------------------------------------------------------------------
/sumologic-app-utils/deploy.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Deploy sumo_app_utils (helper lambda used by Sumo Logic CloudFormation
# custom resources) and the guardduty benchmark SAM app that depends on it.
# The target bucket/region pair is selected via the AWS_PROFILE env var.

if [ "$AWS_PROFILE" == "prod" ]
then
    SAM_S3_BUCKET="appdevstore"
    AWS_REGION="us-east-1"
else
    SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
    AWS_REGION="us-east-2"
fi

# Remove stale build artifacts; -f keeps the script quiet on a fresh
# checkout where these files do not exist yet.
rm -f src/external/*.pyc
rm -f src/*.pyc
rm -f sumo_app_utils.zip

if [ ! -f sumo_app_utils.zip ]; then
    echo "creating zip file"
    # Stage dependencies and sources in a temporary "python" dir, then zip it.
    # -p tolerates a leftover directory from an interrupted previous run.
    mkdir -p python
    cd python
    pip install crhelper -t .
    pip install requests -t .
    cp -v ../src/*.py .
    zip -r ../sumo_app_utils.zip .
    cd ..
    rm -r python
fi

aws s3 cp sumo_app_utils.zip s3://$SAM_S3_BUCKET/ --region $AWS_REGION

sam package --template-file sumo_app_utils.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged_sumo_app_utils.yaml

sam deploy --template-file packaged_sumo_app_utils.yaml --stack-name testingsumoapputils --capabilities CAPABILITY_IAM --region $AWS_REGION

# Before testing below command one needs to publish new version of sumo_app_utils and change version in template
# TODO(review): the absolute /Users/hpal paths below only work on the original
# author's machine — parameterize them before anyone else runs this section.
sam package --template-file /Users/hpal/git/sumologic-aws-lambda/cloudwatchevents/guarddutybenchmark/template_v2.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file /Users/hpal/git/sumologic-aws-lambda/cloudwatchevents/guarddutybenchmark/packaged_v2.yaml

sam deploy --template-file /Users/hpal/git/sumologic-aws-lambda/cloudwatchevents/guarddutybenchmark/packaged_v2.yaml --stack-name guarddutysamdemo --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region $AWS_REGION --parameter-overrides SumoAccessID=$SUMO_ACCESS_ID SumoAccessKey=$SUMO_ACCESS_KEY SumoDeployment=$SUMO_DEPLOYMENT RemoveSumoResourcesOnDeleteStack="true"

#aws cloudformation describe-stack-events --stack-name testingsecurityhublambda --region $AWS_REGION
#aws cloudformation get-template --stack-name testingsecurityhublambda --region $AWS_REGION
# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-connector --semantic-version 1.0.1 --template-body file://packaged.yaml
--------------------------------------------------------------------------------
/cloudwatchevents/guardduty/README.md:
--------------------------------------------------------------------------------
1 | # sumologic-guardduty-events-processor
2 |
3 | This solution creates resources for processing and sending Amazon GuardDuty Events to Sumo Logic.
4 |
5 |
6 | Made with ❤️ by Sumo Logic AppDev Team. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
7 |
8 | 
9 |
10 | ## Setup
11 | 1. First create an HTTP collector endpoint within SumoLogic. You will need the endpoint URL for the lambda function later.
12 | 2. Go to https://serverlessrepo.aws.amazon.com/applications.
13 | 3. Search for sumologic-guardduty-events-processor and click on deploy.
14 | 4. In Configure application parameters panel paste the HTTP collector endpoint previously configured.
15 | 5. Click on Deploy
16 |
17 | ## Lambda Environment Variables
18 | The following AWS Lambda environment variables are supported
19 |
20 | SUMO_ENDPOINT (REQUIRED) - SumoLogic HTTP Collector endpoint URL.
21 | SOURCE_CATEGORY_OVERRIDE (OPTIONAL) - Override _sourceCategory metadata field within SumoLogic. If none will not be overridden
22 | SOURCE_HOST_OVERRIDE (OPTIONAL) - Override _sourceHost metadata field within SumoLogic. If none will not be overridden
23 | SOURCE_NAME_OVERRIDE (OPTIONAL) - Override _sourceName metadata field within SumoLogic. If none will not be overridden
24 |
25 | ## Excluding Outer Event Fields
26 |
27 | By default, a CloudWatch Event has a format similar to this:
28 | ```
29 | {
30 | "version":"0",
31 | "id":"0123456d-7e46-ecb4-f5a2-e59cec50b100",
32 | "detail-type":"AWS API Call via CloudTrail",
33 | "source":"aws.logs",
34 | "account":"012345678908",
35 | "time":"2017-11-06T23:36:59Z",
36 | "region":"us-east-1",
37 | "resources":[ ],
38 | "detail":▶{ … }
39 | }
40 | ```
41 | This event will be sent as-is to Sumo Logic. If you just want to send the detail key instead, set the removeOuterFields variable to true.
42 |
43 |
44 | ## License
45 |
46 | Apache License 2.0 (Apache-2.0)
47 |
48 |
49 | ## Support
50 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
51 |
52 |
--------------------------------------------------------------------------------
/cloudwatchevents/guarddutybenchmark/README.md:
--------------------------------------------------------------------------------
1 | # sumologic-guardduty-benchmark
2 |
3 | This solution installs the Guardduty Benchmark App, creates collectors/sources in Sumo Logic platform and deploys the lambda function in your AWS account using configuration provided at the time of sam application deployment.
4 |
5 |
6 | Made with ❤️ by Sumo Logic AppDev Team. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
7 |
8 | 
9 |
10 | ## Setup
11 | 1. Generate Access key from sumologic console as per [docs](https://help.sumologic.com/Manage/Security/Access-Keys#Create_an_access_key).
12 |
13 | 2. Go to https://serverlessrepo.aws.amazon.com/applications.
14 | 3. Search for sumologic-guardduty-benchmark and click on deploy.
15 | 4. In the Configure application parameters panel, enter the following parameters
16 | * Access ID(Required): Sumo Logic Access ID generated from Step 1
17 | * Access Key(Required): Sumo Logic Access Key generated from Step 1
18 | * Deployment Name(Required): Deployment name (environment name in lower case as per [docs](https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-and-Firewall-Security))
19 | * Collector Name: Enter the name of the Hosted Collector which will be created in Sumo Logic.
20 | * Source Name: Enter the name of the HTTP Source which will be created within the collector.
21 | * Source Category Name: Enter the name of the Source Category which will be used for writing search queries.
22 | 5. Click on Deploy
23 |
24 |
25 | ## Excluding Outer Event Fields
26 |
27 | By default, a CloudWatch Event has a format similar to this:
28 | ```
29 | {
30 | "version":"0",
31 | "id":"0123456d-7e46-ecb4-f5a2-e59cec50b100",
32 | "detail-type":"AWS API Call via CloudTrail",
33 | "source":"aws.logs",
34 | "account":"012345678908",
35 | "time":"2017-11-06T23:36:59Z",
36 | "region":"us-east-1",
37 | "resources":[ ],
38 | "detail":▶{ … }
39 | }
40 | ```
41 | This event will be sent as-is to Sumo Logic. If you just want to send the detail key instead, set the removeOuterFields variable to true.
42 |
43 |
44 | ## License
45 |
46 | Apache License 2.0 (Apache-2.0)
47 |
48 |
49 | ## Support
50 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
51 |
52 |
--------------------------------------------------------------------------------
/s3/README.md:
--------------------------------------------------------------------------------
1 | # Warning: This Lambda Function has been deprecated
2 | We recommend using [S3 Event Notifications Integration](https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon_Web_Services/AWS_S3_Source#S3_Event_Notifications_Integration).
3 |
4 | S3 to Sumo Logic
5 | ===========================================
6 |
7 | Files
8 | -----
9 | * *node.js/s3.js*: node.js function to read files from an S3 bucket to a Sumo Logic hosted HTTP collector. Files in the source bucket can be gzipped, or in cleartext, but should contain only texts. The function receives S3 notifications on new files uploaded to the source S3 bucket, then reads these files, or unzips them if the file names end with `gz`, and finally sends the data to the target Sumo endpoint.
10 |
11 | ## Lambda Setup
12 | For the Sumo collector configuration, do not enable multiline processing or
13 | one message per request -- the idea is to send as many messages in one request
14 | as possible to Sumo and let Sumo break them apart as needed.
15 |
16 | In the AWS console, use a code entry type of 'Edit code inline' and paste in the
17 | code (doublecheck the hostname and path as per your collector setup).
18 |
19 | In configuration specify index.handler as the Handler. Specify a Role that has
20 | sufficient privileges to read from the *source* bucket, and invoke a lambda
21 | function. One can use the AWSLambdaBasicExecution and the AWSS3ReadOnlyAccess role, although it is *strongly* recommended to customize them to restrict to relevant resources in production:
22 |
23 |
24 | {
25 | "Version": "2012-10-17",
26 | "Statement": [
27 | {
28 | "Effect": "Allow",
29 | "Action": [
30 | "logs:CreateLogGroup",
31 | "logs:CreateLogStream",
32 | "logs:PutLogEvents"
33 | ],
34 | "Resource": "arn:aws:logs:*:*:*"
35 | }
36 | ]
37 | }
38 |
39 |
40 | AND
41 |
42 |
43 | {
44 | "Version": "2012-10-17",
45 | "Statement": [
46 | {
47 | "Effect": "Allow",
48 | "Action": [
49 | "s3:Get*",
50 | "s3:List*"
51 | ],
52 | "Resource": "*"
53 | }
54 | ]
55 | }
56 |
57 |
58 | Once the function is created, you can tie it to the source S3 bucket. From the S3 Management console, select the bucket, goto its Properties, select Events and add a Notification. From there, provide a name for the notification, select *ObjectCreated (All)* as the Events, and select *Lambda* as the *Send To* option. Finally, select the Lambda function created above and Save.
59 |
60 |
61 |
--------------------------------------------------------------------------------
/sumologic-app-utils/src/main.py:
--------------------------------------------------------------------------------
1 | from crhelper import CfnResource
2 | from api import ResourceFactory
3 |
4 | helper = CfnResource(json_logging=False, log_level='DEBUG')
5 |
6 |
def get_resource(event):
    """Build the Sumo resource object described by a CloudFormation event.

    The last segment of the custom resource type name (after "::") selects
    the implementation class via ResourceFactory. Returns a
    (resource, resource_type, params) triple ready for create/update/delete.
    """
    resource_type = event.get("ResourceType").split("::")[-1]
    props = event.get("ResourceProperties")
    resource_cls = ResourceFactory.get_resource(resource_type)
    resource = resource_cls(props["SumoAccessID"], props["SumoAccessKey"], props["SumoDeployment"])
    params = resource.extract_params(event)
    # CloudFormation passes booleans as strings; normalize to a real bool.
    params["remove_on_delete_stack"] = props.get("RemoveOnDeleteStack") == 'true'
    print(params)
    return resource, resource_type, params
16 |
17 |
@helper.create
def create(event, context):
    """CloudFormation Create handler.

    Creates the Sumo Logic resource described by the event and returns a
    PhysicalResourceId of the form "<LogicalResourceId>/<resource_id>".
    crhelper generates an id when None is returned; if a poll_create
    function is defined, the return value is placed into the poll event
    as event['CrHelperData']['PhysicalResourceId'].
    """
    resource, resource_type, params = get_resource(event)
    data, resource_id = resource.create(**params)
    print(data)
    print(resource_id)
    helper.Data.update(data)
    helper.Status = "SUCCESS"
    print("Created %s" % resource_type)
    physical_id = "%s/%s" % (event.get('LogicalResourceId', ''), resource_id)
    return physical_id
32 |
33 |
@helper.update
def update(event, context):
    # CloudFormation Update handler.
    # NOTE(review): update delegates to resource.create() rather than a
    # dedicated update method — presumably the Sumo resource is re-created
    # idempotently on update; confirm against the api.ResourceFactory classes.
    resource, resource_type, params = get_resource(event)
    data, resource_id = resource.create(**params)
    print(data)
    print(resource_id)
    helper.Data.update(data)
    helper.Status = "SUCCESS"
    print("Updated %s" % resource_type)
    return "%s/%s" % (event.get('LogicalResourceId', ''), resource_id)
    # If the update resulted in a new resource being created, return an id for the new resource.
    # CloudFormation will send a delete event with the old id when stack update completes
46 |
47 |
@helper.delete
def delete(event, context):
    """CloudFormation Delete handler.

    Skips deletion when the PhysicalResourceId never reached the
    "<logical_id>/<resource_id>" form (e.g. create failed early). Returns
    nothing; must not fail when the underlying resources are already gone.
    """
    physical_id = event.get('PhysicalResourceId', "")
    if "/" not in physical_id:
        print("%s resource_id not found" % event.get('PhysicalResourceId'))
        return
    resource, resource_type, params = get_resource(event)
    resource.delete(**params)
    helper.Status = "SUCCESS"
    print("Deleted %s" % resource_type)
58 |
59 |
def handler(event, context):
    """Lambda entry point: delegate to the crhelper dispatcher, which routes
    the event to the create/update/delete functions above."""
    helper(event, context)
62 |
--------------------------------------------------------------------------------
/inspector/Readme.md:
--------------------------------------------------------------------------------
1 | # SumoLogic Lambda Function for Amazon Inspector
2 |
3 | This function receives the records published to an SNS Topic by Amazon Inspector. It looks up an Inspector object based on its ARN and type, and then adds extra context to the final messages, which are compressed and sent to a Sumo Logic HTTP source endpoint.
4 |
5 | ## Lambda Setup ([docs](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-Inspector-App/01-Collect-Data-for-Amazon-Inspector))
6 |
7 | ### Create an Amazon SNS Topic
8 | 1. Login to the Amazon Console.
9 | 2. Go to Application Integration > Simple Notification Service (SNS).
10 | 3. On the SNS Dashboard, select Create topic.
11 | 4. Enter a Topic name and a Display name, and click Create topic.
12 | 5. To assign the following policy to this topic, select the topic, then under Advanced view, click Actions/Edit topic policy.
13 | 6. Replace the existing text with the following:
14 | ```
15 | {
16 | "Version": "2008-10-17",
17 | "Id": "inspector-sns-publish-policy",
18 | "Statement": [
19 | {
20 | "Sid": "inspector-sns-publish-statement",
21 | "Effect": "Allow",
22 | "Principal": {
23 | "Service": "inspector.amazonaws.com"
24 | },
25 | "Action": "SNS:Publish",
26 | "Resource": "arn:aws:sns:*"
27 | }
28 | ]
29 | }
30 | ```
31 | 7. Click Update policy.
32 |
33 | ### Configure Amazon Inspector
34 | 1. In the Amazon Console, go to Security, Identity & Compliance > Inspector.
35 | 2. Select each assessment template you want to monitor.
36 | 3. Expand each row and find the section called SNS topics.
37 | 4. Click the Edit icon and select the SNS topic you created in the previous section.
38 | 5. Click Save.
39 |
40 | ### Create a Role
41 | In the Amazon Console, go to Security, Identity & Compliance > IAM.
42 | Create a new role called Lambda-Inspector.
43 |
44 | ### Create a Lambda Function
45 | 1. In the Amazon Console, go to Compute > Lambda.
46 | 2. Create a new function.
47 | 3. On the Select blueprint page, select a Blank function.
48 | 4. Select the SNS topic you created in Create an Amazon SNS Topic as trigger.
49 | 5. Click Next.
50 | 6. On the Configure function page, enter a name for the function.
51 | 7. Go to https://github.com/SumoLogic/sumologic-aws-lambda/blob/master/inspector/python/inspector.py and copy and paste the sumologic-aws-lambda code into the field.
52 | 8. Edit the code to enter the URL of the Sumo Logic endpoint that will receive data from the HTTP Source.
53 | 9. Scroll down and configure the rest of the settings as follows:
54 | Memory (MB). 128.
55 | Timeout. 5 min.
56 | VPC. No VCP.
57 | 10. Click Next.
58 | 11. Click Create function.
59 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/sam/packaged.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: '"Lambda Function for automatic subscription of any Sumo Logic lambda
3 | function with loggroups matching an input pattern."
4 |
5 | '
6 | Globals:
7 | Function:
8 | MemorySize: 128
9 | Timeout: 300
10 | Outputs:
11 | SumoLogGroupLambdaConnector:
12 | Description: SumoLogGroupLambdaConnector Function ARN
13 | Value:
14 | Fn::GetAtt:
15 | - SumoLogGroupLambdaConnector
16 | - Arn
17 | Parameters:
18 | LambdaARN:
19 | Default: arn:aws:lambda:us-east-1:123456789000:function:TestLambda
20 | Description: Enter ARN for target lambda function
21 | Type: String
22 | LogGroupPattern:
23 | Default: Test
24 | Description: Enter regex for matching logGroups
25 | Type: String
26 | UseExistingLogs:
27 | AllowedValues:
28 | - 'true'
29 | - 'false'
30 | Default: 'false'
31 | Description: Select true for subscribing existing logs
32 | Type: String
33 | Resources:
34 | SumoCWLambdaInvokePermission:
35 | Properties:
36 | Action: lambda:InvokeFunction
37 | FunctionName:
38 | Ref: LambdaARN
39 | Principal:
40 | Fn::Sub: logs.${AWS::Region}.amazonaws.com
41 | SourceAccount:
42 | Ref: AWS::AccountId
43 | SourceArn:
44 | Fn::Sub: arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*:*
45 | Type: AWS::Lambda::Permission
46 | SumoLogGroupLambdaConnector:
47 | Properties:
48 | CodeUri: s3://appdevstore/6bef113d950a9923b446dd438116f2a1
49 | Environment:
50 | Variables:
51 | LAMBDA_ARN:
52 | Ref: LambdaARN
53 | LOG_GROUP_PATTERN:
54 | Ref: LogGroupPattern
55 | USE_EXISTING_LOG_GROUPS:
56 | Ref: UseExistingLogs
57 | Events:
58 | LambdaTrigger:
59 | Properties:
60 | Pattern:
61 | detail:
62 | eventName:
63 | - CreateLogGroup
64 | eventSource:
65 | - logs.amazonaws.com
66 | source:
67 | - aws.logs
68 | Type: CloudWatchEvent
69 | Handler: loggroup-lambda-connector.handler
70 | Policies:
71 | - Statement:
72 | - Action:
73 | - logs:DescribeLogGroups
74 | - logs:DescribeLogStreams
75 | - logs:PutSubscriptionFilter
76 | Effect: Allow
77 | Resource:
78 | - Fn::Sub: arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*
79 | Sid: ReadWriteFilterPolicy
80 | Runtime: nodejs8.10
81 | Type: AWS::Serverless::Function
82 | Transform: AWS::Serverless-2016-10-31
83 |
--------------------------------------------------------------------------------
/securityhub-collector/sam/event.json:
--------------------------------------------------------------------------------
1 | {
2 | "account": "956882702234",
3 | "detail": {
4 | "findings": [
5 | {
6 | "AwsAccountId": "956882702234",
7 | "Compliance": {
8 | "Status": "FAILED"
9 | },
10 | "CreatedAt": "2019-04-18T14:51:55.000000Z",
11 | "Description": "This search gives top 10 resources which are accessed in last 15 minutes",
12 | "FirstObservedAt": "2019-04-18T14:51:55.000000Z",
13 | "GeneratorId": "InsertFindingsScheduledSearch",
14 | "Id": "sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9",
15 | "ProductArn": "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda",
16 | "ProductFields": {
17 | "aws/securityhub/CompanyName": "Sumo Logic",
18 | "aws/securityhub/FindingId": "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda/sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9",
19 | "aws/securityhub/ProductName": "Machine Data Analytics",
20 | "aws/securityhub/SeverityLabel": "LOW"
21 | },
22 | "RecordState": "ACTIVE",
23 | "Resources": [
24 | {
25 | "Id": "10.178.11.43",
26 | "Type": "Other"
27 | }
28 | ],
29 | "SchemaVersion": "2018-10-08",
30 | "Severity": {
31 | "Normalized": 30
32 | },
33 | "SourceUrl": "https://service.sumologic.com/ui/#/search/RmC8kAUGZbXrkj2rOFmUxmHtzINUgfJnFplh3QWY",
34 | "Title": "Vulnerability: Apple iTunes m3u Playlist File Title Parsing Buffer Overflow Vulnerability(34886) found on 207.235.176.3",
35 | "Types": [
36 | "Software and Configuration Checks/Industry and Regulatory Standards/HIPAA Controls"
37 | ],
38 | "UpdatedAt": "2019-04-18T14:51:55.000000Z",
39 | "WorkflowState": "NEW",
40 | "approximateArrivalTimestamp": 1555599782.881,
41 | "updatedAt": "2019-04-18T14:51:55.000000Z"
42 | }
43 | ]
44 | },
45 | "detail-type": "Security Hub Findings",
46 | "id": "f06f61e9-b099-8321-e446-5a20583bd791",
47 | "region": "us-east-2",
48 | "resources": [
49 | "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda/sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9"
50 | ],
51 | "source": "aws.securityhub",
52 | "time": "2019-04-18T15:03:04Z",
53 | "version": "0"
54 | }
55 |
--------------------------------------------------------------------------------
/securityhub-forwarder/sam/template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: 'AWS::Serverless-2016-10-31'
3 | Description: >
4 | This function is invoked by Sumo Logic(via Scheduled Search) through API Gateway. The event payload received is then forwarded to AWS Security Hub.
5 |
6 | Resources:
7 | SecurityHubForwarderApiGateway:
8 | Type: AWS::Serverless::Api
9 | Properties:
10 | StageName: prod
11 | EndpointConfiguration: EDGE
12 | DefinitionBody:
13 | swagger: "2.0"
14 | info:
15 | title:
16 | Ref: AWS::StackName
17 | description: API endpoint for invoking SecurityHubForwarderFunction
18 | version: 1.0.0
19 | securityDefinitions:
20 | sigv4:
21 | type: "apiKey"
22 | name: "Authorization"
23 | in: "header"
24 | x-amazon-apigateway-authtype: "awsSigv4"
25 | paths:
26 | /findings:
27 | post:
28 | consumes:
29 | - "application/json"
30 | produces:
31 | - "application/json"
32 | responses: {}
33 | security:
34 | - sigv4: []
35 | x-amazon-apigateway-integration:
36 | type: "aws_proxy"
37 | uri:
38 | Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${SecurityHubForwarderFunction.Arn}/invocations
39 | passthroughBehavior: "when_no_match"
40 | httpMethod: "POST"
41 | requestParameters:
42 | integration.request.header.X-Amz-Invocation-Type: "'RequestResponse'"
43 |
44 | SecurityHubForwarderFunction:
45 | Type: 'AWS::Serverless::Function'
46 | Properties:
47 | Handler: securityhub_forwarder.lambda_handler
48 | Runtime: python3.7
49 | Layers:
50 | - !Sub 'arn:aws:lambda:${AWS::Region}:956882708938:layer:securityhub_deps:1'
51 | CodeUri: ../src/
52 | MemorySize: 128
53 | Timeout: 300
54 | Policies:
55 | - Statement:
56 | - Sid: SecurityHubImportFindingsPolicy
57 | Effect: Allow
58 | Action:
59 | - securityhub:BatchImportFindings
60 | Resource: 'arn:aws:securityhub:*:*:*'
61 | Events:
62 | Api1:
63 | Type: Api
64 | Properties:
65 | Path: '/findings'
66 | Method: POST
67 | RestApiId:
68 | Ref: SecurityHubForwarderApiGateway
69 |
70 | Outputs:
71 |
72 | SecurityHubForwarderFunction:
73 | Description: "SecurityHubForwarder Function ARN"
74 | Value: !GetAtt SecurityHubForwarderFunction.Arn
75 | SecurityHubForwarderApiUrl:
76 | Description: URL of your API endpoint
77 | Value: !Sub "https://${SecurityHubForwarderApiGateway}.execute-api.${AWS::Region}.amazonaws.com/prod/findings"
78 |
--------------------------------------------------------------------------------
/securityhub-forwarder/sam/packaged.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'This function is invoked by Sumo Logic(via Scheduled Search) through
3 | API Gateway. The event payload received is then forwarded to AWS Security Hub.
4 |
5 | '
6 | Outputs:
7 | SecurityHubForwarderApiUrl:
8 | Description: URL of your API endpoint
9 | Value:
10 | Fn::Sub: https://${SecurityHubForwarderApiGateway}.execute-api.${AWS::Region}.amazonaws.com/prod/findings
11 | SecurityHubForwarderFunction:
12 | Description: SecurityHubForwarder Function ARN
13 | Value:
14 | Fn::GetAtt:
15 | - SecurityHubForwarderFunction
16 | - Arn
17 | Resources:
18 | SecurityHubForwarderApiGateway:
19 | Properties:
20 | DefinitionBody:
21 | info:
22 | description: API endpoint for invoking SecurityHubForwarderFunction
23 | title:
24 | Ref: AWS::StackName
25 | version: 1.0.0
26 | paths:
27 | /findings:
28 | post:
29 | consumes:
30 | - application/json
31 | produces:
32 | - application/json
33 | responses: {}
34 | security:
35 | - sigv4: []
36 | x-amazon-apigateway-integration:
37 | httpMethod: POST
38 | passthroughBehavior: when_no_match
39 | requestParameters:
40 | integration.request.header.X-Amz-Invocation-Type: '''RequestResponse'''
41 | type: aws_proxy
42 | uri:
43 | Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${SecurityHubForwarderFunction.Arn}/invocations
44 | securityDefinitions:
45 | sigv4:
46 | in: header
47 | name: Authorization
48 | type: apiKey
49 | x-amazon-apigateway-authtype: awsSigv4
50 | swagger: '2.0'
51 | EndpointConfiguration: EDGE
52 | StageName: prod
53 | Type: AWS::Serverless::Api
54 | SecurityHubForwarderFunction:
55 | Properties:
56 | CodeUri: s3://appdevstore/98ee274ed4543bd1e1344fec701211df
57 | Events:
58 | Api1:
59 | Properties:
60 | Method: POST
61 | Path: /findings
62 | RestApiId:
63 | Ref: SecurityHubForwarderApiGateway
64 | Type: Api
65 | Handler: securityhub_forwarder.lambda_handler
66 | Layers:
67 | - Fn::Sub: arn:aws:lambda:${AWS::Region}:956882708938:layer:securityhub_deps:1
68 | MemorySize: 128
69 | Policies:
70 | - Statement:
71 | - Action:
72 | - securityhub:BatchImportFindings
73 | Effect: Allow
74 | Resource: arn:aws:securityhub:*:*:*
75 | Sid: SecurityHubImportFindingsPolicy
76 | Runtime: python3.7
77 | Timeout: 300
78 | Type: AWS::Serverless::Function
79 | Transform: AWS::Serverless-2016-10-31
80 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/sam/template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: AWS::Serverless-2016-10-31
3 | Description: >
4 | "Lambda Function for automatic subscription of any Sumo Logic lambda function with loggroups matching an input pattern."
5 |
6 | # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
7 | Globals:
8 | Function:
9 | Timeout: 300
10 | MemorySize: 128
11 |
12 | Parameters:
13 | LambdaARN:
14 | Type : String
15 | Default: "arn:aws:lambda:us-east-1:123456789000:function:TestLambda"
16 | Description: "Enter ARN for target lambda function"
17 |
18 | LogGroupPattern:
19 | Type : String
20 | Default: "Test"
21 | Description: "Enter regex for matching logGroups"
22 |
23 | UseExistingLogs:
24 | Type : String
25 | Default: "false"
26 | AllowedValues : ["true", "false"]
27 | Description: "Select true for subscribing existing logs"
28 |
29 | Resources:
30 |
31 | SumoLogGroupLambdaConnector:
32 | Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction
33 | Properties:
34 | CodeUri: ../src/
35 | Handler: "loggroup-lambda-connector.handler"
36 | Runtime: nodejs8.10
37 | Environment:
38 | Variables:
39 | LAMBDA_ARN: !Ref "LambdaARN"
40 | LOG_GROUP_PATTERN: !Ref "LogGroupPattern"
41 | USE_EXISTING_LOG_GROUPS: !Ref "UseExistingLogs"
42 | Policies:
43 | - Statement:
44 | - Sid: ReadWriteFilterPolicy
45 | Effect: Allow
46 | Action:
47 | - logs:DescribeLogGroups
48 | - logs:DescribeLogStreams
49 | - logs:PutSubscriptionFilter
50 | Resource:
51 | - !Sub 'arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*'
52 | Events:
53 | LambdaTrigger:
54 | Type: CloudWatchEvent
55 | Properties:
56 | Pattern:
57 | source:
58 | - aws.logs
59 | detail:
60 | eventSource:
61 | - logs.amazonaws.com
62 | eventName:
63 | - CreateLogGroup
64 | SumoCWLambdaInvokePermission:
65 | Type: AWS::Lambda::Permission
66 | Properties:
67 | Action: lambda:InvokeFunction
68 | FunctionName: !Ref "LambdaARN"
69 | Principal: !Sub 'logs.${AWS::Region}.amazonaws.com'
70 | SourceAccount: !Ref AWS::AccountId
71 | SourceArn: !Sub 'arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*:*'
72 |
73 | Outputs:
74 |
75 | SumoLogGroupLambdaConnector:
76 | Description: "SumoLogGroupLambdaConnector Function ARN"
77 | Value: !GetAtt SumoLogGroupLambdaConnector.Arn
78 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/Readme.md:
--------------------------------------------------------------------------------
1 | # SumoLogic LogGroup Connector
2 | This is used to automatically subscribe newly created and existing Cloudwatch LogGroups to a Lambda function.
3 |
4 | Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
5 |
6 | ### Deploying the SAM Application
7 | 1. Open a browser window and enter the following URL: https://serverlessrepo.aws.amazon.com/applications
8 | 2. In the Serverless Application Repository, search for sumologic.
9 | 3. Select Show apps that create custom IAM roles or resource policies check box.
4. Click the sumologic-loggroup-connector link, and then click Deploy.
11 | 5. In the Configure application parameters panel,
12 | LambdaARN: "Enter ARN for target lambda function" All loggroups matching the pattern are subscribed to this function
13 | LogGroupPattern: "Enter regex for matching logGroups"
14 | UseExistingLogs: "Select true for subscribing existing logs"
15 | 6. Click Deploy.
16 |
17 |
18 | ### Configuring Lambda
19 | It has two environment variables
20 |
**LOG_GROUP_PATTERN**: This is a javascript regex to filter out loggroups. Only loggroups which match this pattern will be subscribed to the lambda function. Do not use '/' while writing the pattern; matching is case insensitive.
22 |
23 | ```
24 | Test - will match testlogroup, logtestgroup and LogGroupTest
25 | ```
26 |
27 | **LAMBDA_ARN**: This specifies ARN of the lambda functions. Also you have to specify FunctionName attribute in your lambda function so that AWS does not generate random function name. This is to avoid making changes to the lambda function configuration in case your lambda function gets created again.
28 |
29 | ```
30 | {
31 | "Fn::Join": [
32 | "",
33 | [
34 | "arn:aws:lambda:",
35 | { "Ref" : "AWS::Region" },
36 | ":",
37 | { "Ref" : "AWS::AccountId" },
38 | ":function:"
39 | ]
40 | ]
41 | }
42 | ```
43 |
44 | **USE_EXISTING_LOGS**: This is used for subscribing existing log groups. By setting this parameter to true and invoking the function manually, all the existing log groups matching the pattern will be subscribed to lambda function with `LAMBDA_ARN` as arn
45 |
46 | ### For Developers
47 |
48 | Installing Dependencies
49 | ```
50 | npm install
51 | ```
52 |
53 | Building zip file
54 | ```
55 | npm run build
56 | ```
57 | Upload the generated loggroup-lambda-connector.zip in S3 bucket(don't forget to change bucket name and key in cloudformation template)
58 |
59 | Running the test cases
60 |
61 | ```
62 | python test_loggroup_lambda_connector.py
63 | ```
64 | Run the above command after building the zip file
65 |
66 | ## License
67 |
68 | Apache License 2.0 (Apache-2.0)
69 |
70 |
71 | ## Support
72 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
73 |
74 |
75 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/DLQProcessor.js:
--------------------------------------------------------------------------------
1 | var AWS = require("aws-sdk");
2 | var processLogsHandler = require('./cloudwatchlogs_lambda').processLogs;
3 | var DLQUtils = require("./sumo-dlq-function-utils").DLQUtils;
4 | var Messages = DLQUtils.Messages;
5 | var invokeLambdas = DLQUtils.invokeLambdas;
6 |
7 | exports.consumeMessages = function (env, context, callback) {
8 | var sqs = new AWS.SQS({region: env.AWS_REGION});
9 | var MessagesObj = new Messages(env);
10 | MessagesObj.receiveMessages(10, function (err, data) {
11 | var messages = (data)? data.Messages: null;
12 | if (err) {
13 | callback(err);
14 | } else if (messages && messages.length > 0) {
15 | var fail_cnt = 0, msgCount = 0, payload = '{"is_worker": "1"}';
16 | console.log("Messages Received", messages.length);
17 | for (var i = 0; i < messages.length; i++) {
18 | (function(idx) {
19 | var payload = JSON.parse(messages[idx].Body);
20 | var receiptHandle = messages[idx].ReceiptHandle;
21 | if (!(payload.awslogs && payload.awslogs.data)) {
22 | console.log("Message does not contain awslogs or awslogs.data attributes", payload);
23 | //deleting msg in DLQ after injesting in sumo
24 | MessagesObj.deleteMessage(receiptHandle, function (err, data) {
25 | if (err) console.log(err, err.stack);
26 | });
27 | return;
28 | }
29 | var logdata = payload.awslogs.data;
30 | env.SUMO_CLIENT_HEADER="dlq-aws-lambda";
31 | processLogsHandler(env, logdata, function (err, msg) {
32 | msgCount++;
33 | if (err) {
34 | console.log(err, msg);
35 | fail_cnt++;
36 | } else {
37 | //deleting msg in DLQ after injesting in sumo
38 | MessagesObj.deleteMessage(receiptHandle, function (err, data) {
39 | if (err) console.log(err, err.stack);
40 | });
41 | }
42 | if (msgCount == messages.length) {
43 | if (fail_cnt == 0 && (parseInt(env.is_worker) === 0)) {
44 | invokeLambdas(env.AWS_REGION, parseInt(env.NUM_OF_WORKERS),
45 | context.functionName, payload, context);
46 | }
47 | callback(null, (messages.length-fail_cnt) + ' success');
48 | }
49 | });
50 | })(i);
51 | }
52 |
53 | } else {
54 | callback(null, 'success');
55 | }
56 | });
57 | };
58 |
59 | exports.handler = function (event, context, callback) {
60 |
61 | var env = process.env;
62 | env['is_worker'] = event.is_worker || 0;
63 | exports.consumeMessages(env, context, callback);
64 | };
65 |
66 |
--------------------------------------------------------------------------------
/kinesisfirehose-processor/Readme.md:
--------------------------------------------------------------------------------
1 | # Kinesis Firehose Processor
This function is used for transforming streaming data from Kinesis Firehose before it is sent to its destination.
Other use cases might include normalizing data produced by different producers, adding metadata to the record, or converting incoming data to a format suitable for the destination. From Sumo Logic's perspective it solves the problem of adding delimiters between consecutive records so that they can be easily processed by Sumo Logic's Hosted Collector configured with an [S3 source](https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon_Web_Services/AWS_S3_Source).
4 |
5 | # How it works
6 | When you enable Firehose data transformation, Firehose buffers incoming data and invokes the specified Lambda function with each buffered batch asynchronously. The transformed data is sent from Lambda to Firehose for buffering and then delivered to the destination.
7 |
8 | ### Creating Stack in AWS Cloudformation
9 | you can create the stack by using [aws-cli](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-cli-creating-stack.html) or directly from aws console using webbrowser and uploading kinesisfirehose-lambda-sam.yaml. For more details checkout it's [documentation](https://help.sumologic.com/?cid=39393)
10 | Sumo Logic provides a Cloudformation [template](https://s3.amazonaws.com/appdev-cloudformation-templates/kinesisfirehose-lambda-cft.json) for creating the lambda function download and use it for creating the stack.
11 |
12 | ### Setting up the Lambda Function
The below instructions assume that the delivery stream already exists. One can also configure the lambda at the time of delivery stream creation. Refer to [Setting up Delivery Stream](https://docs.aws.amazon.com/firehose/latest/dev/basic-create.html)
14 | * Go to https://console.aws.amazon.com/firehose/home
15 | * Click on your delivery stream
16 | * In Details Tab, click on edit
17 | * In the edit window, Under Transform source records with AWS Lambda section enable the Source record transformation option. Now a bunch of options will be visible.
18 | * In Lambda function select the function(starting with SumoKFLambdaProcessor) created by Cloudformation template.
19 | * (Optional) you can set buffer size(lambda is invoked with this buffered batch) and buffer interval.
20 | * Now scroll up and click on create new or update button beside IAM Role.
21 | * In the new window click allow to give lambda invoke permission to Amazon Kinesis Firehose.
22 | * Now click on Save
23 |
24 | ### Testing your Lambda Function
25 | * Go to https://console.aws.amazon.com/firehose/home
26 | * Click on your delivery stream
27 | * Expand the Test with demo data section.
28 | * Click on Start sending demo data. After few minutes you can see transformed data in your configured S3 bucket destination.
29 | * You can view logs of lambda function in AWS Cloudwatch (LogGroup name beginning with /aws/lambda/SumoKFLambdaProcessor)
30 |
31 | ### For Developers
32 |
33 | Installing Dependencies
34 | ```
35 | npm install
36 | ```
37 |
38 | Building zip file
39 | ```
40 | npm run build
41 | ```
42 | Upload the generated kinesisfirehose-processor.zip in S3 bucket(don't forget to change bucket name and key in cloudformation template)
43 |
44 |
--------------------------------------------------------------------------------
/kinesisfirehose-processor/kinesisfirehose-lambda-cft.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Resources": {
4 | "SumoKFLambdaExecutionRole": {
5 | "Type": "AWS::IAM::Role",
6 | "Properties": {
7 | "AssumeRolePolicyDocument": {
8 | "Version": "2012-10-17",
9 | "Statement": [{
10 | "Effect": "Allow",
11 | "Principal": {"Service": ["lambda.amazonaws.com"] },
12 | "Action": ["sts:AssumeRole"]
13 | } ]
14 | },
15 | "Path": "/",
16 | "Policies": [
17 | {
18 | "PolicyName": { "Fn::Join": [ "-", [ "CloudWatchCreateLogsRolePolicy", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
19 | "PolicyDocument": {
20 | "Version": "2012-10-17",
21 | "Statement": [{
22 | "Effect": "Allow",
23 | "Action": [
24 | "logs:CreateLogGroup",
25 | "logs:CreateLogStream",
26 | "logs:PutLogEvents",
27 | "logs:DescribeLogGroups",
28 | "logs:DescribeLogStreams",
29 | "logs:PutSubscriptionFilter",
30 | "logs:DescribeSubscriptionFilters"
31 | ],
32 | "Resource": [
33 | { "Fn::Join": [ ":", ["arn", "aws", "logs", { "Ref" : "AWS::Region" }, { "Ref" : "AWS::AccountId" },"log-group","*" ] ] }
34 | ]
35 | }]
36 | }
37 | }
38 | ]
39 | }
40 | },
41 | "SumoKFLambdaProcessor": {
42 | "Type": "AWS::Lambda::Function",
43 | "DependsOn": [
44 | "SumoKFLambdaExecutionRole"
45 | ],
46 | "Properties": {
47 | "Code": {
48 | "S3Bucket": {"Fn::Join": ["", ["appdevzipfiles-", { "Ref" : "AWS::Region" }] ] },
49 | "S3Key": "kinesisfirehose-processor.zip"
50 | },
51 | "Role": {
52 | "Fn::GetAtt": [
53 | "SumoKFLambdaExecutionRole",
54 | "Arn"
55 | ]
56 | },
57 | "FunctionName": { "Fn::Join": [ "-", [ "SumoKFLambdaProcessor", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
58 | "Timeout": 300,
59 | "Handler": "kinesisfirehose-processor.handler",
60 | "Runtime": "nodejs8.10",
61 | "MemorySize": 128
62 | }
63 | }
64 | },
65 | "Outputs": {
66 | "SumoKFLambdaProcessorArn": {
67 | "Value": { "Fn::GetAtt": ["SumoKFLambdaProcessor", "Arn"] }
68 | }
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/cloudwatchevents/README.md:
--------------------------------------------------------------------------------
1 | # Sumo Logic Function for AWS CloudWatch Events
2 |
3 | AWS Lambda function to collect CloudWatch events and post them to [SumoLogic](http://www.sumologic.com) via a [HTTP collector endpoint](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source)
4 | AWS Cloudwatch Events invokes the function asynchronously in response to any changes in AWS resources. The event payload received is then sent to a SumoLogic HTTP source endpoint.
5 |
6 | # Usage
7 |
8 | First create an [HTTP collector endpoint](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source) within SumoLogic. You will need the endpoint URL for the lambda function later.
9 |
10 | ## Create Lambda Function
11 |
12 | 1. Within the AWS Lambda console select create new Lambda function
13 | 2. Select `Blank Function` on the select blueprint page
14 | 3. Leave triggers empty for now, click next
15 | 4. Configure Lambda
16 | * Select Node.js 8.10 as runtime
17 | * Copy code from cloudwatchevents.js into the Lambda function code.
18 | * Add Environment variables (See below)
19 | 5. Scroll down to the `Lambda function handle and role` section, make sure you set the right values that match the function. For role, you can just use the basic execution role. Click next.
20 | 6. Finally click on "Create function" to create the function.
21 | 7. (Optional) Test this new function with sample AWS CloudWatch Events template provided by AWS
22 |
23 |
24 | # Lambda Environment Variables
25 |
26 | The following AWS Lambda environment variables are supported
27 |
28 | * `SUMO_ENDPOINT` (REQUIRED) - SumoLogic HTTP Collector [endpoint URL](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source).
29 | * `SOURCE_CATEGORY_OVERRIDE` (OPTIONAL) - Override _sourceCategory metadata field within SumoLogic. If `none` will not be overridden
30 | * `SOURCE_HOST_OVERRIDE` (OPTIONAL) - Override _sourceHost metadata field within SumoLogic. If `none` will not be overridden
31 | * `SOURCE_NAME_OVERRIDE` (OPTIONAL) - Override _sourceName metadata field within SumoLogic. If `none` will not be overridden
32 |
33 | # Excluding Outer Event Fields
34 | By default, a CloudWatch Event has a format similar to this:
35 |
36 | ```
37 | {
38 | "version":"0",
39 | "id":"0123456d-7e46-ecb4-f5a2-e59cec50b100",
40 | "detail-type":"AWS API Call via CloudTrail",
41 | "source":"aws.logs",
42 | "account":"012345678908",
43 | "time":"2017-11-06T23:36:59Z",
44 | "region":"us-east-1",
45 | "resources":[ ],
   "detail": { ... }
47 | }
48 | ```
49 |
50 | This event will be sent as-is to Sumo Logic. If you just want to send the ```detail``` key instead, set the ```removeOuterFields``` variable to true.
51 |
52 | # Running Tests
53 | pip install aws-sam-cli
54 | Configure credentials in "~/.aws/credentials"
55 | export SUMO_ENDPOINT = HTTP_SOURCE_URL
56 | Create a S3 bucket in AWS with following policy
57 | ```
58 | {
59 | "Version": "2012-10-17",
60 | "Statement": [
61 | {
62 | "Effect": "Allow",
63 | "Principal": {
64 | "Service": "serverlessrepo.amazonaws.com"
65 | },
66 | "Action": "s3:GetObject",
67 | "Resource": "arn:aws:s3:::bucket_name/*"
68 | }
69 | ]
70 | }
71 | ```
72 | export SAM_S3_BUCKET = bucket_name (configure in previous step)
73 | npm test
74 |
75 |
76 |
77 |
--------------------------------------------------------------------------------
/s3/node.js/s3.js:
--------------------------------------------------------------------------------
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var https = require('https');
var zlib = require('zlib');

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Remember to change the hostname and path to match your collection API and specific HTTP-source endpoint
// See more at: https://service.sumologic.com/help/Default.htm#Collector_Management_API.htm
///////////////////////////////////////////////////////////////////////////////////////////////////////////

// Shared HTTPS request options for every upload; `headers` and `agent`
// are filled in at runtime by s3LogsToSumo() and the handler below.
var options = { 'hostname': 'endpoint1.collection.sumologic.com',
                'path': 'https://endpoint1.collection.sumologic.com/receiver/v1/http/',
                'method': 'POST'
};
15 |
16 |
/**
 * Streams an S3 object (gunzipping it when the key ends in .gz) line-by-line
 * into the Sumo Logic HTTP source configured in `options`, then ends the
 * request. Success is signalled only once Sumo's HTTPS response completes.
 *
 * @param {string} bucket  source S3 bucket name
 * @param {string} objKey  source S3 object key
 * @param {Object} context lambda context (succeed/fail)
 */
function s3LogsToSumo(bucket, objKey, context) {
    options.headers = {
        'X-Sumo-Client': 's3-aws-lambda'
    };
    var req = https.request(options, function(res) {
        var body = '';
        console.log('Status:', res.statusCode);
        res.setEncoding('utf8');
        res.on('data', function(chunk) { body += chunk; });
        res.on('end', function() {
            console.log('Successfully processed HTTPS response');
            // Succeed only after Sumo has acknowledged the upload.
            context.succeed();
        });
    });

    var totalBytes = 0;
    var isCompressed = false;
    if (objKey.match(/\.gz$/)) {
        isCompressed = true;
    }

    var finishFnc = function() {
        console.log("End of stream");
        console.log("Final total byte read: "+totalBytes);
        // BUG FIX: do not call context.succeed() here — doing so freezes the
        // lambda before the HTTPS request to Sumo completes. The response
        // 'end' handler above signals success instead.
        req.end();
    }

    var s3Stream = s3.getObject({Bucket: bucket, Key: objKey}).createReadStream();
    s3Stream.on('error', function() {
        console.log(
            'Error getting object "' + objKey + '" from bucket "' + bucket + '". ' +
            'Make sure they exist and your bucket is in the same region as this function.');
        context.fail();
    });

    req.write('Bucket: '+bucket + ' ObjectKey: ' + objKey +'\n');

    // BUG FIX: the original also accumulated every chunk into a `finalData`
    // string that was never read, buffering the whole object in memory.
    if (!isCompressed) {
        s3Stream.on('data', function(data) {
            req.write(data+'\n');
            totalBytes += data.length;
        });
        s3Stream.on('end', finishFnc);
    } else {
        var gunzip = zlib.createGunzip();
        s3Stream.pipe(gunzip);

        gunzip.on('data', function(data) {
            totalBytes += data.length;
            req.write(data.toString()+'\n');
        }).on('end', finishFnc)
        .on('error', function(error) {
            context.fail(error);
        })
    }
}
78 |
79 | exports.handler = function(event, context) {
80 | options.agent = new https.Agent(options);
81 | event.Records.forEach(function(record) {
82 | var bucket = record.s3.bucket.name;
83 | var objKey = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '));
84 | console.log('Bucket: '+bucket + ' ObjectKey: ' + objKey);
85 | s3LogsToSumo(bucket, objKey, context);
86 | });
87 | }
88 |
--------------------------------------------------------------------------------
/cloudwatchlogs/README.md:
--------------------------------------------------------------------------------
1 | # Sumo Logic Functions for AWS CloudWatch Logs
2 |
AWS Lambda function to collect logs from CloudWatch Logs and post them to [SumoLogic](http://www.sumologic.com) via a [HTTP collector endpoint](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source)
4 |
5 | ## Warning: This Lambda Function is no longer recommended solution
6 | We recommend using [SumoLogic Lambda Function for AWS CloudWatch Logs With Dead Letter Queue Support](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Amazon-CloudWatch-Logs) as it is configured with Dead Letter Queue which takes care of messages that can't be processed (consumed) successfully.
7 |
8 |
9 | # Usage
10 |
11 | First create an [HTTP collector endpoint](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source) within SumoLogic. You will need the endpoint URL for the lambda function later.
12 |
13 | ## Create Lambda Function
14 |
15 | 1. Within the AWS Lambda console select create new Lambda function
16 | 2. Select `Blank Function` on the select blueprint page
17 | 3. Leave triggers empty for now, click next
18 | 4. Configure Lambda
19 | * Select Node.js 8.10 as runtime
20 | * Copy code from cloudwatchlogs_lambda.js into the Lambda function code.
21 | * Add Environment variables (See below)
22 | 5. Scroll down to the `Lambda function handle and role` section, make sure you set the right values that match the function. For role, you can just use the basic execution role. Click next.
23 | 6. Finally click on "Create function" to create the function.
24 | 7. (Optional) Test this new function with sample AWS CloudWatch Logs template provided by AWS
25 |
26 | ## Create Stream from CloudWatch Logs
27 |
28 | 1. Within the AWS CloudWatch Logs console, check the Log Group you want to send data to Sumologic.
29 | 2. From Actions button, select "Stream to AWS Lambda".
30 | 3. Select Lambda function created above.
31 | 4. Select `json` as the log format and define any filters.
32 | 5. Click start streaming.
33 |
34 |
35 | # Lambda Environment Variables
36 |
37 | The following AWS Lambda environment variables are supported
38 |
39 | * `SUMO_ENDPOINT` (REQUIRED) - SumoLogic HTTP Collector [endpoint URL](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source).
40 | * `ENCODING` (OPTIONAL) - Encoding to use when decoding CloudWatch log events. Default is 'utf-8'.
41 | * `SOURCE_CATEGORY_OVERRIDE` (OPTIONAL) - Override _sourceCategory metadata field within SumoLogic. If `none` will not be overridden
42 | * `SOURCE_HOST_OVERRIDE` (OPTIONAL) - Override _sourceHost metadata field within SumoLogic. If `none` will not be overridden
43 | * `SOURCE_NAME_OVERRIDE` (OPTIONAL) - Override _sourceName metadata field within SumoLogic. If `none` will not be overridden
44 |
45 | # Dynamic Metadata Fields
46 |
47 | The lambda supports dynamically overriding the _sourceName, _sourceHost and _sourceCategory per log message by setting `_sumo_metadata` within a json log.
48 |
49 | This can be useful when writing to CloudWatch Logs via a lambda function.
50 |
51 | For example:
52 |
53 | ```
54 | exports.handler = (event, context, callback) => {
55 |
56 | var serverIp = '123.123.123.123'
57 |
58 | console.log(JSON.stringify({
59 | 'message': 'something happened..',
60 | '_sumo_metadata': {
61 | 'category': 'prod/appa/console',
62 | 'source': 'other_source',
63 | 'host': serverIp
64 | }
65 |
66 | }));
67 | console.log('some other log message with default sourceCategory');
68 | };
69 |
70 | ```
71 |
--------------------------------------------------------------------------------
/cloudtrail_s3/cloudtrail_s3_to_sumo.js:
--------------------------------------------------------------------------------
1 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 | // CloudTrail S3 bucket log to SumoLogic //
3 | // https://github.com/SumoLogic/sumologic-aws-lambda //
4 | // //
5 | // YOU MUST CREATE A SUMO LOGIC ENDPOINT CALLED SUMO_ENDPOINT AND PASTE IN ENVIRONMENTAL VARIABLES BELOW //
6 | // https://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source //
7 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
8 | // SumoLogic Endpoint to post logs
9 | var SumoURL = process.env.SUMO_ENDPOINT;
10 |
11 | var AWS = require('aws-sdk');
12 | var s3 = new AWS.S3();
13 | var https = require('https');
14 | var zlib = require('zlib');
15 | var url = require('url');
16 |
/**
 * Reads a gzipped CloudTrail log object from S3, splits its Records array
 * into one JSON line per event, and posts them to the Sumo Logic HTTP
 * source given by SUMO_ENDPOINT. CloudTrail-Digest objects are skipped.
 *
 * @param {string} bucket  source S3 bucket name
 * @param {string} objKey  source S3 object key
 * @param {Object} context lambda context (succeed/fail)
 */
function s3LogsToSumo(bucket, objKey,context) {
    if (objKey.match(/CloudTrail-Digest/)) {
        console.log("digest file are ignored");
        context.succeed();
        // BUG FIX: context.succeed() does not stop execution — without this
        // return, digest files were downloaded and ingested anyway.
        return;
    }

    var urlObject = url.parse(SumoURL);
    var options = {
        'hostname': urlObject.hostname,
        'path': urlObject.pathname,
        'method': 'POST'
    };
    options.headers = {
        'X-Sumo-Name': objKey,
        'X-Sumo-Client': 'cloudtrail_s3-aws-lambda'
    };
    var req = https.request(options, function(res) {
        var body = '';
        console.log('Status:', res.statusCode);
        res.setEncoding('utf8');
        res.on('data', function(chunk) { body += chunk; });
        res.on('end', function() {
            console.log('Successfully processed HTTPS response');
            context.succeed();
        });
    });
    var finalData = '';

    var s3Stream = s3.getObject({Bucket: bucket, Key: objKey}).createReadStream();
    s3Stream.on('error', function() {
        console.log(
            'Error getting object "' + objKey + '" from bucket "' + bucket + '". ' +
            'Make sure they exist and your bucket is in the same region as this function.');
        context.fail();
    });
    var gunzip = zlib.createGunzip();
    s3Stream.pipe(gunzip);
    gunzip.on('data',function(data) {
        // The whole decompressed file must be buffered: a CloudTrail object
        // is a single JSON document and cannot be parsed incrementally here.
        finalData += data.toString();
    }).on('end',function(end){
        var records = JSON.parse(finalData);
        console.log(records.Records.length + " cloudtrail records in this file");
        for (var i = 0, len = records.Records.length; i < len; i++) {
            req.write(JSON.stringify(records.Records[i]) + '\n');
        }
        req.end();
    }).on('error',function(error) {
        context.fail(error);
    });
}
68 |
69 | exports.handler = function(event, context) {
70 | //options.agent = new https.Agent(options);
71 | // Validate URL has been set
72 | var urlObject = url.parse(SumoURL);
73 | if (urlObject.protocol != 'https:' || urlObject.host === null || urlObject.path === null) {
74 | context.fail('Invalid SUMO_ENDPOINT environment variable: ' + SumoURL);
75 | }
76 | var bucket = event.Records[0].s3.bucket.name;
77 | var objKey = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
78 | console.log('Bucket: '+bucket + ' ObjectKey: ' + objKey);
79 | s3LogsToSumo(bucket, objKey, context);
80 | }
81 |
--------------------------------------------------------------------------------
/cloudtrail_s3/README.md:
--------------------------------------------------------------------------------
1 | # Warning: This Lambda Function has been deprecated
2 | We recommend using [S3 Event Notifications Integration](https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon_Web_Services/AWS_S3_Source#S3_Event_Notifications_Integration),
3 |
4 |
5 | Cloudtrail S3 to Sumo Logic
6 | ===========================================
7 |
8 | Files
9 | -----
* *cloudtrail_s3_to_sumo.js*: node.js function to read files from an S3 bucket to a Sumo Logic hosted HTTP collector. Files in the source bucket are gzipped. The function receives S3 notifications on new files uploaded to the source S3 bucket, then reads these files, unzips them, and breaks down the records before finally sending the data to the target Sumo endpoint.
11 |
12 | ## Lambda Setup
13 | For the Sumo collector configuration, do not enable multiline processing or
14 | one message per request -- Additionally, the timeformat should be adjusted to ensure the eventTime is the messageTime.
15 | In the source Timestamp Format section, add a format yyyy-MM-dd'T'HH:mm:ss'Z' with Timestamp locator eventTime\":\"(.*)?\"
16 | .
17 |
18 | In the AWS console, use a code entry type of 'Edit code inline' and paste in the
19 | code. In the Environment variable section, set the following Key to the URL provided from Sumo collector configuration.
20 | SUMO_ENDPOINT
21 |
22 | In configuration specify index.handler as the Handler. Specify a Role that has
23 | sufficient privileges to read from the *source* bucket, and invoke a lambda
24 | function. The code provided is tested with node runtime 4.3, 6.10 and 8.10, Memory setting at 128MB, Timeout 10s.
25 |
26 | Set trigger to S3 bucket create-all events.
27 |
28 | One can use the AWSLambdaBasicExecution and the AWSS3ReadOnlyAccess role, although it is *strongly* recommended to customize them to restrict to relevant resources in production:
29 |
30 |
31 | {
32 | "Version": "2012-10-17",
33 | "Statement": [
34 | {
35 | "Effect": "Allow",
36 | "Action": [
37 | "logs:CreateLogGroup",
38 | "logs:CreateLogStream",
39 | "logs:PutLogEvents"
40 | ],
41 | "Resource": "arn:aws:logs:*:*:*"
42 | }
43 | ]
44 | }
45 |
46 |
47 | AND
48 |
49 |
50 | {
51 | "Version": "2012-10-17",
52 | "Statement": [
53 | {
54 | "Effect": "Allow",
55 | "Action": [
56 | "s3:Get*",
57 | "s3:List*"
58 | ],
59 | "Resource": "*"
60 | }
61 | ]
62 | }
63 |
64 |
65 | Once the function is created, you can tie it to the source S3 bucket. From the S3 Management console, select the bucket, goto its Properties, select Events and add a Notification. From there, provide a name for the notification, select *ObjectCreated (All)* as the Events, and select *Lambda* as the *Send To* option. Finally, select the Lambda function created above and Save.
66 |
67 | This function should just work. If you are going to "test" this function under the AWS console, make sure you are feeding a "good" S3 CreateObject Event sample message. The default "hello world" event sample will error out.
68 |
69 | Note on elapsed time: This value really depended on when did the event was written into the S3 file (file name contains the file creation time) and when did that S3:CreateObject was fired. To analyze the elapsed time, use the example query below.
70 |
71 | _sourceCategory="global/aws/cloudtrail" | _receipttime-_messagetime as delta | delta/1000/60 as delta_min | timeslice 1m | avg(delta_min), max(delta_min), min(delta_min) by _timeslice
72 |
73 | KNOWN ISSUE:
74 | Occasionally, the function will fail with either TypeError or Socket Error. AWS has built-in retries to launch the function again with the same parameters (bucket/filename). There shouldn't be any data loss, but the function log will show those errors. Also, using Sumo to log this Lambda run is highly recommended.
75 |
--------------------------------------------------------------------------------
/deploy_function.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import os
3 | from argparse import ArgumentParser
4 |
# Target AWS regions for the deployment buckets; each region gets its own
# bucket named "<prefix>-<region>" (see get_bucket_name below).
regions = [
    "us-east-2",
    "us-east-1",
    "us-west-1",
    "us-west-2",
    "ap-south-1",
    "ap-northeast-2",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-northeast-1",
    "ca-central-1",
    # "cn-north-1",
    "eu-central-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "eu-north-1",
    "sa-east-1"
]
24 |
25 |
def get_bucket_name(bucket_prefix, region):
    """Return the per-region S3 bucket name for *bucket_prefix*.

    eu-north-1 gets an extra trailing "s" on the region part -- presumably
    because the plain name was already taken; TODO confirm.
    """
    suffix = "s" if region == "eu-north-1" else ""
    return '%s-%s%s' % (bucket_prefix, region, suffix)
30 |
31 |
def upload_code_in_multiple_regions(filepath, bucket_prefix):
    """Upload the zip at *filepath* to the region-specific bucket in every supported region."""
    for target_region in regions:
        bucket = get_bucket_name(bucket_prefix, target_region)
        upload_code_in_S3(filepath, bucket, target_region)
36 |
37 |
def create_buckets(bucket_prefix):
    """Best-effort creation of the per-region deployment buckets.

    Creation is attempted in every supported region; a failure in one region
    (e.g. the bucket already exists) is logged and skipped so the remaining
    regions are still processed.  The previous bare ``except: pass`` hid all
    errors, including auth problems -- now the failure is at least printed.
    """
    for region in regions:
        s3 = boto3.client('s3', region)
        bucket_name = get_bucket_name(bucket_prefix, region)
        try:
            if region == "us-east-1":
                # us-east-1 rejects an explicit LocationConstraint
                response = s3.create_bucket(Bucket=bucket_name)  # the operation is idempotent
            else:
                response = s3.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={
                        'LocationConstraint': region
                    })
            print("Creating bucket", region, response)
        except Exception as err:
            # Keep the best-effort semantics but surface what went wrong.
            print("Failed to create bucket", bucket_name, region, err)
53 |
54 |
def upload_code_in_S3(filepath, bucket_name, region):
    """Upload the deployment zip at *filepath* to *bucket_name* with public-read ACL."""
    print("Uploading zip file in S3", region)
    client = boto3.client('s3', region)
    key = os.path.basename(filepath)
    client.upload_file(filepath, bucket_name, key,
                       ExtraArgs={'ACL': 'public-read'})
61 |
62 |
def upload_cftemplate(templatepath, bucket_name, region='us-east-1'):
    """Upload the CloudFormation template at *templatepath* with public-read ACL."""
    print("Uploading template file in S3")
    client = boto3.client('s3', region)
    client.upload_file(templatepath, bucket_name,
                       os.path.basename(templatepath),
                       ExtraArgs={'ACL': 'public-read'})
69 |
70 |
if __name__ == '__main__':

    # CLI entry point: uploads a CloudFormation template and/or a lambda zip
    # package to the S3 buckets of the selected (dev/prod) account.
    parser = ArgumentParser()
    parser.add_argument("-t", "--templatefile", dest="templatefile",
                        help="CF template")

    parser.add_argument("-z", "--zipfile", dest="zipfile",
                        help="deployment package")

    parser.add_argument("-d", "--deployment", dest="deployment", default="dev",
                        help="aws account type")

    args = parser.parse_args()
    if args.deployment == "prod":
        zip_bucket_prefix = "appdevzipfiles"
        template_bucket = "appdev-cloudformation-templates"
    else:
        zip_bucket_prefix = "appdevstore"
        template_bucket = "cf-templates-5d0x5unchag-us-east-1"

    # create_buckets(zip_bucket_prefix)
    print(args)
    if args.templatefile:
        if not os.path.isfile(args.templatefile):
            # fixed grammar: "does not exists" -> "does not exist"
            raise Exception("templatefile does not exist")
        else:
            upload_cftemplate(args.templatefile, template_bucket)

    if args.zipfile:
        if not os.path.isfile(args.zipfile):
            raise Exception("zipfile does not exist")
        else:
            upload_code_in_multiple_regions(args.zipfile, zip_bucket_prefix)

    # fixed typo: "Successfull" -> "Successful"
    print("Deployment Successful: ALL files copied to %s" % args.deployment)
106 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/src/loggroup-lambda-connector.js:
--------------------------------------------------------------------------------
1 | var AWS = require("aws-sdk");
2 |
3 | var cwl = new AWS.CloudWatchLogs({apiVersion: '2014-03-28'});
4 |
function subscribeToLambda(lambdaLogGroupName, lambdaArn, errorHandler) {
    // Attach a catch-all (empty filterPattern) subscription filter that
    // streams this log group to the target lambda.
    // handle case where subscription filter exists/case where loggroup generated by target lambda
    cwl.putSubscriptionFilter({
        destinationArn: lambdaArn,
        filterName: 'SumoLGLBDFilter',
        filterPattern: '',
        logGroupName: lambdaLogGroupName
    }, errorHandler);
}
15 |
function filterLogGroups(event, logGroupRegex) {
    // A log group qualifies only when the event is a CreateLogGroup call
    // AND its name matches the (case-insensitive) pattern.
    var pattern = new RegExp(logGroupRegex, "i");
    var name = event.detail.requestParameters.logGroupName;
    return event.detail.eventName === "CreateLogGroup" && pattern.test(name);
}
25 |
function subscribeExistingLogGroups(logGroups) {
    // Subscribe every already-existing log group matching LOG_GROUP_PATTERN
    // (case-insensitive) to LAMBDA_ARN.  Iterates in reverse, matching the
    // original ordering of the subscription calls.
    var pattern = new RegExp(process.env.LOG_GROUP_PATTERN, "i");
    var lambdaArn = process.env.LAMBDA_ARN;
    logGroups.slice().reverse().forEach(function (group) {
        var name = group.logGroupName;
        if (!name.match(pattern)) {
            console.log("Unmatched logGroup: ", name);
            return;
        }
        // forEach gives each callback its own `name` binding, so no IIFE
        // is needed to capture the log group name for the async callback.
        subscribeToLambda(name, lambdaArn, function (err, data) {
            if (err) {
                console.log("Error in subscribing", name, err);
            } else {
                console.log("Successfully subscribed logGroup: ", name);
            }
        });
    });
}
45 |
// Recursively page through all CloudWatch log groups (50 per call) and
// subscribe the matching ones.  `errorHandler(err, msg)` fires once: with
// an error on fetch failure, or (null, "Success") after the final page.
function processExistingLogGroups(token, errorHandler) {

    var params = {
        limit: 50,
        // logGroupNamePrefix: 'STRING_VALUE',
        nextToken: token // null on the first call; AWS treats it as "start"
    };
    // Wrap the callback-style describeLogGroups in a promise so pagination
    // can be chained below.
    var p = new Promise(function(resolve, reject) {
        cwl.describeLogGroups(params, function(err, data) {
            if (err) {
                console.log("error in fetching logGroups", err, err.stack);
                reject(err);
            } else {
                console.log("fetched logGroups: " + data.logGroups.length + " nextToken: " + data.nextToken);
                resolve(data);
            }
        });
    });
    var cb = function (data) {
        subscribeExistingLogGroups(data.logGroups);
        if (data.nextToken) {// if next set of log groups exists
            processExistingLogGroups(data.nextToken, errorHandler)
        } else {
            errorHandler(null, "Success");
        }
    };
    return p.then(cb).catch(function (err) {
        errorHandler(err, "Error in fetching logGroups");
    });
}
76 |
function processEvents(env, event, errorHandler) {
    // Handle a single CloudTrail-via-CloudWatch event: subscribe the newly
    // created log group when it matches LOG_GROUP_PATTERN, otherwise log
    // and do nothing.
    var logGroupName = event.detail.requestParameters.logGroupName;
    if (!filterLogGroups(event, env.LOG_GROUP_PATTERN)) {
        console.log("Unsubscribed: ", logGroupName, env.LAMBDA_ARN);
        return;
    }
    console.log("Subscribing: ", logGroupName, env.LAMBDA_ARN);
    subscribeToLambda(logGroupName, env.LAMBDA_ARN, errorHandler);
}
88 |
89 | exports.handler = function (event, context, callback) {
90 | function errorHandler(err, msg) {
91 | if (err) {
92 | console.log(err, msg);
93 | callback(err);
94 | } else {
95 | callback(null, "Success");
96 | }
97 | }
98 | if (process.env.USE_EXISTING_LOG_GROUPS == "true") {
99 | processExistingLogGroups(null, errorHandler);
100 | } else {
101 | processEvents(process.env, event, errorHandler);
102 | }
103 |
104 | };
105 |
--------------------------------------------------------------------------------
/securityhub-forwarder/README.md:
--------------------------------------------------------------------------------
1 | # sumologic-securityhub-forwarder
2 |
3 | This lambda function is used for importing findings from Sumo Logic to AWS Security Hub.
4 |
5 |
6 | Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
7 |
8 | 
9 |
10 | ## Setup
11 |
12 | 1. Enable Sumo Logic as a finding provider. Refer the [docs](https://www.google.com/url?q=https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-providers.html&sa=D&ust=1543440246819000&usg=AFQjCNEgjGnz7fQlpLFo2Rkqd5ZeQBDQ3w)
13 |
14 | 2. Deploying the SAM Application
15 | 1. Go to https://serverlessrepo.aws.amazon.com/applications.
16 | 2. Search for sumologic-securityhub-forwarder.
17 | 3. Click on Deploy
18 | 4. Copy the value of SecurityHubForwarderApiUrl from Output which is the API Gateway endpoint.
19 |
20 | 3. Create a [Webhook connection](https://help.sumologic.com/Manage/Connections-and-Integrations/Webhook-Connections/Webhook-Connection-for-AWS-Lambda). Use the value copied in step 2.4 as the URL.
21 | Note: SAM application already secures the endpoint with AWS_IAM authorization type
22 | It should have the following payload
23 | ```{
24 | "Types": " Ex: Software and Configuration Checks/Industry and Regulatory Standards/PCI-DSS Controls",
25 | "Description": "{{SearchDescription}}",
26 | "SourceUrl": "{{SearchQueryUrl}}",
27 | "GeneratorID": "{{SearchName}}",
28 | "Severity": ,
29 | "Rows": "{{AggregateResultsJson}}",
30 | "ComplianceStatus": "(Optional) - PASSED/WARNING/FAILED/NOT_AVAILABLE"
31 | }
32 | ```
33 | Also make sure the IAM role or IAM user(whose credentials are used) has permissions to invoke the api in API Gateway. Refer the [docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html)
34 |
35 | 4. Create scheduled searches, as described in this [document](https://help.sumologic.com/Dashboards-and-Alerts/Alerts/02-Schedule-a-Search), and configure the following settings:
36 | * Alert condition is set to “Greater than >” and Number of Results is set to 0.
37 | * Alert Type is set to “Webhook”.
38 | * Connection is set to the name configured in step 2 (of the link document instructions).
39 | * Toggle the customize payload button and fill the fields in the following dialog. The following table explains each field.
40 |
41 | | Field | Description |
42 | | -----------------|-----------------------------------------|
43 | |Types | Type of Finding in the format namespace/category/classifier. This field should match one of the finding types, as defined in Finding Type Taxonomy in AWS docs.
44 | | Description | Details specific to the instance of the finding. This should be non-empty.
45 | | SourceURL | Search Query URL pointing to the exact query that generated the finding.
46 | | GeneratorID | Scheduled Search Name that generated this finding.
47 | | Severity | Impact of a finding has on a customer (data loss, malware activity, configuration weakness etc), displayed as an integer ranging from 0 to 100.
48 | | ComplianceStatus | Results of a compliance check. This is an optional field and its value should be one of the following: PASSED/WARNING/FAILED/NOT_AVAILABLE.
49 |
50 |
51 | Also the rows in AggregateResultsJson should contain following mandatory fields
52 | "finding_time"(timestamp), "resource_type", "resource_id", "title"
53 |
54 | “aws_account_id” is an optional field in search results. The Lambda function will pick up its value in the following order
55 | search results(each row) > aws_account_id environment variable > defaults to the account in which lambda is running
56 |
57 |
58 | ## License
59 |
60 | Apache License 2.0 (Apache-2.0)
61 |
62 |
63 | ## Support
64 | Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
65 |
66 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/utils.js:
--------------------------------------------------------------------------------
1 | var https = require('https');
2 | var zlib = require('zlib');
3 | var stream = require('stream');
4 |
// Encode a UTF-8 string as base64.
var encodebase64 = function (data) {
    // Buffer.from replaces the deprecated (and unsafe) new Buffer(...) ctor.
    return Buffer.from(data, 'utf8').toString('base64');
};
8 |
// Decode a base64 string back to UTF-8.
var decodebase64 = function (data) {
    // Buffer.from replaces the deprecated (and unsafe) new Buffer(...) ctor.
    return Buffer.from(data, 'base64').toString('utf8');
};
12 |
// Retry a Promise-returning `fn` up to `retry` total attempts.  `interval`
// is either a fixed delay in ms or a zero-arg function producing the next
// delay (e.g. exponentialBackoff).  The final rejection is propagated once
// retries are exhausted.  NOTE: this monkey-patches the global Promise.
Promise.retryMax = function (fn, retry, interval, fnParams) {
    return fn.apply(this, fnParams).catch((err) => {
        var waitTime = typeof interval === 'function' ? interval() : interval;
        console.log("Retries left " + (retry-1) + " delay(in ms) " + waitTime);
        return (retry > 1? Promise.wait(waitTime).then(() => Promise.retryMax(fn, retry-1, interval, fnParams)): Promise.reject(err));
    });
};
20 |
// Resolve after `delay` milliseconds (0 when omitted); never rejects.
// NOTE: this monkey-patches the global Promise.
Promise.wait = function (delay) {
    return new Promise(function (fulfill) {
        setTimeout(fulfill, delay || 0);
    });
};
27 |
// Return a zero-arg function yielding seed, 2*seed, 3*seed, ... on each
// call (a linearly growing delay, despite the name).
var exponentialBackoff = function (seed) {
    var nextDelay = 0;
    return function () {
        nextDelay += seed;
        return nextDelay;
    };
};
35 |
// Shallow-copy the own properties of `obj`, KEEPING an entry only when
// predicate(value) is falsy -- i.e. the predicate expresses the "remove"
// condition and receives the value (not the key).
var filterObj = function (obj, predicate) {
    var kept = {};
    Object.keys(obj).forEach(function (key) {
        if (!predicate(obj[key])) {
            kept[key] = obj[key];
        }
    });
    return kept;
}
// Decompress a gzip Buffer via a streaming Gunzip and resolve with the
// concatenated UTF-8 string; rejects on any decompression error.
var gunzipPromise = function (buffer) {
    // to make it backward compatible for multiple concatenated members https://github.com/nodejs/node/pull/5120
    return new Promise(function (resolve, reject) {
        var uncompressed_bytes = [];
        var gunzip = zlib.createGunzip();
        gunzip.on('data', function (data) {
            // collect decoded chunks; joined once the stream ends
            uncompressed_bytes.push(data.toString());
        }).on("end", function () {
            resolve(uncompressed_bytes.join(""));
        }).on("error", function (e) {
            reject(e);
        });
        // Feed the in-memory buffer through a PassThrough so it can be piped.
        var bufferStream = new stream.PassThrough();
        bufferStream.end(buffer);
        bufferStream.pipe(gunzip);
    });
};
62 | /*Server Errors Ex 429 throttling are thrown inside onEnd
63 | Following Errors are thrown inside onError
64 | ECONNRESET - server closed the socket unexpectedly
65 | ECONNREFUSED - server did not listen
66 | HPE_* codes - server returned garbage
67 | */
// POST `data` to the endpoint described by `Options`.  Resolves only on
// HTTP 200; any other status rejects with {status, response} and transport
// errors reject with {status, error} (see the error-class comment above).
var sendRequest = function (Options, data) {
    return new Promise(function (resolve, reject) {
        var req = https.request(Options, function (res) {
            var body = '';
            res.setEncoding('utf8');
            // Drain the response so 'end' fires even though body is unused.
            res.on('data', function (chunk) {
                body += chunk;
            });
            res.on('end', function () {
                if (res.statusCode === 200) {
                    resolve({"status": "SUCCESS", "response": res});
                } else {
                    reject({"status": "FAILED", "response": res});
                }
            });
        });
        req.on('error', function (err) {
            reject({"status": "FAILED", "error": err});
        });
        req.write(data);
        req.end();
    });
};
91 |
// Gzip `data` and resolve with the compressed buffer.  Side effect: mutates
// Options.headers to advertise the gzip Content-Encoding.
var compressData = function (Options, data) {
    return new Promise(function (resolve, reject) {
        Options.headers['Content-Encoding'] = 'gzip';
        zlib.gzip(data, function (err, compressed) {
            if (err) {
                console.log("Failed to CompressData", err);
                reject(err);
                return;
            }
            console.log("Data Compressed");
            resolve(compressed);
        });
    });
}
106 |
// Public surface; the Promise.* helpers are also exposed under p_* aliases.
module.exports = {
    encodebase64: encodebase64,
    decodebase64: decodebase64,
    p_retryMax: Promise.retryMax,
    p_wait: Promise.wait,
    exponentialBackoff: exponentialBackoff,
    gunzipPromise: gunzipPromise,
    sendRequest: sendRequest,
    filterObj: filterObj,
    compressData: compressData
};
118 |
119 |
--------------------------------------------------------------------------------
/securityhub-forwarder/test/test_securityhub_connector.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import copy
3 | import sys
4 | import os
5 |
# Make the lambda sources (../src) importable for the imports below, then
# drop the temporary path entry again so it cannot shadow other modules.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src"))

from utils import retry, incrementing_sleep, fixed_sleep
from securityhub_forwarder import lambda_handler

del sys.path[0]
12 |
13 |
class TestLambda(unittest.TestCase):
    """Tests for the SecurityHub forwarder lambda handler.

    NOTE(review): these look like integration tests hitting real AWS
    (Security Hub enabled, credentials configured) -- confirm before
    wiring into CI.
    """

    def setUp(self):
        #Todo: enable sec hub
        # Build an API-Gateway-style event whose body is the sample payload.
        self.event = {}
        with open("fixtures.json") as f:
            self.event['body'] = f.read()
        os.environ["AWS_REGION"] = "us-east-1"
        # Minimal stand-in for the lambda context object; only
        # invoked_function_arn appears to be read by the handler.
        class Context:
            invoked_function_arn="arn:aws:lambda:us-east-1:956882708938:function:OverbridgeLambda"

        self.context = Context()

    def tearDown(self):
        # NOTE(review): os.environ changes made by tests are never restored
        # here, so they leak into later tests -- confirm this is intended.
        pass

    def test_send_success(self):
        # Happy path: all three findings in the fixture import successfully.
        result = lambda_handler(self.event, self.context)
        self.assertEqual(result['statusCode'], 200)
        self.assertTrue(result['body'] == 'FailedCount: 0 SuccessCount: 3 StatusCode: 200 ', "%s body is not matching" % result['body'])

    def test_send_failure(self):
        # A Severity outside 0-100 must be rejected with HTTP 400.
        event = copy.copy(self.event)
        event['body'] = event['body'].replace('\"Severity\": 30', '\"Severity\":200')
        result = lambda_handler(event, self.context)
        self.assertEqual(result['statusCode'], 400)
        self.assertTrue(result['body'] == 'Bad Request: Param Validation Error - Severity should be between 0 to 100', "%s body is not matching" % result['body'])

    def test_compliance_status_failure(self):
        pass

    def test_different_account_id(self):
        # Region/account overrides from environment variables are honoured.
        event = copy.copy(self.event)
        # os.environ["AWS_REGION"] = "us-east-1"
        # os.environ["AWS_ACCOUNT_ID"] = "456227676011"
        os.environ["AWS_REGION"] = "us-west-2"
        os.environ["AWS_ACCOUNT_ID"] = "068873283051"
        result = lambda_handler(event, self.context)
        self.assertEqual(result['statusCode'], 200)
        self.assertTrue(result['body'] == 'FailedCount: 0 SuccessCount: 3 StatusCode: 200 ', "%s body is not matching" % result['body'])

    def test_different_region(self):
        pass

    def test_validation(self):
        # Removing the mandatory Types field must produce HTTP 400.
        event = copy.copy(self.event)
        event['body'] = event['body'].replace('\"Types\": \"Software and Configuration Checks/Industry and Regulatory Standards/HIPAA Controls\",', "")
        result = lambda_handler(event, self.context)
        self.assertEqual(result['statusCode'], 400)
        self.assertTrue(result['body'] == "Bad Request: 'Types Fields are missing'", "%s body is not matching" % result['body'])

    def test_retry(self):
        # Exercise the retry decorator: each retry logs one warning, so
        # max_retries=N on an always-failing function yields N-1 warnings.
        class Logger:
            def __init__(self):
                self.messages = []

            def warning(self, msg):
                self.messages.append(msg)
        logger1 = Logger()
        @retry(ExceptionToCheck=(KeyError,), max_retries=3, logger=logger1, handler_type=fixed_sleep, fixed_wait_time=2)
        def func():
            data = {}
            return data["key"]
        with self.assertRaises(Exception) as context:
            func()

        self.assertTrue(len(logger1.messages) == 2, "fixed_sleep(2) with 3 retries should contain 2 messages")

        logger2 = Logger()
        # KeyError is not in ExceptionToCheck here, so no retry (and no
        # warning) should happen before the exception propagates.
        @retry(ExceptionToCheck=(ValueError,), max_retries=2, logger=logger2, handler_type=incrementing_sleep, wait_time_inc=2)
        def func():
            data = {}
            return data["key"]

        with self.assertRaises(Exception) as context:
            func()
        self.assertTrue(len(logger2.messages) == 0, "incremental_sleep(2) with 2 retries but with ValueError(retry not allowed)")


        logger3 = Logger()
        @retry(ExceptionToCheck=(KeyError,), max_retries=2, logger=logger3, handler_type=incrementing_sleep, wait_time_inc=2)
        def func():
            data = {}
            return data["key"]

        with self.assertRaises(Exception) as context:
            func()
        self.assertTrue(len(logger3.messages) == 1, "incremental_sleep(2) with 2 retries should contain 1 message")
102 |
# Allow running the suite directly: python test_securityhub_connector.py
if __name__ == '__main__':

    unittest.main()
106 |
--------------------------------------------------------------------------------
/kinesis/README.md:
--------------------------------------------------------------------------------
1 | ===========================================
2 | Kinesis to Sumo Logic
3 | ===========================================
4 | This function is invoked by AWS Lambda after it detects new records in a Kinesis stream. The received collection of events is decompressed, transformed and sent to a Sumo Logic HTTP source endpoint.
5 |
6 | Files
7 | -----
8 | * *node.js/k2sl_lambda.js*: node.js function to read messages from a Kinesis stream and posts them to a Sumo Logic hosted HTTP collector.
9 |
10 | ## Lambda configuration
11 |
12 | There are no module dependencies for this code, so you can paste it into the
13 | lambda console directly. Note you must set the collector host and the
14 | path that includes your secret key in options for this to work.
15 |
16 | For the Sumo collector configuration, do not enable multiline processing or
17 | one message per request -- the idea is to send as many messages in one request
18 | as possible to Sumo and let Sumo break them apart as needed.
19 |
20 | In the AWS console, use a code entry type of 'Edit code inline' and paste in the
21 | code (doublecheck the hostname and path as per your collector setup).
22 |
23 | In configuration specify index.handler as the Handler. Specify a Role that has
24 | sufficient privileges to read from the kinesis stream, invoke a lambda
25 | function, and write cloud watch logs. I tested with this policy, which is
26 | too loose for production.
27 |
28 |
29 | {
30 | "Version": "2012-10-17",
31 | "Statement": [
32 | {
33 | "Effect": "Allow",
34 | "Action": [
35 | "lambda:InvokeFunction"
36 | ],
37 | "Resource": [
38 | "*"
39 | ]
40 | },
41 | {
42 | "Effect": "Allow",
43 | "Action": [
44 | "kinesis:GetRecords",
45 | "kinesis:GetShardIterator",
46 | "kinesis:DescribeStream",
47 | "kinesis:ListStreams",
48 | "logs:CreateLogGroup",
49 | "logs:CreateLogStream",
50 | "logs:PutLogEvents"
51 | ],
52 | "Resource": "*"
53 | }
54 | ]
55 | }
56 |
57 |
58 | For the Event Source, pick the stream containing the data you want to send to Sumo.
59 |
60 | ## Lambda test event
61 |
62 | Test event in order to test the Lambda
63 |
64 | ```json
65 | {
66 | "Records": [
67 | {
68 | "kinesis": {
69 | "partitionKey": "partitionKey-03",
70 | "kinesisSchemaVersion": "1.0",
71 | "data": "H4sICL9JQFwAA2EA3VJNa4MwGL77K0rOdiRRY9KbrK7ssMuUXWYRN0MJqJEkbhTxvy+x4rreB2PvKeT5eJ8nZPQ2doD87LgCuw1AVwP8C9jI00HJoXf4fSOHOleVaK7QzChetbfy8ptbDnrLK222q6ce3vS7Er0RsnsQjeFKW/3rDM6EPddGdJXDwXx7XJQt17o68fzcc7dxn+RJ+ZRmWXJIryKlH7wzPy3H9TSTRO3kAWJRgCAhkDBCGA2CmFLIII5gzCgMsS0SBpgygqIQh5hRjFFE6LJqdTPC5jJV6x5pFmBCowhCeMNb0rvVYwG4S/liu9uWBdgVAN3BoAB+AQbN1WNtUWHOFrFcYwvPnGcpTQEmsBpP/q90ZP+/Ywz/VMfLP/cm7wsfrETjlgMAAA==",
72 | "sequenceNumber": "49545115243490985018280067714973144582180062593244200961",
73 | "approximateArrivalTimestamp": 1428537600
74 | },
75 | "eventSource": "aws:kinesis",
76 | "eventID": "shardId-000000000000:49545115243490985018280067714973144582180062593244200961",
77 | "invokeIdentityArn": "arn:aws:iam::EXAMPLE",
78 | "eventVersion": "1.0",
79 | "eventName": "aws:kinesis:record",
80 | "eventSourceARN": "arn:aws:kinesis:EXAMPLE",
81 | "awsRegion": "us-east-1"
82 | }
83 | ]
84 | }
85 | ```
86 |
87 |
88 | You should expect to see a response with an array of records. The Data attribute in an Kinesis record is Base64 encoded and compressed with the gzip format. You can examine the raw data from the command line using the following Unix commands:
89 |
90 | `echo -n "" | base64 -d | zcat`
91 |
92 | The Base64 decoded and decompressed data is formatted as JSON with the following structure:
93 |
94 | ```json
95 | {
96 | "owner": "111111111111",
97 | "logGroup": "CloudTrail",
98 | "logStream": "111111111111_CloudTrail_us-east-1",
99 | "subscriptionFilters": [
100 | "Destination"
101 | ],
102 | "messageType": "DATA_MESSAGE",
103 | "logEvents": [
104 | {
105 | "id": "31953106606966983378809025079804211143289615424298221568",
106 | "timestamp": 1432826855000,
107 | "message": "{\"eventVersion\":\"1.03\",\"userIdentity\":{\"type\":\"Root\"}"
108 | },
109 | {
110 | "id": "31953106606966983378809025079804211143289615424298221569",
111 | "timestamp": 1432826855000,
112 | "message": "{\"eventVersion\":\"1.03\",\"userIdentity\":{\"type\":\"Root\"}"
113 | },
114 | {
115 | "id": "31953106606966983378809025079804211143289615424298221570",
116 | "timestamp": 1432826855000,
117 | "message": "{\"eventVersion\":\"1.03\",\"userIdentity\":{\"type\":\"Root\"}"
118 | }
119 | ]
120 | }
121 | ```
122 |
--------------------------------------------------------------------------------
/cloudwatchevents/guarddutybenchmark/template_v2.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Transform: AWS::Serverless-2016-10-31
3 | Description: >
  This function is invoked by AWS CloudWatch events in response to a state change in your AWS resources which matches an event target definition. The event payload received is then forwarded to a Sumo Logic HTTP source endpoint.
5 |
6 | # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
7 | Globals:
8 | Function:
9 | Timeout: 300
10 |
11 | Metadata:
12 | AWS::CloudFormation::Interface:
13 | ParameterGroups:
14 | -
15 | Label:
16 | default: "Sumo Logic Deployment Configuration"
17 | Parameters:
18 | - SumoDeployment
19 | - SumoAccessID
20 | - SumoAccessKey
21 | -
22 | Label:
23 | default: "Collection Configuration"
24 | Parameters:
25 | - CollectorName
26 | - SourceName
27 | - SourceCategoryName
28 | - RemoveSumoResourcesOnDeleteStack
29 | ParameterLabels:
30 | SumoDeployment:
31 | default: "Deployment Name"
32 | SumoAccessID:
33 | default: "Access ID"
34 | SumoAccessKey:
35 | default: "Access Key"
36 | CollectorName:
37 | default: "Collector Name"
38 | SourceName:
39 | default: "Source Name"
40 | SourceCategoryName:
41 | default: "Source Category Name"
42 | RemoveSumoResourcesOnDeleteStack:
43 | default: "Remove Sumo Resources On Delete Stack"
44 |
45 | Parameters:
46 | CollectorName:
47 | Type: String
48 | Default: GuarddutyCollector
49 | SourceName:
50 | Type: String
51 | Default: GuarddutyEvents
52 | SourceCategoryName:
53 | Type: String
54 | Default: Labs/AWS/Guardduty
55 | SumoAccessID:
56 | Type: String
57 | SumoAccessKey:
58 | Type: String
59 | SumoDeployment:
60 | Type: String
61 | AllowedValues:
62 | - au
63 | - ca
64 | - de
65 | - eu
66 | - jp
67 | - us2
68 | - us1
69 | Description: "Enter au, ca, de, eu, jp, us2, or us1"
70 | RemoveSumoResourcesOnDeleteStack:
71 | AllowedValues:
72 | - true
73 | - false
74 | Default: false
75 | Description: To delete collector, sources and app when stack is deleted, set this parameter to true. Default is false.
76 | Type: String
77 |
78 | Resources:
79 | CloudWatchEventFunction:
80 | Properties:
81 | CodeUri: s3://appdevstore/e62e525a25bb080e521d8bf64909ea41
82 | Environment:
83 | Variables:
84 | SUMO_ENDPOINT: !GetAtt SumoHTTPSource.SUMO_ENDPOINT
85 | Events:
86 | CloudWatchEventTrigger:
87 | Properties:
88 | Pattern:
89 | source:
90 | - aws.guardduty
91 | Type: CloudWatchEvent
92 | Handler: cloudwatchevents.handler
93 | Runtime: nodejs8.10
94 | Type: AWS::Serverless::Function
95 |
96 | SumoAppUtils:
97 | Type: AWS::Serverless::Application
98 | Properties:
99 | Location:
100 | ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
101 | SemanticVersion: 1.0.5
102 |
103 | SumoHostedCollector:
104 | Type: Custom::Collector
105 | Properties:
106 | ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
107 | Region: !Ref "AWS::Region"
108 | CollectorType: Hosted
109 | RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
110 | CollectorName: !Ref CollectorName
111 | SumoAccessID: !Ref SumoAccessID
112 | SumoAccessKey: !Ref SumoAccessKey
113 | SumoDeployment: !Ref SumoDeployment
114 |
115 | SumoHTTPSource:
116 | Type: Custom::HTTPSource
117 | Properties:
118 | ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
119 | Region: !Ref "AWS::Region"
120 | SourceName: !Ref SourceName
121 | RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
122 | SourceCategory: !Ref SourceCategoryName
123 | CollectorId: !GetAtt SumoHostedCollector.COLLECTOR_ID
124 | SumoAccessID: !Ref SumoAccessID
125 | SumoAccessKey: !Ref SumoAccessKey
126 | SumoDeployment: !Ref SumoDeployment
127 | DateFormat: "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
128 | DateLocatorRegex: '.*"updatedAt":"(.*)".*'
129 |
130 | SumoGuardDutyBenchmarkApp:
131 | Type: Custom::App
132 | Properties:
133 | ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
134 | Region: !Ref "AWS::Region"
135 | AppName: "Amazon GuardDuty Benchmark"
136 | RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
137 | AppSources:
138 | gdbenchmark: !Sub "_sourceCategory=${SourceCategoryName}"
139 | SumoAccessID: !Ref SumoAccessID
140 | SumoAccessKey: !Ref SumoAccessKey
141 | SumoDeployment: !Ref SumoDeployment
142 |
143 | Outputs:
144 |
145 | CloudWatchEventFunction:
146 | Description: "CloudWatchEvent Processor Function ARN"
147 | Value: !GetAtt CloudWatchEventFunction.Arn
148 | GuarddutyBenchmarkAppFolder:
149 | Description: "Folder Name"
150 | Value: !GetAtt SumoGuardDutyBenchmarkApp.APP_FOLDER_NAME
151 |
152 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/sumologsclient.js:
--------------------------------------------------------------------------------
1 | var url = require('url');
2 | var utils = require('./utils');
3 |
4 | function SumoLogsClient(config) {
5 | this.options = config.options || {};
6 | if (config.SumoURL) {
7 | var urlObj = url.parse(config.SumoURL);
8 | this.options.hostname = urlObj.hostname;
9 | this.options.path = urlObj.pathname;
10 | this.options.protocol = urlObj.protocol;
11 | }
12 | this.options.method = 'POST';
13 | this.SUMO_CLIENT_HEADER = config.SUMO_CLIENT_HEADER;
14 | }
15 |
// Build the X-Sumo-* header object for one message.  Values come from the
// config overrides, falling back to the awslogs payload's logStream/logGroup,
// and can be further overridden per message via `_sumo_metadata` (which is
// deleted from the message once consumed).
// NOTE(review): reads config.SUMO_CLIENT_HEADER, not this.SUMO_CLIENT_HEADER
// set in the constructor -- confirm callers pass it on the config object.
SumoLogsClient.prototype.generateHeaders = function(config, message, awslogsData) {
    var sourceCategory = config.sourceCategoryOverride || '';
    var sourceName = config.sourceNameOverride || ((awslogsData) ? awslogsData.logStream : '');
    var sourceHost = config.sourceHostOverride || ((awslogsData) ? awslogsData.logGroup : '');

    var headerObj = {
        'X-Sumo-Name':sourceName, 'X-Sumo-Category':sourceCategory,
        'X-Sumo-Host':sourceHost, 'X-Sumo-Client': config.SUMO_CLIENT_HEADER
    };

    // Well-known _sumo_metadata keys map onto header names; any other key
    // is passed through as a header verbatim.
    var metadataMap = {category: "X-Sumo-Category", sourceName: "X-Sumo-Name", sourceHost: "X-Sumo-Host"};
    if (message.hasOwnProperty('_sumo_metadata')) {
        var metadataOverride = message._sumo_metadata;
        Object.getOwnPropertyNames(metadataOverride).forEach( function(property) {
            if (metadataMap[property]) {
                // `var` hoists targetProperty to the callback scope, so the
                // else-branch assignment below writes the same variable.
                var targetProperty = metadataMap[property];
            } else {
                targetProperty = property;
            }
            headerObj[targetProperty] = metadataOverride[property];
        });
        delete message._sumo_metadata;
    }
    return headerObj;
};
41 |
// Build one HTTP-post promise per metadata bucket.  `messages` maps
// "name:category:host" keys (see getMetaDataKey) to arrays of messages.
SumoLogsClient.prototype.createPromises = function(messages, is_compressed) {
    var self = this;
    return Object.keys(messages).map(function (key) {
        var headerArray = key.split(':');
        var headers = {
            'X-Sumo-Name': headerArray[0],
            'X-Sumo-Category': headerArray[1],
            'X-Sumo-Host': headerArray[2],
            'X-Sumo-Client': self.SUMO_CLIENT_HEADER
        };
        var options = Object.assign({}, self.options);
        // removing headers with 'none'
        // BUGFIX: utils.filterObj invokes the predicate with the VALUE only
        // and keeps entries where it returns falsy, so the predicate must
        // express the "remove" condition.  The previous two-arg (k, v)
        // predicate always saw v === undefined and never removed anything.
        options.headers = utils.filterObj(headers, function (v) {
            return !(v && v.toLowerCase() !== 'none');
        });
        var data = [];
        for (var i = 0; i < messages[key].length; i++) {
            if (messages[key][i] instanceof Object) {
                data.push(JSON.stringify(messages[key][i]));
            } else {
                data.push(messages[key][i]);
            }
        }
        data = data.join("\n");
        var pdata = is_compressed ? utils.compressData(options, data) : Promise.resolve(data);

        // handling catch so that if one promise fails others would still be executed
        return pdata.then(function(payload) {
            return utils.sendRequest(options, payload);
        }).catch(function(err) {
            err.failedBucketKey = key;
            return err;
        });
    });
}
77 |
// Posts all metadata buckets to Sumo Logic and aggregates the outcome.
// Returns a promise resolving to {requestSuccessCnt, messageErrors, failedBucketKeys}.
// Note: createPromises resolves failures to the error object itself, so
// Promise.all here never rejects for a single failed bucket.
SumoLogsClient.prototype.postToSumo = function(messages, is_compressed) {
    var all_promises = this.createPromises(messages, is_compressed);
    return Promise.all(all_promises).then(function (values) {
        console.log(`${values.length} requests finished`);
        var requestSuccessCnt = 0;
        var messageErrors = [];
        var failedBucketKeys = [];
        values.forEach(function (obj) {
            if (obj.status === "SUCCESS") {
                requestSuccessCnt += 1;
            } else {
                // Fix: a failed result may carry .error, .response, or neither
                // (e.g. an Error thrown by compressData). The old code did
                // obj.response.statusMessage unguarded and could itself throw.
                var message;
                if (obj.error) {
                    message = obj.error.message;
                } else if (obj.response) {
                    message = obj.response.statusMessage;
                } else {
                    message = String(obj.message || obj);
                }
                messageErrors.push(message);
                failedBucketKeys.push(obj.failedBucketKey);
            }
        });
        return {
            requestSuccessCnt: requestSuccessCnt,
            messageErrors: messageErrors,
            failedBucketKeys: failedBucketKeys
        };
    });
};
101 |
// Compose the bucket key used to group messages sharing the same metadata headers.
SumoLogsClient.prototype.getMetaDataKey = function(headerObj) {
    var fields = [headerObj['X-Sumo-Name'], headerObj['X-Sumo-Category'], headerObj['X-Sumo-Host']];
    return fields.join(':');
};
105 |
106 |
SumoLogsClient.prototype.createBuckets = function(config, records, awslogsData, isRaw) {
    // Group log records into buckets keyed by their Sumo metadata headers so
    // each bucket can later be posted as a single request.
    // When isRaw is true only the raw message line is kept, otherwise the
    // whole record object is stored.
    var self = this;
    var buckets = {};
    records.forEach(function (log) {
        var headers = self.generateHeaders(config, log.message, awslogsData);
        var bucketKey = self.getMetaDataKey(headers);
        var entry = isRaw ? log.message : log;
        if (bucketKey in buckets) {
            buckets[bucketKey].push(entry);
        } else {
            buckets[bucketKey] = [entry];
        }
    });
    return buckets;
};
123 |
// Public API: expose the SumoLogsClient constructor to consumers of this module.
module.exports = {
    SumoLogsClient: SumoLogsClient
};
127 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/Readme.md:
--------------------------------------------------------------------------------
1 | # SumoLogic Lambda Function for AWS CloudWatch Logs With Dead Letter Queue Support
2 |
This is used for collecting Amazon CloudWatch Logs. It provides two Lambda functions:
4 |
* SumoCWLogsLambda: A Lambda function responsible for sending data to the Sumo Logic HTTP endpoint. It is configured with a dead letter queue (SumoCWDeadLetterQueue) which receives the messages that can't be processed successfully. You can also subscribe other log groups to this function, except its own log group.
6 | * SumoCWProcessDLQLambda: It’s a lambda function responsible for reading messages from dead letter queue and retries sending messages.It gets triggered periodically by AWS CloudWatch Events using schedule rule(SumoCWProcessDLQScheduleRule).
7 |
It is also configured with a CloudWatch Alarm which triggers when the number of messages in the DeadLetterQueue exceeds the threshold defined in the CloudFormation template.
9 |
10 | ### Creating Stack in AWS Cloudformation
11 | you can create the stack by using [aws-cli](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-cli-creating-stack.html) or directly from aws console using webbrowser and uploading DLQLambdaCloudFormation.json. For more details checkout it's [documentation](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Amazon-CloudWatch-Logs)
12 |
13 | ### Configuring Lambda
14 |
The following AWS Lambda environment variables are supported in both the Lambda functions. Please note that both the functions should have the same values configured to avoid inconsistencies.
16 |
17 | * SUMO_ENDPOINT (REQUIRED) - SumoLogic HTTP Collector endpoint URL.
18 | * ENCODING (OPTIONAL) - Encoding to use when decoding CloudWatch log events. Default is 'utf-8'.
19 | * SOURCE_CATEGORY_OVERRIDE (OPTIONAL) - Override _sourceCategory metadata field within SumoLogic.
20 | * SOURCE_HOST_OVERRIDE (OPTIONAL) - Override _sourceHost metadata field within SumoLogic.
21 | * SOURCE_NAME_OVERRIDE (OPTIONAL) - Override _sourceName metadata field within SumoLogic.
22 | * INCLUDE_LOG_INFO (OPTIONAL) - Set it to true when loggroup/logstream values needs to be included in logs. Default is false
23 | * LOG_FORMAT - Default is Others. One can choose VPC-JSON for VPC flow logs in json format and VPC-RAW for only RAW message line
24 |
25 | ### Configuring Lambda for VPC Flow Logs
26 | The following AWS Lambda environment variables are supported in both the lambda functions for VPC flow logs.
27 |
28 | * INCLUDE_SECURITY_GROUP_INFO (OPTIONAL) - Set it to true when security-group-ids/direction values needs to be included in logs.The lambda function fetches list of Elastic Network Interfaces using describeNetworkInterfaces api.
29 | One needs to provide permission to lambda by adding the following inline policy in SumoCWProcessDLQLambda Role.
30 | Refer this [doc](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-remove-policies-console) for adding inline policy for a role. Choose [Creating Policies on the JSON Tab](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-json-editor) method and paste the below json after adding the arn of both the lambda functions.
31 | ```
32 | {
33 | "Version": "2012-10-17",
34 | "Statement": [
35 | {
36 | "Effect": "Allow",
37 | "Action": "ec2:DescribeNetworkInterfaces",
38 | "Resource": [
39 |
40 | ]
41 | }
42 | ]
43 | }
44 | ```
45 | This will add two extra keys(security-group-ids and direction) in logs. This option works only when LOG_FORMAT is set to VPC-JSON.
46 | * VPC_CIDR_PREFIX (OPTIONAL) - Comma separated list of ip prefixes for filtering out internal traffic. For Ex: lets say we have `vpcCIDRprefix= 10.8.0.0,10.9.0.0` then it will filter out logs whose destinationIP and sourceIP matches any of the two prefixes 10.8.0.0 and 10.9.0.0. This option works only when LOG_FORMAT is set to VPC-JSON or VPC-RAW.
47 |
48 |
49 | SumoCWProcessDLQLambda supports one extra environment variable
* NUM_OF_WORKERS(REQUIRED): Its default value is 4. It controls the number of instances of SumoCWProcessDLQLambda to spawn if there is no failure in the first attempt. It helps in faster processing of pending messages in the dead letter queue.
51 |
52 | # Dynamic Metadata Fields
53 |
54 | The lambda supports dynamically overriding the _sourceName, _sourceHost and _sourceCategory per log message by setting `_sumo_metadata` within a json log.
55 |
56 | This can be useful when writing to CloudWatch Logs via a lambda function.
57 |
58 | For example:
59 |
60 | ```
61 | exports.handler = (event, context, callback) => {
62 |
63 | var serverIp = '123.123.123.123'
64 |
65 | console.log(JSON.stringify({
66 | 'message': 'something happened..',
67 | '_sumo_metadata': {
68 | 'category': 'prod/appa/console',
69 | 'sourceName': 'other_source',
70 | 'sourceHost': serverIp
71 | }
72 |
73 | }));
74 | console.log('some other log message with default sourceCategory');
75 | };
76 |
77 | ```
78 |
79 | ### For Developers
80 |
81 | Installing Dependencies
82 | ```
83 | npm install
84 | ```
85 |
86 | Building zip file
87 | ```
88 | npm run build
89 | ```
90 | Upload the generated cloudwatchlogs-with-dlq.zip in S3 bucket(don't forget to change bucket name and key in cloudformation template)
91 |
92 | Running the test cases
93 |
94 | ```
95 | python test_cwl_lambda.py
96 | ```
97 | Run the above command after building the zip file
98 |
--------------------------------------------------------------------------------
/cloudwatchevents/guarddutybenchmark/packaged_v2.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'This function is invoked by AWS CloudWatch events in response to state
3 | change in your AWS resources which matches a event target definition. The event
4 | payload received is then forwarded to Sumo Logic HTTP source endpoint.
5 |
6 | '
7 | Globals:
8 | Function:
9 | Timeout: 300
10 | Metadata:
11 | AWS::CloudFormation::Interface:
12 | ParameterGroups:
13 | - Label:
14 | default: Sumo Logic Deployment Configuration
15 | Parameters:
16 | - SumoDeployment
17 | - SumoAccessID
18 | - SumoAccessKey
19 | - Label:
20 | default: Collection Configuration
21 | Parameters:
22 | - CollectorName
23 | - SourceName
24 | - SourceCategoryName
25 | - RemoveSumoResourcesOnDeleteStack
26 | ParameterLabels:
27 | CollectorName:
28 | default: Collector Name
29 | RemoveSumoResourcesOnDeleteStack:
30 | default: Remove Sumo Resources On Delete Stack
31 | SourceCategoryName:
32 | default: Source Category Name
33 | SourceName:
34 | default: Source Name
35 | SumoAccessID:
36 | default: Access ID
37 | SumoAccessKey:
38 | default: Access Key
39 | SumoDeployment:
40 | default: Deployment Name
41 | Outputs:
42 | CloudWatchEventFunction:
43 | Description: CloudWatchEvent Processor Function ARN
44 | Value:
45 | Fn::GetAtt:
46 | - CloudWatchEventFunction
47 | - Arn
48 | GuarddutyBenchmarkAppFolder:
49 | Description: Folder Name
50 | Value:
51 | Fn::GetAtt:
52 | - SumoGuardDutyBenchmarkApp
53 | - APP_FOLDER_NAME
54 | Parameters:
55 | CollectorName:
56 | Default: GuarddutyCollector
57 | Type: String
58 | RemoveSumoResourcesOnDeleteStack:
59 | AllowedValues:
60 | - true
61 | - false
62 | Default: false
63 | Description: To delete collector, sources and app when stack is deleted, set this
64 | parameter to true. Default is false.
65 | Type: String
66 | SourceCategoryName:
67 | Default: Labs/AWS/Guardduty
68 | Type: String
69 | SourceName:
70 | Default: GuarddutyEvents
71 | Type: String
72 | SumoAccessID:
73 | Type: String
74 | SumoAccessKey:
75 | Type: String
76 | SumoDeployment:
77 | AllowedValues:
78 | - au
79 | - ca
80 | - de
81 | - eu
82 | - jp
83 | - us2
84 | - us1
85 | Description: Enter au, ca, de, eu, jp, us2, or us1
86 | Type: String
87 | Resources:
88 | CloudWatchEventFunction:
89 | Properties:
90 | CodeUri: s3://appdevstore/e62e525a25bb080e521d8bf64909ea41
91 | Environment:
92 | Variables:
93 | SUMO_ENDPOINT:
94 | Fn::GetAtt:
95 | - SumoHTTPSource
96 | - SUMO_ENDPOINT
97 | Events:
98 | CloudWatchEventTrigger:
99 | Properties:
100 | Pattern:
101 | source:
102 | - aws.guardduty
103 | Type: CloudWatchEvent
104 | Handler: cloudwatchevents.handler
105 | Runtime: nodejs8.10
106 | Type: AWS::Serverless::Function
107 | SumoAppUtils:
108 | Properties:
109 | Location:
110 | ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
111 | SemanticVersion: 1.0.5
112 | Type: AWS::Serverless::Application
113 | SumoGuardDutyBenchmarkApp:
114 | Properties:
115 | AppName: Amazon GuardDuty Benchmark
116 | AppSources:
117 | gdbenchmark:
118 | Fn::Sub: _sourceCategory=${SourceCategoryName}
119 | Region:
120 | Ref: AWS::Region
121 | RemoveOnDeleteStack:
122 | Ref: RemoveSumoResourcesOnDeleteStack
123 | ServiceToken:
124 | Fn::GetAtt:
125 | - SumoAppUtils
126 | - Outputs.SumoAppUtilsFunction
127 | SumoAccessID:
128 | Ref: SumoAccessID
129 | SumoAccessKey:
130 | Ref: SumoAccessKey
131 | SumoDeployment:
132 | Ref: SumoDeployment
133 | Type: Custom::App
134 | SumoHTTPSource:
135 | Properties:
136 | CollectorId:
137 | Fn::GetAtt:
138 | - SumoHostedCollector
139 | - COLLECTOR_ID
140 | DateFormat: yyyy-MM-dd'T'HH:mm:ss.SSS'Z'
141 | DateLocatorRegex: .*"updatedAt":"(.*)".*
142 | Region:
143 | Ref: AWS::Region
144 | RemoveOnDeleteStack:
145 | Ref: RemoveSumoResourcesOnDeleteStack
146 | ServiceToken:
147 | Fn::GetAtt:
148 | - SumoAppUtils
149 | - Outputs.SumoAppUtilsFunction
150 | SourceCategory:
151 | Ref: SourceCategoryName
152 | SourceName:
153 | Ref: SourceName
154 | SumoAccessID:
155 | Ref: SumoAccessID
156 | SumoAccessKey:
157 | Ref: SumoAccessKey
158 | SumoDeployment:
159 | Ref: SumoDeployment
160 | Type: Custom::HTTPSource
161 | SumoHostedCollector:
162 | Properties:
163 | CollectorName:
164 | Ref: CollectorName
165 | CollectorType: Hosted
166 | Region:
167 | Ref: AWS::Region
168 | RemoveOnDeleteStack:
169 | Ref: RemoveSumoResourcesOnDeleteStack
170 | ServiceToken:
171 | Fn::GetAtt:
172 | - SumoAppUtils
173 | - Outputs.SumoAppUtilsFunction
174 | SumoAccessID:
175 | Ref: SumoAccessID
176 | SumoAccessKey:
177 | Ref: SumoAccessKey
178 | SumoDeployment:
179 | Ref: SumoDeployment
180 | Type: Custom::Collector
181 | Transform: AWS::Serverless-2016-10-31
182 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Sumo Logic AWS Lambda Functions [](https://travis-ci.org/SumoLogic/sumologic-aws-lambda)
2 | ==============================
3 |
4 | ## What does AWS Lambda do? ##
5 | AWS Lambda is a compute service that allows users to run code, in response to events, without having to provision and manage servers. A Lambda Function can be triggered automatically from other Amazon services, or from a web or mobile application. For more information, please visit the [AWS Lambda site](https://aws.amazon.com/lambda/).
6 |
7 | ## What do Sumo Logic Lambda Functions do? ##
8 | Sumo Logic Lambda Functions are designed to collect and process data from a variety of sources and pass it onto the Sumo Logic platform. Here, the data can be stored, aggregated, searched, and visualized for a variety of insightful use cases.
9 |
10 | ## What are the different Sumo Logic Lambda Functions available? ##
11 | We put the Lambda functions to read from a particular AWS service (e.g CloudWatch Logs and S3) under each specific folder. Each folder may then have its own instructions to setup the functions.
12 |
13 | ## Collection Solutions
14 | | FunctionName | Description | Collection Use Cases | Setup Documentation
15 | | -------------| ----------- | -------------- | ------------------- |
16 | |[SumoLogic Lambda Function for AWS CloudWatch Logs With Dead Letter Queue Support](cloudwatchlogs-with-dlq)| This project comes with Cloudformation template and two lambda functions which sends CloudWatch logs to Sumo Logic HTTP source endpoint.The first function(invoked by CloudWatch) is configured with DLQ and the second function(invoked periodically by CloudWatch Events) reads from DLQ.| [AWS Lambda ULM App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/AWS_Lambda_ULM/Collect_Logs_and_Metrics_for_AWS_Lambda_ULM) | [Docs](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Amazon-CloudWatch-Logs)|
17 | |[SumoLogic Function for AWS CloudWatch Events](cloudwatchevents) | This function is invoked by AWS CloudWatch events in response to state change in your AWS resources which matches a event target definition. The event payload received is then forwarded to Sumo Logic HTTP source endpoint. | [AWS GuardDuty App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-GuardDuty/Collect-Amazon-GuardDuty-Log-Files) | [Docs](cloudwatchevents/README.md) |
18 | |[SumoLogic Function for Amazon Inspector](inspector) | This function subscribes to a SNS topic where Amazon Inspector publishes its findings.It receives the message payload as an input parameter, transforms it and sends it to Sumo Logic HTTP source endpoint| [Amazon Inspector](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-Inspector-App/) | [Docs](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-Inspector-App/01-Collect-Data-for-Amazon-Inspector) |
19 | |[Kinesis to Sumo Logic](kinesis)| This function is invoked by AWS Lambda after it detects new records in Kinesis stream. The received collection of events are decompressed, transformed and send to Sumo Logic HTTP source endpoint | | [Docs](kinesis/README.md#lambda-configuration) |
|[SumoLogic Lambda Function for AWS CloudWatch Logs](cloudwatchlogs)| This function subscribes to CloudWatch Log Group and is invoked by AWS CloudWatch with log messages as payload. The records received are decompressed, transformed and forwarded to Sumo Logic HTTP source endpoint in chunks. While the function is simpler than the DLQ-based solution above, it doesn't handle failures and retries properly, thus not recommended. | Not Recommended | [Docs](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Create-an-Amazon-Lambda-Function) |
21 | | [S3](s3) AND
[Cloudtrail S3 to Sumo Logic](cloudtrail_s3)| This function receives S3 notifications on new files uploaded to the source S3 bucket, then reads these files, unzips them, and breakdown the records before finally sending to HTTP hosted collector endpoint. | DEPRECATED | [Docs](s3/README.md#lambda-setup)
[Docs](cloudtrail_s3#lambda-setup)|
22 |
23 | ## Helper Functions
24 |
25 | | FunctionName | Description | Setup Documentation
26 | | -------------| ----------- | ------------------- |
|[Kinesis Firehose Processor](kinesisfirehose-processor)|This function is used for transforming streaming data from Kinesis Firehose before it is sent to the destination. | [Docs](kinesisfirehose-processor#setting-up-the-lambda-function) |
28 | |[LogGroup Lambda Connector](loggroup-lambda-connector) | This function is used to automatically subscribe newly created and existing Cloudwatch LogGroups to a Lambda function. | [Docs](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Auto-Subscribe_AWS_Log_Groups_to_a_Lambda_Function) |
29 |
30 |
31 | Supported Runtimes
32 | ======================
33 |
34 | * All the nodejs functions are tested with nodejs runtime 4.3 and 8.10.
35 |
36 | * All the python functions are tested with python version 2.7.
37 |
38 | Testing with TravisCI
39 | ======================
40 |
41 | * All the test are currently in python and travis.yml is configured to run any file with prefix "test_" present in lambda function's folder.
42 |
43 | * All the dependencies(defined in package.json) of lambda function are installed first and then build is created.
44 |
45 | * For adding test for new function you need to specify FUNCTION_DIR(lambda function's folder) and node_js(node js version) under jobs field in travis.yml. This is done because currently testing same function in parallel with different node versions throws function resource exists error (name collision) and therefore are run sequentially.
46 |
47 |
48 | ### TLS 1.2 Requirement
49 |
50 | Sumo Logic only accepts connections from clients using TLS version 1.2 or greater. To utilize the content of this repo, ensure that it's running in an execution environment that is configured to use TLS 1.2 or greater.
51 |
--------------------------------------------------------------------------------
/kinesisfirehose-processor/test-kinesisfirehose-lambda-cft.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Resources": {
4 | "SumoKFDeliveryStream": {
5 | "Type" : "AWS::KinesisFirehose::DeliveryStream",
6 | "Properties" : {
7 | "DeliveryStreamName": { "Fn::Join": [ "-", [ "SumoKFDeliveryStream", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
8 | "DeliveryStreamType" : "DirectPut",
9 | "ExtendedS3DestinationConfiguration": {
10 | "BucketARN": {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref":"SumoKFS3bucket"}]]},
11 | "BufferingHints": {
12 | "IntervalInSeconds": "300",
13 | "SizeInMBs": "5"
14 | },
15 | "CompressionFormat": "UNCOMPRESSED",
16 | "Prefix": "firehose/",
17 | "RoleARN": {"Fn::GetAtt" : ["SumoKFDeliveryRole", "Arn"] },
18 | "ProcessingConfiguration" : {
19 | "Enabled": "true",
20 | "Processors": [{
21 | "Parameters": [
22 | {
23 | "ParameterName": "LambdaArn",
24 | "ParameterValue": { "Fn::GetAtt": [ "SumoKFLambdaResources", "Outputs.SumoKFLambdaProcessorArn" ]}
25 | },
26 | {
27 | "ParameterName": "NumberOfRetries",
28 | "ParameterValue": "3"
29 | },
30 | {
31 | "ParameterName": "BufferSizeInMBs",
32 | "ParameterValue": "3"
33 | },
34 | {
35 | "ParameterName": "BufferIntervalInSeconds",
36 | "ParameterValue": "60"
                        }
                    ],
39 | "Type": "Lambda"
40 | }]
41 | },
42 | "CloudWatchLoggingOptions": {
43 | "Enabled" : false
44 | },
45 | "EncryptionConfiguration": {
46 | "NoEncryptionConfig": "NoEncryption"
47 | },
48 | "S3BackupMode": "Disabled"
49 | }
50 | }
51 | },
52 | "SumoKFS3bucket": {
53 | "Type": "AWS::S3::Bucket",
54 | "Properties": {
55 | "VersioningConfiguration": {
56 | "Status": "Enabled"
57 | }
58 | }
59 | },
60 | "SumoKFDeliveryRole": {
61 | "Type": "AWS::IAM::Role",
62 | "Properties": {
63 | "AssumeRolePolicyDocument": {
64 | "Version": "2012-10-17",
65 | "Statement": [{
66 | "Sid": "",
67 | "Effect": "Allow",
68 | "Principal": {
69 | "Service": "firehose.amazonaws.com"
70 | },
71 | "Action": "sts:AssumeRole",
72 | "Condition": {
73 | "StringEquals": {
74 | "sts:ExternalId": {"Ref":"AWS::AccountId"}
75 | }
76 | }
77 | }]
78 | }
79 | }
80 | },
81 | "SumoKFDeliveryPolicy": {
82 | "Type": "AWS::IAM::Policy",
83 | "Properties": {
84 | "PolicyName": { "Fn::Join": [ "-", [ "firehose_delivery_policy", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
85 | "PolicyDocument": {
86 | "Version": "2012-10-17",
87 | "Statement": [
88 | {
89 | "Effect": "Allow",
90 | "Action": [
91 | "s3:AbortMultipartUpload",
92 | "s3:GetBucketLocation",
93 | "s3:GetObject",
94 | "s3:ListBucket",
95 | "s3:ListBucketMultipartUploads",
96 | "s3:PutObject"
97 | ],
98 | "Resource": [
99 | {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref":"SumoKFS3bucket"}]]},
100 | {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref":"SumoKFS3bucket"}, "*"]]}
101 | ]
102 | },
103 | {
104 | "Sid": "",
105 | "Effect": "Allow",
106 | "Action": [
107 | "lambda:InvokeFunction",
108 | "lambda:GetFunctionConfiguration"
109 | ],
110 | "Resource": { "Fn::GetAtt": [ "SumoKFLambdaResources", "Outputs.SumoKFLambdaProcessorArn" ]}
111 | }
112 | ]
113 | },
114 | "Roles": [{"Ref": "SumoKFDeliveryRole"}]
115 | }
116 | },
117 | "SumoKFLambdaResources" : {
118 | "Type" : "AWS::CloudFormation::Stack",
119 | "Properties" : {
120 | "TemplateURL" : "https://s3.amazonaws.com/appdev-cloudformation-templates/kinesisfirehose-lambda-cft.json",
121 | "TimeoutInMinutes" : "10"
122 | }
123 | }
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/cloudwatchevents/guardduty/cloudwatchevents.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 | "AWSTemplateFormatVersion": "2010-09-09",
4 | "Outputs": {
5 | "CloudWatchEventFunction": {
6 | "Description": "CloudWatchEvent Processor Function ARN",
7 | "Value": {
8 | "Fn::GetAtt": [
9 | "CloudWatchEventFunction",
10 | "Arn"
11 | ]
12 | },
13 | "Export" : {
14 | "Name" : { "Fn::Join": [ "-", [ "CloudWatchEventFunctionArn", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] }
15 | }
16 | }
17 | },
18 | "Mappings" : {
19 | "RegionMap" : {
20 | "us-east-1": {"bucketname": "appdevzipfiles-us-east-1"},
21 | "us-east-2": {"bucketname": "appdevzipfiles-us-east-2"},
22 | "us-west-1": {"bucketname": "appdevzipfiles-us-west-1"},
23 | "us-west-2": {"bucketname": "appdevzipfiles-us-west-2"},
24 | "ap-south-1": {"bucketname": "appdevzipfiles-ap-south-1"},
25 | "ap-northeast-2": {"bucketname": "appdevzipfiles-ap-northeast-2"},
26 | "ap-southeast-1": {"bucketname": "appdevzipfiles-ap-southeast-1"},
27 | "ap-southeast-2": {"bucketname": "appdevzipfiles-ap-southeast-2"},
28 | "ap-northeast-1": {"bucketname": "appdevzipfiles-ap-northeast-1"},
29 | "ca-central-1": {"bucketname": "appdevzipfiles-ca-central-1"},
30 | "eu-central-1": {"bucketname": "appdevzipfiles-eu-central-1"},
31 | "eu-west-1": {"bucketname": "appdevzipfiles-eu-west-1"},
32 | "eu-west-2": {"bucketname": "appdevzipfiles-eu-west-2"},
33 | "eu-west-3": {"bucketname": "appdevzipfiles-eu-west-3"},
34 | "eu-north-1": {"bucketname": "appdevzipfiles-eu-north-1s"},
35 | "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"}
36 | }
37 | },
38 | "Resources": {
39 | "CloudWatchEventFunctionCloudWatchEventTrigger": {
40 | "Type": "AWS::Events::Rule",
41 | "Properties": {
42 | "EventPattern": {
43 | "source": [
44 | "aws.guardduty"
45 | ]
46 | },
47 | "Targets": [
48 | {
49 | "Id": "CloudWatchEventFunctionCloudWatchEventTriggerLambdaTarget",
50 | "Arn": {
51 | "Fn::GetAtt": [
52 | "CloudWatchEventFunction",
53 | "Arn"
54 | ]
55 | }
56 | }
57 | ]
58 | }
59 | },
60 | "CloudWatchEventFunction": {
61 | "Type": "AWS::Lambda::Function",
62 | "Properties": {
63 | "Code": {
64 | "S3Bucket": { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
65 | "S3Key": "guardduty.zip"
66 | },
67 | "Tags": [
68 | {
69 | "Value": "SAM",
70 | "Key": "lambda:createdBy"
71 | }
72 | ],
73 | "Environment": {
74 | "Variables": {
75 | "SUMO_ENDPOINT": {
76 | "Ref": "SumoEndpointUrl"
77 | }
78 | }
79 | },
80 | "Handler": "cloudwatchevents.handler",
81 | "Role": {
82 | "Fn::GetAtt": [
83 | "CloudWatchEventFunctionRole",
84 | "Arn"
85 | ]
86 | },
87 | "Timeout": 300,
88 | "Runtime": "nodejs8.10"
89 | }
90 | },
91 | "CloudWatchEventFunctionCloudWatchEventTriggerPermission": {
92 | "Type": "AWS::Lambda::Permission",
93 | "Properties": {
94 | "Action": "lambda:invokeFunction",
95 | "Principal": "events.amazonaws.com",
96 | "FunctionName": {
97 | "Ref": "CloudWatchEventFunction"
98 | },
99 | "SourceArn": {
100 | "Fn::GetAtt": [
101 | "CloudWatchEventFunctionCloudWatchEventTrigger",
102 | "Arn"
103 | ]
104 | }
105 | }
106 | },
107 | "CloudWatchEventFunctionRole": {
108 | "Type": "AWS::IAM::Role",
109 | "Properties": {
110 | "ManagedPolicyArns": [
111 | "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
112 | ],
113 | "AssumeRolePolicyDocument": {
114 | "Version": "2012-10-17",
115 | "Statement": [
116 | {
117 | "Action": [
118 | "sts:AssumeRole"
119 | ],
120 | "Effect": "Allow",
121 | "Principal": {
122 | "Service": [
123 | "lambda.amazonaws.com"
124 | ]
125 | }
126 | }
127 | ]
128 | }
129 | }
130 | }
131 | },
132 | "Description": "This function is invoked by AWS CloudWatch events in response to state change in your AWS resources which matches a event target definition. The event payload received is then forwarded to Sumo Logic HTTP source endpoint.\n",
133 | "Parameters": {
134 | "SumoEndpointUrl": {
135 | "Type": "String"
136 | }
137 | }
138 |
139 | }
140 |
--------------------------------------------------------------------------------
/cloudwatchevents/src/cloudwatchevents.js:
--------------------------------------------------------------------------------
1 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 | // CloudWatch Events to SumoLogic //
3 | // https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents //
4 | // //
5 | // YOU MUST CREATE A SUMO LOGIC ENDPOINT CALLED SUMO_ENDPOINT AND PASTE IN ENVIRONMENTAL VARIABLES BELOW //
6 | // https://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source //
7 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
8 |
// SumoLogic Endpoint to post logs
var SumoURL = process.env.SUMO_ENDPOINT;

// For some beta AWS services, the default is to remove the outer fields of the received object since they are not useful.
// change this if necessary.
var removeOuterFields = false;

// The following parameters override the sourceCategoryOverride, sourceHostOverride and sourceNameOverride metadata fields within SumoLogic.
// Note these can also be overridden via json within the message payload. See the README for more information.
var sourceCategoryOverride = process.env.SOURCE_CATEGORY_OVERRIDE || '';  // If empty sourceCategoryOverride will not be overridden
var sourceHostOverride = process.env.SOURCE_HOST_OVERRIDE || '';          // If empty sourceHostOverride will not be set to the name of the logGroup
var sourceNameOverride = process.env.SOURCE_NAME_OVERRIDE || '';          // If empty sourceNameOverride will not be set to the name of the logStream

// Environment variables always arrive as strings; coerce to numbers so the
// retry arithmetic and setTimeout delays below operate on real numbers
// instead of relying on implicit string coercion.
var retryInterval = parseInt(process.env.RETRY_INTERVAL, 10) || 5000; // the interval in millisecs between retries
var numOfRetries = parseInt(process.env.NUMBER_OF_RETRIES, 10) || 3;  // the number of retries
24 |
25 | var https = require('https');
26 | var zlib = require('zlib');
27 | var url = require('url');
28 |
29 | Promise.retryMax = function(fn,retry,interval,fnParams) {
30 | return fn.apply(this,fnParams).catch( err => {
31 | var waitTime = typeof interval === 'function' ? interval() : interval;
32 | console.log("Retries left " + (retry-1) + " delay(in ms) " + waitTime);
33 | return (retry>1? Promise.wait(waitTime).then(()=> Promise.retryMax(fn,retry-1,interval, fnParams)):Promise.reject(err));
34 | });
35 | }
36 |
37 | Promise.wait = function(delay) {
38 | return new Promise((fulfill,reject)=> {
39 | //console.log(Date.now());
40 | setTimeout(fulfill,delay||0);
41 | });
42 | };
43 |
// Return a delay generator yielding seed, 2*seed, 3*seed, ... on each call.
// NOTE(review): despite the name, growth is linear in the call count.
function exponentialBackoff(seed) {
    var attempt = 0;
    return function () {
        attempt += 1;
        return attempt * seed;
    };
}
51 |
// Post each metadata bucket in `messages` to the Sumo Logic HTTP endpoint
// (module-level SumoURL) and invoke `callback` exactly once after every
// bucket has either been delivered or exhausted its retries:
//   callback('errors: ...')    when at least one bucket failed
//   callback(null, "Success")  when all buckets were delivered
function postToSumo(callback, messages) {
    var messagesTotal = Object.keys(messages).length;
    var messagesSent = 0;
    var messageErrors = [];

    var urlObject = url.parse(SumoURL);
    var options = {
        'hostname': urlObject.hostname,
        'path': urlObject.pathname,
        'method': 'POST'
    };

    // Fires the lambda callback once all buckets are accounted for.
    var finalizeContext = function () {
        var total = messagesSent + messageErrors.length;
        if (total == messagesTotal) {
            console.log('messagesSent: ' + messagesSent + ' messagesErrors: ' + messageErrors.length);
            if (messageErrors.length > 0) {
                callback('errors: ' + messageErrors);
            } else {
                callback(null, "Success");
            }
        }
    };

    function httpSend(options, headers, data) {
        return new Promise( (resolve,reject) => {
            // Fix: clone per request. Buckets are sent concurrently and the
            // old code mutated the shared `options` object, letting one
            // bucket's headers clobber another's mid-flight.
            var curOptions = Object.assign({}, options);
            curOptions.headers = headers;
            var req = https.request(curOptions, function (res) {
                var body = '';
                res.setEncoding('utf8');
                res.on('data', function (chunk) {
                    body += chunk; // body is accumulated but only the status code is checked
                });
                res.on('end', function () {
                    if (res.statusCode == 200) {
                        resolve(body);
                    } else {
                        reject({'error':'HTTP Return code ' + res.statusCode,'res':res});
                    }
                });
            });
            req.on('error', function (e) {
                reject({'error':e,'res':null});
            });
            // one JSON line per event in the bucket
            for (var i = 0; i < data.length; i++) {
                req.write(JSON.stringify(data[i]) + '\n');
            }
            console.log("sending to Sumo...")
            req.end();
        });
    }

    Object.keys(messages).forEach(function (key, index) {
        // bucket key layout: sourceName:sourceCategory:sourceHost
        var headerArray = key.split(':');
        var headers = {
            'X-Sumo-Name': headerArray[0],
            'X-Sumo-Category': headerArray[1],
            'X-Sumo-Host': headerArray[2],
            'X-Sumo-Client': 'cloudwatchevents-aws-lambda'
        };
        Promise.retryMax(httpSend, numOfRetries, retryInterval, [options, headers, messages[key]]).then((body)=> {
            messagesSent++;
            finalizeContext()
        }).catch((e) => {
            messageErrors.push(e.error);
            finalizeContext();
        });
    });
}
121 |
122 | exports.handler = function (event, context, callback) {
123 |
124 | // Used to hold chunks of messages to post to SumoLogic
125 | var messageList = {};
126 | var final_event;
127 | // Validate URL has been set
128 | var urlObject = url.parse(SumoURL);
129 | if (urlObject.protocol != 'https:' || urlObject.host === null || urlObject.path === null) {
130 | callback('Invalid SUMO_ENDPOINT environment variable: ' + SumoURL);
131 | }
132 |
133 | //console.log(event);
134 | if ((event.source==="aws.guardduty") || (removeOuterFields)) {
135 | final_event =event.detail;
136 | } else {
137 | final_event = event;
138 | }
139 | messageList[sourceNameOverride+':'+sourceCategoryOverride+':'+sourceHostOverride]=[final_event];
140 | postToSumo(callback, messageList);
141 | };
142 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/vpcutils.js:
--------------------------------------------------------------------------------
1 | var find = require('lodash.find');
2 | var EC2 = require('aws-sdk/clients/ec2');
3 | var jmespath = require('jmespath');
4 | var ec2 = null;
5 | /*
6 | VPC Log Format
7 | version The VPC Flow Logs version.
8 | account-id The AWS account ID for the flow log.
9 | interface-id The ID of the network interface for which the traffic is recorded.
10 | srcaddr The source IPv4 or IPv6 address.
11 | dstaddr The destination IPv4 or IPv6 address.
12 | srcport The source port of the traffic.
13 | dstport The destination port of the traffic.
14 | protocol The IANA protocol number of the traffic. For more information, see Assigned Internet Protocol Numbers.
15 | packets The number of packets transferred during the capture window.
16 | bytes The number of bytes transferred during the capture window.
17 | start The time, in Unix seconds, of the start of the capture window.
18 | end The time, in Unix seconds, of the end of the capture window.
19 | action The action associated with the traffic:
20 | ACCEPT: The recorded traffic was permitted by the security groups or network ACLs.
21 | REJECT: The recorded traffic was not permitted by the security groups or network ACLs.
22 | log-status The logging status of the flow log:
23 | OK: Data is logging normally to the chosen destinations.
24 | NODATA: There was no network traffic to or from the network interface during the capture window.
25 | SKIPDATA: Some flow log records were skipped during the capture window. This may be because of an internal capacity constraint, or an internal error.
26 | */
27 |
/**
 * Drops VPC flow-log records whose source AND destination addresses both fall
 * inside the configured CIDR prefixes, i.e. purely internal traffic.
 *
 * @param vpcCIDRPrefix comma-separated list of address prefixes (e.g. "10.0,172.16");
 *                      falsy means "keep everything"
 * @param records       flow-log records; record.message is the space-separated
 *                      flow-log line (fields [3]=srcaddr, [4]=dstaddr)
 * @return the records that are not internal-to-internal
 */
function discardInternalTraffic(vpcCIDRPrefix, records) {
    if (!vpcCIDRPrefix) {
        return records;
    }
    // Hoisted out of the loop: the prefix list is identical for every record.
    var vpcCIDRPrefixes = vpcCIDRPrefix.split(",").map((x) => x.trim()).filter((x) => x);
    var isInternal = function (addr) {
        return vpcCIDRPrefixes.some((prefix) => addr.startsWith(prefix));
    };
    var filteredRecords = [];
    records.forEach(function (log) {
        var vpcMessage = log.message.split(" ");
        var srcaddr = vpcMessage[3];
        var dstaddr = vpcMessage[4];
        if (!(isInternal(srcaddr) && isInternal(dstaddr))) {
            filteredRecords.push(log);
        }
    });
    return filteredRecords;
}
46 |
47 |
/**
 * Fetches the network interfaces whose private IPs match any of the supplied
 * addresses.
 *
 * @param allIPaddresses array of private IPv4 address strings to filter on
 * @return `Promise` resolving to the DescribeNetworkInterfaces response
 */
function listNetworkInterfaces(allIPaddresses) {
    // Lazily create the EC2 client once, then reuse it across invocations.
    ec2 = ec2 || new EC2({region: process.env.AWS_REGION});
    var privateIpFilter = {
        Name: 'private-ip-address',
        Values: allIPaddresses
    };
    return ec2.describeNetworkInterfaces({Filters: [privateIpFilter]}).promise();
}
67 |
/**
 * Looks up the ENIs owning the given private IP addresses and projects each
 * into a small object: unique id, security groups, primary private IP,
 * subnet and VPC ids.
 *
 * Per AWS documentation, VPC Flow Logs always report an ENI by its primary
 * private IPv4 address, so only the primary address is captured here.
 *
 * @see http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html
 *
 * Resolves to a structure like:
 *   [
 *     { interfaceId: 'eni-c1a7da8c',
 *       securityGroupIds: [ 'sg-b2b454d4' ],
 *       ipAddress: [ '10.0.1.24' ],
 *       subnetId: '...', vpcId: '...' },
 *     ...
 *   ]
 */
function buildEniToSecurityGroupMapping(allIPaddresses) {
    //console.log(allIPaddresses.length + " ip addresses found in logs");
    var projection = `NetworkInterfaces[].{
                interfaceId: NetworkInterfaceId,
                securityGroupIds: Groups[].GroupId,
                ipAddress: PrivateIpAddresses[?Primary].PrivateIpAddress,
                subnetId: SubnetId,
                vpcId: VpcId
            }`;
    return listNetworkInterfaces(allIPaddresses).then(function (interfaces) {
        console.log(interfaces["NetworkInterfaces"].length + " Interfaces Fetched");
        return jmespath.search(interfaces, projection);
    });
}
// Enriches VPC flow-log records (matched on the flow log's interface-id,
// field [2]) with the owning ENI's security groups, subnet, vpc, region and
// a computed traffic direction (inbound/outbound/internal).
function includeSecurityGroupIds(records) {
    // Collect the distinct src/dst addresses so the ENI lookup is one call.
    var allIPaddresses = [];
    records.forEach(function(log) {
        // flow log fields: [2]=interface-id [3]=srcaddr [4]=dstaddr
        var vpcMessage = log.message.split(" ");
        allIPaddresses.push(vpcMessage[3]);
        allIPaddresses.push(vpcMessage[4]);
    });
    allIPaddresses = Array.from(new Set(allIPaddresses));
    return buildEniToSecurityGroupMapping(allIPaddresses).then(function (mapping) {
        records.forEach(function (log) {
            var vpcMessage = log.message.split(" ");
            var eniData = find(mapping, {'interfaceId': vpcMessage[2]});
            if (eniData && eniData.ipAddress.length > 0) {
                log['security-group-ids'] = eniData.securityGroupIds;
                // NOTE(review): eniData.ipAddress is an array (jmespath
                // projection) while vpcMessage[3]/[4] are strings, so the
                // {'ipAddress': ...} shorthand matches below look like they
                // cannot match — confirm against lodash.find semantics.
                if (vpcMessage[4] === eniData.ipAddress[0]) {
                    // destination matches eni's privateIP
                    var srcEniData = find(mapping, {'ipAddress': vpcMessage[3]});
                    log['direction'] = (srcEniData && (srcEniData.subnetId == eniData.subnetId) ? "internal" : "inbound");
                } else {
                    // sources matches eni's privateIP
                    var destEniData = find(mapping, {'ipAddress': vpcMessage[4]});
                    log['direction'] = (destEniData && (destEniData.subnetId == eniData.subnetId) ? "internal" : "outbound");
                }
                log['subnet-id'] = eniData.subnetId;
                log['vpc-id'] = eniData.vpcId;
                log['aws-region'] = process.env.AWS_REGION;
            } else {
                console.log(`No ENI data found for interface ${vpcMessage[2]}`);
            }
        });
        return records;
    }).catch(function (err) {
        // Best-effort enrichment: on any failure, forward the raw records.
        console.log("Error in includeSecurityGroupIds", err);
        return records;
    });
}
144 |
// Public API: discardInternalTraffic is synchronous; includeSecurityGroupIds
// returns a Promise of the (possibly enriched) records.
module.exports = {
    discardInternalTraffic: discardInternalTraffic,
    includeSecurityGroupIds: includeSecurityGroupIds
};
149 |
--------------------------------------------------------------------------------
/securityhub-forwarder/src/securityhub_forwarder.py:
--------------------------------------------------------------------------------
1 | import json
2 | from datetime import datetime
3 | import os
4 | import logging
5 | import traceback
6 | import uuid
7 | import sys
8 | sys.path.insert(0, '/opt')
9 | import boto3
10 | from botocore.exceptions import ClientError
11 | from utils import retry
12 |
13 |
def get_product_arn(securityhub_region):
    """Return the Sumo Logic provider's Security Hub product ARN for a region."""
    # Fixed account id of the Sumo Logic finding-provider account.
    provider_account_id = "956882708938"
    arn_template = "arn:aws:securityhub:%s:%s:product/sumologicinc/sumologic-mda"
    return arn_template % (securityhub_region, provider_account_id)
17 |
18 |
def get_logger():
    """Configure the root logger at INFO level and return it."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    return root_logger


# Module-level logger shared by every function in this file.
logger = get_logger()
25 |
26 |
def get_lambda_account_id(context):
    """Extract the AWS account id from the Lambda context's function ARN.

    ARN format: arn:aws:lambda:<region>:<account-id>:function:<name>,
    so the account id is the fifth colon-separated field.
    """
    return context.invoked_function_arn.split(":")[4]
30 |
31 |
def generate_id(search_name, finding_account_id, securityhub_region):
    """Build a unique finding id of the form
    sumologic:<region>:<account>:<search>/finding/<uuid4>.
    """
    # Todo: derive the uuid from ResourceID, ResourceType, Severity,
    # Compliance, Type, Title and AWS AccountId instead of a random value.
    unique_suffix = uuid.uuid4()
    return "sumologic:%s:%s:%s/finding/%s" % (
        securityhub_region, finding_account_id, search_name, unique_suffix)
37 |
38 |
def convert_to_utc(timestamp):
    """Convert an epoch timestamp to a UTC ISO-8601 string.

    Accepts an int or a (possibly comma-grouped) numeric string, in seconds
    or milliseconds (13+ digits are treated as milliseconds). Falls back to
    the current UTC time if conversion fails.
    """
    # Todo change to convert to RFC3339
    try:
        if isinstance(timestamp, int):
            ts = timestamp
        else:
            timestamp = timestamp.replace(",", "")
            ts = int(timestamp)
        timestamp = str(timestamp)
        if len(timestamp) >= 13:
            ts = ts / 1000  # millisecond precision -> seconds
        utcdate = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    except Exception as e:
        logger.error("Unable to convert %s Error %s" % (timestamp, e))
        utcdate = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    return utcdate
54 |
55 |
def generate_findings(data, finding_account_id, securityhub_region):
    """Convert the validated payload into a list of Security Hub findings
    (ASFF documents), one per row in data['Rows'].

    :param data: validated request payload (see check_required_params)
    :param finding_account_id: default AWS account id, used for rows that do
        not carry their own "aws_account_id"
    :param securityhub_region: region used in the product ARN and finding ids
    :return: list of finding dicts ready for BatchImportFindings
    """
    # Todo remove externalid, change to security hub, add productarn, update sdk, chunking
    all_findings = []
    product_arn = get_product_arn(securityhub_region)
    for row in data['Rows']:
        row["finding_time"] = convert_to_utc(row["finding_time"])
        # Bug fix: the original reassigned finding_account_id itself, so a
        # row-level aws_account_id "leaked" into every subsequent row that
        # lacked one. A per-row local keeps the default intact.
        row_account_id = row.get("aws_account_id", finding_account_id)
        finding = {
            "SchemaVersion": "2018-10-08",
            "RecordState": "ACTIVE",
            "ProductArn": product_arn,
            "Description": data.get("Description", ""),
            "SourceUrl": data.get("SourceUrl", ""),
            "GeneratorId": data["GeneratorID"],
            "AwsAccountId": row_account_id,
            "Id": generate_id(data["GeneratorID"], row_account_id, securityhub_region),
            "Types": [data["Types"]],
            "CreatedAt": row["finding_time"],
            "UpdatedAt": row["finding_time"],
            "FirstObservedAt": row["finding_time"],
            "Resources": [{
                "Type": row["resource_type"],
                "Id": row["resource_id"]
            }],
            "Severity": {
                "Normalized": int(data["Severity"])
            },
            "Title": row["title"]
        }
        if data.get("ComplianceStatus"):
            finding["Compliance"] = {"Status": data["ComplianceStatus"]}
        all_findings.append(finding)

    return all_findings
90 |
91 |
def check_required_params(data):
    """Validate required fields in the payload and its first row.

    Raises KeyError listing any missing fields, and ValueError for an
    out-of-range Severity or an unknown ComplianceStatus.
    """
    required_data_fields = {"GeneratorID", "Types", "Rows", "Severity"}
    required_row_fields = {"finding_time", "resource_type", "resource_id", "title"}
    missing_fields = required_data_fields - set(data.keys())
    missing_fields |= required_row_fields - set(data['Rows'][0].keys())
    if missing_fields:
        raise KeyError("%s Fields are missing" % ",".join(missing_fields))
    severity = int(data.get("Severity"))
    if not 0 <= severity <= 100:
        raise ValueError("Severity should be between 0 to 100")
    compliance_status = data.get("ComplianceStatus")
    if compliance_status and compliance_status not in ("PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"):
        raise ValueError("ComplianceStatus should be PASSED/WARNING/FAILED/NOT_AVAILABLE")
104 |
105 |
def validate_params(data):
    """Parse the JSON request body (with a JSON-encoded 'Rows' field) and
    validate it.

    Returns (parsed_data, None) on success, or (None, error_message) when
    parsing or validation fails.
    """
    try:
        data = json.loads(data)
        # 'Rows' arrives as a JSON string; default produces one empty row so
        # check_required_params can report the missing row fields.
        data['Rows'] = json.loads(data.get('Rows', '[{}]'))
        check_required_params(data)
    except ValueError as e:
        return None, "Param Validation Error - %s" % str(e)
    except KeyError as e:
        return None, str(e)
    return data, None
117 |
118 |
def subscribe_to_sumo(securityhub_cli, securityhub_region):
    """Enable the Sumo Logic product subscription in Security Hub.

    Raises a generic Exception carrying the HTTP status code when the
    StartProductSubscription call is rejected.
    """
    product_arn = get_product_arn(securityhub_region)
    try:
        resp = securityhub_cli.start_product_subscription(ProductArn=product_arn)
        logger.info("Subscribing to Sumo Logic Product StatusCode: %s ProductSubscriptionArn: %s" % (
            resp['ResponseMetadata']['HTTPStatusCode'], resp.get("ProductSubscriptionArn")))
    except ClientError as e:
        status_code = e.response['ResponseMetadata']['HTTPStatusCode']
        raise Exception("Failed to Subscribe to Sumo Logic Product StatusCode: %s Error: %s" % (status_code, str(e)))
130 |
131 |
def process_response(resp):
    """Summarize a BatchImportFindings response.

    Returns (status_code, body) where body reports the failed/success counts
    and, when any import failed, the de-duplicated error messages.
    """
    status_code = resp["ResponseMetadata"].get("HTTPStatusCode")
    failed_count = resp.get("FailedCount", 0)
    success_count = resp.get("SuccessCount")
    body = "FailedCount: %d SuccessCount: %d StatusCode: %d " % (
        failed_count, success_count, status_code)

    if failed_count > 0:
        unique_errors = {row["ErrorMessage"] for row in resp["Findings"]}
        body += "ErrorMessage: %s" % ",".join(unique_errors)
    return status_code, body
145 |
146 |
@retry(ExceptionToCheck=(Exception,), max_retries=1, multiplier=2, logger=logger)
def insert_findings(findings, securityhub_region):
    """Import findings into AWS Security Hub via BatchImportFindings.

    Retried once on any exception (see @retry). Returns (status_code, body)
    where body is a human-readable summary of the import result.
    """
    logger.info("inserting findings %d" % len(findings))

    securityhub_cli = boto3.client('securityhub', region_name=securityhub_region)
    try:
        resp = securityhub_cli.batch_import_findings(
            Findings=findings
        )
        status_code, body = process_response(resp)
    except ClientError as e:
        if e.response['Error']['Code'] == 'AccessDeniedException':
            # Account has not enabled Sumo Logic as a finding provider;
            # surface an actionable message instead of auto-subscribing.
            status_code = e.response["ResponseMetadata"]["HTTPStatusCode"]
            body = e.response["Error"]["Message"] + " .Enable Sumo Logic as a Finding Provider"
            logger.error(body)
            # disabling automatic subscription to security hub
            # subscribe_to_sumo(securityhub_cli, securityhub_region)
            # resp = securityhub_cli.batch_import_findings(
            #     Findings=findings
            # )
            # status_code, body = process_response(resp)
        else:
            status_code = e.response["ResponseMetadata"]["HTTPStatusCode"]
            body = e.response["Error"]["Message"]

    logger.info(body)
    return status_code, body
174 |
175 |
def lambda_handler(event, context):
    """API Gateway entry point: validate the request body and import its
    findings into AWS Security Hub.

    Returns an HTTP-style response dict with "statusCode" and "body".
    """
    lambda_account_id = get_lambda_account_id(context)
    lambda_region = os.getenv("AWS_REGION")
    logger.info("Invoking lambda_handler in Region %s AccountId %s" % (lambda_region, lambda_account_id))
    # Environment variables may redirect findings to another account/region.
    finding_account_id = os.getenv("AWS_ACCOUNT_ID", lambda_account_id)
    securityhub_region = os.getenv("REGION", lambda_region)
    data, err = validate_params(event['body'])
    if err:
        status_code, body = 400, "Bad Request: %s" % err
    else:
        try:
            findings = generate_findings(data, finding_account_id, securityhub_region)
            status_code, body = insert_findings(findings, securityhub_region)
        except Exception as e:
            status_code, body = 500, "Error: %s Traceback: %s" % (e, traceback.format_exc())
            logger.error(body)
    return {
        "statusCode": status_code,
        "body": body
    }
199 |
--------------------------------------------------------------------------------
/cloudwatchlogs-with-dlq/cloudwatchlogs_lambda.js:
--------------------------------------------------------------------------------
1 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 | // CloudWatch Logs to SumoLogic //
3 | // https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchlogs //
4 | // //
5 | // YOU MUST CREATE A SUMO LOGIC ENDPOINT CALLED SUMO_ENDPOINT AND PASTE IN ENVIRONMENTAL VARIABLES BELOW //
6 | // https://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source //
7 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
8 |
9 | // Include logStream and logGroup as json fields within the message. Required for SumoLogic AWS Lambda App
10 |
11 | // Regex used to detect logs coming from lambda functions.
12 | // The regex will parse out the requestID and strip the timestamp
13 | // Example: 2016-11-10T23:11:54.523Z 108af3bb-a79b-11e6-8bd7-91c363cc05d9 some message
14 | var consoleFormatRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z\s(\w+?-\w+?-\w+?-\w+?-\w+)\s/;
15 |
16 | // Used to extract RequestID
17 | var requestIdRegex = /(?:RequestId:|Z)\s+([\w\d\-]+)/;
18 | var url = require('url');
19 | var vpcutils = require('./vpcutils');
20 | var SumoLogsClient = require('./sumo-dlq-function-utils').SumoLogsClient;
21 | var Utils = require('./sumo-dlq-function-utils').Utils;
22 |
/**
 * Normalizes raw CloudWatch log events into records ready for posting:
 * strips trailing newlines, carries the Lambda request id across events,
 * auto-parses JSON messages and optionally tags log group/stream info.
 */
function createRecords(config, events, awslogsData) {
    console.log('Log events: ' + events.length);
    var currentRequestId = null;
    var normalized = [];

    events.forEach(function (entry) {
        // Remove any trailing newline from the raw message.
        entry.message = entry.message.replace(/\n$/, '');

        // Prefer an explicit "RequestId:" marker, then the console-log prefix;
        // the last id seen sticks to subsequent events that lack one.
        var idMatch = requestIdRegex.exec(entry.message);
        if (idMatch !== null) {
            currentRequestId = idMatch[1];
        }
        var consoleMatch = consoleFormatRegex.exec(entry.message);
        if (consoleMatch !== null) {
            currentRequestId = consoleMatch[1];
            entry.message = entry.message.substring(consoleMatch[0].length);
        }
        if (currentRequestId) {
            entry.requestID = currentRequestId;
        }

        // Auto detect whether the message body is JSON.
        try {
            entry.message = JSON.parse(entry.message);
        } catch (err) {
            entry.message = entry.message.trim(); // leave as plain text
        }

        // Drop fields that add no value downstream.
        delete entry.id;
        if (config.LogFormat.startsWith("VPC")) {
            delete entry.timestamp;
        }
        delete entry.extractedFields;

        if (config.includeLogInfo) {
            entry.logStream = awslogsData.logStream;
            entry.logGroup = awslogsData.logGroup;
        }
        normalized.push(entry);
    });
    return normalized;
}
67 |
68 | function getConfig(env) {
69 | var config = {
70 | // SumoLogic Endpoint to post logs
71 | "SumoURL": env.SUMO_ENDPOINT,
72 |
73 | // The following parameters override the sourceCategory, sourceHost and sourceName metadata fields within SumoLogic.
74 | // Not these can also be overridden via json within the message payload. See the README for more information.
75 | "sourceCategoryOverride": ("SOURCE_CATEGORY_OVERRIDE" in env) ? env.SOURCE_CATEGORY_OVERRIDE: '', // If none sourceCategoryOverride will not be overridden
76 | "sourceHostOverride": ("SOURCE_HOST_OVERRIDE" in env) ? env.SOURCE_HOST_OVERRIDE : '', // If none sourceHostOverride will not be set to the name of the logGroup
77 | "sourceNameOverride": ("SOURCE_NAME_OVERRIDE" in env) ? env.SOURCE_NAME_OVERRIDE : '', // If none sourceNameOverride will not be set to the name of the logStream
78 | "SUMO_CLIENT_HEADER": env.SUMO_CLIENT_HEADER || 'cwl-aws-lambda',
79 | // CloudWatch logs encoding
80 | "encoding": env.ENCODING || 'utf-8', // default is utf-8
81 | "LogFormat": env.LOG_FORMAT || 'Others',
82 | "compressData": env.COMPRESS_DATA || true,
83 | "vpcCIDRPrefix": env.VPC_CIDR_PREFIX || '',
84 | "includeLogInfo": ("INCLUDE_LOG_INFO" in env) ? env.INCLUDE_LOG_INFO === "true" : false,
85 | "includeSecurityGroupInfo": ("INCLUDE_SECURITY_GROUP_INFO" in env) ? env.INCLUDE_SECURITY_GROUP_INFO === "true" : false
86 | };
87 | if (!config.SumoURL) {
88 | return new Error('Undefined SUMO_ENDPOINT environment variable');
89 | }
90 | // Validate URL has been set
91 | var urlObject = url.parse(config.SumoURL);
92 | if (urlObject.protocol !== 'https:' || urlObject.host === null || urlObject.path === null) {
93 | return new Error('Invalid SUMO_ENDPOINT environment variable: ' + config.SumoURL);
94 | }
95 | return config;
96 | }
97 |
/**
 * Optionally enriches VPC-JSON records with security-group/ENI data.
 * Always resolves with the (possibly modified) records; enrichment failures
 * are swallowed inside vpcutils, which falls back to the raw records.
 */
function transformRecords(config, records) {
    var needsEnrichment = (config.LogFormat === "VPC-JSON" && config.includeSecurityGroupInfo);
    return new Promise(function (resolve, reject) {
        if (!needsEnrichment) {
            resolve(records);
            return;
        }
        vpcutils.includeSecurityGroupIds(records).then(function (modifiedRecords) {
            if (modifiedRecords && modifiedRecords.length > 0 && "security-group-ids" in modifiedRecords[0]) {
                console.log("SecurityGroupInfo Added");
            }
            resolve(modifiedRecords);
        });
    });
}
112 |
/**
 * Drops VPC-internal traffic when a CIDR prefix filter is configured;
 * returns the records untouched otherwise.
 */
function filterRecords(config, records) {
    if (!(config.LogFormat.startsWith("VPC") && config.vpcCIDRPrefix)) {
        return records;
    }
    var keptRecords = vpcutils.discardInternalTraffic(config.vpcCIDRPrefix, records);
    console.log(records.length - keptRecords.length + " records discarded as InternalTraffic");
    return keptRecords;
}
121 |
122 | exports.processLogs = function (env, eventAwslogsData, callback) {
123 | var zippedInput = new Buffer(eventAwslogsData, 'base64');
124 | var config = getConfig(env);
125 | if (config instanceof Error) {
126 | console.log("Error in getConfig: ", config);
127 | callback(config, null);
128 | return;
129 | }
130 | var awslogsData;
131 | Utils.gunzipPromise(zippedInput).then(function (data) {
132 | console.log("Successfully Unzipped");
133 | awslogsData = JSON.parse(data.toString(config.encoding));
134 | var records = [];
135 | if (awslogsData.messageType === 'CONTROL_MESSAGE') {
136 | console.log('Skipping Control Message');
137 | } else {
138 | records = createRecords(config, awslogsData.logEvents, awslogsData);
139 | console.log(records.length + " Records Found");
140 | }
141 | return records;
142 | }).then(function (records) {
143 | records = filterRecords(config, records);
144 | if (records.length > 0) {
145 | return transformRecords(config, records).then(function (records) {
146 | var SumoLogsClientObj = new SumoLogsClient(config);
147 | var messageList = SumoLogsClientObj.createBuckets(config, records, awslogsData, config.LogFormat === "VPC-RAW");
148 | console.log("Buckets Created: " + Object.keys(messageList).length);
149 | // console.log(messageList);
150 | return SumoLogsClientObj.postToSumo(messageList, config.compressData);
151 | });
152 | }
153 | }).then(function (result) {
154 | if (!result) {
155 | callback(null, "No Records");
156 | } else {
157 | var msg = `RequestSent: ${result.requestSuccessCnt} RequestError: ${result.messageErrors.length}`;
158 | console.log(msg);
159 | callback(result.messageErrors.length > 0 ? result.messageErrors.join() : null, msg);
160 | }
161 | }).catch(function (err) {
162 | console.log(err);
163 | callback(err, null);
164 | });
165 | };
166 |
// Lambda entry point: forwards the base64-encoded CloudWatch Logs payload and
// the live environment to processLogs, which does all the work.
exports.handler = function (event, context, callback) {

    exports.processLogs(process.env, event.awslogs.data, callback);

};
172 |
--------------------------------------------------------------------------------
/inspector/python/inspector.py:
--------------------------------------------------------------------------------
1 |
2 | import json
3 | import httplib
4 | import base64,zlib
5 | import urlparse
6 | import boto3
7 | import datetime
8 | import logging
9 |
##################################################################
#                        Configuration                           #
##################################################################
# Enter Sumo Http source endpoint here.
sumoEndpoint = "https://endpoint1.collection.sumologic.com/receiver/v1/http/"
# include auxiliary data (e.g for assessment template, run, or target) in the collected event or not
contextLookup = True


##################################################################
#                          Main Code                             #
##################################################################
# Parse the endpoint once at cold start; sendSumo reuses these parts.
up = urlparse.urlparse(sumoEndpoint)
options = { 'hostname': up.hostname,
            'path': up.path,
            'method': 'POST'
};

# Internal variables used for this Lambda function: per-type caches of
# Inspector objects already looked up during this container's lifetime.
resourceMap = {'finding':{},'target':{},'run':{},'template':{}, 'rulesPackage':{}}
# prepare logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
33 |
34 | # main function to send data to a Sumo HTTP source
35 | def sendSumo(msg, toCompress = False):
36 | conn = httplib.HTTPSConnection(options['hostname'])
37 | if (toCompress):
38 | headers = {"Content-Encoding": "gzip"}
39 | finalData = compress(msg)
40 | else:
41 | headers = {"Content-type": "text/html","Accept": "text/plain"}
42 | finalData =msg
43 | headers.update({"X-Sumo-Client": "inspector-aws-lambda"})
44 | conn.request(options['method'], options['path'], finalData,headers)
45 | response = conn.getresponse()
46 | conn.close()
47 | return (response.status,response.reason)
48 |
49 |
# Simple gzip compression helper: 16 + MAX_WBITS selects the gzip container
# so the receiver can decode it via Content-Encoding: gzip.
def compress(data, compresslevel=9):
    compressor = zlib.compressobj(compresslevel, zlib.DEFLATED, 16 + zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
    return compressor.compress(data) + compressor.flush()
56 |
# Looks up an Inspector object by ARN and type, memoizing results per type in
# resourceMap so repeated SNS records don't re-query the Inspector API within
# the same container. The returned object provides extra context for the
# final message sent to Sumo. Returns None when nothing is found/cached.
def lookup(objectId,objectType = 'run'):
    # NOTE(review): a new boto3 client is created on every call — could be
    # cached at module level; confirm before changing.
    client = boto3.client('inspector')
    finalObj = None

    # Fetch (or lazily create) the cache bucket for this object type.
    # resourceMap is pre-seeded with all known types, so the None branch only
    # guards against unknown objectType values.
    objectMap = resourceMap.get(objectType)
    if (objectMap is None):
        resourceMap[objectType]= objectMap = {}
    try:
        if (objectType=='run'):
            run = objectMap.get(objectId)
            if (run is None):
                runs = client.describe_assessment_runs(assessmentRunArns=[objectId])
                if (runs is not None):
                    run = runs['assessmentRuns'][0]
                    # For run item, we only collect important properties
                    objectMap[objectId] = finalObj = {'name':run['name'],'createdAt':'%s' % run['createdAt'], 'state':run['state'],'durationInSeconds':run['durationInSeconds'],'startedAt':'%s' % run['startedAt'],'assessmentTemplateArn':run['assessmentTemplateArn']}
            else:
                finalObj = run
        elif (objectType=='template'):
            template = objectMap.get(objectId)
            if (template is None):
                templates = client.describe_assessment_templates(assessmentTemplateArns=[objectId])
                if (templates is not None):
                    finalObj = objectMap[objectId] = templates['assessmentTemplates'][0]
            else:
                finalObj = template
        elif (objectType=='rulesPackage'):
            rulesPackage = objectMap.get(objectId)
            if (rulesPackage is None):
                rulesPackages = client.describe_rules_packages(rulesPackageArns=[objectId])
                if (rulesPackages is not None):
                    finalObj = objectMap[objectId] = rulesPackages['rulesPackages'][0]
            else:
                finalObj = rulesPackage
        elif (objectType=='target'):
            target = objectMap.get(objectId)
            if (target is None):
                targets = client.describe_assessment_targets(assessmentTargetArns=[objectId])
                if (targets is not None):
                    finalObj = objectMap[objectId] = targets['assessmentTargets'][0]
            else:
                finalObj = target
        elif (objectType == 'finding'):
            finding = objectMap.get(objectId)
            if (finding is None):
                findings = client.describe_findings(findingArns=[objectId])
                if (findings is not None):
                    finalObj = objectMap[objectId] = findings['findings'][0]
            else:
                finalObj = finding
    except Exception as e:
        # Log and propagate: callers treat a raised error as fatal for this record.
        logger.error(e)
        raise
    return finalObj
112 |
# json.dumps(default=...) hook that serializes datetime/date values to
# ISO-8601 strings and raises TypeError for anything else, mirroring the
# json module's behavior for unsupported types.
def json_deserializer(obj):
    if isinstance(obj, datetime.datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, datetime.date):
        # Bug fix: was `isinstance(obj, date)` with `date` never imported, and
        # the fallback called json.JSONEncoder.default(self, obj) with an
        # undefined `self` — both paths raised NameError at runtime.
        return obj.strftime('%Y-%m-%d')
    # Let the caller see the standard error for unsupported types
    raise TypeError("Object of type %s is not JSON serializable" % type(obj).__name__)
121 |
122 |
def sumo_inspector_handler(event, context):
    """Lambda entry point for Inspector SNS notifications.

    For each SNS record: parse the Inspector message, optionally enrich it
    with looked-up context objects (template/run/target/finding and the
    finding's rules package), then gzip and POST it to the Sumo HTTP source.
    """
    if ('Records' in event):
        for record in event['Records']:
            # get actual SNS message
            snsObj = record['Sns']
            dataObj = {'Timestamp':snsObj['Timestamp'],'Message':snsObj['Message'],'MessageId':snsObj['MessageId']}
            msgObj = json.loads(snsObj['Message'])
            if (contextLookup):
                # do reverse lookup of each of the following items in Message: target, run, template.
                if ('template' in msgObj):
                    lookupItem = lookup(msgObj['template'],'template')
                    if (lookupItem is not None):
                        logger.info("Got a template item back")
                        msgObj['templateLookup']= lookupItem
                    else:
                        print("Could not lookup template: %s" % msgObj['template'])
                if ('run' in msgObj):
                    lookupItem = lookup(msgObj['run'],'run')
                    if (lookupItem is not None):
                        msgObj['runLookup']= lookupItem
                    else:
                        logger.info("Could not lookup run: %s" % msgObj['run'])
                if ('target' in msgObj):
                    lookupItem = lookup(msgObj['target'],'target')
                    if (lookupItem is not None):
                        msgObj['targetLookup']= lookupItem
                    else:
                        logger.info("Could not lookup target: %s" % msgObj['target'])
                if ('finding' in msgObj):
                    # now query findings
                    finding = lookup(msgObj['finding'],'finding')
                    if (finding is not None):

                        # now query rulesPackage inside the finding
                        rulesPackage = lookup(finding['serviceAttributes']['rulesPackageArn'],'rulesPackage')
                        if (rulesPackage is not None):
                            finding['rulesPackageLookup'] = rulesPackage
                        else:
                            logger.info("Cannot lookup rulesPackageArn: %s"% finding['serviceAttributes']['rulesPackageArn'])
                        msgObj['findingDetails'] = finding
                # construct final data object with the parsed/enriched message
                # (replaces the raw-string Message captured above)
                dataObj = {'Timestamp':snsObj['Timestamp'],'Message':msgObj,'MessageId':snsObj['MessageId']}

            # now send this object to Sumo side (gzip-compressed)
            rs = sendSumo(json.dumps(dataObj,default=json_deserializer),toCompress=True)

            if (rs[0]!=200):
                logger.info('Error sending data to sumo with code: %d and message: %s '% (rs[0],rs[1]))
                logger.info(json.dumps(dataObj,default=json_deserializer))
            else:
                logger.info("Sent data to Sumo successfully")
    else:
        # Not an SNS-delivered event; nothing to process.
        logger.info('Unrecoganized data')
177 |
--------------------------------------------------------------------------------
/cloudwatchlogs/cloudwatchlogs_lambda.js:
--------------------------------------------------------------------------------
1 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 | // CloudWatch Logs to SumoLogic //
3 | // https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchlogs //
4 | // //
5 | // YOU MUST CREATE A SUMO LOGIC ENDPOINT CALLED SUMO_ENDPOINT AND PASTE IN ENVIRONMENTAL VARIABLES BELOW //
6 | // https://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source //
7 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
8 |
// SumoLogic Endpoint to post logs
var SumoURL = process.env.SUMO_ENDPOINT;

// The following parameters override the sourceCategoryOverride, sourceHostOverride and sourceNameOverride metadata fields within SumoLogic.
// Note these can also be overridden via json within the message payload. See the README for more information.
var sourceCategoryOverride = process.env.SOURCE_CATEGORY_OVERRIDE || 'none';  // If none sourceCategoryOverride will not be overridden
var sourceHostOverride = process.env.SOURCE_HOST_OVERRIDE || 'none';  // If none sourceHostOverride will not be set to the name of the logGroup
var sourceNameOverride = process.env.SOURCE_NAME_OVERRIDE || 'none';  // If none sourceNameOverride will not be set to the name of the logStream

// CloudWatch logs encoding
var encoding = process.env.ENCODING || 'utf-8';  // default is utf-8

// Include logStream and logGroup as json fields within the message. Required for SumoLogic AWS Lambda App
var includeLogInfo = false;  // NOTE(review): actual default here is false, though the original comment claimed true — confirm intended default

// Regex used to detect logs coming from lambda functions.
// The regex will parse out the requestID and strip the timestamp
// Example: 2016-11-10T23:11:54.523Z 108af3bb-a79b-11e6-8bd7-91c363cc05d9 some message
var consoleFormatRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z\s(\w+?-\w+?-\w+?-\w+?-\w+)\s/;

// Used to extract RequestID
var requestIdRegex = /(?:RequestId:|Z)\s+([\w\d\-]+)/;

var https = require('https');
var zlib = require('zlib');
var url = require('url');
35 |
36 |
/**
 * Builds the "<name>:<category>:<host>" metadata key for a message, applying
 * (in increasing precedence) the CloudWatch logGroup/logStream names, the
 * environment overrides, and finally any per-message _sumo_metadata object.
 */
function sumoMetaKey(awslogsData, message) {
    var isSet = function (value) {
        return value !== null && value !== '' && value != 'none';
    };

    var sourceCategory = isSet(sourceCategoryOverride) ? sourceCategoryOverride : '';
    var sourceHost = isSet(sourceHostOverride) ? sourceHostOverride : awslogsData.logGroup;
    var sourceName = isSet(sourceNameOverride) ? sourceNameOverride : awslogsData.logStream;

    // Ability to override metadata within the message itself, via a
    // _sumo_metadata object. Useful within Lambda function console.log to
    // dynamically set metadata fields within SumoLogic.
    if (message.hasOwnProperty('_sumo_metadata')) {
        var metadataOverride = message._sumo_metadata;
        if (metadataOverride.category) {
            sourceCategory = metadataOverride.category;
        }
        if (metadataOverride.host) {
            sourceHost = metadataOverride.host;
        }
        if (metadataOverride.source) {
            sourceName = metadataOverride.source;
        }
        delete message._sumo_metadata;
    }
    return sourceName + ':' + sourceCategory + ':' + sourceHost;

}
76 |
// Posts each bucket of messages (keyed by "name:category:host" from
// sumoMetaKey) to the SumoLogic HTTP endpoint as newline-delimited JSON.
// Invokes the Lambda `callback` exactly once, after every bucket has either
// succeeded or been recorded as an error.
function postToSumo(callback, messages) {
    var messagesTotal = Object.keys(messages).length;
    var messagesSent = 0;
    var messageErrors = [];

    var urlObject = url.parse(SumoURL);
    var options = {
        'hostname': urlObject.hostname,
        'path': urlObject.pathname,
        'method': 'POST'
    };

    // Completion gate: fires the callback only once the number of successes
    // plus errors accounts for every bucket. Reports an error if any bucket
    // failed, success otherwise.
    var finalizeContext = function () {
        var total = messagesSent + messageErrors.length;
        if (total == messagesTotal) {
            console.log('messagesSent: ' + messagesSent + ' messagesErrors: ' + messageErrors.length);
            if (messageErrors.length > 0) {
                callback('errors: ' + messageErrors);
            } else {
                callback(null, "Success");
            }
        }
    };


    Object.keys(messages).forEach(function (key, index) {
        // The key encodes the metadata as "name:category:host"; each part
        // becomes the corresponding X-Sumo-* request header.
        var headerArray = key.split(':');

        // NOTE: `options` is shared across iterations; this is safe only
        // because https.request() reads it synchronously below, before the
        // next iteration replaces options.headers.
        options.headers = {
            'X-Sumo-Name': headerArray[0],
            'X-Sumo-Category': headerArray[1],
            'X-Sumo-Host': headerArray[2],
            'X-Sumo-Client': 'cwl-aws-lambda'
        };

        var req = https.request(options, function (res) {
            res.setEncoding('utf8');
            // Body is ignored; only the status code matters. The 'data'
            // handler must exist so the stream flows and 'end' fires.
            res.on('data', function (chunk) {});
            res.on('end', function () {
                if (res.statusCode == 200) {
                    messagesSent++;
                } else {
                    messageErrors.push('HTTP Return code ' + res.statusCode);
                }
                finalizeContext();
            });
        });

        // Network-level failures (DNS, connection reset, ...) also count
        // toward completion so the callback is never starved.
        req.on('error', function (e) {
            messageErrors.push(e.message);
            finalizeContext();
        });

        // One JSON object per line (newline-delimited) per SumoLogic's
        // HTTP source format.
        for (var i = 0; i < messages[key].length; i++) {
            req.write(JSON.stringify(messages[key][i]) + '\n');
        }
        req.end();
    });
}
136 |
137 |
138 | exports.handler = function (event, context, callback) {
139 |
140 | // Used to hold chunks of messages to post to SumoLogic
141 | var messageList = {};
142 |
143 | // Validate URL has been set
144 | var urlObject = url.parse(SumoURL);
145 | if (urlObject.protocol != 'https:' || urlObject.host === null || urlObject.path === null) {
146 | callback('Invalid SUMO_ENDPOINT environment variable: ' + SumoURL);
147 | }
148 |
149 | var zippedInput = new Buffer(event.awslogs.data, 'base64');
150 |
151 | zlib.gunzip(zippedInput, function (e, buffer) {
152 | if (e) {
153 | callback(e);
154 | }
155 |
156 | var awslogsData = JSON.parse(buffer.toString(encoding));
157 |
158 | if (awslogsData.messageType === 'CONTROL_MESSAGE') {
159 | console.log('Control message');
160 | callback(null, 'Success');
161 | }
162 |
163 | var lastRequestID = null;
164 |
165 | console.log('Log events: ' + awslogsData.logEvents.length);
166 |
167 | // Chunk log events before posting to SumoLogic
168 | awslogsData.logEvents.forEach(function (log, idx, arr) {
169 |
170 | // Remove any trailing \n
171 | log.message = log.message.replace(/\n$/, '');
172 |
173 | // Try extract requestID
174 | var requestId = requestIdRegex.exec(log.message);
175 | if (requestId !== null) {
176 | lastRequestID = requestId[1];
177 | }
178 |
179 | // Attempt to detect console log and auto extract requestID and message
180 | var consoleLog = consoleFormatRegex.exec(log.message);
181 | if (consoleLog !== null) {
182 | lastRequestID = consoleLog[1];
183 | log.message = log.message.substring(consoleLog[0].length);
184 | }
185 |
186 | // Auto detect if message is json
187 | try {
188 | log.message = JSON.parse(log.message);
189 | } catch (err) {
190 | // Do nothing, leave as text
191 | log.message = log.message.trim();
192 | }
193 |
194 | // delete id as it's not very useful
195 | delete log.id;
196 |
197 | if (includeLogInfo) {
198 | log.logStream = awslogsData.logStream;
199 | log.logGroup = awslogsData.logGroup;
200 | }
201 |
202 | if (lastRequestID) {
203 | log.requestID = lastRequestID;
204 | }
205 |
206 | var metadataKey = sumoMetaKey(awslogsData, log.message);
207 |
208 | if (metadataKey in messageList) {
209 | messageList[metadataKey].push(log);
210 | } else {
211 | messageList[metadataKey] = [log];
212 | }
213 | });
214 |
215 | // Push messages to Sumo
216 | postToSumo(callback, messageList);
217 |
218 | });
219 | };
220 |
--------------------------------------------------------------------------------
/loggroup-lambda-connector/test/loggroup-lambda-cft.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Outputs": {
4 | "SumoLogGroupLambdaConnector": {
5 | "Description": "SumoLogGroupLambdaConnector Function ARN",
6 | "Value": {
7 | "Fn::GetAtt": [
8 | "SumoLogGroupLambdaConnector",
9 | "Arn"
10 | ]
11 | },
12 | "Export" : {
13 | "Name" : { "Fn::Join": [ "-", [ "SumoLogGroupLambdaConnectorArn", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] }
14 | }
15 | }
16 | },
17 | "Mappings" : {
18 | "RegionMap" : {
19 | "us-east-1": {"bucketname": "appdevzipfiles-us-east-1"},
20 | "us-east-2": {"bucketname": "appdevzipfiles-us-east-2"},
21 | "us-west-1": {"bucketname": "appdevzipfiles-us-west-1"},
22 | "us-west-2": {"bucketname": "appdevzipfiles-us-west-2"},
23 | "ap-south-1": {"bucketname": "appdevzipfiles-ap-south-1"},
24 | "ap-northeast-2": {"bucketname": "appdevzipfiles-ap-northeast-2"},
25 | "ap-southeast-1": {"bucketname": "appdevzipfiles-ap-southeast-1"},
26 | "ap-southeast-2": {"bucketname": "appdevzipfiles-ap-southeast-2"},
27 | "ap-northeast-1": {"bucketname": "appdevzipfiles-ap-northeast-1"},
28 | "ca-central-1": {"bucketname": "appdevzipfiles-ca-central-1"},
29 | "eu-central-1": {"bucketname": "appdevzipfiles-eu-central-1"},
30 | "eu-west-1": {"bucketname": "appdevzipfiles-eu-west-1"},
31 | "eu-west-2": {"bucketname": "appdevzipfiles-eu-west-2"},
32 | "eu-west-3": {"bucketname": "appdevzipfiles-eu-west-3"},
33 | "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"}
34 | }
35 | },
36 | "Resources": {
37 | "SumoLogGroupLambdaConnector": {
38 | "Type": "AWS::Lambda::Function",
39 | "Properties": {
40 | "Code": {
41 | "S3Bucket": {"Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
42 | "S3Key": "loggroup-lambda-connector.zip"
43 | },
44 | "Tags": [
45 | {
46 | "Value": "SAM",
47 | "Key": "lambda:createdBy"
48 | }
49 | ],
50 | "MemorySize": 128,
51 | "Environment": {
52 | "Variables": {
53 | "LOG_GROUP_PATTERN": {
54 | "Ref": "LogGroupPattern"
55 | },
56 | "USE_EXISTING_LOG_GROUPS": {
57 | "Ref": "UseExistingLogs"
58 | },
59 | "LAMBDA_ARN": {
60 | "Ref": "LambdaARN"
61 | }
62 | }
63 | },
64 | "Handler": "loggroup-lambda-connector.handler",
65 | "Role": {
66 | "Fn::GetAtt": [
67 | "SumoLogGroupLambdaConnectorRole",
68 | "Arn"
69 | ]
70 | },
71 | "Timeout": 300,
72 | "Runtime": "nodejs8.10"
73 | }
74 | },
75 | "SumoLogGroupLambdaConnectorRole": {
76 | "Type": "AWS::IAM::Role",
77 | "Properties": {
78 | "ManagedPolicyArns": [
79 | "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
80 | ],
81 | "Policies": [
82 | {
83 | "PolicyName": { "Fn::Join": [ "-", [ "SumoLogGroupLambdaConnectorPolicy", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
84 | "PolicyDocument": {
85 | "Statement": [
86 | {
87 | "Action": [
88 | "logs:DescribeLogGroups",
89 | "logs:DescribeLogStreams",
90 | "logs:PutSubscriptionFilter"
91 | ],
92 | "Resource": [
93 | {
94 | "Fn::Sub": "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*"
95 | }
96 | ],
97 | "Effect": "Allow",
98 | "Sid": "ReadWriteFilterPolicy"
99 | }
100 | ]
101 | }
102 | }
103 | ],
104 | "AssumeRolePolicyDocument": {
105 | "Version": "2012-10-17",
106 | "Statement": [
107 | {
108 | "Action": [
109 | "sts:AssumeRole"
110 | ],
111 | "Effect": "Allow",
112 | "Principal": {
113 | "Service": [
114 | "lambda.amazonaws.com"
115 | ]
116 | }
117 | }
118 | ]
119 | }
120 | }
121 | },
122 | "SumoCWLambdaInvokePermission": {
123 | "Type": "AWS::Lambda::Permission",
124 | "Properties": {
125 | "Action": "lambda:InvokeFunction",
126 | "SourceAccount": {
127 | "Ref": "AWS::AccountId"
128 | },
129 | "Principal": {
130 | "Fn::Sub": "logs.${AWS::Region}.amazonaws.com"
131 | },
132 | "FunctionName": {
133 | "Ref": "LambdaARN"
134 | },
135 | "SourceArn": {
136 | "Fn::Sub": "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*:*"
137 | }
138 | }
139 | },
140 | "SumoLogGroupLambdaConnectorLambdaTrigger": {
141 | "Type": "AWS::Events::Rule",
142 | "Properties": {
143 | "EventPattern": {
144 | "source": [
145 | "aws.logs"
146 | ],
147 | "detail": {
148 | "eventName": [
149 | "CreateLogGroup"
150 | ],
151 | "eventSource": [
152 | "logs.amazonaws.com"
153 | ]
154 | }
155 | },
156 | "Targets": [
157 | {
158 | "Id": "SumoLogGroupLambdaConnectorLambdaTriggerLambdaTarget",
159 | "Arn": {
160 | "Fn::GetAtt": [
161 | "SumoLogGroupLambdaConnector",
162 | "Arn"
163 | ]
164 | }
165 | }
166 | ]
167 | }
168 | },
169 | "SumoLogGroupLambdaConnectorLambdaTriggerPermission": {
170 | "Type": "AWS::Lambda::Permission",
171 | "Properties": {
        "Action": "lambda:InvokeFunction",
173 | "Principal": "events.amazonaws.com",
174 | "FunctionName": {
175 | "Ref": "SumoLogGroupLambdaConnector"
176 | },
177 | "SourceArn": {
178 | "Fn::GetAtt": [
179 | "SumoLogGroupLambdaConnectorLambdaTrigger",
180 | "Arn"
181 | ]
182 | }
183 | }
184 | }
185 | },
186 | "Description": "\"Lambda Function for automatic subscription of any Sumo Logic lambda function with loggroups matching an input pattern.\"\n",
187 | "Parameters": {
188 | "UseExistingLogs": {
189 | "Default": "false",
190 | "Type": "String",
191 | "Description": "Select true for subscribing existing logs",
192 | "AllowedValues": [
193 | "true",
194 | "false"
195 | ]
196 | },
197 | "LambdaARN": {
198 | "Default": "arn:aws:lambda:us-east-1:123456789000:function:TestLambda",
199 | "Type": "String",
200 | "Description": "Enter ARN for target lambda function"
201 | },
202 | "LogGroupPattern": {
203 | "Default": "Test",
204 | "Type": "String",
205 | "Description": "Enter regex for matching logGroups"
206 | }
207 | }
208 | }
209 |
--------------------------------------------------------------------------------
/sumologic-app-utils/src/sumologic.py:
--------------------------------------------------------------------------------
1 | import json
2 | import requests
3 |
4 | try:
5 | import cookielib
6 | except ImportError:
7 | import http.cookiejar as cookielib
8 |
9 | DEFAULT_VERSION = 'v1'
10 |
11 |
class SumoLogic(object):
    """Minimal client for the Sumo Logic REST API.

    Maintains an authenticated ``requests.Session`` and wraps the search,
    collector, source, dashboard, metrics and content (folder) endpoints
    used by the app utils.
    """

    def __init__(self, accessId, accessKey, endpoint=None, cookieFile='cookies.txt'):
        """Build an authenticated API session.

        :param accessId: Sumo Logic access ID (HTTP basic auth user).
        :param accessKey: Sumo Logic access key (HTTP basic auth password).
        :param endpoint: API base URL without a trailing slash, e.g.
            ``https://api.sumologic.com/api``. When None, the correct
            regional endpoint is discovered via :meth:`_get_endpoint`.
        :param cookieFile: file name backing the session's cookie jar.
        :raises Exception: if the endpoint ends with a slash.
        """
        self.session = requests.Session()
        self.session.auth = (accessId, accessKey)
        self.session.headers = {'content-type': 'application/json', 'accept': 'application/json'}
        self.session.cookies = cookielib.FileCookieJar(cookieFile)
        self.endpoint = self._get_endpoint() if endpoint is None else endpoint
        if self.endpoint.endswith("/"):
            raise Exception("Endpoint should not end with a slash character")

    def _get_endpoint(self):
        """
        SumoLogic REST API endpoint changes based on the geo location of the client.
        For example, If the client geolocation is Australia then the REST end point is
        https://api.au.sumologic.com/api/v1
        When the default REST endpoint (https://api.sumologic.com/api/v1) is used the server
        responds with a 401 and causes the SumoLogic class instantiation to fail and this very
        unhelpful message is shown 'Full authentication is required to access this resource'
        This method makes a request to the default REST endpoint and resolves the 401 to learn
        the right endpoint
        """
        self.endpoint = 'https://api.sumologic.com/api'
        self.response = self.session.get('https://api.sumologic.com/api/v1/collectors')  # Dummy call to get endpoint
        endpoint = self.response.url.replace('/v1/collectors', '')  # dirty hack to sanitise URI and retain domain
        print("SDK Endpoint", endpoint)
        return endpoint

    def get_versioned_endpoint(self, version):
        """Return the API base URL with the given version segment appended."""
        return self.endpoint + '/%s' % version

    def _handle_response(self, r):
        """Promote the response body to the HTTP reason on 4xx/5xx so that
        ``raise_for_status()`` surfaces the API's error message, then raise.

        Returns the response unchanged on success.
        """
        if 400 <= r.status_code < 600:
            r.reason = r.text
        r.raise_for_status()
        return r

    def delete(self, method, params=None, version=DEFAULT_VERSION):
        """Issue a DELETE request to the API path ``method``."""
        endpoint = self.get_versioned_endpoint(version)
        return self._handle_response(self.session.delete(endpoint + method, params=params))

    def get(self, method, params=None, version=DEFAULT_VERSION):
        """Issue a GET request to the API path ``method``."""
        endpoint = self.get_versioned_endpoint(version)
        return self._handle_response(self.session.get(endpoint + method, params=params))

    def post(self, method, params, headers=None, version=DEFAULT_VERSION):
        """Issue a POST request with ``params`` JSON-encoded as the body."""
        endpoint = self.get_versioned_endpoint(version)
        return self._handle_response(
            self.session.post(endpoint + method, data=json.dumps(params), headers=headers))

    def put(self, method, params, headers=None, version=DEFAULT_VERSION):
        """Issue a PUT request with ``params`` JSON-encoded as the body."""
        endpoint = self.get_versioned_endpoint(version)
        return self._handle_response(
            self.session.put(endpoint + method, data=json.dumps(params), headers=headers))

    def search(self, query, fromTime=None, toTime=None, timeZone='UTC'):
        """Run a synchronous log search and return the decoded response."""
        params = {'q': query, 'from': fromTime, 'to': toTime, 'tz': timeZone}
        r = self.get('/logs/search', params)
        return json.loads(r.text)

    def search_job(self, query, fromTime=None, toTime=None, timeZone='UTC', byReceiptTime=None):
        """Start an asynchronous search job; the returned dict carries its 'id'."""
        params = {'query': query, 'from': fromTime, 'to': toTime, 'timeZone': timeZone, 'byReceiptTime': byReceiptTime}
        r = self.post('/search/jobs', params)
        return json.loads(r.text)

    def search_job_status(self, search_job):
        """Poll the state of a search job created by :meth:`search_job`."""
        r = self.get('/search/jobs/' + str(search_job['id']))
        return json.loads(r.text)

    def search_job_messages(self, search_job, limit=None, offset=0):
        """Fetch raw messages produced by a search job."""
        params = {'limit': limit, 'offset': offset}
        r = self.get('/search/jobs/' + str(search_job['id']) + '/messages', params)
        return json.loads(r.text)

    def search_job_records(self, search_job, limit=None, offset=0):
        """Fetch aggregate records produced by a search job."""
        params = {'limit': limit, 'offset': offset}
        r = self.get('/search/jobs/' + str(search_job['id']) + '/records', params)
        return json.loads(r.text)

    def delete_search_job(self, search_job):
        """Cancel and delete a search job."""
        return self.delete('/search/jobs/' + str(search_job['id']))

    def collectors(self, limit=None, offset=None, filter_type=None):
        """List collectors, optionally filtered by collector type."""
        params = {'limit': limit, 'offset': offset}
        if filter_type:
            params['filter'] = filter_type
        r = self.get('/collectors', params)
        return json.loads(r.text)['collectors']

    def collector(self, collector_id):
        """Return ``(collector_dict, etag)``; the etag is needed for updates."""
        r = self.get('/collectors/' + str(collector_id))
        return json.loads(r.text), r.headers['etag']

    def create_collector(self, collector, headers=None):
        """Create a collector from the given definition dict."""
        return self.post('/collectors', collector, headers)

    def update_collector(self, collector, etag):
        """Update a collector; If-Match makes the write conditional on `etag`."""
        headers = {'If-Match': etag}
        return self.put('/collectors/' + str(collector['collector']['id']), collector, headers)

    def delete_collector(self, collector):
        """Delete the collector described by the given dict."""
        return self.delete('/collectors/' + str(collector['collector']['id']))

    def sources(self, collector_id, limit=None, offset=None):
        """List the sources attached to a collector."""
        params = {'limit': limit, 'offset': offset}
        r = self.get('/collectors/' + str(collector_id) + '/sources', params)
        return json.loads(r.text)['sources']

    def source(self, collector_id, source_id):
        """Return ``(source_dict, etag)``; the etag is needed for updates."""
        r = self.get('/collectors/' + str(collector_id) + '/sources/' + str(source_id))
        return json.loads(r.text), r.headers['etag']

    def create_source(self, collector_id, source):
        """Create a source under the given collector."""
        return self.post('/collectors/' + str(collector_id) + '/sources', source)

    def update_source(self, collector_id, source, etag):
        """Update a source; If-Match makes the write conditional on `etag`."""
        headers = {'If-Match': etag}
        return self.put('/collectors/' + str(collector_id) + '/sources/' + str(source['source']['id']), source, headers)

    def delete_source(self, collector_id, source):
        """Delete a source from the given collector."""
        return self.delete('/collectors/' + str(collector_id) + '/sources/' + str(source['source']['id']))

    def dashboards(self, monitors=False):
        """List dashboards; `monitors` is passed through as a query flag."""
        params = {'monitors': monitors}
        r = self.get('/dashboards', params)
        return json.loads(r.text)['dashboards']

    def dashboard(self, dashboard_id):
        """Return a single dashboard definition."""
        r = self.get('/dashboards/' + str(dashboard_id))
        return json.loads(r.text)['dashboard']

    def dashboard_data(self, dashboard_id):
        """Return the monitor data backing a dashboard."""
        r = self.get('/dashboards/' + str(dashboard_id) + '/data')
        return json.loads(r.text)['dashboardMonitorDatas']

    def search_metrics(self, query, fromTime=None, toTime=None, requestedDataPoints=600, maxDataPoints=800):
        '''Perform a single Sumo metrics query'''
        def millisectimestamp(ts):
            '''Convert UNIX timestamp to milliseconds'''
            # Normalise to a 13-digit (millisecond) value based on the
            # input's digit count.
            if ts > 10**12:
                ts = ts / (10**(len(str(ts)) - 13))
            else:
                ts = ts * 10**(12 - len(str(ts)))
            return int(ts)

        params = {'query': [{"query": query, "rowId": "A"}],
                  'startTime': millisectimestamp(fromTime),
                  'endTime': millisectimestamp(toTime),
                  'requestedDataPoints': requestedDataPoints,
                  'maxDataPoints': maxDataPoints}
        r = self.post('/metrics/results', params)
        return json.loads(r.text)

    def delete_folder(self, folder_id):
        """Delete a content folder (v2 API)."""
        return self.delete('/content/%s/delete' % folder_id, version='v2')

    def create_folder(self, name, description, parent_folder_id):
        """Create a content folder under ``parent_folder_id`` (v2 API)."""
        content = {
            "name": name,
            "description": description,
            "parentId": parent_folder_id
        }
        return self.post('/content/folders', params=content, version='v2')

    def get_personal_folder(self):
        """Return the calling user's personal folder (v2 API)."""
        return self.get('/content/folders/personal', version='v2')

    def import_content(self, folder_id, content, is_overwrite="false"):
        """Start an asynchronous content import into ``folder_id`` (v2 API)."""
        return self.post('/content/folders/%s/import?overwrite=%s' % (folder_id, is_overwrite), params=content, version='v2')

    def check_import_status(self, folder_id, job_id):
        """Poll the status of an import started by :meth:`import_content` (v2 API)."""
        return self.get('/content/folders/%s/import/%s/status' % (folder_id, job_id), version='v2')
196 |
--------------------------------------------------------------------------------