├── .gitignore
├── main.go
├── Makefile
├── glide.yaml
├── beater
│   ├── config.go
│   └── ctbeat.go
├── etc
│   ├── beat.yml
│   └── fields.yml
├── LICENSE.md
├── glide.lock
├── conf
│   └── cloudtrail_cf.template
├── README.md
└── cloudtrailbeat.yml

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.swp
cloudtrailbeat
testconfig.yml

--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
package main

import (
	"os"

	"github.com/aidan-/cloudtrailbeat/beater"
	"github.com/elastic/beats/libbeat/beat"
)

var Version = "0.0.3"
var Name = "cloudtrailbeat"

func main() {
	if err := beat.Run(Name, Version, beater.New()); err != nil {
		os.Exit(1)
	}
}

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
BEATNAME=cloudtrailbeat
BEAT_DIR=github.com/aidan-/cloudtrailbeat
ES_BEATS=./vendor/github.com/elastic/beats
GOPACKAGES=$(shell glide novendor)
SYSTEM_TESTS=false

# Only cross-compile for Linux because other OSes use cgo.
#GOX_OS=linux darwin windows solaris freebsd netbsd openbsd
GOX_OS=linux

include $(ES_BEATS)/libbeat/scripts/Makefile

--------------------------------------------------------------------------------
/glide.yaml:
--------------------------------------------------------------------------------
package: github.com/aidan-/cloudtrailbeat
import:
- package: github.com/aws/aws-sdk-go
  subpackages:
  - aws
  - aws/credentials
  - aws/session
  - service/s3
  - service/sqs
- package: github.com/elastic/beats
  subpackages:
  - libbeat/beat
  - libbeat/cfgfile
  - libbeat/common
  - libbeat/logp
  - libbeat/publisher

--------------------------------------------------------------------------------
/beater/config.go:
--------------------------------------------------------------------------------
package beater

// CloudTrailConfig holds the user-configurable options read from the "input"
// section of the configuration file. Pointers are used so unset options can
// be distinguished from zero values and given defaults.
type CloudTrailConfig struct {
	SQSUrl                *string `config:"sqs_url"`
	AWSCredentialProvider *string `config:"aws_credential_provider"`
	AWSRegion             *string `config:"aws_region"`
	NoPurge               *bool   `config:"no_purge"`
	NumQueueFetch         *int    `config:"num_queue_fetch"`
	SleepTime             *int    `config:"sleep_time"`
}

type ConfigSettings struct {
	Input CloudTrailConfig
}

--------------------------------------------------------------------------------
/etc/beat.yml:
--------------------------------------------------------------------------------
################### CloudTrailbeat Configuration Example #########################

##################################################################################
input:
  # Full URL of the SQS queue that will be polled to identify new CloudTrail log events
  # default: no default
  sqs_url: "https://sqs.us-east-1.amazonaws.com/xxxxxxxxxxxxxx/cloudtrail-events"

  # AWS region
  # default: us-east-1
  aws_region: "us-east-1"

  # number of messages to fetch from SQS per retrieval
  # default: 1
  num_queue_fetch: 1

  # number of seconds to sleep after the queue has been emptied
  # default: 300
  sleep_time: 300

  # don't purge messages from SQS after they have been successfully processed;
  # this is useful for debugging.
  # default: false
  no_purge: true

--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2016 Aidan Rowe

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/glide.lock:
--------------------------------------------------------------------------------
hash: 1ef9799c10583143248f2f2077ee739beefcfd5c9e5eb99f024196b4c800f316
updated: 2016-05-24T11:07:59.689104092+10:00
imports:
- name: github.com/aidan-/cloudtrailbeat
  version: 452a0c492ef3690e20c27740b45f565860de9f37
  subpackages:
  - beater
- name: github.com/aws/aws-sdk-go
  version: c76e8918e8f08490e3bb154178a84a0b2bdf8d6e
  subpackages:
  - aws
  - aws/credentials
  - aws/session
  - service/s3
  - service/sqs
  - aws/awserr
  - aws/client
  - aws/corehandlers
  - aws/defaults
  - aws/request
  - private/endpoints
  - aws/awsutil
  - aws/client/metadata
  - private/protocol
  - private/protocol/restxml
  - private/signer/v4
  - private/waiter
  - private/protocol/query
  - aws/credentials/ec2rolecreds
  - aws/ec2metadata
  - private/protocol/rest
  - private/protocol/xml/xmlutil
  - private/protocol/query/queryutil
- name: github.com/dustin/go-humanize
  version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
- name: github.com/eapache/go-resiliency
  version: b86b1ec0dd4209a588dc1285cdd471e73525c0b3
  subpackages:
  - breaker
- name: github.com/eapache/queue
  version: ded5959c0d4e360646dc9e9908cff48666781367
- name: github.com/elastic/beats
  version: a0f543d88691f3081d63fcce83defc57a631cb8c
  subpackages:
  - libbeat/beat
  - libbeat/cfgfile
  - libbeat/common
  - libbeat/logp
  - libbeat/publisher
  - libbeat/filter
  - libbeat/filter/rules
  - libbeat/paths
  - libbeat/service
  - libbeat/common/op
  - libbeat/outputs
  - libbeat/outputs/console
  - libbeat/outputs/elasticsearch
  - libbeat/outputs/fileout
  - libbeat/outputs/kafka
  - libbeat/outputs/logstash
  - libbeat/outputs/redis
  - libbeat/common/streambuf
  - libbeat/outputs/mode
  - libbeat/outputs/mode/modeutil
  - libbeat/outputs/transport
  - libbeat/outputs/mode/lb
  - libbeat/outputs/mode/single
- name: github.com/garyburd/redigo
  version: 8873b2f1995f59d4bcdd2b0dc9858e2cb9bf0c13
  subpackages:
  - redis
  - internal
- name: github.com/go-ini/ini
  version: 12f418cc7edc5a618a51407b7ac1f1f512139df3
- name: github.com/golang/snappy
  version: 894fd4616c897c201d223c3c0c128e8c648c96a2
- name: github.com/jmespath/go-jmespath
  version: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
- name: github.com/klauspost/crc32
  version: 6973dcf6594efa905c08260fe9120cae92ab4305
- name: github.com/nranchev/go-libGeoIP
  version: c78e8bd2dd3599feb21fd30886043979e82fe948
- name: github.com/satori/go.uuid
  version: f9ab0dce87d815821e221626b772e3475a0d2749
- name: github.com/Shopify/sarama
  version: 4ba9bba6adb6697bcec3841e1ecdfecf5227c3b9
- name: github.com/urso/ucfg
  version: 55e5fb343ba418d5ae520df8d603f292af0fd500
  subpackages:
  - yaml
- name: golang.org/x/net
  version: d58ca6618b994150e624f6888d871f4709db51a0
  subpackages:
  - proxy
  - publicsuffix
- name: golang.org/x/sys
  version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03
  subpackages:
  - windows/svc
  - windows/svc/debug
  - windows
- name: gopkg.in/yaml.v2
  version: a83829b6f1293c91addabc89d0571c246397bbf4
devImports: []

--------------------------------------------------------------------------------
/conf/cloudtrail_cf.template:
--------------------------------------------------------------------------------
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Parameters": {
    "S3BucketName": {
      "Description": "Name of S3 bucket to create to store CloudTrail log events",
      "Type": "String"
    },
    "SNSTopicName": {
      "Description": "Name of SNS topic to be created",
      "Type": "String"
    },
    "SNSDisplayName": {
      "Description": "Display name to be used for SNS topic",
      "Type": "String"
    },
    "SQSQueueName": {
      "Description": "Name of SQS queue to be created",
      "Type": "String"
    }
  },
  "Resources": {
    "S3Bucket": {
      "DeletionPolicy": "Retain",
      "Type": "AWS::S3::Bucket",
      "Properties": {
        "BucketName": {
          "Ref": "S3BucketName"
        }
      }
    },
    "BucketPolicy": {
      "Type": "AWS::S3::BucketPolicy",
      "Properties": {
        "Bucket": {
          "Ref": "S3Bucket"
        },
        "PolicyDocument": {
          "Version": "2012-10-17",
          "Statement": [{
            "Sid": "AWSCloudTrailAclCheck",
            "Effect": "Allow",
            "Principal": {
              "Service": "cloudtrail.amazonaws.com"
            },
            "Action": "s3:GetBucketAcl",
            "Resource": {
              "Fn::Join": ["", ["arn:aws:s3:::", {
                "Ref": "S3Bucket"
              }]]
            }
          }, {
            "Sid": "AWSCloudTrailWrite",
            "Effect": "Allow",
            "Principal": {
              "Service": "cloudtrail.amazonaws.com"
            },
            "Action": "s3:PutObject",
            "Resource": {
              "Fn::Join": ["", ["arn:aws:s3:::", {
                "Ref": "S3Bucket"
              }, "/AWSLogs/", {
                "Ref": "AWS::AccountId"
              }, "/*"]]
            },
            "Condition": {
              "StringEquals": {
                "s3:x-amz-acl": "bucket-owner-full-control"
              }
            }
          }]
        }
      }
    },
    "Topic": {
      "Type": "AWS::SNS::Topic",
      "Properties": {
        "DisplayName": { "Ref": "SNSDisplayName" },
        "Subscription": [{
          "Endpoint": {
            "Fn::GetAtt": ["Queue", "Arn"]
          },
          "Protocol": "sqs"
        }]
      }
    },
    "TopicPolicy": {
"Type": "AWS::SNS::TopicPolicy", 88 | "Properties": { 89 | "Topics": [{ 90 | "Ref": "Topic" 91 | }], 92 | "PolicyDocument": { 93 | "Version": "2008-10-17", 94 | "Statement": [{ 95 | "Sid": "AWSCloudTrailSNSPolicy", 96 | "Effect": "Allow", 97 | "Principal": { 98 | "Service": "cloudtrail.amazonaws.com" 99 | }, 100 | "Resource": "*", 101 | "Action": "SNS:Publish" 102 | }] 103 | } 104 | } 105 | }, 106 | "Queue": { 107 | "Type": "AWS::SQS::Queue", 108 | "Properties": { 109 | "QueueName": { 110 | "Ref": "SQSQueueName" 111 | } 112 | } 113 | }, 114 | "QueuePolicy": { 115 | "Type": "AWS::SQS::QueuePolicy", 116 | "Properties": { 117 | "PolicyDocument": { 118 | "Version": "2012-10-17", 119 | "Id": "QueuePolicy", 120 | "Statement": [{ 121 | "Sid": "Allow-SendMessage-To-Both-Queues-From-SNS-Topic", 122 | "Effect": "Allow", 123 | "Principal": "*", 124 | "Action": ["sqs:SendMessage"], 125 | "Resource": "*", 126 | "Condition": { 127 | "ArnEquals": { 128 | "aws:SourceArn": { 129 | "Ref": "Topic" 130 | } 131 | } 132 | } 133 | }] 134 | }, 135 | "Queues": [{ 136 | "Ref": "Queue" 137 | }] 138 | } 139 | }, 140 | "Trail": { 141 | "DependsOn": ["BucketPolicy", "TopicPolicy", "QueuePolicy"], 142 | "Type": "AWS::CloudTrail::Trail", 143 | "Properties": { 144 | "S3BucketName": { 145 | "Ref": "S3Bucket" 146 | }, 147 | "SnsTopicName": { 148 | "Fn::GetAtt": ["Topic", "TopicName"] 149 | }, 150 | "IsLogging": true, 151 | "IncludeGlobalServiceEvents": true, 152 | "IsMultiRegionTrail": true 153 | } 154 | } 155 | }, 156 | "Outputs": { 157 | "S3BucketURI" : { 158 | "Description": "URI of S3 Bucket storing CloudTrail logs", 159 | "Value": { "Ref" : "S3Bucket" } 160 | }, 161 | "SQSEndpoint" : { 162 | "Description": "URI of SQS endpoint", 163 | "Value": { "Ref" : "Queue" } 164 | } 165 | } 166 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CloudTrailBeat 2 | Current status: **beta release** 3 | 4 | ## Overview 5 | This is a beat for the [Amazon Web Services (AWS) CloudTrail](https://aws.amazon.com/cloudtrail/) service. CloudTrailBeat relies on a combination of SNS, SQS and S3 to create a processing 'pipeline' to process new log events quickly and efficiently. The beat polls the SQS queue for notification of when a new CloudTrail log file is available for download in S3. Each log file is then downloaded, processed and sent to the configured receiver (logstash, elasticsearch, etc). You are then able to query the data using Kibana (or any other tool) to analyse events involving API calls and IAM authentications. 6 | 7 | ## Getting Started 8 | ### Requirements 9 | * [Golang](https://golang.org/dl/) 1.6 10 | * [Glide](https://github.com/Masterminds/glide) >= 0.10.0 11 | 12 | ### Building 13 | These steps assume you already have a working [Go environment](https://golang.org/doc/install). 14 | 15 | ```bash 16 | git clone https://github.com/aidan-/cloudtrailbeat.git 17 | cd cloudtrailbeat 18 | glide install 19 | make 20 | ``` 21 | 22 | ### AWS Configuration 23 | #### Pipeline configuration 24 | Confguring CloudTrail is relatively straight forward and can be done quite easily through the AWS web console. The [official documentation](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-create-and-update-a-trail.html) outlines the steps required to configure everything, just ensure you complete the optional step 3. 

Once configured, you can confirm everything is working by inspecting the configured S3 bucket as well as the SQS queue.

#### Access control configuration
CloudTrailBeat supports both IAM roles and API keys but, as per AWS best practices, if CloudTrailBeat is being run from an EC2 instance you should use IAM roles. The following IAM policy provides the minimal access required to process new CloudTrail events and initiate backfilling. Make sure you replace the S3 and SQS ARNs with the values appropriate to your configuration.

```JSON
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowS3BucketAccess",
            "Effect": "Allow",
            "Action": [
                "s3:ListBucket"
            ],
            "Resource": [
                "arn:aws:s3:::"
            ]
        },
        {
            "Sid": "AllowObjectRetrieval",
            "Effect": "Allow",
            "Action": [
                "s3:GetObject"
            ],
            "Resource": [
                "arn:aws:s3:::/*"
            ]
        },
        {
            "Sid": "AllowSQS",
            "Effect": "Allow",
            "Action": [
                "sqs:DeleteMessage",
                "sqs:ReceiveMessage",
                "sqs:SendMessage"
            ],
            "Resource": [
                "arn:aws:sqs:"
            ]
        }
    ]
}
```

### Running CloudTrailBeat
1. Build CloudTrailBeat using the steps listed above
2. Modify the included *cloudtrailbeat.yml* file as required
   1. Change the *sqs_url* field under the *input* section to the appropriate SQS URL
   2. Configure the *output* section to send the events to your Logstash/Elasticsearch instance. More information on Beat output configuration can be found in the [official documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration-details.html).
3. If you are not using IAM roles to grant access to the SQS queue and S3 bucket, you will also need to configure *~/.aws/credentials* with an appropriate key and secret. The [AWS docs](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) give a thorough explanation of setting up the required credentials files.
4. Run CloudTrailBeat in debug mode: `cloudtrailbeat -c /path/to/cloudtrailbeat.yml -d "*"`

You should now see a bunch of events scrolling through your terminal and in your output source.

If you are happy with the output, you will need to edit the configuration file to set `no_purge` to `false` (or delete the line).

#### Backfilling
If you would like to backfill events that have been cleared from the SQS queue or have expired, you can run CloudTrailBeat with the `-b` flag followed by the name of the bucket that contains the CloudTrail logs. Example:

`cloudtrailbeat -c /path/to/cloudtrailbeat.yml -d "*" -b example-cloudtrail-bucket`

If you would like to backfill only a subset of a bucket, you can also include the `-p` flag with the desired bucket prefix. Example:

`cloudtrailbeat -c /path/to/cloudtrailbeat.yml -d "*" -b example-cloudtrail-bucket -p AWSLogs/xxxxx/CloudTrail/ap-northeast-1/2016/05`
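
If you are unsure which prefix to use, listing the bucket's CloudTrail keys with the AWS CLI is a quick way to find one (the bucket name below is again a placeholder):

```bash
aws s3 ls s3://example-cloudtrail-bucket/AWSLogs/ --recursive | head
```

Note that backfilling works by pushing a notification for each matching log file back onto the configured SQS queue, so the beat must still be able to reach the queue set in *cloudtrailbeat.yml*.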

## Thanks
This beat is heavily inspired by [AppliedTrust/traildash](https://github.com/AppliedTrust/traildash), with some updates and additional functionality.

## Todo
- Test cases
- Example Kibana configurations and Elasticsearch templates

--------------------------------------------------------------------------------
/etc/fields.yml:
--------------------------------------------------------------------------------
version: 1.0

defaults:
  type: string
  required: false
  index: not_analyzed
  doc_values: true
  ignore_above: 1024

env:
  type: group
  description: >
    Contains common fields available in all event types.
  fields:
    - name: beat.name
      description: >
        The name of the Beat sending the log messages. If the shipper name is set
        in the configuration file, then that value is used. If it is not set,
        the hostname is used.

    - name: beat.hostname
      description: >
        The hostname as returned by the operating system on which the Beat is
        running.

    - name: "@timestamp"
      type: date
      required: true
      format: YYYY-MM-DDTHH:MM:SS.milliZ
      example: 2015-01-24T14:06:05.071Z
      description: >
        The timestamp of the CloudTrail event, taken from the event's eventTime
        field. The precision is in milliseconds. The timezone is UTC.

    - name: type
      required: true
      description: >
        The name of the log event. This field is set to "CloudTrail" for all
        events published by this beat.

    - name: fields
      type: dict
      required: false
      description: >
        Contains user configurable fields.

cloudtrail:
  type: group
  description: >
    Contains information about a CloudTrail event
  fields:
    - name: cloudtrail
      type: group
      description: >
        Contains CloudTrail access logs.

      fields:
        - name: eventTime
          type: string
          description: >
            The date and time the request was made, in coordinated universal time (UTC).

        - name: eventVersion
          type: string
          description: >
            The version of the log event format. The current version is 1.02.

        - name: userIdentity
          type: string
          description: >
            Information about the user that made a request. For more information, see CloudTrail userIdentity Element.

        - name: eventSource
          type: string
          description: >
            The service that the request was made to. This name is normally a short form of the service name without spaces plus .amazonaws.com. For example, the eventSource field lists a call to AWS CloudFormation as cloudformation.amazonaws.com, a call to Amazon EC2 as ec2.amazonaws.com, and a call to Amazon Simple Workflow Service as swf.amazonaws.com. One exception to this convention is CloudWatch, for which the event source is monitoring.amazonaws.com.

        - name: eventName
          type: string
          description: >
            The requested action, which is one of the actions listed in the API Reference for the service.

        - name: awsRegion
          type: string
          description: >
            The AWS region that the request was made to; for example, us-east-1.

        - name: sourceIPAddress
          type: string
          description: >
            The apparent IP address that the request was made from. For actions that originate from the service console, the address reported is for the underlying customer resource, not the console web server. For services in AWS, only the DNS name is displayed.

        - name: errorCode
          type: string
          description: >
            The AWS service error if the request returns an error.

        - name: errorMessage
          type: string
          description: >
            If the request returns an error, the description of the error. This message includes messages for authorization failures.

        - name: requestParameters
          type: string
          description: >
            The parameters, if any, that were sent with the request.

        - name: responseElements
          type: string
          description: >
            The response element for actions that make changes (create, update, or delete actions). If an action does not change state (for example, a request to get or list objects), this element is omitted.

        - name: requestID
          type: string
          description: >
            Value generated by the service being called that identifies the request.

        - name: eventID
          type: string
          description: >
            GUID generated by CloudTrail to uniquely identify each event. You can use this value to identify a single event. For example, you can use the ID as a primary key to retrieve log data from a searchable database.

        - name: eventType
          type: string
          description: >
            Identifies the type of event that generated the event record.

        - name: apiVersion
          type: string
          description: >
            Identifies the API version number associated with the AwsApiCall eventType value.

        - name: recipientAccountID
          type: string
          description: >
            Represents the account ID that received this event.

--------------------------------------------------------------------------------
/cloudtrailbeat.yml:
--------------------------------------------------------------------------------
################### CloudTrailbeat Configuration Example #########################

##################################################################################
input:
  # Full URL of the SQS queue that will be polled to identify new CloudTrail log events
  # default: no default
  sqs_url: "https://sqs.us-east-1.amazonaws.com/xxxxxxxxxxxxxx/cloudtrail-events"

  # AWS region
  # default: us-east-1
  aws_region: "us-east-1"

  # number of messages to fetch from SQS per retrieval
  # default: 1
  num_queue_fetch: 1

  # number of seconds to sleep after the queue has been emptied
  # default: 300
  sleep_time: 300

  # don't purge messages from SQS after they have been successfully processed;
  # this is useful for debugging.
  # default: false
  no_purge: true

############################# Output ##########################################

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:

  ### Elasticsearch as output
  elasticsearch:
    # Array of hosts to connect to.
    # Scheme and port can be left out and will be set to the default (http and 9200).
    # In case you specify an additional path, the scheme is required: http://localhost:9200/path
    # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
    hosts: ["localhost:9200"]

    # Optional protocol and basic auth credentials.
    #protocol: "https"
    #username: "admin"
    #password: "s3cr3t"

    # Dictionary of HTTP parameters to pass within the url with index operations.
    #parameters:
      #param1: value1
      #param2: value2

    # Number of workers per Elasticsearch host.
    #worker: 1
    # Optional index name. The default is "beatname" and generates
    # [beatname-]YYYY.MM.DD keys.
    #index: "beatname"

    # A template is used to set the mapping in Elasticsearch.
    # By default template loading is disabled and no template is loaded.
    # These settings can be adjusted to load your own template or overwrite existing ones.
    #template:

      # Template name. By default the template name is beatname.
      #name: "beatname"

      # Path to template file
      #path: "beatname.template.json"

      # Overwrite existing template
      #overwrite: false

    # Optional HTTP Path
    #path: "/elasticsearch"

    # Proxy server url
    #proxy_url: http://proxy:3128

    # The number of times a particular Elasticsearch index operation is attempted. If
    # the indexing operation doesn't succeed after this many retries, the events are
    # dropped. The default is 3.
    #max_retries: 3

    # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
    # The default is 50.
    #bulk_max_size: 50

    # Configure HTTP request timeout before failing a request to Elasticsearch.
    #timeout: 90

    # The number of seconds to wait for new events between two bulk API index requests.
    # If `bulk_max_size` is reached before this interval expires, additional bulk index
    # requests are made.
    #flush_interval: 1

    # Boolean that sets if the topology is kept in Elasticsearch. The default is
    # false. This option makes sense only for Packetbeat.
    #save_topology: false

    # The time to live in seconds for the topology information that is stored in
    # Elasticsearch. The default is 15 seconds.
    #topology_expire: 15

    # TLS configuration. By default it is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []

      # Configure minimum TLS version allowed for connection to logstash
      #min_version: 1.0

      # Configure maximum TLS version allowed for connection to logstash
      #max_version: 1.2


  ### Logstash as output
  #logstash:
    # The Logstash hosts
    #hosts: ["localhost:5044"]

    # Number of workers per Logstash host.
    #worker: 1

    # Set gzip compression level.
    #compression_level: 3

    # Optionally load balance the events between the Logstash hosts.
    #loadbalance: true

    # Optional index name. The default index name depends on each beat.
    # For Packetbeat, the default is set to packetbeat, for Topbeat
    # to topbeat and for Filebeat to filebeat.
    #index: beatname

    # Optional TLS. By default it is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []


  ### File as output
  #file:
    # Path to the directory where the generated files are saved. The option is mandatory.
    #path: "/tmp/beatname"

    # Name of the generated files. The default is `beatname` and it generates files: `beatname`, `beatname.1`, `beatname.2`, etc.
    #filename: beatname

    # Maximum size in kilobytes of each file. When this size is reached, the files are
    # rotated. The default value is 10 MB.
    #rotate_every_kb: 10000

    # Maximum number of files under path. When this number of files is reached, the
    # oldest file is deleted and the rest are shifted from last to first. The default
    # is 7 files.
    #number_of_files: 7


  ### Console output
  #console:
    # Pretty print json event
    #pretty: false


############################# Shipper #########################################

shipper:
  # The name of the shipper that publishes the network data. It can be used to group
  # all the transactions sent by a single shipper in the web interface.
  # If this option is not defined, the hostname is used.
  #name:

  # The tags of the shipper are included in their own field with each
  # transaction published. Tags make it easy to group servers by different
  # logical properties.
  #tags: ["service-X", "web-tier"]

  # Optional fields that you can specify to add additional information to the
  # output. Fields can be scalar values, arrays, dictionaries, or any nested
  # combination of these.
  #fields:
  #  env: staging

  # If this option is set to true, the custom fields are stored as top-level
  # fields in the output document instead of being grouped under a fields
  # sub-dictionary. Default is false.
  #fields_under_root: false

  # Uncomment the following if you want to ignore transactions created
  # by the server on which the shipper is installed. This option is useful
  # to remove duplicates if shippers are installed on multiple servers.
  #ignore_outgoing: true

  # How often (in seconds) shippers are publishing their IPs to the topology map.
  # The default is 10 seconds.
  #refresh_topology_freq: 10

  # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
  # All the IPs will be deleted afterwards. Note that the value must be higher than
  # refresh_topology_freq. The default is 15 seconds.
  #topology_expire: 15

  # Internal queue size for single events in processing pipeline
  #queue_size: 1000

  # Sets the maximum number of CPUs that can be executing simultaneously. The
  # default is the number of logical CPUs available in the system.
  #max_procs:

  # Configure local GeoIP database support.
  # If no paths are configured, geoip is disabled.
  #geoip:
    #paths:
    #  - "/usr/share/GeoIP/GeoLiteCity.dat"
    #  - "/usr/local/var/GeoIP/GeoLiteCity.dat"


############################# Logging #########################################

# There are three options for the log output: syslog, file, stderr.
# On Windows systems, logs are sent to the file output by default;
# on all other systems they are sent to syslog by default.
logging:

  # Send all logging output to syslog. On Windows default is false, otherwise
  # default is true.
  #to_syslog: true

  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
  # limit is reached.
  #to_files: false

  # To enable logging to files, to_files option has to be set to true
  files:
    # The directory where the log files will be written to.
    #path: /var/log/mybeat

    # The name of the files where the logs are written to.
    #name: mybeat

    # Configure log file size limit. If limit is reached, log file will be
    # automatically rotated
    rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Oldest files will be deleted first.
    #keepfiles: 7

  # Enable debug output for selected components. To enable all selectors use ["*"].
  # Other available selectors are beat, publish, service.
  # Multiple selectors can be chained.
  #selectors: [ ]

  # Sets log level. The default log level is error.
  # Available log levels are: critical, error, warning, info, debug
  #level: error

--------------------------------------------------------------------------------
/beater/ctbeat.go:
--------------------------------------------------------------------------------
package beater

import (
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/sqs"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/cfgfile"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/publisher"
)

const logTimeFormat = "2006-01-02T15:04:05Z"

// CloudTrailbeat contains configuration options specific to the current
// running instance as defined in cmd line arguments and the configuration
// file.
type CloudTrailbeat struct {
	sqsURL        string
	awsConfig     *aws.Config
	numQueueFetch int
	sleepTime     time.Duration
	noPurge       bool

	backfillBucket string
	backfillPrefix string

	CTbConfig   ConfigSettings
	CmdLineArgs CmdLineArgs
	events      publisher.Client
	done        chan struct{}
}

// CmdLineArgs is used by the flag package to parse custom flags specific
// to CloudTrailbeat
type CmdLineArgs struct {
	backfillBucket *string
	backfillPrefix *string
}

var cmdLineArgs CmdLineArgs

// SQS message extracted from raw sqs event Body
type sqsMessage struct {
	Type             string
	MessageID        string
	TopicArn         string
	Message          string
	Timestamp        string
	SignatureVersion string
	Signature        string
	SigningCertURL   string
	UnsubscribeURL   string
}

// CloudTrail specific information extracted from sqsMessage and sqsMessage.Message
type ctMessage struct {
	S3Bucket      string   `json:"s3Bucket"`
	S3ObjectKey   []string `json:"s3ObjectKey"`
	MessageID     string   `json:",omitempty"`
	ReceiptHandle string   `json:",omitempty"`
}

// data struct matching the defined fields of a CloudTrail Record as
// described in:
// http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html
type cloudtrailLog struct {
	Records []cloudtrailEvent
}
type cloudtrailEvent struct {
	EventTime          string                 `json:"eventTime"`
	EventVersion       string                 `json:"eventVersion"`
	EventSource        string                 `json:"eventSource"`
	UserIdentity       map[string]interface{} `json:"userIdentity"`
	EventName          string                 `json:"eventName"`
	AwsRegion          string                 `json:"awsRegion"`
	SourceIPAddress    string                 `json:"sourceIPAddress"`
	UserAgent          string                 `json:"userAgent"`
	ErrorCode          string                 `json:"errorCode"`
	ErrorMessage       string                 `json:"errorMessage,omitempty"`
	RequestParameters  map[string]interface{} `json:"requestParameters"`
	RequestID          string                 `json:"requestID"`
	EventID            string                 `json:"eventID"`
	EventType          string                 `json:"eventType"`
	APIVersion         string                 `json:"apiVersion"`
	RecipientAccountID string                 `json:"recipientAccountID"`
	//ResponseElements map[string]interface{} `json:"responseElements"`
}

func init() {
	cmdLineArgs = CmdLineArgs{
		backfillBucket: flag.String("b", "", "Name of S3 bucket used for backfilling"),
		backfillPrefix: flag.String("p", "", "Prefix to be used when listing objects from S3 bucket"),
	}
}

func New() *CloudTrailbeat {
	cb := &CloudTrailbeat{}
	cb.CmdLineArgs = cmdLineArgs

	return cb
}

func (cb *CloudTrailbeat) Config(b *beat.Beat) error {
	if err := cfgfile.Read(&cb.CTbConfig, ""); err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// Validate and instantiate configuration file variables
	if cb.CTbConfig.Input.SQSUrl != nil {
		cb.sqsURL = *cb.CTbConfig.Input.SQSUrl
	} else {
		return errors.New("Invalid SQS URL in configuration file")
	}

	if cb.CTbConfig.Input.NumQueueFetch != nil {
		cb.numQueueFetch = *cb.CTbConfig.Input.NumQueueFetch
	} else {
		cb.numQueueFetch = 1
	}

	if cb.CTbConfig.Input.SleepTime != nil {
		cb.sleepTime = time.Duration(*cb.CTbConfig.Input.SleepTime) * time.Second
	} else {
		cb.sleepTime = time.Minute * 5
	}

	if cb.CTbConfig.Input.NoPurge != nil {
		cb.noPurge = *cb.CTbConfig.Input.NoPurge
	} else {
		cb.noPurge = false
	}

	// use the AWS credential profile from the configuration file if provided,
	// fall back to ENV and ~/.aws/credentials
	if cb.CTbConfig.Input.AWSCredentialProvider != nil {
		cb.awsConfig = &aws.Config{
			Credentials: credentials.NewSharedCredentials("", *cb.CTbConfig.Input.AWSCredentialProvider),
		}
	} else {
		cb.awsConfig = aws.NewConfig()
	}

	if cb.CTbConfig.Input.AWSRegion != nil {
		cb.awsConfig = cb.awsConfig.WithRegion(*cb.CTbConfig.Input.AWSRegion)
	}

	// parse cmd line flags to determine if backfill or queue mode is being used;
	// flag.String never returns nil, so test for a non-empty value
	if *cb.CmdLineArgs.backfillBucket != "" {
		cb.backfillBucket = *cb.CmdLineArgs.backfillBucket
		cb.backfillPrefix = *cb.CmdLineArgs.backfillPrefix
	}

	logp.Debug("cloudtrailbeat", "Init cloudtrailbeat")
	logp.Debug("cloudtrailbeat", "SQS Url: %s", cb.sqsURL)
	logp.Debug("cloudtrailbeat", "Number of items to fetch from queue: %d", cb.numQueueFetch)
	logp.Debug("cloudtrailbeat", "Time to sleep when queue is empty: %.0f", cb.sleepTime.Seconds())
	logp.Debug("cloudtrailbeat", "Messages will be purged from SQS after processing: %t", !cb.noPurge)
	logp.Debug("cloudtrailbeat", "Backfill bucket: %s", cb.backfillBucket)
	logp.Debug("cloudtrailbeat", "Backfill prefix: %s", cb.backfillPrefix)

	return nil
}

func (cb *CloudTrailbeat) Setup(b *beat.Beat) error {
	cb.events = b.Events
	cb.done = make(chan struct{})
	return nil
}

func (cb *CloudTrailbeat) Run(b *beat.Beat) error {
	if cb.backfillBucket != "" {
		logp.Info("Running in backfill mode")
		if err := cb.runBackfill(); err != nil {
			return fmt.Errorf("Error backfilling logs: %s", err)
		}
	} else {
		logp.Info("Running in queue mode")
		if err := cb.runQueue(); err != nil {
			return fmt.Errorf("Error processing queue: %s", err)
		}
	}
	return nil
}

func (cb *CloudTrailbeat) runQueue() error {
	for {
		select {
		case <-cb.done:
			return nil
		default:
		}

		messages, err := cb.fetchMessages()
		if err != nil {
			logp.Err("Error fetching messages from SQS: %v", err)
			return err
		}

		if len(messages) == 0 {
			logp.Info("No new events to process, sleeping for %.0f seconds", cb.sleepTime.Seconds())
			time.Sleep(cb.sleepTime)
			continue
		}

		logp.Info("Fetched %d new CloudTrail events from SQS.", len(messages))
		// fetch and process each log file
		for _, m := range messages {
			logp.Info("Downloading and processing log file: s3://%s/%s", m.S3Bucket, m.S3ObjectKey[0])
			lf, err := cb.readLogfile(m)
			if err != nil {
				logp.Err("Error reading log file [id: %s]: %s", m.MessageID, err)
				continue
			}

			if err := cb.publishEvents(lf); err != nil {
				logp.Err("Error publishing CloudTrail events [id: %s]: %s", m.MessageID, err)
				continue
			}
			if !cb.noPurge {
				if err := cb.deleteMessage(m); err != nil {
					logp.Err("Error deleting processed SQS event [id: %s]: %s", m.MessageID, err)
				}
			}
		}
	}
}

func (cb *CloudTrailbeat) runBackfill() error {
	logp.Info("Backfilling using S3 bucket: s3://%s/%s", cb.backfillBucket, cb.backfillPrefix)

	s := s3.New(session.New(cb.awsConfig))
	q := s3.ListObjectsInput{
		Bucket: aws.String(cb.backfillBucket),
		Prefix: aws.String(cb.backfillPrefix),
	}

	if list, err := s.ListObjects(&q); err == nil {
		for _, e := range list.Contents {
			if strings.HasSuffix(*e.Key, ".json.gz") {
				logp.Info("Found log file to add to queue: %s", *e.Key)
				if err := cb.pushQueue(cb.backfillBucket, *e.Key); err != nil {
					logp.Err("Failed to push log file onto queue: %s", err)
					return fmt.Errorf("Queue push failed: %s", err)
				}
			}
		}
	} else {
		logp.Err("Unable to list objects in bucket: %s", err)
		return fmt.Errorf("Failed to list bucket objects: %s", err)
	}
	return nil
}

func (cb *CloudTrailbeat) pushQueue(bucket, key string) error {
	body := ctMessage{
		S3Bucket:    bucket,
		S3ObjectKey: []string{key},
	}
	b, err := json.Marshal(body)
	if err != nil {
		return err
	}

	msg := sqsMessage{Message: string(b)}
	m, err := json.Marshal(msg)
	if err != nil {
		return err
	}

	q := sqs.New(session.New(cb.awsConfig))
	_, err = q.SendMessage(&sqs.SendMessageInput{
		QueueUrl:    aws.String(cb.sqsURL),
		MessageBody: aws.String(string(m)),
	})
	if err != nil {
		return err
	}

	return nil
}

func (cb *CloudTrailbeat) Stop() {
	close(cb.done)
}

func (cb *CloudTrailbeat) Cleanup(b *beat.Beat) error {
	return nil
}

func (cb *CloudTrailbeat) publishEvents(ct cloudtrailLog) error {
	if len(ct.Records) < 1 {
		return nil
	}

	events := make([]common.MapStr, 0, len(ct.Records))

	for _, cte := range ct.Records {
		timestamp, err := time.Parse(logTimeFormat, cte.EventTime)
		if err != nil {
			// fall back to the zero value; the event is still published
			logp.Err("Unable to parse EventTime: %s", cte.EventTime)
		}

		be := common.MapStr{
			"@timestamp": common.Time(timestamp),
			"type":       "CloudTrail",
			"cloudtrail": cte,
		}

		events = append(events, be)
	}
	if !cb.events.PublishEvents(events, publisher.Sync, publisher.Guaranteed) {
		return fmt.Errorf("Error publishing events")
	}

	return nil
}

func (cb *CloudTrailbeat) readLogfile(m ctMessage) (cloudtrailLog, error) {
	events := cloudtrailLog{}

	s := s3.New(session.New(cb.awsConfig))
	q := s3.GetObjectInput{
		Bucket: aws.String(m.S3Bucket),
		Key:    aws.String(m.S3ObjectKey[0]),
	}
	o, err := s.GetObject(&q)
	if err != nil {
		return events, err
	}
	b, err := ioutil.ReadAll(o.Body)
	if err != nil {
		return events, err
	}

	if err := json.Unmarshal(b, &events); err != nil {
		return events, fmt.Errorf("Error unmarshaling cloudtrail JSON: %s", err.Error())
	}

	return events, nil
}

func (cb *CloudTrailbeat) fetchMessages() ([]ctMessage, error) {
	var m []ctMessage

	q := sqs.New(session.New(cb.awsConfig))
	params := &sqs.ReceiveMessageInput{
		QueueUrl:            aws.String(cb.sqsURL),
		MaxNumberOfMessages: aws.Int64(int64(cb.numQueueFetch)),
	}

	resp, err := q.ReceiveMessage(params)
	if err != nil {
		return m, fmt.Errorf("SQS ReceiveMessage error: %s", err.Error())
	}

	// no new messages in queue
	if len(resp.Messages) == 0 {
		return nil, nil
	}

	for _, e := range resp.Messages {
		tmsg := sqsMessage{}
		if err := json.Unmarshal([]byte(*e.Body), &tmsg); err != nil {
			return nil, fmt.Errorf("SQS message JSON parse error [id: %s]: %s", *e.MessageId, err.Error())
		}

		// the validation message CloudTrail publishes when notifications are
		// first enabled is plain text rather than JSON, so handle (and purge)
		// it before attempting to parse a ctMessage out of the body
		if tmsg.Message == "CloudTrail validation message." {
			if !cb.noPurge {
				if err := cb.deleteMessage(ctMessage{ReceiptHandle: *e.ReceiptHandle}); err != nil {
					return nil, fmt.Errorf("Error deleting 'validation message' [id: %s]: %s", tmsg.MessageID, err)
				}
			}
			continue
		}

		event := ctMessage{}
		if err := json.Unmarshal([]byte(tmsg.Message), &event); err != nil {
			return nil, fmt.Errorf("SQS body JSON parse error [id: %s]: %s", *e.MessageId, err.Error())
		}

		event.MessageID = tmsg.MessageID
		event.ReceiptHandle = *e.ReceiptHandle

		m = append(m, event)
	}

	return m, nil
}

func (cb *CloudTrailbeat) deleteMessage(m ctMessage) error {
	q := sqs.New(session.New(cb.awsConfig))
	params := &sqs.DeleteMessageInput{
		QueueUrl:      aws.String(cb.sqsURL),
		ReceiptHandle: aws.String(m.ReceiptHandle),
	}

	_, err := q.DeleteMessage(params)
	if err != nil {
		return err
	}

	return nil
}

--------------------------------------------------------------------------------