├── cloudwatch_log_trigger
│   ├── README_node.txt
│   ├── kinesis_logs_forwarder.js
│   ├── cloudwatch_log_sender.py
│   ├── cloudwatch_log_sender_py27.py
│   └── cloudwatch_log_sender.js
├── sar.yml
├── LICENSE
├── cloudwatch_log_sender.py
└── cloudwatch_log_sender_no_gzip.py
/cloudwatch_log_trigger/README_node.txt: -------------------------------------------------------------------------------- 1 | 1. Add all of the relevant code, globals and imports to your Lambda's code. 2 | 2. Call forwardLogs with the event that triggered your Lambda (see the hedged wiring sketch after the LICENSE text below). 3 | * Note - the forwardLogs function returns a Promise! 4 | 3. Set the following environment variables for your Lambda: 5 | - EPSAGON_REGION: us-east-1 6 | - EPSAGON_KINESIS_NAME: logs-sender-logs-stream-kinesis-us-east-1-production 7 | - EPSAGON_AWS_ACCESS_KEY_ID: xxx 8 | - EPSAGON_AWS_SECRET_ACCESS_KEY: xxx -------------------------------------------------------------------------------- /sar.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: 'AWS::Serverless-2016-10-31' 3 | Description: 'Epsagon Logs Sender' 4 | Parameters: 5 | EpsagonLogsKinesis: 6 | Type: String 7 | AWSSecretID: 8 | Type: String 9 | AWSSecretKey: 10 | Type: String 11 | Region: 12 | Type: String 13 | Stage: 14 | Type: String 15 | Resources: 16 | EpsagonLogsSender: 17 | Properties: 18 | CodeUri: s3://epsagon-logs-sender/cloudwatch_log_sender.zip 19 | Environment: 20 | Variables: 21 | EPSAGON_KINESIS: 22 | Ref: EpsagonLogsKinesis 23 | AWS_ID: 24 | Ref: AWSSecretID 25 | AWS_KEY: 26 | Ref: AWSSecretKey 27 | REGION: 28 | Ref: Region 29 | STAGE: 30 | Ref: Stage 31 | Handler: cloudwatch_log_sender.handler 32 | MemorySize: 128 33 | Runtime: python3.6 34 | Timeout: 60 35 | Type: AWS::Serverless::Function -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Epsagon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
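For step 2 of README_node.txt above, a minimal wiring sketch of the user's own Lambda is shown here. The handler is hypothetical and not part of the repository: it assumes the forwarder file cloudwatch_log_trigger/cloudwatch_log_sender.js (whose environment variables match the README) is bundled next to it as cloudwatch_log_sender.js, and that the README's forwardLogs refers to its exported forwardLogsLambdaHandler; swap in forwardLambdaHandler from kinesis_logs_forwarder.js if that variant is used instead.

// Hypothetical wrapper Lambda (illustration only, not part of this repository).
// It forwards the triggering event to Epsagon, then runs the function's own logic.
const { forwardLogsLambdaHandler } = require("./cloudwatch_log_sender");

exports.handler = async (event, context) => {
    // The forwarder returns a Promise (see the note in README_node.txt),
    // so await it to make sure the logs reach Kinesis before the Lambda freezes.
    await forwardLogsLambdaHandler(event, context);

    // ... the function's real work would go here ...
    return { status: "ok" };
};

Awaiting (or returning) the forwarder's Promise is the important part; firing it and returning synchronously can let the execution environment freeze before the putRecord call completes.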
22 | -------------------------------------------------------------------------------- /cloudwatch_log_trigger/kinesis_logs_forwarder.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Forward logs to epsagon using AWS kinesis 3 | */ 4 | 5 | const util = require("util"); 6 | const zlib = require("zlib"); 7 | const AWS = require("aws-sdk"); 8 | 9 | const epsagonToken = process.env.EPSAGON_TOKEN; 10 | const userLogsKinesis = process.env.EPSAGON_LOGS_KINESIS; 11 | const gunzip = util.promisify(zlib.gunzip); 12 | const kinesisClient = new AWS.Kinesis({ 13 | region: process.env.EPSAGON_REGION, 14 | accessKeyId: process.env.EPSAGON_AWS_ACCESS_KEY_ID, 15 | secretAccessKey: process.env.EPSAGON_AWS_SECRET_ACCESS_KEY, 16 | }); 17 | 18 | function epsagon_debug(error) { 19 | if ((process.env.EPSAGON_DEBUG || "").toUpperCase() === "TRUE") { 20 | // eslint-disable-next-line no-console 21 | console.log(error); 22 | } 23 | } 24 | 25 | function createRecordData(logEvents, transformEvent) { 26 | transformEvent = transformEvent || ((event) => event); 27 | logEvents = logEvents.map(transformEvent); 28 | 29 | return JSON.stringify({ token: epsagonToken, events: logEvents }); 30 | } 31 | 32 | function createRecord(logsData) { 33 | return { 34 | Data: createRecordData(logsData.logEvents), 35 | PartitionKey: epsagonToken, 36 | }; 37 | } 38 | 39 | async function mapToEpsagonRecord(record) { 40 | const data = record.kinesis.data; 41 | const decoded = new Buffer.from(data, "base64"); 42 | const unzipped = await gunzip(decoded); 43 | const logsData = JSON.parse(unzipped.toString("utf-8")); 44 | 45 | if (logsData.messageType !== "DATA_MESSAGE") { 46 | epsagon_debug("invalid messageType"); 47 | return; 48 | } 49 | 50 | return createRecord(logsData); 51 | } 52 | 53 | async function forwardLambdaHandler(event, _) { 54 | const records = await Promise.all(event.Records.map(mapToEpsagonRecord)); 55 | const filtered = records.filter((record) => record); 56 | 57 | epsagon_debug(records); 58 | epsagon_debug(userLogsKinesis); 59 | 60 | if (filtered.length === 0) return; 61 | 62 | try { 63 | const data = await kinesisClient 64 | .putRecords({ 65 | Records: filtered, 66 | StreamName: userLogsKinesis, 67 | }) 68 | .promise(); 69 | 70 | epsagon_debug("Record sent"); 71 | epsagon_debug(data); 72 | } catch (e) { 73 | epsagon_debug(e); 74 | return; 75 | } 76 | } 77 | 78 | module.exports = { forwardLambdaHandler }; 79 | -------------------------------------------------------------------------------- /cloudwatch_log_trigger/cloudwatch_log_sender.py: -------------------------------------------------------------------------------- 1 | """Epsagon Logs Parser, parse CloudWatch Logs event""" 2 | 3 | import json 4 | import gzip 5 | import base64 6 | import os 7 | import re 8 | import traceback 9 | import boto3 10 | 11 | FILTER_PATTERNS = ( 12 | 'REPORT', 'Task timed out', 'Process exited before completing', 'Traceback', 13 | 'module initialization error:', 'Unable to import module', 'errorMessage', 14 | '.java:0', '.java:1', '.java:2', '.java:3', '.java:4', '.java:5', '.java:6', 15 | '.java:7', '.java:8', '.java:9' 16 | ) 17 | 18 | 19 | AWS_SECRET = os.environ.get('EPSAGON_AWS_SECRET_ACCESS_KEY').strip() 20 | AWS_KEY = os.environ.get('EPSAGON_AWS_ACCESS_KEY_ID').strip() 21 | REGION = os.environ.get('EPSAGON_REGION').strip() 22 | CURRENT_REGION = os.environ.get('AWS_REGION').strip() 23 | KINESIS_NAME = os.environ.get('EPSAGON_KINESIS_NAME').strip() 24 | REGEX = re.compile( 25 | 
'|'.join([f'.*{pattern}.*' for pattern in FILTER_PATTERNS]), 26 | re.DOTALL 27 | ) 28 | 29 | kinesis = boto3.client( 30 | 'kinesis', 31 | aws_access_key_id=AWS_KEY, 32 | aws_secret_access_key=AWS_SECRET, 33 | region_name=REGION, 34 | ) 35 | 36 | 37 | def epsagon_debug(message): 38 | if os.getenv('EPSAGON_DEBUG', '').upper() == 'TRUE': 39 | print(message) 40 | 41 | 42 | def filter_events(record_data): 43 | """ 44 | Filter events relevant for Epsagon. 45 | :param record_data: Record data that holds the vents. 46 | :return: dict / None. 47 | """ 48 | record = None 49 | if record_data['messageType'] == 'DATA_MESSAGE': 50 | original_events = record_data['logEvents'] 51 | partition_key = record_data['logStream'] 52 | record_data['subscriptionFilters'] = ( 53 | [f'Epsagon#{record_data["owner"]}#{CURRENT_REGION}'] 54 | ) 55 | events = [] 56 | epsagon_debug(f'Found total of {len(original_events)} events') 57 | epsagon_debug(f'Original events: {original_events}') 58 | for event in original_events: 59 | if REGEX.match(event['message']) is not None: 60 | events.append(event) 61 | epsagon_debug(f'Filtered total of {len(events)} events.') 62 | if events: 63 | record_data['logEvents'] = events 64 | record = { 65 | 'Data': gzip.compress(json.dumps(record_data).encode('ascii')), 66 | 'PartitionKey': partition_key 67 | } 68 | return record 69 | 70 | 71 | def forward_logs_to_epsagon(event): 72 | """ 73 | Send filtered CloudWatch logs to Epsagon Kinesis. 74 | :param event: The triggered event from CloudWatch logs. 75 | """ 76 | try: 77 | record_data = json.loads( 78 | gzip.decompress( 79 | base64.b64decode(event['awslogs']['data']) 80 | ) 81 | ) 82 | filtered_event = filter_events(record_data) 83 | if not filtered_event: 84 | epsagon_debug('No logs match') 85 | return False 86 | 87 | original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID') 88 | original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY') 89 | os.environ['AWS_ACCESS_KEY_ID'] = AWS_KEY 90 | os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET 91 | try: 92 | kinesis.put_record( 93 | StreamName=KINESIS_NAME, 94 | Data=filtered_event['Data'], 95 | PartitionKey=filtered_event['PartitionKey'], 96 | ) 97 | finally: 98 | os.environ['AWS_ACCESS_KEY_ID'] = original_access_key 99 | os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key 100 | 101 | except Exception as err: 102 | epsagon_debug('Encountered error: {}'.format(err)) 103 | epsagon_debug(traceback.format_exc()) 104 | 105 | return True 106 | -------------------------------------------------------------------------------- /cloudwatch_log_sender.py: -------------------------------------------------------------------------------- 1 | """Epsagon Logs Parser""" 2 | 3 | import json 4 | import gzip 5 | import base64 6 | import os 7 | import re 8 | import traceback 9 | 10 | import boto3 11 | 12 | FILTER_PATTERNS = ( 13 | 'REPORT', 'Task timed out', 'Process exited before completing', 'Traceback', 14 | 'module initialization error:', 'Unable to import module', 'errorMessage', 15 | '.java:0', '.java:1', '.java:2', '.java:3', '.java:4', '.java:5', '.java:6', 16 | '.java:7', '.java:8', '.java:9' 17 | ) 18 | 19 | DEGUG_STRING = 'DEBUG' 20 | STAGE = os.environ.get('STAGE', '').strip() 21 | AWS_ID = os.environ.get('AWS_ID').strip() 22 | AWS_KEY = os.environ.get('AWS_KEY').strip() 23 | REGION = os.environ.get('EPSAGON_REGION').strip() 24 | CURRENT_REGION = os.environ.get('AWS_REGION').strip() 25 | KINESIS_NAME = os.environ.get('EPSAGON_KINESIS').strip() 26 | OVERRIDE_SUBSCRIPTIONS = 
os.environ.get('OVERRIDE_SUBSCRIPTIONS', '').strip() 27 | 28 | REGEX = re.compile( 29 | '|'.join([f'.*{pattern}.*' for pattern in FILTER_PATTERNS]), 30 | re.DOTALL 31 | ) 32 | 33 | kinesis = boto3.client( 34 | 'kinesis', 35 | aws_access_key_id=AWS_ID, 36 | aws_secret_access_key=AWS_KEY, 37 | region_name=REGION, 38 | ) 39 | 40 | 41 | def filter_events(record_data, partition_key): 42 | """ 43 | Filter events relevant for Epsagon. 44 | :param record_data: Record data that holds the vents. 45 | :param partition_key: The record's partition key. 46 | :return: dict / None. 47 | """ 48 | if record_data['messageType'] == 'DATA_MESSAGE': 49 | original_events = record_data['logEvents'] 50 | if OVERRIDE_SUBSCRIPTIONS.lower() == 'true': 51 | record_data['subscriptionFilters'] = ( 52 | [f'Epsagon#{record_data["owner"]}#{CURRENT_REGION}'] 53 | ) 54 | events = [] 55 | print_if_needed(f'Found total of {len(original_events)} events') 56 | print_if_needed(f'Original events: {original_events}') 57 | for event in original_events: 58 | if REGEX.match(event['message']) is not None: 59 | events.append(event) 60 | print_if_needed(f'Filtered total of {len(events)} events.') 61 | if events: 62 | record_data['logEvents'] = events 63 | return { 64 | 'Data': gzip.compress(json.dumps(record_data).encode('ascii')), 65 | 'PartitionKey': partition_key 66 | } 67 | return 68 | 69 | 70 | def handler(event, _): 71 | """ 72 | Send filtered CloudWatch logs to Epsagon Kinesis. 73 | :param event: The triggered event from Kinesis. 74 | """ 75 | try: 76 | records_to_send = [] 77 | for record in event['Records']: 78 | partition_key = record['kinesis']['partitionKey'] 79 | compressed_record_data = record['kinesis']['data'] 80 | record_data = json.loads( 81 | gzip.decompress( 82 | base64.b64decode(compressed_record_data) 83 | ) 84 | ) 85 | filtered_events = filter_events(record_data, partition_key) 86 | if filtered_events: 87 | records_to_send.append(filtered_events) 88 | 89 | original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID') 90 | original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY') 91 | os.environ['AWS_ACCESS_KEY_ID'] = AWS_ID 92 | os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_KEY 93 | try: 94 | if records_to_send: 95 | print_if_needed( 96 | f'Sending {len(records_to_send)} events to Kinesis' 97 | ) 98 | kinesis.put_records(StreamName=KINESIS_NAME, 99 | Records=records_to_send) 100 | finally: 101 | os.environ['AWS_ACCESS_KEY_ID'] = original_access_key 102 | os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key 103 | 104 | except Exception as e: 105 | print(traceback.format_exc()) 106 | 107 | return True 108 | 109 | 110 | def print_if_needed(message): 111 | if STAGE.lower() == DEGUG_STRING.lower(): 112 | print(message) 113 | -------------------------------------------------------------------------------- /cloudwatch_log_sender_no_gzip.py: -------------------------------------------------------------------------------- 1 | """Epsagon Logs Parser""" 2 | 3 | import json 4 | import gzip 5 | import base64 6 | import os 7 | import re 8 | import traceback 9 | 10 | import boto3 11 | 12 | FILTER_PATTERNS = ( 13 | 'REPORT', 'Task timed out', 'Process exited before completing', 'Traceback', 14 | 'module initialization error:', 'Unable to import module', 'errorMessage', 15 | '.java:0', '.java:1', '.java:2', '.java:3', '.java:4', '.java:5', '.java:6', 16 | '.java:7', '.java:8', '.java:9' 17 | ) 18 | 19 | DEGUG_STRING = 'DEBUG' 20 | STAGE = os.environ.get('STAGE', '').strip() 21 | AWS_ID = os.environ.get('AWS_ID').strip() 22 | 
AWS_KEY = os.environ.get('AWS_KEY').strip() 23 | REGION = os.environ.get('EPSAGON_REGION').strip() 24 | CURRENT_REGION = os.environ.get('AWS_REGION').strip() 25 | KINESIS_NAME = os.environ.get('EPSAGON_KINESIS').strip() 26 | OVERRIDE_SUBSCRIPTIONS = os.environ.get('OVERRIDE_SUBSCRIPTIONS', '').strip() 27 | 28 | REGEX = re.compile( 29 | '|'.join([f'.*{pattern}.*' for pattern in FILTER_PATTERNS]), 30 | re.DOTALL 31 | ) 32 | 33 | kinesis = boto3.client( 34 | 'kinesis', 35 | aws_access_key_id=AWS_ID, 36 | aws_secret_access_key=AWS_KEY, 37 | region_name=REGION, 38 | ) 39 | 40 | 41 | def filter_events(record_data, partition_key): 42 | """ 43 | Filter events relevant for Epsagon. 44 | :param record_data: Record data that holds the vents. 45 | :param partition_key: The record's partition key. 46 | :return: dict / None. 47 | """ 48 | print_if_needed(f'record data: {record_data}') 49 | print_if_needed(f'partition key: {partition_key}') 50 | if record_data['messageType'] == 'DATA_MESSAGE': 51 | original_events = record_data['logEvents'] 52 | if OVERRIDE_SUBSCRIPTIONS.lower() == 'true': 53 | record_data['subscriptionFilters'] = ( 54 | [f'Epsagon#{record_data["owner"]}#{CURRENT_REGION}'] 55 | ) 56 | events = [] 57 | print_if_needed(f'Found total of {len(original_events)} events') 58 | print_if_needed(f'Original events: {original_events}') 59 | for event in original_events: 60 | if REGEX.match(event['message']) is not None: 61 | events.append(event) 62 | print_if_needed(f'Filtered total of {len(events)} events.') 63 | if events: 64 | record_data['logEvents'] = events 65 | return { 66 | 'Data': gzip.compress(json.dumps(record_data).encode('ascii')), 67 | 'PartitionKey': partition_key 68 | } 69 | return 70 | 71 | 72 | def handler(event, _): 73 | """ 74 | Send filtered CloudWatch logs to Epsagon Kinesis. 75 | :param event: The triggered event from Kinesis. 76 | """ 77 | try: 78 | records_to_send = [] 79 | for record in event['Records']: 80 | partition_key = record['kinesis']['partitionKey'] 81 | compressed_record_data = record['kinesis']['data'] 82 | record_data = json.loads( 83 | base64.b64decode(compressed_record_data) 84 | ) 85 | filtered_events = filter_events(record_data, partition_key) 86 | if filtered_events: 87 | records_to_send.append(filtered_events) 88 | 89 | original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID') 90 | original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY') 91 | os.environ['AWS_ACCESS_KEY_ID'] = AWS_ID 92 | os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_KEY 93 | try: 94 | if records_to_send: 95 | print_if_needed( 96 | f'Sending {len(records_to_send)} events to Kinesis' 97 | ) 98 | kinesis.put_records(StreamName=KINESIS_NAME, 99 | Records=records_to_send) 100 | finally: 101 | os.environ['AWS_ACCESS_KEY_ID'] = original_access_key 102 | os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key 103 | 104 | except Exception as e: 105 | print(traceback.format_exc()) 106 | 107 | return True 108 | 109 | 110 | def print_if_needed(message): 111 | if STAGE.lower() == DEGUG_STRING.lower(): 112 | print(message) 113 | -------------------------------------------------------------------------------- /cloudwatch_log_trigger/cloudwatch_log_sender_py27.py: -------------------------------------------------------------------------------- 1 | """ 2 | Epsagon Logs Parser, parse CloudWatch Logs event 3 | 4 | 1. Add this code snippet, and call forward_logs_to_epsagon(event) with the original event. 5 | 2. 
Set the following environment variables for your Lambda: 6 | - EPSAGON_REGION: us-east-1 7 | - EPSAGON_KINESIS_NAME: logs-sender-logs-stream-kinesis-us-east-1-production 8 | - EPSAGON_AWS_ROLE: xxx 9 | 3. Add this permission to your Lambda's IAM role: 10 | { 11 | "Sid": "VisualEditorX", 12 | "Effect": "Allow", 13 | "Action": "sts:AssumeRole", 14 | "Resource": "*" 15 | } 16 | """ 17 | 18 | import json 19 | import gzip 20 | import base64 21 | import os 22 | import re 23 | import traceback 24 | from StringIO import StringIO 25 | import boto3 26 | 27 | FILTER_PATTERNS = ( 28 | 'REPORT', 'Task timed out', 'Process exited before completing', 'Traceback', 29 | 'module initialization error:', 'Unable to import module', 'errorMessage', 30 | '.java:0', '.java:1', '.java:2', '.java:3', '.java:4', '.java:5', '.java:6', 31 | '.java:7', '.java:8', '.java:9' 32 | ) 33 | 34 | 35 | AWS_ROLE = os.environ.get('EPSAGON_AWS_ROLE').strip() 36 | REGION = os.environ.get('EPSAGON_REGION').strip() 37 | KINESIS_NAME = os.environ.get('EPSAGON_KINESIS_NAME').strip() 38 | REGEX = re.compile( 39 | '|'.join(['.*{}.*'.format(pattern) for pattern in FILTER_PATTERNS]), 40 | re.DOTALL 41 | ) 42 | 43 | sts = boto3.client('sts') 44 | assumed_role = sts.assume_role( 45 | RoleArn=AWS_ROLE, 46 | RoleSessionName='EpsagonRole' 47 | ) 48 | 49 | credentials = assumed_role['Credentials'] 50 | kinesis = boto3.client( 51 | 'kinesis', 52 | aws_access_key_id=credentials['AccessKeyId'], 53 | aws_secret_access_key=credentials['SecretAccessKey'], 54 | aws_session_token=credentials['SessionToken'], 55 | region_name=REGION, 56 | ) 57 | 58 | 59 | def epsagon_debug(message): 60 | if os.getenv('EPSAGON_DEBUG', '').upper() == 'TRUE': 61 | print(message) 62 | 63 | 64 | def filter_events(record_data): 65 | """ 66 | Filter events relevant for Epsagon. 67 | :param record_data: Record data that holds the events. 68 | :return: dict / None. 69 | """ 70 | record = None 71 | if record_data['messageType'] == 'DATA_MESSAGE': 72 | original_events = record_data['logEvents'] 73 | partition_key = record_data['logStream'] 74 | record_data['subscriptionFilters'] = ( 75 | ['Epsagon#{}#{}'.format(record_data['owner'], os.getenv('AWS_REGION'))] 76 | ) 77 | events = [] 78 | epsagon_debug('Found total of {} events'.format(len(original_events))) 79 | epsagon_debug('Original events: {}'.format(original_events)) 80 | for event in original_events: 81 | if REGEX.match(event['message']) is not None: 82 | events.append(event) 83 | epsagon_debug('Filtered total of {} events.'.format(len(events))) 84 | if events: 85 | record_data['logEvents'] = events 86 | out = StringIO() 87 | with gzip.GzipFile(fileobj=out, mode='w') as f: 88 | f.write(json.dumps(record_data).encode('ascii')) 89 | 90 | record = { 91 | 'Data': out.getvalue(), 92 | 'PartitionKey': partition_key 93 | } 94 | return record 95 | 96 | 97 | def forward_logs_to_epsagon(event): 98 | """ 99 | Send filtered CloudWatch logs to Epsagon Kinesis. 100 | :param event: The triggered event from CloudWatch logs. 
101 | """ 102 | try: 103 | logs_data_decoded = event['awslogs']['data'].decode('base64') 104 | logs_data_unzipped = gzip.GzipFile(fileobj=StringIO(logs_data_decoded)).read() 105 | record_data = json.loads(logs_data_unzipped) 106 | 107 | filtered_event = filter_events(record_data) 108 | if not filtered_event: 109 | epsagon_debug('No logs match') 110 | return False 111 | 112 | kinesis.put_record( 113 | StreamName=KINESIS_NAME, 114 | Data=filtered_event['Data'], 115 | PartitionKey=filtered_event['PartitionKey'], 116 | ) 117 | 118 | except Exception as err: 119 | epsagon_debug('Encountered error: {}'.format(err)) 120 | epsagon_debug(traceback.format_exc()) 121 | 122 | return True 123 | -------------------------------------------------------------------------------- /cloudwatch_log_trigger/cloudwatch_log_sender.js: -------------------------------------------------------------------------------- 1 | var zlib = require('zlib'); 2 | var util = require('util'); 3 | var AWS = require('aws-sdk'); 4 | 5 | 6 | const PATTERNS = [ 7 | 'REPORT', 'Unable to import module', 'Task timed out', 8 | 'Process exited before completing', 'Traceback', 9 | 'module initialization error:', 'errorMessage' 10 | ]; 11 | const MAX_STR_SIZE = 100; 12 | 13 | const KINESIS_CLIENT = new AWS.Kinesis({ 14 | region: process.env.EPSAGON_REGION, 15 | accessKeyId: process.env.EPSAGON_AWS_ACCESS_KEY_ID, 16 | secretAccessKey: process.env.EPSAGON_AWS_SECRET_ACCESS_KEY 17 | }); 18 | 19 | function epsagon_debug(error) { 20 | if ((process.env.EPSAGON_DEBUG || '').toUpperCase() === 'TRUE') { 21 | // eslint-disable-next-line no-console 22 | console.log(error); 23 | } 24 | } 25 | 26 | module.exports.forwardLogsData = function forwardLogsData(logsData) { 27 | return new Promise((resolve, reject) => { 28 | epsagon_debug('Attempting to forward logs'); 29 | 30 | var zippedInput = new Buffer.from(logsData, 'base64'); 31 | epsagon_debug(util.format('Size before compression %d bytes', zippedInput.length)); 32 | zlib.gunzip(zippedInput, function (e, buffer) { 33 | if (e) { 34 | epsagon_debug(e); 35 | resolve(); 36 | return; 37 | } 38 | 39 | epsagon_debug(util.format('Size after decompression %d bytes', buffer.length)); 40 | 41 | var awslogsData = JSON.parse(buffer.toString('utf-8')); 42 | 43 | if (awslogsData.messageType !== 'DATA_MESSAGE') { 44 | epsagon_debug('Message other than DATA_MESSAGE received'); 45 | resolve(); 46 | return; 47 | } 48 | 49 | var forwadedMsgs = []; 50 | 51 | epsagon_debug(util.format('Scanning %d lines', awslogsData.logEvents.length)); 52 | 53 | awslogsData.logEvents.forEach(function (log, idx, arr) { 54 | PATTERNS.forEach(pattern => { 55 | if (log.message.slice(0, MAX_STR_SIZE).indexOf(pattern) >= 0) { 56 | forwadedMsgs.push(log) 57 | } 58 | }) 59 | }); 60 | 61 | epsagon_debug('Done scanning'); 62 | 63 | if (forwadedMsgs.length !== 0) { 64 | awslogsData.logEvents = forwadedMsgs; 65 | awslogsData.subscriptionFilters = []; 66 | awslogsData.subscriptionFilters.push(util.format('Epsagon#%s#%s', awslogsData.owner, process.env.AWS_REGION)); 67 | 68 | zlib.gzip(new Buffer.from(JSON.stringify(awslogsData), 'ascii'), function (e, buffer) { 69 | if (e) { 70 | epsagon_debug('Failed compressing result'); 71 | resolve(); 72 | return; 73 | } 74 | 75 | // Forward to Epsagon Kinesis 76 | try { 77 | var params = { 78 | Data: buffer, 79 | PartitionKey: awslogsData.logStream, 80 | StreamName: process.env.EPSAGON_KINESIS_NAME 81 | }; 82 | 83 | epsagon_debug(util.format('About to forward %d records', forwadedMsgs.length)); 84 | 85 | 
KINESIS_CLIENT.putRecord(params, function(err, data) { 86 | epsagon_debug('Record sent'); 87 | epsagon_debug(data); 88 | if (err) { 89 | epsagon_debug(err); 90 | } 91 | resolve(); 92 | }); 93 | } 94 | catch (e) { 95 | epsagon_debug('Got error in forwarding'); 96 | epsagon_debug(e); 97 | resolve(); 98 | } 99 | }) 100 | } 101 | }); 102 | }); 103 | } 104 | 105 | // Lambda handler for forwarding logs either from a CloudWatch Logs trigger, or from a 106 | // Kinesis stream of a CloudWatch Logs subscription 107 | module.exports.forwardLogsLambdaHandler = function forwardLogsLambdaHandler(event, context) { 108 | if (event.awslogs) { 109 | return module.exports.forwardLogsData(event.awslogs.data); 110 | } 111 | 112 | if (event.Records) { 113 | return Promise.all(event.Records.map(function(record) { 114 | return module.exports.forwardLogsData(record.kinesis.data); 115 | })); 116 | } 117 | }; 118 | 119 | --------------------------------------------------------------------------------
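The handler above accepts either a raw CloudWatch Logs trigger (event.awslogs) or Kinesis records from a log subscription. The sketch below is a hypothetical local smoke test, not part of the repository, that builds the same gzip-plus-base64 payload CloudWatch Logs delivers and feeds it to forwardLogsData; the account id, log group, log stream and messages are placeholders, and running it with the EPSAGON_* variables set will attempt a real Kinesis putRecord.

// Hypothetical smoke test for forwardLogsData (placeholders throughout).
const zlib = require("zlib");
const { forwardLogsData } = require("./cloudwatch_log_sender");

// Shape of a CloudWatch Logs subscription payload; only the first event
// matches the PATTERNS list ('REPORT'), so only it should be forwarded.
const payload = {
    messageType: "DATA_MESSAGE",
    owner: "123456789012",
    logGroup: "/aws/lambda/example-function",
    logStream: "2019/01/01/[$LATEST]abcdef1234567890",
    subscriptionFilters: [],
    logEvents: [
        { id: "1", timestamp: Date.now(), message: "REPORT RequestId: 00000000-0000-0000-0000-000000000000 Duration: 1.23 ms" },
        { id: "2", timestamp: Date.now(), message: "an ordinary log line that should be dropped" }
    ]
};

// CloudWatch Logs gzips the JSON document and base64-encodes it before delivery.
const encoded = zlib.gzipSync(Buffer.from(JSON.stringify(payload))).toString("base64");

forwardLogsData(encoded)
    .then(() => console.log("forwarding attempt finished"))
    .catch((err) => console.error(err));

The same encoded string is what arrives under event.awslogs.data on a direct CloudWatch Logs trigger, and under each record.kinesis.data entry when the subscription is delivered through a Kinesis stream.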