26 | */
27 | public class SumologicMessageModelPipeline implements
28 | IKinesisConnectorPipeline {
29 | private static final Logger LOG = Logger.getLogger(SumologicMessageModelPipeline.class.getName());
30 |
31 | @Override
32 | public IEmitter getEmitter(KinesisConnectorConfiguration configuration) {
33 | return new SumologicEmitter(configuration);
34 | }
35 |
36 | @Override
37 | public IBuffer getBuffer(KinesisConnectorConfiguration configuration) {
38 | return new BasicMemoryBuffer(configuration);
39 | }
40 |
41 | @Override
42 | public ITransformer
43 | getTransformer(KinesisConnectorConfiguration configuration) {
44 |
45 | // Load specified class
46 | String argClass = ((KinesisConnectorForSumologicConfiguration)configuration).TRANSFORMER_CLASS;
47 | String className = "com.sumologic.client."+argClass;
48 | ClassLoader classLoader = SumologicMessageModelPipeline.class.getClassLoader();
49 | Class ModelClass = null;
50 | try {
51 | ModelClass = classLoader.loadClass(className);
52 | ITransformer ITransformerObject = (ITransformer)ModelClass.newInstance();
53 | LOG.info("Using transformer: "+ITransformerObject.getClass().getName());
54 | return ITransformerObject;
55 | } catch (ClassNotFoundException e) {
56 | LOG.error("Class not found: "+className+" error: "+e.getMessage());
57 | } catch (InstantiationException e) {
58 | LOG.error("Class not found: "+className+" error: "+e.getMessage());
59 | } catch (IllegalAccessException e) {
60 | LOG.error("Class not found: "+className+" error: "+e.getMessage());
61 | }
62 |
63 | return new DefaultKinesisMessageModelSumologicTransformer();
64 | }
65 |
66 | @Override
67 | public IFilter getFilter(KinesisConnectorConfiguration configuration) {
68 | return new AllPassFilter();
69 | }
70 |
71 | }
72 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/SumologicSender.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client;
2 |
3 | import java.io.IOException;
4 | import java.util.concurrent.ExecutionException;
5 |
6 | import org.apache.commons.logging.Log;
7 | import org.apache.commons.logging.LogFactory;
8 |
9 | import com.ning.http.client.AsyncHttpClient;
10 | import com.ning.http.client.AsyncHttpClientConfig;
11 | import com.ning.http.client.AsyncHttpClient.BoundRequestBuilder;
12 | import com.ning.http.client.AsyncHttpClientConfig.Builder;
13 | import com.ning.http.client.Response;
14 |
15 | public class SumologicSender {
16 | private static final Log LOG = LogFactory.getLog(SumologicSender.class);
17 |
18 | private String url = null;
19 | private AsyncHttpClient client = null;
20 |
21 | public SumologicSender(String url) {
22 | this.url = url;
23 |
24 | Builder builder = new AsyncHttpClientConfig.Builder();
25 | this.client = new AsyncHttpClient(builder.build());
26 | }
27 |
28 | private BoundRequestBuilder clientPreparePost(String url){
29 | if (this.client.isClosed()){
30 | Builder builder = new AsyncHttpClientConfig.Builder();
31 | this.client = new AsyncHttpClient(builder.build());
32 | }
33 | return this.client.preparePost(url);
34 | }
35 |
36 | public boolean sendToSumologic(String data) throws IOException{
37 | int statusCode = -1;
38 |
39 | BoundRequestBuilder builder = null;
40 | builder = this.clientPreparePost(url);
41 |
42 | byte[] compressedData = SumologicKinesisUtils.compressGzip(data);
43 | if (compressedData == null) {
44 | LOG.error("Unable to compress data to send: "+data);
45 | return false;
46 | }
47 |
48 | LOG.info("HTTP POST body of size " + compressedData.length + " bytes");
49 |
50 | builder.setHeader("Content-Encoding", "gzip");
51 | builder.setBody(compressedData);
52 |
53 | Response response = null;
54 | try {
55 | response = builder.execute().get();
56 | statusCode = response.getStatusCode();
57 | } catch (InterruptedException e) {
58 | LOG.error("Can't send POST to Sumologic "+e.getMessage());
59 | } catch (ExecutionException e) {
60 | LOG.error("Can't send POST to Sumologic "+e.getMessage());
61 | }
62 |
63 | // Check if the request was successful;
64 | if (statusCode != 200) {
65 | LOG.warn(String.format("Received HTTP error from Sumo Service: %d", statusCode));
66 | return false;
67 | }
68 | else{
69 | return true;
70 | }
71 | }
72 | }
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/implementations/SumologicEmitter.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.implementations;
2 |
3 | import java.io.IOException;
4 | import java.util.ArrayList;
5 | import java.util.LinkedList;
6 | import java.util.List;
7 | import java.util.Queue;
8 |
9 | import org.apache.log4j.Logger;
10 |
11 | import com.sumologic.client.SumologicSender;
12 | import com.sumologic.client.KinesisConnectorForSumologicConfiguration;
13 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
14 | import com.amazonaws.services.kinesis.connectors.UnmodifiableBuffer;
15 | import com.amazonaws.services.kinesis.connectors.interfaces.IEmitter;
16 |
17 | /**
18 | * This class is used to store records from a stream to Sumologic log files. It requires the use of a
19 | * SumologicTransformer, which is able to transform records into a format that can be sent to
20 | * Sumologic.
21 | */
22 | public class SumologicEmitter implements IEmitter {
23 | private static final Logger LOG = Logger.getLogger(SumologicEmitter.class.getName());
24 |
25 | private SumologicSender sender;
26 | private KinesisConnectorForSumologicConfiguration config;
27 | private static final boolean SEND_RECORDS_IN_BATCHES = true;
28 | private long batchSize = 1000L;
29 |
30 | public SumologicEmitter(KinesisConnectorConfiguration configuration) {
31 | this.config = (KinesisConnectorForSumologicConfiguration) configuration;
32 | sender = new SumologicSender(this.config.SUMOLOGIC_URL);
33 | batchSize = this.config.BUFFER_RECORD_COUNT_LIMIT;
34 | }
35 |
36 | public SumologicEmitter(String url) {
37 | sender = new SumologicSender(url);
38 | }
39 |
40 | @Override
41 | public List emit(final UnmodifiableBuffer buffer)
42 | throws IOException {
43 | List records = buffer.getRecords();
44 | if (SEND_RECORDS_IN_BATCHES) {
45 | return sendBatchConcatenating(records);
46 | } else {
47 | return sendRecordsOneByOne(records);
48 | }
49 | }
50 |
51 | public List sendBatchConcatenating(List records) {
52 | boolean success = false;
53 | List failedRecords = new ArrayList();
54 | List currentBatch = new ArrayList();
55 | Queue unprocessedRecords = new LinkedList(records);
56 |
57 | String message = "";
58 | int recordCount = 0;
59 | for(String record: records) {
60 | currentBatch.add(record);
61 | unprocessedRecords.poll();
62 | message += record;
63 | message += "\n";
64 | recordCount++;
65 | if (recordCount >= batchSize) {
66 | try {
67 | LOG.info("Sending batch of: "+recordCount+" records");
68 | success = sender.sendToSumologic(message);
69 | } catch (IOException e) {
70 | LOG.warn("Couldn't send batch of " + recordCount
71 | + " record to Sumologic: "+e.getMessage());
72 | success = false;
73 | }
74 | if (!success) {
75 | failedRecords.addAll(currentBatch);
76 | failedRecords.addAll(unprocessedRecords);
77 | return failedRecords;
78 | }
79 | currentBatch = new ArrayList();
80 | recordCount = 0;
81 | message = "";
82 | }
83 | }
84 | try {
85 | LOG.info("Sending batch of: "+recordCount+" records");
86 | success = sender.sendToSumologic(message);
87 | } catch (IOException e) {
88 | LOG.warn("Couldn't send record to Sumologic: "+e.getMessage());
89 | success = false;
90 | }
91 | if (!success) {
92 | failedRecords.addAll(currentBatch);
93 | failedRecords.addAll(unprocessedRecords);
94 | return failedRecords;
95 | }
96 |
97 | return failedRecords;
98 | }
99 |
100 | public List sendRecordsOneByOne (List records) {
101 | ArrayList failedRecords = new ArrayList();
102 | for (String record: records) {
103 | try {
104 | if (!sender.sendToSumologic(record)) {
105 | failedRecords.add(record);
106 | }
107 | } catch (IOException e) {
108 | LOG.warn("Couldn't send record: "+record);
109 | }
110 | }
111 | LOG.info("Sent records: "+(records.size()-failedRecords.size())+" failed: "+failedRecords.size());
112 | return failedRecords;
113 | }
114 |
115 | @Override
116 | public void fail(List records) {
117 | for (String record : records) {
118 | LOG.error("Could not emit record: " + record);
119 | }
120 | }
121 |
122 | @Override
123 | public void shutdown() {
124 | }
125 | }
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/implementations/SumologicTransformer.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.implementations;
2 |
3 | import com.amazonaws.services.kinesis.connectors.interfaces.ITransformer;
4 |
/**
 * Marker interface for transformers that convert Amazon Kinesis records into a
 * payload suitable for emission to Sumo Logic.
 *
 * NOTE(review): the generic type parameters appear to have been lost from this
 * source (the {@code @param} tag below is empty and ITransformer is used raw);
 * confirm the intended parameters against ITransformer's declaration.
 *
 * @param
 */
public interface SumologicTransformer extends ITransformer {

}
13 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/model/CloudWatchLogsMessageModel.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.model;
2 |
3 | import java.util.ArrayList;
4 | import java.util.HashMap;
5 | import java.util.List;
6 | import java.util.Map;
7 |
8 | import org.apache.commons.lang.builder.ToStringBuilder;
9 |
10 | import com.fasterxml.jackson.annotation.JsonAnyGetter;
11 | import com.fasterxml.jackson.annotation.JsonAnySetter;
12 | import com.fasterxml.jackson.annotation.JsonIgnore;
13 | import com.fasterxml.jackson.annotation.JsonInclude;
14 | import com.fasterxml.jackson.annotation.JsonProperty;
15 | import com.fasterxml.jackson.annotation.JsonPropertyOrder;
16 |
/**
 * Jackson data model for an AWS CloudWatch Logs subscription message as
 * delivered through Kinesis. Unknown JSON fields are retained in
 * {@code additionalProperties} via the any-getter/any-setter pair rather than
 * causing deserialization failures.
 *
 * NOTE(review): collection element types appear stripped in this source dump
 * (raw List/Map); logEvents presumably holds LogEvent instances — confirm
 * against the original generics before relying on the raw types.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"logEvents",
"logGroup",
"logStream",
"messageType",
"owner",
"subscriptionFilters"
})

public class CloudWatchLogsMessageModel {

// The individual log events contained in this delivery.
@JsonProperty("logEvents")
private List logEvents = new ArrayList();
// Source CloudWatch Logs log group name.
@JsonProperty("logGroup")
private String logGroup;
// Source CloudWatch Logs log stream name.
@JsonProperty("logStream")
private String logStream;
// e.g. "DATA_MESSAGE" or "CONTROL_MESSAGE" per the CloudWatch Logs format.
@JsonProperty("messageType")
private String messageType;
// AWS account id that owns the log data.
@JsonProperty("owner")
private String owner;
// Names of the subscription filters that matched this data.
@JsonProperty("subscriptionFilters")
private List subscriptionFilters = new ArrayList();
// Catch-all for JSON fields not mapped above; excluded from serialization
// as a normal property (re-emitted via the any-getter).
@JsonIgnore
private Map additionalProperties = new HashMap();

@JsonProperty("logEvents")
public List getLogEvents() {
return logEvents;
}

@JsonProperty("logEvents")
public void setLogEvents(List logEvents) {
this.logEvents = logEvents;
}

@JsonProperty("logGroup")
public String getLogGroup() {
return logGroup;
}

@JsonProperty("logGroup")
public void setLogGroup(String logGroup) {
this.logGroup = logGroup;
}

@JsonProperty("logStream")
public String getLogStream() {
return logStream;
}

@JsonProperty("logStream")
public void setLogStream(String logStream) {
this.logStream = logStream;
}

@JsonProperty("messageType")
public String getMessageType() {
return messageType;
}

@JsonProperty("messageType")
public void setMessageType(String messageType) {
this.messageType = messageType;
}

@JsonProperty("owner")
public String getOwner() {
return owner;
}

@JsonProperty("owner")
public void setOwner(String owner) {
this.owner = owner;
}

@JsonProperty("subscriptionFilters")
public List getSubscriptionFilters() {
return subscriptionFilters;
}

@JsonProperty("subscriptionFilters")
public void setSubscriptionFilters(List subscriptionFilters) {
this.subscriptionFilters = subscriptionFilters;
}

// Exposes unmapped fields so Jackson serializes them back out.
@JsonAnyGetter
public Map getAdditionalProperties() {
return this.additionalProperties;
}

// Collects any JSON field without a matching @JsonProperty above.
@JsonAnySetter
public void setAdditionalProperty(String name, Object value) {
this.additionalProperties.put(name, value);
}

// Reflection-based dump of all fields; intended for logging/debugging.
@Override
public String toString() {
return ToStringBuilder.reflectionToString(this);
}
}
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/model/LogEvent.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.model;
2 |
3 | import java.util.HashMap;
4 | import java.util.Map;
5 |
6 | import com.fasterxml.jackson.annotation.JsonAnyGetter;
7 | import com.fasterxml.jackson.annotation.JsonAnySetter;
8 | import com.fasterxml.jackson.annotation.JsonIgnore;
9 | import com.fasterxml.jackson.annotation.JsonInclude;
10 | import com.fasterxml.jackson.annotation.JsonProperty;
11 | import com.fasterxml.jackson.annotation.JsonPropertyOrder;
12 |
/**
 * Jackson data model for a single CloudWatch Logs event: an id, the raw
 * message text, and an epoch-millisecond timestamp. Unknown JSON fields are
 * preserved in {@code additionalProperties} instead of failing
 * deserialization.
 *
 * NOTE(review): the additionalProperties map is raw in this source dump —
 * presumably Map&lt;String, Object&gt;; confirm against the original.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"id",
"message",
"timestamp"
})

public class LogEvent {

// Unique event id assigned by CloudWatch Logs.
@JsonProperty("id")
private String id;
// Raw log line.
@JsonProperty("message")
private String message;
// Event time; presumably epoch milliseconds — confirm against producer.
@JsonProperty("timestamp")
private Long timestamp;
// Catch-all for unmapped JSON fields (see any-getter/any-setter below).
@JsonIgnore
private Map additionalProperties = new HashMap();

@JsonProperty("id")
public String getId() {
return id;
}

@JsonProperty("id")
public void setId(String id) {
this.id = id;
}

@JsonProperty("message")
public String getMessage() {
return message;
}

@JsonProperty("message")
public void setMessage(String message) {
this.message = message;
}

@JsonProperty("timestamp")
public Long getTimestamp() {
return timestamp;
}

@JsonProperty("timestamp")
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}

// Exposes unmapped fields so Jackson serializes them back out.
@JsonAnyGetter
public Map getAdditionalProperties() {
return this.additionalProperties;
}

// Collects any JSON field without a matching @JsonProperty above.
@JsonAnySetter
public void setAdditionalProperty(String name, Object value) {
this.additionalProperties.put(name, value);
}

}
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/client/model/SimpleKinesisMessageModel.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.model;
2 |
3 | import java.io.Serializable;
4 |
/**
 * Minimal serializable message model carrying a data payload and a numeric id.
 * Instances are Java-serialized when batched into Kinesis put requests (see
 * BatchedStreamSource), hence the explicit serialVersionUID.
 */
public class SimpleKinesisMessageModel implements Serializable {
    // Pin the serialized form: without this, any recompile could break
    // deserialization of in-flight records.
    private static final long serialVersionUID = 1L;

    private String data;
    private int id;

    /**
     * @param data the message payload; the id defaults to 1
     */
    public SimpleKinesisMessageModel(String data) {
        this.data = data;
        this.id = 1;
    }

    public String getData() {
        return data;
    }

    public void setData(String data) {
        this.data = data;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    /** Returns the raw payload (used when concatenating records for emission). */
    @Override
    public String toString() {
        return data;
    }
}
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/BatchedStreamSource.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.ByteArrayOutputStream;
5 | import java.io.IOException;
6 | import java.io.InputStream;
7 | import java.io.InputStreamReader;
8 | import java.io.ObjectOutputStream;
9 | import java.nio.ByteBuffer;
10 | import java.util.ArrayList;
11 | import java.util.List;
12 | import java.util.UUID;
13 |
14 | import org.apache.log4j.Logger;
15 |
16 | import com.sumologic.client.model.SimpleKinesisMessageModel;
17 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
18 | import com.amazonaws.services.kinesis.model.PutRecordRequest;
19 |
20 | /**
21 | * This class is a data source for supplying input to the Amazon Kinesis stream. It reads lines from the
22 | * input file specified in the constructor and batches up records before emitting them.
23 | */
24 | public class BatchedStreamSource extends StreamSource {
25 | private static final Logger LOG = Logger.getLogger(BatchedStreamSource.class.getName());
26 |
27 | private static int NUM_BYTES_PER_PUT_REQUEST = 50000;
28 | List buffer;
29 |
30 | public BatchedStreamSource(KinesisConnectorConfiguration config, String inputFile) {
31 | this(config, inputFile, false);
32 | }
33 |
34 | public BatchedStreamSource(KinesisConnectorConfiguration config, String inputFile, boolean loopOverStreamSource) {
35 | super(config, inputFile, loopOverStreamSource);
36 | buffer = new ArrayList();
37 | }
38 |
39 | @Override
40 | protected void processInputStream(InputStream inputStream, int iteration) throws IOException {
41 | try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream))) {
42 | String line;
43 | int lines = 0;
44 |
45 | while ((line = br.readLine()) != null) {
46 | SimpleKinesisMessageModel kinesisMessageModel = objectMapper.readValue(line, SimpleKinesisMessageModel.class);
47 | buffer.add(kinesisMessageModel);
48 | if (numBytesInBuffer() > NUM_BYTES_PER_PUT_REQUEST) {
49 | /*
50 | * We need to remove the last record to ensure this data blob is accepted by the Amazon Kinesis
51 | * client which restricts the data blob to be less than 50 KB.
52 | */
53 | SimpleKinesisMessageModel lastRecord = buffer.remove(buffer.size() - 1);
54 | flushBuffer();
55 | /*
56 | * We add it back so it will be part of the next batch.
57 | */
58 | buffer.add(lastRecord);
59 | }
60 | lines++;
61 | }
62 | if (!buffer.isEmpty()) {
63 | flushBuffer();
64 | }
65 |
66 | LOG.info("Added " + lines + " records to stream source.");
67 | }
68 | }
69 |
70 | private byte[] bufferToBytes() throws IOException {
71 | ByteArrayOutputStream bos = new ByteArrayOutputStream();
72 | ObjectOutputStream oos = new ObjectOutputStream(bos);
73 | oos.writeObject(buffer);
74 | return bos.toByteArray();
75 | }
76 |
77 | private int numBytesInBuffer() throws IOException {
78 | return bufferToBytes().length;
79 | }
80 |
81 | private void flushBuffer() throws IOException {
82 | PutRecordRequest putRecordRequest = new PutRecordRequest();
83 | putRecordRequest.setStreamName(config.KINESIS_INPUT_STREAM);
84 | putRecordRequest.setData(ByteBuffer.wrap(bufferToBytes()));
85 | putRecordRequest.setPartitionKey(String.valueOf(UUID.randomUUID()));
86 | kinesisClient.putRecord(putRecordRequest);
87 | buffer.clear();
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/KinesisConnectorExecutor.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import java.io.IOException;
4 | import java.io.InputStream;
5 | import java.util.Properties;
6 |
7 | import org.apache.log4j.Logger;
8 |
9 | import com.sumologic.client.KinesisConnectorForSumologicConfiguration;
10 | import com.amazonaws.auth.*;
11 | import com.amazonaws.auth.profile.ProfileCredentialsProvider;
12 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
13 |
14 | /**
15 | * This class defines the execution of a Amazon Kinesis Connector.
16 | *
17 | */
18 | public abstract class KinesisConnectorExecutor extends KinesisConnectorExecutorBase {
19 | private static final Logger LOG = Logger.getLogger(KinesisConnectorExecutor.class.getName());
20 |
21 | // Create Stream Source constants
22 | private static final String CREATE_STREAM_SOURCE = "createStreamSource";
23 | private static final String LOOP_OVER_STREAM_SOURCE = "loopOverStreamSource";
24 | private static final String INPUT_STREAM_FILE = "inputStreamFile";
25 |
26 | private static final boolean DEFAULT_CREATE_STREAM_SOURCE = false;
27 | private static final boolean DEFAULT_LOOP_OVER_STREAM_SOURCE = false;
28 |
29 |
30 | // Class variables
31 | protected final KinesisConnectorForSumologicConfiguration config;
32 | private final Properties properties;
33 |
34 | public KinesisConnectorExecutor() {
35 | // Load ENV vars into properties
36 | LOG.info("Using ENV vars for properties");
37 |
38 | properties = new Properties();
39 | System.getenv().forEach(properties::setProperty);
40 |
41 | this.config = new KinesisConnectorForSumologicConfiguration(properties, getAWSCredentialsProvider());
42 |
43 | // Send sample data to AWS Kinesis if specified in the properties file
44 | setupInputStream();
45 |
46 | // Initialize executor with configurations
47 | super.initialize((KinesisConnectorConfiguration)config);
48 | }
49 |
50 | /**
51 | * Create a new KinesisConnectorExecutor based on the provided configuration (*.propertes) file.
52 | *
53 | * @param configFile
54 | * The name of the configuration file to look for on the classpath
55 | */
56 | public KinesisConnectorExecutor(String configFile) {
57 | // Load configuration properties
58 | InputStream configStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(configFile);
59 |
60 | if (configStream == null) {
61 | String msg = "Could not find resource " + configFile + " in the classpath";
62 | throw new IllegalStateException(msg);
63 | }
64 | properties = new Properties();
65 | try {
66 | properties.load(configStream);
67 | configStream.close();
68 | } catch (IOException e) {
69 | String msg = "Could not load properties file " + configFile + " from classpath";
70 | throw new IllegalStateException(msg, e);
71 | }
72 | this.config = new KinesisConnectorForSumologicConfiguration(properties, getAWSCredentialsProvider(configFile));
73 |
74 | LOG.info("Using " + configFile);
75 |
76 | // Send sample data to AWS Kinesis if specified in the properties file
77 | setupInputStream();
78 |
79 | // Initialize executor with configurations
80 | super.initialize((KinesisConnectorConfiguration)config);
81 | }
82 |
83 | /**
84 | * Returns an {@link AWSCredentialsProvider} with the permissions necessary to accomplish all specified
85 | * tasks. At the minimum it will require read permissions for Amazon Kinesis. Additional read permissions
86 | * and write permissions may be required based on the Pipeline used.
87 | *
88 | * @return
89 | */
90 | public AWSCredentialsProvider getAWSCredentialsProvider() {
91 | return new DefaultAWSCredentialsProviderChain();
92 | }
93 |
94 | public AWSCredentialsProvider getAWSCredentialsProvider(String configFile) {
95 | return new AWSCredentialsProviderChain(
96 | new EnvironmentVariableCredentialsProvider(),
97 | new SystemPropertiesCredentialsProvider(),
98 | new ProfileCredentialsProvider(),
99 | new EC2ContainerCredentialsProviderWrapper(),
100 | new ClasspathPropertiesFileCredentialsProvider(configFile)
101 | );
102 | }
103 |
104 | /**
105 | * Helper method to spawn the {@link StreamSource} in a separate thread.
106 | */
107 | private void setupInputStream() {
108 | if (parseBoolean(CREATE_STREAM_SOURCE, DEFAULT_CREATE_STREAM_SOURCE, properties)) {
109 | String inputFile = properties.getProperty(INPUT_STREAM_FILE);
110 | StreamSource streamSource;
111 | if (config.BATCH_RECORDS_IN_PUT_REQUEST) {
112 | streamSource =
113 | new BatchedStreamSource(config, inputFile, parseBoolean(LOOP_OVER_STREAM_SOURCE,
114 | DEFAULT_LOOP_OVER_STREAM_SOURCE,
115 | properties));
116 |
117 | } else {
118 | streamSource =
119 | new StreamSource(config, inputFile, parseBoolean(LOOP_OVER_STREAM_SOURCE,
120 | DEFAULT_LOOP_OVER_STREAM_SOURCE,
121 | properties));
122 | }
123 | Thread streamSourceThread = new Thread(streamSource);
124 | LOG.info("Starting stream source.");
125 | streamSourceThread.start();
126 | }
127 | }
128 |
129 |
130 | /**
131 | * Helper method used to parse boolean properties.
132 | *
133 | * @param property
134 | * The String key for the property
135 | * @param defaultValue
136 | * The default value for the boolean property
137 | * @param properties
138 | * The properties file to get property from
139 | * @return property from property file, or if it is not specified, the default value
140 | */
141 | private static boolean parseBoolean(String property, boolean defaultValue, Properties properties) {
142 | return Boolean.parseBoolean(properties.getProperty(property, Boolean.toString(defaultValue)));
143 |
144 | }
145 | }
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/KinesisConnectorExecutorBase.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import org.apache.log4j.Logger;
4 |
5 | import com.sumologic.kinesis.KinesisConnectorRecordProcessorFactory;
6 | import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
7 | import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
8 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
9 | import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
10 | import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
11 |
/**
 * Base class wiring an Amazon Kinesis Client Library {@link Worker} to a
 * connector pipeline. Subclasses call one of the initialize() overloads
 * (typically from their constructor) before the executor is run().
 */
public abstract class KinesisConnectorExecutorBase implements Runnable {
    private static final Logger LOG = Logger.getLogger(KinesisConnectorExecutorBase.class.getName());

    // Amazon Kinesis Client Library worker to process records
    protected Worker worker;

    /**
     * Initialize the Amazon Kinesis Client Library configuration and worker
     *
     * @param kinesisConnectorConfiguration Amazon Kinesis connector configuration
     */
    protected void initialize(KinesisConnectorConfiguration kinesisConnectorConfiguration) {
        // NullMetricsFactory discards all metrics (no CloudWatch emission).
        initialize(kinesisConnectorConfiguration, new NullMetricsFactory());
    }

    /**
     * Initialize the Amazon Kinesis Client Library configuration and worker with metrics factory
     *
     * @param kinesisConnectorConfiguration Amazon Kinesis connector configuration
     * @param metricFactory would be used to emit metrics in Amazon Kinesis Client Library
     */
    protected void
            initialize(KinesisConnectorConfiguration kinesisConnectorConfiguration, IMetricsFactory metricFactory) {

        // Translate every relevant connector setting onto the KCL configuration.
        // NOTE(review): callProcessRecordsEvenForEmptyRecordList uses the static
        // DEFAULT_... constant rather than the instance setting; the warning
        // below suggests forcing it true is deliberate — confirm.
        KinesisClientLibConfiguration kinesisClientLibConfiguration =
                new KinesisClientLibConfiguration(kinesisConnectorConfiguration.APP_NAME,
                        kinesisConnectorConfiguration.KINESIS_INPUT_STREAM,
                        kinesisConnectorConfiguration.AWS_CREDENTIALS_PROVIDER,
                        kinesisConnectorConfiguration.WORKER_ID).withKinesisEndpoint(kinesisConnectorConfiguration.KINESIS_ENDPOINT)
                        .withFailoverTimeMillis(kinesisConnectorConfiguration.FAILOVER_TIME)
                        .withMaxRecords(kinesisConnectorConfiguration.MAX_RECORDS)
                        .withInitialPositionInStream(kinesisConnectorConfiguration.INITIAL_POSITION_IN_STREAM)
                        .withIdleTimeBetweenReadsInMillis(kinesisConnectorConfiguration.IDLE_TIME_BETWEEN_READS)
                        .withCallProcessRecordsEvenForEmptyRecordList(KinesisConnectorConfiguration.DEFAULT_CALL_PROCESS_RECORDS_EVEN_FOR_EMPTY_LIST)
                        .withCleanupLeasesUponShardCompletion(kinesisConnectorConfiguration.CLEANUP_TERMINATED_SHARDS_BEFORE_EXPIRY)
                        .withParentShardPollIntervalMillis(kinesisConnectorConfiguration.PARENT_SHARD_POLL_INTERVAL)
                        .withShardSyncIntervalMillis(kinesisConnectorConfiguration.SHARD_SYNC_INTERVAL)
                        .withTaskBackoffTimeMillis(kinesisConnectorConfiguration.BACKOFF_INTERVAL)
                        .withMetricsBufferTimeMillis(kinesisConnectorConfiguration.CLOUDWATCH_BUFFER_TIME)
                        .withMetricsMaxQueueSize(kinesisConnectorConfiguration.CLOUDWATCH_MAX_QUEUE_SIZE)
                        .withUserAgent(kinesisConnectorConfiguration.APP_NAME + ","
                                + kinesisConnectorConfiguration.CONNECTOR_DESTINATION + ","
                                + KinesisConnectorConfiguration.KINESIS_CONNECTOR_USER_AGENT)
                        .withRegionName(kinesisConnectorConfiguration.REGION_NAME);


        if (!kinesisConnectorConfiguration.CALL_PROCESS_RECORDS_EVEN_FOR_EMPTY_LIST) {
            LOG.warn("The false value of callProcessRecordsEvenForEmptyList will be ignored. It must be set to true for the bufferTimeMillisecondsLimit to work correctly.");
        }

        if (kinesisConnectorConfiguration.IDLE_TIME_BETWEEN_READS > kinesisConnectorConfiguration.BUFFER_MILLISECONDS_LIMIT) {
            LOG.warn("idleTimeBetweenReads is greater than bufferTimeMillisecondsLimit. For best results, ensure that bufferTimeMillisecondsLimit is more than or equal to idleTimeBetweenReads ");
        }

        // If a metrics factory was specified, use it.
        if (metricFactory != null) {
            worker =
                    new Worker(getKinesisConnectorRecordProcessorFactory(),
                            kinesisClientLibConfiguration,
                            metricFactory);
        } else {
            worker = new Worker(getKinesisConnectorRecordProcessorFactory(), kinesisClientLibConfiguration);
        }
        LOG.info(getClass().getSimpleName() + " worker created");
    }

    @Override
    public void run() {
        if (worker != null) {
            // Start Amazon Kinesis Client Library worker to process records
            LOG.info("Starting worker in " + getClass().getSimpleName());
            try {
                worker.run();
            } catch (Throwable t) {
                LOG.error(t);
                throw t;
            } finally {
                // worker.run() normally blocks indefinitely; reaching this
                // point means the worker has stopped, hence the error log.
                LOG.error("Worker " + getClass().getSimpleName() + " is not running.");
            }
        } else {
            throw new RuntimeException("Initialize must be called before run.");
        }
    }

    /**
     * This method returns a {@link KinesisConnectorRecordProcessorFactory} that contains the
     * appropriate {@link IKinesisConnectorPipeline} for the Amazon Kinesis Enabled Application
     *
     * @return a {@link KinesisConnectorRecordProcessorFactory} that contains the appropriate
     *         {@link IKinesisConnectorPipeline} for the Amazon Kinesis Enabled Application
     */
    public abstract KinesisConnectorRecordProcessorFactory getKinesisConnectorRecordProcessorFactory();
}
105 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/KinesisConnectorMetricsExecutor.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory;
4 | import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
5 |
6 | /**
7 | * This class defines the execution of an Amazon Kinesis Connector with Amazon CloudWatch metrics.
8 | *
9 | */
10 | public abstract class KinesisConnectorMetricsExecutor extends KinesisConnectorExecutor {
11 |
12 | /**
13 | * Creates a new KinesisConnectorMetricsExecutor.
14 | *
15 | * @param configFile The name of the configuration file to look for on the classpath
16 | */
17 | public KinesisConnectorMetricsExecutor(String configFile) {
18 | super(configFile);
19 |
20 | // Amazon CloudWatch Metrics Factory used to emit metrics in KCL
21 | IMetricsFactory mFactory =
22 | new CWMetricsFactory(config.AWS_CREDENTIALS_PROVIDER,
23 | config.CLOUDWATCH_NAMESPACE,
24 | config.CLOUDWATCH_BUFFER_TIME,
25 | config.CLOUDWATCH_MAX_QUEUE_SIZE);
26 | super.initialize(config, mFactory);
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/KinesisConnectorRecordProcessor.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import java.io.IOException;
4 | import java.util.ArrayList;
5 | import java.util.Collection;
6 | import java.util.List;
7 |
8 | import org.apache.log4j.Logger;
9 |
10 | import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
11 | import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
12 | import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
13 | import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
14 | import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
15 | import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
16 | import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason;
17 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
18 | import com.amazonaws.services.kinesis.connectors.UnmodifiableBuffer;
19 | import com.amazonaws.services.kinesis.connectors.interfaces.IBuffer;
20 | import com.amazonaws.services.kinesis.connectors.interfaces.ICollectionTransformer;
21 | import com.amazonaws.services.kinesis.connectors.interfaces.IEmitter;
22 | import com.amazonaws.services.kinesis.connectors.interfaces.IFilter;
23 | import com.amazonaws.services.kinesis.connectors.interfaces.ITransformer;
24 | import com.amazonaws.services.kinesis.connectors.interfaces.ITransformerBase;
25 | import com.amazonaws.services.kinesis.model.Record;
26 |
27 | /**
28 | * This is the base class for any KinesisConnector. It is configured by a constructor that takes in
29 | * as parameters implementations of the IBuffer, ITransformer, and IEmitter dependencies defined in
30 | * a IKinesisConnectorPipeline. It is typed to match the class that records are transformed into for
31 | * filtering and manipulation. This class is produced by a KinesisConnectorRecordProcessorFactory.
32 | *
33 | * When a Worker calls processRecords() on this class, the pipeline is used in the following way:
34 | *
35 | *
Records are transformed into the corresponding data model (parameter type T) via the ITransformer.
36 | *
Transformed records are passed to the IBuffer.consumeRecord() method, which may optionally filter based on the
37 | * IFilter in the pipeline.
38 | *
When the buffer is full (IBuffer.shouldFlush() returns true), records are transformed with the ITransformer to
39 | * the output type (parameter type U) and a call is made to IEmitter.emit(). IEmitter.emit() returning an empty list is
40 | * considered a success, so the record processor will checkpoint and emit will not be retried. Non-empty return values
41 | * will result in additional calls to emit with failed records as the unprocessed list until the retry limit is reached.
42 | * Upon exceeding the retry limit or an exception being thrown, the IEmitter.fail() method will be called with the
43 | * unprocessed records.
44 | *
When the shutdown() method of this class is invoked, a call is made to the IEmitter.shutdown() method which
45 | * should close any existing client connections.
46 | *
47 | *
48 | */
49 | public class KinesisConnectorRecordProcessor implements IRecordProcessor {
50 |
51 | private final IEmitter emitter;
52 | private final ITransformerBase transformer;
53 | private final IFilter filter;
54 | private final IBuffer buffer;
55 | private final int retryLimit;
56 | private final long backoffInterval;
57 | private boolean isShutdown = false;
58 |
59 | private static final Logger LOG = Logger.getLogger(KinesisConnectorRecordProcessor.class.getName());
60 |
61 | private String shardId;
62 |
63 | public KinesisConnectorRecordProcessor(IBuffer buffer,
64 | IFilter filter,
65 | IEmitter emitter,
66 | ITransformerBase transformer,
67 | KinesisConnectorConfiguration configuration) {
68 | if (buffer == null || filter == null || emitter == null || transformer == null) {
69 | throw new IllegalArgumentException("buffer, filter, emitter, and transformer must not be null");
70 | }
71 | this.buffer = buffer;
72 | this.filter = filter;
73 | this.emitter = emitter;
74 | this.transformer = transformer;
75 | // Limit must be greater than zero
76 | if (configuration.RETRY_LIMIT <= 0) {
77 | retryLimit = 1;
78 | } else {
79 | retryLimit = configuration.RETRY_LIMIT;
80 | }
81 | this.backoffInterval = configuration.BACKOFF_INTERVAL;
82 | }
83 |
84 | @Override
85 | public void initialize(String shardId) {
86 | this.shardId = shardId;
87 | }
88 |
89 | @Override
90 | public void processRecords(List records, IRecordProcessorCheckpointer checkpointer) {
91 | // Note: This method will be called even for empty record lists. This is needed for checking the buffer time
92 | // threshold.
93 | if (isShutdown) {
94 | LOG.warn("processRecords called on shutdown record processor for shardId: " + shardId);
95 | return;
96 | }
97 | if (shardId == null) {
98 | throw new IllegalStateException("Record processor not initialized");
99 | }
100 |
101 | // Transform each Amazon Kinesis Record and add the result to the buffer
102 | for (Record record : records) {
103 | try {
104 | if (transformer instanceof ITransformer) {
105 | ITransformer singleTransformer = (ITransformer) transformer;
106 | filterAndBufferRecord(singleTransformer.toClass(record), record);
107 | } else if (transformer instanceof ICollectionTransformer) {
108 | ICollectionTransformer listTransformer = (ICollectionTransformer) transformer;
109 | Collection transformedRecords = listTransformer.toClass(record);
110 | for (T transformedRecord : transformedRecords) {
111 | filterAndBufferRecord(transformedRecord, record);
112 | }
113 | } else {
114 | throw new RuntimeException("Transformer must implement ITransformer or ICollectionTransformer");
115 | }
116 | } catch (IOException e) {
117 | LOG.error(e);
118 | }
119 | }
120 |
121 | if (buffer.shouldFlush()) {
122 | List emitItems = transformToOutput(buffer.getRecords());
123 | emit(checkpointer, emitItems);
124 | }
125 | }
126 |
127 | private void filterAndBufferRecord(T transformedRecord, Record record) {
128 | if (filter.keepRecord(transformedRecord)) {
129 | buffer.consumeRecord(transformedRecord, record.getData().array().length, record.getSequenceNumber());
130 | }
131 | }
132 |
133 | private List transformToOutput(List items) {
134 | List emitItems = new ArrayList();
135 | for (T item : items) {
136 | try {
137 | emitItems.add(transformer.fromClass(item));
138 | } catch (IOException e) {
139 | LOG.error("Failed to transform record " + item + " to output type", e);
140 | }
141 | }
142 | return emitItems;
143 | }
144 |
145 | private void emit(IRecordProcessorCheckpointer checkpointer, List emitItems) {
146 | List unprocessed = new ArrayList(emitItems);
147 | try {
148 | for (int numTries = 0; numTries < retryLimit; numTries++) {
149 | unprocessed = emitter.emit(new UnmodifiableBuffer(buffer, unprocessed));
150 | if (unprocessed.isEmpty()) {
151 | break;
152 | }
153 | try {
154 | Thread.sleep(backoffInterval);
155 | } catch (InterruptedException e) {
156 | }
157 | }
158 | if (!unprocessed.isEmpty()) {
159 | emitter.fail(unprocessed);
160 | }
161 | final String lastSequenceNumberProcessed = buffer.getLastSequenceNumber();
162 | buffer.clear();
163 | // checkpoint once all the records have been consumed
164 | if (lastSequenceNumberProcessed != null && unprocessed.isEmpty()) {
165 | checkpointer.checkpoint(lastSequenceNumberProcessed);
166 | }
167 | } catch (IOException | KinesisClientLibDependencyException | InvalidStateException | ThrottlingException
168 | | ShutdownException e) {
169 | LOG.error(e);
170 | emitter.fail(unprocessed);
171 | }
172 | }
173 |
174 | @Override
175 | public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
176 | LOG.info("Shutting down record processor with shardId: " + shardId + " with reason " + reason);
177 | if (isShutdown) {
178 | LOG.warn("Record processor for shardId: " + shardId + " has been shutdown multiple times.");
179 | return;
180 | }
181 | switch (reason) {
182 | case TERMINATE:
183 | emit(checkpointer, transformToOutput(buffer.getRecords()));
184 | try {
185 | checkpointer.checkpoint();
186 | } catch (KinesisClientLibDependencyException | InvalidStateException | ThrottlingException | ShutdownException e) {
187 | LOG.error(e);
188 | }
189 | break;
190 | case ZOMBIE:
191 | break;
192 | default:
193 | throw new IllegalStateException("invalid shutdown reason");
194 | }
195 | emitter.shutdown();
196 | isShutdown = true;
197 | }
198 |
199 | }
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/KinesisConnectorRecordProcessorFactory.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
3 | import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
4 | import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
5 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
6 | import com.amazonaws.services.kinesis.connectors.interfaces.IBuffer;
7 | import com.amazonaws.services.kinesis.connectors.interfaces.IEmitter;
8 | import com.amazonaws.services.kinesis.connectors.interfaces.IFilter;
9 | import com.amazonaws.services.kinesis.connectors.interfaces.IKinesisConnectorPipeline;
10 | import com.amazonaws.services.kinesis.connectors.interfaces.ITransformerBase;
11 |
12 | /**
13 | * This class is used to generate KinesisConnectorRecordProcessors that operate using the user's
14 | * implemented classes. The createProcessor() method sets the dependencies of the
15 | * KinesisConnectorRecordProcessor that are specified in the KinesisConnectorPipeline argument,
16 | * which accesses instances of the users implementations.
17 | */
18 | public class KinesisConnectorRecordProcessorFactory implements IRecordProcessorFactory {
19 |
20 | private IKinesisConnectorPipeline pipeline;
21 | private KinesisConnectorConfiguration configuration;
22 |
23 | public KinesisConnectorRecordProcessorFactory(IKinesisConnectorPipeline pipeline,
24 | KinesisConnectorConfiguration configuration) {
25 | this.configuration = configuration;
26 | this.pipeline = pipeline;
27 | }
28 |
29 | @Override
30 | public IRecordProcessor createProcessor() {
31 | try {
32 | IBuffer buffer = pipeline.getBuffer(configuration);
33 | IEmitter emitter = pipeline.getEmitter(configuration);
34 | ITransformerBase transformer = pipeline.getTransformer(configuration);
35 | IFilter filter = pipeline.getFilter(configuration);
36 | KinesisConnectorRecordProcessor processor =
37 | new KinesisConnectorRecordProcessor(buffer, filter, emitter, transformer, configuration);
38 | return processor;
39 | } catch (Throwable t) {
40 | throw new RuntimeException(t);
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/StreamSource.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis;
2 |
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.log4j.Logger;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
import com.amazonaws.services.kinesis.model.PutRecordRequest;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sumologic.client.model.SimpleKinesisMessageModel;
import com.sumologic.kinesis.utils.KinesisUtils;
19 |
20 | /**
21 | * This class is a data source for supplying input to the Amazon Kinesis stream. It reads lines from the
22 | * input file specified in the constructor and emits them by calling String.getBytes() into the
23 | * stream defined in the KinesisConnectorConfiguration.
24 | */
25 | public class StreamSource implements Runnable {
26 | private static final Logger LOG = Logger.getLogger(StreamSource.class.getName());
27 | protected AmazonKinesisClient kinesisClient;
28 | protected KinesisConnectorConfiguration config;
29 | protected final String inputFile;
30 | protected final boolean loopOverInputFile;
31 | protected ObjectMapper objectMapper;
32 |
33 | /**
34 | * Creates a new StreamSource.
35 | *
36 | * @param config
37 | * Configuration to determine which stream to put records to and get {@link AWSCredentialsProvider}
38 | * @param inputFile
39 | * File containing record data to emit on each line
40 | */
41 | public StreamSource(KinesisConnectorConfiguration config, String inputFile) {
42 | this(config, inputFile, false);
43 | }
44 |
45 | /**
46 | * Creates a new StreamSource.
47 | *
48 | * @param config
49 | * Configuration to determine which stream to put records to and get {@link AWSCredentialsProvider}
50 | * @param inputFile
51 | * File containing record data to emit on each line
52 | * @param loopOverStreamSource
53 | * Loop over the stream source to continually put records
54 | */
55 | public StreamSource(KinesisConnectorConfiguration config, String inputFile, boolean loopOverStreamSource) {
56 | this.config = config;
57 | this.inputFile = inputFile;
58 | this.loopOverInputFile = loopOverStreamSource;
59 | this.objectMapper = new ObjectMapper();
60 | kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
61 | kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
62 | if (config.KINESIS_ENDPOINT != null) {
63 | kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
64 | }
65 | KinesisUtils.createInputStream(config);
66 | }
67 |
68 | @Override
69 | public void run() {
70 | int iteration = 0;
71 | do {
72 | InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(inputFile);
73 | if (inputStream == null) {
74 | throw new IllegalStateException("Could not find input file: " + inputFile);
75 | }
76 | if (loopOverInputFile) {
77 | LOG.info("Starting iteration " + iteration + " over input file.");
78 | }
79 | try {
80 | processInputStream(inputStream, iteration);
81 | } catch (IOException e) {
82 | LOG.error("Encountered exception while putting data in source stream.", e);
83 | break;
84 | }
85 | iteration++;
86 | } while (loopOverInputFile);
87 | }
88 |
89 | /**
90 | * Process the input file and send PutRecordRequests to Amazon Kinesis.
91 | *
92 | * This function serves to Isolate StreamSource logic so subclasses
93 | * can process input files differently.
94 | *
95 | * @param inputStream
96 | * the input stream to process
97 | * @param iteration
98 | * the iteration if looping over file
99 | * @throws IOException
100 | * throw exception if error processing inputStream.
101 | */
102 | protected void processInputStream(InputStream inputStream, int iteration) throws IOException {
103 | try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream))) {
104 | String line;
105 | int lines = 0;
106 | while ((line = br.readLine()) != null) {
107 | SimpleKinesisMessageModel kinesisMessageModel = new SimpleKinesisMessageModel(line);
108 | //SimpleKinesisMessageModel kinesisMessageModel = objectMapper.readValue(line, SimpleKinesisMessageModel.class);
109 |
110 | PutRecordRequest putRecordRequest = new PutRecordRequest();
111 | putRecordRequest.setStreamName(config.KINESIS_INPUT_STREAM);
112 | putRecordRequest.setData(ByteBuffer.wrap(line.getBytes()));
113 | putRecordRequest.setPartitionKey(Integer.toString(kinesisMessageModel.getId()));
114 | kinesisClient.putRecord(putRecordRequest);
115 | lines++;
116 | }
117 | LOG.info("Added " + lines + " records to stream source.");
118 | }
119 | }
120 |
121 | @Override
122 | protected void finalize() throws Throwable {
123 | super.finalize();
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/src/main/java/com/sumologic/kinesis/utils/KinesisUtils.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.kinesis.utils;
2 |
3 | import java.util.List;
4 |
5 | import org.apache.commons.logging.Log;
6 | import org.apache.commons.logging.LogFactory;
7 |
8 | import com.amazonaws.AmazonServiceException;
9 | import com.amazonaws.auth.AWSCredentialsProvider;
10 | import com.amazonaws.regions.RegionUtils;
11 | import com.amazonaws.services.kinesis.AmazonKinesisClient;
12 | import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
13 | import com.amazonaws.services.kinesis.model.CreateStreamRequest;
14 | import com.amazonaws.services.kinesis.model.DeleteStreamRequest;
15 | import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
16 | import com.amazonaws.services.kinesis.model.ListStreamsRequest;
17 | import com.amazonaws.services.kinesis.model.ListStreamsResult;
18 | import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
19 |
20 | /**
21 | * Utilities to create and delete Amazon Kinesis streams.
22 | */
23 | public class KinesisUtils {
24 |
25 | private static Log LOG = LogFactory.getLog(KinesisUtils.class);
26 |
27 | /**
28 | * Creates the Amazon Kinesis stream specified by config.KINESIS_INPUT_STREAM
29 | *
30 | * @param config
31 | * The configuration with the specified input stream name and {@link AWSCredentialsProvider}
32 | * @param shardCount
33 | * The shard count to create the stream with
34 | */
35 | public static void createInputStream(KinesisConnectorConfiguration config) {
36 | AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
37 | kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
38 | if (config.KINESIS_ENDPOINT != null) {
39 | kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
40 | }
41 | createAndWaitForStreamToBecomeAvailable(kinesisClient,
42 | config.KINESIS_INPUT_STREAM,
43 | config.KINESIS_INPUT_STREAM_SHARD_COUNT);
44 | }
45 |
46 | /**
47 | * Creates the Amazon Kinesis stream specified by config.KINESIS_OUTPUT_STREAM.
48 | *
49 | * @param config
50 | * The configuration with the specified output stream name and {@link AWSCredentialsProvider}
51 | * @param shardCount
52 | * The shard count to create the stream with
53 | */
54 | public static void createOutputStream(KinesisConnectorConfiguration config) {
55 | AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
56 | kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
57 | if (config.KINESIS_ENDPOINT != null) {
58 | kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
59 | }
60 | createAndWaitForStreamToBecomeAvailable(kinesisClient,
61 | config.KINESIS_OUTPUT_STREAM,
62 | config.KINESIS_OUTPUT_STREAM_SHARD_COUNT);
63 | }
64 |
65 | /**
66 | * Creates an Amazon Kinesis stream if it does not exist and waits for it to become available
67 | *
68 | * @param kinesisClient
69 | * The {@link AmazonKinesisClient} with Amazon Kinesis read and write privileges
70 | * @param streamName
71 | * The Amazon Kinesis stream name to create
72 | * @param shardCount
73 | * The shard count to create the stream with
74 | * @throws IllegalStateException
75 | * Invalid Amazon Kinesis stream state
76 | * @throws IllegalStateException
77 | * Stream does not go active before the timeout
78 | */
79 | public static void createAndWaitForStreamToBecomeAvailable(AmazonKinesisClient kinesisClient,
80 | String streamName,
81 | int shardCount) {
82 | if (streamExists(kinesisClient, streamName)) {
83 | String state = streamState(kinesisClient, streamName);
84 | switch (state) {
85 | case "DELETING":
86 | long startTime = System.currentTimeMillis();
87 | long endTime = startTime + 1000 * 120;
88 | while (System.currentTimeMillis() < endTime && streamExists(kinesisClient, streamName)) {
89 | try {
90 | LOG.info("...Deleting Stream " + streamName + "...");
91 | Thread.sleep(1000 * 10);
92 | } catch (InterruptedException e) {
93 | }
94 | }
95 | if (streamExists(kinesisClient, streamName)) {
96 | LOG.error("KinesisUtils timed out waiting for stream " + streamName + " to delete");
97 | throw new IllegalStateException("KinesisUtils timed out waiting for stream " + streamName
98 | + " to delete");
99 | }
100 | case "ACTIVE":
101 | LOG.info("Stream " + streamName + " is ACTIVE");
102 | return;
103 | case "CREATING":
104 | break;
105 | case "UPDATING":
106 | LOG.info("Stream " + streamName + " is UPDATING");
107 | return;
108 | default:
109 | throw new IllegalStateException("Illegal stream state: " + state);
110 | }
111 | } else {
112 | CreateStreamRequest createStreamRequest = new CreateStreamRequest();
113 | createStreamRequest.setStreamName(streamName);
114 | createStreamRequest.setShardCount(shardCount);
115 | kinesisClient.createStream(createStreamRequest);
116 | LOG.info("Stream " + streamName + " created");
117 | }
118 | long startTime = System.currentTimeMillis();
119 | long endTime = startTime + (10 * 60 * 1000);
120 | while (System.currentTimeMillis() < endTime) {
121 | try {
122 | Thread.sleep(1000 * 10);
123 | } catch (Exception e) {
124 | }
125 | try {
126 | String streamStatus = streamState(kinesisClient, streamName);
127 | if (streamStatus.equals("ACTIVE")) {
128 | LOG.info("Stream " + streamName + " is ACTIVE");
129 | return;
130 | }
131 | } catch (ResourceNotFoundException e) {
132 | throw new IllegalStateException("Stream " + streamName + " never went active");
133 | }
134 | }
135 | }
136 |
137 | /**
138 | * Helper method to determine if an Amazon Kinesis stream exists.
139 | *
140 | * @param kinesisClient
141 | * The {@link AmazonKinesisClient} with Amazon Kinesis read privileges
142 | * @param streamName
143 | * The Amazon Kinesis stream to check for
144 | * @return true if the Amazon Kinesis stream exists, otherwise return false
145 | */
146 | private static boolean streamExists(AmazonKinesisClient kinesisClient, String streamName) {
147 | DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
148 | describeStreamRequest.setStreamName(streamName);
149 | try {
150 | kinesisClient.describeStream(describeStreamRequest);
151 | return true;
152 | } catch (ResourceNotFoundException e) {
153 | return false;
154 | }
155 | }
156 |
157 | /**
158 | * Return the state of a Amazon Kinesis stream.
159 | *
160 | * @param kinesisClient
161 | * The {@link AmazonKinesisClient} with Amazon Kinesis read privileges
162 | * @param streamName
163 | * The Amazon Kinesis stream to get the state of
164 | * @return String representation of the Stream state
165 | */
166 | private static String streamState(AmazonKinesisClient kinesisClient, String streamName) {
167 | DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
168 | describeStreamRequest.setStreamName(streamName);
169 | try {
170 | return kinesisClient.describeStream(describeStreamRequest).getStreamDescription().getStreamStatus();
171 | } catch (AmazonServiceException e) {
172 | return null;
173 | }
174 | }
175 |
176 | /**
177 | * Gets a list of all Amazon Kinesis streams
178 | *
179 | * @param kinesisClient
180 | * The {@link AmazonKinesisClient} with Amazon Kinesis read privileges
181 | * @return list of Amazon Kinesis streams
182 | */
183 | public static List listAllStreams(AmazonKinesisClient kinesisClient) {
184 |
185 | ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
186 | listStreamsRequest.setLimit(10);
187 | ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
188 | List streamNames = listStreamsResult.getStreamNames();
189 | while (listStreamsResult.isHasMoreStreams()) {
190 | if (streamNames.size() > 0) {
191 | listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
192 | }
193 |
194 | listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
195 | streamNames.addAll(listStreamsResult.getStreamNames());
196 | }
197 | return streamNames;
198 | }
199 |
200 | /**
201 | * Deletes the input stream specified by config.KINESIS_INPUT_STREAM
202 | *
203 | * @param config
204 | * The configuration containing the stream name and {@link AWSCredentialsProvider}
205 | */
206 | public static void deleteInputStream(KinesisConnectorConfiguration config) {
207 | AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
208 | kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
209 | if (config.KINESIS_ENDPOINT != null) {
210 | kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
211 | }
212 | deleteStream(kinesisClient, config.KINESIS_INPUT_STREAM);
213 | }
214 |
215 | /**
216 | * Deletes the output stream specified by config.KINESIS_OUTPUT_STREAM
217 | *
218 | * @param config
219 | * The configuration containing the stream name and {@link AWSCredentialsProvider}
220 | */
221 | public static void deleteOutputStream(KinesisConnectorConfiguration config) {
222 | AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
223 | kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
224 | if (config.KINESIS_ENDPOINT != null) {
225 | kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
226 | }
227 | deleteStream(kinesisClient, config.KINESIS_OUTPUT_STREAM);
228 | }
229 |
230 | /**
231 | * Deletes an Amazon Kinesis stream if it exists.
232 | *
233 | * @param kinesisClient
234 | * The {@link AmazonKinesisClient} with Amazon Kinesis read and write privileges
235 | * @param streamName
236 | * The Amazon Kinesis stream to delete
237 | */
238 | public static void deleteStream(AmazonKinesisClient kinesisClient, String streamName) {
239 | if (streamExists(kinesisClient, streamName)) {
240 | DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
241 | deleteStreamRequest.setStreamName(streamName);
242 | kinesisClient.deleteStream(deleteStreamRequest);
243 | LOG.info("Deleting stream " + streamName);
244 | } else {
245 | LOG.warn("Stream " + streamName + " does not exist");
246 | }
247 | }
248 |
249 | }
250 |
--------------------------------------------------------------------------------
/src/main/resources/SumologicConnector.properties.stub:
--------------------------------------------------------------------------------
1 | # Fill in your AWS Access Key ID and Secret Access Key
2 | # http://aws.amazon.com/security-credentials
3 | accessKey = [ACCESS-KEY]
4 | secretKey = [SECRET-KEY]
5 |
6 | # KinesisConnector Application Settings
7 | # Since Kinesis Creates a DynamoDB table for each APP,
8 | # each appName must be unique for different kinesisInputStreams and connectorDestinations
9 | appName = kinesisToSumologicConnector
10 |
11 | # By specifying the region name, the connector will connect from the Amazon Kinesis stream in this region
12 | # unless the endpoint for Amazon Kinesis is explicitly specified. The Amazon DynamoDB lease table and Amazon CloudWatch
13 | # metrics for connector will be created in this region. All resources in outgoing destination will
14 | # not be affected by this region name.
15 | regionName = us-east-1
16 | retryLimit = 3
17 | backoffInterval = 50000
18 | bufferRecordCountLimit = 100
19 | bufferMillisecondsLimit = 10000
20 |
21 | # Amazon Kinesis parameters for KinesisConnector
22 |
23 | # Uncomment the following property if you would like to explicitly configure the Amazon Kinesis endpoint.
24 | # This property will configure the connector's Amazon Kinesis client to read from this specific endpoint,
25 | # overwriting the regionName property for ONLY the Amazon Kinesis client. The lease table and Amazon CloudWatch
26 | # metrics will still use the regionName property.
27 | # kinesisEndpoint = https\://kinesis.us-west-2.amazonaws.com
28 |
29 | # Kinesis Stream where data will be grabbed from
30 | kinesisInputStream = VPC
31 |
32 | # Optional Amazon Kinesis parameters for automatically creating the stream
33 | createKinesisInputStream = false
34 | createKinesisOutputStream = false
35 | kinesisInputStreamShardCount = 2
36 | kinesisOutputStreamShardCount = 2
37 |
38 | # Transformer class that will be used to handle records
39 | transformerClass = CloudWatchMessageModelSumologicTransformer
40 |
41 | # Specifies the input file from which the StreamSource will read records
42 | createStreamSource = false
43 | inputStreamFile = users.txt
44 |
# Connector name to be appended to the UserAgent
46 | connectorDestination = sumologic
47 |
48 | # Sumologic HTTP Collector URL
49 | sumologicUrl = [SUMOLOGIC-URL]
50 |
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
6 | log4j.appender.stdout.layout.ConversionPattern=%d{DATE} %5p %c{1}:%L - %m%n
7 |
--------------------------------------------------------------------------------
/src/test/java/com/sumologic/client/CloudWatchMessageModelSumologicTransformerTest.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client;
2 |
3 | import org.junit.Assert;
4 | import org.junit.Before;
5 | import org.junit.Rule;
6 | import org.junit.Test;
7 | import org.junit.Ignore;
8 |
9 | import com.amazonaws.services.kinesis.model.Record;
10 | import com.sumologic.client.model.CloudWatchLogsMessageModel;
11 | import com.sumologic.client.model.SimpleKinesisMessageModel;
12 |
13 | import java.io.IOException;
14 | import java.nio.charset.Charset;
15 | import java.nio.charset.CharsetEncoder;
16 | import java.nio.CharBuffer;
17 | import java.nio.ByteBuffer;
18 |
19 |
20 | public class CloudWatchMessageModelSumologicTransformerTest {
21 | public static Charset charset = Charset.forName("UTF-8");
22 | public static CharsetEncoder encoder = charset.newEncoder();
23 |
24 | @Test
25 | public void theTransformerShouldFailGracefullyWhenUnableToCompress () {
26 | CloudWatchMessageModelSumologicTransformer transfomer = new CloudWatchMessageModelSumologicTransformer();
27 |
28 | String randomData = "Some random string without GZIP compression";
29 | ByteBuffer bufferedData = null;
30 | try {
31 | bufferedData = encoder.encode(CharBuffer.wrap(randomData));
32 | } catch (Exception e) {
33 | Assert.fail("Getting error: "+e.getMessage());
34 | }
35 |
36 | Record mockedRecord = new Record();
37 | mockedRecord.setData(bufferedData);
38 |
39 | CloudWatchLogsMessageModel messageModel = transfomer.toClass(mockedRecord);
40 |
41 |
42 | Assert.assertNull(messageModel);
43 | }
44 |
45 | @Test
46 | public void theTransformerShouldSucceedWhenTransformingAProperJSON() {
47 | CloudWatchMessageModelSumologicTransformer transfomer = new CloudWatchMessageModelSumologicTransformer();
48 |
49 | String jsonData = ""
50 | +"{"
51 | + "\"logEvents\": [{"
52 | + "\"id\": \"3889492387492837492374982374897239847289374892\","
53 | + "\"message\": \"1 23423532532 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 ACCEPT OK\","
54 | + "\"timestamp\": \"2342342342300\""
55 | + "}],"
56 | + "\"logGroup\": \"MyFirstVPC\","
57 | + "\"logStream\": \"eni-ac6a7de4-all\","
58 | + "\"messageType\": \"DATA_MESSAGE\","
59 | + "\"owner\": \"2342352352\","
60 | + "\"subscriptionFilters\": [\"MyFirstVPC\"]"
61 | + "}"
62 | +"";
63 |
64 | byte[] compressData = SumologicKinesisUtils.compressGzip(jsonData);
65 |
66 | ByteBuffer bufferedData = null;
67 | try {
68 | bufferedData = ByteBuffer.wrap(compressData);
69 | } catch (Exception e) {
70 | Assert.fail("Getting error: "+e.getMessage());
71 | }
72 |
73 | Record mockedRecord = new Record();
74 | mockedRecord.setData(bufferedData);
75 |
76 | CloudWatchLogsMessageModel messageModel = transfomer.toClass(mockedRecord);
77 |
78 | Assert.assertNotNull(messageModel);
79 | }
80 |
81 | @Test
82 | public void theTransformerShouldFailWhenTransformingAJSONWithTrailingCommas() {
83 | CloudWatchMessageModelSumologicTransformer transfomer = new CloudWatchMessageModelSumologicTransformer();
84 |
85 | String jsonData = ""
86 | +"{"
87 | + "\"logEvents\": [{"
88 | + "\"id\": \"3889492387492837492374982374897239847289374892\","
89 | + "\"message\": \"1 23423532532 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 ACCEPT OK\","
90 | + "\"timestamp\": \"2342342342300\""
91 | + "}],"
92 | + "\"logGroup\": \"MyFirstVPC\","
93 | + "\"logStream\": \"eni-ac6a7de4-all\","
94 | + "\"messageType\": \"DATA_MESSAGE\","
95 | + "\"owner\": \"2342352352\","
96 | + "\"subscriptionFilters\": [\"MyFirstVPC\"],"
97 | + "}"
98 | +"";
99 |
100 | byte[] compressData = SumologicKinesisUtils.compressGzip(jsonData);
101 |
102 | ByteBuffer bufferedData = null;
103 | try {
104 | bufferedData = ByteBuffer.wrap(compressData);
105 | } catch (Exception e) {
106 | Assert.fail("Getting error: "+e.getMessage());
107 | }
108 |
109 | Record mockedRecord = new Record();
110 | mockedRecord.setData(bufferedData);
111 |
112 | CloudWatchLogsMessageModel messageModel = null;
113 | messageModel = transfomer.toClass(mockedRecord);
114 |
115 | Assert.assertNull(messageModel);
116 | }
117 |
118 | @Test
119 | public void theTransfomerShouldSeparateBatchesOfLogs() {
120 | CloudWatchMessageModelSumologicTransformer transfomer = new CloudWatchMessageModelSumologicTransformer();
121 |
122 | String jsonData = ""
123 | +"{"
124 | + "\"logEvents\": [{"
125 | + "\"id\": \"3889492387492837492374982374897239847289374892\","
126 | + "\"message\": \"1 23423532532 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 ACCEPT OK\","
127 | + "\"timestamp\": \"2342342342300\""
128 | + "},"
129 | + "{"
130 | + "\"id\": \"3289429357928375892739857238975235235235\","
131 | + "\"message\": \"1 23423516 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 REJECT OK\","
132 | + "\"timestamp\": \"2342352351616\""
133 | + "}],"
134 | + "\"logGroup\": \"MyFirstVPC\","
135 | + "\"logStream\": \"eni-ac6a7de4-all\","
136 | + "\"messageType\": \"DATA_MESSAGE\","
137 | + "\"owner\": \"2342352352\","
138 | + "\"subscriptionFilters\": [\"MyFirstVPC\"]"
139 | + "}"
140 | +"";
141 |
142 | byte[] compressData = SumologicKinesisUtils.compressGzip(jsonData);
143 |
144 | ByteBuffer bufferedData = null;
145 | try {
146 | bufferedData = ByteBuffer.wrap(compressData);
147 | } catch (Exception e) {
148 | Assert.fail("Getting error: "+e.getMessage());
149 | }
150 |
151 | Record mockedRecord = new Record();
152 | mockedRecord.setData(bufferedData);
153 |
154 | CloudWatchLogsMessageModel messageModel = null;
155 | messageModel = transfomer.toClass(mockedRecord);
156 |
157 | String debatchedMessage = transfomer.fromClass(messageModel);
158 | System.out.println(debatchedMessage);
159 |
160 | String[] messages = debatchedMessage.split("\n");
161 | Assert.assertTrue(messages.length == 2);
162 | }
163 | }
--------------------------------------------------------------------------------
/src/test/java/com/sumologic/client/SumologicKinesisUtilsTest.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client;
2 |
3 | import org.junit.Assert;
4 | import org.junit.Test;
5 |
6 | public class SumologicKinesisUtilsTest {
7 | @Test
8 | public void compressDecompressGzipTest() {
9 | String data = "a string of characters";
10 |
11 | byte[] compressData = SumologicKinesisUtils.compressGzip(data);
12 | String result = SumologicKinesisUtils.decompressGzip(compressData);
13 |
14 | Assert.assertTrue(data.equals(result));
15 | }
16 |
17 | @Test
18 | public void properJSONVerificationShouldReturnTrue() {
19 | String jsonData = ""
20 | +"{"
21 | + "\"logEvents\": [{"
22 | + "\"id\": \"3889492387492837492374982374897239847289374892\","
23 | + "\"message\": \"1 23423532532 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 ACCEPT OK\","
24 | + "\"timestamp\": \"2342342342300\""
25 | + "}],"
26 | + "\"logGroup\": \"MyFirstVPC\","
27 | + "\"logStream\": \"eni-ac6a7de4-all\","
28 | + "\"messageType\": \"DATA_MESSAGE\","
29 | + "\"owner\": \"2342352352\","
30 | + "\"subscriptionFilters\": [\"MyFirstVPC\"]"
31 | + "}"
32 | +"";
33 |
34 | Assert.assertTrue(SumologicKinesisUtils.verifyJSON(jsonData));
35 | }
36 |
37 | @Test
38 | public void malformedJSONVerificationShouldReturnTrue() {
39 | String jsonData = ""
40 | +"{"
41 | + "\"logEvents\": [{"
42 | + "\"id\": \"3889492387492837492374982374897239847289374892\","
43 | + "\"message\": \"1 23423532532 eni-ac9342k3492 10.1.1.75 66.175.209.17 123 123 17 1 76 1437755534 1437755549 ACCEPT OK\","
44 | + "\"timestamp\": \"2342342342300\""
45 | + "}],"
46 | + "\"logGroup\": \"MyFirstVPC\","
47 | + "\"logStream\": \"eni-ac6a7de4-all\","
48 | + "\"messageType\": \"DATA_MESSAGE\","
49 | + "\"owner\": \"2342352352\","
50 | + "\"subscriptionFilters\": [\"MyFirstVPC\"],"
51 | + "}"
52 | +"";
53 |
54 | Assert.assertFalse(SumologicKinesisUtils.verifyJSON(jsonData));
55 | }
56 | }
--------------------------------------------------------------------------------
/src/test/java/com/sumologic/client/SumologicSenderTest.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client;
2 |
3 | import java.io.IOException;
4 | import java.nio.charset.Charset;
5 | import java.util.ArrayList;
6 | import java.util.Arrays;
7 | import java.util.List;
8 |
9 | import org.junit.Assert;
10 | import org.junit.Before;
11 | import org.junit.Rule;
12 | import org.junit.Test;
13 | import org.junit.Ignore;
14 |
15 | import static com.github.tomakehurst.wiremock.client.WireMock.*;
16 |
17 | import com.github.tomakehurst.wiremock.client.WireMock;
18 | import com.github.tomakehurst.wiremock.junit.WireMockRule;
19 | import com.sumologic.client.SumologicSender;
20 | import com.sumologic.client.implementations.SumologicEmitter;
21 |
22 | public class SumologicSenderTest {
23 |
24 | @Rule
25 | public WireMockRule wireMockRule = new WireMockRule(8089);
26 |
27 | private static final String MOCKED_HOST = "http://localhost:8089";
28 | private static final String MOCKED_COLLECTION = "/sumologic/collections/1234";
29 |
30 |
31 | @Before
32 | public void setUp() {
33 | mockEmitMessages();
34 | }
35 |
36 | @Test
37 | public void theSenderShouldReturnFalseWhenFailing () {
38 | String url = MOCKED_HOST + "/sumologic/collections/fake-url";
39 |
40 | String data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
41 | + "\nIn nisl tortor, dictum nec tristique ut, tincidunt vitae tortor. "
42 | + "\nNam vitae urna ac sem vulputate dignissim at ac nibh. ";
43 |
44 | SumologicSender sender = new SumologicSender(url);
45 | try{
46 | boolean response = sender.sendToSumologic(data);
47 | Assert.assertFalse(response);
48 | } catch (IOException e) {
49 | Assert.fail("Got an exception during test: "+e.getMessage());
50 | }
51 | }
52 |
53 | @Test
54 | public void theSenderShouldReturnTrueOnSuccess () {
55 | String url = MOCKED_HOST + MOCKED_COLLECTION;
56 |
57 | String data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
58 | + "\nIn nisl tortor, dictum nec tristique ut, tincidunt vitae tortor. "
59 | + "\nNam vitae urna ac sem vulputate dignissim at ac nibh. ";
60 |
61 | SumologicSender sender = new SumologicSender(url);
62 | try{
63 | boolean response = sender.sendToSumologic(data);
64 | Assert.assertTrue(response);
65 | } catch (IOException e) {
66 | Assert.fail("Got an exception during test: "+e.getMessage());
67 | }
68 | }
69 |
70 |
71 | private void mockEmitMessages () {
72 | WireMock.stubFor(WireMock.post(WireMock.urlMatching(MOCKED_COLLECTION))
73 | .willReturn(WireMock.aResponse()
74 | .withStatus(200)
75 | .withHeader("Content-Type", "text/html")
76 | .withBody("")));
77 | }
78 |
79 | }
--------------------------------------------------------------------------------
/src/test/java/com/sumologic/client/implementations/SumologicEmitterTest.java:
--------------------------------------------------------------------------------
1 | package com.sumologic.client.implementations;
2 |
3 | import java.util.ArrayList;
4 | import java.util.List;
5 |
6 | import org.junit.Assert;
7 | import org.junit.Before;
8 | import org.junit.Rule;
9 | import org.junit.Test;
10 | import org.junit.Ignore;
11 |
12 | import static com.github.tomakehurst.wiremock.client.WireMock.*;
13 |
14 | import com.github.tomakehurst.wiremock.client.WireMock;
15 | import com.github.tomakehurst.wiremock.junit.WireMockRule;
16 | import com.sumologic.client.implementations.SumologicEmitter;
17 |
18 | public class SumologicEmitterTest {
19 |
20 | @Rule
21 | public WireMockRule wireMockRule = new WireMockRule(8089);
22 |
23 | private static final String MOCKED_HOST = "http://localhost:8089";
24 | private static final String MOCKED_COLLECTION = "/sumologic/collections/1234";
25 |
26 | @Before
27 | public void setUp() {
28 | mockEmitMessages();
29 | }
30 |
31 | @Test
32 | public void theEmitterShouldReturnTheListParameterWhenFailing () {
33 | String url = MOCKED_HOST + "/sumologic/collections/fake-url";
34 |
35 | List messages = new ArrayList();
36 | messages.add("This is message #1");
37 | messages.add("This is message #2");
38 | messages.add("This is message #3");
39 | messages.add("This is message #4");
40 |
41 | SumologicEmitter emitter = new SumologicEmitter(url);
42 | List notEmittedMessages = emitter.sendBatchConcatenating(messages);
43 |
44 | Assert.assertEquals(messages, notEmittedMessages);
45 | }
46 |
47 | @Test
48 | public void theEmitterShouldReturnAnEmptyListOnSuccess () {
49 | String url = MOCKED_HOST + MOCKED_COLLECTION;
50 |
51 | List messages = new ArrayList();
52 | messages.add("This is message #1");
53 | messages.add("This is message #2");
54 | messages.add("This is message #3");
55 | messages.add("This is message #4");
56 |
57 | SumologicEmitter emitter = new SumologicEmitter(url);
58 | List notEmittedMessages = emitter.sendBatchConcatenating(messages);
59 |
60 | Assert.assertEquals(0, notEmittedMessages.size());
61 | }
62 |
63 | private void mockEmitMessages () {
64 | WireMock.stubFor(WireMock.post(WireMock.urlMatching(MOCKED_COLLECTION))
65 | .willReturn(WireMock.aResponse()
66 | .withStatus(200)
67 | .withHeader("Content-Type", "text/html")
68 | .withBody("")));
69 | }
70 |
71 | }
--------------------------------------------------------------------------------