├── .gitignore
├── README.md
├── kafkaflink
│   ├── pom.xml
│   └── src
│       └── main
│           ├── java
│           │   └── com
│           │       └── aman
│           │           └── kafkalink
│           │               ├── AsyncRegisterApiInvocation.java
│           │               ├── FlinkReadFromKafka.java
│           │               ├── config
│           │               │   ├── FlinkKafkaConsumerConfig.java
│           │               │   └── FlinkKafkaProducerConfig.java
│           │               └── entity
│           │                   ├── MessageType.java
│           │                   ├── RegisterRequest.java
│           │                   ├── RegisterRequestSchema.java
│           │                   ├── RegisterResponse.java
│           │                   └── RegisterResponseSerializer.java
│           └── resources
│               └── log4j.properties
└── wsvertx
    ├── .idea
    │   ├── compiler.xml
    │   ├── encodings.xml
    │   ├── misc.xml
    │   └── uiDesigner.xml
    ├── dependency-reduced-pom.xml
    ├── pom.xml
    └── src
        └── main
            ├── java
            │   └── org
            │       └── aman
            │           └── wsvertx
            │               ├── ClientSocketRequestVerticle.java
            │               ├── EventBusKafkaReceiverVerticle.java
            │               ├── EventBusKafkaSenderVerticle.java
            │               ├── KafkaConsumerVerticle.java
            │               ├── KafkaProducerVerticle.java
            │               ├── MainVerticle.java
            │               ├── ServerSocketEventBusVerticle.java
            │               ├── config
            │               │   ├── KafkaConsumerConfig.java
            │               │   └── KafkaProducerConfig.java
            │               ├── model
            │               │   ├── codec
            │               │   │   └── RegisterRequestCodec.java
            │               │   └── payload
            │               │       └── RegisterRequest.java
            │               └── util
            │                   └── Util.java
            └── resources
                └── log4j.properties
/.gitignore:
--------------------------------------------------------------------------------
1 | # Built application files
2 | *.apk
3 | *.ap_
4 |
5 | # Files for the ART/Dalvik VM
6 | *.dex
7 |
8 | # Java class files
9 | *.class
10 |
11 | # Generated files
12 | bin/
13 | gen/
14 | out/
15 |
16 | # Gradle files
17 | .gradle/
18 | build/
19 |
20 | # Local configuration file (sdk path, etc)
21 | local.properties
22 |
23 | # Proguard folder generated by Eclipse
24 | proguard/
25 |
26 | # Log Files
27 | *.log
28 |
29 | # Android Studio Navigation editor temp files
30 | .navigation/
31 |
32 | # Android Studio captures folder
33 | captures/
34 |
35 | # IntelliJ
36 | *.iml
37 | .idea/workspace.xml
38 | .idea/tasks.xml
39 | .idea/gradle.xml
40 | .idea/assetWizardSettings.xml
41 | .idea/dictionaries
42 | .idea/libraries
43 | .idea/caches
44 |
45 | # Keystore files
46 | # Uncomment the following line if you do not want to check your keystore files in.
47 | #*.jks
48 |
49 | # External native build folder generated in Android Studio 2.2 and later
50 | .externalNativeBuild
51 |
52 | # Google Services (e.g. APIs or Firebase)
53 | google-services.json
54 |
55 | # Freeline
56 | freeline.py
57 | freeline/
58 | freeline_project_description.json
59 |
60 | # fastlane
61 | fastlane/report.xml
62 | fastlane/Preview.html
63 | fastlane/screenshots
64 | fastlane/test_output
65 | fastlane/readme.md
66 | */workspace.xml
67 | *workspace.xml
68 | wsvertx/.idea/
69 | wsvertx/.idea
70 |
71 | kafkaflink/.idea
72 | kafkaflink/.idea/
73 | wsvertx/target/*
75 | wsvertx/target/
76 | kafkaflink/target/*
77 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Websockets-Vertx-Kafka-Flink
2 | ### A simple request-response cycle using WebSockets, an Eclipse Vert.x server, Apache Kafka, and Apache Flink ###
3 | ---
4 |
5 | - An incoming request is routed to a non-blocking Vert.x server, which writes it to a specific Kafka topic.
6 | - A Flink consumer, implemented as a separate sub-project, reads the messages from that request topic.
7 | - (Optional) The Flink job hits a REST API hosted on a Spring Boot server. You can use JAX-RS or even hardcode the response.
8 | - Flink writes the API result to another topic. Every message carries a unique sender id, and Flink attaches the same id to the response so it can be routed back.
9 | - Finally, the Vert.x Kafka consumer listens on the response topic and fires an event to a websocket handler.
10 | - The websocket consumer for that sender id writes the response to its own socket, completing the async request cycle (the first hop is sketched below).
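A minimal Vert.x sketch of that first hop; the event bus address and exact wiring here are illustrative, not the project's code:

```java
// Hypothetical sketch: a WebSocket frame arriving at the Vert.x server is
// published on the event bus, from where a sender verticle forwards it to Kafka.
vertx.createHttpServer()
        .websocketHandler(socket -> socket.handler(buffer ->
                // hand the raw request to the Kafka sender verticle
                vertx.eventBus().send("kafka.queue.publisher", buffer.toJsonObject())))
        .listen(9443);
```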
11 |
12 |
13 |
14 |
15 | ---
16 | ### Prerequisites ###
17 | * Java 1.8
18 | * Apache Kafka 2.0.0
19 | * Apache Zookeeper 3.4.8
20 | * Eclipse Vertx 3.5.3
21 | * Apache Flink 1.6.0
22 | ---
23 |
24 |
25 | ### Setting up Apache Kafka ###
26 | ```
27 | # Start Zookeeper instance
28 | $ zookeeper-server-start.bat ..\..\config\zookeeper.properties
29 |
30 | # Start Kafka server
31 | $ kafka-server-start.bat ..\..\config\server.properties
32 |
33 | # Create a request topic
34 | $ kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic flink-demo
35 |
36 | # Create a response queue
37 | $ kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic flink-demo-resp
38 |
39 | # Verify the consumer of request queue flink-demo
40 | $ kafka-console-consumer.bat --bootstrap-server localhost:9092 --from-beginning --topic flink-demo
41 |
42 | # Verify the consumer of response queue flink-demo-resp
43 | $ kafka-console-consumer.bat --bootstrap-server localhost:9092 --from-beginning --topic flink-demo-resp
44 |
45 | ```
46 | Make sure the following is appended to **config\server.properties**:
47 | ```
48 | port = 9092
49 | advertised.host.name = localhost
50 | ```
51 |
52 | Note: Replace .bat files with .sh files when working in a UNIX environment.
53 |
54 | ---
55 |
56 | ### What you do in the Flink job depends on the use case. Options are ###
57 | * Make an async REST API call (this project's choice; see the sketch after this list)
58 | * Interact with a database using an async client
59 | * Return a mock response
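In this project the API call lives in the `AsyncRegisterApiInvocation` RichAsyncFunction and is attached to the stream with `AsyncDataStream`. A minimal sketch of the wiring; the timeout and capacity values are illustrative, not the project's exact settings:

```java
// Attach the async API call as an async I/O operator on the request stream
DataStream<RegisterResponse> responses = AsyncDataStream.unorderedWait(
        messageStream,                         // DataStream<RegisterRequest> from Kafka
        new AsyncRegisterApiInvocation(5000),  // per-request API timeout in ms (assumed)
        10000,                                 // operator-level timeout
        TimeUnit.MILLISECONDS,
        100);                                  // max in-flight requests
```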
60 |
61 | ### Caveats ###
62 | * Here, we make the request with the AsyncHttpClient library against an endpoint hosted on a Spring Boot server
63 | * The REST API server listens on port 9004
64 | * You are free to experiment in this department.
65 | * If you choose to keep the REST call used in this project, make sure you have an endpoint implementation (a possible shape is sketched below).
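If you need a quick stand-in, a hypothetical Spring Boot controller matching the URL the Flink job calls (`/api/auth/register` on port 9004) could look like this; the class name and response fields are illustrative:

```java
import java.util.HashMap;
import java.util.Map;

import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;

@RestController
@RequestMapping("/api/auth")
public class RegisterController {

    @PostMapping("/register")
    public ResponseEntity<Map<String, Object>> register(@RequestBody Map<String, Object> request) {
        // A real implementation would persist the user; here we just acknowledge
        Map<String, Object> body = new HashMap<>();
        body.put("success", true);
        body.put("data", "registered " + request.get("email"));
        return ResponseEntity.ok(body);
    }
}
```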
66 |
67 | ### Setting up the project ###
68 | * Run the kafka-flink connector project, which waits for an incoming data stream on the Kafka request topic "flink-demo"
69 | * Run the ws-vertx project, which fires an event on the event bus that writes a sample API request to the topic.
70 | * Verify that the message is written correctly to the topic "flink-demo"
71 | * The Flink Kafka connector consumes the message, deserializes it, and transforms the request stream into a response stream
72 | * The Flink job then writes the response back to the response topic "flink-demo-resp" (sketched below)
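That last step amounts to adding a Kafka sink to the response stream (`responses` in the sketch above). A minimal sketch, assuming the producer config helper mirrors the consumer side and that `RegisterResponseSerializer` implements Flink's `SerializationSchema`:

```java
// Publish the response stream back to Kafka (config helper name assumed)
Properties producerProps = FlinkKafkaProducerConfig.getKafkaProducerConfig();

responses.addSink(new FlinkKafkaProducer010<>(
        "flink-demo-resp",                 // response topic
        new RegisterResponseSerializer(),  // project-provided serialization schema
        producerProps));
```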
73 |
74 | ### Testing the web socket flow ###
75 | * Included within the vertx flow is a client socket verticle that emulates a single web socket request
76 | * It is fired as soon as the server verticle is deployed. [Optional] Look for the following
77 | ```
78 | # Uncomment the below line for local UI testing: It creates a websocket request to the given server
79 | //vertx.deployVerticle(new ClientSocketRequestVerticle());
80 |
81 | ```
82 |
83 | * You can, however, choose to send websocket requests manually from a client. Use the following
84 | ```
85 | # Use the following websocket URL
86 | ws://127.0.0.1:9443/wsapi/register
87 |
88 | # Once the socket opens, begin sending messages in the correct format
89 | {
90 |   "email": "your email",
91 |   "password": "your password",
92 |   "registerAsAdmin": true
93 | }
94 |
95 | ```
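For a scripted client, a minimal Vert.x sketch of the same flow (host, port, and path as above; class name is illustrative):

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.json.JsonObject;

public class ManualWsClient {
    public static void main(String[] args) {
        HttpClient client = Vertx.vertx().createHttpClient();
        client.websocket(9443, "127.0.0.1", "/wsapi/register", socket -> {
            // print whatever the server pushes back on this socket
            socket.handler(buffer -> System.out.println("Response: " + buffer));
            // send one register request in the expected JSON format
            socket.writeTextMessage(new JsonObject()
                    .put("email", "your email")
                    .put("password", "your password")
                    .put("registerAsAdmin", true)
                    .encode());
        });
    }
}
```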
96 |
97 | ----
98 | ### Websockets ###
99 | * Websockets enable communication between the app and the backend
100 | * Async messages over a non-blocking communication layer
101 | * Full-duplex communication channel over a single TCP connection
102 |
103 | ---
104 | ### Vert-x ###
105 | * A toolkit ecosystem for building reactive applications on the JVM
106 | * The Vert.x library provides a non-blocking, asynchronous event bus implementation (see the sketch below)
107 | * Helps manage the websocket queue
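The event bus is how the verticles in this project hand messages to one another; a generic example (the address is illustrative):

```java
// A receiver verticle registers a consumer on an address...
vertx.eventBus().consumer("kafka.queue.publisher", message ->
        System.out.println("Got: " + message.body()));

// ...and a sender verticle posts to the same address
vertx.eventBus().send("kafka.queue.publisher", new JsonObject().put("email", "a@b.c"));
```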
108 |
109 | ---
110 | ### Kafka ###
111 | * Distributed streaming platform.
112 | * Kafka provides a fully integrated Streams API that lets an application act as a stream processor: consuming input streams from one or more topics and producing output streams to one or more output topics, effectively transforming input streams into output streams.
113 | * Handles out-of-order data.
114 |
115 | ---
116 | ### Flink ###
117 | * Open-source platform for distributed stream and batch data processing.
118 | * Provides data distribution, communication, and fault tolerance for distributed computations over data streams.
119 | * Builds batch processing on top of the streaming engine, overlaying native iteration support, managed memory, and program optimization.
120 |
--------------------------------------------------------------------------------
/kafkaflink/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |     <groupId>com.prudential.kafkaflink</groupId>
7 |     <artifactId>kafkaflink</artifactId>
8 |     <version>0.0.1</version>
9 |
10 |     <properties>
11 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
12 |         <!-- Tag names for the remaining original properties (values 1.8, 3.5.1,
13 |              2.4.3, 2.21.0, 1.5.0) were garbled; versions are inlined below. -->
14 |     </properties>
15 |
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>org.apache.flink</groupId>
19 |             <artifactId>flink-connector-kafka-0.10_2.11</artifactId>
20 |             <scope>compile</scope>
21 |             <version>1.6.0</version>
22 |         </dependency>
23 |         <dependency>
24 |             <groupId>org.apache.flink</groupId>
25 |             <artifactId>flink-core</artifactId>
26 |             <scope>compile</scope>
27 |             <version>1.6.0</version>
28 |         </dependency>
29 |         <dependency>
30 |             <groupId>org.apache.flink</groupId>
31 |             <artifactId>flink-streaming-java_2.11</artifactId>
32 |             <scope>compile</scope>
33 |             <version>1.6.0</version>
34 |         </dependency>
35 |         <dependency>
36 |             <groupId>org.apache.flink</groupId>
37 |             <artifactId>flink-java</artifactId>
38 |             <scope>compile</scope>
39 |             <version>1.6.0</version>
40 |         </dependency>
41 |         <dependency>
42 |             <groupId>org.apache.flink</groupId>
43 |             <artifactId>flink-streaming-core</artifactId>
44 |             <scope>compile</scope>
45 |             <version>0.9.1</version>
46 |         </dependency>
47 |         <dependency>
48 |             <groupId>org.apache.flink</groupId>
49 |             <artifactId>flink-clients</artifactId>
50 |             <scope>compile</scope>
51 |             <version>0.9.1</version>
52 |         </dependency>
53 |         <dependency>
54 |             <groupId>org.asynchttpclient</groupId>
55 |             <artifactId>async-http-client</artifactId>
56 |             <version>2.5.2</version>
57 |         </dependency>
58 |         <dependency>
59 |             <groupId>org.apache.httpcomponents</groupId>
60 |             <artifactId>httpasyncclient</artifactId>
61 |             <version>4.1.2</version>
62 |         </dependency>
63 |         <dependency>
64 |             <groupId>com.google.code.gson</groupId>
65 |             <artifactId>gson</artifactId>
66 |             <version>2.8.0</version>
67 |         </dependency>
68 |         <dependency>
69 |             <groupId>io.netty</groupId>
70 |             <artifactId>netty-all</artifactId>
71 |             <version>4.0.27.Final</version>
72 |         </dependency>
73 |         <dependency>
74 |             <groupId>log4j</groupId>
75 |             <artifactId>log4j</artifactId>
76 |             <version>1.2.17</version>
77 |         </dependency>
78 |     </dependencies>
79 |
80 |     <build>
81 |         <plugins>
82 |             <plugin>
83 |                 <groupId>org.apache.maven.plugins</groupId>
84 |                 <artifactId>maven-shade-plugin</artifactId>
85 |                 <version>2.4.3</version>
86 |                 <executions>
87 |                     <execution>
88 |                         <phase>package</phase>
89 |                         <goals>
90 |                             <goal>shade</goal>
91 |                         </goals>
92 |                         <configuration>
93 |                             <filters>
94 |                                 <filter>
95 |                                     <artifact>org.apache.flink:*</artifact>
96 |                                     <excludes>
97 |                                         <exclude>org/apache/flink/shaded/**</exclude>
98 |                                         <exclude>web-docs/**</exclude>
99 |                                     </excludes>
100 |                                 </filter>
101 |                             </filters>
102 |                             <transformers>
103 |                                 <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
104 |                                     <mainClass>com.dataartisans.Job</mainClass>
105 |                                 </transformer>
106 |                             </transformers>
107 |                             <createDependencyReducedPom>false</createDependencyReducedPom>
108 |                         </configuration>
109 |                     </execution>
110 |                 </executions>
111 |             </plugin>
112 |             <plugin>
113 |                 <groupId>org.apache.maven.plugins</groupId>
114 |                 <artifactId>maven-jar-plugin</artifactId>
115 |                 <version>2.5</version>
116 |                 <configuration>
117 |                     <archive>
118 |                         <manifest>
119 |                             <mainClass>com.aman.kafkalink.FlinkReadFromKafka</mainClass>
120 |                         </manifest>
121 |                     </archive>
122 |                 </configuration>
123 |             </plugin>
124 |             <plugin>
125 |                 <groupId>org.apache.maven.plugins</groupId>
126 |                 <artifactId>maven-compiler-plugin</artifactId>
127 |                 <configuration>
128 |                     <source>8</source>
129 |                     <target>8</target>
130 |                 </configuration>
131 |             </plugin>
132 |         </plugins>
133 |     </build>
134 | </project>
--------------------------------------------------------------------------------
/kafkaflink/src/main/java/com/aman/kafkalink/AsyncRegisterApiInvocation.java:
--------------------------------------------------------------------------------
1 | package com.aman.kafkalink;
2 |
3 | import com.aman.kafkalink.entity.MessageType;
4 | import com.aman.kafkalink.entity.RegisterRequest;
5 | import com.aman.kafkalink.entity.RegisterResponse;
6 | import com.google.gson.Gson;
7 | import org.apache.flink.configuration.Configuration;
8 | import org.apache.flink.streaming.api.functions.async.ResultFuture;
9 | import org.apache.flink.streaming.api.functions.async.RichAsyncFunction;
10 | import org.apache.log4j.Logger;
11 | import org.asynchttpclient.AsyncCompletionHandler;
12 | import org.asynchttpclient.AsyncHttpClient;
13 | import org.asynchttpclient.DefaultAsyncHttpClient;
14 | import org.asynchttpclient.Request;
15 | import org.asynchttpclient.Response;
16 |
17 | import java.util.Collections;
18 |
19 |
20 | public class AsyncRegisterApiInvocation extends RichAsyncFunction<RegisterRequest, RegisterResponse> {
21 |
22 | private static final long serialVersionUID = 1L;
23 | private static final Logger logger = Logger.getLogger(AsyncRegisterApiInvocation.class);
24 | private final Integer apiTimeoutMs;
25 |
26 | /**
27 | * The Asynchronous client that can issue concurrent requests with callbacks
28 | */
29 | private transient AsyncHttpClient asyncHttpClient = null;
30 |
31 | public AsyncRegisterApiInvocation(Integer apiTimeoutMs) {
32 | this.apiTimeoutMs = apiTimeoutMs;
33 | }
34 |
35 | @Override
36 | public void open(Configuration parameters) {
37 | logger.info("Opening connection " + parameters.toString());
38 | this.asyncHttpClient = new DefaultAsyncHttpClient();
39 | }
40 |
41 | @Override
42 | public void close() throws Exception {
43 | logger.info("Closing connection");
44 | super.close();
45 | asyncHttpClient.close();
46 | }
47 |
48 | @Override
49 | public void timeout(RegisterRequest registerRequest, ResultFuture<RegisterResponse> resultFuture) throws Exception {
50 | RegisterResponse registerResponse = new RegisterResponse();
51 | registerResponse.setSuccess(false);
52 | registerResponse.setSenderId(registerRequest.getSenderId());
53 | registerResponse.setError("[TimeoutException Api-Invocation]");
54 | registerResponse.setCause("Timeout occurred during registration");
55 | resultFuture.complete(Collections.singletonList(registerResponse));
56 | }
57 |
58 | @Override
59 | public void asyncInvoke(RegisterRequest registerRequest, final ResultFuture<RegisterResponse> resultFuture) throws Exception {
60 | // issue the asynchronous request, receive a future for result
61 | Gson g = new Gson();
62 | String jsonContent = g.toJson(registerRequest);
63 | Request request = asyncHttpClient
64 |         .preparePost("http://localhost:9004/api/auth/register")
65 |         .setHeader("Content-Type", "application/json")
66 |         .setHeader("Content-Length", "" + jsonContent.length())
67 |         .setBody(jsonContent)
68 |         .setRequestTimeout(this.apiTimeoutMs)
69 |         .build();
70 |
71 | try {
72 | asyncHttpClient.executeRequest(request, new AsyncCompletionHandler<RegisterResponse>() {
73 | @Override
74 | public RegisterResponse onCompleted(Response response) throws Exception {
75 | logger.info("Spring returned" + response.getResponseBody());
76 | Gson g = new Gson();
77 | RegisterResponse responseMessage = g.fromJson(response.getResponseBody(),
78 | RegisterResponse.class);
79 |
80 | responseMessage.setSenderId(registerRequest.getSenderId());
81 | responseMessage.setSuccess(true);
82 | responseMessage.setData(response.getResponseBody());
83 | responseMessage.setMessageType(MessageType.REST);
85 | resultFuture.complete(Collections.singletonList(responseMessage));
86 | return responseMessage;
87 | }
88 |
89 | @Override
90 | public void onThrowable(Throwable t) {
91 | RegisterResponse registerResponse = new RegisterResponse();
92 | registerResponse.setSuccess(false);
93 | registerResponse.setSenderId(registerRequest.getSenderId());
94 | registerResponse.setError(t.getMessage());
95 | registerResponse.setCause(t.getCause() != null ? t.getCause().getMessage() : t.getMessage());
96 | resultFuture.complete(Collections.singletonList(registerResponse));
97 | }
98 | });
99 |
100 | } catch (Exception ex) {
101 | logger.error("Exception [HTTP] Client " + ex);
102 | }
103 | }
104 |
105 |
106 | }
107 |
--------------------------------------------------------------------------------
/kafkaflink/src/main/java/com/aman/kafkalink/FlinkReadFromKafka.java:
--------------------------------------------------------------------------------
1 | package com.aman.kafkalink;
2 |
3 | import com.aman.kafkalink.config.FlinkKafkaConsumerConfig;
4 | import com.aman.kafkalink.config.FlinkKafkaProducerConfig;
5 | import com.aman.kafkalink.entity.RegisterRequest;
6 | import com.aman.kafkalink.entity.RegisterRequestSchema;
7 | import com.aman.kafkalink.entity.RegisterResponse;
8 | import com.aman.kafkalink.entity.RegisterResponseSerializer;
9 | import org.apache.flink.streaming.api.datastream.AsyncDataStream;
10 | import org.apache.flink.streaming.api.datastream.DataStream;
11 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
12 | import org.apache.flink.streaming.api.functions.ProcessFunction;
13 | import org.apache.flink.streaming.api.functions.async.AsyncFunction;
14 | import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
15 | import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;
16 | import org.apache.flink.util.Collector;
17 | import org.apache.log4j.Logger;
18 |
19 | import java.util.Properties;
20 | import java.util.concurrent.TimeUnit;
21 |
22 | public class FlinkReadFromKafka {
23 |
24 | private static final Logger logger = Logger.getLogger(FlinkReadFromKafka.class);
25 |
26 | public static void main(String[] args) throws Exception {
27 | StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
28 | env.setParallelism(1);
29 | Properties consumerProp = FlinkKafkaConsumerConfig.getKafkaConsumerConfig();
30 |
31 | // Create a flink consumer from the topic with a custom serializer for "RegisterRequest"
32 | FlinkKafkaConsumer010<RegisterRequest> consumer = new FlinkKafkaConsumer010<>(
33 |         consumerProp.getProperty("topic"),
34 |         new RegisterRequestSchema(), consumerProp);
35 |
36 | // Start reading partitions from the consumer group’s committed offsets in Kafka brokers
37 | consumer.setStartFromGroupOffsets();
38 |
39 | // Create a flink data stream from the consumer source i.e Kafka topic
40 | DataStream<RegisterRequest> messageStream = env.addSource(consumer);
41 |
42 | logger.info(messageStream.process(new ProcessFunction() {
43 | @Override
44 | public void processElement(RegisterRequest RegisterRequest, Context context, Collector