├── .gitignore ├── LICENSE ├── README.md ├── pom.xml └── src └── main └── scala └── com └── tmalaska └── flinktraining └── example ├── eventtime └── EventTimeHeartBeatExample.scala ├── session ├── HeartBeat.scala ├── SessionKafkaProducer.scala └── StreamingSessionExample.scala └── wordcount ├── MapWithStateWordCount.scala ├── SimpleWordCount.scala ├── StreamingSQL.scala └── TriggerEvictWordCount.scala /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | pom.xml.tag 3 | pom.xml.releaseBackup 4 | pom.xml.versionsBackup 5 | pom.xml.next 6 | release.properties 7 | dependency-reduced-pom.xml 8 | buildNumber.properties 9 | .mvn/timing.properties 10 | 11 | # Avoid ignoring Maven wrapper jar file (.jar files are usually ignored) 12 | !/.mvn/wrapper/maven-wrapper.jar 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Flink Training Examples

This is a collection of simple Apache Flink examples for teaching.

# Setting Up Kafka Locally
The examples in this repo all use Kafka as the source of the stream.
To make it easy to follow along, we will use Docker to install and run Kafka.

### Set Up Kafka
The following command will download, install, and run a Kafka container locally:

```
docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=127.0.0.1 --env ADVERTISED_PORT=9092 --name mykafka -d spotify/kafka
```

To see the running container, use the docker ps command:
```
docker ps
```
You should see output like the following:
```
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                                            NAMES
a7b9cf39eb05        spotify/kafka       "supervisord -n"    39 seconds ago      Up 38 seconds       0.0.0.0:2181->2181/tcp, 0.0.0.0:9092->9092/tcp   mykafka
```

### Create Kafka Topic
Now that Kafka is running, we need to create a topic to use.
The following commands log you into the container with a bash terminal; from there you can use the Kafka command-line tools to create the topic:

```
docker exec -it mykafka bash

cd /opt/kafka_2.11-0.10.1.0/bin

./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 2 --topic flink_training
```

To list the topics, use the following command:
```
./kafka-topics.sh --list --zookeeper localhost:2181
```

If you want to watch the messages going into our newly created topic, the following command starts a console consumer in the
terminal so you can see events in clear text:
```
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic flink_training --from-beginning
```

# Word Count Examples

## Sending Messages to Kafka

### Send Word Count Data to Kafka
To generate some word count data, we will start a command-line producer in a new terminal.
The following commands set up a producer; words typed into that terminal will feed our word count examples:

```
docker exec -it mykafka bash

cd /opt/kafka_2.11-0.10.1.0/bin

./kafka-console-producer.sh --broker-list localhost:9092 --topic flink_training
```

## Word Count Examples

### Simple Word Count Example
The most basic example is com.tmalaska.flinktraining.example.wordcount.SimpleWordCount

This example uses basic processing time and a count window.
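The heart of the example is the window selection below (adapted from SimpleWordCount.scala later in this repo); the fifth program argument picks the window type:

```
val keyValuePair = wordsStream.keyBy(0)

val countPair = if (typeOfWindow.equals("slidingCount")) {
  // sliding count window: 5 messages wide, fires every 2 messages
  keyValuePair.countWindow(5, 2).sum(1)
} else if (typeOfWindow.equals("tumbleTime")) {
  // tumbling time window: fires every 5 seconds
  keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS)).sum(1)
} else if (typeOfWindow.equals("slidingTime")) {
  // sliding time window: 5 seconds wide, fires every 2 seconds
  keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS), Time.of(2, TimeUnit.SECONDS)).sum(1)
} else {
  // tumbling count window: fires every 5 messages
  keyValuePair.countWindow(5).sum(1)
}
```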
To start it, use the following parameters:

```
localhost 9092 flink_training group_id tumbleTime
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group
- The type of window: tumbleTime, slidingCount, slidingTime, count

### Trigger Evict Word Count Example
This example is com.tmalaska.flinktraining.example.wordcount.TriggerEvictWordCount

It shows how to use triggers and evictors to control the window.

```
localhost 9092 flink_training group_id
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group

### Streaming SQL Example
This example is com.tmalaska.flinktraining.example.wordcount.StreamingSQL

It uses Flink's streaming SQL to simplify the code.

```
localhost 9092 flink_training group_id
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group

### Map With State Word Count Example
This example is com.tmalaska.flinktraining.example.wordcount.MapWithStateWordCount

It shows how to use the mapWithState method and keyed state values.

```
localhost 9092 flink_training group_id
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group



## Generated Session Messages to Send to Kafka
We will be sending messages to Kafka in JSON format. We are using JSON because it is human readable;
the goal here is teaching, not performance.

We will be generating our JSON from the HeartBeat case class:

```
case class HeartBeat(entityId:String, eventTime:Long)
```

### Run Message Session Generator
To generate messages and send them to Kafka, we can use the SessionKafkaProducer object in the
com.tmalaska.flinktraining.example.session package.

We will need to give SessionKafkaProducer the following parameters:
```
localhost 9092 flink_training 10 100 1000 4
```

The parameters are as follows:
- Host of the Kafka Broker
- Port of the Kafka Broker
- Name of the topic we want to write to
- The number of entity IDs to send
- The max number of messages that can be sent for a given entity
- The wait time between loops of sending out messages for each entity
- N in 1/N, the odds of NOT sending a message for an entity on a given loop

**What's with the entity count and the 1/N missing stuff?**
This generator is all about session generation. We are going to have Flink jobs that do sessionization,
so we need a generator that produces data that can be sessionized. That means we need messages
related to a given entity that are close enough together to continue a session, and far enough apart to close a session and
start a new one. A sample of what the generator writes to the topic is shown below.
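Each message is a HeartBeat serialized with lift-json, so the events on the topic will look something like this (the IDs and timestamps below are illustrative):

```
{"entityId":"7","eventTime":1530469741123}
{"entityId":"8","eventTime":1530469741125}
{"entityId":"7","eventTime":1530469742127}
```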
## Run Session Examples

### Streaming Session Example
The most advanced example in this project is the sessionization example,
because it uses a ProcessFunction, which gives you the most detailed
control over our in-memory window.

The class is com.tmalaska.flinktraining.example.session.StreamingSessionExample

```
localhost 9092 flink_training group_id 10000
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group
- Milliseconds for the session gap

### Event Time Example
Lastly, there is an event-time example that uses our session data. The watermark setup it relies on is excerpted after the parameter list below.

The class is com.tmalaska.flinktraining.example.eventtime.EventTimeHeartBeatExample

```
localhost 9092 flink_training group_id tumbleTime
```

These parameters are:
- Kafka Broker Host IP
- Kafka Broker Port
- Kafka Topic
- Kafka Consumer Group
- The type of window: tumbleTime, slidingCount, slidingTime, count
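For reference, here is the timestamp and watermark setup the example relies on, excerpted from EventTimeHeartBeatExample.scala in this repo. The timestamp comes from the event itself, while the watermark trails the wall clock by 10 seconds:

```
.assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks[HeartBeat]() {
  override def getCurrentWatermark: Watermark = {
    new Watermark(System.currentTimeMillis() - 10000)
  }

  override def extractTimestamp(element: HeartBeat, previousElementTimestamp: Long): Long = {
    element.eventTime
  }
})
```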
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.tmalaska</groupId>
  <artifactId>flink.training</artifactId>
  <version>1.0-SNAPSHOT</version>

  <properties>
    <flink.version>1.5.1</flink.version>
    <scala.version>2.11.8</scala.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-scala_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-library</artifactId>
      <version>${scala.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-core</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-clients_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-table_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-streaming-scala_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-streaming-java_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-connector-kafka-0.11_2.11</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>net.liftweb</groupId>
      <artifactId>lift-json_2.11</artifactId>
      <version>3.2.0</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.3</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
      <plugin>
        <groupId>net.alchim31.maven</groupId>
        <artifactId>scala-maven-plugin</artifactId>
        <version>3.2.0</version>
        <configuration>
          <charset>UTF-8</charset>
          <scalaVersion>${scala.version}</scalaVersion>
        </configuration>
        <executions>
          <execution>
            <id>scala-compile-first</id>
            <phase>process-resources</phase>
            <goals>
              <goal>add-source</goal>
              <goal>compile</goal>
            </goals>
          </execution>
          <execution>
            <id>scala-test-compile</id>
            <phase>process-test-resources</phase>
            <goals>
              <goal>testCompile</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.scalatest</groupId>
        <artifactId>scalatest-maven-plugin</artifactId>
        <version>1.0</version>
        <configuration>
          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
          <junitxml>.</junitxml>
          <filereports>WDF TestSuite.txt</filereports>
          <skipTests>false</skipTests>
        </configuration>
        <executions>
          <execution>
            <id>test</id>
            <phase>test</phase>
            <goals>
              <goal>test</goal>
            </goals>
            <configuration>
              <skipTests>true</skipTests>
            </configuration>
          </execution>
          <execution>
            <id>integration-test</id>
            <phase>integration-test</phase>
            <goals>
              <goal>test</goal>
            </goals>
            <configuration>
              <suffixes>Integration-Test</suffixes>
              <argLine>-Xmx1536m -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
              <skipTests>false</skipTests>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>2.2</version>
        <configuration>
          <createDependencyReducedPom>false</createDependencyReducedPom>
          <outputFile>target/FlinkTraining.jar</outputFile>
          <artifactSet>
            <includes>
              <include>*:*</include>
            </includes>
          </artifactSet>
          <filters>
            <filter>
              <artifact>*:*</artifact>
              <excludes>
                <exclude>META-INF/*.SF</exclude>
                <exclude>META-INF/*.DSA</exclude>
                <exclude>META-INF/*.RSA</exclude>
              </excludes>
            </filter>
          </filters>
        </configuration>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <transformers>
                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
                  <resource>reference.conf</resource>
                </transformer>
              </transformers>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/eventtime/EventTimeHeartBeatExample.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.eventtime

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.tmalaska.flinktraining.example.session.HeartBeat
import net.liftweb.json.DefaultFormats
import net.liftweb.json.Serialization.read
import org.apache.flink.api.scala._
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010

object EventTimeHeartBeatExample {
  def main(args: Array[String]) {

    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)
    val typeOfWindow = args(4)

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // create a stream from Kafka
    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("zookeeper.connect", "localhost:2181")
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val heartbeatStream:DataStream[HeartBeat] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))
      .map(json => {
        implicit val formats = DefaultFormats
        read[HeartBeat](json)
      })
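      // The assigner below drives event time: extractTimestamp pulls the
      // timestamp out of each HeartBeat, and getCurrentWatermark keeps the
      // watermark 10 seconds behind the wall clock, so heartbeats arriving
      // more than ~10 seconds late are treated as late data.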
      .assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks[HeartBeat]() {
        override def getCurrentWatermark: Watermark = {
          new Watermark(System.currentTimeMillis() - 10000)
        }

        override def extractTimestamp(element: HeartBeat, previousElementTimestamp: Long): Long = {
          element.eventTime
        }
      })

    // count heartbeats per entity
    val entityCount = heartbeatStream
      .map(heartBeat => (heartBeat.entityId, 1))

    val keyValuePair = entityCount.keyBy(0)

    val countPair = if (typeOfWindow.equals("slidingCount")) {
      //Slide by count. Sliding window of 5 messages that triggers/slides every 2 messages
      keyValuePair.countWindow(5, 2).sum(1)
    } else if (typeOfWindow.equals("tumbleTime")) {
      //Tumble by time. Triggers every 5 seconds
      keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS)).sum(1)
    } else if (typeOfWindow.equals("slidingTime")) {
      //Slide by time. Sliding window of 5 seconds that triggers every 2 seconds
      keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS), Time.of(2, TimeUnit.SECONDS)).sum(1)
    } else {
      //Tumble by count. Triggers every 5 messages
      keyValuePair.countWindow(5).sum(1)
    }

    // print the results
    countPair.print()

    // execute the program
    env.execute("EventTime HeartBeat Example")

  }
}

// A reusable version of the inline assigner above: event time comes from the
// HeartBeat itself and the watermark trails the wall clock by 10 seconds.
class MessageTimestamp extends AssignerWithPeriodicWatermarks[HeartBeat] {
  override def getCurrentWatermark: Watermark = {
    new Watermark(System.currentTimeMillis() - 10000)
  }

  override def extractTimestamp(t: HeartBeat, l: Long): Long = {
    t.eventTime
  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/session/HeartBeat.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.session

case class HeartBeat(entityId:String, eventTime:Long)
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/session/SessionKafkaProducer.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.session

import java.util.{Properties, Random}

import net.liftweb.json.DefaultFormats
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import net.liftweb.json.Serialization.write

object SessionKafkaProducer {
  def main(args:Array[String]): Unit = {

    implicit val formats = DefaultFormats

    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val topic = args(2)
    val numberOfEntities = args(3).toInt
    val numberOfMessagesPerEntity = args(4).toInt
    val waitTimeBetweenMessageBatch = args(5).toInt
    val chancesOfMissing = args(6).toInt

    val props = new Properties()
    props.put("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    props.put("acks", "all")
    props.put("retries", "0")
    props.put("batch.size", "16384")
    props.put("linger.ms", "1")
    props.put("buffer.memory", "33554432")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)

    val r = new Random()
    var sentCount = 0

    println("About to send to " + topic)
    for (j <- 0 until numberOfMessagesPerEntity) {
      for (i <- 0 until numberOfEntities) {
        // with probability 1 - 1/chancesOfMissing, send a heartbeat for entity i
        if (r.nextInt(chancesOfMissing) != 0) {
          val message = write(HeartBeat(i.toString, System.currentTimeMillis()))
          val producerRecord = new ProducerRecord[String,String](topic, message)
          producer.send(producerRecord)
          sentCount += 1
        }
      }
      println("Sent Count:" + sentCount)
      Thread.sleep(waitTimeBetweenMessageBatch)
    }

    producer.close()
  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/session/StreamingSessionExample.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.session

import java.util.Properties

import net.liftweb.json.DefaultFormats
import net.liftweb.json.Serialization.read
import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.util.Collector
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._

object StreamingSessionExample {
  def main(args:Array[String]): Unit = {
    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)
    val sessionTimeOut = args(4).toInt

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    //val socketStream = env.socketTextStream("localhost",9999, '\n')

    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("zookeeper.connect", "localhost:2181")
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val messageStream:DataStream[String] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))

    val heartBeatStream = messageStream
      .map(str => {
        implicit val formats = DefaultFormats
        println("str:" + str)
        val hb = read[HeartBeat](str)
        (hb.entityId, hb.eventTime)
      }).keyBy(0).process(new MyProcessFunction(sessionTimeOut))

    heartBeatStream.map(session => {
      println("session:" + session)
      session
    })

    heartBeatStream.print()

    env.execute()
  }
}

class MyProcessFunction(sessionTimeOut:Int) extends ProcessFunction[(String,Long), SessionObj] {

  private var state:ValueState[SessionObj] = null

  override def open(parameters: Configuration): Unit = {
    state = getRuntimeContext.getState(new ValueStateDescriptor[SessionObj]("myState", classOf[SessionObj]))
  }

  override def processElement(value: (String, Long),
                              ctx: ProcessFunction[(String, Long), SessionObj]#Context,
                              out: Collector[SessionObj]): Unit = {
    val currentSession = state.value()
    // start a new session on the first heartbeat, otherwise extend the current one
    val outBoundSessionRecord = if (currentSession == null) {
      SessionObj(value._2, value._2, 1)
    } else {
      SessionObj(currentSession.startTime, value._2, currentSession.heartbeatCount + 1)
    }
    state.update(outBoundSessionRecord)
    out.collect(outBoundSessionRecord)
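    // Schedule a timer one session-gap into the future. Every heartbeat
    // registers a fresh timer; when a timer fires, onTimer only closes the
    // session if no newer heartbeat has extended it in the meantime. A
    // processing-time timer is used because this job never assigns
    // event-time watermarks.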
    ctx.timerService.registerProcessingTimeTimer(System.currentTimeMillis() + sessionTimeOut)
  }

  override def onTimer(timestamp: Long,
                       ctx: ProcessFunction[(String, Long), SessionObj]#OnTimerContext,
                       out: Collector[SessionObj]): Unit = {
    val result = state.value
    if (result != null && result.latestEndTime + sessionTimeOut < System.currentTimeMillis()) {
      // the session has timed out; clear it so the next heartbeat starts a new one
      state.clear()
    }
  }
}

case class SessionObj(startTime:Long, latestEndTime:Long, heartbeatCount:Int)
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/wordcount/MapWithStateWordCount.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.wordcount

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010

object MapWithStateWordCount {
  def main(args: Array[String]) {

    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // create a stream from Kafka
    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val wordCountStream: DataStream[String] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))

    // implement word count
    val wordsStream = wordCountStream
      .flatMap(line => line.toUpperCase.split(' '))
      .map(word => (word,1))
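    // mapWithState threads a per-key state value through the stream: for each
    // (word, n) it receives the previous count as an Option[Int] and returns
    // both the output record and the updated state.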
    val countPair = wordsStream.keyBy(0).mapWithState((in: (String, Int), count: Option[Int]) =>
      count match {
        case Some(c) => ( (in._1, c + in._2), Some(c + in._2) )
        case None => ( (in._1, in._2), Some(in._2) )
      })

    countPair.print()

    // execute the program
    env.execute()

  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/wordcount/SimpleWordCount.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.wordcount

import java.util.Properties
import java.util.concurrent.TimeUnit

import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time

/**
  * Created by tmalaska on 7/1/17.
  */
object SimpleWordCount {
  def main(args: Array[String]) {

    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)
    val typeOfWindow = args(4)

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // create a stream from Kafka
    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("zookeeper.connect", "localhost:2181")
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val wordCountStream:DataStream[String] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))

    // implement word count
    val wordsStream = wordCountStream
      .flatMap(line => line.toUpperCase.split(' '))
      .map(word => (word, 1))
      //.flatMap{_.toUpperCase.split(' ')}
      //.map{ (_,1) }

    val keyValuePair = wordsStream.keyBy(0)

    val countPair = if (typeOfWindow.equals("slidingCount")) {
      //Slide by count. Sliding window of 5 messages that triggers/slides every 2 messages
      keyValuePair.countWindow(5, 2).sum(1)
    } else if (typeOfWindow.equals("tumbleTime")) {
      //Tumble by time. Triggers every 5 seconds
      keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS)).sum(1)
    } else if (typeOfWindow.equals("slidingTime")) {
      //Slide by time. Sliding window of 5 seconds that triggers every 2 seconds
      keyValuePair.timeWindow(Time.of(5, TimeUnit.SECONDS), Time.of(2, TimeUnit.SECONDS)).sum(1)
    } else {
      //Tumble by count. Triggers every 5 messages
      keyValuePair.countWindow(5).sum(1)
    }

    // print the results
    countPair.print()

    // execute the program
    env.execute("Scala WordCount Example")

  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/wordcount/StreamingSQL.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.wordcount

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Table, TableEnvironment}
import org.apache.flink.types.Row

/**
  * Created by tmalaska on 10/30/17.
  */
object StreamingSQL {
  def main(args:Array[String]): Unit = {
    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)

    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("zookeeper.connect", "localhost:2181")
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val entityCountStream:DataStream[(String, Int)] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))
      .flatMap(line => line.toUpperCase.split(' '))
      .map(word => (word, 1))

    tableEnv.registerDataStream("myTable2", entityCountStream, 'word, 'frequency)

    val rollUp = tableEnv.sqlQuery("SELECT word, SUM(frequency) FROM myTable2 GROUP BY word")

    val typeInfo = createTypeInformation[(String, Int)]
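    // A GROUP BY over an unbounded stream updates results rather than only
    // appending, so the table is converted to a retract stream: each element
    // is a (Boolean, row) pair where true means "add this row" and false
    // means "retract a previously emitted row".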
    val outStream = rollUp.toRetractStream(typeInfo)
    outStream.print()
    env.execute("Scala SQL Example")

  }
}

class CustomSinkFunction() extends SinkFunction[Row] {
  @throws[Exception]
  def invoke(value: Row): Unit = {
    // Do something with the row
    println("-" + value)
  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/tmalaska/flinktraining/example/wordcount/TriggerEvictWordCount.scala:
--------------------------------------------------------------------------------
package com.tmalaska.flinktraining.example.wordcount

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.evictors.CountEvictor
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010

object TriggerEvictWordCount {
  def main(args: Array[String]) {
    val kafkaServerURL = args(0)
    val kafkaServerPort = args(1)
    val kafkaTopic = args(2)
    val groupId = args(3)

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // create a stream from Kafka
    val properties = new Properties
    properties.setProperty("bootstrap.servers", kafkaServerURL + ":" + kafkaServerPort)
    properties.setProperty("group.id", groupId)

    println("kafkaTopic:" + kafkaTopic)

    val wordCountStream: DataStream[String] = env.addSource(
      new FlinkKafkaConsumer010(kafkaTopic, new SimpleStringSchema(), properties))

    // implement word count
    val wordsStream = wordCountStream
      .flatMap(line => line.toUpperCase.split(' '))
      .map(word => (word, 1))

    //Trigger API: https://ci.apache.org/projects/flink/flink-docs-release-1.3/api/java/org/apache/flink/streaming/api/windowing/triggers/Trigger.html
    //Evictor API: https://ci.apache.org/projects/flink/flink-docs-release-1.3/api/java/org/apache/flink/streaming/api/windowing/evictors/Evictor.html
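    // GlobalWindows never ends on its own, so the trigger and evictor do all
    // the work: CountTrigger fires the window every 5 elements per key, and
    // CountEvictor keeps only the 50 most recent elements in the window.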
    val keyValuePair = wordsStream.keyBy(0).window(GlobalWindows.create())
      .trigger(CountTrigger.of(5)).evictor(CountEvictor.of(50))

    val countPair = keyValuePair.sum(1)

    // print the results
    countPair.print()

    // execute the program
    env.execute()
  }
}
--------------------------------------------------------------------------------