├── .gitignore
├── README.md
├── distributed-components
├── rpc-learning
│ ├── .gitignore
│ ├── flink
│ │ ├── pom.xml
│ │ └── src
│ │ │ └── main
│ │ │ └── java
│ │ │ └── org
│ │ │ └── apache
│ │ │ └── flink
│ │ │ └── runtime
│ │ │ └── rpc
│ │ │ └── pekko
│ │ │ ├── ClientServerRpcExample.java
│ │ │ ├── HelloGateway.java
│ │ │ └── HelloRpcEndpoint.java
│ └── pom.xml
└── zookeeper-learning
│ ├── .gitignore
│ ├── pom.xml
│ └── src
│ └── main
│ └── java
│ └── org
│ └── apache
│ └── zookeeper
│ └── learning
│ ├── framework
│ └── CrudExamples.java
│ ├── leader
│ ├── election
│ │ ├── ExampleClient.java
│ │ └── LeaderSelectorExample.java
│ └── latch
│ │ └── LeaderLatchExample.java
│ └── watcher
│ └── WatcherExample.java
├── flink-learning
├── .gitignore
├── flink-common
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── org
│ │ ├── apacge
│ │ └── flink
│ │ │ └── common
│ │ │ ├── converter
│ │ │ └── JdbcRowConverter.java
│ │ │ └── join
│ │ │ ├── JdbcRowLookupFunction.java
│ │ │ └── RowLookupFunction.java
│ │ └── apache
│ │ └── flink
│ │ └── connector
│ │ └── jdbc
│ │ └── internal
│ │ └── options
│ │ └── JdbcLookupOptions.java
├── flink-datastream
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── org
│ │ │ └── apache
│ │ │ └── flink
│ │ │ └── learning
│ │ │ ├── broadcast
│ │ │ └── BroadCastExample.java
│ │ │ ├── join
│ │ │ ├── CustomIntervalJoin.java
│ │ │ ├── IntervalJoin.java
│ │ │ └── WindowJoin.java
│ │ │ ├── state
│ │ │ └── MapStateExample.java
│ │ │ ├── tool
│ │ │ ├── IdPartitioner.java
│ │ │ └── KafkaPartitionProducer.java
│ │ │ ├── utils
│ │ │ ├── Order.java
│ │ │ ├── OrderMapper.java
│ │ │ ├── Shipment.java
│ │ │ └── ShipmentMapper.java
│ │ │ └── watermark
│ │ │ ├── CustomWatermarkGeneratorExample.java
│ │ │ ├── NonSourceWatermarkAssignExample.java
│ │ │ ├── SourceWatermarkAssignExample.java
│ │ │ ├── SourceWithIdlenessExample.java
│ │ │ └── WatermarkAfterFilterExample.java
│ │ └── resources
│ │ ├── join
│ │ └── window_join.txt
│ │ └── log4j2.properties
├── flink-sql
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── org
│ │ │ └── apache
│ │ │ └── flink
│ │ │ └── learning
│ │ │ └── table
│ │ │ ├── basic
│ │ │ └── SQLExample.java
│ │ │ ├── conversion
│ │ │ ├── LookupJoinExample.java
│ │ │ ├── SimpleExample.java
│ │ │ └── WatermarkExample.java
│ │ │ ├── cube
│ │ │ └── SimpleCubeExample.java
│ │ │ ├── sink
│ │ │ └── SinkModeTest.java
│ │ │ └── utils
│ │ │ ├── Order.java
│ │ │ ├── OrderMapper.java
│ │ │ ├── Shipment.java
│ │ │ ├── ShipmentMapper.java
│ │ │ └── Utils.java
│ │ └── resources
│ │ └── log4j2.properties
├── flink-udx
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── org
│ │ └── apache
│ │ └── flink
│ │ └── learning
│ │ └── udaf
│ │ ├── GenericRecord.java
│ │ ├── GenericRecordAccumulator.java
│ │ └── GenericRecordAgg.java
└── pom.xml
├── hadoop-learning
├── hdfs-learning
│ ├── dependency-reduced-pom.xml
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── HDFSClientExample.java
│ │ └── resources
│ │ └── log4j.properties
├── mapreduce-learning
│ ├── dependency-reduced-pom.xml
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ ├── PhoneFlowCount.java
│ │ └── WordCount.java
│ │ └── resources
│ │ ├── HTTP_20130313143750.dat
│ │ ├── log4j.properties
│ │ └── wordcount.txt
├── pom.xml
├── rpc-learning
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ ├── Client.java
│ │ ├── MyInterface.java
│ │ ├── MyInterfaceImpl.java
│ │ └── Server.java
└── yarn-learning
│ ├── dependency-reduced-pom.xml
│ ├── pom.xml
│ └── src
│ └── main
│ ├── java
│ └── org
│ │ └── apache
│ │ └── hadoop
│ │ └── yarn
│ │ └── applications
│ │ └── distributedshell
│ │ ├── ApplicationMaster.java
│ │ ├── Client.java
│ │ ├── DSConstants.java
│ │ ├── Log4jPropertyHelper.java
│ │ ├── PlacementSpec.java
│ │ └── readme.md
│ └── resources
│ ├── log4j.properties
│ └── yarn-site.xml
├── hbase-learning
├── hbase1
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── org
│ │ │ └── apache
│ │ │ └── hbase
│ │ │ └── learning
│ │ │ └── filter
│ │ │ ├── CustomFilter.java
│ │ │ ├── FilterExample.java
│ │ │ ├── Test.java
│ │ │ ├── TestRowMutations.java
│ │ │ └── Util.java
│ │ ├── resources
│ │ └── hbase-site.xml
│ │ └── scala
│ │ └── org
│ │ └── apache
│ │ └── hbase
│ │ ├── bulkload
│ │ ├── BulkLoadPartitioner.scala
│ │ ├── ByteArrayWrapper.scala
│ │ └── KeyFamilyQualifier.scala
│ │ └── learning
│ │ └── HBaseSparkBulkLoad.scala
├── hbase2
│ └── pom.xml
└── pom.xml
├── hive-learning
├── pom.xml
└── src
│ └── main
│ └── java
│ ├── MyStringLength.java
│ ├── MyUDTF.java
│ └── format
│ ├── CustomTextInputFormat.java
│ └── CustomTextOutputFormat.java
├── kafka-learning
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── apache
│ └── kafka
│ └── learning
│ └── transaction
│ ├── ConsumerExample.java
│ └── ProducerExample.java
├── spark-learning
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── JavaWordCount.java
│ ├── resources
│ ├── inverted_index.txt
│ └── log4j.properties
│ └── scala
│ ├── InvertedIndex.scala
│ └── WordCount.scala
└── sql
├── antlr-learning
├── README.md
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── antlr
│ └── v4
│ └── examples
│ └── playdb
│ ├── PlayDB.java
│ ├── SQLVisitor.java
│ ├── SelectStmt.java
│ ├── WhereExpr.java
│ └── parser
│ ├── SQLite.g4
│ ├── SQLite.interp
│ ├── SQLite.tokens
│ ├── SQLiteBaseListener.java
│ ├── SQLiteBaseVisitor.java
│ ├── SQLiteLexer.interp
│ ├── SQLiteLexer.java
│ ├── SQLiteLexer.tokens
│ ├── SQLiteListener.java
│ ├── SQLiteParser.java
│ └── SQLiteVisitor.java
├── calcite-learning
├── README.md
├── calcite-adapter
│ ├── calcite-adapter-hbase
│ │ ├── pom.xml
│ │ └── src
│ │ │ ├── main
│ │ │ ├── java
│ │ │ │ └── org
│ │ │ │ │ └── apache
│ │ │ │ │ └── calcite
│ │ │ │ │ └── adapter
│ │ │ │ │ └── hbase
│ │ │ │ │ ├── HBaseEnumerator.java
│ │ │ │ │ ├── HBaseProject.java
│ │ │ │ │ ├── HBaseProjectTableScanRule.java
│ │ │ │ │ ├── HBaseRel.java
│ │ │ │ │ ├── HBaseRules.java
│ │ │ │ │ ├── HBaseScannableTable.java
│ │ │ │ │ ├── HBaseSchema.java
│ │ │ │ │ ├── HBaseSchemaFactory.java
│ │ │ │ │ ├── HBaseTable.java
│ │ │ │ │ ├── HBaseTableScan.java
│ │ │ │ │ ├── HBaseToEnumerableConverter.java
│ │ │ │ │ └── HBaseTranslatableTable.java
│ │ │ └── resources
│ │ │ │ ├── model.json
│ │ │ │ └── schema.csv
│ │ │ └── test
│ │ │ └── java
│ │ │ └── org
│ │ │ └── apache
│ │ │ └── calcite
│ │ │ └── adapter
│ │ │ └── hbase
│ │ │ ├── CodeGenTest.java
│ │ │ ├── HBaseExample.java
│ │ │ └── HBaseTools.java
│ └── pom.xml
├── calcite-avatica
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── org
│ │ └── apache
│ │ └── calcite
│ │ └── avatica
│ │ └── example
│ │ └── simple
│ │ ├── Client.java
│ │ ├── CustomAvaticaServer.java
│ │ └── ServerExample.java
├── calcite-parser
│ ├── pom.xml
│ └── src
│ │ ├── main
│ │ ├── codegen
│ │ │ ├── config.fmpp
│ │ │ ├── data
│ │ │ │ └── Parser.tdd
│ │ │ └── includes
│ │ │ │ ├── compoundIdentifier.ftl
│ │ │ │ └── parserImpls.ftl
│ │ ├── java
│ │ │ └── org
│ │ │ │ └── apache
│ │ │ │ └── calcite
│ │ │ │ ├── example
│ │ │ │ ├── CalciteUtil.java
│ │ │ │ ├── converter
│ │ │ │ │ └── CalciteSQLConverter.java
│ │ │ │ ├── optimizer
│ │ │ │ │ ├── AbstractConverterExample.java
│ │ │ │ │ ├── CalciteRBO.java
│ │ │ │ │ ├── HepPlannerExample.java
│ │ │ │ │ ├── IterativeMergeJoinExample.java
│ │ │ │ │ ├── JoinExample.java
│ │ │ │ │ ├── PruningJoinExample.java
│ │ │ │ │ ├── RelSetMergeExample.java
│ │ │ │ │ ├── TableScanExample.java
│ │ │ │ │ ├── VolcanoPlannerExample.java
│ │ │ │ │ └── VolcanoPlannerExample1.java
│ │ │ │ ├── overall
│ │ │ │ │ ├── BindableMain.java
│ │ │ │ │ ├── Main.java
│ │ │ │ │ ├── Optimizer.java
│ │ │ │ │ ├── SimpleDataContext.java
│ │ │ │ │ ├── SimpleSchema.java
│ │ │ │ │ ├── SimpleTable.java
│ │ │ │ │ └── SimpleTableStatistic.java
│ │ │ │ ├── parser
│ │ │ │ │ ├── CalciteSQLParser.java
│ │ │ │ │ └── ddl
│ │ │ │ │ │ ├── SqlCreateTable.java
│ │ │ │ │ │ └── SqlTableOption.java
│ │ │ │ ├── pretty
│ │ │ │ │ └── SQLPrettyExample.java
│ │ │ │ └── schemas
│ │ │ │ │ └── HrClusteredSchema.java
│ │ │ │ └── sql
│ │ │ │ ├── SqlSubmit.java
│ │ │ │ ├── dialect
│ │ │ │ └── FlinkSqlDialect.java
│ │ │ │ └── pretty
│ │ │ │ └── FlinkSqlPrettyWriter.java
│ │ └── resources
│ │ │ ├── order.csv
│ │ │ ├── sample.txt
│ │ │ └── user.csv
│ │ └── test
│ │ └── java
│ │ └── org
│ │ └── apache
│ │ └── calcite
│ │ └── example
│ │ └── rel
│ │ └── RelBuilderTest.java
└── pom.xml
└── javacc-learning
├── pom.xml
└── src
└── main
├── codegen
├── Adder.jj
└── Calculator.jj
└── java
└── javacc
└── learning
└── calculator
├── Main.java
├── ast
├── CosNode.java
├── ExprNode.java
├── FactorialNode.java
├── Node.java
├── Operator.java
├── SinNode.java
├── TanNode.java
├── TermNode.java
├── UnaryNode.java
└── ValueNode.java
└── visitor
├── ASTVisitor.java
├── CalculateVisitor.java
└── DumpVisitor.java
/.gitignore:
--------------------------------------------------------------------------------
1 | .cache
2 | scalastyle-output.xml
3 | .classpath
4 | .idea
5 | .metadata
6 | .settings
7 | .project
8 | .version.properties
9 | filter.properties
10 | logs.zip
11 | target
12 | tmp
13 | *.class
14 | *.iml
15 | *.swp
16 | *.jar
17 | *.zip
18 | *.log
19 | *.pyc
20 | .DS_Store
21 | build-target
22 | flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/avro/
23 | flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/generated/
24 | flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/generated/
25 | flink-runtime-web/web-dashboard/node/
26 | flink-runtime-web/web-dashboard/node_modules/
27 | flink-runtime-web/web-dashboard/web/
28 | flink-python/dist/
29 | flink-python/build/
30 | flink-python/pyflink.egg-info/
31 | flink-python/apache_flink.egg-info/
32 | flink-python/docs/_build
33 | flink-python/.tox/
34 | flink-python/dev/download
35 | flink-python/dev/.conda/
36 | flink-python/dev/log/
37 | flink-python/dev/.stage.txt
38 | flink-python/.eggs/
39 | atlassian-ide-plugin.xml
40 | out/
41 | /docs/api
42 | /docs/content
43 | /docs/.bundle
44 | /docs/.rubydeps
45 | /docs/ruby2/.bundle
46 | /docs/ruby2/.rubydeps
47 | /docs/.jekyll-metadata
48 | *.ipr
49 | *.iws
50 | tools/flink
51 | tools/flink-*
52 | tools/releasing/release
53 | tools/japicmp-output
54 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This repository contains some examples for learning data systems. Specifically, it contains:
2 | + Examples of SQL parsing & optimizing tools, such as Apache Calcite, Antlr.
3 | + Examples of big data computing systems, such as Flink, Spark.
4 | + Examples of big data storage systems, such as Hadoop, HBase, Kafka.
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | !.mvn/wrapper/maven-wrapper.jar
3 | !**/src/main/**/target/
4 | !**/src/test/**/target/
5 |
6 | ### IntelliJ IDEA ###
7 | .idea/modules.xml
8 | .idea/jarRepositories.xml
9 | .idea/compiler.xml
10 | .idea/libraries/
11 | *.iws
12 | *.iml
13 | *.ipr
14 |
15 | ### Eclipse ###
16 | .apt_generated
17 | .classpath
18 | .factorypath
19 | .project
20 | .settings
21 | .springBeans
22 | .sts4-cache
23 |
24 | ### NetBeans ###
25 | /nbproject/private/
26 | /nbbuild/
27 | /dist/
28 | /nbdist/
29 | /.nb-gradle/
30 | build/
31 | !**/src/main/**/build/
32 | !**/src/test/**/build/
33 |
34 | ### VS Code ###
35 | .vscode/
36 |
37 | ### Mac OS ###
38 | .DS_Store
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/flink/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | org.example
8 | rpc-learning
9 | 1.0-SNAPSHOT
10 |
11 |
12 | flink
13 |
14 |
15 | 11
16 | 11
17 | UTF-8
18 | 1.19.0
19 |
20 |
21 |
22 |
23 | org.apache.flink
24 | flink-core
25 | ${flink.version}
26 |
27 |
28 | org.apache.flink
29 | flink-rpc-core
30 | ${flink.version}
31 |
32 |
33 | org.apache.flink
34 | flink-rpc-akka
35 | ${flink.version}
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/flink/src/main/java/org/apache/flink/runtime/rpc/pekko/ClientServerRpcExample.java:
--------------------------------------------------------------------------------
package org.apache.flink.runtime.rpc.pekko;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.rpc.RpcService;

import java.util.Optional;

/**
 * Minimal client/server example for Flink's Pekko-based RPC stack.
 *
 * <p>Two remote {@link RpcService} instances are created on ephemeral ports of localhost:
 * the first hosts a {@link HelloRpcEndpoint}, the second plays the client and talks to it
 * through a {@link HelloGateway} proxy obtained via {@code RpcService#connect}.
 */
public class ClientServerRpcExample {

    private static RpcService rpcService1; // hosts the endpoint (server side)
    private static RpcService rpcService2; // connects to the endpoint (client side)

    /** Creates and starts both remote RPC services on localhost. */
    public static void open() throws Exception {
        rpcService1 =
                PekkoRpcServiceUtils.createRemoteRpcService(
                        new Configuration(),
                        "localhost",
                        "0", // port "0" lets the OS pick a free port
                        null,
                        Optional.empty());
        rpcService2 =
                PekkoRpcServiceUtils.createRemoteRpcService(
                        new Configuration(),
                        "localhost",
                        "0",
                        null,
                        Optional.empty());
    }

    /** Shuts down both RPC services, blocking until each shutdown completes. */
    public static void close() throws Exception {
        if (rpcService1 != null) {
            rpcService1.closeAsync().get();
        }
        if (rpcService2 != null) {
            rpcService2.closeAsync().get();
        }
    }

    public static void main(String[] args) throws Exception {
        open();
        try {
            HelloRpcEndpoint helloRpcEndpoint = new HelloRpcEndpoint(rpcService1);
            helloRpcEndpoint.start();

            HelloGateway helloGateway =
                    rpcService2.connect(helloRpcEndpoint.getAddress(), HelloGateway.class).get();
            String result = helloGateway.hello();
            System.out.println(result);
        } finally {
            // Previously close() was skipped when connect()/hello() threw, leaking both
            // RPC services and keeping the JVM alive; always shut them down.
            close();
        }
    }
}
53 |
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/flink/src/main/java/org/apache/flink/runtime/rpc/pekko/HelloGateway.java:
--------------------------------------------------------------------------------
package org.apache.flink.runtime.rpc.pekko;

import org.apache.flink.runtime.rpc.RpcGateway;

/**
 * RPC gateway exposing a single {@code hello()} call; remote callers obtain a proxy of
 * this interface via {@code RpcService#connect} (see {@code ClientServerRpcExample}).
 */
public interface HelloGateway extends RpcGateway {

    /** Returns a greeting string produced by the endpoint implementing this gateway. */
    String hello();
}
9 |
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/flink/src/main/java/org/apache/flink/runtime/rpc/pekko/HelloRpcEndpoint.java:
--------------------------------------------------------------------------------
package org.apache.flink.runtime.rpc.pekko;

import org.apache.flink.runtime.rpc.RpcEndpoint;
import org.apache.flink.runtime.rpc.RpcService;

/**
 * Server-side RPC endpoint that answers {@link HelloGateway#hello()} with the fixed
 * string {@code "Hello"}.
 */
public class HelloRpcEndpoint extends RpcEndpoint implements HelloGateway {

    /** @param rpcService the RPC service this endpoint registers itself with */
    public HelloRpcEndpoint(RpcService rpcService) {
        super(rpcService);
    }

    @Override
    public String hello() {
        return "Hello";
    }
}
17 |
--------------------------------------------------------------------------------
/distributed-components/rpc-learning/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | org.example
8 | rpc-learning
9 | 1.0-SNAPSHOT
10 | pom
11 |
12 | flink
13 |
14 |
15 |
16 | 11
17 | 11
18 | UTF-8
19 |
20 |
21 |
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | !.mvn/wrapper/maven-wrapper.jar
3 | !**/src/main/**/target/
4 | !**/src/test/**/target/
5 |
6 | ### IntelliJ IDEA ###
7 | .idea/modules.xml
8 | .idea/jarRepositories.xml
9 | .idea/compiler.xml
10 | .idea/libraries/
11 | *.iws
12 | *.iml
13 | *.ipr
14 |
15 | ### Eclipse ###
16 | .apt_generated
17 | .classpath
18 | .factorypath
19 | .project
20 | .settings
21 | .springBeans
22 | .sts4-cache
23 |
24 | ### NetBeans ###
25 | /nbproject/private/
26 | /nbbuild/
27 | /dist/
28 | /nbdist/
29 | /.nb-gradle/
30 | build/
31 | !**/src/main/**/build/
32 | !**/src/test/**/build/
33 |
34 | ### VS Code ###
35 | .vscode/
36 |
37 | ### Mac OS ###
38 | .DS_Store
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | org.apache.zookeeper.learning
8 | zookeeper-learning
9 | 1.0-SNAPSHOT
10 |
11 |
12 | 5.6.0
13 |
14 |
15 |
16 |
17 | org.apache.curator
18 | curator-recipes
19 | ${curator.version}
20 |
21 |
22 |
23 | org.apache.curator
24 | curator-test
25 | ${curator.version}
26 |
27 |
28 |
29 | org.apache.curator
30 | curator-x-discovery
31 | ${curator.version}
32 |
33 |
34 |
35 | org.apache.curator
36 | curator-x-async
37 | ${curator.version}
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/src/main/java/org/apache/zookeeper/learning/leader/election/ExampleClient.java:
--------------------------------------------------------------------------------
package org.apache.zookeeper.learning.leader.election;

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderSelector;
import org.apache.curator.framework.recipes.leader.LeaderSelectorListenerAdapter;

/**
 * A sample leader-selector participant. Extending {@link LeaderSelectorListenerAdapter}
 * gives this client the recommended handling of connection-state changes.
 */
public class ExampleClient extends LeaderSelectorListenerAdapter implements Closeable {
    private final String name;
    private final LeaderSelector selector;
    private final AtomicInteger timesLeader = new AtomicInteger();

    public ExampleClient(CuratorFramework client, String path, String name) {
        this.name = name;

        // Every participant of one election must use the same ZooKeeper path.
        // This object doubles as the LeaderSelectorListener, which is convenient
        // here but not required by the recipe.
        selector = new LeaderSelector(client, path, this);

        // Re-enter the election automatically after each period of leadership;
        // this is what most callers want.
        selector.autoRequeue();
    }

    public void start() throws IOException {
        // Non-blocking: the election itself proceeds in the background and this
        // instance only participates once the selector has been started.
        selector.start();
    }

    @Override
    public void close() throws IOException {
        selector.close();
    }

    @Override
    public void takeLeadership(CuratorFramework client) throws Exception {
        // Invoked once we hold leadership; returning from this method relinquishes it.
        final int holdSeconds = (int) (5 * Math.random()) + 1;

        System.out.println(name + " is now the leader. Waiting " + holdSeconds + " seconds...");
        System.out.println(name + " has been leader " + timesLeader.getAndIncrement() + " time(s) before.");
        try {
            Thread.sleep(TimeUnit.SECONDS.toMillis(holdSeconds));
        } catch (InterruptedException e) {
            System.err.println(name + " was interrupted.");
            Thread.currentThread().interrupt();
        } finally {
            System.out.println(name + " relinquishing leadership.\n");
        }
    }
}
61 |
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/src/main/java/org/apache/zookeeper/learning/leader/election/LeaderSelectorExample.java:
--------------------------------------------------------------------------------
package org.apache.zookeeper.learning.leader.election;

import com.google.common.collect.Lists;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.List;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;
import org.apache.curator.utils.CloseableUtils;

/**
 * Demonstrates fair leader election with Curator's LeaderSelector: CLIENT_QTY clients
 * run against an in-process {@link TestingServer}; each one repeatedly acquires and
 * relinquishes leadership (see {@link ExampleClient}) until the user presses enter.
 */
public class LeaderSelectorExample {
    private static final int CLIENT_QTY = 10;

    private static final String PATH = "/examples/leader";

    public static void main(String[] args) throws Exception {
        // all of the useful sample code is in ExampleClient.java

        System.out.println(
                "Create " + CLIENT_QTY
                        + " clients, have each negotiate for leadership and then wait a random number of seconds before letting another leader election occur.");
        System.out.println(
                "Notice that leader election is fair: all clients will become leader and will do so the same number of times.");

        // Use typed lists: the previous raw List declarations broke the typed
        // for-each loops in the finally block below.
        List<CuratorFramework> clients = Lists.newArrayList();
        List<ExampleClient> examples = Lists.newArrayList();
        TestingServer server = new TestingServer();
        try {
            for (int i = 0; i < CLIENT_QTY; ++i) {
                CuratorFramework client = CuratorFrameworkFactory.newClient(
                        server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
                clients.add(client);

                ExampleClient example = new ExampleClient(client, PATH, "Client #" + i);
                examples.add(example);

                client.start();
                example.start();
            }

            System.out.println("Press enter/return to quit\n");
            new BufferedReader(new InputStreamReader(System.in)).readLine();
        } finally {
            System.out.println("Shutting down...");

            // Close the participants and clients before the embedded server so
            // shutdown happens in dependency order.
            for (ExampleClient exampleClient : examples) {
                CloseableUtils.closeQuietly(exampleClient);
            }
            for (CuratorFramework client : clients) {
                CloseableUtils.closeQuietly(client);
            }

            CloseableUtils.closeQuietly(server);
        }
    }
}
59 |
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/src/main/java/org/apache/zookeeper/learning/leader/latch/LeaderLatchExample.java:
--------------------------------------------------------------------------------
package org.apache.zookeeper.learning.leader.latch;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.framework.recipes.leader.LeaderLatchListener;
import org.apache.curator.retry.ExponentialBackoffRetry;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Demonstrates leader election with Curator's {@link LeaderLatch}.
 *
 * <p>CLIENT_COUNT clients connect to a ZooKeeper server expected on localhost:2181 and
 * compete for leadership of the same path; each holds its latch open for a staggered,
 * client-specific time and then closes it, handing leadership to the next client.
 */
public class LeaderLatchExample {

    private static final String PATH = "/examples/leader";

    private static final Integer CLIENT_COUNT = 5;

    public static void main(String[] args) throws Exception {
        ExecutorService service = Executors.newFixedThreadPool(CLIENT_COUNT);

        // Launch one competing client per pool thread.
        for (int i = 0; i < CLIENT_COUNT ; i++) {
            final int index = i;
            service.submit(() -> {
                try {
                    schedule(index);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            });
        }

        // Give the clients time to cycle through leadership, then stop everything.
        Thread.sleep(30 * 1000);
        service.shutdownNow();
    }

    /** Runs one client: joins the election, waits a client-specific time, then leaves. */
    private static void schedule(int thread) throws Exception {
        CuratorFramework client = getClient(thread);

        // The latch id (the thread number) identifies this participant.
        LeaderLatch latch = new LeaderLatch(client, PATH, String.valueOf(thread));

        latch.addListener(new LeaderLatchListener() {

            @Override
            public void notLeader() {
                System.out.println("Client [" + thread + "] I am the follower !");
            }

            @Override
            public void isLeader() {
                System.out.println("Client [" + thread + "] I am the leader !");
            }
        });

        latch.start();

        // Stagger client lifetimes (10s, 12s, 14s, ...) so latches close one by one.
        Thread.sleep(2 * (thread + 5) * 1000);

        // NOTIFY_LEADER makes the latch fire notLeader() on this listener when closing
        // (per the Curator LeaderLatch documentation).
        latch.close(LeaderLatch.CloseMode.NOTIFY_LEADER);
        client.close();
        System.out.println("Client [" + latch.getId() + "] Server closed...");
    }

    /** Builds and starts a Curator client for localhost:2181 with retried connects. */
    private static CuratorFramework getClient(final int thread) {
        RetryPolicy rp = new ExponentialBackoffRetry(1000, 3);

        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("localhost:2181")
                .sessionTimeoutMs(1000000)
                .connectionTimeoutMs(3000)
                .retryPolicy(rp)
                .build();
        client.start();
        System.out.println("Client [" + thread + "] Server connected...");
        return client;
    }
}
78 |
--------------------------------------------------------------------------------
/distributed-components/zookeeper-learning/src/main/java/org/apache/zookeeper/learning/watcher/WatcherExample.java:
--------------------------------------------------------------------------------
package org.apache.zookeeper.learning.watcher;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.data.Stat;

/**
 * Demonstrates registering a plain ZooKeeper {@link Watcher} through Curator.
 *
 * <p>Requires a ZooKeeper server on localhost:2181. A watcher is attached while reading
 * a node, and the node is then updated twice to show when the watcher fires.
 */
public class WatcherExample {

    public static void main(String[] args) throws Exception {
        String connectionString = "localhost:2181";
        ExponentialBackoffRetry retryPolicy =
                new ExponentialBackoffRetry(1000, 3);
        CuratorFramework client = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
        client.start();

        String workerPath = "/test/listener/remoteNode";
        String subWorkerPath = "/test/listener/remoteNode/id-"; // NOTE(review): unused in this example

        // Create the watched node (and any missing parents) on first run.
        Stat stat = client.checkExists().forPath(workerPath);
        if (stat == null) {
            client.create().creatingParentsIfNeeded().forPath(workerPath);
        }

        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent watchedEvent) {
                System.out.println("Received watched event: " + watchedEvent);
            }
        };
        // Attach the watcher while reading the node's current data.
        byte[] content = client.getData().usingWatcher(watcher).forPath(workerPath);
        System.out.println("Content: " + new String(content));

        // Plain ZooKeeper watchers are one-shot: only the first update below should
        // trigger the watcher, since it is never re-registered afterwards.
        client.setData().forPath(workerPath, "1".getBytes());
        client.setData().forPath(workerPath, "2".getBytes());

        client.close();
    }
}
42 |
--------------------------------------------------------------------------------
/flink-learning/.gitignore:
--------------------------------------------------------------------------------
1 | dependency-reduced-pom.xml
--------------------------------------------------------------------------------
/flink-learning/flink-common/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | flink-learning
7 | org.apache.flink.learning
8 | 1.0-SNAPSHOT
9 |
10 | 4.0.0
11 |
12 | flink-common
13 |
14 |
15 |
16 |
17 | org.apache.flink
18 | flink-core
19 |
20 |
21 | org.apache.flink
22 | flink-streaming-java
23 |
24 |
25 | org.apache.flink
26 | flink-clients
27 |
28 |
29 |
30 |
31 | org.apache.flink
32 | flink-rpc-core
33 | ${flink.version}
34 |
35 |
36 | org.apache.flink
37 | flink-rpc-akka
38 | ${flink.version}
39 |
40 |
41 |
42 |
43 | org.apache.flink
44 | flink-table-api-java
45 |
46 |
47 | org.apache.flink
48 | flink-table-api-java-bridge
49 |
50 |
51 | org.apache.flink
52 | flink-table-planner_${scala.binary.version}
53 |
54 |
55 |
56 |
57 | org.apache.flink
58 | flink-connector-jdbc
59 |
60 |
61 |
62 | mysql
63 | mysql-connector-java
64 | 8.0.33
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/flink-learning/flink-common/src/main/java/org/apacge/flink/common/converter/JdbcRowConverter.java:
--------------------------------------------------------------------------------
package org.apacge.flink.common.converter;

import org.apache.flink.types.Row;

import java.io.Serializable;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;

/**
 * Converts between JDBC {@link ResultSet} rows and Flink {@link Row} values.
 */
public class JdbcRowConverter implements Serializable {

    /**
     * Copies every column of the {@link ResultSet}'s current row into a new {@link Row}.
     *
     * @param resultSet a result set positioned on the row to convert
     * @return a Row with one field per result-set column
     * @throws SQLException if reading the metadata or a column value fails
     */
    public Row toInternal(ResultSet resultSet) throws SQLException {
        ResultSetMetaData metaData = resultSet.getMetaData();
        int columnCount = metaData.getColumnCount();
        Row converted = new Row(columnCount);
        // JDBC columns are 1-based while Row fields are 0-based, hence the +1 shift.
        for (int pos = 0; pos < columnCount; ++pos) {
            converted.setField(pos, resultSet.getObject(pos + 1));
        }
        return converted;
    }

    // Reverse direction is not implemented yet; callers currently receive null.
    PreparedStatement toExternal(Row row, PreparedStatement preparedStatement) {
        return null;
    }
}
27 |
--------------------------------------------------------------------------------
/flink-learning/flink-common/src/main/java/org/apacge/flink/common/join/RowLookupFunction.java:
--------------------------------------------------------------------------------
package org.apacge.flink.common.join;

import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.types.Row;

// NOTE(review): the package is spelled "apacge" on disk (see the repository tree);
// renaming it would move the file, so it is documented but left unchanged here.
/**
 * Base class for row-oriented lookup-join functions built on {@link ProcessFunction}.
 *
 * <p>NOTE(review): {@code ProcessFunction} is used raw here — the generic type
 * parameters appear to have been lost (plausibly {@code ProcessFunction<Row, Row>}
 * given the {@link Row} import); confirm against the original source.
 */
public abstract class RowLookupFunction extends ProcessFunction {

    // Whether unmatched input rows should still be emitted (outer-join semantics) —
    // NOTE(review): inferred from the field name; confirm in subclasses.
    protected boolean isOuterJoin;
}
10 |
--------------------------------------------------------------------------------
/flink-learning/flink-datastream/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | flink-learning
7 | org.apache.flink.learning
8 | 1.0-SNAPSHOT
9 |
10 | 4.0.0
11 |
12 | flink-datastream
13 |
14 |
15 |
16 | org.apache.flink
17 | flink-core
18 |
19 |
20 | org.apache.flink
21 | flink-streaming-java
22 |
23 |
24 | org.apache.flink
25 | flink-clients
26 |
27 |
28 |
29 | org.apache.flink
30 | flink-connector-kafka
31 |
32 |
33 |
34 |
35 |
36 | org.apache.logging.log4j
37 | log4j-slf4j-impl
38 |
39 |
40 |
41 | org.apache.logging.log4j
42 | log4j-api
43 |
44 |
45 |
46 | org.apache.logging.log4j
47 | log4j-core
48 |
49 |
50 |
51 |
52 | org.apache.logging.log4j
53 | log4j-1.2-api
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/flink-learning/flink-datastream/src/main/java/org/apache/flink/learning/join/CustomIntervalJoin.java:
--------------------------------------------------------------------------------
1 | package org.apache.flink.learning.join;
2 |
3 | import org.apache.flink.api.common.eventtime.WatermarkStrategy;
4 | import org.apache.flink.learning.utils.Order;
5 | import org.apache.flink.learning.utils.OrderMapper;
6 | import org.apache.flink.learning.utils.Shipment;
7 | import org.apache.flink.learning.utils.ShipmentMapper;
8 | import org.apache.flink.streaming.api.datastream.DataStream;
9 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
10 | import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
11 | import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction;
12 | import org.apache.flink.util.Collector;
13 |
14 | public class CustomIntervalJoin {
15 |
16 | public static void main(String[] args) {
17 | StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
18 | env.setParallelism(1);
19 |
20 | DataStream order = env
21 | .socketTextStream("localhost", 8888)
22 | .flatMap(new OrderMapper())
23 | .assignTimestampsAndWatermarks(WatermarkStrategy
24 | .forMonotonousTimestamps()
25 | .withTimestampAssigner((event, time) -> event.getTimestamp()));
26 |
27 | DataStream shipment = env
28 | .socketTextStream("localhost", 9999)
29 | .flatMap(new ShipmentMapper())
30 | .assignTimestampsAndWatermarks(WatermarkStrategy
31 | .forMonotonousTimestamps()
32 | .withTimestampAssigner((event, time) -> event.getTimestamp()));
33 |
34 | order
35 | .connect(shipment)
36 | .keyBy(Order::getOrderId, Shipment::getShipId)
37 | .process(new CoProcessFunction() {
38 | @Override
39 | public void processElement1(Order order, CoProcessFunction.Context context, Collector