5 | Kafka
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/EXAMPLES/01 Multiple Partitions/basic-kafka-commands.txt:
--------------------------------------------------------------------------------
1 | Basic Kafka Commands
2 |
3 | START ZOOKEEPER
4 | bin/zookeeper-server-start.sh config/zookeeper.properties
5 |
6 | START KAFKA BROKER
7 | bin/kafka-server-start.sh config/server.properties
8 |
9 | CREATE TOPIC
10 | bin/kafka-topics.sh \
11 | --bootstrap-server localhost:9092 \
12 | --create \
13 | --replication-factor 1 \
14 | --partitions 3 \
15 | --topic animals
16 |
17 | LIST TOPICS
18 | bin/kafka-topics.sh \
19 | --bootstrap-server localhost:9092 \
20 | --list
21 |
22 | TOPIC DETAILS
23 | bin/kafka-topics.sh \
24 | --bootstrap-server localhost:9092 \
25 | --describe \
26 | --topic animals
27 |
28 | START CONSOLE PRODUCER
29 | bin/kafka-console-producer.sh \
30 | --broker-list localhost:9092 \
31 | --topic animals
32 |
33 | START CONSOLE CONSUMER
34 | bin/kafka-console-consumer.sh \
35 | --bootstrap-server localhost:9092 \
36 |   --topic animals
37 |
38 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING
39 | bin/kafka-console-consumer.sh \
40 | --bootstrap-server localhost:9092 \
41 | --topic animals \
42 | --from-beginning
43 |
44 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING FROM SPECIFIC PARTITION
45 | bin/kafka-console-consumer.sh \
46 | --bootstrap-server localhost:9092 \
47 | --partition 2 \
48 | --topic animals \
49 | --from-beginning
50 |
51 | START CONSOLE CONSUMER AND READ MESSAGES FROM SPECIFIC OFFSET FROM SPECIFIC PARTITION
52 | bin/kafka-console-consumer.sh \
53 | --bootstrap-server localhost:9092 \
54 | --partition 2 \
55 | --topic animals \
56 | --offset 0
57 |
58 | START CONSOLE CONSUMER WITH SPECIFIC CONSUMER GROUP
59 | bin/kafka-console-consumer.sh \
60 | --bootstrap-server localhost:9092 \
61 |   --topic animals \
62 | --group test \
63 | --from-beginning
64 |
65 | LIST CONSUMER GROUPS
66 | bin/kafka-consumer-groups.sh \
67 | --bootstrap-server localhost:9092 \
68 | --list
69 |
70 | CONSUMER GROUP DETAILS
71 | bin/kafka-consumer-groups.sh \
72 | --bootstrap-server localhost:9092 \
73 | --group test \
74 | --describe
75 |
76 |
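77 | SEND KEYED MESSAGES
78 | A sketch, assuming the animals topic created above; parse.key and
79 | key.separator are standard console-producer properties. Lines typed as
80 | key:value are partitioned by key, so equal keys land on the same partition.
81 | bin/kafka-console-producer.sh \
82 |   --broker-list localhost:9092 \
83 |   --topic animals \
84 |   --property parse.key=true \
85 |   --property key.separator=:
86 |
87 | CONSUME AND PRINT KEYS
88 | A companion sketch for the producer above; print.key is a standard
89 | console-consumer formatter property.
90 | bin/kafka-console-consumer.sh \
91 |   --bootstrap-server localhost:9092 \
92 |   --topic animals \
93 |   --from-beginning \
94 |   --property print.key=true
95 |
96 | INCREASE PARTITION COUNT
97 | A sketch using the standard --alter flag; the partition count can only be
98 | increased, and existing keys may map to different partitions afterwards.
99 | bin/kafka-topics.sh \
100 |   --bootstrap-server localhost:9092 \
101 |   --alter \
102 |   --topic animals \
103 |   --partitions 6
104 |
105 | RESET CONSUMER GROUP OFFSETS TO EARLIEST
106 | A sketch, assuming the test group above has no active consumers;
107 | --reset-offsets, --to-earliest and --execute are standard flags.
108 | bin/kafka-consumer-groups.sh \
109 |   --bootstrap-server localhost:9092 \
110 |   --group test \
111 |   --topic animals \
112 |   --reset-offsets \
113 |   --to-earliest \
114 |   --execute
115 |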
--------------------------------------------------------------------------------
/EXAMPLES/02 Multiple Brokers/basic-kafka-commands.txt:
--------------------------------------------------------------------------------
1 | Basic Kafka Commands
2 |
3 | START ZOOKEEPER
4 | bin/zookeeper-server-start.sh config/zookeeper.properties
5 |
6 | START KAFKA BROKERS
7 | bin/kafka-server-start.sh config/server0.properties
8 | bin/kafka-server-start.sh config/server1.properties
9 | bin/kafka-server-start.sh config/server2.properties
10 |
11 | GET INFORMATION FROM ZOOKEEPER ABOUT ACTIVE BROKER IDS
12 | bin/zookeeper-shell.sh localhost:2181 ls /brokers/ids
13 |
14 | GET INFORMATION FROM ZOOKEEPER ABOUT SPECIFIC BROKER BY ID
15 | bin/zookeeper-shell.sh localhost:2181 get /brokers/ids/0
16 |
17 | CREATE TOPIC
18 | bin/kafka-topics.sh \
19 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
20 | --create \
21 | --replication-factor 3 \
22 | --partitions 5 \
23 |   --topic cars
24 |
25 | LIST TOPICS
26 | bin/kafka-topics.sh \
27 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
28 | --list
29 |
30 | TOPIC DETAILS
31 | bin/kafka-topics.sh \
32 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
33 | --describe \
34 | --topic cars
35 |
36 | START CONSOLE PRODUCER
37 | bin/kafka-console-producer.sh \
38 | --broker-list localhost:9092,localhost:9093,localhost:9094 \
39 | --topic cars
40 |
41 | START CONSOLE CONSUMER
42 | bin/kafka-console-consumer.sh \
43 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
44 | --topic cars
45 |
46 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING
47 | bin/kafka-console-consumer.sh \
48 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
49 |   --topic cars \
50 | --from-beginning
51 |
52 |
53 |
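54 | GET INFORMATION FROM ZOOKEEPER ABOUT THE ACTIVE CONTROLLER
55 | A sketch; /controller is the standard znode where the currently elected
56 | controller broker registers itself.
57 | bin/zookeeper-shell.sh localhost:2181 get /controller
58 |
59 | DELETE TOPIC
60 | A sketch, assuming the cars topic from above; --delete is a standard
61 | kafka-topics.sh flag and requires topic deletion to be enabled on the brokers.
62 | bin/kafka-topics.sh \
63 |   --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
64 |   --delete \
65 |   --topic cars
66 |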
--------------------------------------------------------------------------------
/EXAMPLES/02 Multiple Brokers/config/server0.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=0
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | #listeners=PLAINTEXT://:9092
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-0
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/02 Multiple Brokers/config/server1.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=1
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9093
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-1
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/02 Multiple Brokers/config/server2.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=2
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9094
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-2
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/03 Multiple Brokers and Replication/basic-kafka-commands.txt:
--------------------------------------------------------------------------------
1 | Basic Kafka Commands
2 |
3 | START ZOOKEEPER
4 | bin/zookeeper-server-start.sh config/zookeeper.properties
5 |
6 | START KAFKA BROKERS
7 | bin/kafka-server-start.sh config/server0.properties
8 | bin/kafka-server-start.sh config/server1.properties
9 | bin/kafka-server-start.sh config/server2.properties
10 |
11 | GET INFORMATION FROM ZOOKEEPER ABOUT ACTIVE BROKER IDS
12 | bin/zookeeper-shell.sh localhost:2181 ls /brokers/ids
13 |
14 | GET INFORMATION FROM ZOOKEEPER ABOUT SPECIFIC BROKER BY ID
15 | bin/zookeeper-shell.sh localhost:2181 get /brokers/ids/0
16 |
17 | CREATE TOPIC
18 | bin/kafka-topics.sh \
19 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
20 | --create \
21 | --replication-factor 3 \
22 | --partitions 7 \
23 | --topic months
24 |
25 | LIST TOPICS
26 | bin/kafka-topics.sh \
27 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
28 | --list
29 |
30 | TOPIC DETAILS
31 | bin/kafka-topics.sh \
32 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
33 | --describe \
34 | --topic months
35 |
36 | START CONSOLE PRODUCER
37 | bin/kafka-console-producer.sh \
38 | --broker-list localhost:9092,localhost:9093,localhost:9094 \
39 | --topic months
40 |
41 | START CONSOLE CONSUMER
42 | bin/kafka-console-consumer.sh \
43 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
44 | --topic months
45 |
46 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING
47 | bin/kafka-console-consumer.sh \
48 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
49 | --topic months \
50 | --from-beginning
51 |
52 | START CONSOLE CONSUMER AND READ MESSAGES FROM SPECIFIC PARTITION
53 | bin/kafka-console-consumer.sh \
54 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
55 | --topic months \
56 | --partition 6 \
57 | --from-beginning
58 |
59 | START CONSOLE CONSUMER AND READ MESSAGES FROM SPECIFIC PARTITION AND SPECIFIC OFFSET
60 | bin/kafka-console-consumer.sh \
61 | --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
62 | --topic months \
63 | --partition 3 \
64 | --offset 2
65 |
66 |
67 |
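68 | LIST UNDER-REPLICATED PARTITIONS
69 | A sketch using the standard --under-replicated-partitions flag; with all
70 | three brokers up the output should be empty.
71 | bin/kafka-topics.sh \
72 |   --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
73 |   --describe \
74 |   --under-replicated-partitions
75 |
76 | OBSERVE LEADER FAILOVER
77 | A sketch: stop one broker (Ctrl+C in its terminal), then describe the topic
78 | again; leadership for its partitions should move to another in-sync replica.
79 | bin/kafka-topics.sh \
80 |   --bootstrap-server localhost:9092,localhost:9093,localhost:9094 \
81 |   --describe \
82 |   --topic months
83 |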
--------------------------------------------------------------------------------
/EXAMPLES/03 Multiple Brokers and Replication/config/server0.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=0
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | #listeners=PLAINTEXT://:9092
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-0
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/03 Multiple Brokers and Replication/config/server1.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=1
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9093
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-1
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/03 Multiple Brokers and Replication/config/server2.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=2
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9094
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-2
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/04 Consumer Groups/basic-kafka-commands.txt:
--------------------------------------------------------------------------------
1 | Basic Kafka Commands
2 |
3 | START ZOOKEEPER
4 | bin/zookeeper-server-start.sh config/zookeeper.properties
5 |
6 | START KAFKA BROKER
7 | bin/kafka-server-start.sh config/server.properties
8 |
9 | CREATE TOPIC
10 | bin/kafka-topics.sh \
11 | --bootstrap-server localhost:9092 \
12 | --create \
13 | --replication-factor 1 \
14 | --partitions 5 \
15 | --topic numbers
16 |
17 | LIST TOPICS
18 | bin/kafka-topics.sh \
19 | --bootstrap-server localhost:9092 \
20 | --list
21 |
22 | TOPIC DETAILS
23 | bin/kafka-topics.sh \
24 | --bootstrap-server localhost:9092 \
25 | --describe \
26 | --topic numbers
27 |
28 | START CONSOLE PRODUCER
29 | bin/kafka-console-producer.sh \
30 | --broker-list localhost:9092 \
31 | --topic numbers
32 |
33 | START CONSOLE CONSUMER
34 | bin/kafka-console-consumer.sh \
35 | --bootstrap-server localhost:9092 \
36 | --topic numbers
37 |
38 | START CONSOLE CONSUMER AND READ FROM SPECIFIC PARTITION
39 | bin/kafka-console-consumer.sh \
40 | --bootstrap-server localhost:9092 \
41 | --partition 4 \
42 | --from-beginning \
43 | --topic numbers
44 |
45 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING
46 | bin/kafka-console-consumer.sh \
47 | --bootstrap-server localhost:9092 \
48 | --topic numbers \
49 | --from-beginning
50 |
51 | START CONSOLE CONSUMER WITH SPECIFIC CONSUMER GROUP
52 | bin/kafka-console-consumer.sh \
53 | --bootstrap-server localhost:9092 \
54 | --topic numbers \
55 | --group nums \
56 | --from-beginning
57 |
58 | LIST CONSUMER GROUPS
59 | bin/kafka-consumer-groups.sh \
60 | --bootstrap-server localhost:9092 \
61 | --list
62 |
63 | CONSUMER GROUP DETAILS
64 | bin/kafka-consumer-groups.sh \
65 | --bootstrap-server localhost:9092 \
66 | --group nums \
67 | --describe
68 |
69 |
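70 | CONSUMER GROUP MEMBERS AND ASSIGNMENTS
71 | A sketch, assuming the nums group from above; --members and --verbose are
72 | standard kafka-consumer-groups.sh options that show each member and its
73 | assigned partitions.
74 | bin/kafka-consumer-groups.sh \
75 |   --bootstrap-server localhost:9092 \
76 |   --group nums \
77 |   --describe \
78 |   --members \
79 |   --verbose
80 |
81 | DELETE CONSUMER GROUP
82 | A sketch; deletion only succeeds once the group has no active members.
83 | bin/kafka-consumer-groups.sh \
84 |   --bootstrap-server localhost:9092 \
85 |   --delete \
86 |   --group nums
87 |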
--------------------------------------------------------------------------------
/EXAMPLES/05 Performance Testing/basic-kafka-commands.txt:
--------------------------------------------------------------------------------
1 | Basic Kafka Commands
2 |
3 | START ZOOKEEPER
4 | bin/zookeeper-server-start.sh config/zookeeper.properties
5 |
6 | START KAFKA BROKERS
7 | bin/kafka-server-start.sh config/server0.properties
8 | bin/kafka-server-start.sh config/server1.properties
9 | bin/kafka-server-start.sh config/server2.properties
10 |
11 | CREATE TOPIC
12 | bin/kafka-topics.sh \
13 | --bootstrap-server localhost:9092 \
14 | --create \
15 | --replication-factor 3 \
16 | --partitions 100 \
17 | --topic perf
18 |
19 | LIST TOPICS
20 | bin/kafka-topics.sh \
21 | --bootstrap-server localhost:9092 \
22 | --list
23 |
24 | TOPIC DETAILS
25 | bin/kafka-topics.sh \
26 | --bootstrap-server localhost:9092 \
27 | --describe \
28 | --topic perf
29 |
30 | START CONSOLE PRODUCER
31 | bin/kafka-console-producer.sh \
32 | --broker-list localhost:9092 \
33 | --topic perf
34 |
35 | START CONSOLE CONSUMER
36 | bin/kafka-console-consumer.sh \
37 | --bootstrap-server localhost:9092 \
38 | --topic perf
39 |
40 | START CONSOLE CONSUMER AND READ FROM SPECIFIC PARTITION
41 | bin/kafka-console-consumer.sh \
42 | --bootstrap-server localhost:9092 \
43 | --partition 4 \
44 | --from-beginning \
45 | --topic perf
46 |
47 | START CONSOLE CONSUMER AND READ MESSAGES FROM BEGINNING
48 | bin/kafka-console-consumer.sh \
49 | --bootstrap-server localhost:9092 \
50 | --topic perf \
51 | --from-beginning
52 |
53 | START CONSOLE CONSUMER WITH SPECIFIC CONSUMER GROUP
54 | bin/kafka-console-consumer.sh \
55 | --bootstrap-server localhost:9092 \
56 | --topic perf2 \
57 | --group perf \
58 | --from-beginning
59 |
60 | LIST CONSUMER GROUPS
61 | bin/kafka-consumer-groups.sh \
62 | --bootstrap-server localhost:9092 \
63 | --list
64 |
65 | CONSUMER GROUP DETAILS
66 | bin/kafka-consumer-groups.sh \
67 | --bootstrap-server localhost:9092 \
68 | --group perf \
69 | --describe
70 |
71 | PRODUCER PERFORMANCE TEST
72 | bin/kafka-producer-perf-test.sh \
73 | --topic perf2 \
74 | --num-records 1000 \
75 | --throughput 10 \
76 | --record-size 1000 \
77 | --producer-props \
78 | bootstrap.servers=localhost:9092
79 |
80 |
81 | CONSUMER PERFORMANCE TEST
82 | bin/kafka-consumer-perf-test.sh \
83 | --broker-list localhost:9092 \
84 | --topic perf \
85 | --messages 1000000
86 |
87 |
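88 | NOTE ON THE PRODUCER TEST ABOVE
89 | (editorial addition: 1000 records of 1000 bytes throttled to 10 records/sec runs for
90 | roughly 100 seconds at about 10 KB/sec; pass --throughput -1 to disable throttling
91 | and measure maximum throughput instead)
92 |
93 | END-TO-END LATENCY TEST
94 | (illustrative addition, not in the original notes: runs the EndToEndLatency tool bundled
95 | with Kafka; the positional arguments are broker list, topic, message count, producer
96 | acks, and message size in bytes)
97 | bin/kafka-run-class.sh kafka.tools.EndToEndLatency \
98 | localhost:9092 perf 1000 1 1000
99 |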
--------------------------------------------------------------------------------
/EXAMPLES/05 Performance Testing/config/server0.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=0
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | #listeners=PLAINTEXT://:9092
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-0
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
106 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
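138 | # NOTE (editorial addition, not part of the stock broker config): server0, server1 and
139 | # server2 in this example differ only in broker.id (0/1/2), the listener port (this file
140 | # relies on the commented-out default of 9092, the others set 9093 and 9094) and
141 | # log.dirs (/tmp/kafka-logs-0, -1, -2); everything else is the Kafka distribution default.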
--------------------------------------------------------------------------------
/EXAMPLES/05 Performance Testing/config/server1.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=1
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9093
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-1
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
106 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/EXAMPLES/05 Performance Testing/config/server2.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=2
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9094
32 |
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | #advertised.listeners=PLAINTEXT://your.host.name:9092
37 |
38 | # Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details
39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
40 |
41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
42 | num.network.threads=3
43 |
44 | # The number of threads that the server uses for processing requests, which may include disk I/O
45 | num.io.threads=8
46 |
47 | # The send buffer (SO_SNDBUF) used by the socket server
48 | socket.send.buffer.bytes=102400
49 |
50 | # The receive buffer (SO_RCVBUF) used by the socket server
51 | socket.receive.buffer.bytes=102400
52 |
53 | # The maximum size of a request that the socket server will accept (protection against OOM)
54 | socket.request.max.bytes=104857600
55 |
56 |
57 | ############################# Log Basics #############################
58 |
59 | # A comma separated list of directories under which to store log files
60 | log.dirs=/tmp/kafka-logs-2
61 |
62 | # The default number of log partitions per topic. More partitions allow greater
63 | # parallelism for consumption, but this will also result in more files across
64 | # the brokers.
65 | num.partitions=1
66 |
67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
68 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
69 | num.recovery.threads.per.data.dir=1
70 |
71 | ############################# Internal Topic Settings #############################
72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
73 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
74 | offsets.topic.replication.factor=1
75 | transaction.state.log.replication.factor=1
76 | transaction.state.log.min.isr=1
77 |
78 | ############################# Log Flush Policy #############################
79 |
80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
81 | # the OS cache lazily. The following configurations control the flush of data to disk.
82 | # There are a few important trade-offs here:
83 | # 1. Durability: Unflushed data may be lost if you are not using replication.
84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or
87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
88 |
89 | # The number of messages to accept before forcing a flush of data to disk
90 | #log.flush.interval.messages=10000
91 |
92 | # The maximum amount of time a message can sit in a log before we force a flush
93 | #log.flush.interval.ms=1000
94 |
95 | ############################# Log Retention Policy #############################
96 |
97 | # The following configurations control the disposal of log segments. The policy can
98 | # be set to delete segments after a period of time, or after a given size has accumulated.
99 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
100 | # from the end of the log.
101 |
102 | # The minimum age of a log file to be eligible for deletion due to age
103 | log.retention.hours=168
104 |
105 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
106 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
107 | #log.retention.bytes=1073741824
108 |
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | log.segment.bytes=1073741824
111 |
112 | # The interval at which log segments are checked to see if they can be deleted according
113 | # to the retention policies
114 | log.retention.check.interval.ms=300000
115 |
116 | ############################# Zookeeper #############################
117 |
118 | # Zookeeper connection string (see zookeeper docs for details).
119 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
121 | # You can also append an optional chroot string to the urls to specify the
122 | # root directory for all kafka znodes.
123 | zookeeper.connect=localhost:2181
124 |
125 | # Timeout in ms for connecting to zookeeper
126 | zookeeper.connection.timeout.ms=6000
127 |
128 |
129 | ############################# Group Coordinator Settings #############################
130 |
131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
133 | # The default value for this is 3 seconds.
134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
136 | group.initial.rebalance.delay.ms=0
137 |
--------------------------------------------------------------------------------
/PROJECTS/Java/Kafka/.idea/dictionaries/bogdan.xml:
--------------------------------------------------------------------------------
1 | <component name="ProjectDictionaryState">
2 |   <dictionary name="bogdan">
3 |     <words>
4 |       <w>acks</w>
5 |     </words>
6 |   </dictionary>
7 | </component>
--------------------------------------------------------------------------------
/PROJECTS/Java/Kafka/.idea/workspace.xml:
--------------------------------------------------------------------------------