--------------------------------------------------------------------------------
/env/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | kafka_count = [ENV['KAFKA_NODES'].to_i, 1].max
5 | couchbase_count = [ENV['COUCHBASE_NODES'].to_i, 1].max
6 |
7 | Vagrant.configure(2) do |config|
8 | kafka_nodes = (1..kafka_count).map do |idx|
9 | {group: 'kafka', name: "kafka#{idx}.vagrant", address: "192.168.3.#{80 + idx}"}
10 | end
11 | couchbase_nodes = (1..couchbase_count).map do |idx|
12 | {group: 'couchbase', name: "couchbase#{idx}.vagrant", address: "192.168.4.#{80 + idx}"}
13 | end
14 | hosts = kafka_nodes + couchbase_nodes
15 | hosts.each do |host|
16 | config.vm.define(host[:name]) do |node|
17 | node.vm.box = 'http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7.box'
18 | node.vm.network :private_network, ip: host[:address]
19 | node.vm.hostname = host[:name]
20 | node.vm.provision 'ansible' do |ansible|
21 | ansible.playbook = 'vagrant.yml'
22 | ansible.extra_vars = {
23 | hosts: hosts,
24 | kafka_hosts: kafka_nodes,
25 | couchbase_hosts: couchbase_nodes}
26 | ansible.groups = {
27 | 'kafka' => kafka_nodes.map { |n| n[:name] },
28 | 'couchbase' => couchbase_nodes.map { |n| n[:name] },
29 | 'couchbase_primary' => [couchbase_nodes.last[:name]]
30 | }
31 | end
32 | end
33 | end
34 | end
35 |
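Note: the KAFKA_NODES and COUCHBASE_NODES environment variables control how many nodes of each kind are defined (both default to 1 when unset); for example, setting COUCHBASE_NODES=2 before running vagrant up brings up a two-node Couchbase cluster. The generated host lists are passed to the vagrant.yml playbook both as extra vars and as the kafka, couchbase and couchbase_primary Ansible groups.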
--------------------------------------------------------------------------------
/env/roles/kafka/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: download kafka 0.8.1.1
2 | get_url: url=http://ftp.byfly.by/pub/apache.org/kafka/0.8.1.1/kafka_2.10-0.8.1.1.tgz
3 | dest=/home/vagrant/kafka_2.10-0.8.1.1.tgz
4 |
5 | - name: uncompress kafka 0.8.1.1
6 | sudo: yes
7 | unarchive: src=/home/vagrant/kafka_2.10-0.8.1.1.tgz
8 | dest=/opt copy=no
9 |
10 | - name: make symlink /opt/kafka
11 | sudo: yes
12 | file: src=/opt/kafka_2.10-0.8.1.1
13 | dest=/opt/kafka
14 | state=link
15 |
16 | - name: install zookeeper service
17 | sudo: yes
18 | action: template src=zookeeper.service.j2
19 | dest=/usr/lib/systemd/system/zookeeper.service
20 | register: zookeeper
21 |
22 | - name: install kafka service
23 | sudo: yes
24 | action: template src=kafka.service.j2
25 | dest=/usr/lib/systemd/system/kafka.service
26 | register: kafka
27 |
28 | - name: enable automatic topic creation
29 | sudo: yes
30 | template: src=server.properties.j2 dest=/opt/kafka/config/server.properties
31 | register: kafka_config
32 |
33 | - name: reload daemon information
34 | sudo: yes
35 | command: systemctl daemon-reload
36 | when: zookeeper|changed or kafka|changed or kafka_config|changed
37 |
38 | - name: restart zookeeper daemon
39 | sudo: yes
40 | service: name=zookeeper state=restarted enabled=true
41 | when: zookeeper|changed
42 |
43 | - name: restart kafka daemon
44 | sudo: yes
45 | service: name=kafka state=restarted enabled=true
46 | when: zookeeper|changed or kafka|changed
47 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
24 | /**
25 | * @author Sergey Avseyev
26 | */
27 | public class Example {
28 | public static void main(String[] args) {
29 | PrintConsumer example = new PrintConsumer("kafka1.vagrant", 9092);
30 |
31 | example.run("default", 0);
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/SampleFilter.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.kafka.DCPEvent;
26 | import com.couchbase.kafka.filter.Filter;
27 |
28 | public class SampleFilter implements Filter {
29 | @Override
30 | public boolean pass(DCPEvent dcpEvent) {
31 | System.out.println("RECEIVED: " + dcpEvent);
32 | return true;
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/DCPEventFactory.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.deps.com.lmax.disruptor.EventFactory;
26 |
27 | /**
28 | * A factory to preallocate {@link DCPEvent}s.
29 | *
30 | * @author Sergey Avseyev
31 | */
32 | public class DCPEventFactory implements EventFactory<DCPEvent> {
33 | @Override
34 | public DCPEvent newInstance() {
35 | return new DCPEvent();
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/filter/Filter.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.filter;
24 |
25 | import com.couchbase.kafka.DCPEvent;
26 |
27 | /**
28 | * General interface to select the Couchbase events that should be sent to Kafka.
29 | *
30 | * @author Sergey Avseyev
31 | */
32 | public interface Filter {
33 | /**
34 | * Decides whether dcpEvent should be sent to Kafka.
35 | *
36 | * @param dcpEvent event object from Couchbase.
37 | * @return true if event should be sent to Kafka.
38 | */
39 | boolean pass(DCPEvent dcpEvent);
40 | }
41 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/PrintConsumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | /**
26 | * @author Sergey Avseyev
27 | */
28 | public class PrintConsumer extends AbstractConsumer {
29 | public PrintConsumer(String[] seedBrokers, int port) {
30 | super(seedBrokers, port);
31 | }
32 |
33 | public PrintConsumer(String seedBroker, int port) {
34 | super(seedBroker, port);
35 | }
36 |
37 | @Override
38 | public void handleMessage(long offset, byte[] bytes) {
39 | System.out.println(String.valueOf(offset) + ": " + new String(bytes));
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/Direction.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | /**
26 | * @author Sergey Avseyev
27 | */
28 | public enum Direction {
29 | /**
30 | * Start from the earliest point in history and
31 | * stop, when current view has reached.
32 | */
33 | TO_CURRENT,
34 | /**
35 | * Start from current state, and wait for all
36 | * further changes infinitely.
37 | */
38 | FROM_CURRENT,
39 | /**
40 | * Start from the earliest point in history and
41 | * wait for all further changes infinitely.
42 | */
43 | EVERYTHING
44 | }
45 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/state/StreamStateUpdatedEvent.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2016 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.state;
24 |
25 | /**
26 | * This event is generated when the {@link ConnectorState} is being updated.
27 | *
28 | * @author Sergey Avseyev
29 | * @since 1.3.0
30 | */
31 | public class StreamStateUpdatedEvent {
32 | private final ConnectorState connectorState;
33 | private final short partition;
34 |
35 | public StreamStateUpdatedEvent(ConnectorState connectorState, short partition) {
36 | this.connectorState = connectorState;
37 | this.partition = partition;
38 | }
39 |
40 | public ConnectorState connectorState() {
41 | return connectorState;
42 | }
43 |
44 | public short partition() {
45 | return partition;
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/coder/AbstractEncoder.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.coder;
24 |
25 | import com.couchbase.kafka.DCPEvent;
26 | import kafka.serializer.Encoder;
27 | import kafka.utils.VerifiableProperties;
28 |
29 | /**
30 | * Defines base for all encoders of Couchbase events.
31 | *
32 | * @author Sergey Avseyev
33 | */
34 | public abstract class AbstractEncoder implements Encoder<DCPEvent> {
35 | public AbstractEncoder(final VerifiableProperties properties) {
36 | }
37 |
38 | /**
39 | * Serializes dcpEvent to stream of bytes.
40 | *
41 | * @param dcpEvent event from Couchbase
42 | * @return array of bytes to send to Kafka
43 | */
44 | @Override
45 | public abstract byte[] toBytes(DCPEvent dcpEvent);
46 | }
47 |
--------------------------------------------------------------------------------
/samples/generator/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>couchbaselabs</groupId>
    <artifactId>kafka-samples-generator</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>com.couchbase.client</groupId>
            <artifactId>java-client</artifactId>
            <version>2.2.2</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.3</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.3</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>example.Example</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/filter/MutationsFilter.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.filter;
24 |
25 | import com.couchbase.client.core.message.dcp.MutationMessage;
26 | import com.couchbase.client.core.message.dcp.RemoveMessage;
27 | import com.couchbase.kafka.DCPEvent;
28 |
29 | /**
30 | * The {@link MutationsFilter} allows only mutation and removal events to be sent to Kafka.
31 | *
32 | * @author Sergey Avseyev
33 | */
34 | public class MutationsFilter implements Filter {
35 |
36 | /**
37 | * Returns true if the event is a mutation or removal.
38 | *
39 | * @param dcpEvent event object from Couchbase.
40 | * @return true if the event is a mutation or removal.
41 | */
42 | public boolean pass(final DCPEvent dcpEvent) {
43 | return dcpEvent.message() instanceof MutationMessage
44 | || dcpEvent.message() instanceof RemoveMessage;
45 | }
46 | }
47 |
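For comparison, here is a minimal sketch of another Filter implementation (not part of the connector; the class name and key prefix are invented for illustration) that forwards only events whose document key starts with a fixed prefix, using DCPEvent.key() shown further down in this listing:

package example;

import com.couchbase.kafka.DCPEvent;
import com.couchbase.kafka.filter.Filter;

/**
 * Hypothetical filter: only documents whose key starts with "user::" reach Kafka.
 */
public class KeyPrefixFilter implements Filter {
    private static final String PREFIX = "user::";

    @Override
    public boolean pass(final DCPEvent dcpEvent) {
        final String key = dcpEvent.key();
        return key != null && key.startsWith(PREFIX);
    }
}

Such a class would be plugged in through kafkaFilterClass("example.KeyPrefixFilter") on the environment builder, just like SampleFilter in the producer example.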
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/SampleEncoder.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.client.core.message.dcp.MutationMessage;
26 | import com.couchbase.kafka.DCPEvent;
27 | import com.couchbase.kafka.coder.AbstractEncoder;
28 | import kafka.utils.VerifiableProperties;
29 |
30 | /**
31 | * @author Sergey Avseyev
32 | */
33 | public class SampleEncoder extends AbstractEncoder {
34 | public SampleEncoder(final VerifiableProperties properties) {
35 | super(properties);
36 | }
37 |
38 | @Override
39 | public byte[] toBytes(final DCPEvent dcpEvent) {
40 | if (dcpEvent.message() instanceof MutationMessage) {
41 | MutationMessage message = (MutationMessage) dcpEvent.message();
42 | return message.key().getBytes();
43 | } else {
44 | return dcpEvent.message().toString().getBytes();
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/env/roles/couchbase_cluster/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install epel
2 | sudo: yes
3 | yum: name=epel-release state=installed
4 |
5 | - name: install python-httplib2
6 | sudo: yes
7 | yum: name=python-httplib2 state=installed
8 |
9 | - name: check primary node state
10 | uri: url=http://{{ cluster }}/pools user={{ username }} password={{ password }}
11 | register: result
12 |
13 | - name: initialize primary node
14 | shell: /opt/couchbase/bin/couchbase-cli node-init -c {{ cluster }} -u {{ username }} -p {{ password }} --node-init-hostname={{ primary_host }}
15 | when: result.json.pools|count == 0
16 |
17 | - name: initialize cluster
18 | shell: /opt/couchbase/bin/couchbase-cli cluster-init -c {{ cluster }} -u {{ username }} -p {{ password }} --cluster-init-username={{ username }} --cluster-init-password={{ password }} --cluster-init-port={{ port }} --cluster-init-ramsize={{ ram }}
19 | when: result.json.pools|count == 0
20 |
21 | - name: check primary node state
22 | uri: url=http://{{ couchbase_hosts[0].name }}:{{ port }}/pools user={{ username }} password={{ password }}
23 | register: result
24 |
25 | - name: join additional nodes
26 | shell: /opt/couchbase/bin/couchbase-cli server-add -c {{ cluster }} -u {{ username }} -p {{ password }} --server-add={{ item.name }}:{{ port }} --server-add-username={{ username }} --server-add-password={{ password }}
27 | with_items: couchbase_hosts[:-1]
28 | when: result.json.pools|count == 0
29 |
30 | - name: rebalance cluster
31 | shell: /opt/couchbase/bin/couchbase-cli rebalance -c {{ cluster }} -u {{ username }} -p {{ password }}
32 | when: result.json.pools|count == 0
33 |
34 | - name: list buckets
35 | shell: /opt/couchbase/bin/couchbase-cli bucket-list -c {{ cluster }} -u {{ username }} -p {{ password }}
36 | register: result
37 |
38 | - name: create bucket '{{ bucket_name }}'
39 | shell: /opt/couchbase/bin/couchbase-cli bucket-create -c {{ cluster }} -u {{ username }} -p {{ password }} --bucket={{ bucket_name }} --bucket-type=couchbase --bucket-ramsize={{ bucket_size }} --bucket-replica={{ bucket_replica }}
40 | when: not bucket_name in result.stdout_lines
41 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/state/NullStateSerializer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2016 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.state;
24 |
25 | import com.couchbase.kafka.CouchbaseKafkaEnvironment;
26 |
27 | /**
28 | * Represents a state serializer which does not persist the state.
29 | *
30 | * @author Sergey Avseyev
31 | */
32 | public class NullStateSerializer implements StateSerializer {
33 |
34 | public NullStateSerializer(final CouchbaseKafkaEnvironment environment) {
35 | }
36 |
37 | @Override
38 | public void dump(ConnectorState connectorState) {
39 | }
40 |
41 | @Override
42 | public void dump(ConnectorState connectorState, short partition) {
43 | }
44 |
45 | @Override
46 | public ConnectorState load(ConnectorState connectorState) {
47 | return new ConnectorState();
48 | }
49 |
50 | @Override
51 | public StreamState load(ConnectorState connectorState, short partition) {
52 | return new StreamState(partition, 0, 0);
53 | }
54 | }
55 |
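In contrast to the no-op serializer above, the following sketch (not part of the library; the file name and storage format are invented, and error handling is kept to a minimum) shows how the same StateSerializer contract could persist stream positions to a local properties file:

package example;

import com.couchbase.kafka.CouchbaseKafkaEnvironment;
import com.couchbase.kafka.state.ConnectorState;
import com.couchbase.kafka.state.StateSerializer;
import com.couchbase.kafka.state.StreamState;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Properties;

public class PropertiesStateSerializer implements StateSerializer {
    private final File file = new File("connector-state.properties");

    public PropertiesStateSerializer(final CouchbaseKafkaEnvironment environment) {
    }

    @Override
    public void dump(final ConnectorState connectorState) {
        // one property per partition: "<partition>" -> "<vbucketUUID>,<sequenceNumber>"
        final Properties props = new Properties();
        for (StreamState stream : connectorState) {
            props.setProperty(Short.toString(stream.partition()),
                    stream.vbucketUUID() + "," + stream.sequenceNumber());
        }
        try (OutputStream out = new FileOutputStream(file)) {
            props.store(out, "couchbase-kafka stream state");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void dump(final ConnectorState connectorState, final short partition) {
        dump(connectorState); // coarse-grained: rewrite the whole file
    }

    @Override
    public ConnectorState load(final ConnectorState connectorState) {
        final ConnectorState state = new ConnectorState();
        if (!file.exists()) {
            return state;
        }
        final Properties props = new Properties();
        try (InputStream in = new FileInputStream(file)) {
            props.load(in);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        for (String partition : props.stringPropertyNames()) {
            final String[] parts = props.getProperty(partition).split(",");
            state.put(new StreamState(Short.parseShort(partition),
                    Long.parseLong(parts[0]), Long.parseLong(parts[1])));
        }
        return state;
    }

    @Override
    public StreamState load(final ConnectorState connectorState, final short partition) {
        final StreamState loaded = load(connectorState).get(partition);
        return loaded == null ? new StreamState(partition, 0, 0) : loaded;
    }
}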
--------------------------------------------------------------------------------
/samples/consumer/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>couchbaselabs</groupId>
    <artifactId>kafka-samples-consumer</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>com.couchbase.client</groupId>
            <artifactId>kafka-connector</artifactId>
            <version>2.0.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.10</artifactId>
            <version>0.8.2.0</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.3</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.3</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>example.Example</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/NullStateSerializer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.kafka.CouchbaseKafkaEnvironment;
26 | import com.couchbase.kafka.state.ConnectorState;
27 | import com.couchbase.kafka.state.StateSerializer;
28 | import com.couchbase.kafka.state.StreamState;
29 |
30 | /**
31 | * @author Sergey Avseyev
32 | */
33 | public class NullStateSerializer implements StateSerializer {
34 |
35 | public NullStateSerializer(final CouchbaseKafkaEnvironment environment) {
36 | }
37 |
38 | @Override
39 | public void dump(ConnectorState connectorState) {
40 | }
41 |
42 | @Override
43 | public void dump(ConnectorState connectorState, short partition) {
44 | }
45 |
46 | @Override
47 | public ConnectorState load(ConnectorState connectorState) {
48 | return new ConnectorState();
49 | }
50 |
51 | @Override
52 | public StreamState load(ConnectorState connectorState, short partition) {
53 | return new StreamState(partition, 0, 0);
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
24 | import com.couchbase.kafka.CouchbaseKafkaConnector;
25 | import com.couchbase.kafka.DefaultCouchbaseKafkaEnvironment;
26 |
27 | public class Example {
28 | public static void main(String[] args) {
29 | DefaultCouchbaseKafkaEnvironment.Builder builder =
30 | (DefaultCouchbaseKafkaEnvironment.Builder) DefaultCouchbaseKafkaEnvironment.builder()
31 | .kafkaFilterClass("example.SampleFilter")
32 | .kafkaTopic("default")
33 | .kafkaZookeeperAddress("kafka1.vagrant")
34 | .couchbaseNodes("couchbase1.vagrant")
35 | .couchbaseBucket("default")
36 | .kafkaValueSerializerClass("example.SampleEncoder")
37 | .couchbaseStateSerializerClass("example.NullStateSerializer")
38 | .dcpEnabled(true);
39 | CouchbaseKafkaConnector connector = CouchbaseKafkaConnector.create(builder.build());
40 | connector.run();
41 | }
42 | }
43 |
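Read together with the other classes under samples/producer, this example streams DCP events from the default bucket on couchbase1.vagrant, lets every event through (SampleFilter), serializes mutations to their document key (SampleEncoder), keeps no stream state between runs (NullStateSerializer), and publishes to the default Kafka topic, locating the brokers through the ZooKeeper node on kafka1.vagrant.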
--------------------------------------------------------------------------------
/samples/generator/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
24 | import com.couchbase.client.java.Bucket;
25 | import com.couchbase.client.java.Cluster;
26 | import com.couchbase.client.java.CouchbaseCluster;
27 | import com.couchbase.client.java.document.JsonDocument;
28 | import com.couchbase.client.java.document.json.JsonObject;
29 |
30 | import java.io.BufferedReader;
31 | import java.io.IOException;
32 | import java.io.InputStreamReader;
33 | import java.util.Random;
34 |
35 | /**
36 | * @author Sergey Avseyev
37 | */
38 | public class Example {
39 | public static void main(String[] args) throws IOException {
40 | Random random = new Random();
41 | Cluster cluster = CouchbaseCluster.create("couchbase1.vagrant");
42 | Bucket bucket = cluster.openBucket();
43 |
44 | BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
45 |
46 | String line;
47 | do {
48 | System.out.print("> ");
49 | line = input.readLine();
50 | if (line == null) {
51 | break;
52 | }
53 | String key = "key-" + random.nextInt(10);
54 | JsonObject value = JsonObject.create().put("line", line);
55 | bucket.upsert(JsonDocument.create(key, value));
56 | System.out.printf(">> key=%s, value=%s\n", key, value);
57 | } while (true);
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/samples/producer/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>couchbaselabs</groupId>
    <artifactId>kafka-samples-producer</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>com.couchbase.client</groupId>
            <artifactId>kafka-connector</artifactId>
            <version>2.0.1</version>
        </dependency>
        <dependency>
            <groupId>com.couchbase.client</groupId>
            <artifactId>core-io</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.10</artifactId>
            <version>0.8.2.0</version>
        </dependency>
    </dependencies>

    <repositories>
        <repository>
            <id>couchbase</id>
            <name>couchbase repo</name>
            <url>http://files.couchbase.com/maven2</url>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>
    </repositories>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.3</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.3</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>example.Example</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/state/StreamState.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2016 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.state;
24 |
25 | import com.couchbase.client.core.message.kv.MutationToken;
26 |
27 | /**
28 | * This class represents the state of a DCP stream.
29 | *
30 | * @since 1.3.0
31 | */
32 | public class StreamState {
33 | private final MutationToken token;
34 |
35 | public StreamState(MutationToken token) {
36 | this.token = token;
37 | }
38 |
39 | public StreamState(short partition, long vbucketUUID, long sequenceNumber) {
40 | this.token = new MutationToken(partition, vbucketUUID, sequenceNumber, null);
41 | }
42 |
43 | /**
44 | * A unique identifier that is assigned to each VBucket.
45 | * This number is generated on an unclean shutdown or when a VBucket becomes
46 | * active.
47 | *
48 | * @return the stream vbucketUUID.
49 | */
50 | public long vbucketUUID() {
51 | return token.vbucketUUID();
52 | }
53 |
54 | /**
55 | * Specifies the last sequence number that has been seen by the consumer.
56 | *
57 | * @return the stream last sequence number.
58 | */
59 | public long sequenceNumber() {
60 | return token.sequenceNumber();
61 | }
62 |
63 | /**
64 | * The partition number (vBucket), to which this state belongs.
65 | *
66 | * @return the stream partition number.
67 | */
68 | public short partition() {
69 | return (short) token.vbucketID();
70 | }
71 |
72 | }
73 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/DCPEvent.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.endpoint.dcp.DCPConnection;
26 | import com.couchbase.client.core.message.CouchbaseMessage;
27 | import com.couchbase.client.core.message.dcp.DCPMessage;
28 |
29 |
30 | /**
31 | * A preallocated event which carries a {@link CouchbaseMessage} and associated information.
32 | *
33 | * @author Sergey Avseyev
34 | */
35 |
36 | public class DCPEvent {
37 | /**
38 | * Current message from the stream.
39 | */
40 | private CouchbaseMessage message;
41 |
42 | /**
43 | * DCP connection instance
44 | */
45 | private DCPConnection connection;
46 |
47 | /**
48 | * Set the new message as a payload for this event.
49 | *
50 | * @param message the message to override.
51 | * @return the {@link DCPEvent} for method chaining.
52 | */
53 | public DCPEvent setMessage(final CouchbaseMessage message) {
54 | this.message = message;
55 | return this;
56 | }
57 |
58 | /**
59 | * Get the message from the payload.
60 | *
61 | * @return the actual message.
62 | */
63 | public CouchbaseMessage message() {
64 | return message;
65 | }
66 |
67 |
68 | public void setConnection(DCPConnection connection) {
69 | this.connection = connection;
70 | }
71 |
72 | /**
73 | * Get the associated DCP connection object.
74 | *
75 | * @return connection.
76 | */
77 | public DCPConnection connection() {
78 | return connection;
79 | }
80 |
81 | /**
82 | * Extract key from the payload.
83 | *
84 | * @return the key of message or null.
85 | */
86 | public String key() {
87 | if (message instanceof DCPMessage) {
88 | return ((DCPMessage) message).key();
89 | } else {
90 | return null;
91 | }
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/CouchbaseKafkaEnvironment.java:
--------------------------------------------------------------------------------
1 | package com.couchbase.kafka;
2 |
3 | import com.couchbase.client.core.env.CoreEnvironment;
4 |
5 | import java.util.List;
6 |
7 | /**
8 | * A {@link CouchbaseKafkaEnvironment} provides the settings related to the Kafka connection, in addition to all the core building blocks
9 | * like environment settings and thread pools inherited from {@link CoreEnvironment}, so
10 | * that the application can work with it properly.
11 | *
12 | * This interface defines the contract. How properties are loaded is chosen by the implementation. See the
13 | * {@link DefaultCouchbaseKafkaEnvironment} class for the default implementation.
14 | *
15 | * Note that the {@link CouchbaseKafkaEnvironment} is stateful, so be sure to call {@link CoreEnvironment#shutdown()}
16 | * properly.
17 | *
18 | * @author Sergey Avseyev
19 | */
20 | public interface CouchbaseKafkaEnvironment extends CoreEnvironment {
21 | /**
22 | * Full name of the class used to encode objects to byte[] to store in Kafka. It has to implement
23 | * {@link kafka.serializer.Encoder} parametrized with DCPEvent.
24 | *
25 | * @return class name of encoder
26 | */
27 | String kafkaValueSerializerClass();
28 |
29 | /**
30 | * Full name of the class used to encode object keys to byte[] to store in Kafka. It has to implement
31 | * {@link kafka.serializer.Encoder} parametrized with String.
32 | *
33 | * @return class name of encoder
34 | */
35 | String kafkaKeySerializerClass();
36 |
37 | /**
38 | * Full name of the class used to filter the data stream from Couchbase. It has to implement
39 | * {@link com.couchbase.kafka.filter.Filter}.
40 | *
41 | * @return class name of filter
42 | */
43 | String kafkaFilterClass();
44 |
45 | /**
46 | * Returns the size of the events ringbuffer.
47 | *
48 | * @return the size of the ringbuffer.
49 | */
50 | int kafkaEventBufferSize();
51 |
52 | /**
53 | * Full name of the class used to serialize the state of the Couchbase streams. It has to
54 | * implement {@link com.couchbase.kafka.state.StateSerializer}.
55 | *
56 | * @return class name of the serializer
57 | */
58 | String couchbaseStateSerializerClass();
59 |
60 | /**
61 | * Minimum time between dumping the state
62 | *
63 | * @return time in milliseconds
64 | */
65 | long couchbaseStateSerializationThreshold();
66 |
67 | /**
68 | * List of Couchbase nodes used to connect.
69 | *
70 | * @return list of node addresses
71 | */
72 | List<String> couchbaseNodes();
73 |
74 | /**
75 | * Name of the bucket in Couchbase.
76 | *
77 | * @return name of the bucket
78 | */
79 | String couchbaseBucket();
80 |
81 | /**
82 | * Password if the bucket is protected.
83 | *
84 | * @return couchbase password.
85 | */
86 | String couchbasePassword();
87 |
88 | /**
89 | * Zookeeper address to pass into kafka client.
90 | *
91 | * @return zookeeper node address.
92 | */
93 | String kafkaZookeeperAddress();
94 |
95 | /**
96 | * Kafka topic to post events.
97 | *
98 | * @return kafka topic name.
99 | */
100 | String kafkaTopic();
101 |
102 | /**
103 | * The default timeout for connect operations, set to {@link DefaultCouchbaseKafkaEnvironment#CONNECT_TIMEOUT}.
104 | *
105 | * @return the default connect timeout.
106 | */
107 | long connectTimeout();
108 | }
109 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/state/ConnectorState.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2016 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.state;
24 |
25 | import rx.Observable;
26 | import rx.subjects.PublishSubject;
27 | import rx.subjects.Subject;
28 |
29 | import java.util.HashMap;
30 | import java.util.Iterator;
31 | import java.util.Map;
32 |
33 | /**
34 | * Implements the state of the {@link com.couchbase.kafka.CouchbaseKafkaConnector} instance.
35 | */
36 | public class ConnectorState implements Iterable<StreamState> {
37 | private final Map<Short, StreamState> streams;
38 | private final Subject<StreamStateUpdatedEvent, StreamStateUpdatedEvent> updates =
39 | PublishSubject.<StreamStateUpdatedEvent>create().toSerialized();
40 |
41 | public ConnectorState() {
42 | this.streams = new HashMap<Short, StreamState>(1024);
43 | }
44 |
45 | @Override
46 | public Iterator<StreamState> iterator() {
47 | return streams.values().iterator();
48 | }
49 |
50 | /**
51 | * Set/update the stream state
52 | *
53 | * @param streamState new state for stream
54 | */
55 | public void put(StreamState streamState) {
56 | streams.put(streamState.partition(), streamState);
57 | }
58 |
59 | /**
60 | * Returns the stream state.
61 | *
62 | * @param partition partition of the stream.
63 | * @return the state associated or null
64 | */
65 | public StreamState get(short partition) {
66 | return streams.get(partition);
67 | }
68 |
69 | public short[] partitions() {
70 | short[] partitions = new short[streams.size()];
71 | int i = 0;
72 | for (Short partition : streams.keySet()) {
73 | partitions[i++] = partition;
74 | }
75 | return partitions;
76 | }
77 |
78 | public ConnectorState clone() {
79 | ConnectorState newState = new ConnectorState();
80 | for (Map.Entry<Short, StreamState> entry : streams.entrySet()) {
81 | newState.streams.put(entry.getKey(), entry.getValue());
82 | }
83 | return newState;
84 | }
85 |
86 | public void update(short partition, long sequenceNumber) {
87 | StreamState state = streams.get(partition);
88 | streams.put(partition,
89 | new StreamState(partition, state.vbucketUUID(), Math.max(state.sequenceNumber(), sequenceNumber)));
90 | updates.onNext(new StreamStateUpdatedEvent(this, partition));
91 | }
92 |
93 | public Observable<StreamStateUpdatedEvent> updates() {
94 | return updates;
95 | }
96 | }
97 |
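A small usage sketch of the updates() stream above (illustrative only; the class name and the numbers are invented), subscribing to per-partition changes the way a state serializer might in order to decide when to dump:

import com.couchbase.kafka.state.ConnectorState;
import com.couchbase.kafka.state.StreamState;
import com.couchbase.kafka.state.StreamStateUpdatedEvent;
import rx.functions.Action1;

public class StateWatchExample {
    public static void main(String[] args) {
        ConnectorState state = new ConnectorState();
        state.updates().subscribe(new Action1<StreamStateUpdatedEvent>() {
            @Override
            public void call(final StreamStateUpdatedEvent event) {
                StreamState stream = event.connectorState().get(event.partition());
                System.out.println("partition " + event.partition()
                        + " now at sequence number " + stream.sequenceNumber());
            }
        });
        state.put(new StreamState((short) 0, 1234L, 0L)); // register partition 0
        state.update((short) 0, 42L);                     // advance its sequence number
    }
}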
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/KafkaWriter.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.message.dcp.DCPMessage;
26 | import com.couchbase.client.core.message.dcp.MutationMessage;
27 | import com.couchbase.client.deps.com.lmax.disruptor.EventHandler;
28 | import com.couchbase.kafka.filter.Filter;
29 | import kafka.javaapi.producer.Producer;
30 | import kafka.producer.KeyedMessage;
31 |
32 | /**
33 | * {@link KafkaWriter} is in charge of filtering and routing events to the Kafka cluster.
34 | *
35 | * @author Sergey Avseyev
36 | */
37 | public class KafkaWriter implements EventHandler<DCPEvent> {
38 |
39 | private final Producer<String, DCPEvent> producer;
40 | private final String topic;
41 | private final Filter filter;
42 |
43 | /**
44 | * Creates a new {@link KafkaWriter}.
45 | *
46 | * @param environment the environment object, which carries settings.
47 | * @param producer the kafka producer object.
48 | * @param filter the filter to select events to publish.
49 | */
50 | public KafkaWriter(final CouchbaseKafkaEnvironment environment, final Producer<String, DCPEvent> producer, final Filter filter) {
51 | this(environment.kafkaTopic(), environment, producer, filter);
52 | }
53 |
54 | /**
55 | * Creates a new {@link KafkaWriter}.
56 | *
57 | * @param kafkaTopic name of Kafka topic to override {@link CouchbaseKafkaEnvironment#kafkaTopic()}.
58 | * @param environment the environment object, which carries settings.
59 | * @param producer the kafka producer object.
60 | * @param filter the filter to select events to publish.
61 | */
62 | public KafkaWriter(final String kafkaTopic, final CouchbaseKafkaEnvironment environment, final Producer<String, DCPEvent> producer, final Filter filter) {
63 | this.topic = kafkaTopic;
64 | this.producer = producer;
65 | this.filter = filter;
66 | }
67 |
68 | /**
69 | * Handles {@link DCPEvent}s that come into the response RingBuffer.
70 | */
71 | @Override
72 | public void onEvent(final DCPEvent event, final long sequence, final boolean endOfBatch) throws Exception {
73 | try {
74 | if (filter.pass(event)) {
75 | KeyedMessage<String, DCPEvent> payload =
76 | new KeyedMessage<String, DCPEvent>(topic, event.key(), event);
77 | producer.send(payload);
78 | }
79 | } finally {
80 | if (event.message() instanceof MutationMessage) {
81 | MutationMessage mutation = (MutationMessage) event.message();
82 | mutation.content().release();
83 | }
84 | if (event.message() instanceof DCPMessage) {
85 | event.connection().consumed((DCPMessage) event.message());
86 | }
87 | }
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/coder/JsonEncoder.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.coder;
24 |
25 | import com.couchbase.client.core.message.dcp.MutationMessage;
26 | import com.couchbase.client.core.message.dcp.RemoveMessage;
27 | import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonParseException;
28 | import com.couchbase.client.deps.com.fasterxml.jackson.databind.ObjectMapper;
29 | import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode;
30 | import com.couchbase.client.deps.io.netty.util.CharsetUtil;
31 | import com.couchbase.kafka.DCPEvent;
32 | import kafka.utils.VerifiableProperties;
33 | import org.slf4j.Logger;
34 | import org.slf4j.LoggerFactory;
35 |
36 | import java.io.IOException;
37 |
38 |
39 | /**
40 | * The {@link JsonEncoder} converts events from Couchbase to JSON.
41 | *
42 | * If the document body looks like JSON, it inserts it as a sub-tree of the resulting object,
43 | * otherwise, it puts it as a String.
44 | *
45 | * @author Sergey Avseyev
46 | */
47 | public class JsonEncoder extends AbstractEncoder {
48 | private static final ObjectMapper MAPPER = new ObjectMapper();
49 | private static final Logger LOGGER = LoggerFactory.getLogger(JsonEncoder.class);
50 |
51 | public JsonEncoder(final VerifiableProperties properties) {
52 | super(properties);
53 | }
54 |
55 | /**
56 | * Encodes {@link DCPEvent} to JSON object.
57 | *
58 | * @param value event from Couchbase.
59 | * @return JSON object in form of byte array.
60 | */
61 | @Override
62 | public byte[] toBytes(final DCPEvent value) {
63 | try {
64 | ObjectNode message = MAPPER.createObjectNode();
65 | if (value.message() instanceof MutationMessage) {
66 | MutationMessage mutation = (MutationMessage) value.message();
67 | message.put("event", "mutation");
68 | message.put("key", mutation.key());
69 | message.put("expiration", mutation.expiration());
70 | message.put("flags", mutation.flags());
71 | message.put("cas", mutation.cas());
72 | message.put("lockTime", mutation.lockTime());
73 | message.put("bySeqno", mutation.bySequenceNumber());
74 | message.put("revSeqno", mutation.revisionSequenceNumber());
75 | try {
76 | message.set("content", MAPPER.readTree(mutation.content().toString(CharsetUtil.UTF_8)));
77 | } catch (JsonParseException e) {
78 | message.put("content", mutation.content().toString(CharsetUtil.UTF_8));
79 | }
80 | } else if (value.message() instanceof RemoveMessage) {
81 | RemoveMessage mutation = (RemoveMessage) value.message();
82 | message.put("event", "removal");
83 | message.put("key", mutation.key());
84 | message.put("cas", mutation.cas());
85 | message.put("bySeqno", mutation.bySequenceNumber());
86 | message.put("revSeqno", mutation.revisionSequenceNumber());
87 | }
88 | return message.toString().getBytes();
89 | } catch (IOException ex) {
90 | LOGGER.warn("Error while encoding DCP message", ex);
91 | }
92 | return new byte[]{};
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/OLD-README.md:
--------------------------------------------------------------------------------
1 | # This project is obsolete
2 |
3 | This project is superseded by [kafka-connect-couchbase](https://github.com/couchbase/kafka-connect-couchbase)
4 | which integrates with the Kafka Connect framework.
5 |
6 |
7 | # Couchbase Kafka Connector
8 |
9 | Welcome to the official Couchbase Kafka connector! It provides functionality to direct a stream of events from Couchbase
10 | Server to Kafka.
11 |
12 | You can read the quickstart guide below or consult the documentation here: http://developer.couchbase.com/documentation/server/4.1/connectors/kafka-2.0/kafka-intro.html
13 |
14 | The issue tracker can be found at [https://issues.couchbase.com/browse/KAFKAC](https://issues.couchbase.com/browse/KAFKAC).
15 |
16 | ## Quickstart
17 |
18 | A sample `build.gradle`:
19 |
20 | ```groovy
21 | apply plugin: 'java'
22 |
23 | repositories {
24 | mavenCentral()
25 | maven { url "http://files.couchbase.com/maven2" }
26 | mavenLocal()
27 | }
28 |
29 | dependencies {
30 | compile(group: 'com.couchbase.client', name: 'kafka-connector', version: '2.0.1')
31 | }
32 | ```
33 |
34 | Using the library is straightforward. Let's say we would like to receive every modification from Couchbase Server
35 | and send only the document body to Kafka (by default the connector serializes the document body and its metadata to JSON). To achieve that, you need to define a filter class that allows only instances of `MutationMessage` to pass through:
36 |
37 | ```java
38 | package example;
39 |
40 | import com.couchbase.client.core.message.dcp.MutationMessage;
41 | import com.couchbase.kafka.DCPEvent;
42 | import com.couchbase.kafka.filter.Filter;
43 |
44 | public class SampleFilter implements Filter {
45 | @Override
46 | public boolean pass(final DCPEvent dcpEvent) {
47 | return dcpEvent.message() instanceof MutationMessage;
48 | }
49 | }
50 | ```
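
A filter is just a predicate over `DCPEvent`, so it can also look inside the message. As a purely illustrative sketch (the class name and the `user::` key prefix are hypothetical, not part of the connector), a filter that forwards only mutations of documents with that key prefix could look like this:

```java
package example;

import com.couchbase.client.core.message.dcp.MutationMessage;
import com.couchbase.kafka.DCPEvent;
import com.couchbase.kafka.filter.Filter;

public class UserDocumentFilter implements Filter {
    @Override
    public boolean pass(final DCPEvent dcpEvent) {
        // Forward only mutations whose document keys use the (hypothetical) "user::" prefix.
        return dcpEvent.message() instanceof MutationMessage
                && ((MutationMessage) dcpEvent.message()).key().startsWith("user::");
    }
}
```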
51 |
52 | You also need an encoder class, which takes the document value and converts it to a byte array:
53 |
54 | ```java
55 | package example;
56 |
57 | import com.couchbase.client.core.message.dcp.MutationMessage;
58 | import com.couchbase.client.deps.io.netty.util.CharsetUtil;
59 | import com.couchbase.kafka.DCPEvent;
60 | import com.couchbase.kafka.coder.AbstractEncoder;
61 | import kafka.utils.VerifiableProperties;
62 |
63 | public class SampleEncoder extends AbstractEncoder {
64 | public SampleEncoder(final VerifiableProperties properties) {
65 | super(properties);
66 | }
67 |
68 | @Override
69 | public byte[] toBytes(final DCPEvent dcpEvent) {
70 | MutationMessage message = (MutationMessage)dcpEvent.message();
71 | return message.content().toString(CharsetUtil.UTF_8).getBytes();
72 | }
73 | }
74 | ```
75 |
76 | That is essentially enough to set up a Couchbase-Kafka bridge:
77 |
78 | ```java
79 | package example;
80 |
81 | import com.couchbase.kafka.CouchbaseKafkaConnector;
82 | import com.couchbase.kafka.CouchbaseKafkaEnvironment;
83 | import com.couchbase.kafka.DefaultCouchbaseKafkaEnvironment;
84 |
85 | public class Example {
86 | public static void main(String[] args) {
87 | DefaultCouchbaseKafkaEnvironment.Builder builder =
88 | (DefaultCouchbaseKafkaEnvironment.Builder) DefaultCouchbaseKafkaEnvironment
89 | .builder()
90 | .kafkaFilterClass("example.SampleFilter")
91 | .kafkaValueSerializerClass("example.SampleEncoder")
92 | .kafkaTopic("default")
93 | .kafkaZookeeperAddress("kafka1.vagrant")
94 | .couchbaseNodes("couchbase1.vagrant")
95 | .couchbaseBucket("default")
96 | .dcpEnabled(true);
97 | CouchbaseKafkaConnector connector = CouchbaseKafkaConnector.create(builder.build());
98 | connector.run();
99 | }
100 | }
101 | ```
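
Note that `connector.run()` blocks the calling thread while it relays events. If the bridge has to run alongside other work, a minimal sketch (assuming the `connector` variable from the example above is declared `final` so the anonymous `Runnable` can capture it) is to start it on a dedicated thread:

```java
final CouchbaseKafkaConnector connector = CouchbaseKafkaConnector.create(builder.build());
Thread bridge = new Thread(new Runnable() {
    @Override
    public void run() {
        connector.run(); // streams Couchbase mutations into the configured Kafka topic
    }
}, "couchbase-kafka-bridge");
bridge.start();
```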
102 |
103 | It is also possible to start with some known state or to watch a limited set of partitions. The example below will stream
104 | only partition 115 starting from the beginning (see also `currentState()` and `loadState()` helpers).
105 |
106 | ```java
107 | ConnectorState startState = connector.startState(115);
108 | ConnectorState endState = connector.endState(115);
109 | connector.run(startState, endState);
110 | ```
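
The connector persists stream state through the configured state serializer (`com.couchbase.kafka.state.ZookeeperStateSerializer` by default), so a later run can pick up where the previous one stopped. A minimal resume sketch, assuming `loadState()` returns the last dumped state and `currentState()` reflects the sequence numbers currently on the cluster:

```java
// Hypothetical resume flow; the exact signatures of the helpers are assumed.
ConnectorState fromState = connector.loadState();   // state previously dumped by the StateSerializer
ConnectorState toState = connector.currentState();  // snapshot of the cluster right now
connector.run(fromState, toState);
```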
111 |
112 | The `couchbase1.vagrant` and `kafka1.vagrant` addresses above are the locations of Couchbase Server and Kafka respectively,
113 | which can easily be set up using the provisioning scripts in the `env/` directory. Just navigate there and run `vagrant up`.
114 | The Vagrant scripts use Ansible for provisioning ([installation guide](http://docs.ansible.com/intro_installation.html)).
115 |
116 | ## License
117 |
118 | Copyright 2015 Couchbase Inc.
119 |
120 | Licensed under the Apache License, Version 2.0.
121 |
122 | See [the Apache 2.0 license](http://www.apache.org/licenses/LICENSE-2.0).
123 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/state/ZookeeperStateSerializer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka.state;
24 |
25 | import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode;
26 | import com.couchbase.client.deps.com.fasterxml.jackson.databind.ObjectMapper;
27 | import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode;
28 | import com.couchbase.kafka.CouchbaseKafkaEnvironment;
29 | import kafka.utils.ZKStringSerializer$;
30 | import org.I0Itec.zkclient.ZkClient;
31 | import org.slf4j.Logger;
32 | import org.slf4j.LoggerFactory;
33 |
34 | import java.io.IOException;
35 |
36 | /**
37 | * @author Sergey Avseyev
38 | */
39 | public class ZookeeperStateSerializer implements StateSerializer {
40 | private static final ObjectMapper MAPPER = new ObjectMapper();
41 | private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperStateSerializer.class);
42 | private final ZkClient zkClient;
43 | private final String bucket;
44 | private final long stateSerializationThreshold;
45 | private long updatedAt = 0;
46 |
47 | public ZookeeperStateSerializer(final CouchbaseKafkaEnvironment environment) {
48 | this.zkClient = new ZkClient(environment.kafkaZookeeperAddress(), 4000, 6000, ZKStringSerializer$.MODULE$);
49 | this.bucket = environment.couchbaseBucket();
50 | this.stateSerializationThreshold = environment.couchbaseStateSerializationThreshold();
51 | }
52 |
53 |
54 | @Override
55 | public void dump(final ConnectorState connectorState) {
56 | long now = System.currentTimeMillis();
57 | if (now - updatedAt > stateSerializationThreshold) {
58 | for (StreamState streamState : connectorState) {
59 | writeState(streamState);
60 | }
61 | updatedAt = now;
62 | }
63 | }
64 |
65 | @Override
66 | public void dump(final ConnectorState connectorState, final short partition) {
67 | long now = System.currentTimeMillis();
68 | if (now - updatedAt > stateSerializationThreshold) {
69 | final StreamState streamState = connectorState.get(partition);
70 | writeState(streamState);
71 | updatedAt = now;
72 | }
73 | }
74 |
75 | @Override
76 | public ConnectorState load(final ConnectorState connectorState) {
77 | for (StreamState streamState : connectorState) {
78 | StreamState newState = load(connectorState, streamState.partition());
79 | if (newState != null) {
80 | connectorState.put(newState);
81 | }
82 | }
83 | return connectorState;
84 | }
85 |
86 | @Override
87 | public StreamState load(final ConnectorState connectorState, final short partition) {
88 | String json = zkClient.readData(pathForState(partition), true);
89 | if (json == null) {
90 | return null;
91 | }
92 | try {
93 | JsonNode tree = MAPPER.readTree(json);
94 | return new StreamState(
95 | partition,
96 | tree.get("vbucketUUID").asLong(0),
97 | tree.get("sequenceNumber").asLong(0)
98 | );
99 | } catch (IOException ex) {
100 | LOGGER.warn("Error while decoding state", ex);
101 | return null;
102 | }
103 | }
104 |
105 | private String pathForState(final short partition) {
106 | return String.format("/couchbase-kafka-connector2/%s/%d", bucket, partition);
107 | }
108 |
109 | private void writeState(final StreamState streamState) {
110 | ObjectNode json = MAPPER.createObjectNode();
111 | json.put("vbucketUUID", streamState.vbucketUUID());
112 | json.put("sequenceNumber", streamState.sequenceNumber());
113 | zkClient.createPersistent(pathForState(streamState.partition()), true);
114 | zkClient.writeData(pathForState(streamState.partition()), json.toString());
115 | }
116 | }
117 |
--------------------------------------------------------------------------------
/src/test/java/com/couchbase/kafka/CouchbaseProducerTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.env.DefaultCoreEnvironment;
26 | import com.couchbase.client.java.AsyncCluster;
27 | import com.couchbase.client.java.Bucket;
28 | import com.couchbase.client.java.CouchbaseCluster;
29 | import com.couchbase.client.java.bucket.BucketManager;
30 | import com.couchbase.client.java.bucket.BucketType;
31 | import com.couchbase.client.java.cluster.ClusterManager;
32 | import com.couchbase.client.java.cluster.DefaultBucketSettings;
33 | import com.couchbase.client.java.document.Document;
34 | import com.couchbase.client.java.document.StringDocument;
35 | import kafka.admin.AdminUtils;
36 | import kafka.javaapi.producer.Producer;
37 | import kafka.producer.KeyedMessage;
38 | import kafka.producer.ProducerConfig;
39 | import kafka.server.KafkaConfig;
40 | import kafka.server.KafkaServer;
41 | import kafka.utils.MockTime;
42 | import kafka.utils.TestUtils;
43 | import kafka.utils.TestZKUtils;
44 | import kafka.utils.Time;
45 | import kafka.utils.ZKStringSerializer$;
46 | import kafka.zk.EmbeddedZookeeper;
47 | import org.I0Itec.zkclient.ZkClient;
48 | import org.junit.After;
49 | import org.junit.Before;
50 | import org.junit.Test;
51 |
52 | import java.util.ArrayList;
53 | import java.util.List;
54 | import java.util.Properties;
55 |
56 |
57 | /**
58 | * @author Sergey Avseyev
59 | */
60 | public class CouchbaseProducerTest {
61 | /*
62 | private int brokerId = 0;
63 | private String topicName = "test";
64 | private String bucketName = "kafka";
65 | private int brokerPort;
66 | private KafkaServer kafkaServer;
67 | private ZkClient zkClient;
68 | private EmbeddedZookeeper zkServer;
69 | private Bucket bucket;
70 | private KafkaEnvironment env;
71 |
72 | @Before
73 | public void setup() {
74 | // setup Zookeeper
75 | String zkConnect = TestZKUtils.zookeeperConnect();
76 | zkServer = new EmbeddedZookeeper(zkConnect);
77 | zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
78 |
79 | // setup Broker
80 | brokerPort = TestUtils.choosePort();
81 | Properties props = TestUtils.createBrokerConfig(brokerId, brokerPort, false);
82 |
83 | KafkaConfig config = new KafkaConfig(props);
84 | Time mock = new MockTime();
85 | kafkaServer = TestUtils.createServer(config, mock);
86 |
87 | // create topicName
88 | AdminUtils.createTopic(zkClient, topicName, 1, 1, new Properties());
89 |
90 | List servers = new ArrayList();
91 | servers.add(kafkaServer);
92 | TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topicName, 0, 5000);
93 |
94 | CouchbaseCluster cluster = CouchbaseCluster.create();
95 | ClusterManager clusterManager = cluster.clusterManager("Administrator", "password");
96 | boolean exists = clusterManager.hasBucket(bucketName);
97 |
98 | if (!exists) {
99 | clusterManager.insertBucket(DefaultBucketSettings
100 | .builder()
101 | .name(bucketName)
102 | .quota(256)
103 | .enableFlush(true)
104 | .type(BucketType.COUCHBASE)
105 | .build());
106 | }
107 |
108 | bucket = cluster.openBucket(bucketName, "");
109 | BucketManager bucketManager = bucket.bucketManager();
110 | bucketManager.flush();
111 |
112 | env = DefaultKafkaEnvironment.builder()
113 | .dcpEnabled(true)
114 | .build();
115 | }
116 |
117 | @After
118 | public void teardown() {
119 | kafkaServer.shutdown();
120 | zkClient.close();
121 | zkServer.shutdown();
122 | }
123 |
124 | @Test
125 | public void producerTest() throws InterruptedException {
126 | List couchbaseNodes = new ArrayList<>();
127 | couchbaseNodes.add("localhost:8091");
128 | CouchbaseProducer producer = new CouchbaseProducer(
129 | couchbaseNodes,
130 | "default",
131 | topicName,
132 | zkServer.connectString(),
133 | env
134 | );
135 |
136 | bucket.insert(StringDocument.create("foo", "bar"));
137 | producer.run();
138 | }
139 | */
140 | }
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 | DEFAULT_JVM_OPTS=""
11 |
12 | APP_NAME="Gradle"
13 | APP_BASE_NAME=`basename "$0"`
14 |
15 | # Use the maximum available, or set MAX_FD != -1 to use that value.
16 | MAX_FD="maximum"
17 |
18 | warn ( ) {
19 | echo "$*"
20 | }
21 |
22 | die ( ) {
23 | echo
24 | echo "$*"
25 | echo
26 | exit 1
27 | }
28 |
29 | # OS specific support (must be 'true' or 'false').
30 | cygwin=false
31 | msys=false
32 | darwin=false
33 | case "`uname`" in
34 | CYGWIN* )
35 | cygwin=true
36 | ;;
37 | Darwin* )
38 | darwin=true
39 | ;;
40 | MINGW* )
41 | msys=true
42 | ;;
43 | esac
44 |
45 | # For Cygwin, ensure paths are in UNIX format before anything is touched.
46 | if $cygwin ; then
47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
48 | fi
49 |
50 | # Attempt to set APP_HOME
51 | # Resolve links: $0 may be a link
52 | PRG="$0"
53 | # Need this for relative symlinks.
54 | while [ -h "$PRG" ] ; do
55 | ls=`ls -ld "$PRG"`
56 | link=`expr "$ls" : '.*-> \(.*\)$'`
57 | if expr "$link" : '/.*' > /dev/null; then
58 | PRG="$link"
59 | else
60 | PRG=`dirname "$PRG"`"/$link"
61 | fi
62 | done
63 | SAVED="`pwd`"
64 | cd "`dirname \"$PRG\"`/" >&-
65 | APP_HOME="`pwd -P`"
66 | cd "$SAVED" >&-
67 |
68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
69 |
70 | # Determine the Java command to use to start the JVM.
71 | if [ -n "$JAVA_HOME" ] ; then
72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
73 | # IBM's JDK on AIX uses strange locations for the executables
74 | JAVACMD="$JAVA_HOME/jre/sh/java"
75 | else
76 | JAVACMD="$JAVA_HOME/bin/java"
77 | fi
78 | if [ ! -x "$JAVACMD" ] ; then
79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
80 |
81 | Please set the JAVA_HOME variable in your environment to match the
82 | location of your Java installation."
83 | fi
84 | else
85 | JAVACMD="java"
86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
87 |
88 | Please set the JAVA_HOME variable in your environment to match the
89 | location of your Java installation."
90 | fi
91 |
92 | # Increase the maximum file descriptors if we can.
93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
94 | MAX_FD_LIMIT=`ulimit -H -n`
95 | if [ $? -eq 0 ] ; then
96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
97 | MAX_FD="$MAX_FD_LIMIT"
98 | fi
99 | ulimit -n $MAX_FD
100 | if [ $? -ne 0 ] ; then
101 | warn "Could not set maximum file descriptor limit: $MAX_FD"
102 | fi
103 | else
104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
105 | fi
106 | fi
107 |
108 | # For Darwin, add options to specify how the application appears in the dock
109 | if $darwin; then
110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
111 | fi
112 |
113 | # For Cygwin, switch paths to Windows format before running java
114 | if $cygwin ; then
115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
158 | function splitJvmOpts() {
159 | JVM_OPTS=("$@")
160 | }
161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
163 |
164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
165 |
--------------------------------------------------------------------------------
/env/roles/kafka/templates/server.properties.j2:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # see kafka.server.KafkaConfig for additional details and defaults
16 |
17 | ############################# Server Basics #############################
18 |
19 | # The id of the broker. This must be set to a unique integer for each broker.
20 | broker.id=0
21 |
22 | ############################# Socket Server Settings #############################
23 |
24 | # The port the socket server listens on
25 | port=9092
26 |
27 | # Hostname the broker will bind to. If not set, the server will bind to all interfaces
28 | #host.name=localhost
29 |
30 | # Hostname the broker will advertise to producers and consumers. If not set, it uses the
31 | # value for "host.name" if configured. Otherwise, it will use the value returned from
32 | # java.net.InetAddress.getCanonicalHostName().
33 | #advertised.host.name=
34 |
35 | # The port to publish to ZooKeeper for clients to use. If this is not set,
36 | # it will publish the same port that the broker binds to.
37 | #advertised.port=
38 |
39 | # The number of threads handling network requests
40 | num.network.threads=2
41 |
42 | # The number of threads doing disk I/O
43 | num.io.threads=8
44 |
45 | # The send buffer (SO_SNDBUF) used by the socket server
46 | socket.send.buffer.bytes=1048576
47 |
48 | # The receive buffer (SO_RCVBUF) used by the socket server
49 | socket.receive.buffer.bytes=1048576
50 |
51 | # The maximum size of a request that the socket server will accept (protection against OOM)
52 | socket.request.max.bytes=104857600
53 |
54 |
55 | ############################# Log Basics #############################
56 |
57 | # A comma separated list of directories under which to store log files
58 | log.dirs=/tmp/kafka-logs
59 |
60 | # The default number of log partitions per topic. More partitions allow greater
61 | # parallelism for consumption, but this will also result in more files across
62 | # the brokers.
63 | num.partitions=1
64 |
65 | ############################# Log Flush Policy #############################
66 |
67 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
68 | # the OS cache lazily. The following configurations control the flush of data to disk.
69 | # There are a few important trade-offs here:
70 | # 1. Durability: Unflushed data may be lost if you are not using replication.
71 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
72 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
73 | # The settings below allow one to configure the flush policy to flush data after a period of time or
74 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
75 |
76 | # The number of messages to accept before forcing a flush of data to disk
77 | #log.flush.interval.messages=10000
78 |
79 | # The maximum amount of time a message can sit in a log before we force a flush
80 | #log.flush.interval.ms=1000
81 |
82 | ############################# Log Retention Policy #############################
83 |
84 | # The following configurations control the disposal of log segments. The policy can
85 | # be set to delete segments after a period of time, or after a given size has accumulated.
86 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
87 | # from the end of the log.
88 |
89 | # The minimum age of a log file to be eligible for deletion
90 | log.retention.hours=168
91 |
92 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
93 | # segments don't drop below log.retention.bytes.
94 | #log.retention.bytes=1073741824
95 |
96 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
97 | log.segment.bytes=536870912
98 |
99 | # The interval at which log segments are checked to see if they can be deleted according
100 | # to the retention policies
101 | log.retention.check.interval.ms=60000
102 |
103 | # By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
104 | # If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
105 | log.cleaner.enable=false
106 |
107 | ############################# Zookeeper #############################
108 |
109 | # Zookeeper connection string (see zookeeper docs for details).
110 | # This is a comma separated host:port pairs, each corresponding to a zk
111 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
112 | # You can also append an optional chroot string to the urls to specify the
113 | # root directory for all kafka znodes.
114 | zookeeper.connect=localhost:2181
115 |
116 | # Timeout in ms for connecting to zookeeper
117 | zookeeper.connection.timeout.ms=1000000
118 | auto.create.topics.enable=true
119 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/AbstractConsumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import kafka.api.FetchRequest;
26 | import kafka.api.FetchRequestBuilder;
27 | import kafka.api.PartitionOffsetRequestInfo;
28 | import kafka.common.ErrorMapping;
29 | import kafka.common.TopicAndPartition;
30 | import kafka.javaapi.FetchResponse;
31 | import kafka.javaapi.OffsetResponse;
32 | import kafka.javaapi.PartitionMetadata;
33 | import kafka.javaapi.TopicMetadata;
34 | import kafka.javaapi.TopicMetadataRequest;
35 | import kafka.javaapi.consumer.SimpleConsumer;
36 | import kafka.message.MessageAndOffset;
37 | import org.slf4j.Logger;
38 | import org.slf4j.LoggerFactory;
39 |
40 | import java.nio.ByteBuffer;
41 | import java.util.ArrayList;
42 | import java.util.Arrays;
43 | import java.util.Collections;
44 | import java.util.HashMap;
45 | import java.util.List;
46 | import java.util.Map;
47 |
48 | /**
49 | * @author Sergey Avseyev
50 | */
51 | public abstract class AbstractConsumer {
52 | private static Logger LOGGER = LoggerFactory.getLogger(AbstractConsumer.class);
53 |
54 | private final List<String> seedBrokers;
55 | private final int port;
56 | private List<String> replicaBrokers = new ArrayList<String>();
57 |
58 | public AbstractConsumer(String seedBroker, int port) {
59 | this(new String[]{seedBroker}, port);
60 | }
61 |
62 | public AbstractConsumer(String[] seedBrokers, int port) {
63 | this.replicaBrokers = new ArrayList<String>();
64 | this.seedBrokers = Arrays.asList(seedBrokers);
65 | this.port = port;
66 | }
67 |
68 | public abstract void handleMessage(long offset, byte[] bytes);
69 |
70 | public void run(String topic, int partition) {
71 | // find the meta data about the topic and partition we are interested in
72 | PartitionMetadata metadata = findLeader(seedBrokers, port, topic, partition);
73 | if (metadata == null) {
74 | LOGGER.error("Can't find metadata for Topic and Partition");
75 | return;
76 | }
77 | if (metadata.leader() == null) {
78 | LOGGER.error("Can't find Leader for Topic and Partition");
79 | return;
80 | }
81 | String leadBroker = metadata.leader().host();
82 | String clientName = "Client_" + topic + "_" + partition;
83 |
84 | SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
85 |
86 | long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
87 |
88 | int numErrors = 0;
89 | while (true) {
90 | if (consumer == null) {
91 | consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
92 | }
93 | FetchRequest req = new FetchRequestBuilder()
94 | .clientId(clientName)
95 | .addFetch(topic, partition, readOffset, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
96 | .build();
97 | FetchResponse fetchResponse = consumer.fetch(req);
98 |
99 | if (fetchResponse.hasError()) {
100 | numErrors++;
101 | short code = fetchResponse.errorCode(topic, partition);
102 |
103 | LOGGER.error("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
104 | if (numErrors > 5) break;
105 | if (code == ErrorMapping.OffsetOutOfRangeCode()) {
106 | // We asked for an invalid offset. For simple case ask for the last element to reset
107 | readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
108 | continue;
109 | }
110 | consumer.close();
111 | consumer = null;
112 | leadBroker = findNewLeader(leadBroker, topic, partition, port);
113 | continue;
114 | }
115 | numErrors = 0;
116 |
117 | long numRead = 0;
118 | for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
119 | long currentOffset = messageAndOffset.offset();
120 | if (currentOffset < readOffset) {
121 | LOGGER.error("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
122 | continue;
123 | }
124 | readOffset = messageAndOffset.nextOffset();
125 | ByteBuffer payload = messageAndOffset.message().payload();
126 |
127 | byte[] bytes = new byte[payload.limit()];
128 | payload.get(bytes);
129 |
130 | handleMessage(messageAndOffset.offset(), bytes);
131 | numRead++;
132 | }
133 |
134 | if (numRead == 0) {
135 | try {
136 | Thread.sleep(1000);
137 | } catch (InterruptedException e) {
138 | LOGGER.error("Unable to sleep", e);
139 | }
140 | }
141 | }
142 | consumer.close();
143 | }
144 |
145 | private long getLastOffset(SimpleConsumer consumer, String topic, int partition,
146 | long whichTime, String clientName) {
147 | TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
148 |
149 | Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
150 | requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
151 | kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
152 | requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
153 | OffsetResponse response = consumer.getOffsetsBefore(request);
154 |
155 | if (response.hasError()) {
156 | LOGGER.error("Error fetching offset data from the Broker. Reason: " + response.errorCode(topic, partition));
157 | return 0;
158 | }
159 | long[] offsets = response.offsets(topic, partition);
160 | return offsets[0];
161 | }
162 |
163 | private String findNewLeader(String oldLeader, String topic, int partition, int port) {
164 | for (int i = 0; i < 3; i++) {
165 | PartitionMetadata metadata = findLeader(replicaBrokers, port, topic, partition);
166 | if (metadata == null
167 | || metadata.leader() == null
168 | || oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
169 | // first time through if the leader hasn't changed give ZooKeeper a second to recover
170 | // second time, assume the broker did recover before failover, or it was a non-Broker issue
171 | try {
172 | Thread.sleep(1000);
173 | } catch (InterruptedException e) {
174 | LOGGER.error("Unable to sleep", e);
175 | }
176 | } else {
177 | return metadata.leader().host();
178 | }
179 | }
180 | throw new IllegalStateException("Unable to find new leader after Broker failure");
181 | }
182 |
183 |
184 | private PartitionMetadata findLeader(List<String> seedBrokers, int port, String topic, int partition) {
185 | PartitionMetadata returnMetaData = null;
186 | loop:
187 | for (String seed : seedBrokers) {
188 | SimpleConsumer consumer = null;
189 | try {
190 | consumer = new SimpleConsumer(seed, port, 100000, 64 * 1024, "leaderLookup");
191 | List<String> topics = Collections.singletonList(topic);
192 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
193 | kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
194 |
195 | List<TopicMetadata> metaData = resp.topicsMetadata();
196 | for (TopicMetadata item : metaData) {
197 | for (PartitionMetadata part : item.partitionsMetadata()) {
198 | if (part.partitionId() == partition) {
199 | returnMetaData = part;
200 | break loop;
201 | }
202 | }
203 | }
204 | } catch (Exception e) {
205 | LOGGER.error("Error communicating with Broker [" + seed + "] to find Leader for [" + topic
206 | + ", " + partition + "] Reason: " + e);
207 | } finally {
208 | if (consumer != null) consumer.close();
209 | }
210 | }
211 | if (returnMetaData != null) {
212 | replicaBrokers.clear();
213 | for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
214 | replicaBrokers.add(replica.host());
215 | }
216 | }
217 | return returnMetaData;
218 | }
219 |
220 | }
221 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/CouchbaseReader.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.ClusterFacade;
26 | import com.couchbase.client.core.endpoint.dcp.DCPConnection;
27 | import com.couchbase.client.core.logging.CouchbaseLogger;
28 | import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
29 | import com.couchbase.client.core.message.CouchbaseMessage;
30 | import com.couchbase.client.core.message.ResponseStatus;
31 | import com.couchbase.client.core.message.cluster.OpenBucketRequest;
32 | import com.couchbase.client.core.message.cluster.OpenBucketResponse;
33 | import com.couchbase.client.core.message.cluster.SeedNodesRequest;
34 | import com.couchbase.client.core.message.cluster.SeedNodesResponse;
35 | import com.couchbase.client.core.message.dcp.DCPRequest;
36 | import com.couchbase.client.core.message.dcp.MutationMessage;
37 | import com.couchbase.client.core.message.dcp.OpenConnectionRequest;
38 | import com.couchbase.client.core.message.dcp.OpenConnectionResponse;
39 | import com.couchbase.client.core.message.dcp.RemoveMessage;
40 | import com.couchbase.client.core.message.dcp.SnapshotMarkerMessage;
41 | import com.couchbase.client.core.message.kv.MutationToken;
42 | import com.couchbase.client.deps.com.lmax.disruptor.EventTranslatorTwoArg;
43 | import com.couchbase.client.deps.com.lmax.disruptor.RingBuffer;
44 | import com.couchbase.kafka.state.ConnectorState;
45 | import com.couchbase.kafka.state.StateSerializer;
46 | import com.couchbase.kafka.state.StreamState;
47 | import com.couchbase.kafka.state.StreamStateUpdatedEvent;
48 | import rx.Observable;
49 | import rx.functions.Action1;
50 | import rx.functions.Action2;
51 | import rx.functions.Func0;
52 | import rx.functions.Func1;
53 |
54 | import java.util.Arrays;
55 | import java.util.List;
56 | import java.util.concurrent.TimeUnit;
57 |
58 | /**
59 | * {@link CouchbaseReader} is in charge of accepting events from Couchbase.
60 | *
61 | * @author Sergey Avseyev
62 | */
63 | public class CouchbaseReader {
64 | private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(CouchbaseReader.class);
65 |
66 | private static final EventTranslatorTwoArg<DCPEvent, DCPConnection, CouchbaseMessage> TRANSLATOR =
67 | new EventTranslatorTwoArg<DCPEvent, DCPConnection, CouchbaseMessage>() {
68 | @Override
69 | public void translateTo(final DCPEvent event, final long sequence,
70 | final DCPConnection connection, final CouchbaseMessage message) {
71 | event.setMessage(message);
72 | event.setConnection(connection);
73 | }
74 | };
75 |
76 | private final ClusterFacade core;
77 | private final RingBuffer<DCPEvent> dcpRingBuffer;
78 | private final List<String> nodes;
79 | private final String bucket;
80 | private final String password;
81 | private final StateSerializer stateSerializer;
82 | private final String connectionName;
83 | private final CouchbaseKafkaEnvironment environment;
84 | private DCPConnection connection;
85 |
86 | /**
87 | * Creates a new {@link CouchbaseReader}.
88 | *
89 | * @param core the core reference.
90 | * @param environment the environment object, which carries settings.
91 | * @param dcpRingBuffer the buffer where to publish new events.
92 | * @param stateSerializer the object to serialize the state of DCP streams.
93 | */
94 | public CouchbaseReader(final ClusterFacade core, final CouchbaseKafkaEnvironment environment,
95 | final RingBuffer<DCPEvent> dcpRingBuffer, final StateSerializer stateSerializer) {
96 | this(environment.couchbaseNodes(), environment.couchbaseBucket(), environment.couchbasePassword(),
97 | core, environment, dcpRingBuffer, stateSerializer);
98 | }
99 |
100 | /**
101 | * Creates a new {@link CouchbaseReader}.
102 | *
103 | * @param couchbaseNodes list of the Couchbase nodes to override {@link CouchbaseKafkaEnvironment#couchbaseNodes()}
104 | * @param couchbaseBucket bucket name to override {@link CouchbaseKafkaEnvironment#couchbaseBucket()}
105 | * @param couchbasePassword password to override {@link CouchbaseKafkaEnvironment#couchbasePassword()}
106 | * @param core the core reference.
107 | * @param environment the environment object, which carries settings.
108 | * @param dcpRingBuffer the buffer where to publish new events.
109 | * @param stateSerializer the object to serialize the state of DCP streams.
110 | */
111 | public CouchbaseReader(final List<String> couchbaseNodes, final String couchbaseBucket, final String couchbasePassword,
112 | final ClusterFacade core, final CouchbaseKafkaEnvironment environment,
113 | final RingBuffer<DCPEvent> dcpRingBuffer, final StateSerializer stateSerializer) {
114 | this.core = core;
115 | this.dcpRingBuffer = dcpRingBuffer;
116 | this.nodes = couchbaseNodes;
117 | this.bucket = couchbaseBucket;
118 | this.password = couchbasePassword;
119 | this.stateSerializer = stateSerializer;
120 | this.connectionName = "CouchbaseKafka(" + this.hashCode() + ")";
121 | this.environment = environment;
122 | }
123 |
124 | /**
125 | * Performs connection with default timeout.
126 | */
127 | public void connect() {
128 | connect(environment.connectTimeout(), TimeUnit.MILLISECONDS);
129 | }
130 |
131 | /**
132 | * Performs connection with arbitrary timeout
133 | *
134 | * @param timeout the custom timeout.
135 | * @param timeUnit the unit for the timeout.
136 | */
137 | public void connect(final long timeout, final TimeUnit timeUnit) {
138 | OpenConnectionResponse response = core
139 | .send(new SeedNodesRequest(nodes))
140 | .flatMap(new Func1<SeedNodesResponse, Observable<OpenBucketResponse>>() {
141 | @Override
142 | public Observable<OpenBucketResponse> call(SeedNodesResponse response) {
143 | return core.send(new OpenBucketRequest(bucket, password));
144 | }
145 | })
146 | .flatMap(new Func1<OpenBucketResponse, Observable<OpenConnectionResponse>>() {
147 | @Override
148 | public Observable<OpenConnectionResponse> call(OpenBucketResponse response) {
149 | return core.send(new OpenConnectionRequest(connectionName, bucket));
150 | }
151 | })
152 | .timeout(timeout, timeUnit)
153 | .toBlocking()
154 | .single();
155 | this.connection = response.connection();
156 | }
157 |
158 |
159 | /**
160 | * Returns current state of the cluster.
161 | *
162 | * @return an object, which contains the current sequence number for each partition on the cluster.
163 | */
164 | public ConnectorState currentState() {
165 | return connection.getCurrentState()
166 | .collect(new Func0<ConnectorState>() {
167 | @Override
168 | public ConnectorState call() {
169 | return new ConnectorState();
170 | }
171 | }, new Action2<ConnectorState, MutationToken>() {
172 | @Override
173 | public void call(ConnectorState connectorState, MutationToken token) {
174 | connectorState.put(new StreamState(token));
175 | }
176 | })
177 | .toBlocking().single();
178 | }
179 |
180 | /**
181 | * Executes worker reading loop, which relays events from Couchbase to Kafka.
182 | *
183 | * @param fromState initial state for the streams
184 | * @param toState target state for the streams
185 | */
186 | public void run(final ConnectorState fromState, final ConnectorState toState) {
187 | if (!Arrays.equals(fromState.partitions(), toState.partitions())) {
188 | throw new IllegalArgumentException("partitions in FROM state do not match partitions in TO state");
189 | }
190 |
191 | final ConnectorState connectorState = fromState.clone();
192 | connectorState.updates().subscribe(
193 | new Action1<StreamStateUpdatedEvent>() {
194 | @Override
195 | public void call(StreamStateUpdatedEvent event) {
196 | stateSerializer.dump(event.connectorState(), event.partition());
197 | }
198 | });
199 |
200 | Observable.from(fromState)
201 | .flatMap(new Func1<StreamState, Observable<ResponseStatus>>() {
202 | @Override
203 | public Observable<ResponseStatus> call(StreamState begin) {
204 | StreamState end = toState.get(begin.partition());
205 | return connection.addStream(begin.partition(), begin.vbucketUUID(),
206 | begin.sequenceNumber(), end.sequenceNumber(),
207 | begin.sequenceNumber(), end.sequenceNumber());
208 | }
209 | })
210 | .toList()
211 | .flatMap(new Func1<List<ResponseStatus>, Observable<DCPRequest>>() {
212 | @Override
213 | public Observable<DCPRequest> call(List<ResponseStatus> statuses) {
214 | return connection.subject();
215 | }
216 | })
217 | .onBackpressureBuffer(environment.kafkaEventBufferSize())
218 | .toBlocking()
219 | .forEach(new Action1<DCPRequest>() {
220 | @Override
221 | public void call(final DCPRequest dcpRequest) {
222 | if (dcpRequest instanceof SnapshotMarkerMessage) {
223 | SnapshotMarkerMessage snapshotMarker = (SnapshotMarkerMessage) dcpRequest;
224 | connectorState.update(snapshotMarker.partition(), snapshotMarker.endSequenceNumber());
225 | } else if (dcpRequest instanceof RemoveMessage) {
226 | RemoveMessage msg = (RemoveMessage) dcpRequest;
227 | connectorState.update(msg.partition(), msg.bySequenceNumber());
228 | } else if (dcpRequest instanceof MutationMessage) {
229 | MutationMessage msg = (MutationMessage) dcpRequest;
230 | connectorState.update(msg.partition(), msg.bySequenceNumber());
231 | }
232 | dcpRingBuffer.publishEvent(TRANSLATOR, connection, dcpRequest);
233 | }
234 | });
235 | }
236 | }
237 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/DefaultCouchbaseKafkaEnvironment.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.env.DefaultCoreEnvironment;
26 | import com.couchbase.client.core.logging.CouchbaseLogger;
27 | import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
28 |
29 | import java.util.Arrays;
30 | import java.util.Collections;
31 | import java.util.List;
32 | import java.util.Properties;
33 | import java.util.concurrent.TimeUnit;
34 |
35 | /**
36 | * @author Sergey Avseyev
37 | */
38 | public class DefaultCouchbaseKafkaEnvironment extends DefaultCoreEnvironment implements CouchbaseKafkaEnvironment {
39 | private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(CouchbaseKafkaEnvironment.class);
40 |
41 | private static final String KAFKA_KEY_SERIALIZER_CLASS = "kafka.serializer.StringEncoder";
42 | private static final String KAFKA_VALUE_SERIALIZER_CLASS = "com.couchbase.kafka.coder.JsonEncoder";
43 | private static final String KAFKA_FILTER_CLASS = "com.couchbase.kafka.filter.MutationsFilter";
44 | private static final int KAFKA_EVENT_BUFFER_SIZE = 16384;
45 | private static final String KAFKA_ZOOKEEPER_ADDRESS = "127.0.0.1:2181";
46 | private static final String KAFKA_TOPIC = "default";
47 | private static final String COUCHBASE_STATE_SERIALIZER_CLASS = "com.couchbase.kafka.state.ZookeeperStateSerializer";
48 | private static final long COUCHBASE_STATE_SERIALIZATION_THRESHOLD = 2;
49 | private static final String COUCHBASE_BUCKET = "default";
50 | private static final String COUCHBASE_PASSWORD = "";
51 | private static final String COUCHBASE_NODE = "127.0.0.1";
52 | private static final long CONNECT_TIMEOUT = TimeUnit.SECONDS.toMillis(5);
53 |
54 | private final String kafkaKeySerializerClass;
55 | private final String kafkaFilterClass;
56 | private final String kafkaValueSerializerClass;
57 | private final int kafkaEventBufferSize;
58 | private final String kafkaTopic;
59 | private final String kafkaZookeeperAddress;
60 | private final String couchbaseStateSerializerClass;
61 | private final long couchbaseStateSerializationThreshold;
62 | private final String couchbasePassword;
63 | private final String couchbaseBucket;
64 | private final List<String> couchbaseNodes;
65 | private final long connectTimeout;
66 |
67 |
68 | public static String SDK_PACKAGE_NAME_AND_VERSION = "couchbase-kafka-connector";
69 | private static final String VERSION_PROPERTIES = "com.couchbase.kafka.properties";
70 |
71 | /**
72 | * Sets up the package version and user agent.
73 | *
74 | * Note that because the class loader loads classes on demand, one class from the package
75 | * is loaded upfront.
76 | */
77 | static {
78 | try {
79 | Class connectorClass = CouchbaseKafkaConnector.class;
80 | if (connectorClass == null) {
81 | throw new IllegalStateException("Could not locate CouchbaseKafkaConnector");
82 | }
83 |
84 | String version = null;
85 | String gitVersion = null;
86 | try {
87 | Properties versionProp = new Properties();
88 | versionProp.load(DefaultCoreEnvironment.class.getClassLoader().getResourceAsStream(VERSION_PROPERTIES));
89 | version = versionProp.getProperty("specificationVersion");
90 | gitVersion = versionProp.getProperty("implementationVersion");
91 | } catch (Exception e) {
92 | LOGGER.info("Could not retrieve version properties, defaulting.", e);
93 | }
94 | SDK_PACKAGE_NAME_AND_VERSION = String.format("couchbase-kafka-connector/%s (git: %s)",
95 | version == null ? "unknown" : version, gitVersion == null ? "unknown" : gitVersion);
96 |
97 | // this will overwrite the USER_AGENT in Core
98 | // making core send user_agent with kafka connector version information
99 | USER_AGENT = String.format("%s (%s/%s %s; %s %s)",
100 | SDK_PACKAGE_NAME_AND_VERSION,
101 | System.getProperty("os.name"),
102 | System.getProperty("os.version"),
103 | System.getProperty("os.arch"),
104 | System.getProperty("java.vm.name"),
105 | System.getProperty("java.runtime.version")
106 | );
107 | } catch (Exception ex) {
108 | LOGGER.info("Could not set up user agent and packages, defaulting.", ex);
109 | }
110 | }
111 |
112 | /**
113 | * Creates a {@link CouchbaseKafkaEnvironment} with default settings applied.
114 | *
115 | * @return a {@link DefaultCouchbaseKafkaEnvironment} with default settings.
116 | */
117 | public static DefaultCouchbaseKafkaEnvironment create() {
118 | return new DefaultCouchbaseKafkaEnvironment(builder());
119 | }
120 |
121 | /**
122 | * Returns the {@link Builder} to customize environment settings.
123 | *
124 | * @return the {@link Builder}.
125 | */
126 | public static Builder builder() {
127 | return new Builder();
128 | }
129 |
130 | protected DefaultCouchbaseKafkaEnvironment(final Builder builder) {
131 | super(builder);
132 |
133 | if (!dcpEnabled()) {
134 | throw new IllegalStateException("Kafka integration cannot work without DCP enabled.");
135 | }
136 |
137 | kafkaKeySerializerClass = stringPropertyOr("kafka.keySerializerClass", builder.kafkaKeySerializerClass);
138 | kafkaValueSerializerClass = stringPropertyOr("kafka.valueSerializerClass", builder.kafkaValueSerializerClass);
139 | kafkaFilterClass = stringPropertyOr("kafka.filterClass", builder.kafkaFilterClass);
140 | kafkaEventBufferSize = intPropertyOr("kafka.eventBufferSize", builder.kafkaEventBufferSize);
141 | kafkaTopic = stringPropertyOr("kafka.topic", builder.kafkaTopic);
142 | kafkaZookeeperAddress = stringPropertyOr("kafka.zookeeperAddress", builder.kafkaZookeeperAddress);
143 | couchbaseStateSerializerClass = stringPropertyOr("couchbaseStateSerializerClass", builder.couchbaseStateSerializerClass);
144 | couchbaseStateSerializationThreshold = longPropertyOr("couchbaseStateSerializationThreshold", builder.couchbaseStateSerializationThreshold);
145 | couchbaseNodes = stringListPropertyOr("couchbase.nodes", builder.couchbaseNodes);
146 | couchbaseBucket = stringPropertyOr("couchbase.bucket", builder.couchbaseBucket);
147 | couchbasePassword = stringPropertyOr("couchbase.password", builder.couchbasePassword);
148 | connectTimeout = longPropertyOr("connectTimeout", builder.connectTimeout);
149 | }
150 |
151 | @Override
152 | public String kafkaValueSerializerClass() {
153 | return kafkaValueSerializerClass;
154 | }
155 |
156 | @Override
157 | public String kafkaKeySerializerClass() {
158 | return kafkaKeySerializerClass;
159 | }
160 |
161 | @Override
162 | public String kafkaFilterClass() {
163 | return kafkaFilterClass;
164 | }
165 |
166 | @Override
167 | public int kafkaEventBufferSize() {
168 | return kafkaEventBufferSize;
169 | }
170 |
171 | @Override
172 | public String kafkaZookeeperAddress() {
173 | return kafkaZookeeperAddress;
174 | }
175 |
176 | @Override
177 | public String kafkaTopic() {
178 | return kafkaTopic;
179 | }
180 |
181 | @Override
182 | public long connectTimeout() {
183 | return connectTimeout;
184 | }
185 |
186 | @Override
187 | public long couchbaseStateSerializationThreshold() {
188 | return couchbaseStateSerializationThreshold;
189 | }
190 |
191 | @Override
192 | public String couchbaseStateSerializerClass() {
193 | return couchbaseStateSerializerClass;
194 | }
195 |
196 | @Override
197 |     public List<String> couchbaseNodes() {
198 | return couchbaseNodes;
199 | }
200 |
201 | @Override
202 | public String couchbaseBucket() {
203 | return couchbaseBucket;
204 | }
205 |
206 | @Override
207 | public String couchbasePassword() {
208 | return couchbasePassword;
209 | }
210 |
211 |     private List<String> stringListPropertyOr(String path, List<String> def) {
212 | String found = stringPropertyOr(path, null);
213 | if (found == null) {
214 | return def;
215 | } else {
216 | return Arrays.asList(found.split(";"));
217 | }
218 | }
219 |
220 | @Override
221 | protected StringBuilder dumpParameters(StringBuilder sb) {
222 | //first dump core's parameters
223 | super.dumpParameters(sb);
224 | //dump kafka-connector specific parameters
225 | sb.append(", kafkaKeySerializerClass=").append(this.kafkaKeySerializerClass);
226 | sb.append(", kafkaFilterClass=").append(this.kafkaFilterClass);
227 | sb.append(", kafkaValueSerializerClass=").append(this.kafkaValueSerializerClass);
228 | sb.append(", kafkaEventBufferSize=").append(this.kafkaEventBufferSize);
229 | sb.append(", kafkaTopic=").append(this.kafkaTopic);
230 | sb.append(", kafkaZookeeperAddress=").append(this.kafkaZookeeperAddress);
231 | sb.append(", couchbaseStateSerializerClass=").append(this.couchbaseStateSerializerClass);
232 | sb.append(", couchbaseStateSerializationThreshold=").append(this.couchbaseStateSerializationThreshold);
233 | sb.append(", couchbaseBucket=").append(this.couchbaseBucket);
234 | StringBuilder nodes = new StringBuilder();
235 | for (String node:this.couchbaseNodes) {
236 | if (nodes.length() == 0) {
237 | nodes.append(node);
238 | } else {
239 | nodes.append("," + node);
240 | }
241 | }
242 | sb.append(", couchbaseNodes=").append(nodes.toString());
243 | return sb;
244 | }
245 |
246 | @Override
247 | public String toString() {
248 | StringBuilder sb = new StringBuilder("CouchbaseKafkaEnvironment: {");
249 | this.dumpParameters(sb).append('}');
250 | return sb.toString();
251 | }
252 |
253 | public static class Builder extends DefaultCoreEnvironment.Builder {
254 | private String kafkaKeySerializerClass = KAFKA_KEY_SERIALIZER_CLASS;
255 | private String kafkaValueSerializerClass = KAFKA_VALUE_SERIALIZER_CLASS;
256 | private int kafkaEventBufferSize = KAFKA_EVENT_BUFFER_SIZE;
257 | private String kafkaFilterClass = KAFKA_FILTER_CLASS;
258 | private String kafkaTopic = KAFKA_TOPIC;
259 | private String kafkaZookeeperAddress = KAFKA_ZOOKEEPER_ADDRESS;
260 | private String couchbaseStateSerializerClass = COUCHBASE_STATE_SERIALIZER_CLASS;
261 |         private List<String> couchbaseNodes;
262 | private String couchbaseBucket = COUCHBASE_BUCKET;
263 | private String couchbasePassword = COUCHBASE_PASSWORD;
264 | private long couchbaseStateSerializationThreshold = COUCHBASE_STATE_SERIALIZATION_THRESHOLD;
265 | private long connectTimeout = CONNECT_TIMEOUT;
266 |
267 | public Builder() {
268 | couchbaseNodes = Collections.singletonList(COUCHBASE_NODE);
269 | }
270 |
271 | public Builder kafkaValueSerializerClass(final String className) {
272 | this.kafkaValueSerializerClass = className;
273 | return this;
274 | }
275 |
276 | public Builder kafkaKeySerializerClass(final String className) {
277 | this.kafkaKeySerializerClass = className;
278 | return this;
279 | }
280 |
281 | public Builder kafkaFilterClass(final String className) {
282 | this.kafkaFilterClass = className;
283 | return this;
284 | }
285 |
286 | public Builder kafkaEventBufferSize(final int eventBufferSize) {
287 | this.kafkaEventBufferSize = eventBufferSize;
288 | return this;
289 | }
290 |
291 | public Builder kafkaTopic(final String kafkaTopic) {
292 | this.kafkaTopic = kafkaTopic;
293 | return this;
294 | }
295 |
296 | public Builder kafkaZookeeperAddress(final String kafkaZookeeperAddress) {
297 | this.kafkaZookeeperAddress = kafkaZookeeperAddress;
298 | return this;
299 | }
300 |
301 | public Builder couchbaseStateSerializerClass(final String couchbaseStateSerializerClass) {
302 | this.couchbaseStateSerializerClass = couchbaseStateSerializerClass;
303 | return this;
304 | }
305 |
306 | public Builder couchbaseStateSerializationThreshold(final long couchbaseStateSerializationThreshold) {
307 | this.couchbaseStateSerializationThreshold = couchbaseStateSerializationThreshold;
308 | return this;
309 | }
310 |
311 |         public Builder couchbaseNodes(final List<String> couchbaseNodes) {
312 | this.couchbaseNodes = couchbaseNodes;
313 | return this;
314 | }
315 |
316 | public Builder couchbaseNodes(final String couchbaseNode) {
317 | this.couchbaseNodes(Collections.singletonList(couchbaseNode));
318 | return this;
319 | }
320 |
321 | public Builder couchbaseBucket(final String couchbaseBucket) {
322 | this.couchbaseBucket = couchbaseBucket;
323 | return this;
324 | }
325 |
326 | public Builder couchbasePassword(final String couchbasePassword) {
327 | this.couchbasePassword = couchbasePassword;
328 | return this;
329 | }
330 |
331 |         /**
332 |          * Sets the timeout for connect operations, which defaults to {@link DefaultCouchbaseKafkaEnvironment#CONNECT_TIMEOUT}.
333 |          *
334 |          * @return this {@link Builder} for chaining.
335 |          */
336 | public Builder connectTimeout(final long connectTimeout) {
337 | this.connectTimeout = connectTimeout;
338 | return this;
339 | }
340 |
341 | @Override
342 | public DefaultCouchbaseKafkaEnvironment build() {
343 | return new DefaultCouchbaseKafkaEnvironment(this);
344 | }
345 | }
346 | }
347 |
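The create() and builder() factory methods above are the intended entry points for configuring the connector environment. The following is a minimal usage sketch, not taken from the repository: it only chains builder methods defined in the Builder class above, the topic, addresses, and bucket are placeholder values, and the import assumes the environment class lives in the com.couchbase.kafka package, as the connector's import list in the next file suggests.

    import com.couchbase.kafka.DefaultCouchbaseKafkaEnvironment;

    public class EnvironmentExample {
        public static void main(String[] args) {
            // Placeholder values; every chained method below is defined in the Builder shown above.
            // NOTE: the environment constructor refuses to build unless DCP is enabled on the
            // underlying core environment (see the dcpEnabled() check in the constructor above);
            // how that flag is switched on belongs to the parent DefaultCoreEnvironment and is
            // not shown in this file.
            DefaultCouchbaseKafkaEnvironment env = DefaultCouchbaseKafkaEnvironment.builder()
                    .kafkaTopic("my-topic")                        // Kafka topic to publish mutations to
                    .kafkaZookeeperAddress("kafka1.vagrant:2181")  // ZooKeeper address for broker discovery
                    .couchbaseNodes("couchbase1.vagrant")          // single node; a List<String> overload also exists
                    .couchbaseBucket("default")
                    .kafkaEventBufferSize(16384)                   // size of the internal event buffer
                    .build();
            System.out.println(env);                               // toString() dumps the effective parameters
        }
    }

As the constructor shows, most of these settings can also be supplied as system properties (stringListPropertyOr splits node lists on ';'); the exact property name prefix is resolved by the parent DefaultCoreEnvironment and is not visible in this file.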
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/CouchbaseKafkaConnector.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.ClusterFacade;
26 | import com.couchbase.client.core.CouchbaseCore;
27 | import com.couchbase.client.core.logging.CouchbaseLogger;
28 | import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
29 | import com.couchbase.client.deps.com.lmax.disruptor.ExceptionHandler;
30 | import com.couchbase.client.deps.com.lmax.disruptor.RingBuffer;
31 | import com.couchbase.client.deps.com.lmax.disruptor.dsl.Disruptor;
32 | import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory;
33 | import com.couchbase.kafka.filter.Filter;
34 | import com.couchbase.kafka.state.ConnectorState;
35 | import com.couchbase.kafka.state.StateSerializer;
36 | import com.couchbase.kafka.state.StreamState;
37 | import kafka.cluster.Broker;
38 | import kafka.javaapi.producer.Producer;
39 | import kafka.producer.ProducerConfig;
40 | import kafka.utils.ZKStringSerializer$;
41 | import kafka.utils.ZkUtils;
42 | import org.I0Itec.zkclient.ZkClient;
43 | import scala.collection.Iterator;
44 |
45 | import java.lang.reflect.InvocationTargetException;
46 | import java.util.ArrayList;
47 | import java.util.Collections;
48 | import java.util.List;
49 | import java.util.Properties;
50 | import java.util.concurrent.ExecutorService;
51 | import java.util.concurrent.Executors;
52 |
53 | /**
54 |  * {@link CouchbaseKafkaConnector} is the entry point of the library. It sets up connections to both the Couchbase and
55 |  * Kafka clusters and carries all events from Couchbase to Kafka.
56 | *
57 | * The example below will transfer all mutations from Couchbase bucket "my-bucket" as JSON to Kafka topic "my-topic".
58 | *