8 |
9 |
--------------------------------------------------------------------------------
/env/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 |
--------------------------------------------------------------------------------
/env/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Number of VMs per tier; override with KAFKA_NODES / COUCHBASE_NODES env
# vars. nil.to_i is 0, so an unset variable still yields at least one node.
kafka_count = [ENV['KAFKA_NODES'].to_i, 1].max
couchbase_count = [ENV['COUCHBASE_NODES'].to_i, 1].max

Vagrant.configure(2) do |config|
  # Static addressing: kafka nodes on 192.168.3.81+, couchbase on 192.168.4.81+.
  kafka_nodes = (1..kafka_count).map do |idx|
    {group: 'kafka', name: "kafka#{idx}.vagrant", address: "192.168.3.#{80 + idx}"}
  end
  couchbase_nodes = (1..couchbase_count).map do |idx|
    {group: 'couchbase', name: "couchbase#{idx}.vagrant", address: "192.168.4.#{80 + idx}"}
  end
  hosts = kafka_nodes + couchbase_nodes
  hosts.each do |host|
    config.vm.define(host[:name]) do |node|
      node.vm.box = 'http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7.box'
      node.vm.network :private_network, ip: host[:address]
      node.vm.hostname = host[:name]
      # Every VM is provisioned with the same playbook; Ansible group
      # membership (below) decides which roles actually apply.
      node.vm.provision 'ansible' do |ansible|
        ansible.playbook = 'vagrant.yml'
        ansible.extra_vars = {
          hosts: hosts,
          kafka_hosts: kafka_nodes,
          couchbase_hosts: couchbase_nodes}
        # NOTE: the *last* couchbase node is the cluster-bootstrap primary;
        # the couchbase_cluster role runs only there (see vagrant.yml).
        ansible.groups = {
          'kafka' => kafka_nodes.map { |n| n[:name] },
          'couchbase' => couchbase_nodes.map { |n| n[:name] },
          'couchbase_primary' => [couchbase_nodes.last[:name]]
        }
      end
    end
  end
end
35 |
--------------------------------------------------------------------------------
/env/roles/couchbase/tasks/main.yml:
--------------------------------------------------------------------------------
# Download and install Couchbase Server 4.0.0, then make sure it is running.
- name: download couchbase server
  get_url: url=http://packages.couchbase.com/releases/4.0.0/couchbase-server-enterprise-4.0.0-centos7.x86_64.rpm
           dest=/home/vagrant/couchbase-server.rpm

- name: install couchbase server
  sudo: yes
  yum: name=/home/vagrant/couchbase-server.rpm state=present

# Managing a system service requires root; without sudo this task fails when
# the play runs as the unprivileged vagrant user (the install task above
# already escalates). Also enable the unit so it survives a VM reboot,
# matching the kafka role's service tasks.
- name: start service
  sudo: yes
  service: name=couchbase-server state=started enabled=true
11 |
--------------------------------------------------------------------------------
/env/roles/couchbase_cluster/tasks/main.yml:
--------------------------------------------------------------------------------
# Bootstrap the Couchbase cluster from the primary node: initialize the
# primary, join the remaining nodes, rebalance, and create the default
# bucket. The couchbase-cli calls are guarded by REST /pools checks so the
# role is idempotent.

- name: install epel
  sudo: yes
  yum: name=epel-release state=installed

# python-httplib2 is required on the target host by Ansible's 'uri' module.
- name: install python-httplib2
  sudo: yes
  yum: name=python-httplib2 state=installed

# An uninitialized node answers /pools with an empty pools list, so
# 'result.json.pools|count == 0' means "not set up yet".
- name: check primary node state
  uri: url=http://{{ cluster }}/pools user={{ username }} password={{ password }}
  register: result

- name: initialize primary node
  shell: /opt/couchbase/bin/couchbase-cli node-init -c {{ cluster }} -u {{ username }} -p {{ password }} --node-init-hostname={{ primary_host }}
  when: result.json.pools|count == 0

- name: initialize cluster
  shell: /opt/couchbase/bin/couchbase-cli cluster-init -c {{ cluster }} -u {{ username }} -p {{ password }} --cluster-init-username={{ username }} --cluster-init-password={{ password }} --cluster-init-port={{ port }} --cluster-init-ramsize={{ ram }}
  when: result.json.pools|count == 0

# The primary is the *last* couchbase host (see Vagrantfile), so probing the
# first host tells us whether the non-primary nodes still need to join.
# Renamed from the duplicate "check primary node state" to keep task names
# unique in play output.
- name: check first data node state
  uri: url=http://{{ couchbase_hosts[0].name }}:{{ port }}/pools user={{ username }} password={{ password }}
  register: result

# Join every host except the primary (the last list element). The slice is a
# Jinja2 expression, so wrap it in "{{ }}" instead of relying on bare-variable
# interpolation, which is deprecated and rejected by newer Ansible releases.
- name: join additional nodes
  shell: /opt/couchbase/bin/couchbase-cli server-add -c {{ cluster }} -u {{ username }} -p {{ password }} --server-add={{ item.name }}:{{ port }} --server-add-username={{ username }} --server-add-password={{ password }}
  with_items: "{{ couchbase_hosts[:-1] }}"
  when: result.json.pools|count == 0

- name: rebalance cluster
  shell: /opt/couchbase/bin/couchbase-cli rebalance -c {{ cluster }} -u {{ username }} -p {{ password }}
  when: result.json.pools|count == 0

- name: list buckets
  shell: /opt/couchbase/bin/couchbase-cli bucket-list -c {{ cluster }} -u {{ username }} -p {{ password }}
  register: result

- name: create bucket '{{ bucket_name }}'
  shell: /opt/couchbase/bin/couchbase-cli bucket-create -c {{ cluster }} -u {{ username }} -p {{ password }} --bucket={{ bucket_name }} --bucket-type=couchbase --bucket-ramsize={{ bucket_size }} --bucket-replica={{ bucket_replica }}
  when: not bucket_name in result.stdout_lines
41 |
--------------------------------------------------------------------------------
/env/roles/couchbase_cluster/vars/main.yml:
--------------------------------------------------------------------------------
# Defaults for the couchbase_cluster bootstrap role.
port: 8091
primary_host: "{{ hostvars[groups['couchbase_primary'][0]]['inventory_hostname'] }}"
# Derive the admin endpoint from primary_host/port instead of repeating the
# literal 8091, so changing 'port' cannot silently disagree with 'cluster'.
cluster: "{{ primary_host }}:{{ port }}"
username: Administrator
# NOTE(review): default credentials — acceptable only for this local Vagrant
# environment; do not reuse elsewhere.
password: password
ram: 300          # cluster RAM quota, MB
bucket_name: default
bucket_size: 100  # bucket RAM quota, MB
bucket_replica: 1
10 |
--------------------------------------------------------------------------------
/env/roles/firewalld/tasks/main.yml:
--------------------------------------------------------------------------------
# Disable the host firewall entirely so VMs on the private network can reach
# each other's Kafka/ZooKeeper/Couchbase ports without per-port rules.
# Acceptable only because this is an isolated local Vagrant environment.
- name: make sure firewalld stopped and disabled
  sudo: yes
  service: name=firewalld state=stopped enabled=false
4 |
--------------------------------------------------------------------------------
/env/roles/hosts/tasks/main.yml:
--------------------------------------------------------------------------------
# Maintain an /etc/hosts entry for every VM in the environment ('hosts' is
# passed in from the Vagrantfile via extra_vars) so nodes can resolve each
# other by the names used there.
- name: build hosts file
  sudo: true
  lineinfile: dest=/etc/hosts regexp='.*{{ item.name }}$'
              line="{{ item.address }} {{ item.name }}"
              state=present
  with_items: "{{ hosts }}"
7 |
--------------------------------------------------------------------------------
/env/roles/java/tasks/main.yml:
--------------------------------------------------------------------------------
# Install the OpenJDK 1.7 JRE required by the kafka role's services.
- name: install jre 1.7
  sudo: yes
  yum:
    name: java-1.7.0-openjdk
    state: latest
5 |
--------------------------------------------------------------------------------
/env/roles/kafka/tasks/main.yml:
--------------------------------------------------------------------------------
# Install Kafka 0.8.1.1 under /opt and run ZooKeeper + Kafka as systemd units.

- name: download kafka 0.8.1.1
  get_url: url=http://ftp.byfly.by/pub/apache.org/kafka/0.8.1.1/kafka_2.10-0.8.1.1.tgz
           dest=/home/vagrant/kafka_2.10-0.8.1.1.tgz

- name: uncompress kafka 0.8.1.1
  sudo: yes
  unarchive: src=/home/vagrant/kafka_2.10-0.8.1.1.tgz
             dest=/opt copy=no

# Version-independent path so unit files and commands need not know the
# exact release directory.
- name: make symlink /opt/kafka
  sudo: yes
  file: src=/opt/kafka_2.10-0.8.1.1
        dest=/opt/kafka
        state=link

# Use the template module directly instead of the legacy 'action:' syntax,
# matching the server.properties task below.
- name: install zookeeper service
  sudo: yes
  template: src=zookeeper.service.j2
            dest=/usr/lib/systemd/system/zookeeper.service
  register: zookeeper

- name: install kafka service
  sudo: yes
  template: src=kafka.service.j2
            dest=/usr/lib/systemd/system/kafka.service
  register: kafka

- name: enable automatic topic creation
  sudo: yes
  template: src=server.properties.j2 dest=/opt/kafka/config/server.properties
  register: kafka_config

- name: reload daemon information
  sudo: yes
  command: systemctl daemon-reload
  when: zookeeper|changed or kafka|changed or kafka_config|changed

- name: restart zookeeper daemon
  sudo: yes
  service: name=zookeeper state=restarted enabled=true
  when: zookeeper|changed

# Restart the broker when its unit file OR server.properties changes.
# Previously kafka_config was missing from this condition, so a config-only
# change was written to disk but never applied to the running broker.
- name: restart kafka daemon
  sudo: yes
  service: name=kafka state=restarted enabled=true
  when: zookeeper|changed or kafka|changed or kafka_config|changed
47 |
--------------------------------------------------------------------------------
/env/roles/kafka/templates/kafka.service.j2:
--------------------------------------------------------------------------------
# systemd unit for the Kafka broker, rendered by the kafka role.
# Type=forking matches kafka-server-start.sh -daemon, which backgrounds the
# broker process.
[Unit]
Description=kafka service
Requires=zookeeper.service
After=zookeeper.service

[Service]
Type=forking
User=root
ExecStart=/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
TimeoutSec=10

[Install]
WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/env/roles/kafka/templates/server.properties.j2:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # see kafka.server.KafkaConfig for additional details and defaults
16 |
17 | ############################# Server Basics #############################
18 |
# The id of the broker. This must be set to a unique integer for each broker.
# Derive it from the host's index in the Ansible 'kafka' group so that every
# VM gets a distinct id — the Vagrantfile supports KAFKA_NODES > 1, and a
# hard-coded 0 would collide across brokers.
broker.id={{ groups['kafka'].index(inventory_hostname) }}
21 |
22 | ############################# Socket Server Settings #############################
23 |
24 | # The port the socket server listens on
25 | port=9092
26 |
27 | # Hostname the broker will bind to. If not set, the server will bind to all interfaces
28 | #host.name=localhost
29 |
30 | # Hostname the broker will advertise to producers and consumers. If not set, it uses the
31 | # value for "host.name" if configured. Otherwise, it will use the value returned from
32 | # java.net.InetAddress.getCanonicalHostName().
33 | #advertised.host.name=
34 |
35 | # The port to publish to ZooKeeper for clients to use. If this is not set,
36 | # it will publish the same port that the broker binds to.
37 | #advertised.port=
38 |
39 | # The number of threads handling network requests
40 | num.network.threads=2
41 |
42 | # The number of threads doing disk I/O
43 | num.io.threads=8
44 |
45 | # The send buffer (SO_SNDBUF) used by the socket server
46 | socket.send.buffer.bytes=1048576
47 |
48 | # The receive buffer (SO_RCVBUF) used by the socket server
49 | socket.receive.buffer.bytes=1048576
50 |
51 | # The maximum size of a request that the socket server will accept (protection against OOM)
52 | socket.request.max.bytes=104857600
53 |
54 |
55 | ############################# Log Basics #############################
56 |
57 | # A comma seperated list of directories under which to store log files
58 | log.dirs=/tmp/kafka-logs
59 |
60 | # The default number of log partitions per topic. More partitions allow greater
61 | # parallelism for consumption, but this will also result in more files across
62 | # the brokers.
63 | num.partitions=1
64 |
65 | ############################# Log Flush Policy #############################
66 |
67 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
68 | # the OS cache lazily. The following configurations control the flush of data to disk.
69 | # There are a few important trade-offs here:
70 | # 1. Durability: Unflushed data may be lost if you are not using replication.
71 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
72 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
73 | # The settings below allow one to configure the flush policy to flush data after a period of time or
74 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
75 |
76 | # The number of messages to accept before forcing a flush of data to disk
77 | #log.flush.interval.messages=10000
78 |
79 | # The maximum amount of time a message can sit in a log before we force a flush
80 | #log.flush.interval.ms=1000
81 |
82 | ############################# Log Retention Policy #############################
83 |
84 | # The following configurations control the disposal of log segments. The policy can
85 | # be set to delete segments after a period of time, or after a given size has accumulated.
86 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
87 | # from the end of the log.
88 |
89 | # The minimum age of a log file to be eligible for deletion
90 | log.retention.hours=168
91 |
92 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
93 | # segments don't drop below log.retention.bytes.
94 | #log.retention.bytes=1073741824
95 |
96 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
97 | log.segment.bytes=536870912
98 |
99 | # The interval at which log segments are checked to see if they can be deleted according
100 | # to the retention policies
101 | log.retention.check.interval.ms=60000
102 |
103 | # By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
104 | # If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
105 | log.cleaner.enable=false
106 |
############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
# NOTE(review): the kafka role starts a local ZooKeeper on every node, so
# with multiple kafka VMs each broker points at its own single-node ZK and
# they will not form one cluster — confirm whether a shared ensemble is
# intended here.
zookeeper.connect=localhost:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
# Allow producers/consumers to create topics implicitly on first use.
auto.create.topics.enable=true
119 |
--------------------------------------------------------------------------------
/env/roles/kafka/templates/zookeeper.service.j2:
--------------------------------------------------------------------------------
# systemd unit for the node-local ZooKeeper, rendered by the kafka role.
# Type=forking matches zookeeper-server-start.sh -daemon, which backgrounds
# the process.
[Unit]
Description=zookeeper service
After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
User=root
ExecStart=/opt/kafka/bin/zookeeper-server-start.sh -daemon /opt/kafka/config/zookeeper.properties
ExecStop=/opt/kafka/bin/zookeeper-server-stop.sh
TimeoutSec=10

[Install]
WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/env/roles/scala/tasks/main.yml:
--------------------------------------------------------------------------------
# Install the Scala 2.10.4 runtime directly from the upstream RPM.
- name: install scala 2.10.4
  sudo: yes
  yum:
    name: http://www.scala-lang.org/files/archive/scala-2.10.4.rpm
    state: present
5 |
--------------------------------------------------------------------------------
/env/vagrant.yml:
--------------------------------------------------------------------------------
---
# Provisioning entry point used by the Vagrantfile's ansible provisioner.
# Host groups (kafka, couchbase, couchbase_primary) are defined in the
# Vagrantfile via ansible.groups.

# Baseline for every VM: /etc/hosts entries and firewall disabled.
- hosts: all
  roles:
    - hosts
    - firewalld

# Kafka brokers: JRE + Scala runtime, then Kafka itself.
- hosts: kafka
  roles:
    - java
    - scala
    - kafka

- hosts: couchbase
  roles:
    - couchbase

# Cluster bootstrap runs once, on the designated primary node only.
- hosts: couchbase_primary
  roles:
    - couchbase_cluster
20 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | version=2.0.1
2 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/couchbase/couchbase-kafka-connector/23f3fe040ddce35b8981bdc8be70b3c4b0f63618/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Mon Jan 19 00:21:12 FET 2015
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-2.2.1-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 | DEFAULT_JVM_OPTS=""
11 |
12 | APP_NAME="Gradle"
13 | APP_BASE_NAME=`basename "$0"`
14 |
15 | # Use the maximum available, or set MAX_FD != -1 to use that value.
16 | MAX_FD="maximum"
17 |
18 | warn ( ) {
19 | echo "$*"
20 | }
21 |
22 | die ( ) {
23 | echo
24 | echo "$*"
25 | echo
26 | exit 1
27 | }
28 |
29 | # OS specific support (must be 'true' or 'false').
30 | cygwin=false
31 | msys=false
32 | darwin=false
33 | case "`uname`" in
34 | CYGWIN* )
35 | cygwin=true
36 | ;;
37 | Darwin* )
38 | darwin=true
39 | ;;
40 | MINGW* )
41 | msys=true
42 | ;;
43 | esac
44 |
45 | # For Cygwin, ensure paths are in UNIX format before anything is touched.
46 | if $cygwin ; then
47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
48 | fi
49 |
50 | # Attempt to set APP_HOME
51 | # Resolve links: $0 may be a link
52 | PRG="$0"
53 | # Need this for relative symlinks.
54 | while [ -h "$PRG" ] ; do
55 | ls=`ls -ld "$PRG"`
56 | link=`expr "$ls" : '.*-> \(.*\)$'`
57 | if expr "$link" : '/.*' > /dev/null; then
58 | PRG="$link"
59 | else
60 | PRG=`dirname "$PRG"`"/$link"
61 | fi
62 | done
63 | SAVED="`pwd`"
64 | cd "`dirname \"$PRG\"`/" >&-
65 | APP_HOME="`pwd -P`"
66 | cd "$SAVED" >&-
67 |
68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
69 |
70 | # Determine the Java command to use to start the JVM.
71 | if [ -n "$JAVA_HOME" ] ; then
72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
73 | # IBM's JDK on AIX uses strange locations for the executables
74 | JAVACMD="$JAVA_HOME/jre/sh/java"
75 | else
76 | JAVACMD="$JAVA_HOME/bin/java"
77 | fi
78 | if [ ! -x "$JAVACMD" ] ; then
79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
80 |
81 | Please set the JAVA_HOME variable in your environment to match the
82 | location of your Java installation."
83 | fi
84 | else
85 | JAVACMD="java"
86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
87 |
88 | Please set the JAVA_HOME variable in your environment to match the
89 | location of your Java installation."
90 | fi
91 |
92 | # Increase the maximum file descriptors if we can.
93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
94 | MAX_FD_LIMIT=`ulimit -H -n`
95 | if [ $? -eq 0 ] ; then
96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
97 | MAX_FD="$MAX_FD_LIMIT"
98 | fi
99 | ulimit -n $MAX_FD
100 | if [ $? -ne 0 ] ; then
101 | warn "Could not set maximum file descriptor limit: $MAX_FD"
102 | fi
103 | else
104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
105 | fi
106 | fi
107 |
108 | # For Darwin, add options to specify how the application appears in the dock
109 | if $darwin; then
110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
111 | fi
112 |
113 | # For Cygwin, switch paths to Windows format before running java
114 | if $cygwin ; then
115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
158 | function splitJvmOpts() {
159 | JVM_OPTS=("$@")
160 | }
161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
163 |
164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
165 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windowz variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/samples/consumer/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .idea
3 | target
4 |
--------------------------------------------------------------------------------
/samples/consumer/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | couchbaselabs
8 | kafka-samples-consumer
9 | 1.0-SNAPSHOT
10 |
11 |
12 |
13 | com.couchbase.client
14 | kafka-connector
15 | 2.0.1
16 |
17 |
18 | org.apache.kafka
19 | kafka_2.10
20 | 0.8.2.0
21 |
22 |
23 |
24 |
25 |
26 | org.apache.maven.plugins
27 | maven-compiler-plugin
28 | 3.3
29 |
30 | 1.7
31 | 1.7
32 |
33 |
34 |
35 | org.apache.maven.plugins
36 | maven-assembly-plugin
37 | 2.3
38 |
39 |
40 | jar-with-dependencies
41 |
42 |
43 |
44 | example.Example
45 |
46 |
47 |
48 |
49 |
50 | maven-dependency-plugin
51 |
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/AbstractConsumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import kafka.api.FetchRequest;
26 | import kafka.api.FetchRequestBuilder;
27 | import kafka.api.PartitionOffsetRequestInfo;
28 | import kafka.common.ErrorMapping;
29 | import kafka.common.TopicAndPartition;
30 | import kafka.javaapi.FetchResponse;
31 | import kafka.javaapi.OffsetResponse;
32 | import kafka.javaapi.PartitionMetadata;
33 | import kafka.javaapi.TopicMetadata;
34 | import kafka.javaapi.TopicMetadataRequest;
35 | import kafka.javaapi.consumer.SimpleConsumer;
36 | import kafka.message.MessageAndOffset;
37 | import org.slf4j.Logger;
38 | import org.slf4j.LoggerFactory;
39 |
40 | import java.nio.ByteBuffer;
41 | import java.util.ArrayList;
42 | import java.util.Arrays;
43 | import java.util.Collections;
44 | import java.util.HashMap;
45 | import java.util.List;
46 | import java.util.Map;
47 |
48 | /**
49 | * @author Sergey Avseyev
50 | */
51 | public abstract class AbstractConsumer {
// NOTE(review): generic type parameters appear to have been stripped by the
// extraction (raw List/ArrayList/Map below); originally likely List<String>.
private static Logger LOGGER = LoggerFactory.getLogger(AbstractConsumer.class);

// Brokers supplied by the caller, used for initial leader discovery.
private final List seedBrokers;
// Port shared by all brokers.
private final int port;
// Replica brokers consulted by findNewLeader() after a broker failure.
private List replicaBrokers = new ArrayList();

/**
 * Convenience constructor for a single seed broker.
 *
 * @param seedBroker host name of the seed broker
 * @param port       broker port
 */
public AbstractConsumer(String seedBroker, int port) {
    this(new String[]{seedBroker}, port);
}

/**
 * @param seedBrokers host names used to locate the partition leader
 * @param port        broker port
 */
public AbstractConsumer(String[] seedBrokers, int port) {
    this.replicaBrokers = new ArrayList();
    this.seedBrokers = Arrays.asList(seedBrokers);
    this.port = port;
}

/**
 * Called once for every fetched message.
 *
 * @param offset message offset within the partition
 * @param bytes  raw message payload
 */
public abstract void handleMessage(long offset, byte[] bytes);
69 |
/**
 * Fetch loop over one topic partition using Kafka's SimpleConsumer API:
 * locates the partition leader, starts at the earliest available offset,
 * and passes every message payload to {@link #handleMessage(long, byte[])}.
 * On fetch errors it re-resolves the leader and retries, giving up after
 * more than five consecutive errors; otherwise it loops forever.
 */
public void run(String topic, int partition) {
    // find the meta data about the topic and partition we are interested in
    PartitionMetadata metadata = findLeader(seedBrokers, port, topic, partition);
    if (metadata == null) {
        LOGGER.error("Can't find metadata for Topic and Partition");
        return;
    }
    if (metadata.leader() == null) {
        LOGGER.error("Can't find Leader for Topic and Partition");
        return;
    }
    String leadBroker = metadata.leader().host();
    String clientName = "Client_" + topic + "_" + partition;

    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (true) {
        if (consumer == null) {
            // re-create the consumer after a leader failover (closed below)
            consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        }
        FetchRequest req = new FetchRequestBuilder()
                .clientId(clientName)
                .addFetch(topic, partition, readOffset, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
                .build();
        FetchResponse fetchResponse = consumer.fetch(req);

        if (fetchResponse.hasError()) {
            numErrors++;
            short code = fetchResponse.errorCode(topic, partition);

            LOGGER.error("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
            if (numErrors > 5) break;
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // We asked for an invalid offset. For simple case ask for the last element to reset
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                continue;
            }
            // any other error: assume the leader moved and rediscover it
            consumer.close();
            consumer = null;
            leadBroker = findNewLeader(leadBroker, topic, partition, port);
            continue;
        }
        numErrors = 0;

        long numRead = 0;
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                // skip offsets below the requested one instead of re-delivering
                LOGGER.error("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();

            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);

            handleMessage(messageAndOffset.offset(), bytes);
            numRead++;
        }

        if (numRead == 0) {
            // nothing new on the partition yet — poll again in a second
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                LOGGER.error("Unable to sleep", e);
            }
        }
    }
    consumer.close();
}
144 |
/**
 * Asks the broker for the partition offset at {@code whichTime} (an
 * earliest/latest sentinel from kafka.api.OffsetRequest).
 *
 * @return the offset, or 0 if the request failed (error is logged)
 */
private long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                           long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);

    Map requestInfo = new HashMap();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
        LOGGER.error("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition));
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    // Only one offset was requested (maxNumOffsets = 1 above).
    return offsets[0];
}
162 |
    /**
     * Polls for a new partition leader after the current lead broker failed.
     *
     * Makes up to three attempts, sleeping one second between unsuccessful
     * ones to give the cluster time to elect a new leader.
     *
     * @param oldLeader host name of the broker that led before the failure.
     * @param topic     topic name.
     * @param partition partition id.
     * @param port      broker port used for the metadata lookup.
     * @return host name of the (possibly new) lead broker.
     * @throws IllegalStateException if no usable leader is found after three attempts.
     */
    private String findNewLeader(String oldLeader, String topic, int partition, int port) {
        for (int i = 0; i < 3; i++) {
            PartitionMetadata metadata = findLeader(replicaBrokers, port, topic, partition);
            // NOTE: '&&' binds tighter than '||', so the unchanged-leader check
            // only rejects the candidate on the very first attempt (i == 0);
            // later attempts accept the old leader again (it may have recovered).
            if (metadata == null
                || metadata.leader() == null
                || oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                // first time through if the leader hasn't changed give ZooKeeper a second to recover
                // second time, assume the broker did recover before failover, or it was a non-Broker issue
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    LOGGER.error("Unable to sleep", e);
                }
            } else {
                return metadata.leader().host();
            }
        }
        throw new IllegalStateException("Unable to find new leader after Broker failure");
    }
182 |
183 |
184 | private PartitionMetadata findLeader(List seedBrokers, int port, String topic, int partition) {
185 | PartitionMetadata returnMetaData = null;
186 | loop:
187 | for (String seed : seedBrokers) {
188 | SimpleConsumer consumer = null;
189 | try {
190 | consumer = new SimpleConsumer(seed, port, 100000, 64 * 1024, "leaderLookup");
191 | List topics = Collections.singletonList(topic);
192 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
193 | kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
194 |
195 | List metaData = resp.topicsMetadata();
196 | for (TopicMetadata item : metaData) {
197 | for (PartitionMetadata part : item.partitionsMetadata()) {
198 | if (part.partitionId() == partition) {
199 | returnMetaData = part;
200 | break loop;
201 | }
202 | }
203 | }
204 | } catch (Exception e) {
205 | LOGGER.error("Error communicating with Broker [" + seed + "] to find Leader for [" + topic
206 | + ", " + partition + "] Reason: " + e);
207 | } finally {
208 | if (consumer != null) consumer.close();
209 | }
210 | }
211 | if (returnMetaData != null) {
212 | replicaBrokers.clear();
213 | for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
214 | replicaBrokers.add(replica.host());
215 | }
216 | }
217 | return returnMetaData;
218 | }
219 |
220 | }
221 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
/**
 * Entry point of the consumer sample: connects to the first Kafka broker of
 * the Vagrant environment and prints every message from partition 0 of the
 * "default" topic.
 *
 * @author Sergey Avseyev
 */
public class Example {
    public static void main(String args[]) {
        // kafka1.vagrant:9092 is the seed broker provisioned by env/Vagrantfile.
        PrintConsumer example = new PrintConsumer("kafka1.vagrant", 9092);

        // Consume topic "default", partition 0.
        example.run("default", 0);
    }
}
34 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/java/example/PrintConsumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | /**
26 | * @author Sergey Avseyev
27 | */
28 | public class PrintConsumer extends AbstractConsumer {
29 | public PrintConsumer(String[] seedBrokers, int port) {
30 | super(seedBrokers, port);
31 | }
32 |
33 | public PrintConsumer(String seedBroker, int port) {
34 | super(seedBroker, port);
35 | }
36 |
37 | @Override
38 | public void handleMessage(long offset, byte[] bytes) {
39 | System.out.println(String.valueOf(offset) + ": " + new String(bytes));
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/samples/consumer/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stdout
2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
3 | log4j.appender.stdout.Target=System.out
4 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
--------------------------------------------------------------------------------
/samples/generator/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .idea
3 | target
4 |
--------------------------------------------------------------------------------
/samples/generator/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | couchbaselabs
8 | kafka-samples-generator
9 | 1.0-SNAPSHOT
10 |
11 |
12 |
13 | com.couchbase.client
14 | java-client
15 | 2.2.2
16 |
17 |
18 |
19 |
20 |
21 | org.apache.maven.plugins
22 | maven-compiler-plugin
23 | 3.3
24 |
25 | 1.7
26 | 1.7
27 |
28 |
29 |
30 | org.apache.maven.plugins
31 | maven-assembly-plugin
32 | 2.3
33 |
34 |
35 | jar-with-dependencies
36 |
37 |
38 |
39 | example.Example
40 |
41 |
42 |
43 |
44 |
45 | maven-dependency-plugin
46 |
47 |
48 |
49 |
50 |
--------------------------------------------------------------------------------
/samples/generator/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
24 | import com.couchbase.client.java.Bucket;
25 | import com.couchbase.client.java.Cluster;
26 | import com.couchbase.client.java.CouchbaseCluster;
27 | import com.couchbase.client.java.document.JsonDocument;
28 | import com.couchbase.client.java.document.json.JsonObject;
29 |
30 | import java.io.BufferedReader;
31 | import java.io.IOException;
32 | import java.io.InputStreamReader;
33 | import java.util.Random;
34 |
35 | /**
36 | * @author Sergey Avseyev
37 | */
38 | public class Example {
39 | public static void main(String args[]) throws IOException {
40 | Random random = new Random();
41 | Cluster cluster = CouchbaseCluster.create("couchbase1.vagrant");
42 | Bucket bucket = cluster.openBucket();
43 |
44 | BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
45 |
46 | String line;
47 | do {
48 | System.out.print("> ");
49 | line = input.readLine();
50 | if (line == null) {
51 | break;
52 | }
53 | String key = "key-" + random.nextInt(10);
54 | JsonObject value = JsonObject.create().put("line", line);
55 | bucket.upsert(JsonDocument.create(key, value));
56 | System.out.printf(">> key=%s, value=%s\n", key, value);
57 | } while (true);
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/samples/generator/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stdout
2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
3 | log4j.appender.stdout.Target=System.out
4 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
--------------------------------------------------------------------------------
/samples/producer/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .idea
3 | target
4 |
--------------------------------------------------------------------------------
/samples/producer/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | couchbaselabs
8 | kafka-samples-producer
9 | 1.0-SNAPSHOT
10 |
11 |
12 |
13 | com.couchbase.client
14 | kafka-connector
15 | 2.0.1
16 |
17 |
18 | com.couchbase.client
19 | core-io
20 | 1.3.1
21 |
22 |
23 | org.apache.kafka
24 | kafka_2.10
25 | 0.8.2.0
26 |
27 |
28 |
29 |
30 | couchbase
31 | couchbase repo
32 | http://files.couchbase.com/maven2
33 | false
34 |
35 |
36 |
37 |
38 |
39 | org.apache.maven.plugins
40 | maven-compiler-plugin
41 | 3.3
42 |
43 | 1.7
44 | 1.7
45 |
46 |
47 |
48 | org.apache.maven.plugins
49 | maven-assembly-plugin
50 | 2.3
51 |
52 |
53 | jar-with-dependencies
54 |
55 |
56 |
57 | example.Example
58 |
59 |
60 |
61 |
62 |
63 | maven-dependency-plugin
64 |
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/Example.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 | package example;
23 |
24 | import com.couchbase.kafka.CouchbaseKafkaConnector;
25 | import com.couchbase.kafka.DefaultCouchbaseKafkaEnvironment;
26 |
/**
 * Entry point of the producer sample: streams DCP events from the Couchbase
 * bucket "default" on couchbase1.vagrant to the Kafka topic "default" (via
 * the ZooKeeper at kafka1.vagrant), using the sample filter, encoder and
 * no-op state serializer from this package.
 */
public class Example {
    public static void main(String[] args) {
        DefaultCouchbaseKafkaEnvironment.Builder builder =
                (DefaultCouchbaseKafkaEnvironment.Builder) DefaultCouchbaseKafkaEnvironment.builder()
                .kafkaFilterClass("example.SampleFilter")
                .kafkaTopic("default")
                .kafkaZookeeperAddress("kafka1.vagrant")
                .couchbaseNodes("couchbase1.vagrant")
                .couchbaseBucket("default")
                .kafkaValueSerializerClass("example.SampleEncoder")
                .couchbaseStateSerializerClass("example.NullStateSerializer")
                .dcpEnabled(true); // turn on DCP streaming in the environment
        CouchbaseKafkaConnector connector = CouchbaseKafkaConnector.create(builder.build());
        connector.run();
    }
}
43 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/NullStateSerializer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.kafka.CouchbaseKafkaEnvironment;
26 | import com.couchbase.kafka.state.ConnectorState;
27 | import com.couchbase.kafka.state.StateSerializer;
28 | import com.couchbase.kafka.state.StreamState;
29 |
/**
 * {@code StateSerializer} that persists nothing: both dump overloads are
 * no-ops and both load overloads return fresh state objects, so no stream
 * position survives between runs.
 *
 * @author Sergey Avseyev
 */
public class NullStateSerializer implements StateSerializer {

    // NOTE(review): the environment parameter is unused here; presumably the
    // connector constructs serializers reflectively from the configured class
    // name and requires this constructor shape — confirm against the connector.
    public NullStateSerializer(final CouchbaseKafkaEnvironment environment) {
    }

    @Override
    public void dump(ConnectorState connectorState) {
        // Intentionally empty: state is never persisted.
    }

    @Override
    public void dump(ConnectorState connectorState, short partition) {
        // Intentionally empty: state is never persisted.
    }

    @Override
    public ConnectorState load(ConnectorState connectorState) {
        // Always hand back a brand-new (empty) connector state.
        return new ConnectorState();
    }

    @Override
    public StreamState load(ConnectorState connectorState, short partition) {
        // Fresh stream state for the partition with both counters at 0.
        return new StreamState(partition, 0, 0);
    }
}
56 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/SampleEncoder.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.client.core.message.dcp.MutationMessage;
26 | import com.couchbase.kafka.DCPEvent;
27 | import com.couchbase.kafka.coder.AbstractEncoder;
28 | import kafka.utils.VerifiableProperties;
29 |
30 | /**
31 | * @author Sergey Avseyev
32 | */
33 | public class SampleEncoder extends AbstractEncoder {
34 | public SampleEncoder(final VerifiableProperties properties) {
35 | super(properties);
36 | }
37 |
38 | @Override
39 | public byte[] toBytes(final DCPEvent dcpEvent) {
40 | if (dcpEvent.message() instanceof MutationMessage) {
41 | MutationMessage message = (MutationMessage) dcpEvent.message();
42 | return message.key().getBytes();
43 | } else {
44 | return dcpEvent.message().toString().getBytes();
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/samples/producer/src/main/java/example/SampleFilter.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package example;
24 |
25 | import com.couchbase.kafka.DCPEvent;
26 | import com.couchbase.kafka.filter.Filter;
27 |
/**
 * Pass-through {@code Filter}: prints every DCP event to stdout and lets all
 * of them continue to Kafka.
 */
public class SampleFilter implements Filter {
    @Override
    public boolean pass(DCPEvent dcpEvent) {
        System.out.println("RECEIVED: " + dcpEvent);
        return true;
    }
}
35 |
--------------------------------------------------------------------------------
/samples/producer/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stdout
2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
3 | log4j.appender.stdout.Target=System.out
4 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'couchbase-kafka-connector'
2 |
3 |
--------------------------------------------------------------------------------
/src/main/java/com/couchbase/kafka/CouchbaseKafkaConnector.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (C) 2015 Couchbase, Inc.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy
5 | * of this software and associated documentation files (the "Software"), to deal
6 | * in the Software without restriction, including without limitation the rights
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | * copies of the Software, and to permit persons to whom the Software is
9 | * furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
20 | * IN THE SOFTWARE.
21 | */
22 |
23 | package com.couchbase.kafka;
24 |
25 | import com.couchbase.client.core.ClusterFacade;
26 | import com.couchbase.client.core.CouchbaseCore;
27 | import com.couchbase.client.core.logging.CouchbaseLogger;
28 | import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
29 | import com.couchbase.client.deps.com.lmax.disruptor.ExceptionHandler;
30 | import com.couchbase.client.deps.com.lmax.disruptor.RingBuffer;
31 | import com.couchbase.client.deps.com.lmax.disruptor.dsl.Disruptor;
32 | import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory;
33 | import com.couchbase.kafka.filter.Filter;
34 | import com.couchbase.kafka.state.ConnectorState;
35 | import com.couchbase.kafka.state.StateSerializer;
36 | import com.couchbase.kafka.state.StreamState;
37 | import kafka.cluster.Broker;
38 | import kafka.javaapi.producer.Producer;
39 | import kafka.producer.ProducerConfig;
40 | import kafka.utils.ZKStringSerializer$;
41 | import kafka.utils.ZkUtils;
42 | import org.I0Itec.zkclient.ZkClient;
43 | import scala.collection.Iterator;
44 |
45 | import java.lang.reflect.InvocationTargetException;
46 | import java.util.ArrayList;
47 | import java.util.Collections;
48 | import java.util.List;
49 | import java.util.Properties;
50 | import java.util.concurrent.ExecutorService;
51 | import java.util.concurrent.Executors;
52 |
53 | /**
54 | * {@link CouchbaseKafkaConnector} is an entry point of the library. It sets up connections with both Couchbase and
55 | * Kafka clusters. And carries all events from Couchbase to Kafka.
56 | *
57 | * The example below will transfer all mutations from Couchbase bucket "my-bucket" as JSON to Kafka topic "my-topic".
58 | *