├── src
│   ├── test
│   │   ├── resources
│   │   │   ├── application.conf
│   │   │   └── log4j.properties
│   │   └── scala
│   │       ├── BaseSpec.scala
│   │       ├── AkkaKafkaSpec.scala
│   │       └── KafkaSpec.scala
│   └── main
│       ├── resources
│       │   └── scalago.avsc
│       └── scala
│           ├── Stub.scala
│           ├── AkkaKafka.scala
│           ├── scalago
│           │   └── ScalaGoKafka.scala
│           ├── KafkaConsumer.scala
│           └── KafkaProducer.scala
├── checks
│   ├── kafka_2.10-0.8.2.0.tgz.md5
│   ├── kafka_2.10-0.8.2.0.tgz.sha1
│   ├── kafka_2.10-0.8.2.1.tgz.md5
│   ├── kafka_2.10-0.8.2.1.tgz.sha1
│   ├── kafka_2.10-0.8.2.0.tgz.sha2
│   ├── kafka_2.10-0.8.2.1.tgz.sha2
│   ├── kafka_2.10-0.8.2.0.tgz.asc
│   └── kafka_2.10-0.8.2.1.tgz.asc
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradle.properties
├── stop-scala-go.sh
├── .gitignore
├── run-go.sh
├── vagrant
│   ├── README.md
│   ├── verify_hash.sh
│   ├── bootstrap.sh
│   ├── verify.sh
│   ├── kafkacat.sh
│   ├── kafka.sh
│   ├── init.sh
│   ├── zk.sh
│   └── broker.sh
├── Dockerfile
├── README.md
├── start-scala-go.sh
├── Vagrantfile
├── gradlew.bat
├── config
│   └── server.properties
├── gradlew
├── LICENSE
└── KEYS

/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | kafka.consumer.auto.offset.reset = "smallest"
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.0.tgz.md5:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.0.tgz: 8C 56 86 46 14 2F 2E 3C 91 5D 96 94 D1 B8 8D 3D
2 |
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.0.tgz.sha1:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.0.tgz: 0B15 0466 1F36 9877 AF43 458F D062 99C1 356A 453F
2 |
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.1.tgz.md5:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.1.tgz: 44 6E AB 1F 53 29 EB 03 66 29 26 AA 1C B0 84 5D
2 |
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.1.tgz.sha1:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.1.tgz: 6107 0CE0 8383 9CFE D015 0C33 B3D5 5311 C7AE 300E
2 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/elodina/scala-kafka/HEAD/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | mavenURL=""
2 | mavenUsername=""
3 | mavenPassword=""
4 |
5 | org.gradle.jvmargs=-XX:MaxPermSize=1g -Xmx1g
6 | signing.keyId=
7 | signing.password=
8 | signing.secretKeyRingFile=
9 |
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.0.tgz.sha2:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.0.tgz: D2CD5B94 3D657A03 4EC1E8E9 B7B47E1E B9E580C8 110895A1
2 | BE1247E4 1B1A3058 1D7C335E 32670889 BF8DE974 68870E48
3 | 1C593559 9D11EE46 DC70AB58 0FC83709
--------------------------------------------------------------------------------
/src/main/resources/scalago.avsc:
--------------------------------------------------------------------------------
1 | {"namespace": "scalago",
2 |  "type": "record",
3 |  "name": "PingPong",
4 |  "fields": [
5 |     {"name": "counter", "type": "long"},
6 |     {"name": "name", "type": "string"},
7 |     {"name": "uuid", "type": "string"}
8 |  ]
9 | }
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.1.tgz.sha2:
--------------------------------------------------------------------------------
1 | kafka_2.10-0.8.2.1.tgz: 48D73A18 EAE615A5 AEB52B11 58FD9A4B B96B7A3C 50FE5259
2 | 9FFB8858 6F10C422 C2F00789 58CAFE1C 63518970 F1E3984F
3 | C70C95A3 B2896514 D9D02EB7 E119AEF8
4 |
--------------------------------------------------------------------------------
/stop-scala-go.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | docker kill scala
4 | docker kill golang
5 | docker kill registry
6 | docker kill broker1
7 | docker kill zkserver
8 |
9 | docker rm scala
10 | docker rm golang
11 | docker rm registry
12 | docker rm broker1
13 | docker rm zkserver
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | .idea/*
3 | *.class
4 | *.log
5 | .vagrant
6 | .gradle
7 | # sbt specific
8 | dist/*
9 | target/
10 | lib_managed/
11 | src_managed/
12 | project/boot/
13 | project/plugins/project/
14 | logs/*
15 | # Scala-IDE specific
16 | .scala_dependencies
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Thu Apr 30 13:50:04 EDT 2015
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-2.3-bin.zip
--------------------------------------------------------------------------------
/run-go.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | go get github.com/tools/godep
4 | mkdir -p $GOPATH/src/github.com/stealthly
5 | cd $GOPATH/src/github.com/stealthly
6 | git clone https://github.com/stealthly/go-kafka.git
7 | cd $GOPATH/src/github.com/stealthly/go-kafka
8 | godep restore
9 | go run scala_go_kafka.go $1 $2
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.0.tgz.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP SIGNATURE-----
2 | Version: GnuPG/MacGPG2 v2.0.22 (Darwin)
3 |
4 | iQIcBAABAgAGBQJUtc8NAAoJEKth9w/gph7qt/MQAN/MWjojHGRzq8NdkTQfhj0G
5 | TB02Y3fLENIrqqLI2GK0A2GfJ01bTqD1QiQo8UEPH7dBEIxSJa76ezpkj119XAiQ
6 | 8VtGjniJJzjHAS7gvy2HSCI/BBF0PMKPvxHN0wgatj+DT+Sgs3y2WdbYCPZBScAT
7 | RVyiDq3XFibhhCea+GyK5pcTrvOb6uKOzlXLQE3DTfWG9OFSLg8VFnEkAZcae/ff
8 | vzNQpMp+xlHxR2NVAEgMpkZJIPcdg2MK1DajB/IXJ4JNRAr+PCry5xWDqg7W06Tp
9 | FM0N0PV7DYPOXRFvT5A61Ed77GjYNBCOyO6qMhfDgaXoRg/Xix4gsJ3MlWLcoQAI
10 | M/eYQv0RHiwGsbxXqInuKkyOwumMFkfWHP+plu/JBxuOUl7A5Dw9l/aUXh40uJ8E
11 | O2fPpDS2nb7DBronBfdXnUK+6hpyELNxD/y0Oqo8r8cE8liVtG63ZrE7KbfcieFR
12 | EWdUlrJWANUuUwnsEfmR3ThZ1yi3oCnHaBxvZdp+HyrxfwpAH12icgwJ9EHXN9mV
13 | TA34FPq5XA1L7DSU3gJiMDl4v0yYMH45wO1GezHM4pZaSq65NFEtWug3U9AVvMNM
14 | WVZz45go4/ZEa81GDky+X2GvkRZyHgr2gushuV5jj48YaxNgxCrweVPAp26ogWkK
15 | 37z6jtFakRpzeQQQsBW1
16 | =AuuO
17 | -----END PGP SIGNATURE-----
18 |
--------------------------------------------------------------------------------
/checks/kafka_2.10-0.8.2.1.tgz.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP SIGNATURE-----
2 | Version: GnuPG/MacGPG2 v2.0.22 (Darwin)
3 | Comment: GPGTools - https://gpgtools.org
4 |
5 | iQIcBAABCgAGBQJU7561AAoJEKth9w/gph7qQnkP/RQ4BHi3Cd/0nZscvoSPt9zw
6 | 4iMS0n7dKFDRkZXRYbodUt4H4No6RtL65ZKigqAij9GO0hqpqG4X79JJl9wXFGMm
7 | 01CVhOiFIzBjYWUn3ot8fThwoBfkqm/BHXMahRePzvazL1sgX5drcZWo4igW4ybm
8 | l5g0LBAj8guR1+soqlpB8A85svC4L2XAN7WpSYtkyB+x335lfK6ssq1vOy0jugql
9 | wGmgvsoMeZohVXj7NoweHfryIJXx+Qgm7PRjAiNBkBG1Qiguz4rk33fZeGpadMXb
10 | el7LtYYHU0PdTeplzH1LGlKN79d44T9LTyKpWrxH/bKMGjUKmne6LwCPnKCR+Cgn
11 | xlok+x/Ar0sZUVd7oPqYPfN+WyNWvhqKZRd5X+Y8fEUrk8O7xUK4879tu81zbKhm
12 | A44lhtkj6A+IulqROkUXwwEyOiOKcDAvCML4uy+2HZjKjkT0gsd+4LBstq0zGqSX
13 | A7GIqd9aWfgWe2ucK/rCmLoAI5Csu1knr3lUvJ1ASOT7FnJ0VCjmZXydHJG9ZgdA
14 | K+4Dp6LoiwZBs+hoduJON695wFHJre+G+N0Y5AjJKKC0zyY/W99I5SOHr6muRcfp
15 | oa7krfxMb8fAYpFTBZ40yYAXUGDZ4pATZDPb7Lnk52xaVbE9GUskAUh/+QMzU7MF
16 | I4k/oTQjUMSQoAhdZaby
17 | =om7K
18 | -----END PGP SIGNATURE-----
19 |
--------------------------------------------------------------------------------
/src/main/scala/Stub.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements.  See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License.  You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 |
18 | package net.elodina.testing
19 |
20 | object Stub {
21 |   def main(args: Array[String]) = {
22 |     println("stub")
23 |   }
24 | }
--------------------------------------------------------------------------------
/vagrant/README.md:
--------------------------------------------------------------------------------
1 | # Apache Kafka #
2 |
3 | Using Vagrant to get up and running.
4 |
5 | 1) Install Vagrant [http://www.vagrantup.com/](http://www.vagrantup.com/)
6 | 2) Install Virtual Box [https://www.virtualbox.org/](https://www.virtualbox.org/)
7 |
8 | In the main kafka folder
9 |
10 | 1) ./sbt update
11 | 2) ./sbt package
12 | 3) ./sbt assembly-package-dependency
13 | 4) vagrant up
14 |
15 | Once this is done:
16 | * Zookeeper will be running on 192.168.50.5
17 | * Broker 1 on 192.168.50.10
18 | * Broker 2 on 192.168.50.20
19 | * Broker 3 on 192.168.50.30
20 |
21 | When you are all up and running you will be back at a command prompt.
22 |
23 | If you want you can log in to the machines using vagrant ssh but you don't need to.
24 |
25 | You can access the brokers and zookeeper by their IP
26 |
27 | e.g.
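from your host machine, using the console clients that ship with the Kafka distribution: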
28 |
29 | bin/kafka-console-producer.sh --broker-list 192.168.50.10:9092,192.168.50.20:9092,192.168.50.30:9092 --topic sandbox
30 |
31 | bin/kafka-console-consumer.sh --zookeeper 192.168.50.5:2181 --topic sandbox --from-beginning
32 |
--------------------------------------------------------------------------------
/vagrant/verify_hash.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | hs=$(gpg --print-md $2 $1)               # $1 = release tarball, $2 = digest name passed to gpg (MD5/SHA1/SHA512)
34 | hf=$(cat /vagrant/checks/$1.$3)          # $3 = extension of the expected checksum file under checks/
35 | if [ "$hs" = "$hf" ]; then
36 | echo "$2 ok"
37 | else
38 | echo "error $hs != $hf"
39 | founderror
40 | fi
41 |
42 | exitscript
--------------------------------------------------------------------------------
/vagrant/bootstrap.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
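# Provisioning entry point: installs git, clones scala-kafka into /opt/elodina, and hands off to vagrant/init.sh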
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | apt-get install -y git
34 |
35 | cd /opt
36 | mkdir elodina
37 | chmod a+rw elodina
38 | cd elodina
39 | git clone https://github.com/elodina/scala-kafka.git
40 | cd scala-kafka/vagrant
41 | ./init.sh
42 |
43 | exitscript
--------------------------------------------------------------------------------
/vagrant/verify.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | release=$1
34 |
35 | gpg --import /vagrant/KEYS
36 | gpg --verify $release.asc $release
37 |
38 | /vagrant/vagrant/verify_hash.sh $release MD5 md5
39 | /vagrant/vagrant/verify_hash.sh $release SHA1 sha1
40 | /vagrant/vagrant/verify_hash.sh $release SHA512 sha2
41 |
42 | exitscript
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
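# Packages the Confluent schema registry 1.0 on top of the stealthly/docker-java base image; built and run by start-scala-go.sh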
15 |
16 | FROM stealthly/docker-java
17 |
18 | ENV REGISTRY_VERSION 1.0
19 | ENV SCALA_VERSION 2.10.4
20 | ENV REGISTRY_URL http://packages.confluent.io/archive/$REGISTRY_VERSION/confluent-$REGISTRY_VERSION-$SCALA_VERSION.tar.gz
21 | ENV REGISTRY_HOME /opt/confluent-$REGISTRY_VERSION
22 |
23 | RUN wget -q $REGISTRY_URL -O /tmp/confluent-$REGISTRY_VERSION-$SCALA_VERSION.tgz
24 | RUN tar xfz /tmp/confluent-$REGISTRY_VERSION-$SCALA_VERSION.tgz -C /opt
25 |
26 | EXPOSE 8081
27 |
28 | CMD $REGISTRY_HOME/bin/schema-registry-start $REGISTRY_HOME/etc/schema-registry/schema-registry.properties
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | scala-kafka
2 | ===========
3 |
4 | Quick up and running using Scala for Apache Kafka
5 |
6 | Use Vagrant to get up and running.
7 |
8 | 1) Install Vagrant [http://www.vagrantup.com/](http://www.vagrantup.com/)
9 | 2) Install Virtual Box [https://www.virtualbox.org/](https://www.virtualbox.org/)
10 |
11 | In the main kafka folder
12 |
13 | 1) vagrant up
14 | 2) ./gradlew test
15 |
16 | Once this is done:
17 | * Zookeeper will be running on 192.168.86.5
18 | * Broker 1 on 192.168.86.10
19 | * All the tests in src/test/scala/* should pass
20 |
21 | If you want you can log in to the machines using vagrant ssh but you don't need to.
22 |
23 | You can access the brokers and zookeeper by their IP from your local machine without having to go into the VM.
24 |
25 | e.g.
26 |
27 | bin/kafka-console-producer.sh --broker-list 192.168.86.10:9092 --topic <topic>
28 |
29 | bin/kafka-console-consumer.sh --zookeeper 192.168.86.5:2181 --topic <topic> --from-beginning
30 |
31 | scala-go-kafka
32 | ===============
33 |
34 | Scala-kafka and Go-kafka working together using Apache Avro and Schema Registry
35 |
36 | Install Docker [https://docs.docker.com/installation/#installation](https://docs.docker.com/installation/#installation)
37 |
38 | In the main scala-kafka folder
39 |
40 | 1) ./gradlew scalago jar
41 | 2) ./start-scala-go.sh
42 | 3) ./stop-scala-go.sh
--------------------------------------------------------------------------------
/start-scala-go.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ZOOKEEPER=`docker ps -a | awk '{print $NF}' | grep "zookeeper$"`
4 | ZOOKEEPER_RUNNING=$?
5 | if [ $ZOOKEEPER_RUNNING -eq 0 ] ;
6 | then
7 |   echo "Zookeeper is already running"
8 | else
9 |   echo "Starting Zookeeper"
10 |   docker run --net=host --name zkserver -d stealthly/docker-zookeeper
11 |   sleep 5
12 | fi
13 |
14 | ID=1
15 | PORT=9092
16 | HOST_IP=localhost
17 |
18 | docker run --net=host --name=broker$ID -p $PORT:$PORT -e BROKER_ID=$ID -e HOST_IP=$HOST_IP -e PORT=$PORT -d stealthly/docker-kafka
19 | sleep 5
20 | IMAGE=$(docker images | grep "stealthly/confluent-platform " | awk '{print $3}')
21 | if [ -z $IMAGE ]; then
22 |   docker build -t stealthly/confluent-platform .
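# build the schema registry image from this repo's Dockerfile only when it is not already in the local image cache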
23 | fi
24 | docker run --name registry -d --net=host -p 8081:8081 stealthly/confluent-platform
25 |
26 | #let kafka initialize properly before establishing producers and consumers
27 | sleep 10
28 | go_topic=`head -c 100 /dev/urandom | base64 | sed 's/[+=/A-Z]//g' | tail -c 16`
29 | scala_topic=`head -c 100 /dev/urandom | base64 | sed 's/[+=/A-Z]//g' | tail -c 16`
30 |
31 | docker run --name scala --net=host -v $(pwd)/build/libs:/scala-kafka/build/libs -d stealthly/docker-java java -jar /scala-kafka/build/libs/scala-kafka-0.1.0.0.jar $scala_topic $go_topic
32 | docker run --name golang --net=host -v $(pwd):/go-files -d golang:1.3.0 /go-files/run-go.sh $go_topic $scala_topic
33 |
34 | # output logs to stdout
35 | docker logs -f scala & docker logs -f golang &
--------------------------------------------------------------------------------
/vagrant/kafkacat.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | apt-get -y update
34 | apt-get install -y wget git gcc g++ zlib1g-dev zlib1g make curl screen vim wget git
35 | mkdir -p /opt/github/edenhill
36 | chmod a+rw -R /opt/github/edenhill
37 | cd /opt/github/edenhill
38 | git clone https://github.com/edenhill/kafkacat.git
39 | cd /opt/github/edenhill/kafkacat
40 | ./bootstrap.sh
41 | cp kafkacat /usr/local/bin/
42 | exitscript
--------------------------------------------------------------------------------
/vagrant/kafka.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
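# Downloads the Kafka 0.8.2.1 release with its checksum and signature files, verifies it via verify.sh, and links it at /opt/apache/kafka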
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | mkdir -p /opt/apache
34 | cd /opt/apache
35 | version=0.8.2.1
36 | scala=2.10
37 | release=kafka_$scala-$version
38 |
39 | url=archive.apache.org/dist/kafka
40 | wget https://$url/$version/$release.tgz
41 | wget https://$url/$version/$release.tgz.md5
42 | wget https://$url/$version/$release.tgz.sha1
43 | wget https://$url/$version/$release.tgz.sha2
44 | wget https://$url/$version/$release.tgz.asc
45 | tar -xvf $release.tgz
46 | /vagrant/vagrant/verify.sh $release.tgz
47 | ln -s /opt/apache/$release kafka
48 | exitscript
49 |
--------------------------------------------------------------------------------
/vagrant/init.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | apt-get -y update
34 | apt-get install -y software-properties-common python-software-properties curl wget git screen ntp
35 | ntpq -p
36 | add-apt-repository -y ppa:webupd8team/java
37 | apt-get -y update
38 | /bin/echo debconf shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
39 | apt-get -y install oracle-java7-installer oracle-java7-set-default
40 |
41 | chmod a+rw -R /opt
42 | cd /
43 | ln -s /opt/elodina/scala-kafka/ vagrant
44 |
45 | /vagrant/vagrant/kafka.sh #install kafka
46 | /vagrant/vagrant/kafkacat.sh #install the kafkacat utility
47 |
48 | exitscript
--------------------------------------------------------------------------------
/vagrant/zk.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | apt-get -y update
34 | apt-get install -y software-properties-common python-software-properties
35 | add-apt-repository -y ppa:webupd8team/java
36 | apt-get -y update
37 | /bin/echo debconf shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
38 | apt-get -y install oracle-java6-installer oracle-java6-set-default
39 |
40 | /vagrant/vagrant/kafka.sh #install kafka
41 | /vagrant/vagrant/kafkacat.sh #install the kafkacat utility
42 |
43 | /opt/apache/kafka/bin/zookeeper-server-start.sh /opt/apache/kafka/config/zookeeper.properties 1>> /tmp/zk.log 2>> /tmp/zk.log &
44 |
45 | exitscript
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # -*- mode: ruby -*-
16 | # vi: set ft=ruby :
17 |
18 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
19 | VAGRANTFILE_API_VERSION = "2"
20 |
21 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
22 |   config.vm.box = "precise64"
23 |
24 |   # The url from where the 'config.vm.box' box will be fetched if it
25 |   # doesn't already exist on the user's system.
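  # (note: files.vagrantup.com has since been retired; newer Vagrant versions resolve boxes from Vagrant Cloud instead)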
26 |   config.vm.box_url = "http://files.vagrantup.com/precise64.box"
27 |
28 |   config.vm.define "zookeeper" do |zookeeper|
29 |     zookeeper.vm.network :private_network, ip: "192.168.86.5"
30 |     zookeeper.vm.provider :virtualbox do |vb|
31 |       vb.customize ["modifyvm", :id, "--memory", "512"]
32 |     end
33 |     zookeeper.vm.provision "shell", path: "vagrant/zk.sh"
34 |   end
35 |
36 |   config.vm.define "brokerOne" do |brokerOne|
37 |     brokerOne.vm.network :private_network, ip: "192.168.86.10"
38 |     brokerOne.vm.provider :virtualbox do |vb|
39 |       vb.customize ["modifyvm", :id, "--memory", "512"]
40 |     end
41 |     brokerOne.vm.provision "shell", path: "vagrant/broker.sh", :args => "1"
42 |   end
43 | end
44 |
--------------------------------------------------------------------------------
/src/main/scala/AkkaKafka.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements.  See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License.  You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 |
18 | package kafka.akka
19 |
20 | import scala.collection.JavaConverters._
21 | import org.apache.thrift.protocol.TBinaryProtocol
22 | import org.apache.thrift.transport.TIOStreamTransport
23 | import org.apache.thrift.{TSerializer, TDeserializer}
24 | import akka.actor.{Actor, Props, ActorSystem}
25 | import akka.routing.RoundRobinRouter
26 | import kafka.utils.Logging
27 | import java.util.concurrent.atomic._
28 | import kafka.producer._
29 |
30 | class KafkaAkkaProducer extends Actor with Logging {
31 |   private val producerCreated = new AtomicBoolean(false)
32 |
33 |   var producer: KafkaProducer = null
34 |
35 |   def init(topic: String, zklist: String) = {
36 |     if (producerCreated.getAndSet(true)) {
37 |       throw new RuntimeException(this.getClass.getSimpleName + " this kafka akka actor has a producer already")
38 |     }
39 |
40 |     producer = new KafkaProducer(topic, zklist)
41 |   }
42 |
43 |   def receive = {
44 |     case t: (String, String) ⇒ {
45 |       init(t._1, t._2)
46 |     }
47 |     case msg: String ⇒
48 |     {
49 |       producer.send(msg)
50 |     }
51 |   }
52 | }
--------------------------------------------------------------------------------
/vagrant/broker.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #!/bin/sh -Eux
17 |
18 | # Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
19 | trap founderror 1 2 3 15 ERR
20 |
21 | founderror()
22 | {
23 | exit 1
24 | }
25 |
26 | exitscript()
27 | {
28 | #remove lock file
29 | #rm $lockfile
30 | exit 0
31 | }
32 |
33 | apt-get -y update
34 | apt-get install -y software-properties-common python-software-properties
35 | add-apt-repository -y ppa:webupd8team/java
36 | apt-get -y update
37 | /bin/echo debconf shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
38 | apt-get -y install oracle-java6-installer oracle-java6-set-default
39 |
40 | chmod a+rw /opt
41 | cd /opt
42 | ln -s /vagrant kafka
43 | cd kafka
44 | IP=$(ifconfig | grep 'inet addr:'| grep 168 | grep 192|cut -d: -f2 | awk '{ print $1}')
45 | sed 's/broker.id=0/'broker.id=$1'/' /opt/kafka/config/server.properties > /tmp/prop1.tmp
46 | sed 's/#advertised.host.name=/'advertised.host.name=$IP'/' /tmp/prop1.tmp > /tmp/prop2.tmp
47 | sed 's/#host.name=localhost/'host.name=$IP'/' /tmp/prop2.tmp > /tmp/prop3.tmp
48 | sed 's/zookeeper.connect=localhost:2181/'zookeeper.connect=192.168.86.5:2181'/' /tmp/prop3.tmp > /opt/server.properties
49 |
50 | /vagrant/vagrant/kafka.sh #install kafka
51 | /vagrant/vagrant/kafkacat.sh #install the kafkacat utility
52 |
53 | /opt/apache/kafka/bin/kafka-server-start.sh /opt/server.properties 1>> /tmp/broker.log 2>> /tmp/broker.log &
54 |
55 | exitscript
--------------------------------------------------------------------------------
/src/test/scala/BaseSpec.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements.  See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License.  You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | package net.elodina.testing
18 |
19 | import java.util.Properties
20 |
21 | import org.apache.kafka.clients.producer.{KafkaProducer => NewKafkaProducer}
22 | import org.apache.kafka.clients.producer.ProducerConfig
23 |
24 | trait BaseSpec {
25 |   def createNewKafkaProducer(brokerList: String,
26 |                              acks: Int = -1,
27 |                              metadataFetchTimeout: Long = 3000L,
28 |                              blockOnBufferFull: Boolean = true,
29 |                              bufferSize: Long = 1024L * 1024L,
30 |                              retries: Int = 0): NewKafkaProducer[Array[Byte], Array[Byte]] = {
31 |
32 |     val producerProps = new Properties()
33 |     producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
34 |     producerProps.put(ProducerConfig.ACKS_CONFIG, acks.toString)
35 |     producerProps.put(ProducerConfig.METADATA_FETCH_TIMEOUT_CONFIG, metadataFetchTimeout.toString)
36 |     producerProps.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, blockOnBufferFull.toString)
37 |     producerProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferSize.toString)
38 |     producerProps.put(ProducerConfig.RETRIES_CONFIG, retries.toString)
39 |     producerProps.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, "100")
40 |     producerProps.put(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, "200")
41 |     producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
42 |     producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
43 |
44 |     new NewKafkaProducer[Array[Byte], Array[Byte]](producerProps)
45 |   }
46 | }
47 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem  Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windowz variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
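@rem CMD_LINE_ARGS is cleared below, then filled from %* unless no arguments were given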
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/src/main/scala/scalago/ScalaGoKafka.scala:
--------------------------------------------------------------------------------
1 | package scalago
2 |
3 | import java.util.{Properties, UUID}
4 | import kafka.utils.VerifiableProperties
5 | import org.apache.kafka.clients.producer.{ProducerRecord, ProducerConfig, KafkaProducer}
6 | import org.apache.avro.Schema
7 | import org.apache.avro.generic.{GenericData, GenericRecord}
8 | import kafka.consumer.{ConsumerConfig, Consumer}
9 | import io.confluent.kafka.serializers.{KafkaAvroDecoder, KafkaAvroSerializer}
10 |
11 | object ScalaGoKafka {
12 |   var readTopic: String = null
13 |   var writeTopic: String = null
14 |   val group = "ping-pong-scala-group"
15 |
16 |   val broker = "localhost:9092"
17 |   val zookeeper = "localhost:2181"
18 |   val schemaRepo = "http://localhost:8081"
19 |
20 |   val producerProps = new Properties()
21 |   producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker)
22 |   producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
23 |   producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
24 |   producerProps.put("schema.registry.url", schemaRepo)
25 |
26 |   val consumerProps = new Properties()
27 |   consumerProps.put("zookeeper.connect", zookeeper)
28 |   consumerProps.put("group.id", group)
29 |   consumerProps.put("auto.offset.reset", "smallest")
30 |   consumerProps.put("schema.registry.url", schemaRepo)
31 |
32 |   val consumerVerifiableProps = new VerifiableProperties(consumerProps)
33 |   val keyDecoder = new KafkaAvroDecoder(consumerVerifiableProps)
34 |   val valueDecoder = new KafkaAvroDecoder(consumerVerifiableProps)
35 |
36 |   lazy val producer = new KafkaProducer[String, GenericRecord](producerProps)
37 |   lazy val consumerIterator = Consumer.create(new ConsumerConfig(consumerProps)).createMessageStreams(Map(readTopic -> 1), keyDecoder, valueDecoder).get(readTopic).get(0).iterator()
38 |
39 |   def main(args: Array[String]) {
40 |     parseArgs(args)
41 |
42 |     println("scala > Started!")
43 |
44 |     val rec = new GenericData.Record(new Schema.Parser().parse(getClass.getResourceAsStream("/scalago.avsc")))
45 |     rec.put("counter", 1L)
46 |     rec.put("name", s"ping-pong-${System.currentTimeMillis()}")
47 |     rec.put("uuid", UUID.randomUUID().toString)
48 |
49 |     val record = new ProducerRecord[String, GenericRecord](writeTopic, "0", rec)
50 |     producer.send(record).get
51 |     pingPongLoop()
52 |   }
53 |
54 |   def parseArgs(args: Array[String]) {
55 |     if (args.length < 2) {
56 |       println("USAGE: ScalaGoKafka $READ_TOPIC $WRITE_TOPIC")
57 |       System.exit(1)
58 |     }
59 |
60 |     readTopic = args(0)
61 |     writeTopic = args(1)
62 |   }
63 |
64 |   def pingPongLoop() {
65 |     while (true) {
66 |       val messageAndMetadata = consumerIterator.next()
67 |       val record = messageAndMetadata.message().asInstanceOf[GenericRecord]
68 |       Thread.sleep(2000)
69 |
70 |       println("scala > received " + record)
71 |       modify(record)
72 |       producer.send(new ProducerRecord[String, GenericRecord](writeTopic, "0", record)).get
73 |     }
74 |   }
75 |
76 |   def modify(rec: GenericRecord) {
77 |     rec.put("counter", rec.get("counter").asInstanceOf[Long] + 1)
78 |     rec.put("uuid", UUID.randomUUID().toString)
79 |   }
80 | }
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
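# Log4j configuration for the test run: console output plus the standard Kafka broker log files, and a KAFKA appender that publishes log lines to the vagrant broker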
15 |
16 | kafka.logs.dir=logs
17 |
18 | log4j.rootLogger=INFO, stdout
19 |
20 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
21 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
22 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
23 |
24 | log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
25 | log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
26 | log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
27 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
28 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
29 |
30 | log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
31 | log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
32 | log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
33 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
34 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
35 |
36 | log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
37 | log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
38 | log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
39 | log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
40 | log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
41 |
42 | log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
43 | log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
44 | log4j.appender.cleanerAppender.File=log-cleaner.log
45 | log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
46 | log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
47 |
48 | log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
49 | log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
50 | log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
51 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
52 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
53 |
54 | log4j.appender.KAFKA=kafka.producer.KafkaLog4jAppender
55 | log4j.appender.KAFKA.Layout=org.apache.log4j.PatternLayout
56 | log4j.appender.KAFKA.Layout.ConversionPattern=[%d] %p %m (%c)%n
57 | log4j.appender.KAFKA.BrokerList=192.168.86.10:9092
58 | log4j.appender.KAFKA.Topic=logs
59 |
60 | # Turn on all our debugging info
61 | #log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, KAFKA
62 | #log4j.logger.kafka.client.ClientUtils=DEBUG, KAFKA
63 | #log4j.logger.kafka.perf=DEBUG, KAFKA
64 | #log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, KAFKA
65 | #log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
66 | log4j.logger.kafka=INFO, KAFKA, kafkaAppender
67 |
68 | log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
69 | log4j.additivity.kafka.network.RequestChannel$=false
70 |
71 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender
72 | #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
73 | #log4j.additivity.kafka.server.KafkaApis=false
74 | log4j.logger.kafka.request.logger=WARN, requestAppender
75 | log4j.additivity.kafka.request.logger=false
76 |
77 | log4j.logger.kafka.controller=TRACE, controllerAppender
78 | log4j.additivity.kafka.controller=false
79 |
80 | log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
81 | log4j.additivity.kafka.log.LogCleaner=false
82 | log4j.logger.kafka.log.Cleaner=INFO, cleanerAppender
83 | log4j.additivity.kafka.log.Cleaner=false
84 |
85 | log4j.logger.state.change.logger=TRACE, stateChangeAppender
86 | log4j.additivity.state.change.logger=false
87 |
--------------------------------------------------------------------------------
/config/server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # see kafka.server.KafkaConfig for additional details and defaults
16 |
17 | ############################# Server Basics #############################
18 |
19 | # The id of the broker. This must be set to a unique integer for each broker.
20 | broker.id=0
21 |
22 | ############################# Socket Server Settings #############################
23 |
24 | # The port the socket server listens on
25 | port=9092
26 |
27 | # Hostname the broker will bind to and advertise to producers and consumers.
28 | # If not set, the server will bind to all interfaces and advertise the value returned from
29 | # from java.net.InetAddress.getCanonicalHostName().
30 | #host.name=localhost
31 |
32 | # The number of threads handling network requests
33 | num.network.threads=2
34 |
35 | # The number of threads doing disk I/O
36 | num.io.threads=2
37 |
38 | # The send buffer (SO_SNDBUF) used by the socket server
39 | socket.send.buffer.bytes=1048576
40 |
41 | # The receive buffer (SO_RCVBUF) used by the socket server
42 | socket.receive.buffer.bytes=1048576
43 |
44 | # The maximum size of a request that the socket server will accept (protection against OOM)
45 | socket.request.max.bytes=104857600
46 |
47 |
48 | ############################# Log Basics #############################
49 |
50 | # A comma separated list of directories under which to store log files
51 | log.dirs=/tmp/kafka-logs
52 |
53 | # The number of logical partitions per topic per server. More partitions allow greater parallelism
54 | # for consumption, but also mean more files.
55 | num.partitions=1
56 |
57 | ############################# Log Flush Policy #############################
58 |
59 | # The following configurations control the flush of data to disk. This is among the most
60 | # important performance knobs in kafka.
61 | # There are a few important trade-offs here:
62 | #    1. Durability: Unflushed data may be lost if you are not using replication.
63 | #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
64 | #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
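# e.g. with the values below, a flush is forced after 10000 messages or after 1000 ms, whichever comes first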
65 | # The settings below allow one to configure the flush policy to flush data after a period of time or
66 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
67 |
68 | # The number of messages to accept before forcing a flush of data to disk
69 | log.flush.interval.messages=10000
70 |
71 | # The maximum amount of time a message can sit in a log before we force a flush
72 | log.flush.interval.ms=1000
73 |
74 | # Per-topic overrides for log.flush.interval.ms
75 | #log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
76 |
77 | ############################# Log Retention Policy #############################
78 |
79 | # The following configurations control the disposal of log segments. The policy can
80 | # be set to delete segments after a period of time, or after a given size has accumulated.
81 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
82 | # from the end of the log.
83 |
84 | # The minimum age of a log file to be eligible for deletion
85 | log.retention.hours=168
86 |
87 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
88 | # segments don't drop below log.retention.bytes.
89 | #log.retention.bytes=1073741824
90 |
91 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
92 | log.segment.bytes=536870912
93 |
94 | # The interval at which log segments are checked to see if they can be deleted according
95 | # to the retention policies
96 | log.cleanup.interval.mins=1
97 |
98 | ############################# Zookeeper #############################
99 |
100 | # Zookeeper connection string (see zookeeper docs for details).
101 | # This is a comma separated host:port pairs, each corresponding to a zk
102 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
103 | # You can also append an optional chroot string to the urls to specify the
104 | # root directory for all kafka znodes.
105 | zookeeper.connect=localhost:2181
106 |
107 | # Timeout in ms for connecting to zookeeper
108 | zookeeper.connection.timeout.ms=1000000
109 |
110 |
111 |
--------------------------------------------------------------------------------
/src/main/scala/KafkaConsumer.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements.  See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License.  You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 |
18 | package kafka.consumer
19 |
20 | import kafka.message._
21 | import kafka.serializer._
22 | import kafka.utils._
23 | import java.util.Properties
24 | import kafka.utils.Logging
25 | import scala.collection.JavaConversions._
26 |
27 | class KafkaConsumer(
28 |   topic: String,
29 |   /** topic
30 |    * The high-level API hides the details of brokers from the consumer and allows consuming off the cluster of machines
31 |    * without concern for the underlying topology. It also maintains the state of what has been consumed. The high-level API
32 |    * also provides the ability to subscribe to topics that match a filter expression (i.e., either a whitelist or a blacklist
33 |    * regular expression). This topic is a whitelist only but can change with re-factoring below on the filterSpec
34 |    */
35 |   groupId: String,
36 |   /** groupId
37 |    * A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same
38 |    * group id multiple processes indicate that they are all part of the same consumer group.
39 |    */
40 |   zookeeperConnect: String,
41 |   /**
42 |    * Specifies the zookeeper connection string in the form hostname:port where host and port are the host and port of
43 |    * a zookeeper server. To allow connecting through other zookeeper nodes when that zookeeper machine is down you can also
44 |    * specify multiple hosts in the form hostname1:port1,hostname2:port2,hostname3:port3. The server may also have a zookeeper
45 |    * chroot path as part of its zookeeper connection string which puts its data under some path in the global zookeeper namespace.
46 |    * If so the consumer should use the same chroot path in its connection string. For example to give a chroot path of /chroot/path
47 |    * you would give the connection string as hostname1:port1,hostname2:port2,hostname3:port3/chroot/path.
48 |    */
49 |   readFromStartOfStream: Boolean = true
50 |   /**
51 |    * What to do when there is no initial offset in Zookeeper or if an offset is out of range:
52 |    *   1) smallest : automatically reset the offset to the smallest offset
53 |    *   2) largest : automatically reset the offset to the largest offset
54 |    *   3) anything else: throw exception to the consumer. If this is set to largest, the consumer may lose some
55 |          messages when the number of partitions, for the topics it subscribes to, changes on the broker.
56 |
57 |      ****************************************************************************************
58 |      To prevent data loss during partition addition, set auto.offset.reset to smallest
59 |
60 |      It makes sense to set this to false if you only care about data produced after you
61 |      connect: once you are on the stream, only new messages come through. You can audit/reconcile
62 |      in another consumer; this flag lets you toggle between catching up plus new data, or
63 |      new data only. Reading will also block waiting for new messages, so
64 |      it makes a good listener.
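     For example: a brand-new groupId with readFromStartOfStream=true replays the topic from the earliest offset, while false only sees messages produced after the consumer connects.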
65 | 66 | //readFromStartOfStream: Boolean = true 67 | readFromStartOfStream: Boolean = false 68 | **************************************************************************************** 69 | 70 | */ 71 | ) extends Logging { 72 | 73 | val props = new Properties() 74 | props.put("group.id", groupId) 75 | props.put("zookeeper.connect", zookeeperConnect) 76 | props.put("auto.offset.reset", if(readFromStartOfStream) "smallest" else "largest") 77 | 78 | val config = new ConsumerConfig(props) 79 | val connector = Consumer.create(config) 80 | 81 | val filterSpec = new Whitelist(topic) 82 | 83 | info("setup:start topic=%s for zk=%s and groupId=%s".format(topic,zookeeperConnect,groupId)) 84 | val stream = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), new DefaultDecoder()).get(0) 85 | info("setup:complete topic=%s for zk=%s and groupId=%s".format(topic,zookeeperConnect,groupId)) 86 | 87 | def read(write: (Array[Byte])=>Unit) = { 88 | info("reading on stream now") 89 | for(messageAndTopic <- stream) { 90 | try { 91 | info("writing from stream") 92 | write(messageAndTopic.message) 93 | info("written to stream") 94 | } catch { 95 | case e: Throwable => 96 | if (true) { //TODO: make the skip-or-rethrow choice configurable instead of hardcoding it here 97 | error("Error processing message, skipping this message: ", e) 98 | } else { 99 | throw e 100 | } 101 | } 102 | } 103 | } 104 | 105 | def close() { 106 | connector.shutdown() 107 | } 108 | } -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 10 | DEFAULT_JVM_OPTS="" 11 | 12 | APP_NAME="Gradle" 13 | APP_BASE_NAME=`basename "$0"` 14 | 15 | # Use the maximum available, or set MAX_FD != -1 to use that value. 16 | MAX_FD="maximum" 17 | 18 | warn ( ) { 19 | echo "$*" 20 | } 21 | 22 | die ( ) { 23 | echo 24 | echo "$*" 25 | echo 26 | exit 1 27 | } 28 | 29 | # OS specific support (must be 'true' or 'false'). 30 | cygwin=false 31 | msys=false 32 | darwin=false 33 | case "`uname`" in 34 | CYGWIN* ) 35 | cygwin=true 36 | ;; 37 | Darwin* ) 38 | darwin=true 39 | ;; 40 | MINGW* ) 41 | msys=true 42 | ;; 43 | esac 44 | 45 | # For Cygwin, ensure paths are in UNIX format before anything is touched. 46 | if $cygwin ; then 47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 48 | fi 49 | 50 | # Attempt to set APP_HOME 51 | # Resolve links: $0 may be a link 52 | PRG="$0" 53 | # Need this for relative symlinks. 54 | while [ -h "$PRG" ] ; do 55 | ls=`ls -ld "$PRG"` 56 | link=`expr "$ls" : '.*-> \(.*\)$'` 57 | if expr "$link" : '/.*' > /dev/null; then 58 | PRG="$link" 59 | else 60 | PRG=`dirname "$PRG"`"/$link" 61 | fi 62 | done 63 | SAVED="`pwd`" 64 | cd "`dirname \"$PRG\"`/" >&- 65 | APP_HOME="`pwd -P`" 66 | cd "$SAVED" >&- 67 | 68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 69 | 70 | # Determine the Java command to use to start the JVM.
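# (JAVA_HOME, when set, takes precedence; otherwise the wrapper falls back to `java` on the PATH.)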
71 | if [ -n "$JAVA_HOME" ] ; then 72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 73 | # IBM's JDK on AIX uses strange locations for the executables 74 | JAVACMD="$JAVA_HOME/jre/sh/java" 75 | else 76 | JAVACMD="$JAVA_HOME/bin/java" 77 | fi 78 | if [ ! -x "$JAVACMD" ] ; then 79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 80 | 81 | Please set the JAVA_HOME variable in your environment to match the 82 | location of your Java installation." 83 | fi 84 | else 85 | JAVACMD="java" 86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 87 | 88 | Please set the JAVA_HOME variable in your environment to match the 89 | location of your Java installation." 90 | fi 91 | 92 | # Increase the maximum file descriptors if we can. 93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then 94 | MAX_FD_LIMIT=`ulimit -H -n` 95 | if [ $? -eq 0 ] ; then 96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 97 | MAX_FD="$MAX_FD_LIMIT" 98 | fi 99 | ulimit -n $MAX_FD 100 | if [ $? -ne 0 ] ; then 101 | warn "Could not set maximum file descriptor limit: $MAX_FD" 102 | fi 103 | else 104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 105 | fi 106 | fi 107 | 108 | # For Darwin, add options to specify how the application appears in the dock 109 | if $darwin; then 110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 111 | fi 112 | 113 | # For Cygwin, switch paths to Windows format before running java 114 | if $cygwin ; then 115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules 158 | function splitJvmOpts() { 159 | JVM_OPTS=("$@") 160 | } 161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS 162 | 
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" 163 | 164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" 165 | -------------------------------------------------------------------------------- /src/main/scala/KafkaProducer.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package kafka.producer 19 | 20 | import scala.collection.JavaConversions._ 21 | import joptsimple._ 22 | import java.util.{Properties, UUID} 23 | import java.io._ 24 | import kafka.common._ 25 | import kafka.message._ 26 | import kafka.serializer._ 27 | 28 | 29 | case class KafkaProducer( 30 | topic: String, 31 | brokerList: String, 32 | /** brokerList 33 | * This is for bootstrapping and the producer will only use it for getting metadata (topics, partitions and replicas). 34 | * The socket connections for sending the actual data will be established based on the broker information returned in 35 | * the metadata. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a 36 | * subset of brokers. 37 | */ 38 | clientId: String = UUID.randomUUID().toString, 39 | /** clientId 40 | * The client id is a user-specified string sent in each request to help trace calls. It should logically identify 41 | * the application making the request. 42 | */ 43 | synchronously: Boolean = true, 44 | /** synchronously 45 | * This parameter specifies whether the messages are sent asynchronously in a background thread. 46 | * Valid values are false for asynchronous send and true for synchronous send. By setting the producer 47 | * to async, we allow batching together of requests (which is great for throughput) but open up the possibility 48 | * that a failure of the client machine drops unsent data. 49 | */ 50 | compress: Boolean = true, 51 | /** compress 52 | * This parameter allows you to specify the compression codec for all data generated by this producer. 53 | * When set to true, gzip is used. To override this and use snappy you need to implement that as the default 54 | * codec for compression using SnappyCompressionCodec.codec instead of DefaultCompressionCodec.codec below. 55 | */ 56 | 57 | batchSize: Integer = 200, 58 | /** batchSize 59 | * The number of messages to send in one batch when using async mode. 60 | * The producer will wait until either this number of messages are ready 61 | * to send or queue.buffer.max.ms is reached. 62 | */ 63 | messageSendMaxRetries: Integer = 3, 64 | /** messageSendMaxRetries 65 | * This property will cause the producer to automatically retry a failed send request.
66 | * This property specifies the number of retries when such failures occur. Note that 67 | * setting a non-zero value here can lead to duplicates in the case of network errors 68 | * that cause a message to be sent but the acknowledgement to be lost. 69 | */ 70 | requestRequiredAcks: Integer = -1 71 | /** requestRequiredAcks 72 | * 0) which means that the producer never waits for an acknowledgement from the broker (the same behavior as 0.7). 73 | * This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). 74 | * 1) which means that the producer gets an acknowledgement after the leader replica has received the data. This option provides 75 | * better durability as the client waits until the server acknowledges the request as successful (only messages that were 76 | * written to the now-dead leader but not yet replicated will be lost). 77 | * -1) which means that the producer gets an acknowledgement after all in-sync replicas have received the data. This option 78 | * provides the best durability; we guarantee that no messages will be lost as long as at least one in-sync replica remains. 79 | */ 80 | ) { 81 | 82 | val props = new Properties() 83 | 84 | val codec = if(compress) DefaultCompressionCodec.codec else NoCompressionCodec.codec 85 | 86 | props.put("compression.codec", codec.toString) 87 | props.put("producer.type", if(synchronously) "sync" else "async") 88 | props.put("metadata.broker.list", brokerList) 89 | props.put("batch.num.messages", batchSize.toString) 90 | props.put("message.send.max.retries", messageSendMaxRetries.toString) 91 | props.put("request.required.acks",requestRequiredAcks.toString) 92 | props.put("client.id",clientId.toString) 93 | 94 | val producer = new Producer[AnyRef, AnyRef](new ProducerConfig(props)) 95 | 96 | def kafkaMessage(message: Array[Byte], partition: Array[Byte]): KeyedMessage[AnyRef, AnyRef] = { 97 | if (partition == null) { 98 | new KeyedMessage(topic,message) 99 | } else { 100 | new KeyedMessage(topic,partition,message) 101 | } 102 | } 103 | 104 | def send(message: String, partition: String = null): Unit = send(message.getBytes("UTF8"), if (partition == null) null else partition.getBytes("UTF8")) 105 | 106 | def send(message: Array[Byte], partition: Array[Byte]): Unit = { 107 | try { 108 | producer.send(kafkaMessage(message, partition)) 109 | } catch { 110 | case e: Exception => 111 | e.printStackTrace 112 | System.exit(1) 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/test/scala/AkkaKafkaSpec.scala: -------------------------------------------------------------------------------- 1 | package net.elodina.testing 2 | 3 | import akka.actor._ 4 | import akka.util.Timeout 5 | import akka.testkit.{ImplicitSender, TestKit} 6 | import com.sclasen.akka.kafka._ 7 | import kafka.akka.KafkaAkkaProducer 8 | import kafka.consumer.Whitelist 9 | import kafka.producer.KafkaProducer 10 | import kafka.serializer.StringDecoder 11 | import kafka.utils.Logging 12 | import org.specs2.mutable.{After, Specification} 13 | import java.util.UUID 14 | import concurrent.duration._ 15 | 16 | import org.specs2.time.NoTimeConversions 17 | 18 | abstract class AkkaTestkit extends TestKit(ActorSystem()) with After with ImplicitSender { 19 | // make sure we shut down the actor system after all tests have run 20 | def after = system.shutdown() 21 | } 22 | 23 | class ConsumerActor(testActor: ActorRef, onReceive: (String => _) = (message: String) =>
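// ConsumerActor: runs onReceive for each consumed string, replies StreamFSM.Processed
// to ack the message to the akka-kafka connector (its flow-control protocol), and echoes
// the message to testActor so the specs below can assert on what was consumed.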
()) extends Actor { 24 | def receive = { 25 | case message: String => 26 | onReceive(message) 27 | sender ! StreamFSM.Processed 28 | testActor ! message 29 | } 30 | } 31 | 32 | class BatchActor[T](testActor: ActorRef, onReceive: (IndexedSeq[_] => T)) extends Actor { 33 | def receive = { 34 | case xs: IndexedSeq[_] => 35 | onReceive(xs) 36 | sender ! BatchConnectorFSM.BatchProcessed 37 | testActor ! xs 38 | } 39 | } 40 | 41 | class AkkaKafkaSpec extends Specification with NoTimeConversions with Logging { 42 | sequential 43 | 44 | val BROKERS = "192.168.86.10:9092" 45 | val ZOOKEEPER_CONNECT = "192.168.86.5:2181" 46 | 47 | val messages = 100 48 | val timeout = 10.seconds 49 | val batchTimeout = 5.seconds 50 | 51 | "Simple Producer and AkkaConsumer" should { 52 | "send string to broker and consume that string back" in new AkkaTestkit { 53 | val testMessage = UUID.randomUUID().toString 54 | val testTopic = UUID.randomUUID().toString 55 | val testGroupId = UUID.randomUUID().toString 56 | 57 | info(">>> starting sample broker testing") 58 | val producer = new KafkaProducer(testTopic, BROKERS) 59 | producer.send(testMessage) 60 | info(">>> message sent") 61 | 62 | var testStatus = false 63 | 64 | val actor = system.actorOf(Props(new ConsumerActor(testActor, (message: String) => { 65 | if (message == testMessage) testStatus = true 66 | }))) 67 | val consumerProps = AkkaConsumerProps.forSystem(system, ZOOKEEPER_CONNECT, testTopic, testGroupId, 1, new StringDecoder(), new StringDecoder(), actor) 68 | val consumer = new AkkaConsumer(consumerProps) 69 | info(">>> starting consumer") 70 | consumer.start() 71 | expectMsg(testMessage) 72 | testStatus must beTrue 73 | consumer.stop() 74 | } 75 | 76 | s"send $messages messages to broker and consume them back in different consumer groups" in new AkkaTestkit { 77 | val testMessage = UUID.randomUUID().toString 78 | val testTopic = UUID.randomUUID().toString 79 | val testGroupId_1 = UUID.randomUUID().toString 80 | val testGroupId_2 = UUID.randomUUID().toString 81 | 82 | info(">>> starting sample broker testing") 83 | val producer = new KafkaProducer(testTopic, BROKERS) 84 | (1 to messages).foreach(i => producer.send(UUID.randomUUID().toString)) 85 | info(">>> messages sent") 86 | 87 | val actor1 = system.actorOf(Props(new ConsumerActor(testActor))) 88 | val consumerProps1 = AkkaConsumerProps.forSystem(system, ZOOKEEPER_CONNECT, testTopic, testGroupId_1, 2, new StringDecoder(), new StringDecoder(), actor1) 89 | 90 | val actor2 = system.actorOf(Props(new ConsumerActor(testActor))) 91 | val consumerProps2 = AkkaConsumerProps.forSystem(system, ZOOKEEPER_CONNECT, testTopic, testGroupId_2, 2, new StringDecoder(), new StringDecoder(), actor2) 92 | 93 | val consumer1 = new AkkaConsumer(consumerProps1) 94 | val consumer2 = new AkkaConsumer(consumerProps2) 95 | consumer1.start() 96 | consumer2.start() 97 | 98 | receiveN(messages * 2, timeout) 99 | consumer1.stop() 100 | consumer2.stop() 101 | } 102 | 103 | "work fine with topic filters" in new AkkaTestkit { 104 | val white = "white" 105 | val black = "black" 106 | val whitelistedTopic = white + System.currentTimeMillis() 107 | val blacklistedTopic = black + System.currentTimeMillis() 108 | val whiteProducer = new KafkaProducer(whitelistedTopic, BROKERS) 109 | whiteProducer.send(white) 110 | val blackProducer = new KafkaProducer(blacklistedTopic, BROKERS) 111 | blackProducer.send(black) 112 | 113 | val actor = system.actorOf(Props(new ConsumerActor(testActor, (message: String) => { 114 | message must_== white 115 | }))) 
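// forSystemWithFilter subscribes by topic filter: only topics matching the
// Whitelist regex "white.*" are consumed, so the message sent to blacklistedTopic
// is never delivered to this consumer.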
116 | val consumerProps = AkkaConsumerProps.forSystemWithFilter(system, ZOOKEEPER_CONNECT, new Whitelist(s"$white.*"), UUID.randomUUID().toString, 2, new StringDecoder(), new StringDecoder(), actor) 117 | val consumer = new AkkaConsumer(consumerProps) 118 | consumer.start() 119 | receiveWhile(max = timeout/2) { 120 | case msg: String => msg must_== white 121 | } 122 | consumer.stop() 123 | } 124 | } 125 | 126 | "AkkaProducer and AkkaConsumer" should { 127 | s"send $messages messages with Akka producer and consume them back" in new AkkaTestkit { 128 | val testTopic = UUID.randomUUID().toString 129 | val testGroupId = UUID.randomUUID().toString 130 | val producer = system.actorOf(Props[KafkaAkkaProducer]) 131 | 132 | producer !(testTopic, BROKERS) 133 | (1 to messages).foreach(i => producer ! UUID.randomUUID().toString) 134 | 135 | val actor = system.actorOf(Props(new ConsumerActor(testActor))) 136 | val consumerProps = AkkaConsumerProps.forSystem(system, ZOOKEEPER_CONNECT, testTopic, testGroupId, 1, new StringDecoder(), new StringDecoder(), actor) 137 | val consumer = new AkkaConsumer(consumerProps) 138 | consumer.start() 139 | receiveN(messages, timeout) 140 | 141 | //produce 50 more messages 142 | (1 to messages / 2).foreach(i => producer ! UUID.randomUUID().toString) 143 | receiveN(messages / 2, timeout) 144 | consumer.stop() 145 | } 146 | } 147 | 148 | "AkkaProducer and AkkaBatchConsumer" should { 149 | "send and receive 100 messages in one batch" in new AkkaTestkit { 150 | val testTopic = UUID.randomUUID().toString 151 | val testGroupId = UUID.randomUUID().toString 152 | val producer = system.actorOf(Props[KafkaAkkaProducer]) 153 | 154 | producer ! testTopic -> BROKERS 155 | (1 to messages).foreach(i => producer ! UUID.randomUUID().toString) 156 | 157 | val actor = system.actorOf(Props(new BatchActor(testActor, (xs: IndexedSeq[_]) => { 158 | xs.length must_== messages 159 | }))) 160 | val consumerProps = batchConsumerProps(system, testTopic, testGroupId, actor, batchTimeout) 161 | val consumer = new AkkaBatchConsumer(consumerProps) 162 | consumer.start() 163 | //we expect EXACTLY 1 message within the given time 164 | within(timeout) { 165 | receiveN(1) 166 | expectNoMsg() 167 | } 168 | consumer.stop() 169 | } 170 | 171 | "ensure that non-full batches won't be received until batch timeout comes" in new AkkaTestkit { 172 | val testTopic = UUID.randomUUID().toString 173 | val testGroupId = UUID.randomUUID().toString 174 | val producer = system.actorOf(Props[KafkaAkkaProducer]) 175 | 176 | producer ! testTopic -> BROKERS 177 | 178 | val halfMessages = messages / 2 179 | val actor = system.actorOf(Props(new BatchActor(testActor, (xs: IndexedSeq[_]) => { 180 | xs.length must_== halfMessages 181 | }))) 182 | val consumerProps = batchConsumerProps(system, testTopic, testGroupId, actor, batchTimeout) 183 | val consumer = new AkkaBatchConsumer(consumerProps) 184 | consumer.start() 185 | //produce halfMessages new messages and make sure they are not consumed until the batchTimeout comes 186 | within(batchTimeout) { 187 | (1 to halfMessages).foreach(i => producer ! 
UUID.randomUUID().toString) 188 | expectNoMsg() 189 | } 190 | //now we should receive exactly one batch of size halfMessages 191 | receiveN(1) 192 | expectNoMsg() 193 | consumer.stop() 194 | } 195 | } 196 | 197 | private def batchConsumerProps(system: ActorSystem, topic: String, group: String, actor: ActorRef, batchTimeout: Timeout): AkkaBatchConsumerProps[String, String, String, IndexedSeq[String]] = { 198 | AkkaBatchConsumerProps.forSystem[String, String, String, IndexedSeq[String]]( 199 | system = system, 200 | zkConnect = ZOOKEEPER_CONNECT, 201 | topic = topic, 202 | group = group, 203 | streams = 1, 204 | keyDecoder = new StringDecoder(), 205 | msgDecoder = new StringDecoder(), 206 | receiver = actor, 207 | batchHandler = (batch: IndexedSeq[String]) => batch, 208 | batchSize = messages, 209 | batchTimeout = batchTimeout) 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/test/scala/KafkaSpec.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package net.elodina.testing 19 | 20 | import java.util.concurrent.{ExecutionException, TimeUnit} 21 | 22 | import org.apache.kafka.clients.producer.{Callback, RecordMetadata, ProducerRecord} 23 | import org.apache.kafka.common.errors.RecordTooLargeException 24 | import org.specs2.mutable._ 25 | import java.util.UUID 26 | import kafka.consumer._ 27 | import kafka.producer._ 28 | import kafka.utils._ 29 | import kafka.akka._ 30 | import akka.actor.{Props, ActorSystem} 31 | import akka.routing.RoundRobinRouter 32 | 33 | import scala.concurrent.duration.FiniteDuration 34 | import scala.concurrent.{Await, Promise} 35 | 36 | class KafkaSpec extends Specification with Logging with BaseSpec { 37 | 38 | 39 | "Simple Producer and Consumer" should { 40 | "send string to broker and consume that string back" in { 41 | val testMessage = UUID.randomUUID().toString 42 | val testTopic = UUID.randomUUID().toString 43 | val groupId_1 = UUID.randomUUID().toString 44 | 45 | var testStatus = false 46 | 47 | info("starting sample broker testing") 48 | val producer = new KafkaProducer(testTopic,"192.168.86.10:9092") 49 | producer.send(testMessage) 50 | 51 | val consumer = new KafkaConsumer(testTopic,groupId_1,"192.168.86.5:2181") 52 | 53 | def exec(binaryObject: Array[Byte]) = { 54 | val message = new String(binaryObject) 55 | info("testMessage = " + testMessage + " and consumed message = " + message) 56 | testMessage must_== message 57 | consumer.close() 58 | testStatus = true 59 | } 60 | 61 | info("KafkaSpec is waiting some seconds") 62 | consumer.read(exec) 63 | info("KafkaSpec consumed") 64 | 65 | testStatus must beTrue // a failure inside exec is swallowed by KafkaConsumer.read, so assert the flag here 66 | } 67 | 68 | "send string to broker and consume that string back in different consumer groups" in { 69 | val testMessage = UUID.randomUUID().toString 70 | val testTopic = UUID.randomUUID().toString 71 | val groupId_1 = UUID.randomUUID().toString 72 | val groupId_2 = UUID.randomUUID().toString 73 | 74 | var testStatus1 = false 75 | var testStatus2 = false 76 | 77 | info("starting sample broker testing") 78 | val producer = new KafkaProducer(testTopic,"192.168.86.10:9092") 79 | producer.send(testMessage) 80 | 81 | val consumer1 = new KafkaConsumer(testTopic,groupId_1,"192.168.86.5:2181") 82 | 83 | def exec1(binaryObject: Array[Byte]) = { 84 | val message1 = new String(binaryObject) 85 | info("testMessage 1 = " + testMessage + " and consumed message 1 = " + message1) 86 | testMessage must_== message1 87 | consumer1.close() 88 | testStatus1 = true 89 | } 90 | 91 | info("KafkaSpec : consumer 1 - is waiting some seconds") 92 | consumer1.read(exec1) 93 | info("KafkaSpec : consumer 1 - consumed") 94 | 95 | val consumer2 = new KafkaConsumer(testTopic,groupId_2,"192.168.86.5:2181") 96 | 97 | def exec2(binaryObject: Array[Byte]) = { 98 | val message2 = new String(binaryObject) 99 | info("testMessage 2 = " + testMessage + " and consumed message 2 = " + message2) 100 | testMessage must_== message2 101 | consumer2.close() 102 | testStatus2 = true 103 | } 104 | 105 | info("KafkaSpec : consumer 2 - is waiting some seconds") 106 | consumer2.read(exec2) 107 | info("KafkaSpec : consumer 2 - consumed") 108 | 109 | (testStatus1 && testStatus2) must beTrue // a failure inside exec is swallowed by KafkaConsumer.read, so assert both flags here 110 | } 111 | } 112 | 113 | "New Shiny Producer and Simple Consumer" should { 114 | "send string to broker and consume that string back" in { 115 | 116 | val testMessage = UUID.randomUUID().toString 117 | val
testTopic = UUID.randomUUID().toString 118 | val groupId_1 = UUID.randomUUID().toString 119 | 120 | var testStatus = false 121 | 122 | info("starting sample broker testing") 123 | val newProducer = createNewKafkaProducer("192.168.86.10:9092") 124 | val record = new ProducerRecord(testTopic, 0, "key".getBytes, testMessage.getBytes) 125 | newProducer.send(record) 126 | 127 | val consumer = new KafkaConsumer(testTopic, groupId_1, "192.168.86.5:2181") 128 | 129 | def exec(binaryObject: Array[Byte]) = { 130 | val message = new String(binaryObject) 131 | info("testMessage = " + testMessage + " and consumed message = " + message) 132 | testMessage must_== message 133 | consumer.close() 134 | testStatus = true 135 | } 136 | 137 | info("KafkaSpec is waiting some seconds") 138 | consumer.read(exec) 139 | info("KafkaSpec consumed") 140 | 141 | testStatus must beTrue // a failure inside exec is swallowed by KafkaConsumer.read, so assert the flag here 142 | } 143 | 144 | } 145 | 146 | "New Shiny Producer" should { 147 | "get the same data in the callback and the send request result" in { 148 | val testMessage = UUID.randomUUID().toString 149 | val testTopic = UUID.randomUUID().toString 150 | 151 | info("starting sample broker testing") 152 | val newProducer = createNewKafkaProducer("192.168.86.10:9092", acks = 0) 153 | val record = new ProducerRecord(testTopic, 0, "key".getBytes, testMessage.getBytes) 154 | 155 | val p = Promise[(RecordMetadata, Exception)]() 156 | val response = newProducer.send(record, new Callback { 157 | override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = { 158 | p.success((metadata, exception)) 159 | } 160 | }) 161 | 162 | val reqMetadata = response.get(2, TimeUnit.SECONDS) 163 | val (callbackMeta, callbackEx) = Await.result(p.future, new FiniteDuration(3, TimeUnit.SECONDS)) 164 | reqMetadata.offset() must_== callbackMeta.offset() 165 | reqMetadata.topic() must_== callbackMeta.topic() 166 | reqMetadata.partition() must_== callbackMeta.partition() 167 | callbackEx must_== null 168 | } 169 | 170 | "return exception in callback when buffer cannot accept message" in { 171 | val testMessage = UUID.randomUUID().toString 172 | val testTopic = UUID.randomUUID().toString 173 | 174 | info("starting sample broker testing") 175 | val newProducer = createNewKafkaProducer("192.168.86.10:9092", acks = 1, bufferSize = 1L) 176 | val record = new ProducerRecord(testTopic, 0, "key".getBytes, testMessage.getBytes) 177 | 178 | val p = Promise[(RecordMetadata, Exception)]() 179 | val response = newProducer.send(record, new Callback { 180 | override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = { 181 | p.success((metadata, exception)) 182 | } 183 | }) 184 | 185 | response.get(2, TimeUnit.SECONDS) must throwA[ExecutionException] 186 | val (callbackMeta, callbackEx) = Await.result(p.future, new FiniteDuration(1, TimeUnit.SECONDS)) 187 | callbackMeta must_== null 188 | callbackEx must_!= null 189 | callbackEx.getClass must_== classOf[RecordTooLargeException] 190 | } 191 | } 192 | 193 | "Akka Producer and Consumer" should { 194 | "send string to broker and consume that string back in different consumer groups" in { 195 | val testMessage = UUID.randomUUID().toString 196 | val testTopic = UUID.randomUUID().toString 197 | val groupId_1 = UUID.randomUUID().toString 198 | val groupId_2 = UUID.randomUUID().toString 199 | 200 | var testStatus1 = false 201 | var testStatus2 = false 202 | 203 | info("starting akka producer testing") 204 | val system = ActorSystem("testing")
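// The router below fans writes across actorCount KafkaAkkaProducer routees; each
// routee is first sent a (topic, brokerList) tuple to initialize its underlying
// producer before plain string messages are routed to it round-robin.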
205 | 206 | val actorCount = 1 207 | 208 | val producer = system.actorOf(Props[KafkaAkkaProducer].withRouter(RoundRobinRouter(actorCount)), "router") 209 | 210 | (1 to actorCount).foreach { i => 211 | producer ! (testTopic, "192.168.86.10:9092") 212 | } 213 | 214 | producer ! testMessage 215 | 216 | val consumer1 = new KafkaConsumer(testTopic,groupId_1,"192.168.86.5:2181") 217 | 218 | def exec1(binaryObject: Array[Byte]) = { 219 | val message1 = new String(binaryObject) 220 | info("testMessage 1 = " + testMessage + " and consumed message 1 = " + message1) 221 | testMessage must_== message1 222 | consumer1.close() 223 | testStatus1 = true 224 | } 225 | 226 | info("KafkaSpec : consumer 1 - is waiting some seconds") 227 | consumer1.read(exec1) 228 | info("KafkaSpec : consumer 1 - consumed") 229 | 230 | val consumer2 = new KafkaConsumer(testTopic,groupId_2,"192.168.86.5:2181") 231 | 232 | def exec2(binaryObject: Array[Byte]) = { 233 | val message2 = new String(binaryObject) 234 | info("testMessage 2 = " + testMessage + " and consumed message 2 = " + message2) 235 | testMessage must_== message2 236 | consumer2.close() 237 | testStatus2 = true 238 | } 239 | 240 | info("KafkaSpec : consumer 2 - is waiting some seconds") 241 | consumer2.read(exec2) 242 | info("KafkaSpec : consumer 2 - consumed") 243 | 244 | (testStatus1 && testStatus2) must beTrue // a failure inside exec is swallowed by KafkaConsumer.read, so assert both flags here 245 | } 246 | } 247 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below).
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /KEYS: -------------------------------------------------------------------------------- 1 | This file contains the PGP keys of various developers. 2 | 3 | Users: pgp < KEYS 4 | gpg --import KEYS 5 | Developers: 6 | pgp -kxa <your name> and append it to this file. 7 | (pgpk -ll <your name> && pgpk -xa <your name>) >> this file. 8 | (gpg --list-sigs <your name> 9 | && gpg --armor --export <your name>) >> this file.
10 | 11 | pub 4096R/99369B56 2011-10-06 12 | uid Neha Narkhede (Key for signing code and releases) 13 | sig 3 99369B56 2011-10-06 Neha Narkhede (Key for signing code and releases) 14 | sub 4096R/A71D126A 2011-10-06 15 | sig 99369B56 2011-10-06 Neha Narkhede (Key for signing code and releases) 16 | 17 | -----BEGIN PGP PUBLIC KEY BLOCK----- 18 | Version: GnuPG/MacGPG2 v2.0.17 (Darwin) 19 | Comment: GPGTools - http://gpgtools.org 20 | 21 | mQENBEt9wioBCADh0bdDopK7wdLLt6YIEA3KWdXmRhhmY2PDikKZq5EQlwkAmdZF 22 | /CTcheArdAXXuxfPN8kZp4MJE01mrgyZA9S8tsYG1GarPTpYUDXxZJJgswSKNAbU 23 | j4sL1sYm89pzsm57Mjt+4ek9F+GJeEBOiog3/4oaaVeuzFT2LhLbD2PY5CS/5MdZ 24 | t4KaqPAHoAQQGwzGSzaXxXbGKlQUDm0W33jjyWyON/eHHLUHbFwlb9f+du8DJLQ6 25 | WQjFR/xSzSxztJFUF1JUBm6l9ZMMMeAojTEBKXeh9Bo7iWcVU/nJ+VebAE8IpgU4 26 | 9TnNf/o2iJnKKTyKLW5QDR83gIFsml/6sja3ABEBAAGJAR8EIAEKAAkFAkyaTuQC 27 | HQEACgkQaCVaMFMDgyhOlAgAixphPIQp1z/5fuuiXLBTPzLLRsR3UQ9jeAowUdrA 28 | utnn7uVHvAqcUy/WLGJyRMVQl3y8oNAQXmNSfeLzp2Bs/uRuOf4B2b2XBX3F6AqX 29 | 1UI7ASzyG1cowHNZ+Oq3Edg6YwQCt+5QwrlLCcp4eo3J0/NwxrRqYM5TdFVnvN3L 30 | PmHMPlOhLfZJRf2/g9dcWpIcLEYaGgJMD4uUogaN2CT2GXjntFsdgRG7jYasTK6+ 31 | TF3ML/rB/tbNcMo23IiQ8GKaGO04uQUlyo2b3ix4uUZGtIIIFtZMOcgSLTKSavn/ 32 | hPMe6wPOkB/Onno0zDUBCrpACxjcW9fyTALBlrJTCrYpz7QjQ2hyaXMgRG91Z2xh 33 | cyA8Y2RvdWdsYXNAYXBhY2hlLm9yZz6JATgEEwECACIFAkt9wioCGwMGCwkIBwMC 34 | BhUIAgkKCwQWAgMBAh4BAheAAAoJEGglWjBTA4MoN1QH+gJLOaIUhIqECvwz77js 35 | 9HfadKmKqwc0SGDpoZm2tMeUsp8+26fhqEdprNcfKV530+M4e4l/ka4Y96p7C+EW 36 | kAuMQ41F9YnvxX9dATc+RgLueVkaUxMjNLPAIwQkUyQoeR/tLcMpNI/lbWdwEJRK 37 | unufbxDGIXVjnN8y9dkmlZ5NGsAWa2ZoYDrpKHFwTKvNv+J24rYuFbuBktlKk1L7 38 | f0N/4dCeFlHjoaVykS0BuQrEA8/ZPzE4qPw7FFWUb3M+gZH7xulSQnc+Q4oCVdYw 39 | FKsoB5iz924R4g09Yk9l8wXIkKvhCFwkioapZLqFQyse1hcsEUmBJzLBjjXWkw9+ 40 | pXe5AQ0ES33CKgEIALqimXtgSq6zB21b2z4mVIAgSHFRQOa3q5jhWYvHALnwSIIc 41 | AvPHnPfWUuRyaGjJZbVgLdB0o+ooxYT8SdZtPVFMpVvoAaLH/4t1DJvQTXYUW+vE 42 | Z6cWeC5rr+JXcwLa8xhSYYhv3y7Lf3AwdKq6bzF5+7/rwDu7K5LhzS+hVrhln4Uq 43 | yFNQdnWoXIsBIt0wxut3KuRyNICjYs6Jr0zkFR7azCJCDml6+NnGX/jCtK5HE0AZ 44 | f11VOgds1kH1bohI0FkzRXk5PbMgvnxRZNlSGxzTUceYystJq/iIuzvSH3ixMKqL 45 | Se0KAOj1FrcpGsP+RF17sooIMjntg2MZKr7STTsAEQEAAYkBHwQYAQIACQUCS33C 46 | KgIbDAAKCRBoJVowUwODKP9GB/9TsH6lBFz/ueae0AD6gwGGRySkc+zilFYYBAbR 47 | jEpPpoyj5KCVYcQ1Q5SPeLzy7/4hQJlf0F0gg8w3G4Axjnaej4oDuNIGBKGKTLsl 48 | SaBI2P44wlVdDOaUhgAVB/zzCqUidLYVwoOmT3OLz01lZgPXKihw+I5JtNvdpOa+ 49 | xXHQ7dwezmUMChHZADF198/2pRIamqwMD0MyApaMFeb5posM0KYHj+k6x8uMQB8m 50 | V6IdsgqZpglXO7gfuvz3mQqye50e0xIhCLdX6JwIOnLGzUPJW9ds33rWkzk9MW+h 51 | JnjuiZeYbtlLzCzuvRVjnSblrUUCaOm1RFVWrw6yVVTGHErmmQINBE6OPIgBEAC5 52 | XC3CGy1peReJPUk2J5iCvmdSIOEaiOfULaz+1WiKgBMovUAKCbIuCB9ZMjfT0gce 53 | Agqj4UeCeRhPi2dVxVl/r1LCyGMJ1hZyEGDk7No1QkCemnCD4yiBD+BX+9V1Zuqa 54 | r1goV5tMXoE0kq0cyoih/c6t8qrP5Wf/BTS118TBFAHRql/yYle+g3YEk0uZ4yue 55 | dCVQIrjTWe46fSAacF8eGluGQVTbNj2aRacfu+UYPj8G241F8gTOTjZ3fJ80KTOI 56 | tjiV8Gy+I6HDIWd7XMwNCO8z2G2lto0CBoDuaUukXgB5/CNFQL1S8Zxl2idxEQSt 57 | ZzKTKmZZcom+GNMvL8xCE8sMgMSiZ8Q54fRevTQrDLPpIhl0g6V1v7vOfokWxf7K 58 | 6t+KZXB7oZbc1YrW/kcjDUAMlS4gAQ4K7ngEK3qIEVquvXx4nLVM++zn0/0PVzES 59 | 2gMnacP9mdsVsPbJx+Jwk6hZi1mYBS7DyZkoVJYgXqLdCtWebH+VqdHiEHBrWYxH 60 | eoyHWjViJKZYh3ojcKuZMicptEzA25hz5HaKEkKv1PEYeKEDG8tLVf09dU+HHdK7 61 | Sbca08hXFhDIiDlksFa67Zy7MriJRTun8rTCD1DFH3wyuubRu8bDo7UobBgSK3td 62 | cBRKM8i3e3JQZejhKlROhJ56xIT3ivO3Coe0hTUDXQARAQABtEtOZWhhIE5hcmto 63 | ZWRlIChLZXkgZm9yIHNpZ25pbmcgY29kZSBhbmQgcmVsZWFzZXMpIDxuZWhhbmFy 64 | a2hlZGVAYXBhY2hlLm9yZz6JAjgEEwECACIFAk6OPIgCGwMGCwkIBwMCBhUIAgkK 65 | 
CwQWAgMBAh4BAheAAAoJEGx0AWyZNptWXSsP/i75oSJ50HJBRSni0oT6SxDG0Ybs 66 | Gj3Ojwn2FppOOpVW7kjSRPTb/gSTHYlxNvBxI+nkyKx4Z2+IloDaH2LUsxdAkBor 67 | 9a58ykW9BD5Yd/5nRvduHp71XV28b3/xnN5H6kbCUr5yWZPVZ4//o8L12PS3Jy0i 68 | c9cQQF7vzuqQOvPpncruBChikEnSrwFmZI+UMRBBduCck6PNyeWTjb8ipfJtvwfO 69 | vQHX5+AOSz6zwICkzVfOC/nVgSgJtOW/5sF+aZRGylQNpduwl3hB/fqFGuPqPFQD 70 | NoVmCen4YQkzXcKF/cWum89GQgrH5sPlUqI0FFbeieSWV6fXeOjk6Jeyccd6VBFm 71 | 3jSM6GVEwUqxe0pCBkmkL8QOWBmwhgVdb9e+Sonq6RTcbZlZEOu4tmAuLmdlYMry 72 | Vdw/zIe4qoLufbgKljo9467Kp+yWdXGp8kk55bLyqDnxzyUzYGwVcCiAthP4cWKN 73 | Aygibby4eyHe+D2GjlQNBMfl3lzcFD4SpJBHMgfF4/7eOv79/estNiAI8GXaPdVd 74 | bHFNwNalSsdrL4V6rsDuCmjAqpRe3blhYTg133bMAXMbvah0w4O/zvZKcD8tS+oy 75 | enMEezN/n1yl+33BL6VPbslnNx+rJ6ZXeuFq1QuXdwpcnUGL2hzJH7RVlGDcZpar 76 | goB5Qax+bx0f4uZhuQINBE6OPIgBEADnVeGW0oQWDB+redSEpyA2VdgTidGhxLcF 77 | qIO6e4HnK5K6XpiCe4TE9tCBYpHz+IQLk2MtkkiNh1PlFIRWVZUqlxUD07kTvxaB 78 | Oy9eS5P7SGIzbnvVcpyaZkKcvGf5mRAf6Wm+RVCUraGhPg2Gzwua3vcaalIEYzyU 79 | 3jfjS4ieUtyRdKxZ7y62mzOT5bL0YZLgiTt4HgfMhMxxOD/JOVZkoCvVmOghGLok 80 | F08eFC8UqXA0tkyV5AOt0V+Beea30IGbcKDdzZGfaniyYJG6qRvblZnHFw3/+2mp 81 | kj30AtOdPbmKmWgc+UQQrwEwn2WhOBHnovEKDdhTGSEH1tg3EcFpsZw8YRDSH7pH 82 | Bpb6kzjDn1UQ8fFkoPEA62OmPBeE9x8n8XlMJsxMh8p18kxRQDTYuOmS/7UK5ty2 83 | EU7RibhBTjz125Ta5w7V+fb12m1XMrdj1vP4o9Dnjb2f3LqoiPG3lyCgyNqpIKoc 84 | YX0cgFEiUzFpWY4gnFuHNi5ie6Tl5FAZz5chAew5ItlwatSGoEx9DS//cSWc9b+w 85 | djV3uvhqWaTX5zGVFu9pOvR08BrB7vgR6Rl8lrEoZzu5x0r7KjJgfb9efnuLadn+ 86 | kL/4+f43XMAQFQBGHBDxREOhokIXkQzzbad+SQu7Fz4Zkfnj1A8Xvf5TopIC8Ruk 87 | 6lLByh9chQARAQABiQIfBBgBAgAJBQJOjjyIAhsMAAoJEGx0AWyZNptWw54P/iey 88 | +h3QvPN5BQwlp44YC/oMQrbB9mH3oq6z0vqC/MainKHnclGDxkgzmq6IBaE8npgr 89 | JqcoL7WZ4Kid6StGBCA09Ah+ZhwCYl+0Nob8/jZbqO6ghTb/SJibWkYjwRpSfouh 90 | bAT20HAkC1H4Aw0HQ2a0vehBJZ6chPksKSYl9lh2Q+wLCXDz+VaYOvkaJ6reMe1g 91 | TiuCb20Xn+N7BVIQbcVb8XJ6thlaeKK03r+Sum8FOmvcbLME9VsXnseVCaqQQa49 92 | 54CtSFmiarnu6xN1AGzuSHRzlhZaCksFRDjs5Qw1EvVpgUckM8CdfRj2wtyTNB1e 93 | eFj6gT0Q9k1ZQv58whR6ZY1uavObiq/CemGc7ZpXZTbVHicW6WNpYTz9aziHzUYx 94 | zVed8gfTmcPi7Mi+FZC2NjX4BAA6ICeK33wWO0OBq/rcXLsUhCprrKZY4SUsKrtV 95 | YxbBRTWJFMsBVuXTX+uPdey7WxA0XCmKIcp2wDpptkq25yvu2UgAwrXyTpg0StuV 96 | XN0Wj3YqFrse3tXiE+Vqe5/3H6Rc2+Rjh8ysMkmg/dKCAuys6Is2vAyfpXgZzcQO 97 | kyHxwitfr4d8mm6GW3/YluGi7oPCnH3FEsIoUZMyfGs8XW/zwH26mCIpYpu9rDHh 98 | V/dDwg+iqlaqtN6rIS4E1gML3K33OVsdUvlcodhE 99 | =gNdQ 100 | -----END PGP PUBLIC KEY BLOCK----- 101 | -----BEGIN PGP PUBLIC KEY BLOCK----- 102 | Version: GnuPG v2.0.14 (GNU/Linux) 103 | 104 | mQINBE6OPIgBEAC5XC3CGy1peReJPUk2J5iCvmdSIOEaiOfULaz+1WiKgBMovUAK 105 | CbIuCB9ZMjfT0gceAgqj4UeCeRhPi2dVxVl/r1LCyGMJ1hZyEGDk7No1QkCemnCD 106 | 4yiBD+BX+9V1Zuqar1goV5tMXoE0kq0cyoih/c6t8qrP5Wf/BTS118TBFAHRql/y 107 | Yle+g3YEk0uZ4yuedCVQIrjTWe46fSAacF8eGluGQVTbNj2aRacfu+UYPj8G241F 108 | 8gTOTjZ3fJ80KTOItjiV8Gy+I6HDIWd7XMwNCO8z2G2lto0CBoDuaUukXgB5/CNF 109 | QL1S8Zxl2idxEQStZzKTKmZZcom+GNMvL8xCE8sMgMSiZ8Q54fRevTQrDLPpIhl0 110 | g6V1v7vOfokWxf7K6t+KZXB7oZbc1YrW/kcjDUAMlS4gAQ4K7ngEK3qIEVquvXx4 111 | nLVM++zn0/0PVzES2gMnacP9mdsVsPbJx+Jwk6hZi1mYBS7DyZkoVJYgXqLdCtWe 112 | bH+VqdHiEHBrWYxHeoyHWjViJKZYh3ojcKuZMicptEzA25hz5HaKEkKv1PEYeKED 113 | G8tLVf09dU+HHdK7Sbca08hXFhDIiDlksFa67Zy7MriJRTun8rTCD1DFH3wyuubR 114 | u8bDo7UobBgSK3tdcBRKM8i3e3JQZejhKlROhJ56xIT3ivO3Coe0hTUDXQARAQAB 115 | tEtOZWhhIE5hcmtoZWRlIChLZXkgZm9yIHNpZ25pbmcgY29kZSBhbmQgcmVsZWFz 116 | ZXMpIDxuZWhhbmFya2hlZGVAYXBhY2hlLm9yZz6JAjgEEwECACIFAk6OPIgCGwMG 117 | CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEGx0AWyZNptWXSsP/i75oSJ50HJB 118 | 
RSni0oT6SxDG0YbsGj3Ojwn2FppOOpVW7kjSRPTb/gSTHYlxNvBxI+nkyKx4Z2+I 119 | loDaH2LUsxdAkBor9a58ykW9BD5Yd/5nRvduHp71XV28b3/xnN5H6kbCUr5yWZPV 120 | Z4//o8L12PS3Jy0ic9cQQF7vzuqQOvPpncruBChikEnSrwFmZI+UMRBBduCck6PN 121 | yeWTjb8ipfJtvwfOvQHX5+AOSz6zwICkzVfOC/nVgSgJtOW/5sF+aZRGylQNpduw 122 | l3hB/fqFGuPqPFQDNoVmCen4YQkzXcKF/cWum89GQgrH5sPlUqI0FFbeieSWV6fX 123 | eOjk6Jeyccd6VBFm3jSM6GVEwUqxe0pCBkmkL8QOWBmwhgVdb9e+Sonq6RTcbZlZ 124 | EOu4tmAuLmdlYMryVdw/zIe4qoLufbgKljo9467Kp+yWdXGp8kk55bLyqDnxzyUz 125 | YGwVcCiAthP4cWKNAygibby4eyHe+D2GjlQNBMfl3lzcFD4SpJBHMgfF4/7eOv79 126 | /estNiAI8GXaPdVdbHFNwNalSsdrL4V6rsDuCmjAqpRe3blhYTg133bMAXMbvah0 127 | w4O/zvZKcD8tS+oyenMEezN/n1yl+33BL6VPbslnNx+rJ6ZXeuFq1QuXdwpcnUGL 128 | 2hzJH7RVlGDcZpargoB5Qax+bx0f4uZhuQINBE6OPIgBEADnVeGW0oQWDB+redSE 129 | pyA2VdgTidGhxLcFqIO6e4HnK5K6XpiCe4TE9tCBYpHz+IQLk2MtkkiNh1PlFIRW 130 | VZUqlxUD07kTvxaBOy9eS5P7SGIzbnvVcpyaZkKcvGf5mRAf6Wm+RVCUraGhPg2G 131 | zwua3vcaalIEYzyU3jfjS4ieUtyRdKxZ7y62mzOT5bL0YZLgiTt4HgfMhMxxOD/J 132 | OVZkoCvVmOghGLokF08eFC8UqXA0tkyV5AOt0V+Beea30IGbcKDdzZGfaniyYJG6 133 | qRvblZnHFw3/+2mpkj30AtOdPbmKmWgc+UQQrwEwn2WhOBHnovEKDdhTGSEH1tg3 134 | EcFpsZw8YRDSH7pHBpb6kzjDn1UQ8fFkoPEA62OmPBeE9x8n8XlMJsxMh8p18kxR 135 | QDTYuOmS/7UK5ty2EU7RibhBTjz125Ta5w7V+fb12m1XMrdj1vP4o9Dnjb2f3Lqo 136 | iPG3lyCgyNqpIKocYX0cgFEiUzFpWY4gnFuHNi5ie6Tl5FAZz5chAew5ItlwatSG 137 | oEx9DS//cSWc9b+wdjV3uvhqWaTX5zGVFu9pOvR08BrB7vgR6Rl8lrEoZzu5x0r7 138 | KjJgfb9efnuLadn+kL/4+f43XMAQFQBGHBDxREOhokIXkQzzbad+SQu7Fz4Zkfnj 139 | 1A8Xvf5TopIC8Ruk6lLByh9chQARAQABiQIfBBgBAgAJBQJOjjyIAhsMAAoJEGx0 140 | AWyZNptWw54P/iey+h3QvPN5BQwlp44YC/oMQrbB9mH3oq6z0vqC/MainKHnclGD 141 | xkgzmq6IBaE8npgrJqcoL7WZ4Kid6StGBCA09Ah+ZhwCYl+0Nob8/jZbqO6ghTb/ 142 | SJibWkYjwRpSfouhbAT20HAkC1H4Aw0HQ2a0vehBJZ6chPksKSYl9lh2Q+wLCXDz 143 | +VaYOvkaJ6reMe1gTiuCb20Xn+N7BVIQbcVb8XJ6thlaeKK03r+Sum8FOmvcbLME 144 | 9VsXnseVCaqQQa4954CtSFmiarnu6xN1AGzuSHRzlhZaCksFRDjs5Qw1EvVpgUck 145 | M8CdfRj2wtyTNB1eeFj6gT0Q9k1ZQv58whR6ZY1uavObiq/CemGc7ZpXZTbVHicW 146 | 6WNpYTz9aziHzUYxzVed8gfTmcPi7Mi+FZC2NjX4BAA6ICeK33wWO0OBq/rcXLsU 147 | hCprrKZY4SUsKrtVYxbBRTWJFMsBVuXTX+uPdey7WxA0XCmKIcp2wDpptkq25yvu 148 | 2UgAwrXyTpg0StuVXN0Wj3YqFrse3tXiE+Vqe5/3H6Rc2+Rjh8ysMkmg/dKCAuys 149 | 6Is2vAyfpXgZzcQOkyHxwitfr4d8mm6GW3/YluGi7oPCnH3FEsIoUZMyfGs8XW/z 150 | wH26mCIpYpu9rDHhV/dDwg+iqlaqtN6rIS4E1gML3K33OVsdUvlcodhEmQENBE7i 151 | WaIBCADX/FrkgaxVLnDRU3hGfPdt7grEHinVgIct9PXNve1L/MhLq7stpVzJNUVz 152 | 0TKC0njU3sJyYp5Imyojx423huOBTOTtn8KyPTZN5o8hBnr5FfbzBwZj6HcuUYc3 153 | UWCw09SzB1LyrunO+s8n/1jeMzVSFrzlLQrD62jmjVDq3FSieo6EdrQHjU9tUXeb 154 | mLk/VfwNcITuCL3X2Gs6+RutRgqM4OZJaQGo43U8gGRA5ZHvXm+RCbS5F7oItqSK 155 | JalnHxLzjwr4dM8Cc+uGAetcpbj3Vycxt/p0oehRUjdBWe69dVMic6XykwFpgP+9 156 | d/ExrJ8kbYYEqtdEwtVU/l02Cn+bABEBAAG0R05laGEgTmFya2hlZGUgKEtleSB1 157 | c2VkIGZvciBzaWduaW5nIHJlbGVhc2VzKSA8bmVoYS5uYXJraGVkZUBnbWFpbC5j 158 | b20+iQE4BBMBAgAiBQJO4lmiAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAK 159 | CRCEGNwlV1omRQ1kCACkbgPATgZ8WpDG9/jzZHh4Y0k5MHy8wsBEnMaGHmjLiKf8 160 | g/1dJgLiSeizsH/idBGBix4qu/5tZ0ayd2jOE15LaVK1NI1ja57y5wWa6/LFIBTN 161 | wY1ODTiEGlJ/TIDQTRmBWACfv6nb7VBFVQzETLm8RfRQuqYs617E9RY95u26/Rx2 162 | Sxw6QlXUzH/BwrQ6H0eR4rl32P90amIpGd8sHuMhMRI4kcTmzlJxSiokXceU76Fq 163 | 6ACghjwrnaYy18wS2ZNq78Fye2kEabHdTtcXYiw0x5sDSdPwWLG+WrcoLcHVJTPb 164 | QhCftCzTjUZh8ybFJIr8zn7Fy7P6oQpNYdZpROFEuQENBE7iWaIBCADDlUDcfzdR 165 | RjxsyrYnP5Zo/e6Xcr3+1kiGUoxpf73VOsz7F9NqYqiR4ZJr1PwQPKscbQlD23PZ 166 | 81A4p/nRhDAQQA2vcvijDECKu5T7wscIxrE1DYqRuZhlXreGQvhpH75XpDde+vIR 167 | 9xmOz/uY30J4u2YoQaq9/ZKSU3RKzQ4HIOSJlvM9SibV/OFL8eT/kjPNXswczNk3 168 | 
XLDTJQFJDynMLwdBUH71BeC8q3WMWhQjmv0gVzegNAkRdW3CqNWx2by1v2V0Nkb2 169 | AkdHZQbSo8Q5vMz6flntWqbLWOEmrFa5xYX8QZUgo0oRWcI/0Lw6z+C/caqVJ015 170 | zv1lyJ0Q3ZDvABEBAAGJAR8EGAECAAkFAk7iWaICGwwACgkQhBjcJVdaJkWIfwgA 171 | rY4RXWny/twz3e5HK1p/rCDAwB5cVzXSoITNRBmGDXbyNBievfOr5UGi8TKWVR/o 172 | UxnG87ato8QxjU5s/9np0cgPGvOptN5dgrrzis9iSUhsgGs1UrR4UOo7xLW+jeX6 173 | KDAe0IIx25rOscycKv1inIY7IW/4bg/pmWUXYHUfIJGkChkzx+fdpEygvUFS0dSb 174 | MAlpYHsVMBhlW8cSk7uhWLeH6Abp7++ZdDqhnIfL4XG1IMUslkjQ5TTEfGxETCm2 175 | oe/dVCR4blsExaub6gUxyRiRzxaWaCYt+AT0vuTbBqDQduJqcry0AyTJe7O4+yo4 176 | bEnlTfUDA6C/gOJPKv4JXg== 177 | =EIZj 178 | -----END PGP PUBLIC KEY BLOCK----- 179 | pub 4096R/7F1183BD 2012-06-13 180 | uid Joe Stein (CODE SIGNING KEY) 181 | sig 3 7F1183BD 2012-06-13 Joe Stein (CODE SIGNING KEY) 182 | sub 4096R/3384EA8C 2012-06-13 183 | sig 7F1183BD 2012-06-13 Joe Stein (CODE SIGNING KEY) 184 | 185 | -----BEGIN PGP PUBLIC KEY BLOCK----- 186 | Version: GnuPG/MacGPG2 v2.0.18 (Darwin) 187 | Comment: GPGTools - http://gpgtools.org 188 | 189 | mQINBE/X+B0BEADHcoRCi6OiC3TNh6xDkiE40YdDBM1g/YyF5r1Kim3+09bgGlm4 190 | RLIgdbhh+bQyqPJGQSRal6ypRFRuzlXskJcxkVZdU8XDzCBk6f5d1AgNbgD9YFLo 191 | NjEocYPAE9X10MDQ5a8eI7KBSrFYL4tDI7q8O+ZayaFFdkfgjxw5+Mg81H93pm77 192 | h9rHNOMCxQxCJusrkh5MIqcttJhWSVxukRkYoty3qNLzDjR5C3z5v6Xwdg+pii8g 193 | SEC1DvMXxhJVRPO24uXHKbrL2zbyh2GP0WgSuogcg1smGNh+6EGFhZNJj3uKCljh 194 | 738uE6dmQsDwXwgXtoQfUi4cnMWmRWhxA7XSdt46XdeoWGblFp3hKTWGKHX1AVYY 195 | avUMajg0ISodtW6XIHoxHZzfH5J1LQbDWbWrkXZvIoPI13dqeJQqvqlpmGZtsYYY 196 | 7vH3Tu8FTXLNUx638b8iBvGYSvEbm4GEdtqVTnOC80fYrflRMdWUvVPWIDvGat1F 197 | ucTg71QDmoEU4ZMaSRvn/8gX2DWjTooxh/k1YtJJAGV21P3GwxHPqNaa0IMLwBlV 198 | 589cJMLoPGmXYcn6v6CD+A92ZltUOowx94vlFTZ5W2u/MPUMDQwTi9CpRsue+yyR 199 | Huz/yTi3KEyUTNRn/oXFhvW/f6UZmsLAHbt5TalyWrFIEHz64IRNfZhDcwARAQAB 200 | tDJKb2UgU3RlaW4gKENPREUgU0lHTklORyBLRVkpIDxqb2VzdGVpbkBhcGFjaGUu 201 | b3JnPokCMQQTAQIAGwIbAwIeAQIXgAUCT9f41AULCQgHAwUVCgkICwAKCRCGUYeW 202 | fxGDve+2D/sFNDDpjZA73+ISPoojtitMiFiYA+9CZ9uLdskqS+liTGwNBaD3ypV1 203 | RPdrKwjlb03ELFP7EH48ktUQZBKootVHkuyBQBI8WMEJEaAUYutuEDZgfoICTZ5L 204 | I/XTIW3Ih2wErBcp281nU9lkZ91Kte7gMfjsA1bZPPtL6w4p8YhxD1zMerMQ0HP0 205 | H2jYJopbyTn+BrrkAc9I7NpPvt+XiCewXZGmDPGl4fBTloUy+xxAOWVzUhX8hwmh 206 | N9E77L9gLKBi+IQNn3jWs4Kck7QH5iULXAGZ1YdDoaMOdFO8nwmeDF9/AdyhTj37 207 | zF4CvsoEBnIxZPkxnH56orD1R2MmZd1P8g2yoYrAF9XdIRPnO30JZDXP6czZX9PP 208 | eJcKwXofx5ldzLb6og1ZZGwY742u6SKvc7rj+CryaPqixu4OFTWCtpDkewCiNHwL 209 | OUnhBXDSqxaFgZT3diOypCOzKE1PtygbeVUcxCMtGdF6s1EGMfamRZc+SlXhoG9i 210 | f9nZRrKjSo/PeFxPMTI7s8ACpISplpiaHLx6s/GK+AR51XwG65miHX9Z6empNFXk 211 | iLYxnZjo0nnHU8qM+XSjs5ARFlvGbK5Iw1wUhTVX31nInT9OAojuY8MgPwyPH413 212 | W+qMWRUziWwEmeXLNDPq3BNksfxp3Ac+ztCkzYfKIDa8L4uNGyIAu7kCDQRP1/gd 213 | ARAAxCKGUjsUAqsXKA2TNzOAfJDGpeQOzHFmADJiPXvThJ8hsc8DgfGOTQZfwLPh 214 | P4MYlt1E4dvTukOYIXC/ORCtUdGNaZNh0JNkpGggOUcCJBRjqwA9TXeKUESzdKFG 215 | BX26GZZtGfvzYTz8hSyEU0VnVQbK1ZrcUiMh86oFGOUH6SG3ZFDx+QoMcC5felZx 216 | LzNtOOLOwRKW3XXXsuwEFsBi/Nre/0jLehWgpGzSq2Sx8uIRQ1gn08fA999DEFOk 217 | MqGHs26FijrqUm99pvIgYM1LU0WgvdjQY5wnoopMvYDhtOIIXjgOlnd/D8mBAbei 218 | zt54ag7RvrugjwTKmrtaSd0OhYDbZDQOPVQNGveug1pEl8l/dS6Og2t824DRi7zu 219 | cZyUd6y05FIJpHf+Pjt7M2bijJ7GJZm04iBfG4TZ+zaOwe6OImvC3DWxhFCEJqxq 220 | v5oPpaoqby/R/tKbo48Dq1lZsbt9Rw2moDndYJNuV43XWNfGgwz9PTpM56BG5NIB 221 | j/g4EQgwvy60dNzGQqU2FSIBLcLGGTEDnDJpYStflOgTBNdX6uPceHKoNQ1/qoRg 222 | z+AqcHSSxEAs4MaLiYzFQ5+1UScrCka6Ox6boiMQ4K7PJlOv12jTT6TNEU6SyFlV 223 | LPLhK/XC+qrsmkK3fx6j1ZbwZfkue/74CqwydcVRQeGAF3sAEQEAAYkCHwQYAQIA 224 | 
CQUCT9f4HQIbDAAKCRCGUYeWfxGDvfEaD/kB5qzWV/Pq9wWz0acrNEGnrdc9s2uB 225 | 1MkB2p+2LeZzgmvAeHTKQy0y3KEd4VY1JQrB+HPOyJARf6fpXkZIOWlDJ6wbHxnZ 226 | 6pAAtpPKIVghkv3TPAvTuyqxMpkBAhHm/hvpj/3OmLPitBOFK3p9F/FAmqHADato 227 | DuzNdkF4v/Xl1LbaBI+lyHA8BM3pQnci+Hq3ozxW5JEqk4HYliPpJe77ifHukJn3 228 | cwlnIz0SHMV7+hzRrJ/yaAmf0cfVZHb7iKBarh3SWm7rN9g1LJlhgkRmP4njkctf 229 | kSWLSAYbjfU/5yOfhEPrD2w5Y3qjErHAmBafYkMp0J1a8Acg2Unb1Q5xh23DFr1m 230 | 7VSjLTu/Qe9XTs+0EMvvVsp8vvQSqs7VP5GRfuA6f2DXlGy1cSdbS8m5mhMMCTei 231 | Rsm1N+68cmON1stRrtpqxktokwNf5GIY1AxC2KT4HLPjS5AQHuJBIO1vmfVnKF3Q 232 | pVzipF+8VFilrN0IerddThBYgcAUQM+9ZzuPX0yCORpEMwlq0WJTn0KlJc/jJjJb 233 | VdyQ2DksBCpN7T+i8tTM9moq/RSieRIjEXKeEfM6EoNVX5x/f4HQKv7YJ9mr1gDJ 234 | v/W3AOBR+NSWhBudk0N6Jroga5DI8X9NGz0I98Z4Nvme5I6jikcjE0NnKuJ/APLU 235 | KzfEt4aFC/3Gzw== 236 | =qUcq 237 | -----END PGP PUBLIC KEY BLOCK----- 238 | pub 4096R/E0A61EEA 2015-01-14 239 | uid Jun Rao <junrao@gmail.com> 240 | sig 3 E0A61EEA 2015-01-14 Jun Rao <junrao@gmail.com> 241 | sub 4096R/942424A8 2015-01-14 242 | sig E0A61EEA 2015-01-14 Jun Rao <junrao@gmail.com> 243 | 244 | -----BEGIN PGP PUBLIC KEY BLOCK----- 245 | Version: GnuPG/MacGPG2 v2.0.22 (Darwin) 246 | 247 | mQINBFS1uQIBEADnz+VGKABlkZae60UPIFqliX71KupZH6JnYEOZQtYwwGGAvuGk 248 | Ws+mDRIfLcE31+C1sDj87pmHfONUbPMI9AhjHVCPwc6rVLfAT6dVdGrmjZ5kRerT 249 | S25+a3O4njZxekMt6oWr0PcZoEkuhN6EoJ5u/BanWwtayWe2poZp+VVF7MG9vheH 250 | nsiM+Wfbvt1UiwIvpJ1U9HfnKL8te8d5FlCuxCJTvv8PYfUpGn2MpcvmrjNknDMc 251 | T2sReeU+6gD+f69OcU2SrvYUB5fyhKmeU42byImLCF0Kzz2dwvFMNzDHTKv42N7Y 252 | zyFEN6r7VrugrAyQiDOAdy8PBrJBCtg309XrakMgWleeH2PfkMpEMVVcR2sOCVAJ 253 | wHfFrXOtrg6mFJpphqIPydRYKCxy6XQyvy91igK8W1fS/iKVVpd+WFw0HF1x9p4L 254 | 3UC+0BYyf2wsQSpE1CRftnhxog8xbkDQJmy1qfoBuYXFXS4ARpewW5JxZbuSsqLC 255 | fUU8rWGn3yngzOQmYURFVjfP0bJmOcwIGmDathX+8CvxmX6J4qsHlrfJEZ9aBhXA 256 | FYGfIK7OmRU/ZgtcOELyT18EHF3mOzQVNcmweKHz1QvJW/lsPRPDy3yqWmrivmVR 257 | /Sw0yTuOiNRj28qSStEJahA1seRVS9upwEcMU4bhN0D/MtLR0s4/9SDqYwARAQAB 258 | tBpKdW4gUmFvIDxqdW5yYW9AZ21haWwuY29tPokCOAQTAQIAIgUCVLW5AgIbAwYL 259 | CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQq2H3D+CmHuqv3RAA4FBD0uHIZj1H 260 | nZd4ddU7YxPPcuuoEaIzEZ3LJFtvyA8vjW50ZRvp1oa95D0qu90lo2Tf3vDaZoxR 261 | isGI1jrcjwjXfkgKqcMOWsGwJfqlTnE54nIzJ5x9C7ePzAQCdlq4WE/3OHc6V/zK 262 | KadAlJajSR24yT/oZUeu8W2oA86HF42fJUacoERATv3XBRtxClDdYkQfYxumOdSn 263 | /hbxaT+SUfoLZKfuWaRCFzvvDpudGUCnnCwSOXhDQjsYxy7Y7WYqirh6LhkJIvy3 264 | ejyZebM9IDk6bAm9VfOKLGGtlAQ8JVkLGidk7+1NMt5eAfxV5/9X6AuiOPdYV+cq 265 | 4p4ZuqPjVbDDmQwsGt15EVZ92kGcVtidhttZ60znslhSrn9HVjYSEi9zT1YdbDSW 266 | r2rwTq1apXfsvS42sT0Xj1x2ALR5Db4cFStpY48QBkg453fmnIBE70oPWwy9wWWY 267 | YniLyYiwSnZAyocaFq/xCY74I6S1FEB7pZwEo2aeR61hmiv0cC33MY3b36HP3LaQ 268 | eSZGEyMjw3Cus3o5BUmrFirokyQQSl1p2dojTppAYWzpxT1D10JfecfLlMmCcres 269 | HhyxWE8xuuEciZLMvWFjKJCSxrbCU9BbJ6JAVp/xzDkfmxiImJS4XQDa0hhiIAMg 270 | ustPi9r867y/nSmpmmuhBHwJzYCdOKm5Ag0EVLW5AgEQALpRBfJ7qvOuZjg4VNZR 271 | lI9n2IzSIFLwKhQibGTl9i5oQD/g1VoumjwZm6N/QWbxoU8qFHWou1KGGEibnTsC 272 | xJJKg1YspC2WLRRqzWzsgHNrF4DAwtRQTolbj/c0c30iNgZVUbguVsZIwUv8Yj90 273 | v1wdVxUrcyHF5KJrNz0I+6qy15z/T97OpQ3y7S2WVrGvb1rY03NpwQcOA6lam5b1 274 | jSQKNNUs5Fldr27YY9PDufOI37trCwLXtveW2N+clO2aKpcrtYC9m2WaFilXbZ3z 275 | 1UdfjLHbMCBal+UabebLM/IQ882fhzXTk8GA/54ILW1kOaMQ8FoqjXqzXiVo3rgm 276 | IqpXvoy0GKL7T8Dawf+AoiyHvNvG7YyEczrNYy2CelHXz7rMfzk60n7rE7dMVt1s 277 | /behN9KIdXKYMUcw/wByiScTbSGagJhZcyApdTYlTvyy8U82DIEPcO46+eUnQRcv 278 | JRPLgLsY21jrXzqUkL1MEESYMerc8nsO+FdkBLrMcuBgQpxlGtLeODG3tnXakBdD 279 | qfdOorDs13HGyzclc26ACwPXofd5qV61npBR3zNGro1Vb1hCDRUOEHBimj/amj/Q 280 | J0CyeDWt00meChlTWefhV7AjAIRxYnX9gcs6FFlyYKt8m4BUQEWBicS6jhVxM1Sa 281 |
ucEKSEF/PP+9AfuxBmToiohRABEBAAGJAh8EGAECAAkFAlS1uQICGwwACgkQq2H3 282 | D+CmHuqeaA//cHM2oONqiLtk0oRYa577I0uZj2Pddlq38cv2rhRNXAEpIXTBZRrV 283 | 3PwFVOqpysOC9A9TQe3rlDU/AvFw965UDJ+nFdgN5bc6+O58wr//wjTf3zGrxjGT 284 | LINmRSU7RoiKXdZ9At9/Ra1HDIzGqCu5nHqdYE+EH1ucu1CWIj/caCjgoZ6yRacI 285 | Viawor4b+T3Yov/hWfJzawDWbV9COpeRU4XPfEAaWvuX4ZmZYr2PGI/KEVIbXXUb 286 | RyggTu6s8JdU4K+rytzlB0RkC75yjfWuY+4lD8Dv/ASPD+R8NmP7xuLsGS8YUd7t 287 | H0+U4Y0GI7/ZqHeKw8QjJ+IBkvUuOW1zFRi7utKygueP/jMAhk3HhB/aSoxU7xyC 288 | CTLeS2/KDSdm3BjgiSak7/2sr7SHPzxyR8FgK3AWDrupkx8o92DHh/2i2/47d/iy 289 | uG/+sPuvsx2/C8NIPUke7augQefOWGLUuTnOj5H/oINYu2AUy4ceJajgOKrJcE/3 290 | XsBXGoqGwgGjI7OER1vnPZGoby46vmcMi6MFjA/eITzLzCcGT01F8jb7Rh5kkn/J 291 | el67DxjpxG2aTeBWgmqnk+YfdEIleRKflSyuk9azhoHD4leD2L7i7d9LQGEe0M3R 292 | LyTaTKeMBhCrhjTTJ4dmMozlivQx6gqOwtE14osBHAzoCYtcWjp6GKA= 293 | =npff 294 | -----END PGP PUBLIC KEY BLOCK----- --------------------------------------------------------------------------------
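Note on the KEYS file above: it holds the release managers' public keys, and the checks/ directory in this repo carries the matching detached signatures (*.asc) and digest files. A minimal verification sketch follows, assuming GnuPG is installed and the kafka_2.10-0.8.2.1.tgz tarball (not shipped in this repo) has been downloaded into the working directory; the repo's own verify scripts presumably automate something similar, but this snippet is illustrative rather than a copy of them:

#!/bin/sh
# Import the release signing keys listed in KEYS.
gpg --import KEYS
# Check the detached signature against the tarball;
# gpg exits non-zero if the signature does not match.
gpg --verify checks/kafka_2.10-0.8.2.1.tgz.asc kafka_2.10-0.8.2.1.tgz
# Print MD5/SHA1 digests in the same grouped-hex layout as the
# files under checks/, so they can be compared by eye or with diff.
gpg --print-md MD5 kafka_2.10-0.8.2.1.tgz
gpg --print-md SHA1 kafka_2.10-0.8.2.1.tgz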