├── clients
├── clojure
│ ├── .gitignore
│ ├── resources
│ │ └── log4j.properties
│ ├── leiningen
│ │ └── run_example.clj
│ ├── test
│ │ └── kafka
│ │ │ ├── print_test.clj
│ │ │ ├── serializable_test.clj
│ │ │ └── buffer_test.clj
│ ├── project.clj
│ ├── src
│ │ └── kafka
│ │ │ ├── print.clj
│ │ │ ├── serializable.clj
│ │ │ ├── example.clj
│ │ │ └── types.clj
│ └── README.md
├── csharp
│ ├── .gitignore
│ ├── lib
│ │ └── nunit
│ │ │ └── 2.5.9
│ │ │ └── nunit.framework.dll
│ └── src
│ │ └── Kafka
│ │ ├── Kafka.Client
│ │ ├── RequestType.cs
│ │ ├── AbstractRequest.cs
│ │ ├── RequestContext.cs
│ │ └── Properties
│ │ │ └── AssemblyInfo.cs
│ │ └── Tests
│ │ ├── Kafka.Client.Tests
│ │ └── Properties
│ │ │ └── AssemblyInfo.cs
│ │ └── Kafka.Client.IntegrationTests
│ │ └── Properties
│ │ └── AssemblyInfo.cs
├── ruby
│ ├── TODO
│ ├── spec
│ │ ├── spec_helper.rb
│ │ ├── kafka_spec.rb
│ │ ├── batch_spec.rb
│ │ └── message_spec.rb
│ ├── lib
│ │ ├── kafka
│ │ │ ├── request_type.rb
│ │ │ ├── batch.rb
│ │ │ ├── error_codes.rb
│ │ │ ├── io.rb
│ │ │ ├── message.rb
│ │ │ └── producer.rb
│ │ └── kafka.rb
│ ├── kafka-rb.gemspec
│ ├── Rakefile
│ └── README.md
├── go
│ ├── tools
│ │ ├── offsets
│ │ │ └── Makefile
│ │ ├── consumer
│ │ │ └── Makefile
│ │ └── publisher
│ │ │ └── Makefile
│ ├── .gitignore
│ ├── Makefile
│ └── src
│ │ ├── timing.go
│ │ ├── converts.go
│ │ └── publisher.go
├── python
│ └── setup.py
├── php
│ ├── src
│ │ ├── tests
│ │ │ ├── phpunit.xml
│ │ │ ├── bootstrap.php
│ │ │ └── Kafka
│ │ │ │ ├── MessageTest.php
│ │ │ │ ├── EncoderTest.php
│ │ │ │ └── ProducerTest.php
│ │ ├── examples
│ │ │ ├── autoloader.php
│ │ │ ├── produce.php
│ │ │ └── consume.php
│ │ └── lib
│ │ │ └── Kafka
│ │ │ ├── Request.php
│ │ │ └── RequestKeys.php
│ └── README.md
└── cpp
│ ├── configure.ac
│ ├── build-aux
│ └── m4
│ │ └── ltversion.m4
│ ├── src
│ ├── example.cpp
│ ├── encoder.hpp
│ └── encoder_helper.hpp
│ ├── README.md
│ └── Makefile.am
├── core
├── src
│ ├── main
│ │ ├── scala
│ │ │ └── kafka
│ │ │ │ ├── server
│ │ │ │ ├── package.html
│ │ │ │ └── MultiMessageSetSend.scala
│ │ │ │ ├── utils
│ │ │ │ ├── package.html
│ │ │ │ ├── MockTime.scala
│ │ │ │ ├── Annotations.scala
│ │ │ │ ├── Range.scala
│ │ │ │ ├── DelayedItem.scala
│ │ │ │ ├── Time.scala
│ │ │ │ ├── KafkaScheduler.scala
│ │ │ │ ├── DumpLogSegments.scala
│ │ │ │ └── Pool.scala
│ │ │ │ ├── log
│ │ │ │ ├── package.html
│ │ │ │ └── LogStats.scala
│ │ │ │ ├── consumer
│ │ │ │ ├── package.html
│ │ │ │ ├── FetchedDataChunk.scala
│ │ │ │ ├── storage
│ │ │ │ │ ├── OffsetStorage.scala
│ │ │ │ │ └── MemoryOffsetStorage.scala
│ │ │ │ └── KafkaMessageStream.scala
│ │ │ │ ├── message
│ │ │ │ ├── package.html
│ │ │ │ ├── ByteBufferBackedInputStream.scala
│ │ │ │ ├── InvalidMessageException.scala
│ │ │ │ ├── MessageAndOffset.scala
│ │ │ │ ├── MessageLengthException.scala
│ │ │ │ └── CompressionCodec.scala
│ │ │ │ ├── common
│ │ │ │ ├── NoBrokersForPartitionException.scala
│ │ │ │ ├── InvalidMessageSizeException.scala
│ │ │ │ ├── UnknownException.scala
│ │ │ │ ├── UnavailableProducerException.scala
│ │ │ │ ├── InvalidConfigException.scala
│ │ │ │ ├── InvalidPartitionException.scala
│ │ │ │ ├── UnknownCodecException.scala
│ │ │ │ ├── OffsetOutOfRangeException.scala
│ │ │ │ └── UnknownMagicByteException.scala
│ │ │ │ ├── network
│ │ │ │ ├── InvalidRequestException.scala
│ │ │ │ ├── Request.scala
│ │ │ │ ├── package.html
│ │ │ │ ├── ConnectionConfig.scala
│ │ │ │ ├── Handler.scala
│ │ │ │ ├── ByteBufferSend.scala
│ │ │ │ └── BoundedByteBufferSend.scala
│ │ │ │ ├── producer
│ │ │ │ ├── async
│ │ │ │ │ ├── AsyncProducerStatsMBean.scala
│ │ │ │ │ ├── MissingConfigException.scala
│ │ │ │ │ ├── QueueClosedException.scala
│ │ │ │ │ ├── QueueFullException.scala
│ │ │ │ │ ├── AsyncProducerStats.scala
│ │ │ │ │ └── EventHandler.scala
│ │ │ │ ├── Partitioner.scala
│ │ │ │ ├── DefaultPartitioner.scala
│ │ │ │ ├── ProducerData.scala
│ │ │ │ ├── SyncProducerConfig.scala
│ │ │ │ └── BrokerPartitionInfo.scala
│ │ │ │ ├── api
│ │ │ │ ├── RequestKeys.scala
│ │ │ │ ├── FetchRequest.scala
│ │ │ │ ├── MultiFetchResponse.scala
│ │ │ │ ├── MultiFetchRequest.scala
│ │ │ │ └── MultiProducerRequest.scala
│ │ │ │ ├── serializer
│ │ │ │ ├── Encoder.scala
│ │ │ │ └── Decoder.scala
│ │ │ │ ├── javaapi
│ │ │ │ ├── producer
│ │ │ │ │ ├── ProducerData.scala
│ │ │ │ │ └── SyncProducer.scala
│ │ │ │ ├── MultiFetchResponse.scala
│ │ │ │ ├── ProducerRequest.scala
│ │ │ │ └── message
│ │ │ │ │ └── MessageSet.scala
│ │ │ │ └── cluster
│ │ │ │ ├── Cluster.scala
│ │ │ │ ├── Broker.scala
│ │ │ │ └── Partition.scala
│ │ └── java
│ │ │ └── kafka
│ │ │ └── javaapi
│ │ │ ├── consumer
│ │ │ └── ConsumerConnector.java
│ │ │ └── producer
│ │ │ └── async
│ │ │ └── EventHandler.java
│ └── test
│ │ ├── resources
│ │ ├── test-kafka-logs
│ │ │ └── MagicByte0-0
│ │ │ │ └── 00000000000000000000.kafka
│ │ └── log4j.properties
│ │ └── scala
│ │ ├── other
│ │ ├── kafka.log4j.properties
│ │ └── kafka
│ │ │ ├── TestTruncate.scala
│ │ │ ├── DeleteZKPath.scala
│ │ │ └── TestKafkaAppender.scala
│ │ └── unit
│ │ └── kafka
│ │ ├── utils
│ │ └── UtilsTest.scala
│ │ ├── zk
│ │ ├── ZooKeeperTestHarness.scala
│ │ └── EmbeddedZookeeper.scala
│ │ ├── message
│ │ └── CompressionUtilsTest.scala
│ │ ├── integration
│ │ ├── KafkaServerTestHarness.scala
│ │ └── ProducerConsumerTestHarness.scala
│ │ ├── consumer
│ │ └── TopicCountTest.scala
│ │ └── javaapi
│ │ └── integration
│ │ └── ProducerConsumerTestHarness.scala
└── lib
│ ├── zkclient-0.1.0.jar
│ └── zookeeper-3.3.3.jar
├── lib
└── sbt-launch.jar
├── sbt
├── perf
├── lib
│ └── jopt-simple-3.1.jar
├── report-html
│ ├── js
│ │ ├── exporting.js
│ │ └── highcharts.js
│ └── report.html
├── .project
├── util-bin
│ ├── run-numtopic-test.sh
│ ├── run-numproducer-test.sh
│ ├── run-numconsumer-sustained.sh
│ ├── run-numproducer-single-topic.sh
│ ├── run-msgsize-test.sh
│ ├── run-fetchsize-test.sh
│ ├── run-numconsumer-test.sh
│ └── remote-kafka-env.sh
├── .classpath
├── README.md
├── run-simulator.sh
└── src
│ └── main
│ └── java
│ └── kafka
│ └── perf
│ ├── KafkaSimulatorMXBean.java
│ └── jmx
│ └── BrokerJmxClient.java
├── bin
├── kafka-consumer-shell.sh
├── kafka-producer-shell.sh
├── kafka-consumer-perf-test.sh
├── kafka-producer-perf-test.sh
├── kafka-server-stop.sh
├── kafka-simple-consumer-shell.sh
├── zookeeper-server-stop.sh
├── kafka-simple-consumer-perf-test.sh
├── zookeeper-server-start.sh
├── kafka-server-start.sh
├── kafka-replay-log-producer.sh
├── kafka-console-consumer.sh
├── kafka-console-consumer-log4j.properties
└── kafka-run-class.sh
├── contrib
├── hadoop-consumer
│ ├── lib
│ │ ├── piggybank.jar
│ │ ├── avro-1.4.0.jar
│ │ ├── pig-0.8.0-core.jar
│ │ ├── hadoop-0.20.2-core.jar
│ │ ├── commons-logging-1.0.4.jar
│ │ ├── jackson-core-asl-1.5.5.jar
│ │ └── jackson-mapper-asl-1.5.5.jar
│ ├── hadoop-setup.sh
│ ├── test
│ │ └── test.properties
│ ├── src
│ │ └── main
│ │ │ └── java
│ │ │ └── kafka
│ │ │ └── etl
│ │ │ └── UndefinedPropertyException.java
│ ├── run-class.sh
│ └── copy-jars.sh
└── hadoop-producer
│ └── lib
│ ├── piggybank.jar
│ ├── avro-1.4.0.jar
│ ├── pig-0.8.0-core.jar
│ ├── hadoop-0.20.2-core.jar
│ ├── commons-logging-1.0.4.jar
│ ├── jackson-core-asl-1.5.5.jar
│ └── jackson-mapper-asl-1.5.5.jar
├── config
├── zookeeper.properties
├── consumer.properties
└── log4j.properties
├── CONTRIBUTORS
├── system_test
├── producer_perf
│ ├── config
│ │ └── zookeeper.properties
│ ├── README
│ └── bin
│ │ ├── run-test.sh
│ │ └── run-compression-test.sh
└── embedded_consumer
│ ├── config
│ ├── zookeeper_source.properties
│ ├── zookeeper_target.properties
│ └── consumer.properties
│ ├── README
│ ├── expected.out
│ └── bin
│ └── expected.out
├── .gitignore
├── project
├── plugins
│ └── Plugins.scala
└── build.properties
└── examples
├── README
├── src
└── main
│ └── java
│ └── kafka
│ └── examples
│ ├── ExampleUtils.java
│ ├── KafkaConsumerProducerDemo.java
│ ├── KafkaProperties.java
│ └── Producer.java
└── bin
├── java-simple-consumer-demo.sh
└── java-producer-consumer-demo.sh
/clients/clojure/.gitignore:
--------------------------------------------------------------------------------
1 | lib
2 | classes
3 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/server/package.html:
--------------------------------------------------------------------------------
1 | The Kafka server.
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/package.html:
--------------------------------------------------------------------------------
1 | Utility functions.
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/log/package.html:
--------------------------------------------------------------------------------
1 | The log management system for Kafka.
--------------------------------------------------------------------------------
/clients/csharp/.gitignore:
--------------------------------------------------------------------------------
1 | StyleCop.Cache
2 | bin
3 | obj
4 | *.suo
5 | *.csproj.user
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/consumer/package.html:
--------------------------------------------------------------------------------
1 | This is the consumer API for Kafka.
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/package.html:
--------------------------------------------------------------------------------
1 | Messages and everything related to them.
--------------------------------------------------------------------------------
/lib/sbt-launch.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/lib/sbt-launch.jar
--------------------------------------------------------------------------------
/sbt:
--------------------------------------------------------------------------------
1 | java -Xmx1024M -XX:MaxPermSize=512m -jar `dirname $0`/lib/sbt-launch.jar "$@"
2 |
--------------------------------------------------------------------------------
/clients/ruby/TODO:
--------------------------------------------------------------------------------
1 | * should persist the offset somewhere (currently considering alternatives)
2 |
--------------------------------------------------------------------------------
/clients/ruby/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | require 'kafka'
3 |
4 | include Kafka
--------------------------------------------------------------------------------
/core/lib/zkclient-0.1.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/core/lib/zkclient-0.1.0.jar
--------------------------------------------------------------------------------
/core/lib/zookeeper-3.3.3.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/core/lib/zookeeper-3.3.3.jar
--------------------------------------------------------------------------------
/perf/lib/jopt-simple-3.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/perf/lib/jopt-simple-3.1.jar
--------------------------------------------------------------------------------
/bin/kafka-consumer-shell.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerShell $@
4 |
--------------------------------------------------------------------------------
/bin/kafka-producer-shell.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.ProducerShell $@
4 |
--------------------------------------------------------------------------------
/perf/report-html/js/exporting.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/perf/report-html/js/exporting.js
--------------------------------------------------------------------------------
/perf/report-html/js/highcharts.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/perf/report-html/js/highcharts.js
--------------------------------------------------------------------------------
/bin/kafka-consumer-perf-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance $@
4 |
--------------------------------------------------------------------------------
/bin/kafka-producer-perf-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.ProducerPerformance $@
4 |
--------------------------------------------------------------------------------
/bin/kafka-server-stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGINT
3 |
--------------------------------------------------------------------------------
/bin/kafka-simple-consumer-shell.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.SimpleConsumerShell $@
4 |
--------------------------------------------------------------------------------
/bin/zookeeper-server-stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ps ax | grep -i 'zookeeper' | grep -v grep | awk '{print $1}' | xargs kill -SIGINT
3 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/piggybank.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/piggybank.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/piggybank.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/piggybank.jar
--------------------------------------------------------------------------------
/bin/kafka-simple-consumer-perf-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $(dirname $0)/kafka-run-class.sh kafka.tools.SimpleConsumerPerformance $@
4 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/avro-1.4.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/avro-1.4.0.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/avro-1.4.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/avro-1.4.0.jar
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/hadoop-setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | hadoop=${HADOOP_HOME}/bin/hadoop
4 |
5 | $hadoop fs -chmod ugoa+w /tmp
6 |
7 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/pig-0.8.0-core.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/pig-0.8.0-core.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/pig-0.8.0-core.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/pig-0.8.0-core.jar
--------------------------------------------------------------------------------
/clients/csharp/lib/nunit/2.5.9/nunit.framework.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/clients/csharp/lib/nunit/2.5.9/nunit.framework.dll
--------------------------------------------------------------------------------
/clients/ruby/spec/kafka_spec.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/spec_helper'
2 |
3 | describe Kafka do
4 |
5 | before(:each) do
6 | end
7 | end
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/hadoop-0.20.2-core.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/hadoop-0.20.2-core.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/hadoop-0.20.2-core.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/hadoop-0.20.2-core.jar
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/commons-logging-1.0.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/commons-logging-1.0.4.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/commons-logging-1.0.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/commons-logging-1.0.4.jar
--------------------------------------------------------------------------------
/clients/go/tools/offsets/Makefile:
--------------------------------------------------------------------------------
1 | include $(GOROOT)/src/Make.inc
2 |
3 | TARG=offsets
4 | GOFILES=\
5 | offsets.go\
6 |
7 | include $(GOROOT)/src/Make.cmd
8 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/jackson-core-asl-1.5.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/jackson-core-asl-1.5.5.jar
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/lib/jackson-mapper-asl-1.5.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-consumer/lib/jackson-mapper-asl-1.5.5.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/jackson-core-asl-1.5.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/jackson-core-asl-1.5.5.jar
--------------------------------------------------------------------------------
/contrib/hadoop-producer/lib/jackson-mapper-asl-1.5.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/contrib/hadoop-producer/lib/jackson-mapper-asl-1.5.5.jar
--------------------------------------------------------------------------------
/clients/go/tools/consumer/Makefile:
--------------------------------------------------------------------------------
1 | include $(GOROOT)/src/Make.inc
2 |
3 | TARG=consumer
4 | GOFILES=\
5 | consumer.go\
6 |
7 | include $(GOROOT)/src/Make.cmd
8 |
--------------------------------------------------------------------------------
/clients/go/tools/publisher/Makefile:
--------------------------------------------------------------------------------
1 | include $(GOROOT)/src/Make.inc
2 |
3 | TARG=publisher
4 | GOFILES=\
5 | publisher.go\
6 |
7 | include $(GOROOT)/src/Make.cmd
8 |
--------------------------------------------------------------------------------
/config/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # the directory where the snapshot is stored.
2 | dataDir=/tmp/zookeeper
3 | # the port at which the clients will connect
4 | clientPort=2181
5 |
--------------------------------------------------------------------------------
/CONTRIBUTORS:
--------------------------------------------------------------------------------
1 | Jay Kreps
2 | Rui Wang
3 | Jun Rao
4 | Neha Narkhede
5 | Fatih Emekci
6 | Lin Guo
7 | Shirshanka Das
8 | Roshan Sumbaly
9 | Sam Shah
10 | Chris Burroughs
--------------------------------------------------------------------------------
/system_test/producer_perf/config/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # the directory where the snapshot is stored.
2 | dataDir=/tmp/zookeeper
3 | # the port at which the clients will connect
4 | clientPort=2181
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | *classes
3 | target/
4 | lib_managed/
5 | src_managed/
6 | project/boot/
7 | project/plugins/project/
8 | project/sbt_project_definition.iml
9 | .settings
10 | .idea
11 | .project
12 |
--------------------------------------------------------------------------------
/core/src/test/resources/test-kafka-logs/MagicByte0-0/00000000000000000000.kafka:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sandbox/kafka/master/core/src/test/resources/test-kafka-logs/MagicByte0-0/00000000000000000000.kafka
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/request_type.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 | module RequestType
3 | PRODUCE = 0
4 | FETCH = 1
5 | MULTIFETCH = 2
6 | MULTIPRODUCE = 3
7 | OFFSETS = 4
8 | end
9 | end
--------------------------------------------------------------------------------
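
Those five ids mirror the broker's wire protocol of this generation: every request travels as a length-prefixed frame whose payload begins with a 2-byte request-type id. A minimal Scala sketch of that framing, offered as a hedged illustration (the object and helper names are mine, not the client's):

    import java.nio.ByteBuffer

    object RequestFraming {
      val Produce: Short = 0 // same id as Kafka::RequestType::PRODUCE above

      // Frame a payload: a 4-byte big-endian length, then the 2-byte
      // request-type id, then the request body itself.
      def frame(requestType: Short, payload: Array[Byte]): ByteBuffer = {
        val buf = ByteBuffer.allocate(4 + 2 + payload.length)
        buf.putInt(2 + payload.length) // the length counts everything after itself
        buf.putShort(requestType)
        buf.put(payload)
        buf.flip()
        buf
      }
    }
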
/system_test/embedded_consumer/config/zookeeper_source.properties:
--------------------------------------------------------------------------------
1 | # the directory where the snapshot is stored.
2 | dataDir=/tmp/zookeeper_source
3 | # the port at which the clients will connect
4 | clientPort=2181
5 |
--------------------------------------------------------------------------------
/system_test/embedded_consumer/config/zookeeper_target.properties:
--------------------------------------------------------------------------------
1 | # the directory where the snapshot is stored.
2 | dataDir=/tmp/zookeeper_target
3 | # the port at which the clients will connect
4 | clientPort=2182
5 |
--------------------------------------------------------------------------------
/bin/zookeeper-server-start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 1 ];
4 | then
5 | echo "USAGE: $0 zookeeper.properties"
6 | exit 1
7 | fi
8 |
9 | $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
--------------------------------------------------------------------------------
/bin/kafka-server-start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -lt 1 ];
4 | then
5 | echo "USAGE: $0 server.properties [consumer.properties]"
6 | exit 1
7 | fi
8 |
9 | export JMX_PORT="9999"
10 |
11 | $(dirname $0)/kafka-run-class.sh kafka.Kafka $@
12 |
--------------------------------------------------------------------------------
/clients/clojure/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, A1
2 |
3 | log4j.appender.A1=org.apache.log4j.ConsoleAppender
4 | log4j.appender.A1.layout=org.apache.log4j.PatternLayout
5 | log4j.appender.A1.layout.ConversionPattern= %-5p %c - %m%n
6 |
--------------------------------------------------------------------------------
/clients/go/.gitignore:
--------------------------------------------------------------------------------
1 | _go_.6
2 | _obj
3 | 6.out
4 | _gotest_.6
5 | _test
6 | _testmain.go
7 | _testmain.6
8 | tools/*/_obj
9 | tools/*/_go_.6
10 | tools/consumer/consumer
11 | tools/publisher/publisher
12 | tools/consumer/test.txt
13 | tools/offsets/offsets
14 |
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/batch.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 | class Batch
3 | attr_accessor :messages
4 |
5 | def initialize
6 | self.messages = []
7 | end
8 |
9 | def << (message)
10 | self.messages << message
11 | end
12 | end
13 | end
--------------------------------------------------------------------------------
/project/plugins/Plugins.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 |
3 | class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
4 | val repo = "GH-pages repo" at "http://mpeltonen.github.com/maven/"
5 | val idea = "com.github.mpeltonen" % "sbt-idea-plugin" % "0.1-SNAPSHOT"
6 | }
7 |
--------------------------------------------------------------------------------
/bin/kafka-replay-log-producer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=$(dirname $0)
4 | export KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote -Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
5 | $base_dir/kafka-run-class.sh kafka.tools.ReplayLogProducer $@
6 |
--------------------------------------------------------------------------------
/bin/kafka-console-consumer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=$(dirname $0)
4 | export KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote -Dlog4j.configuration=file:$base_dir/kafka-console-consumer-log4j.properties"
5 | $base_dir/kafka-run-class.sh kafka.consumer.ConsoleConsumer $@
6 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | #Project properties
2 | #Mon Feb 28 11:55:49 PST 2011
3 | project.name=Kafka
4 | sbt.version=0.7.5
5 | project.version=0.7
6 | build.scala.versions=2.8.0
7 | contrib.root.dir=contrib
8 | lib.dir=lib
9 | target.dir=target/scala_2.8.0
10 | dist.dir=dist
11 |
--------------------------------------------------------------------------------
/clients/clojure/leiningen/run_example.clj:
--------------------------------------------------------------------------------
1 | (ns leiningen.run-example
2 | (:use [leiningen.compile :only (eval-in-project)]))
3 |
4 | (defn run-example
5 | [project & args]
6 | (eval-in-project project
7 | `(do
8 | (require 'kafka.example)
9 | (kafka.example/run))))
10 |
11 |
--------------------------------------------------------------------------------
/bin/kafka-console-consumer-log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stderr
2 |
3 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender
4 | log4j.appender.stderr.target=System.err
5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
6 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
7 |
8 |
--------------------------------------------------------------------------------
/core/src/test/scala/other/kafka.log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, KAFKA
2 |
3 | log4j.appender.KAFKA=kafka.log4j.KafkaAppender
4 |
5 | log4j.appender.KAFKA.Port=9092
6 | log4j.appender.KAFKA.Host=localhost
7 | log4j.appender.KAFKA.Topic=test-logger
8 | log4j.appender.KAFKA.Serializer=kafka.AppenderStringSerializer
9 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/NoBrokersForPartitionException.scala:
--------------------------------------------------------------------------------
1 | package kafka.common
2 |
3 | /**
 4 |  * Thrown when a request is made for a partition, but no brokers with that
 5 |  * topic exist.
6 | */
7 | class NoBrokersForPartitionException(message: String) extends RuntimeException(message) {
8 | def this() = this(null)
9 | }
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/InvalidMessageSizeException.scala:
--------------------------------------------------------------------------------
1 | package kafka.common
2 |
3 | /**
 4 |  * Indicates the message size is invalid, e.g. larger than the fetch size allows
5 | */
6 | class InvalidMessageSizeException(message: String) extends RuntimeException(message) {
7 | def this() = this(null)
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/core/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=OFF, stdout
2 |
3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
4 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
5 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
6 |
 7 | # kafka internals logging; set to DEBUG to turn on all our debugging info
8 | log4j.logger.kafka=OFF
9 |
10 |
--------------------------------------------------------------------------------
/clients/python/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from distutils.core import setup
4 |
5 | setup(
6 | name='kafka-python-client',
7 | version='0.6',
8 | description='This library implements a Kafka client',
9 | author='LinkedIn.com',
10 | url='https://github.com/kafka-dev/kafka',
11 | package_dir={'': '.'},
12 | py_modules=[
13 | 'kafka',
14 | ],
15 | )
16 |
--------------------------------------------------------------------------------
/system_test/embedded_consumer/README:
--------------------------------------------------------------------------------
 1 | This test replicates messages from 3 Kafka brokers to 2 other Kafka brokers using the embedded consumer.
 2 | At the end, the messages produced at the source brokers should match those at the target brokers.
3 |
4 | To run this test, do
5 | bin/run-test.sh
6 |
 7 | The expected output is given in bin/expected.out. There is only one thing to check:
 8 | 1. The output should have a line "test passed".
9 |
--------------------------------------------------------------------------------
/clients/clojure/test/kafka/print_test.clj:
--------------------------------------------------------------------------------
1 | (ns kafka.print-test
2 | (:use (kafka types print)
3 | clojure.test))
4 |
5 | (deftest test-pack-unpack
6 | (is (= "test" (unpack (pack "test"))))
7 | (is (= 123 (unpack (pack 123))))
8 | (is (= true (unpack (pack true))))
9 | (is (= [1 2 3] (unpack (pack [1 2 3]))))
10 | (is (= {:a 1} (unpack (pack {:a 1}))))
11 | (is (= '(+ 1 2 3) (unpack (pack '(+ 1 2 3))))))
12 |
13 |
--------------------------------------------------------------------------------
/perf/.project:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <projectDescription>
 3 |   <name>kafka-perf</name>
 4 |   <comment></comment>
 5 |   <projects>
 6 |   </projects>
 7 |   <buildSpec>
 8 |     <buildCommand>
 9 |       <name>org.eclipse.jdt.core.javabuilder</name>
10 |       <arguments>
11 |       </arguments>
12 |     </buildCommand>
13 |   </buildSpec>
14 |   <natures>
15 |     <nature>org.eclipse.jdt.core.javanature</nature>
16 |   </natures>
17 | </projectDescription>
18 | 
--------------------------------------------------------------------------------
/system_test/producer_perf/README:
--------------------------------------------------------------------------------
 1 | This test produces a large number of messages to a broker. It measures the throughput and checks
 2 | that the amount of data received matches the amount sent.
3 |
4 | To run this test, do
5 | bin/run-test.sh
6 |
7 | The expected output is given in expected.out. There are 2 things to pay attention to:
8 | 1. The output should have a line "test passed".
9 | 2. The throughput from the producer should be around 300,000 Messages/sec on a typical machine.
10 |
--------------------------------------------------------------------------------
/config/consumer.properties:
--------------------------------------------------------------------------------
1 | # see kafka.consumer.ConsumerConfig for more details
2 |
3 | # zk connection string
4 | # comma separated host:port pairs, each corresponding to a zk
5 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
6 | zk.connect=127.0.0.1:2181
7 |
8 | # timeout in ms for connecting to zookeeper
9 | zk.connectiontimeout.ms=1000000
10 |
11 | #consumer group id
12 | groupid=test-consumer-group
13 |
14 | #consumer timeout
15 | #consumer.timeout.ms=5000
16 |
--------------------------------------------------------------------------------
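
As the comments in this file note, zk.connect is a comma-separated list of host:port pairs. A small sketch of how a JVM client could load the file and split that list (the file path here is assumed, not part of the original):

    import java.io.FileInputStream
    import java.util.Properties

    object ConsumerConfigDemo {
      def main(args: Array[String]): Unit = {
        val props = new Properties()
        val in = new FileInputStream("config/consumer.properties") // assumed path
        try props.load(in) finally in.close()

        // zk.connect may name several ZooKeeper servers, comma separated
        val zkServers = props.getProperty("zk.connect", "").split(",").map(_.trim)
        zkServers.foreach(s => println("zk server: " + s))
        println("consumer group: " + props.getProperty("groupid"))
      }
    }
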
/system_test/embedded_consumer/config/consumer.properties:
--------------------------------------------------------------------------------
1 | # see kafka.consumer.ConsumerConfig for more details
2 |
3 | # zk connection string
4 | # comma separated host:port pairs, each corresponding to a zk
5 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
6 | zk.connect=localhost:2181
7 |
8 | # timeout in ms for connecting to zookeeper
9 | zk.connectiontimeout.ms=1000000
10 |
11 | #consumer group id
12 | groupid=group1
13 |
14 | embeddedconsumer.topics=test01:1
15 |
--------------------------------------------------------------------------------
/perf/util-bin/run-numtopic-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 `seq -s " " 10 10 50` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=$i -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=20 -numProducer=40 -xaxis=numTopic"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/clients/php/src/tests/phpunit.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <phpunit bootstrap="bootstrap.php"
 3 |          colors="false"
 4 |          convertErrorsToExceptions="true"
 5 |          convertNoticesToExceptions="true"
 6 |          convertWarningsToExceptions="true">
 7 |   <testsuites>
 8 |     <testsuite name="Kafka">
 9 |       <directory>./Kafka</directory>
10 |     </testsuite>
11 |   </testsuites>
12 |   <filter>
13 |     <whitelist>
14 |       <directory>./</directory>
15 |     </whitelist>
16 |   </filter>
17 | </phpunit>
18 | 
--------------------------------------------------------------------------------
/perf/util-bin/run-numproducer-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 `seq -s " " 10 10 50` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=10 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=20 -numProducer=$i -xaxis=numProducer"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/perf/util-bin/run-numconsumer-sustained.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 `seq -s " " 10 10 50` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=10 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=$i -numProducer=10 -xaxis=numConsumer"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/perf/util-bin/run-numproducer-single-topic.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 `seq -s " " 10 10 50` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=1 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=0 -numProducer=$i -xaxis=numProducer"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/perf/util-bin/run-msgsize-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 200 `seq -s " " 1000 1000 10000` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=10 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=20 -numProducer=40 -xaxis=msgSize -msgSize=$i"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/perf/util-bin/run-fetchsize-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | for i in 1 `seq -s " " 10 10 50` ;
11 | do
12 | kafka_startup
13 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=10 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=20 -numProducer=40 -xaxis=fetchSize -msgSize=1000 -fetchSize=$((1024*$i))"
14 | kafka_cleanup
15 | done
16 |
--------------------------------------------------------------------------------
/clients/clojure/test/kafka/serializable_test.clj:
--------------------------------------------------------------------------------
1 | (ns kafka.serializable-test
2 | (:use (kafka types serializable)
3 | clojure.test))
4 |
5 | (deftest test-pack-unpack
6 | (is (= "test" (unpack (pack "test"))))
7 | (is (= 123 (unpack (pack 123))))
8 | (is (= true (unpack (pack true))))
9 | (is (= [1 2 3] (unpack (pack [1 2 3]))))
10 | (is (= {:a 1} (unpack (pack {:a 1}))))
11 | (is (= '(+ 1 2 3) (unpack (pack '(+ 1 2 3)))))
12 | (let [now (java.util.Date.)]
13 | (is (= now (unpack (pack now))))))
14 |
15 |
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka.rb:
--------------------------------------------------------------------------------
1 | require 'socket'
2 | require 'zlib'
3 |
4 | require File.join(File.dirname(__FILE__), "kafka", "io")
5 | require File.join(File.dirname(__FILE__), "kafka", "request_type")
6 | require File.join(File.dirname(__FILE__), "kafka", "error_codes")
7 | require File.join(File.dirname(__FILE__), "kafka", "batch")
8 | require File.join(File.dirname(__FILE__), "kafka", "message")
9 | require File.join(File.dirname(__FILE__), "kafka", "producer")
10 | require File.join(File.dirname(__FILE__), "kafka", "consumer")
11 |
12 | module Kafka
13 | end
14 |
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/error_codes.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 | module ErrorCodes
3 | NO_ERROR = 0
4 | OFFSET_OUT_OF_RANGE = 1
5 | INVALID_MESSAGE_CODE = 2
6 | WRONG_PARTITION_CODE = 3
 7 | INVALID_FETCH_SIZE_CODE = 4
8 |
9 | STRINGS = {
10 | 0 => 'No error',
11 | 1 => 'Offset out of range',
12 | 2 => 'Invalid message code',
13 | 3 => 'Wrong partition code',
14 | 4 => 'Invalid fetch size code',
15 | }
16 |
17 | def self.to_s(code)
18 | STRINGS[code] || 'Unknown error'
19 | end
20 | end
21 | end
22 |
--------------------------------------------------------------------------------
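
Seen from the wire, these are the values a client finds at the front of a fetch response in this protocol generation: a 2-byte error code, with 0 meaning success. A hedged Scala sketch of decoding one (the surrounding response layout is not shown here):

    import java.nio.ByteBuffer

    object ErrorCodeDemo {
      // Same table as Kafka::ErrorCodes::STRINGS above.
      val strings = Map(
        0 -> "No error",
        1 -> "Offset out of range",
        2 -> "Invalid message code",
        3 -> "Wrong partition code",
        4 -> "Invalid fetch size code")

      // Read the leading 2-byte error code and render it the way
      // ErrorCodes.to_s does above.
      def describe(response: ByteBuffer): String =
        strings.getOrElse(response.getShort.toInt, "Unknown error")

      def main(args: Array[String]): Unit = {
        val buf = ByteBuffer.allocate(2)
        buf.putShort(1.toShort)
        buf.flip()
        println(describe(buf)) // prints "Offset out of range"
      }
    }
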
/clients/go/Makefile:
--------------------------------------------------------------------------------
1 | include $(GOROOT)/src/Make.inc
2 |
3 | TARG=kafka
4 | GOFILES=\
5 | src/kafka.go\
6 | src/message.go\
7 | src/converts.go\
8 | src/consumer.go\
9 | src/publisher.go\
10 | src/timing.go\
11 | src/request.go\
12 |
13 | include $(GOROOT)/src/Make.pkg
14 |
15 | tools: force
16 | make -C tools/consumer clean all
17 | make -C tools/publisher clean all
18 | make -C tools/offsets clean all
19 |
20 | format:
21 | gofmt -w -tabwidth=2 -tabindent=false src/*.go tools/consumer/*.go tools/publisher/*.go kafka_test.go
22 |
23 | full: format clean install tools
24 |
25 | .PHONY: force
26 |
--------------------------------------------------------------------------------
/perf/.classpath:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/clients/ruby/spec/batch_spec.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/spec_helper'
2 |
3 | describe Batch do
4 |
5 | before(:each) do
6 | @batch = Batch.new
7 | end
8 |
9 | describe "batch messages" do
10 | it "holds all messages to be sent" do
11 | @batch.should respond_to(:messages)
12 | @batch.messages.class.should eql(Array)
13 | end
14 |
15 | it "supports queueing/adding messages to be send" do
16 | @batch.messages << mock(Kafka::Message.new("one"))
17 | @batch.messages << mock(Kafka::Message.new("two"))
18 | @batch.messages.length.should eql(2)
19 | end
20 | end
21 | end
--------------------------------------------------------------------------------
/clients/cpp/configure.ac:
--------------------------------------------------------------------------------
 1 | ## LibKafkaConnect
 2 | ## A C++ shared library for connecting to Kafka
3 |
4 | #
 5 | # Warning: this is the first time I've made a configure.ac/Makefile.am thing.
 6 | # Please improve it, as I have no idea what I am doing.
7 | # @benjamg
8 | #
9 |
10 | AC_INIT([LibKafkaConnect], [0.1])
11 | AC_PREREQ([2.59])
12 |
13 | AC_CONFIG_AUX_DIR([build-aux])
14 | AM_INIT_AUTOMAKE([foreign -Wall])
15 |
16 | AC_PROG_LIBTOOL
17 | AC_PROG_CXX
18 | AC_PROG_CPP
19 |
20 | AC_CONFIG_MACRO_DIR([build-aux/m4])
21 |
22 | #
23 | # Version number
24 | #
25 | AC_SUBST([KAFKACONNECT_VERSION], [1:0:1])
26 |
27 | AC_CONFIG_FILES([Makefile])
28 | AC_OUTPUT
--------------------------------------------------------------------------------
/clients/clojure/project.clj:
--------------------------------------------------------------------------------
1 | (defproject kafka-clj "0.1-SNAPSHOT"
2 | :description "Kafka client for Clojure."
3 | :url "http://sna-projects.com/kafka/"
4 | :dependencies [[org.clojure/clojure "1.2.0"]
5 | [org.clojure/clojure-contrib "1.2.0"]
6 | [log4j "1.2.15" :exclusions [javax.mail/mail
7 | javax.jms/jms
8 | com.sun.jdmk/jmxtools
9 | com.sun.jmx/jmxri]]]
10 | :disable-deps-clean false
11 | :warn-on-reflection true
12 | :source-path "src"
13 | :test-path "test")
14 |
--------------------------------------------------------------------------------
/clients/clojure/src/kafka/print.clj:
--------------------------------------------------------------------------------
1 | (ns #^{:doc "Basic Clojure print-dup -> read-string message serialization."}
2 | kafka.print
3 | (:use kafka.types)
4 | (:import (kafka.types Message)))
5 |
6 | (extend-type Object
7 | Pack
8 | (pack [this]
9 | (let [^String st (with-out-str (print-dup this *out*))]
10 | (kafka.types.Message. (.getBytes st "UTF-8")))))
11 |
12 | (extend-type Message
13 | Unpack
14 | (unpack [this]
15 | (let [^bytes ba (.message this)
16 | msg (String. ba "UTF-8")]
17 | (if (not (empty? msg))
18 | (try
19 | (read-string msg)
20 | (catch Exception e
21 | (println "Invalid expression " msg)))))))
22 |
23 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/test/test.properties:
--------------------------------------------------------------------------------
1 | # name of test topic
2 | kafka.etl.topic=SimpleTestEvent
3 |
4 | # hdfs location of jars
5 | hdfs.default.classpath.dir=/tmp/kafka/lib
6 |
7 | # number of test events to be generated
8 | event.count=1000
9 |
10 | # hadoop id and group
11 | hadoop.job.ugi=kafka,hadoop
12 |
13 | # kafka server uri
14 | kafka.server.uri=tcp://localhost:9092
15 |
16 | # hdfs location of input directory
17 | input=/tmp/kafka/data
18 |
19 | # hdfs location of output directory
20 | output=/tmp/kafka/output
21 |
22 | # limit the number of events to be fetched;
23 | # value -1 means no limitation
24 | kafka.request.limit=-1
25 |
26 | # kafka parameters
27 | client.buffer.size=1048576
28 | client.so.timeout=60000
29 |
--------------------------------------------------------------------------------
/perf/util-bin/run-numconsumer-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REMOTE_KAFKA_LOGIN=$1 # user@host format
4 | REMOTE_SIM_LOGIN=$2
5 | TEST_TIME=$3
6 | REPORT_FILE=$4
7 |
8 | . `dirname $0`/remote-kafka-env.sh
9 |
10 | kafka_startup
11 | # You need to twiddle this time value depending on the test time below
12 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=1 -reportFile=$REPORT_FILE -time=7 -numConsumer=0 -numProducer=10 -xaxis=numConsumer"
13 | sleep 20
14 |
15 | for i in 1 `seq -s " " 10 10 50` ;
16 | do
17 | ssh $REMOTE_SIM_LOGIN "$SIMULATOR_SCRIPT -kafkaServer=$KAFKA_SERVER -numTopic=1 -reportFile=$REPORT_FILE -time=$TEST_TIME -numConsumer=$i -numProducer=0 -xaxis=numConsumer"
18 | sleep 10
19 | done
20 |
21 | kafka_cleanup
22 |
--------------------------------------------------------------------------------
/clients/php/src/examples/autoloader.php:
--------------------------------------------------------------------------------
 1 | <?php
 2 | 
 3 | /**
 4 |  * Simple SPL autoloader for the Kafka examples, following the
 5 |  * PEAR/Zend one-class-per-file convention described in the README.
 6 |  */
 7 | spl_autoload_register(function ($className) {
 8 | 
 9 |     $classFile = str_replace('_', DIRECTORY_SEPARATOR, $className) . '.php';
10 | 
11 |     /* look on the include_path for the class name
12 |        -> file association */
13 |     $file = stream_resolve_include_path($classFile);
14 | 
15 |     if (($file !== false) && ($file !== null)) {
16 |         include $file;
17 |         return;
18 |     }
19 | 
20 |     throw new RuntimeException($className. ' not found');
21 | });
22 | 
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/ByteBufferBackedInputStream.scala:
--------------------------------------------------------------------------------
1 | package kafka.message
2 |
3 | import java.io.InputStream
4 | import java.nio.ByteBuffer
5 | import scala.Math
6 |
7 | class ByteBufferBackedInputStream(buffer:ByteBuffer) extends InputStream {
8 | override def read():Int = {
9 | buffer.hasRemaining match {
10 | case true =>
11 | (buffer.get() & 0xFF)
12 | case false => -1
13 | }
14 | }
15 |
16 | override def read(bytes:Array[Byte], off:Int, len:Int):Int = {
17 | buffer.hasRemaining match {
18 | case true =>
19 | // Read only what's left
20 | val realLen = math.min(len, buffer.remaining())
21 | buffer.get(bytes, off, realLen)
22 | realLen
23 | case false => -1
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
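
A usage sketch for the adapter above, assuming the compiled class is on the classpath: wrap a ByteBuffer and drain it through the usual InputStream bulk-read loop, where -1 signals that the buffer is exhausted.

    import java.nio.ByteBuffer

    object ByteBufferStreamDemo {
      def main(args: Array[String]): Unit = {
        val buf = ByteBuffer.wrap("hello kafka".getBytes("UTF-8"))
        val in = new kafka.message.ByteBufferBackedInputStream(buf)
        val out = new java.io.ByteArrayOutputStream()
        val chunk = new Array[Byte](4)
        var n = in.read(chunk, 0, chunk.length)
        while (n != -1) { // -1 once the underlying buffer has no bytes left
          out.write(chunk, 0, n)
          n = in.read(chunk, 0, chunk.length)
        }
        println(out.toString("UTF-8")) // prints "hello kafka"
      }
    }
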
/clients/php/README.md:
--------------------------------------------------------------------------------
1 | # kafka-php
2 | kafka-php allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
3 |
4 | ## Requirements
5 | Minimum PHP version: 5.3.3.
 6 | You need to have access to your Kafka instance and be able to connect through TCP. You can obtain a copy and instructions on how to set up Kafka at https://github.com/kafka-dev/kafka
7 |
8 | ## Installation
9 | Add the lib directory to the include_path and use an autoloader like the one in the examples directory (the code follows the PEAR/Zend one-class-per-file convention).
10 |
11 | ## Usage
12 | The examples directory contains an example of a Producer and a Consumer.
13 |
14 | ## Contact for questions
15 |
16 | Lorenzo Alberton
17 |
18 | l.alberton at(@) quipo.it
19 |
20 | http://twitter.com/lorenzoalberton
21 |
--------------------------------------------------------------------------------
/clients/php/src/examples/produce.php:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/php
 2 | <?php
 3 | 
 4 | require dirname(__FILE__) . '/autoloader.php';
 5 | 
 6 | // minimal producer example; adjust host/port/topic for your setup
 7 | $host  = 'localhost';
 8 | $port  = 9092;
 9 | $topic = 'test';
10 | 
11 | $messages = array(
12 |     'test message',
13 | );
14 | 
15 | $producer = new Kafka_Producer($host, $port);
16 | if ($producer->connect()) {
17 |     $bytes = $producer->send($messages, $topic);
18 |     printf("\nSuccessfully sent %d messages (%d bytes)\n\n", count($messages), $bytes);
19 | }
--------------------------------------------------------------------------------
/clients/clojure/src/kafka/serializable.clj:
--------------------------------------------------------------------------------
1 | (ns #^{:doc "Serialization for all Java Serializable objects."}
2 | kafka.serializable
3 | (:use kafka.types)
4 | (:import (kafka.types Message)
5 | (java.io Serializable
6 | ObjectOutputStream ByteArrayOutputStream
7 | ObjectInputStream ByteArrayInputStream)))
8 |
9 | (extend-type Serializable
10 | Pack
11 | (pack [this]
12 | (let [bas (ByteArrayOutputStream.)]
13 | (with-open [oos (ObjectOutputStream. bas)]
14 | (.writeObject oos this))
15 | (kafka.types.Message. (.toByteArray bas)))))
16 |
17 | (extend-type Message
18 | Unpack
19 | (unpack [this]
20 | (with-open [ois (ObjectInputStream. (ByteArrayInputStream. (.message this)))]
21 | (.readObject ois))))
22 |
23 |
--------------------------------------------------------------------------------
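
For readers less at home in Clojure, the same round trip in Scala: pack streams any java.io.Serializable value through an ObjectOutputStream into a byte array, and unpack reads it back. A self-contained sketch (the names are mine, not the client's):

    import java.io._

    object SerializableRoundTrip {
      def pack(obj: Serializable): Array[Byte] = {
        val bas = new ByteArrayOutputStream()
        val oos = new ObjectOutputStream(bas)
        try oos.writeObject(obj) finally oos.close()
        bas.toByteArray
      }

      def unpack(bytes: Array[Byte]): AnyRef = {
        val ois = new ObjectInputStream(new ByteArrayInputStream(bytes))
        try ois.readObject() finally ois.close()
      }

      def main(args: Array[String]): Unit = {
        val now = new java.util.Date()
        assert(unpack(pack(now)) == now) // the value survives the round trip
      }
    }
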
/clients/php/src/lib/Kafka/Request.php:
--------------------------------------------------------------------------------
 1 | <?php
 2 | /**
 3 |  * Kafka Client
 4 |  *
 5 |  * @category Libraries
 6 |  * @package Kafka
 7 |  * @author Lorenzo Alberton <l.alberton@quipo.it>
 8 |  * @copyright 2011 Lorenzo Alberton
9 | * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
10 | * @version $Revision: $
11 | * @link http://sna-projects.com/kafka/
12 | */
13 |
14 | /**
15 | * Abstract Request class
16 | *
17 | * @category Libraries
18 | * @package Kafka
19 |  * @author Lorenzo Alberton <l.alberton@quipo.it>
20 | * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
21 | * @link http://sna-projects.com/kafka/
22 | */
23 | abstract class Kafka_Request
24 | {
25 | /**
26 | * @var integer
27 | */
28 | public $id;
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/clients/cpp/build-aux/m4/ltversion.m4:
--------------------------------------------------------------------------------
1 | # ltversion.m4 -- version numbers -*- Autoconf -*-
2 | #
3 | # Copyright (C) 2004 Free Software Foundation, Inc.
4 | # Written by Scott James Remnant, 2004
5 | #
6 | # This file is free software; the Free Software Foundation gives
7 | # unlimited permission to copy and/or distribute it, with or without
8 | # modifications, as long as this notice is preserved.
9 |
10 | # Generated from ltversion.in.
11 |
12 | # serial 3017 ltversion.m4
13 | # This file is part of GNU Libtool
14 |
15 | m4_define([LT_PACKAGE_VERSION], [2.2.6b])
16 | m4_define([LT_PACKAGE_REVISION], [1.3017])
17 |
18 | AC_DEFUN([LTVERSION_VERSION],
19 | [macro_version='2.2.6b'
20 | macro_revision='1.3017'
21 | _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
22 | _LT_DECL(, macro_revision, 0)
23 | ])
24 |
--------------------------------------------------------------------------------
/examples/README:
--------------------------------------------------------------------------------
1 | This directory contains examples of client code that uses kafka.
2 |
 3 | The default target for ant is kafka.examples.KafkaConsumerProducerDemo, which sends and receives
 4 | messages from the Kafka server.
5 |
 6 | In order to run the demo from SBT:
7 | 1. Start Zookeeper and the Kafka server
8 | 2. ./sbt from top-level kafka directory
9 | 3. Switch to the kafka java examples project -> project Kafka Java Examples
10 | 4. Execute run -> run
11 | 5. For unlimited producer-consumer run, select option 1
12 | For simple consumer demo, select option 2
13 |
14 | To run the demo using scripts:
15 |
16 | 1. Start Zookeeper and the Kafka server
17 | 2. For simple consumer demo, run bin/java-simple-consumer-demo.sh
18 | 3. For unlimited producer-consumer run, run bin/java-producer-consumer-demo.sh
19 |
20 |
--------------------------------------------------------------------------------
/system_test/embedded_consumer/expected.out:
--------------------------------------------------------------------------------
1 | start the servers ...
2 | start producing messages ...
3 | Total Num Messages: 10000000 bytes: 1994374785 in 106.076 secs
4 | Messages/sec: 94272.0314
5 | MB/sec: 17.9304
6 | [2011-05-02 11:50:29,022] INFO Disconnecting from localhost:9092 (kafka.producer.SyncProducer)
7 | wait for consumer to finish consuming ...
8 | test passed
9 | bin/../../../bin/kafka-server-start.sh: line 11: 359 Terminated $(dirname $0)/kafka-run-class.sh kafka.Kafka $@
10 | bin/../../../bin/zookeeper-server-start.sh: line 9: 357 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
11 | bin/../../../bin/zookeeper-server-start.sh: line 9: 358 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
12 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/UnknownException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.common
18 |
19 | /**
20 | * If we don't know what else it is, call it this
21 | */
22 | class UnknownException extends RuntimeException
23 |
--------------------------------------------------------------------------------
/perf/README.md:
--------------------------------------------------------------------------------
1 | # Profiling Kafka #
2 |
3 | See here: http://sna-projects.com/kafka/performance.php
4 |
5 | ## Getting Started Locally ##
6 |
7 | 1. Build Kafka itself, start servers etc.
8 | 2. ./sbt
9 | project perf
10 | package-all
11 | 3. Make sure report-html/data (or whichever dir you want to dump simulator data to) exists.
12 | 4. ./run-simulator.sh
13 |
14 | ## Getting Started With Remote Tests ##
15 |
16 | 1. Look at util-bin/remote-kafka-env.sh and the constants there.
17 | 2. Scripts assume that you have kafka built and installed on the
18 | remote hosts.
19 | 3. Example: `./util-bin/run-fetchsize-test.sh user@host user@host 1 dir/report-html/fetch_size_test`.
20 | 4. Start a web server or copy the results somewhere you can view with
21 | a browser. You can view the results of a specific test run by setting
22 | `report.html?dataDir=my_test`.
23 |
24 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/InvalidMessageException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.message
18 |
19 | /**
20 | * Indicates that a message failed its checksum and is corrupt
21 | */
22 | class InvalidMessageException extends RuntimeException
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/InvalidRequestException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | class InvalidRequestException(val message: String) extends RuntimeException(message) {
20 |
21 | def this() = this("")
22 |
23 | }
24 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/AsyncProducerStatsMBean.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer.async
18 |
19 | trait AsyncProducerStatsMBean {
20 | def getAsyncProducerQueueSize: Int
21 | def getAsyncProducerDroppedEvents: Int
22 | }
23 |
--------------------------------------------------------------------------------
/clients/php/src/lib/Kafka/RequestKeys.php:
--------------------------------------------------------------------------------
1 | <?php
2 | /**
3 | * Constants for request keys
4 | *
5 | * @category Libraries
6 | * @package Kafka
7 | * @author Lorenzo Alberton
8 | * @copyright 2011 Lorenzo Alberton
9 | * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
10 | * @version $Revision: $
11 | * @link http://sna-projects.com/kafka/
12 | */
13 |
14 | /**
15 | * Some constants for request keys
16 | *
17 | * @category Libraries
18 | * @package Kafka
19 | * @author Lorenzo Alberton
20 | * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
21 | * @link http://sna-projects.com/kafka/
22 | */
23 | class Kafka_RequestKeys
24 | {
25 | const PRODUCE = 0;
26 | const FETCH = 1;
27 | const MULTIFETCH = 2;
28 | const MULTIPRODUCE = 3;
29 | const OFFSETS = 4;
30 | }
31 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/api/RequestKeys.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.api
18 |
19 | object RequestKeys {
20 | val Produce: Short = 0
21 | val Fetch: Short = 1
22 | val MultiFetch: Short = 2
23 | val MultiProduce: Short = 3
24 | val Offsets: Short = 4
25 | }
26 |
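As a quick illustration of how these ids are meant to be used, a server-side dispatch might pattern-match on the request key. A minimal sketch only (the real wiring lives in the server's handler mapping, not in this file); `RequestKeyNames` and `nameOf` are hypothetical names, while `RequestKeys` and `InvalidRequestException` are the classes reproduced in this document:

```scala
// Sketch: map a request id to a readable name, failing the way a server
// would on an unknown id. Not taken from the repository.
import kafka.api.RequestKeys
import kafka.network.InvalidRequestException

object RequestKeyNames {
  def nameOf(id: Short): String = id match {
    case RequestKeys.Produce      => "produce"
    case RequestKeys.Fetch        => "fetch"
    case RequestKeys.MultiFetch   => "multifetch"
    case RequestKeys.MultiProduce => "multiproduce"
    case RequestKeys.Offsets      => "offsets"
    case _ => throw new InvalidRequestException("No mapping found for request id " + id)
  }
}
```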
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/Request.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | import java.nio._
20 |
21 | private[kafka] abstract class Request(val id: Short) {
22 |
23 | def sizeInBytes: Int
24 |
25 | def writeTo(buffer: ByteBuffer): Unit
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Kafka.Client/RequestType.cs:
--------------------------------------------------------------------------------
1 | namespace Kafka.Client
2 | {
3 | /// <summary>
4 | /// Request types for Kafka
5 | /// </summary>
6 | /// <remarks>
7 | /// Many of these are not in play yet.
8 | /// </remarks>
9 | public enum RequestType
10 | {
11 | /// <summary>
12 | /// Produce a message.
13 | /// </summary>
14 | Produce = 0,
15 |
16 | /// <summary>
17 | /// Fetch a message.
18 | /// </summary>
19 | Fetch = 1,
20 |
21 | /// <summary>
22 | /// Multi-fetch messages.
23 | /// </summary>
24 | MultiFetch = 2,
25 |
26 | /// <summary>
27 | /// Multi-produce messages.
28 | /// </summary>
29 | MultiProduce = 3,
30 |
31 | /// <summary>
32 | /// Gets offsets.
33 | /// </summary>
34 | Offsets = 4
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/MessageAndOffset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.message
18 |
19 | /**
20 | * Represents message and offset of the next message. This is used in the MessageSet to iterate over it
21 | */
22 | case class MessageAndOffset(val message: Message, val offset: Long)
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/MissingConfigException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer.async
18 |
19 | /* Indicates any missing configuration parameter */
20 | class MissingConfigException(message: String) extends RuntimeException(message) {
21 | def this() = this(null)
22 | }
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/UnavailableProducerException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.common
17 |
18 | /**
19 | * Indicates a producer pool initialization problem
20 | */
21 | class UnavailableProducerException(message: String) extends RuntimeException(message) {
22 | def this() = this(null)
23 | }
24 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/QueueClosedException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer.async
18 |
19 | /* Indicates that client is sending event to a closed queue */
20 | class QueueClosedException(message: String) extends RuntimeException(message) {
21 | def this() = this(null)
22 | }
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/InvalidConfigException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.common
18 |
19 | /**
20 | * Indicates that the given config parameter has invalid value
21 | */
22 | class InvalidConfigException(message: String) extends RuntimeException(message) {
23 | def this() = this(null)
24 | }
25 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/package.html:
--------------------------------------------------------------------------------
1 | The network server for kafka. No application specific code here, just general network server stuff.
2 |
3 | The classes Receive and Send encapsulate the incoming and outgoing transmission of bytes. A Handler
4 | is a mapping between a Receive and a Send, and represents the user's hook to add logic for mapping requests
5 | to actual processing code. Any uncaught exceptions in the reading or writing of the transmissions will result in
6 | the server logging an error and closing the offending socket. As a result it is the duty of the Handler
7 | implementation to catch and serialize any application-level errors that should be sent to the client.
8 |
9 | This slightly lower-level interface that models sending and receiving rather than requests and responses
10 | is necessary in order to allow the send or receive to be overridden with a non-user-space writing of bytes
11 | using FileChannel.transferTo.
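A hedged sketch of the Handler idea described above: an "echo" handler that answers every request with its own bytes, plus a mapping that picks it regardless of the request id. `EchoHandler` is a made-up name, and the sketch assumes Receive exposes its ByteBuffer as a `buffer` member, which is an assumption since Receive's definition is not reproduced here. The `Handler.Handler` and `Handler.HandlerMapping` type aliases come from Handler.scala, shown later in this document:

```scala
// Illustrative only; placed in kafka.network because these types are
// private[kafka]. Echoes each request's bytes back as the response.
package kafka.network

private[kafka] object EchoHandler {
  val echo: Handler.Handler =
    (receive: Receive) => Some(new ByteBufferSend(receive.buffer.duplicate))

  // Choose the same handler for every (requestTypeId, receive) pair.
  val mapping: Handler.HandlerMapping =
    (requestTypeId: Short, receive: Receive) => echo
}
```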
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/QueueFullException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer.async
18 |
19 | /* Indicates the queue for sending messages is full of unsent messages */
20 | class QueueFullException(message: String) extends RuntimeException(message) {
21 | def this() = this(null)
22 | }
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/ConnectionConfig.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | trait ConnectionConfig {
20 | val host: String
21 | val port: Int
22 | val sendBufferSize: Int = -1
23 | val receiveBufferSize: Int = -1
24 | val tcpNoDelay = true
25 | val keepAlive = false
26 | }
27 |
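Only `host` and `port` are abstract, so a concrete config can lean on the trait's defaults for the buffer sizes, TCP_NODELAY, and keep-alive. A minimal sketch (`LocalConnectionConfig` is a hypothetical name, not a class from the repository):

```scala
// Sketch: a ConnectionConfig for a broker on the local machine,
// inheriting all the default socket options from the trait.
import kafka.network.ConnectionConfig

object LocalConnectionConfig extends ConnectionConfig {
  val host = "localhost"
  val port = 9092
}
```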
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/InvalidPartitionException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.common
17 |
18 | /**
19 | * Indicates that the partition id is not between 0 and numPartitions-1
20 | */
21 | class InvalidPartitionException(message: String) extends RuntimeException(message) {
22 | def this() = this(null)
23 | }
24 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/UnknownCodecException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.common
18 |
19 | /**
20 | * Indicates that an unknown compression codec was specified
21 | */
22 | class UnknownCodecException(message: String) extends RuntimeException(message) {
23 | def this() = this(null)
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/MessageLengthException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.message
18 |
19 | /**
20 | * Indicates the presence of a message that exceeds the maximum acceptable
21 | * length (whatever that happens to be)
22 | */
23 | class MessageLengthException(message: String) extends RuntimeException(message)
24 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/OffsetOutOfRangeException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.common
18 |
19 | /**
20 | * Indicates the client has requested a range no longer available on the server
21 | */
22 | class OffsetOutOfRangeException(message: String) extends RuntimeException(message) {
23 | def this() = this(null)
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/common/UnknownMagicByteException.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.common
18 |
19 | /**
20 | * Indicates that a message has an unknown magic byte
21 | */
22 | class UnknownMagicByteException(message: String) extends RuntimeException(message) {
23 | def this() = this(null)
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/perf/run-simulator.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=$(dirname $0)/..
4 |
5 | for file in $base_dir/project/boot/scala-2.8.0/lib/*.jar;
6 | do
7 | CLASSPATH=$CLASSPATH:$file
8 | done
9 | for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
10 | do
11 | CLASSPATH=$CLASSPATH:$file
12 | done
13 | for file in $base_dir/core/target/scala_2.8.0/*.jar;
14 | do
15 | CLASSPATH=$CLASSPATH:$file
16 | done
17 | for file in $base_dir/perf/target/scala_2.8.0/*.jar;
18 | do
19 | CLASSPATH=$CLASSPATH:$file
20 | done
21 |
22 |
23 | echo $CLASSPATH
24 |
25 | if [ -z "$KAFKA_PERF_OPTS" ]; then
26 | KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
27 | fi
28 |
29 | if [ -z "$JAVA_HOME" ]; then
30 | JAVA="java"
31 | else
32 | JAVA="$JAVA_HOME/bin/java"
33 | fi
34 |
35 | $JAVA $KAFKA_OPTS -cp $CLASSPATH kafka.perf.KafkaPerfSimulator $@
36 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/src/main/java/kafka/etl/UndefinedPropertyException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.etl;
18 |
19 | public class UndefinedPropertyException extends RuntimeException {
20 |
21 | private static final long serialVersionUID = 1;
22 |
23 | public UndefinedPropertyException(String message) {
24 | super(message);
25 | }
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/Partitioner.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.producer
17 |
18 | trait Partitioner[T] {
19 | /**
20 | * Uses the key to calculate a partition bucket id for routing
21 | * the data to the appropriate broker partition
22 | * @return an integer between 0 and numPartitions-1
23 | */
24 | def partition(key: T, numPartitions: Int): Int
25 | }
26 |
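A hypothetical custom Partitioner as a usage sketch: it keys events by an integer member id so all events for one member land in the same partition. `MemberIdPartitioner` is made up for illustration; compare with DefaultPartitioner later in this document, which instead randomizes null keys:

```scala
// Sketch: route by member id; the double-modulo keeps the result in
// 0..numPartitions-1 even for negative keys.
import kafka.producer.Partitioner

class MemberIdPartitioner extends Partitioner[Int] {
  def partition(key: Int, numPartitions: Int): Int =
    ((key % numPartitions) + numPartitions) % numPartitions
}
```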
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/consumer/FetchedDataChunk.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.consumer
18 |
19 | import kafka.message.ByteBufferMessageSet
20 |
21 | private[consumer] class FetchedDataChunk(val messages: ByteBufferMessageSet,
22 | val topicInfo: PartitionTopicInfo,
23 | val fetchOffset: Long)
24 |
--------------------------------------------------------------------------------
/perf/src/main/java/kafka/perf/KafkaSimulatorMXBean.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.perf;
18 |
19 | public interface KafkaSimulatorMXBean
20 | {
21 | public String getMBytesSentPs();
22 | public String getMessagesSentPs();
23 | public String getProducers();
24 | public String getMBytesRecPs();
25 | public String getMessagesRecPs();
26 | public String getConsumers();
27 | }
28 |
--------------------------------------------------------------------------------
/config/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stdout
2 |
3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
4 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
5 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
6 |
7 | #log4j.appender.fileAppender=org.apache.log4j.FileAppender
8 | #log4j.appender.fileAppender.File=kafka-request.log
9 | #log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
10 | #log4j.appender.fileAppender.layout.ConversionPattern= %-4r [%t] %-5p %c %x - %m%n
11 |
12 |
13 | # Turn on all our debugging info
14 | log4j.logger.kafka=INFO,stdout
15 | #log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG,stdout
16 | #log4j.logger.kafka.consumer.PartitionTopicInfo=TRACE,stdout
17 | #log4j.logger.kafka.request.logger=TRACE,fileAppender
18 | #log4j.additivity.kafka.request.logger=false
19 | #log4j.logger.kafka.network.Processor=TRACE,fileAppender
20 | #log4j.additivity.kafka.network.Processor=false
21 | #log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
22 |
23 |
--------------------------------------------------------------------------------
/perf/util-bin/remote-kafka-env.sh:
--------------------------------------------------------------------------------
1 |
2 | REMOTE_KAFKA_HOME="~/kafka-perf"
3 | REMOTE_KAFKA_LOG_DIR="$REMOTE_KAFKA_HOME/tmp/kafka-logs"
4 | SIMULATOR_SCRIPT="$REMOTE_KAFKA_HOME/perf/run-simulator.sh"
5 |
6 | REMOTE_KAFKA_HOST=`echo $REMOTE_KAFKA_LOGIN | cut -d @ -f 2`
7 | REMOTE_SIM_HOST=`echo $REMOTE_SIM_LOGIN | cut -d @ -f 2`
8 |
9 | # If we are running the broker on the same box, use the local interface.
10 | KAFKA_SERVER=$REMOTE_KAFKA_HOST
11 | if [[ "$REMOTE_KAFKA_HOST" == "$REMOTE_SIM_HOST" ]];
12 | then
13 | KAFKA_SERVER="localhost"
14 | fi
15 |
16 |
17 | # todo: some echos
18 | # todo: talkative sleep
19 |
20 | function kafka_startup() {
21 | ssh $REMOTE_KAFKA_LOGIN "cd $REMOTE_KAFKA_HOME; ./bin/kafka-server-start.sh config/server.properties > kafka.out 2>&1" &
22 | sleep 10
23 | }
24 |
25 |
26 | function kafka_cleanup() {
27 | ssh $REMOTE_KAFKA_LOGIN "cd $REMOTE_KAFKA_HOME; ./bin/kafka-server-stop.sh" &
28 | sleep 10
29 | ssh $REMOTE_KAFKA_LOGIN "rm -rf $REMOTE_KAFKA_LOG_DIR" &
30 | sleep 10
31 | }
32 |
--------------------------------------------------------------------------------
/clients/php/src/tests/bootstrap.php:
--------------------------------------------------------------------------------
1 | file association */
17 | if (($file !== false) && ($file !== null)) {
18 | include $file;
19 | return;
20 | }
21 |
22 | throw new RuntimeException($className. ' not found');
23 | }
24 |
25 | // register the autoloader
26 | spl_autoload_register('test_autoload');
27 |
28 | set_include_path(
29 | implode(PATH_SEPARATOR, array(
30 | realpath(dirname(__FILE__).'/../lib'),
31 | get_include_path(),
32 | ))
33 | );
34 |
35 | date_default_timezone_set('Europe/London');
36 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/DefaultPartitioner.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer
18 |
19 | private[kafka] class DefaultPartitioner[T] extends Partitioner[T] {
20 | private val random = new java.util.Random
21 |
22 | def partition(key: T, numPartitions: Int): Int = {
23 | if(key == null)
24 | random.nextInt(numPartitions)
25 | else
26 | math.abs(key.hashCode) % numPartitions // abs() guards against negative hash codes
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/clients/clojure/src/kafka/example.clj:
--------------------------------------------------------------------------------
1 | (ns #^{:doc "Producer/Consumer example."}
2 | kafka.example
3 | (:use (clojure.contrib logging)
4 | (kafka types kafka print)))
5 |
6 | (defmacro thread
7 | "Executes body in a thread, logs exceptions."
8 | [ & body]
9 | `(future
10 | (try
11 | ~@body
12 | (catch Exception e#
13 | (error "Exception." e#)))))
14 |
15 | (defn start-consumer
16 | []
17 | (thread
18 | (with-open [c (consumer "localhost" 9092)]
19 | (doseq [m (consume-seq c "test" 0 {:blocking true})]
20 | (println "Consumed <-- " m)))
21 | (println "Finished consuming.")))
22 |
23 | (defn start-producer
24 | []
25 | (thread
26 | (with-open [p (producer "localhost" 9092)]
27 | (doseq [i (range 1 20)]
28 | (let [m (str "Message " i)]
29 | (produce p "test" 0 m)
30 | (println "Produced --> " m)
31 | (Thread/sleep 1000))))
32 | (println "Finished producing.")))
33 |
34 | (defn run
35 | []
36 | (start-consumer)
37 | (start-producer))
38 |
39 |
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/io.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 | module IO
3 | attr_accessor :socket, :host, :port
4 |
5 | def connect(host, port)
6 | raise ArgumentError, "No host or port specified" unless host && port
7 | self.host = host
8 | self.port = port
9 | self.socket = TCPSocket.new(host, port)
10 | end
11 |
12 | def reconnect
13 | self.disconnect
14 | self.socket = self.connect(self.host, self.port)
15 | end
16 |
17 | def disconnect
18 | self.socket.close rescue nil
19 | self.socket = nil
20 | end
21 |
22 | def write(data)
23 | self.reconnect unless self.socket
24 | self.socket.write(data)
25 | rescue Errno::ECONNRESET, Errno::EPIPE, Errno::ECONNABORTED
26 | self.reconnect
27 | self.socket.write(data) # retry
28 | end
29 |
30 | def read(length)
31 | begin
32 | self.socket.read(length)
33 | rescue Errno::EAGAIN
34 | self.disconnect
35 | raise Errno::EAGAIN, "Timeout reading from the socket"
36 | end
37 | end
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/examples/src/main/java/kafka/examples/ExampleUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.examples;
17 |
18 | import java.nio.ByteBuffer;
19 |
20 | import kafka.message.Message;
21 |
22 | public class ExampleUtils
23 | {
24 | public static String getMessage(Message message)
25 | {
26 | ByteBuffer buffer = message.payload();
27 | byte [] bytes = new byte[buffer.remaining()];
28 | buffer.get(bytes);
29 | return new String(bytes);
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/utils/UtilsTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import org.apache.log4j.Logger
20 | import org.scalatest.junit.JUnitSuite
21 | import org.junit.Test
22 |
23 | class UtilsTest extends JUnitSuite {
24 |
25 | private val logger = Logger.getLogger(classOf[UtilsTest])
26 |
27 | @Test
28 | def testSwallow() {
29 | Utils.swallow(logger.info, throw new IllegalStateException("test"))
30 | }
31 |
32 | }
33 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/serializer/Encoder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.serializer
18 |
19 | import kafka.message.Message
20 |
21 | trait Encoder[T] {
22 | def toMessage(event: T):Message
23 | }
24 |
25 | class DefaultEncoder extends Encoder[Message] {
26 | override def toMessage(event: Message):Message = event
27 | }
28 |
29 | class StringEncoder extends Encoder[String] {
30 | override def toMessage(event: String):Message = new Message(event.getBytes)
31 | }
32 |
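A hedged sketch of a custom Encoder for an application type; `LogEvent`, `LogEventEncoder`, and the tab-separated layout are made up for illustration, while `Encoder` and `Message` are as shown above:

```scala
// Sketch: serialize a hypothetical LogEvent to a Kafka Message.
import kafka.message.Message
import kafka.serializer.Encoder

case class LogEvent(host: String, line: String)

class LogEventEncoder extends Encoder[LogEvent] {
  override def toMessage(event: LogEvent): Message =
    new Message((event.host + "\t" + event.line).getBytes)
}
```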
--------------------------------------------------------------------------------
/examples/src/main/java/kafka/examples/KafkaConsumerProducerDemo.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.examples;
17 |
18 | public class KafkaConsumerProducerDemo implements KafkaProperties
19 | {
20 | public static void main(String[] args)
21 | {
22 | Producer producerThread = new Producer(KafkaProperties.topic);
23 | producerThread.start();
24 |
25 | Consumer consumerThread = new Consumer(KafkaProperties.topic);
26 | consumerThread.start();
27 |
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/clients/cpp/src/example.cpp:
--------------------------------------------------------------------------------
1 |
2 | #include <string>
3 | #include <vector>
4 | #include <memory>
5 | #include <cstdlib>
6 |
7 | #include <boost/asio.hpp>
8 | #include <boost/thread.hpp>
9 | #include <boost/bind.hpp>
10 |
11 | #include "producer.hpp"
12 |
13 | int main(int argc, char* argv[])
14 | {
15 | std::string hostname = (argc >= 2) ? argv[1] : "localhost";
16 | std::string port = (argc >= 3) ? argv[2] : "9092";
17 |
18 | boost::asio::io_service io_service;
19 | std::auto_ptr<boost::asio::io_service::work> work(new boost::asio::io_service::work(io_service));
20 | boost::thread bt(boost::bind(&boost::asio::io_service::run, &io_service));
21 |
22 | kafkaconnect::producer producer(io_service);
23 | producer.connect(hostname, port);
24 |
25 | while(!producer.is_connected())
26 | {
27 | boost::this_thread::sleep(boost::posix_time::seconds(1));
28 | }
29 |
30 | std::vector<std::string> messages;
31 | messages.push_back("So long and thanks for all the fish");
32 | messages.push_back("Time is an illusion. Lunchtime doubly so.");
33 | producer.send(messages, "test");
34 |
35 | work.reset();
36 | io_service.stop();
37 |
38 | return EXIT_SUCCESS;
39 | }
40 |
41 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/MockTime.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import java.util.concurrent._
20 |
21 | class MockTime(@volatile var currentMs: Long) extends Time {
22 |
23 | def this() = this(System.currentTimeMillis)
24 |
25 | def milliseconds: Long = currentMs
26 |
27 | def nanoseconds: Long =
28 | TimeUnit.NANOSECONDS.convert(currentMs, TimeUnit.MILLISECONDS)
29 |
30 | def sleep(ms: Long): Unit =
31 | currentMs += ms
32 |
33 | }
34 |
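A small usage sketch: MockTime lets a test advance the clock without real sleeping, so time-dependent code can be driven deterministically. `MockTimeSketch` is a hypothetical name, not a test from the repository:

```scala
// Sketch: sleep() returns immediately and only moves the mock clock.
import kafka.utils.MockTime

object MockTimeSketch {
  def main(args: Array[String]) {
    val time = new MockTime(0L)
    time.sleep(5000)
    assert(time.milliseconds == 5000L)
  }
}
```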
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/message.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 |
3 | # A message. The format of an N byte message is the following:
4 | # 1 byte "magic" identifier to allow format changes
5 | # 4 byte CRC32 of the payload
6 | # N - 5 byte payload
7 | class Message
8 |
9 | MAGIC_IDENTIFIER_DEFAULT = 0
10 |
11 | attr_accessor :magic, :checksum, :payload
12 |
13 | def initialize(payload = nil, magic = MAGIC_IDENTIFIER_DEFAULT, checksum = nil)
14 | self.magic = magic
15 | self.payload = payload
16 | self.checksum = checksum || self.calculate_checksum
17 | end
18 |
19 | def calculate_checksum
20 | Zlib.crc32(self.payload)
21 | end
22 |
23 | def valid?
24 | self.checksum == Zlib.crc32(self.payload)
25 | end
26 |
27 | def self.parse_from(binary)
28 | size = binary[0, 4].unpack("N").shift.to_i
29 | magic = binary[4, 1].unpack("C").shift
30 | checksum = binary[5, 4].unpack("N").shift
31 | payload = binary[9, size - 5] # size counts magic (1 byte) + checksum (4 bytes) + payload
32 | return Kafka::Message.new(payload, magic, checksum)
33 | end
34 | end
35 | end
36 |
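The wire layout described in the comments above (a 4-byte size, then a 1-byte magic, a 4-byte CRC32 of the payload, then the payload) can be sketched in the core's Scala for consistency with the rest of this document. Illustrative only; `MessageFraming` is a made-up name, and the real encoder is kafka.message.Message:

```scala
// Sketch: frame a payload per the format documented in message.rb.
import java.nio.ByteBuffer
import java.util.zip.CRC32

object MessageFraming {
  def frame(payload: Array[Byte], magic: Byte = 0): ByteBuffer = {
    val crc = new CRC32
    crc.update(payload)
    val buffer = ByteBuffer.allocate(4 + 1 + 4 + payload.length)
    buffer.putInt(1 + 4 + payload.length) // size = magic + checksum + payload
    buffer.put(magic)
    buffer.putInt(crc.getValue.toInt)     // CRC32 truncated to 4 bytes
    buffer.put(payload)
    buffer.flip()
    buffer
  }
}
```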
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/Handler.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | private[kafka] object Handler {
20 |
21 | /**
22 | * A request handler is a function that turns an incoming
23 | * transmission into an outgoing transmission
24 | */
25 | type Handler = Receive => Option[Send]
26 |
27 | /**
28 | * A handler mapping finds the right Handler function for a given request
29 | */
30 | type HandlerMapping = (Short, Receive) => Handler
31 |
32 | }
33 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/zk/ZooKeeperTestHarness.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.zk
18 |
19 | import org.scalatest.junit.JUnit3Suite
20 |
21 | trait ZooKeeperTestHarness extends JUnit3Suite {
22 | val zkConnect: String
23 | var zookeeper: EmbeddedZookeeper = null
24 |
25 | override def setUp() {
26 | zookeeper = new EmbeddedZookeeper(zkConnect)
27 | super.setUp
28 | }
29 |
30 | override def tearDown() {
31 | super.tearDown
32 | zookeeper.shutdown()
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Kafka.Client/AbstractRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 |
6 | namespace Kafka.Client
7 | {
8 | /// <summary>
9 | /// Base request to make to Kafka.
10 | /// </summary>
11 | public abstract class AbstractRequest
12 | {
13 | /// <summary>
14 | /// Gets or sets the topic to publish to.
15 | /// </summary>
16 | public string Topic { get; set; }
17 |
18 | /// <summary>
19 | /// Gets or sets the partition to publish to.
20 | /// </summary>
21 | public int Partition { get; set; }
22 |
23 | /// <summary>
24 | /// Converts the request to an array of bytes that is expected by Kafka.
25 | /// </summary>
26 | /// <returns>An array of bytes that represents the request.</returns>
27 | public abstract byte[] GetBytes();
28 |
29 | /// <summary>
30 | /// Determines if the request has valid settings.
31 | /// </summary>
32 | /// <returns>True if valid and false otherwise.</returns>
33 | public abstract bool IsValid();
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/clients/php/src/examples/consume.php:
--------------------------------------------------------------------------------
1 | #!/usr/bin/php
27 | $messages = $consumer->fetch($fetchRequest);
28 | foreach ($messages as $msg) {
29 | echo "\nconsumed[$offset]: " . $msg->payload();
30 | }
31 | //advance the offset after consuming each message
32 | $offset += $messages->validBytes();
33 | //echo "\n---[Advancing offset to $offset]------(".date('H:i:s').")";
34 | unset($fetchRequest);
35 | sleep(2);
36 | }
37 |
--------------------------------------------------------------------------------
/examples/bin/java-simple-consumer-demo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=$(dirname $0)/../..
4 |
5 | for file in $base_dir/project/boot/scala-2.8.0/lib/*.jar;
6 | do
7 | if [ ${file##*/} != "sbt-launch.jar" ]; then
8 | CLASSPATH=$CLASSPATH:$file
9 | fi
10 | done
11 |
12 | for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
13 | do
14 | CLASSPATH=$CLASSPATH:$file
15 | done
16 |
17 | for file in $base_dir/core/lib/*.jar;
18 | do
19 | CLASSPATH=$CLASSPATH:$file
20 | done
21 |
22 | for file in $base_dir/core/target/scala_2.8.0/*.jar;
23 | do
24 | CLASSPATH=$CLASSPATH:$file
25 | done
26 |
27 | for file in $base_dir/examples/target/scala_2.8.0/*.jar;
28 | do
29 | CLASSPATH=$CLASSPATH:$file
30 | done
31 |
32 | echo $CLASSPATH
33 |
34 | if [ -z "$KAFKA_PERF_OPTS" ]; then
35 | KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
36 | fi
37 |
38 | if [ -z "$JAVA_HOME" ]; then
39 | JAVA="java"
40 | else
41 | JAVA="$JAVA_HOME/bin/java"
42 | fi
43 |
44 | $JAVA $KAFKA_OPTS -cp $CLASSPATH kafka.examples.SimpleConsumerDemo $@
45 |
46 |
--------------------------------------------------------------------------------
/examples/bin/java-producer-consumer-demo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=$(dirname $0)/../..
4 |
5 | for file in $base_dir/project/boot/scala-2.8.0/lib/*.jar;
6 | do
7 | if [ ${file##*/} != "sbt-launch.jar" ]; then
8 | CLASSPATH=$CLASSPATH:$file
9 | fi
10 | done
11 |
12 | for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
13 | do
14 | CLASSPATH=$CLASSPATH:$file
15 | done
16 |
17 | for file in $base_dir/core/lib/*.jar;
18 | do
19 | CLASSPATH=$CLASSPATH:$file
20 | done
21 |
22 | for file in $base_dir/core/target/scala_2.8.0/*.jar;
23 | do
24 | CLASSPATH=$CLASSPATH:$file
25 | done
26 |
27 | for file in $base_dir/examples/target/scala_2.8.0/*.jar;
28 | do
29 | CLASSPATH=$CLASSPATH:$file
30 | done
31 |
32 | echo $CLASSPATH
33 |
34 | if [ -z "$KAFKA_PERF_OPTS" ]; then
35 | KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
36 | fi
37 |
38 | if [ -z "$JAVA_HOME" ]; then
39 | JAVA="java"
40 | else
41 | JAVA="$JAVA_HOME/bin/java"
42 | fi
43 |
44 | $JAVA $KAFKA_OPTS -cp $CLASSPATH kafka.examples.KafkaConsumerProducerDemo $@
45 |
46 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/serializer/Decoder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.serializer
18 |
19 | import kafka.message.Message
20 |
21 | trait Decoder[T] {
22 | def toEvent(message: Message):T
23 | }
24 |
25 | class DefaultDecoder extends Decoder[Message] {
26 | def toEvent(message: Message):Message = message
27 | }
28 |
29 | class StringDecoder extends Decoder[String] {
30 | def toEvent(message: Message):String = {
31 | val buf = message.payload
32 | val arr = new Array[Byte](buf.remaining)
33 | buf.get(arr)
34 | new String(arr)
35 | }
36 | }
37 |
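A round-trip sanity sketch using the StringEncoder and StringDecoder reproduced in this document (`RoundTripSketch` is a hypothetical name, not a test taken from the repository):

```scala
// Sketch: encode a String to a Message, decode it back, compare.
import kafka.serializer.{StringDecoder, StringEncoder}

object RoundTripSketch {
  def main(args: Array[String]) {
    val message = new StringEncoder().toMessage("hello kafka")
    assert(new StringDecoder().toEvent(message) == "hello kafka")
  }
}
```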
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/run-class.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -lt 1 ];
4 | then
5 | echo "USAGE: $0 classname [opts]"
6 | exit 1
7 | fi
8 |
9 | base_dir=$(dirname $0)/../..
10 |
11 | # include kafka jars
12 | for file in $base_dir/core/target/scala_2.8.0/kafka-*.jar
13 | do
14 | CLASSPATH=$CLASSPATH:$file
15 | done
16 |
17 | for file in $base_dir/contrib/hadoop-consumer/lib_managed/scala_2.8.0/compile/*.jar;
18 | do
19 | CLASSPATH=$CLASSPATH:$file
20 | done
21 |
22 | local_dir=$(dirname $0)
23 |
24 | # include hadoop-consumer jars
25 | for file in $base_dir/contrib/hadoop-consumer/target/scala_2.8.0/*.jar;
26 | do
27 | CLASSPATH=$CLASSPATH:$file
28 | done
29 |
30 | for file in $base_dir/contrib/hadoop-consumer/lib/*.jar;
31 | do
32 | CLASSPATH=$CLASSPATH:$file
33 | done
34 |
35 | CLASSPATH=$CLASSPATH:$base_dir/project/boot/scala-2.8.0/lib/scala-library.jar
36 |
37 | echo $CLASSPATH
38 |
39 | CLASSPATH=dist:$CLASSPATH:${HADOOP_HOME}/conf
40 |
41 | #if [ -z "$KAFKA_OPTS" ]; then
42 | # KAFKA_OPTS="-Xmx512M -server -Dcom.sun.management.jmxremote"
43 | #fi
44 |
45 | if [ -z "$JAVA_HOME" ]; then
46 | JAVA="java"
47 | else
48 | JAVA="$JAVA_HOME/bin/java"
49 | fi
50 |
51 | $JAVA $KAFKA_OPTS -cp $CLASSPATH $@
52 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/Annotations.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | /* Some helpful annotations */
20 |
21 | /**
22 | * Indicates that the annotated class is meant to be threadsafe. For an abstract class it is part of the interface that an implementation
23 | * must respect
24 | */
25 | class threadsafe extends StaticAnnotation
26 |
27 | /**
28 | * Indicates that the annotated class is not threadsafe
29 | */
30 | class nonthreadsafe extends StaticAnnotation
31 |
32 | /**
33 | * Indicates that the annotated class is immutable
34 | */
35 | class immutable extends StaticAnnotation
36 |
--------------------------------------------------------------------------------
/clients/cpp/README.md:
--------------------------------------------------------------------------------
1 | # C++ kafka library
2 | This library allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
3 |
4 | ## Requirements
5 | Tested on Ubuntu and Red Hat, both with g++ 4.4 and Boost 1.46.1
6 |
7 | ## Installation
8 | Make sure you have g++ and the latest version of Boost:
9 | http://gcc.gnu.org/
10 | http://www.boost.org/
11 |
12 | ```bash
13 | ./configure
14 | ```
15 |
16 | Run this to generate the makefile for your system. Do this first.
17 |
18 |
19 | ```bash
20 | make
21 | ```
22 |
23 | builds the producer example and the KafkaConnect library
24 |
25 |
26 | ```bash
27 | make check
28 | ```
29 |
30 | builds and runs the unit tests,
31 |
32 |
33 | ```bash
34 | make install
35 | ```
36 |
37 | to install as a shared library to 'default' locations (/usr/local/lib and /usr/local/include on Linux)
38 |
39 |
40 | ## Usage
41 | Example.cpp is a very basic Kafka Producer
42 |
43 |
44 | ## API docs
45 | There isn't much code. If I get around to writing the other parts of the library, I'll document it sensibly;
46 | for now, have a look at the header file: /src/producer.hpp
47 |
48 |
49 | ## Contact for questions
50 |
51 | Ben Gray, MediaSift Ltd.
52 |
53 | http://twitter.com/benjamg
54 |
55 |
56 |
--------------------------------------------------------------------------------
/clients/php/src/tests/Kafka/MessageTest.php:
--------------------------------------------------------------------------------
1 | <?php
2 | /**
3 | * Tests for the Kafka_Message class
4 | * @author Lorenzo Alberton
5 | */
6 | class Kafka_MessageTest extends PHPUnit_Framework_TestCase
7 | {
8 | private $test;
9 | private $encoded;
10 | private $msg;
11 | public function setUp() {
12 | $this->test = 'a sample string';
13 | $this->encoded = Kafka_Encoder::encode_message($this->test);
14 | $this->msg = new Kafka_Message($this->encoded);
15 |
16 | }
17 |
18 | public function testPayload() {
19 | $this->assertEquals($this->test, $this->msg->payload());
20 | }
21 |
22 | public function testValid() {
23 | $this->assertTrue($this->msg->isValid());
24 | }
25 |
26 | public function testEncode() {
27 | $this->assertEquals($this->encoded, $this->msg->encode());
28 | }
29 |
30 | public function testChecksum() {
31 | $this->assertInternalType('integer', $this->msg->checksum());
32 | }
33 |
34 | public function testSize() {
35 | $this->assertEquals(strlen($this->test), $this->msg->size());
36 | }
37 |
38 | public function testToString() {
39 | $this->assertInternalType('string', $this->msg->__toString());
40 | }
41 |
42 | public function testMagic() {
43 | $this->assertInternalType('integer', $this->msg->magic());
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/clients/clojure/src/kafka/types.clj:
--------------------------------------------------------------------------------
1 | (ns #^{:doc "Base kafka-clj types."}
2 | kafka.types)
3 |
4 | (deftype #^{:doc "Message type, a wrapper around a byte array."}
5 | Message [^bytes message])
6 |
7 | (defprotocol Pack
8 | "Pack protocol converts an object to a Message."
9 | (pack [this] "Convert object to a Message."))
10 |
11 | (defprotocol Unpack
12 | "Unpack protocol, reads an object from a Message."
13 | (unpack [^Message this] "Read an object from the message."))
14 |
15 | (defprotocol Producer
16 | "Producer protocol."
17 | (produce [this topic partition messages] "Send message[s] for a topic to a partition.")
18 | (close [this] "Closes the producer, socket and channel."))
19 |
20 | (defprotocol Consumer
21 | "Consumer protocol."
22 | (consume [this topic partition offset max-size] "Fetch messages. Returns a pair [last-offset, message sequence]")
23 | (offsets [this topic partition time max-offsets] "Query offsets. Returns offsets seq.")
24 |
25 | (consume-seq [this topic partition]
26 | [this topic partition opts] "Creates a sequence over the consumer.")
27 | (close [this] "Close the consumer, socket and channel."))
28 |
29 |
--------------------------------------------------------------------------------
/examples/src/main/java/kafka/examples/KafkaProperties.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.examples;
17 |
18 | public interface KafkaProperties
19 | {
20 | final static String zkConnect = "127.0.0.1:2181";
21 | final static String groupId = "group1";
22 | final static String topic = "topic1";
23 | final static String kafkaServerURL = "localhost";
24 | final static int kafkaServerPort = 9092;
25 | final static int kafkaProducerBufferSize = 64*1024;
26 | final static int connectionTimeOut = 100000;
27 | final static int reconnectInterval = 10000;
28 | final static String topic2 = "topic2";
29 | final static String topic3 = "topic3";
30 | }
31 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/ByteBufferSend.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | import java.nio._
20 | import java.nio.channels._
21 | import kafka.utils._
22 |
23 | @nonthreadsafe
24 | private[kafka] class ByteBufferSend(val buffer: ByteBuffer) extends Send {
25 |
26 | var complete: Boolean = false
27 |
28 | def this(size: Int) = this(ByteBuffer.allocate(size))
29 |
30 | def writeTo(channel: WritableByteChannel): Int = {
31 | expectIncomplete()
32 | var written = 0
33 | written += channel.write(buffer)
34 | if(!buffer.hasRemaining)
35 | complete = true
36 | written
37 | }
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/bin/kafka-run-class.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -lt 1 ];
4 | then
5 | echo "USAGE: $0 classname [opts]"
6 | exit 1
7 | fi
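
# A minimal example invocation (the class exists under core/src/main/scala; the argument is a placeholder):
#   bin/kafka-run-class.sh kafka.utils.DumpLogSegments <log_file>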
8 |
9 | base_dir=$(dirname $0)/..
10 |
11 | for file in $base_dir/project/boot/scala-2.8.0/lib/*.jar;
12 | do
13 | CLASSPATH=$CLASSPATH:$file
14 | done
15 |
16 | for file in $base_dir/core/target/scala_2.8.0/*.jar;
17 | do
18 | CLASSPATH=$CLASSPATH:$file
19 | done
20 |
21 | for file in $base_dir/core/lib/*.jar;
22 | do
23 | CLASSPATH=$CLASSPATH:$file
24 | done
25 |
26 | for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
27 | do
28 | if [ ${file##*/} != "sbt-launch.jar" ]; then
29 | CLASSPATH=$CLASSPATH:$file
30 | fi
31 | done
32 | if [ -z "$KAFKA_JMX_OPTS" ]; then
33 | KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
34 | fi
35 | if [ -z "$KAFKA_OPTS" ]; then
36 | KAFKA_OPTS="-Xmx512M -server -Dlog4j.configuration=file:$base_dir/config/log4j.properties"
37 | fi
38 | if [ $JMX_PORT ]; then
39 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
40 | fi
41 | if [ -z "$JAVA_HOME" ]; then
42 | JAVA="java"
43 | else
44 | JAVA="$JAVA_HOME/bin/java"
45 | fi
46 |
47 | $JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH "$@"
48 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/javaapi/producer/ProducerData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.javaapi.producer
17 |
18 | import scala.collection.JavaConversions._
19 |
20 | class ProducerData[K, V](private val topic: String,
21 | private val key: K,
22 | private val data: java.util.List[V]) {
23 |
24 | def this(t: String, d: java.util.List[V]) = this(topic = t, key = null.asInstanceOf[K], data = d)
25 |
26 | def this(t: String, d: V) = this(topic = t, key = null.asInstanceOf[K], data = asList(List(d)))
27 |
28 | def getTopic: String = topic
29 |
30 | def getKey: K = key
31 |
32 | def getData: java.util.List[V] = data
33 | }
34 |
--------------------------------------------------------------------------------
/clients/clojure/test/kafka/buffer_test.clj:
--------------------------------------------------------------------------------
1 | (ns kafka.buffer-test
2 | (:use (kafka buffer)
3 | clojure.test))
4 |
5 | (deftest test-put-get
6 | (with-buffer (buffer 64)
7 | (put (byte 5))
8 | (put (short 10))
9 | (put (int 20))
10 | (put (long 40))
11 | (put "test")
12 | (put (byte-array 3 [(byte 1) (byte 2) (byte 3)]))
13 | (flip)
14 |
15 | (is (= (get-byte) (byte 5)))
16 | (is (= (get-short) (short 10)))
17 | (is (= (get-int) (int 20)))
18 | (is (= (get-long) (long 40)))
19 | (is (= (get-string 4) "test"))
20 | (let [ba (get-array 3)]
21 | (is (= (nth ba 0) (byte 1)))
22 | (is (= (nth ba 1) (byte 2)))
23 | (is (= (nth ba 2) (byte 3))))))
24 |
25 | (deftest test-with-put
26 | (with-buffer (buffer 64)
27 | (with-put 4 count
28 | (put "test 1"))
29 | (flip)
30 |
31 | (is (= (get-int) (int 6)))
32 | (is (= (get-string 6) "test 1"))))
33 |
34 | (deftest test-length-encoded
35 | (with-buffer (buffer 64)
36 | (length-encoded short
37 | (put "test 1"))
38 | (length-encoded int
39 | (put "test 2"))
40 | (flip)
41 |
42 | (is (= (get-short) (short 6)))
43 | (is (= (get-string 6) "test 1"))
44 | (is (= (get-int) (int 6)))
45 | (is (= (get-string 6) "test 2"))))
46 |
47 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/consumer/storage/OffsetStorage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.consumer.storage
18 |
19 | import kafka.utils.Range
20 |
21 | /**
22 | * A method for storing offsets for the consumer.
23 | * This is used to track the progress of the consumer in the stream.
24 | */
25 | trait OffsetStorage {
26 |
27 | /**
28 |    * Reserve the current offset stored for the given node and topic.
29 |    * @param node The broker node on whose behalf the offset is reserved
30 |    * @return The offset reserved
31 | */
32 | def reserve(node: Int, topic: String): Long
33 |
34 | /**
35 | * Update the offset to the new offset
36 | * @param offset The new offset
37 | */
38 | def commit(node: Int, topic: String, offset: Long)
39 |
40 | }
41 |
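// A minimal sketch of the intended reserve/commit cycle, assuming an
// implementation along the lines of MemoryOffsetStorage elsewhere in this tree:
//
//   val storage: OffsetStorage = new MemoryOffsetStorage
//   val start = storage.reserve(0, "topic1")   // locks the (node, topic) slot, returns the current offset
//   // ... consume messages starting at `start` ...
//   storage.commit(0, "topic1", start + 100)   // records the new offset and releases the lock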
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/AsyncProducerStats.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer.async
18 |
19 | import java.util.concurrent.atomic.AtomicInteger
20 | import java.util.concurrent.BlockingQueue
21 |
22 | class AsyncProducerStats[T](queue: BlockingQueue[QueueItem[T]]) extends AsyncProducerStatsMBean {
23 | val droppedEvents = new AtomicInteger(0)
24 | val numEvents = new AtomicInteger(0)
25 |
26 | def getAsyncProducerQueueSize: Int = queue.size
27 |
28 | def getAsyncProducerDroppedEvents: Int = droppedEvents.get
29 |
30 | def recordDroppedEvents = droppedEvents.getAndAdd(1)
31 |
32 | def recordEvent = numEvents.getAndAdd(1)
33 | }
34 |
--------------------------------------------------------------------------------
/clients/php/src/tests/Kafka/EncoderTest.php:
--------------------------------------------------------------------------------
1 |
10 | */
11 | class Kafka_EncoderTest extends PHPUnit_Framework_TestCase
12 | {
13 | public function testEncodedMessageLength() {
14 | $test = 'a sample string';
15 | $encoded = Kafka_Encoder::encode_message($test);
16 | $this->assertEquals(5 + strlen($test), strlen($encoded));
17 | }
18 |
19 | public function testByteArrayContainsString() {
20 | $test = 'a sample string';
21 | $encoded = Kafka_Encoder::encode_message($test);
22 | $this->assertContains($test, $encoded);
23 | }
24 |
25 | public function testEncodedMessages() {
26 | $topic = 'sample topic';
27 | $partition = 1;
28 | $messages = array(
29 | 'test 1',
30 | 'test 2 abcde',
31 | );
32 | $encoded = Kafka_Encoder::encode_produce_request($topic, $partition, $messages);
33 | $this->assertContains($topic, $encoded);
34 | $this->assertContains($partition, $encoded);
35 | foreach ($messages as $msg) {
36 | $this->assertContains($msg, $encoded);
37 | }
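// expected wire size: 4 (request length) + 2 (request id) + 2 (topic length) + topic
// + 4 (partition) + 4 (message set length), then 9 header bytes (4 length + 1 magic + 4 checksum) per message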
38 | $size = 4 + 2 + 2 + strlen($topic) + 4 + 4;
39 | foreach ($messages as $msg) {
40 | $size += 9 + strlen($msg);
41 | }
42 | $this->assertEquals($size, strlen($encoded));
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/message/CompressionCodec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.message
18 |
19 | object CompressionCodec {
20 | def getCompressionCodec(codec: Int): CompressionCodec = {
21 | codec match {
22 | case 0 => NoCompressionCodec
23 | case 1 => GZIPCompressionCodec
24 | case _ => throw new kafka.common.UnknownCodecException("%d is an unknown compression codec".format(codec))
25 | }
26 | }
27 | }
28 |
29 | sealed trait CompressionCodec { def codec: Int }
30 |
31 | case object DefaultCompressionCodec extends CompressionCodec { val codec = 1 } // the default is GZIP, hence the shared codec id
32 |
33 | case object GZIPCompressionCodec extends CompressionCodec { val codec = 1 }
34 |
35 | case object NoCompressionCodec extends CompressionCodec { val codec = 0 }
36 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/server/MultiMessageSetSend.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.server
18 |
19 | import java.nio._
20 | import java.nio.channels._
21 | import kafka.network._
22 | import kafka.message._
23 | import kafka.utils._
24 |
25 | /**
26 | * A set of message sets prefixed by size
27 | */
28 | @nonthreadsafe
29 | private[server] class MultiMessageSetSend(val sets: List[MessageSetSend]) extends MultiSend(new ByteBufferSend(6) :: sets) {
30 |
31 | val buffer = this.sends.head.asInstanceOf[ByteBufferSend].buffer
32 | val allMessageSetSize: Int = sets.foldLeft(0)(_ + _.sendSize)
33 | val expectedBytesToWrite: Int = 4 + 2 + allMessageSetSize
34 | buffer.putInt(2 + allMessageSetSize)
35 | buffer.putShort(0)
36 | buffer.rewind()
37 |
38 | }
39 |
--------------------------------------------------------------------------------
/core/src/test/scala/other/kafka/TestTruncate.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka
18 |
19 | import java.io._
20 | import java.nio._
21 |
22 | /* This code tests the correct functioning of Java's FileChannel.truncate; on some platforms it doesn't work. */
23 | object TestTruncate {
24 |
25 | def main(args: Array[String]): Unit = {
26 | val name = File.createTempFile("kafka", ".test")
27 | name.deleteOnExit()
28 | val file = new RandomAccessFile(name, "rw").getChannel()
29 | val buffer = ByteBuffer.allocate(12)
30 | buffer.putInt(4).putInt(4).putInt(4)
31 | buffer.rewind()
32 | file.write(buffer)
33 | println("position prior to truncate: " + file.position)
34 | file.truncate(4)
35 | println("position after truncate to 4: " + file.position)
36 | }
37 |
38 | }
39 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/Range.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import scala.math._
20 |
21 | /**
22 | * A generic range value with a start and end
23 | */
24 | trait Range {
25 | /** The first index in the range */
26 | def start: Long
27 | /** The total number of indexes in the range */
28 | def size: Long
29 | /** Return true iff the range is empty */
30 | def isEmpty: Boolean = size == 0
31 |
32 | /** if value is in range */
33 |   def contains(value: Long): Boolean =
34 |     (size == 0 && value == start) ||
35 |     (size > 0 && value >= start && value <= start + size - 1)
40 |
41 | override def toString() = "(start=" + start + ", size=" + size + ")"
42 | }
43 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Kafka.Client/RequestContext.cs:
--------------------------------------------------------------------------------
1 | using System.Net.Sockets;
2 |
3 | namespace Kafka.Client
4 | {
5 | /// <summary>
6 | /// The context of a request made to Kafka.
7 | /// </summary>
8 | /// <typeparam name="T">
9 | /// Must be of type <see cref="AbstractRequest" /> and represents the type of request
10 | /// sent to Kafka.
11 | /// </typeparam>
12 | public class RequestContext<T> where T : AbstractRequest
13 | {
14 | /// <summary>
15 | /// Initializes a new instance of the RequestContext class.
16 | /// </summary>
17 | /// <param name="networkStream">The network stream that sent the message.</param>
18 | /// <param name="request">The request sent over the stream.</param>
19 | public RequestContext(NetworkStream networkStream, T request)
20 | {
21 | NetworkStream = networkStream;
22 | Request = request;
23 | }
24 |
25 | /// <summary>
26 | /// Gets the network stream instance of the request.
27 | /// </summary>
28 | public NetworkStream NetworkStream { get; private set; }
29 |
30 | /// <summary>
31 | /// Gets the request object
32 | /// associated with the context.
33 | /// </summary>
34 | public T Request { get; private set; }
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/system_test/embedded_consumer/bin/expected.out:
--------------------------------------------------------------------------------
1 | start the servers ...
2 | start producing messages ...
3 | wait for consumer to finish consuming ...
4 | [2011-05-17 14:49:11,605] INFO Creating async producer for broker id = 2 at localhost:9091 (kafka.producer.ProducerPool)
5 | [2011-05-17 14:49:11,606] INFO Creating async producer for broker id = 1 at localhost:9092 (kafka.producer.ProducerPool)
6 | [2011-05-17 14:49:11,607] INFO Creating async producer for broker id = 3 at localhost:9090 (kafka.producer.ProducerPool)
7 | thread 0: 400000 messages sent 3514012.1233 nMsg/sec 3.3453 MBs/sec
8 | [2011-05-17 14:49:34,382] INFO Closing all async producers (kafka.producer.ProducerPool)
9 | [2011-05-17 14:49:34,383] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer)
10 | [2011-05-17 14:49:34,384] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer)
11 | [2011-05-17 14:49:34,385] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer)
12 | Total Num Messages: 400000 bytes: 79859641 in 22.93 secs
13 | Messages/sec: 17444.3960
14 | MB/sec: 3.3214
15 | test passed
16 | stopping the servers
17 | bin/../../../bin/zookeeper-server-start.sh: line 9: 22584 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
18 | bin/../../../bin/zookeeper-server-start.sh: line 9: 22585 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
19 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/cluster/Cluster.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.cluster
18 |
19 | import kafka.utils._
20 | import scala.collection._
21 |
22 | /**
23 | * The set of active brokers in the cluster
24 | */
25 | private[kafka] class Cluster {
26 |
27 | private val brokers = new mutable.HashMap[Int, Broker]
28 |
29 | def this(brokerList: Iterable[Broker]) {
30 | this()
31 | for(broker <- brokerList)
32 | brokers.put(broker.id, broker)
33 | }
34 |
35 | def getBroker(id: Int) = brokers.get(id).get
36 |
37 | def add(broker: Broker) = brokers.put(broker.id, broker)
38 |
39 | def remove(id: Int) = brokers.remove(id)
40 |
41 | def size = brokers.size
42 |
43 | override def toString(): String =
44 | "Cluster(" + brokers.values.mkString(", ") + ")"
45 | }
46 |
--------------------------------------------------------------------------------
/clients/cpp/src/encoder.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * encoder.hpp
3 | *
4 | * Created on: 21 Jun 2011
5 | * Author: Ben Gray (@benjamg)
6 | */
7 |
8 | #ifndef KAFKA_ENCODER_HPP_
9 | #define KAFKA_ENCODER_HPP_
10 |
11 | #include <boost/foreach.hpp>
12 | #include "encoder_helper.hpp"
13 |
14 | namespace kafkaconnect {
15 |
16 | template <typename List>
17 | void encode(std::ostream& stream, const std::string& topic, const uint32_t partition, const List& messages)
18 | {
19 | // Pre-calculate size of message set
20 | uint32_t messageset_size = 0;
21 | BOOST_FOREACH(const std::string& message, messages)
22 | {
23 | messageset_size += message_format_header_size + message.length();
24 | }
25 |
26 | // Packet format is ... packet size (4 bytes)
27 | encoder_helper::raw(stream, htonl(2 + 2 + topic.size() + 4 + 4 + messageset_size));
28 |
29 | // ... magic number (2 bytes)
30 | encoder_helper::raw(stream, htons(kafka_format_version));
31 |
32 | // ... topic string size (2 bytes) & topic string
33 | encoder_helper::raw(stream, htons(topic.size()));
34 | stream << topic;
35 |
36 | // ... partition (4 bytes)
37 | encoder_helper::raw(stream, htonl(partition));
38 |
39 | // ... message set size (4 bytes) and message set
40 | encoder_helper::raw(stream, htonl(messageset_size));
41 | BOOST_FOREACH(const std::string& message, messages)
42 | {
43 | encoder_helper::message(stream, message);
44 | }
45 | }
46 |
47 | }
48 |
49 | #endif /* KAFKA_ENCODER_HPP_ */
50 |
--------------------------------------------------------------------------------
/contrib/hadoop-consumer/copy-jars.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -lt 1 ];
4 | then
5 | echo "USAGE: $0 dir"
6 | exit 1
7 | fi
8 |
9 | base_dir=$(dirname $0)/../..
10 |
11 | hadoop=${HADOOP_HOME}/bin/hadoop
12 |
13 | echo "$hadoop fs -rmr $1"
14 | $hadoop fs -rmr $1
15 |
16 | echo "$hadoop fs -mkdir $1"
17 | $hadoop fs -mkdir $1
18 |
19 | # include kafka jars
20 | for file in $base_dir/contrib/hadoop-consumer/target/scala_2.8.0/*.jar;
21 | do
22 | echo "$hadoop fs -put $file $1/"
23 | $hadoop fs -put $file $1/
24 | done
25 |
26 | # include the kafka core jar
27 | echo "$hadoop fs -put $base_dir/core/target/scala_2.8.0/kafka-*.jar $1/"
28 | $hadoop fs -put $base_dir/core/target/scala_2.8.0/kafka-*.jar $1/
29 |
30 | # include core lib jars
31 | for file in $base_dir/core/lib/*.jar;
32 | do
33 | echo "$hadoop fs -put $file $1/"
34 | $hadoop fs -put $file $1/
35 | done
36 |
37 | for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
38 | do
39 | echo "$hadoop fs -put $file $1/"
40 | $hadoop fs -put $file $1/
41 | done
42 |
43 | # include scala library jar
44 | echo "$hadoop fs -put $base_dir/project/boot/scala-2.8.0/lib/scala-library.jar $1/"
45 | $hadoop fs -put $base_dir/project/boot/scala-2.8.0/lib/scala-library.jar $1/
46 |
47 | local_dir=$(dirname $0)
48 |
49 | # include hadoop-consumer jars
50 | for file in $local_dir/lib/*.jar;
51 | do
52 | echo "$hadoop fs -put $file $1/"
53 | $hadoop fs -put $file $1/
54 | done
55 |
56 |
--------------------------------------------------------------------------------
/clients/php/src/tests/Kafka/ProducerTest.php:
--------------------------------------------------------------------------------
1 |
7 | */
8 | class Kafka_ProducerMock extends Kafka_Producer {
9 | public function connect() {
10 | if (!is_resource($this->conn)) {
11 | $this->conn = fopen('php://temp', 'w+b');
12 | }
13 | }
14 |
15 | public function getData() {
16 | $this->connect();
17 | rewind($this->conn);
18 | return stream_get_contents($this->conn);
19 | }
20 | }
21 |
22 | /**
23 | * Description of ProducerTest
24 | *
25 | * @author Lorenzo Alberton
26 | */
27 | class Kafka_ProducerTest extends PHPUnit_Framework_TestCase
28 | {
29 | /**
30 | * @var Kafka_Producer
31 | */
32 | private $producer;
33 |
34 | public function setUp() {
35 | $this->producer = new Kafka_ProducerMock('localhost', 1234);
36 | }
37 |
38 | public function tearDown() {
39 | $this->producer->close();
40 | unset($this->producer);
41 | }
42 |
43 |
44 | public function testProducer() {
45 | $messages = array(
46 | 'test 1',
47 | 'test 2 abc',
48 | );
49 | $topic = 'a topic';
50 | $partition = 3;
51 | $this->producer->send($messages, $topic, $partition);
52 | $sent = $this->producer->getData();
53 | $this->assertContains($topic, $sent);
54 | $this->assertContains($partition, $sent);
55 | foreach ($messages as $msg) {
56 | $this->assertContains($msg, $sent);
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/log/LogStats.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.log
18 |
19 | import java.util.concurrent.atomic.AtomicLong
20 |
21 | trait LogStatsMBean {
22 | def getName(): String
23 | def getSize(): Long
24 | def getNumberOfSegments: Int
25 | def getCurrentOffset: Long
26 | def getNumAppendedMessages: Long
27 | }
28 |
29 | class LogStats(val log: Log) extends LogStatsMBean {
30 | private val numCumulatedMessages = new AtomicLong(0)
31 |
32 | def getName(): String = log.name
33 |
34 | def getSize(): Long = log.size
35 |
36 | def getNumberOfSegments: Int = log.numberOfSegments
37 |
38 | def getCurrentOffset: Long = log.getHighwaterMark
39 |
40 | def getNumAppendedMessages: Long = numCumulatedMessages.get
41 |
42 | def recordAppendedMessages(nMessages: Int) = numCumulatedMessages.getAndAdd(nMessages)
43 | }
44 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/message/CompressionUtilsTest.scala:
--------------------------------------------------------------------------------
1 | package kafka.message
2 |
3 | import junit.framework.TestCase
4 | import kafka.utils.TestUtils
5 |
6 | class CompressionUtilTest extends TestCase {
7 |
8 |
9 |
10 | def testSimpleCompressDecompress() {
11 |
12 | val messages = List[Message](new Message("hi there".getBytes), new Message("I am fine".getBytes), new Message("I am not so well today".getBytes))
13 |
14 | val message = CompressionUtils.compress(messages)
15 |
16 | val decompressedMessages = CompressionUtils.decompress(message)
17 |
18 | TestUtils.checkLength(decompressedMessages.iterator,3)
19 |
20 | TestUtils.checkEquals(messages.iterator, decompressedMessages.iterator)
21 | }
22 |
23 | def testComplexCompressDecompress() {
24 |
25 | val messages = List[Message](new Message("hi there".getBytes), new Message("I am fine".getBytes), new Message("I am not so well today".getBytes))
26 |
27 | val message = CompressionUtils.compress(messages.slice(0, 2))
28 |
29 | val complexMessages = List[Message](message):::messages.slice(2,3)
30 |
31 | val complexMessage = CompressionUtils.compress(complexMessages)
32 |
33 | val decompressedMessages = CompressionUtils.decompress(complexMessage)
34 |
35 |     // the message-set iterator recursively expands the nested compressed message, yielding the three originals
36 |     TestUtils.checkLength(decompressedMessages.iterator,3)
38 |
39 | TestUtils.checkEquals(messages.iterator, decompressedMessages.iterator)
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/consumer/KafkaMessageStream.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.consumer
18 |
19 | import java.util.concurrent.BlockingQueue
20 | import org.apache.log4j.Logger
21 | import kafka.message.Message
22 |
23 |
24 | /**
25 |  * All calls to elements should produce the same thread-safe iterator? Should have a separate thread
26 | * that feeds messages into a blocking queue for processing.
27 | */
28 | class KafkaMessageStream(private val queue: BlockingQueue[FetchedDataChunk], consumerTimeoutMs: Int)
29 | extends Iterable[Message] with java.lang.Iterable[Message]{
30 |
31 | private val logger = Logger.getLogger(getClass())
32 | private val iter: ConsumerIterator = new ConsumerIterator(queue, consumerTimeoutMs)
33 |
34 | /**
35 | * Create an iterator over messages in the stream.
36 | */
37 | def iterator(): ConsumerIterator = iter
38 | }
39 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/ProducerData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer
18 |
19 | /**
20 | * Represents the data to be sent using the Producer send API
21 | * @param topic the topic under which the message is to be published
22 | * @param key the key used by the partitioner to pick a broker partition
23 | * @param data variable length data to be published as Kafka messages under topic
24 | */
25 | class ProducerData[K, V](private val topic: String,
26 | private val key: K,
27 | private val data: Seq[V]) {
28 |
29 | def this(t: String, d: Seq[V]) = this(topic = t, key = null.asInstanceOf[K], data = d)
30 |
31 | def this(t: String, d: V) = this(topic = t, key = null.asInstanceOf[K], data = List(d))
32 |
33 | def getTopic: String = topic
34 |
35 | def getKey: K = key
36 |
37 | def getData: Seq[V] = data
38 | }
39 |
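// A minimal sketch of the three documented construction shapes (topic, key and payload values illustrative):
//
//   new ProducerData[String, String]("topic1", "key1", List("a", "b"))  // keyed batch
//   new ProducerData[String, String]("topic1", List("a", "b"))          // unkeyed batch
//   new ProducerData[String, String]("topic1", "a")                     // single message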
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/consumer/storage/MemoryOffsetStorage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.consumer.storage
18 |
19 | import java.util.concurrent._
20 | import java.util.concurrent.atomic._
21 | import java.util.concurrent.locks._
22 |
23 | class MemoryOffsetStorage extends OffsetStorage {
24 |
25 | val offsetAndLock = new ConcurrentHashMap[(Int, String), (AtomicLong, Lock)]
26 |
27 | def reserve(node: Int, topic: String): Long = {
28 | val key = (node, topic)
29 | if(!offsetAndLock.containsKey(key))
30 | offsetAndLock.putIfAbsent(key, (new AtomicLong(0), new ReentrantLock))
31 | val (offset, lock) = offsetAndLock.get(key)
32 | lock.lock
33 | offset.get
34 | }
35 |
36 | def commit(node: Int, topic: String, offset: Long) = {
37 | val (highwater, lock) = offsetAndLock.get((node, topic))
38 | highwater.set(offset)
39 | lock.unlock
40 | offset
41 | }
42 |
43 | }
44 |
--------------------------------------------------------------------------------
/clients/go/src/timing.go:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 NeuStar, Inc.
3 | * All rights reserved.
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | *
17 | * NeuStar, the Neustar logo and related names and logos are registered
18 | * trademarks, service marks or tradenames of NeuStar, Inc. All other
19 | * product names, company names, marks, logos and symbols may be trademarks
20 | * of their respective owners.
21 | */
22 |
23 |
24 | package kafka
25 |
26 | import (
27 | "log"
28 | "time"
29 | )
30 |
31 | type Timing struct {
32 | label string
33 | start int64
34 | stop int64
35 | }
36 |
37 | func StartTiming(label string) *Timing {
38 | return &Timing{label: label, start: time.Nanoseconds(), stop: 0}
39 | }
40 |
41 | func (t *Timing) Stop() {
42 | t.stop = time.Nanoseconds()
43 | }
44 |
45 | func (t *Timing) Print() {
46 | if t.stop == 0 {
47 | t.Stop()
48 | }
49 |   log.Printf("%s took: %f ms\n", t.label, float64(t.stop-t.start)/1000000)
50 | }
51 |
--------------------------------------------------------------------------------
/core/src/test/scala/other/kafka/DeleteZKPath.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka
18 |
19 | import consumer.ConsumerConfig
20 | import utils.{StringSerializer, ZkUtils, Utils}
21 | import org.I0Itec.zkclient.ZkClient
22 |
23 | object DeleteZKPath {
24 | def main(args: Array[String]) {
25 | if(args.length < 2) {
26 | println("USAGE: " + DeleteZKPath.getClass.getName + " consumer.properties zk_path")
27 | System.exit(1)
28 | }
29 |
30 | val config = new ConsumerConfig(Utils.loadProps(args(0)))
31 | val zkPath = args(1)
32 |
33 | val zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs,
34 | StringSerializer)
35 |
36 | try {
37 | ZkUtils.deletePathRecursive(zkClient, zkPath);
38 | System.out.println(zkPath + " is deleted")
39 | } catch {
40 |       case e: Exception => System.err.println("Path not deleted"); e.printStackTrace()
41 | }
42 |
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/async/EventHandler.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.producer.async
17 |
18 | import java.util.Properties
19 | import kafka.producer.SyncProducer
20 | import kafka.serializer.Encoder
21 |
22 | /**
23 | * Handler that dispatches the batched data from the queue of the
24 | * asynchronous producer.
25 | */
26 | trait EventHandler[T] {
27 | /**
28 | * Initializes the event handler using a Properties object
29 | * @param props the properties used to initialize the event handler
30 | */
31 | def init(props: Properties) {}
32 |
33 | /**
34 | * Callback to dispatch the batched data and send it to a Kafka server
35 | * @param events the data sent to the producer
36 | * @param producer the low-level producer used to send the data
37 | */
38 | def handle(events: Seq[QueueItem[T]], producer: SyncProducer, encoder: Encoder[T])
39 |
40 | /**
41 | * Cleans up and shuts down the event handler
42 | */
43 | def close {}
44 | }
45 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Kafka.Client/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.CompilerServices;
3 | using System.Runtime.InteropServices;
4 |
5 | // General Information about an assembly is controlled through the following
6 | // set of attributes. Change these attribute values to modify the information
7 | // associated with an assembly.
8 | [assembly: AssemblyTitle("Kafka.Client")]
9 | [assembly: AssemblyDescription("")]
10 | [assembly: AssemblyConfiguration("")]
11 | [assembly: AssemblyCompany("Microsoft")]
12 | [assembly: AssemblyProduct("Kafka.Client")]
13 | [assembly: AssemblyCopyright("Copyright © Microsoft 2011")]
14 | [assembly: AssemblyTrademark("")]
15 | [assembly: AssemblyCulture("")]
16 |
17 | // Setting ComVisible to false makes the types in this assembly not visible
18 | // to COM components. If you need to access a type in this assembly from
19 | // COM, set the ComVisible attribute to true on that type.
20 | [assembly: ComVisible(false)]
21 |
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("93d702e5-9998-49a8-8c16-5b04b3ba55c1")]
24 |
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | // Major Version
28 | // Minor Version
29 | // Build Number
30 | // Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 |
--------------------------------------------------------------------------------
/clients/ruby/lib/kafka/producer.rb:
--------------------------------------------------------------------------------
1 | module Kafka
2 | class Producer
3 |
4 | include Kafka::IO
5 |
6 | PRODUCE_REQUEST_ID = Kafka::RequestType::PRODUCE
7 |
8 | attr_accessor :topic, :partition
9 |
10 | def initialize(options = {})
11 | self.topic = options[:topic] || "test"
12 | self.partition = options[:partition] || 0
13 | self.host = options[:host] || "localhost"
14 | self.port = options[:port] || 9092
15 | self.connect(self.host, self.port)
16 | end
17 |
18 | def encode(message)
19 | [message.magic].pack("C") + [message.calculate_checksum].pack("N") + message.payload.to_s
20 | end
21 |
22 | def encode_request(topic, partition, messages)
23 | message_set = Array(messages).collect { |message|
24 | encoded_message = self.encode(message)
25 | [encoded_message.length].pack("N") + encoded_message
26 | }.join("")
27 |
28 | request = [PRODUCE_REQUEST_ID].pack("n")
29 | topic = [topic.length].pack("n") + topic
30 | partition = [partition].pack("N")
31 | messages = [message_set.length].pack("N") + message_set
32 |
33 | data = request + topic + partition + messages
34 |
35 | return [data.length].pack("N") + data
36 | end
37 |
38 | def send(messages)
39 | self.write(self.encode_request(self.topic, self.partition, messages))
40 | end
41 |
42 | def batch(&block)
43 | batch = Kafka::Batch.new
44 | block.call( batch )
45 | self.send(batch.messages)
46 | batch.messages.clear
47 | end
48 | end
49 | end
50 |
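# A minimal usage sketch (option values illustrative; assumes Kafka::Message.new takes the payload):
#
#   producer = Kafka::Producer.new(:topic => "test", :host => "localhost", :port => 9092)
#   producer.send([Kafka::Message.new("hello")])
#   producer.batch { |b| b.messages << Kafka::Message.new("world") }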
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/zk/EmbeddedZookeeper.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.zk
18 |
19 | import org.apache.zookeeper.server.ZooKeeperServer
20 | import org.apache.zookeeper.server.NIOServerCnxn
21 | import kafka.utils.TestUtils
22 | import org.I0Itec.zkclient.ZkClient
23 | import java.net.InetSocketAddress
24 | import kafka.utils.{Utils, StringSerializer}
25 |
26 | class EmbeddedZookeeper(val connectString: String) {
27 | val snapshotDir = TestUtils.tempDir()
28 | val logDir = TestUtils.tempDir()
29 | val zookeeper = new ZooKeeperServer(snapshotDir, logDir, 3000)
30 | val port = connectString.split(":")(1).toInt
31 | val factory = new NIOServerCnxn.Factory(new InetSocketAddress("127.0.0.1", port))
32 | factory.startup(zookeeper)
33 | val client = new ZkClient(connectString)
34 | client.setZkSerializer(StringSerializer)
35 |
36 | def shutdown() {
37 | factory.shutdown()
38 | Utils.rm(logDir)
39 | Utils.rm(snapshotDir)
40 | }
41 |
42 | }
43 |
--------------------------------------------------------------------------------
/core/src/main/java/kafka/javaapi/consumer/ConsumerConnector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.javaapi.consumer;
18 |
19 | import kafka.consumer.KafkaMessageStream;
20 |
21 | import java.util.List;
22 | import java.util.Map;
23 |
24 | public interface ConsumerConnector {
25 | /**
26 | * Create a list of MessageStreams for each topic.
27 | *
28 |    *  @param topicCountMap a map of (topic, #streams) pairs
29 |    *  @return a map of (topic, list of KafkaMessageStream) pairs. The number of items in the
30 |    *          list is #streams. Each KafkaMessageStream supports an iterator over messages.
31 | */
32 |   public Map<String, List<KafkaMessageStream>> createMessageStreams(Map<String, Integer> topicCountMap);
33 |
34 | /**
35 | * Commit the offsets of all broker partitions connected by this connector.
36 | */
37 | public void commitOffsets();
38 |
39 | /**
40 | * Shut down the connector
41 | */
42 | public void shutdown();
43 | }
44 |
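// A minimal sketch of the intended call pattern (topic name and stream count illustrative):
//
//   Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
//   topicCountMap.put("topic1", 2);  // ask for two streams on topic1
//   Map<String, List<KafkaMessageStream>> streams = connector.createMessageStreams(topicCountMap);
//   // hand each of the two KafkaMessageStreams for "topic1" to its own consumer thread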
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/DelayedItem.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import java.util.concurrent._
20 | import scala.math._
21 |
22 | class DelayedItem[T](val item: T, delay: Long, unit: TimeUnit) extends Delayed {
23 |
24 | val delayMs = unit.toMillis(delay)
25 | val createdMs = System.currentTimeMillis
26 |
27 | def this(item: T, delayMs: Long) =
28 | this(item, delayMs, TimeUnit.MILLISECONDS)
29 |
30 | /**
31 | * The remaining delay time
32 | */
33 |   def getDelay(unit: TimeUnit): Long = {
34 |     val elapsedMs = (System.currentTimeMillis - createdMs)
35 |     unit.convert(max(delayMs - elapsedMs, 0), TimeUnit.MILLISECONDS)
36 |   }
37 |
38 | def compareTo(d: Delayed): Int = {
39 | val delayed = d.asInstanceOf[DelayedItem[T]]
40 | val myEnd = createdMs + delayMs
41 |     val yourEnd = delayed.createdMs + delayed.delayMs
42 |
43 | if(myEnd < yourEnd) -1
44 | else if(myEnd > yourEnd) 1
45 | else 0
46 | }
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Tests/Kafka.Client.Tests/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.CompilerServices;
3 | using System.Runtime.InteropServices;
4 |
5 | // General Information about an assembly is controlled through the following
6 | // set of attributes. Change these attribute values to modify the information
7 | // associated with an assembly.
8 | [assembly: AssemblyTitle("Kafka.Client.Tests")]
9 | [assembly: AssemblyDescription("")]
10 | [assembly: AssemblyConfiguration("")]
11 | [assembly: AssemblyCompany("Microsoft")]
12 | [assembly: AssemblyProduct("Kafka.Client.Tests")]
13 | [assembly: AssemblyCopyright("Copyright © Microsoft 2011")]
14 | [assembly: AssemblyTrademark("")]
15 | [assembly: AssemblyCulture("")]
16 |
17 | // Setting ComVisible to false makes the types in this assembly not visible
18 | // to COM components. If you need to access a type in this assembly from
19 | // COM, set the ComVisible attribute to true on that type.
20 | [assembly: ComVisible(false)]
21 |
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("bf361ee0-5cbb-4fd6-bded-67bedcb603b8")]
24 |
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | // Major Version
28 | // Minor Version
29 | // Build Number
30 | // Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 |
--------------------------------------------------------------------------------
/clients/cpp/Makefile.am:
--------------------------------------------------------------------------------
1 | ## LibKafkaConnect
2 | ## A C++ shared library for connecting to Kafka
3 |
4 | #
5 | # Warning: this is the first time I've made a configure.ac/Makefile.am thing
6 | # Please improve it as I have no idea what I am doing
7 | # @benjamg
8 | #
9 |
10 | ACLOCAL_AMFLAGS = -I build-aux/m4 ${ACLOCAL_FLAGS}
11 | AM_CPPFLAGS = $(DEPS_CFLAGS)
12 | EXAMPLE_LIBS = -lboost_system -lboost_thread -lkafkaconnect
13 |
14 | #
15 | # Shared Library
16 | #
17 |
18 | lib_LTLIBRARIES = libkafkaconnect.la
19 |
20 | libkafkaconnect_la_SOURCES = src/producer.cpp
21 | libkafkaconnect_la_LDFLAGS = -version-info $(KAFKACONNECT_VERSION)
22 |
23 | kafkaconnect_includedir = $(includedir)/kafkaconnect
24 | kafkaconnect_include_HEADERS = src/producer.hpp \
25 | src/encoder.hpp \
26 | src/encoder_helper.hpp
27 |
28 | #
29 | # Examples
30 | #
31 |
32 | noinst_PROGRAMS = producer
33 |
34 | producer_SOURCES = src/example.cpp
35 | producer_LDADD = $(DEPS_LIBS) $(EXAMPLE_LIBS)
36 |
37 | #
38 | # Tests
39 | #
40 |
41 | check_PROGRAMS = tests/encoder_helper tests/encoder tests/producer
42 | TESTS = tests/encoder_helper tests/encoder tests/producer
43 |
44 | tests_encoder_helper_SOURCES = src/tests/encoder_helper_tests.cpp
45 | tests_encoder_helper_LDADD = $(DEPS_LIBS) $(EXAMPLE_LIBS) -lboost_unit_test_framework
46 |
47 | tests_encoder_SOURCES = src/tests/encoder_tests.cpp
48 | tests_encoder_LDADD = $(DEPS_LIBS) $(EXAMPLE_LIBS) -lboost_unit_test_framework
49 |
50 | tests_producer_SOURCES = src/tests/producer_tests.cpp
51 | tests_producer_LDADD = $(DEPS_LIBS) $(EXAMPLE_LIBS) -lboost_unit_test_framework
52 |
--------------------------------------------------------------------------------
/clients/ruby/kafka-rb.gemspec:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | Gem::Specification.new do |s|
4 | s.name = %q{kafka-rb}
5 | s.version = "0.0.5"
6 |
7 | s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
8 | s.authors = ["Alejandro Crosa"]
9 | s.autorequire = %q{kafka-rb}
10 | s.date = %q{2011-01-13}
11 | s.description = %q{kafka-rb allows you to produce and consume messages using the Kafka distributed publish/subscribe messaging service.}
12 | s.email = %q{alejandrocrosa@gmail.com}
13 | s.extra_rdoc_files = ["LICENSE"]
14 | s.files = ["LICENSE", "README.md", "Rakefile", "lib/kafka", "lib/kafka/batch.rb", "lib/kafka/consumer.rb", "lib/kafka/io.rb", "lib/kafka/message.rb", "lib/kafka/producer.rb", "lib/kafka/request_type.rb", "lib/kafka.rb", "spec/batch_spec.rb", "spec/consumer_spec.rb", "spec/io_spec.rb", "spec/kafka_spec.rb", "spec/message_spec.rb", "spec/producer_spec.rb", "spec/spec_helper.rb"]
15 | s.homepage = %q{http://github.com/acrosa/kafka-rb}
16 | s.require_paths = ["lib"]
17 | s.rubygems_version = %q{1.3.7}
18 | s.summary = %q{A Ruby client for the Kafka distributed publish/subscribe messaging service}
19 |
20 | if s.respond_to? :specification_version then
21 | current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
22 | s.specification_version = 3
23 |
24 | if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
25 |       s.add_development_dependency(%q<rspec>, [">= 0"])
26 |     else
27 |       s.add_dependency(%q<rspec>, [">= 0"])
28 |     end
29 |   else
30 |     s.add_dependency(%q<rspec>, [">= 0"])
31 | end
32 | end
33 |
--------------------------------------------------------------------------------
/clients/csharp/src/Kafka/Tests/Kafka.Client.IntegrationTests/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.CompilerServices;
3 | using System.Runtime.InteropServices;
4 |
5 | // General Information about an assembly is controlled through the following
6 | // set of attributes. Change these attribute values to modify the information
7 | // associated with an assembly.
8 | [assembly: AssemblyTitle("Kafka.Client.IntegrationTests")]
9 | [assembly: AssemblyDescription("")]
10 | [assembly: AssemblyConfiguration("")]
11 | [assembly: AssemblyCompany("Microsoft")]
12 | [assembly: AssemblyProduct("Kafka.Client.IntegrationTests")]
13 | [assembly: AssemblyCopyright("Copyright © Microsoft 2011")]
14 | [assembly: AssemblyTrademark("")]
15 | [assembly: AssemblyCulture("")]
16 |
17 | // Setting ComVisible to false makes the types in this assembly not visible
18 | // to COM components. If you need to access a type in this assembly from
19 | // COM, set the ComVisible attribute to true on that type.
20 | [assembly: ComVisible(false)]
21 |
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("7b2387b7-6a58-4e8b-ae06-8aadf1a64949")]
24 |
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | // Major Version
28 | // Minor Version
29 | // Build Number
30 | // Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 |
--------------------------------------------------------------------------------
/core/src/test/scala/other/kafka/TestKafkaAppender.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka
18 |
19 | import message.Message
20 | import org.apache.log4j.{Logger, PropertyConfigurator}
21 | import serializer.Encoder
22 |
23 | object TestKafkaAppender {
24 |
25 | private val logger = Logger.getLogger(TestKafkaAppender.getClass)
26 |
27 | def main(args:Array[String]) {
28 |
29 | if(args.length < 1) {
30 | println("USAGE: " + TestKafkaAppender.getClass.getName + " log4j_config")
31 | System.exit(1)
32 | }
33 |
34 | try {
35 | PropertyConfigurator.configure(args(0))
36 | } catch {
37 |       case e: Exception => System.err.println("KafkaAppender could not be initialized! Exiting...")
38 | e.printStackTrace()
39 | System.exit(1)
40 | }
41 |
42 | for(i <- 1 to 10)
43 | logger.info("test")
44 | }
45 | }
46 |
47 | class AppenderStringSerializer extends Encoder[AnyRef] {
48 | def toMessage(event: AnyRef):Message = new Message(event.asInstanceOf[String].getBytes)
49 | }
50 |
51 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.integration
18 |
19 | import java.util.Properties
20 | import junit.framework.Assert._
21 | import kafka.producer._
22 | import kafka.consumer._
23 | import kafka.message._
24 | import kafka.server._
25 | import kafka.utils.{Utils, TestUtils}
26 | import org.scalatest.junit.JUnit3Suite
27 |
28 | /**
29 | * A test harness that brings up some number of broker nodes
30 | */
31 | trait KafkaServerTestHarness extends JUnit3Suite {
32 |
33 | val configs: List[KafkaConfig]
34 | var servers: List[KafkaServer] = null
35 |
36 | override def setUp() {
37 | if(configs.size <= 0)
38 |       throw new IllegalArgumentException("Must supply at least one server config.")
39 | servers = configs.map(TestUtils.createServer(_))
40 | super.setUp
41 | }
42 |
43 | override def tearDown() {
44 | super.tearDown
45 | servers.map(server => server.shutdown())
46 | servers.map(server => Utils.rm(server.config.logDir))
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/SyncProducerConfig.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.producer
18 |
19 | import kafka.utils.Utils
20 | import java.util.Properties
21 | import kafka.message.{CompressionUtils, CompressionCodec}
22 |
23 | class SyncProducerConfig(val props: Properties) extends SyncProducerConfigShared {
24 | /** the broker to which the producer sends events */
25 | val host = Utils.getString(props, "host")
26 |
27 | /** the port on which the broker is running */
28 | val port = Utils.getInt(props, "port")
29 | }
30 |
31 | trait SyncProducerConfigShared {
32 | val props: Properties
33 |
34 | val bufferSize = Utils.getInt(props, "buffer.size", 100*1024)
35 |
36 | val connectTimeoutMs = Utils.getInt(props, "connect.timeout.ms", 5000)
37 |
38 | /** the socket timeout for network requests */
39 | val socketTimeoutMs = Utils.getInt(props, "socket.timeout.ms", 30000)
40 |
41 | val reconnectInterval = Utils.getInt(props, "reconnect.interval", 30000)
42 |
43 | val maxMessageSize = Utils.getInt(props, "max.message.size", 1000000)
44 | }
45 |
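Usage sketch (hypothetical values): only "host" and "port" are required, everything else falls back to the defaults above.

    val props = new java.util.Properties
    props.put("host", "localhost")
    props.put("port", "9092")
    val config = new SyncProducerConfig(props)
    // config.bufferSize == 102400, config.connectTimeoutMs == 5000, etc.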
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/javaapi/MultiFetchResponse.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.javaapi
18 |
19 | import kafka.utils.IteratorTemplate
20 | import java.nio.ByteBuffer
21 | import message.ByteBufferMessageSet
22 |
23 | class MultiFetchResponse(buffer: ByteBuffer, numSets: Int, offsets: Array[Long]) extends java.lang.Iterable[ByteBufferMessageSet] {
24 | val underlyingBuffer = ByteBuffer.wrap(buffer.array)
25 | // this has the side effect of setting the initial position of buffer correctly
26 | val errorCode = underlyingBuffer.getShort
27 |
28 | import Implicits._
29 | val underlying = new kafka.api.MultiFetchResponse(underlyingBuffer, numSets, offsets)
30 |
31 | override def toString() = underlying.toString
32 |
33 | def iterator : java.util.Iterator[ByteBufferMessageSet] = {
34 | new IteratorTemplate[ByteBufferMessageSet] {
35 | val iter = underlying.iterator
36 | override def makeNext(): ByteBufferMessageSet = {
37 | if(iter.hasNext)
38 | iter.next
39 | else
40 | return allDone
41 | }
42 | }
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/Time.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | /**
20 | * Some common constants
21 | */
22 | object Time {
23 | val NsPerUs = 1000
24 | val UsPerMs = 1000
25 | val MsPerSec = 1000
26 | val NsPerMs = NsPerUs * UsPerMs
27 | val NsPerSec = NsPerMs * MsPerSec
28 | val UsPerSec = UsPerMs * MsPerSec
29 | val SecsPerMin = 60
30 | val MinsPerHour = 60
31 | val HoursPerDay = 24
32 | val SecsPerHour = SecsPerMin * MinsPerHour
33 | val SecsPerDay = SecsPerHour * HoursPerDay
34 | val MinsPerDay = MinsPerHour * HoursPerDay
35 | }
36 |
37 | /**
38 | * A mockable interface for time functions
39 | */
40 | trait Time {
41 |
42 | def milliseconds: Long
43 |
44 | def nanoseconds: Long
45 |
46 | def sleep(ms: Long)
47 | }
48 |
49 | /**
50 | * The normal system implementation of time functions
51 | */
52 | object SystemTime extends Time {
53 |
54 | def milliseconds: Long = System.currentTimeMillis
55 |
56 | def nanoseconds: Long = System.nanoTime
57 |
58 | def sleep(ms: Long): Unit = Thread.sleep(ms)
59 |
60 | }
61 |
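The trait exists so code that depends on the clock can be tested deterministically (the repo also ships a MockTime in kafka.utils). A minimal sketch of the idea, not the shipped implementation:

    class FakeTime(var currentMs: Long) extends Time {
      def milliseconds: Long = currentMs
      def nanoseconds: Long = currentMs * Time.NsPerMs
      def sleep(ms: Long) { currentMs += ms } // advance the clock instead of blocking
    }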
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/consumer/TopicCountTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.consumer
18 |
19 | import junit.framework.Assert._
20 | import org.junit.Test
21 | import org.scalatest.junit.JUnitSuite
22 | import kafka.cluster.Partition
23 |
24 |
25 | class TopicCountTest extends JUnitSuite {
26 |
27 | @Test
28 | def testBasic() {
29 |     val consumer = "consumer1"
30 | val json = """{ "topic1" : 2, "topic2" : 3 }"""
31 | val topicCount = TopicCount.constructTopicCount(consumer, json)
32 | val topicCountMap = Map(
33 | "topic1" -> 2,
34 | "topic2" -> 3
35 | )
36 | val expectedTopicCount = new TopicCount(consumer, topicCountMap)
37 | assertTrue(expectedTopicCount == topicCount)
38 |
39 | val topicCount2 = TopicCount.constructTopicCount(consumer, expectedTopicCount.toJsonString)
40 | assertTrue(expectedTopicCount == topicCount2)
41 | }
42 |
43 | @Test
44 | def testPartition() {
45 | assertTrue(new Partition(10, 0) == new Partition(10, 0))
46 | assertTrue(new Partition(10, 1) != new Partition(10, 0))
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/clients/go/src/converts.go:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 NeuStar, Inc.
3 | * All rights reserved.
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | *
17 | * NeuStar, the Neustar logo and related names and logos are registered
18 | * trademarks, service marks or tradenames of NeuStar, Inc. All other
19 | * product names, company names, marks, logos and symbols may be trademarks
20 | * of their respective owners.
21 | */
22 |
23 | package kafka
24 |
25 |
26 | import (
27 | "encoding/binary"
28 | )
29 |
30 |
31 | func uint16bytes(value int) []byte {
32 | result := make([]byte, 2)
33 | binary.BigEndian.PutUint16(result, uint16(value))
34 | return result
35 | }
36 |
37 | func uint32bytes(value int) []byte {
38 | result := make([]byte, 4)
39 | binary.BigEndian.PutUint32(result, uint32(value))
40 | return result
41 | }
42 |
43 | func uint32toUint32bytes(value uint32) []byte {
44 | result := make([]byte, 4)
45 | binary.BigEndian.PutUint32(result, value)
46 | return result
47 | }
48 |
49 | func uint64ToUint64bytes(value uint64) []byte {
50 | result := make([]byte, 8)
51 | binary.BigEndian.PutUint64(result, value)
52 | return result
53 | }
54 |
--------------------------------------------------------------------------------
/core/src/main/java/kafka/javaapi/producer/async/EventHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.javaapi.producer.async;
17 |
18 | import kafka.javaapi.producer.SyncProducer;
19 | import kafka.producer.async.QueueItem;
20 | import kafka.serializer.Encoder;
21 |
22 | import java.util.List;
23 | import java.util.Properties;
24 |
25 | /**
26 | * Handler that dispatches the batched data from the queue of the
27 | * asynchronous producer.
28 | */
29 | public interface EventHandler<T> {
30 | /**
31 | * Initializes the event handler using a Properties object
32 | * @param props the properties used to initialize the event handler
33 | */
34 | public void init(Properties props);
35 |
36 | /**
37 | * Callback to dispatch the batched data and send it to a Kafka server
38 | * @param events the data sent to the producer
39 | * @param producer the low-level producer used to send the data
40 | */
41 |   public void handle(List<QueueItem<T>> events, SyncProducer producer, Encoder<T> encoder);
42 |
43 | /**
44 | * Cleans up and shuts down the event handler
45 | */
46 | public void close();
47 | }
48 |
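As a rough sketch of how this hook plugs in (not shipped with the repo; written in Scala against the Java interface), a handler that only counts what it dispatches might look like:

    import java.util.Properties
    import kafka.javaapi.producer.SyncProducer
    import kafka.javaapi.producer.async.EventHandler
    import kafka.producer.async.QueueItem
    import kafka.serializer.Encoder

    class CountingEventHandler[T] extends EventHandler[T] {
      private var dispatched = 0
      def init(props: Properties) { }
      def handle(events: java.util.List[QueueItem[T]], producer: SyncProducer, encoder: Encoder[T]) {
        dispatched += events.size
        // a real handler would serialize the events with the encoder
        // and send the resulting batch through the producer
      }
      def close() { println("dispatched " + dispatched + " events in total") }
    }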
--------------------------------------------------------------------------------
/clients/cpp/src/encoder_helper.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * encoder_helper.hpp
3 | *
4 | * Created on: 21 Jun 2011
5 | * Author: Ben Gray (@benjamg)
6 | */
7 |
8 | #ifndef KAFKA_ENCODER_HELPER_HPP_
9 | #define KAFKA_ENCODER_HELPER_HPP_
10 |
11 | #include <ostream>
12 | #include <string>
13 |
14 | #include <arpa/inet.h>
15 | #include <stdint.h>
16 |
17 | #include <boost/crc.hpp>
18 |
19 | namespace kafkaconnect {
20 | namespace test { class encoder_helper; }
21 |
22 | const uint16_t kafka_format_version = 0;
23 |
24 | const uint8_t message_format_magic_number = 0;
25 | const uint8_t message_format_extra_data_size = 1 + 4;
26 | const uint8_t message_format_header_size = message_format_extra_data_size + 4;
27 |
28 | class encoder_helper
29 | {
30 | private:
31 | friend class test::encoder_helper;
32 | 	template <typename T> friend void encode(std::ostream&, const std::string&, const uint32_t, const T&);
33 |
34 | static std::ostream& message(std::ostream& stream, const std::string message)
35 | {
36 | // Message format is ... message & data size (4 bytes)
37 | raw(stream, htonl(message_format_extra_data_size + message.length()));
38 |
39 | // ... magic number (1 byte)
40 | stream << message_format_magic_number;
41 |
42 | // ... string crc32 (4 bytes)
43 | boost::crc_32_type result;
44 | result.process_bytes(message.c_str(), message.length());
45 | raw(stream, htonl(result.checksum()));
46 |
47 | // ... message string bytes
48 | stream << message;
49 |
50 | return stream;
51 | }
52 |
53 | 	template <typename Data>
54 | static std::ostream& raw(std::ostream& stream, const Data& data)
55 | {
56 | 		stream.write(reinterpret_cast<const char*>(&data), sizeof(Data));
57 | return stream;
58 | }
59 | };
60 |
61 | }
62 |
63 | #endif /* KAFKA_ENCODER_HELPER_HPP_ */
64 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/api/FetchRequest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.api
18 |
19 | import java.nio._
20 | import kafka.network._
21 | import kafka.utils._
22 |
23 | object FetchRequest {
24 |
25 | def readFrom(buffer: ByteBuffer): FetchRequest = {
26 | val topic = Utils.readShortString(buffer, "UTF-8")
27 | val partition = buffer.getInt()
28 | val offset = buffer.getLong()
29 | val size = buffer.getInt()
30 | new FetchRequest(topic, partition, offset, size)
31 | }
32 | }
33 |
34 | class FetchRequest(val topic: String,
35 | val partition: Int,
36 | val offset: Long,
37 | val maxSize: Int) extends Request(RequestKeys.Fetch) {
38 |
39 | def writeTo(buffer: ByteBuffer) {
40 | Utils.writeShortString(buffer, topic, "UTF-8")
41 | buffer.putInt(partition)
42 | buffer.putLong(offset)
43 | buffer.putInt(maxSize)
44 | }
45 |
46 | def sizeInBytes(): Int = 2 + topic.length + 4 + 8 + 4
47 |
48 |   override def toString(): String = "FetchRequest(topic:" + topic + ", part:" + partition + ", offset:" + offset +
49 |     ", maxSize:" + maxSize + ")"
50 | }
51 |
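A request serializes to exactly sizeInBytes() bytes (2-byte topic length + topic + partition + offset + maxSize, assuming an ASCII topic name), so a round trip through a buffer recovers it; a small sketch:

    val req = new FetchRequest("test", 0, 0L, 1024)
    val buf = java.nio.ByteBuffer.allocate(req.sizeInBytes)
    req.writeTo(buf)
    buf.rewind()
    val copy = FetchRequest.readFrom(buf) // same topic, partition, offset and maxSize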
--------------------------------------------------------------------------------
/examples/src/main/java/kafka/examples/Producer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.examples;
17 |
18 | import kafka.javaapi.producer.ProducerData;
19 | import kafka.producer.ProducerConfig;
20 | import java.util.Properties;
21 |
22 | public class Producer extends Thread
23 | {
24 |   private final kafka.javaapi.producer.Producer<Integer, String> producer;
25 | private final String topic;
26 | private final Properties props = new Properties();
27 |
28 | public Producer(String topic)
29 | {
30 | props.put("serializer.class", "kafka.serializer.StringEncoder");
31 | props.put("zk.connect", "localhost:2181");
32 | // Use random partitioner. Don't need the key type. Just set it to Integer.
33 | // The message is of type String.
34 |     producer = new kafka.javaapi.producer.Producer<Integer, String>(new ProducerConfig(props));
35 | this.topic = topic;
36 | }
37 |
38 | public void run() {
39 | int messageNo = 1;
40 | while(true)
41 | {
42 |       String messageStr = "Message_" + messageNo;
43 |       producer.send(new ProducerData<Integer, String>(topic, messageStr));
44 | messageNo++;
45 | }
46 | }
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/perf/src/main/java/kafka/perf/jmx/BrokerJmxClient.java:
--------------------------------------------------------------------------------
1 | package kafka.perf.jmx;
2 |
3 | import javax.management.JMX;
4 | import javax.management.MBeanServerConnection;
5 | import javax.management.ObjectName;
6 | import javax.management.remote.JMXConnector;
7 | import javax.management.remote.JMXConnectorFactory;
8 | import javax.management.remote.JMXServiceURL;
9 |
10 | import kafka.network.SocketServerStatsMBean;
11 |
12 | public class BrokerJmxClient
13 | {
14 | private final String host;
15 | private final int port;
16 | private final long time;
17 | public BrokerJmxClient(String host, int port,
18 | long time)
19 | {
20 | this.host = host;
21 | this.port = port;
22 | this.time = time;
23 | }
24 |
25 | public MBeanServerConnection getMbeanConnection() throws Exception
26 | {
27 | JMXServiceURL url =
28 | new JMXServiceURL("service:jmx:rmi:///jndi/rmi://"+ host+ ":" + port + "/jmxrmi");
29 | JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
30 | MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
31 | return mbsc;
32 | }
33 |
34 | public SocketServerStatsMBean createSocketMbean() throws Exception
35 | {
36 |
37 | ObjectName mbeanName = new ObjectName("kafka:type=kafka.SocketServerStats");
38 | SocketServerStatsMBean stats = JMX.newMBeanProxy(getMbeanConnection(), mbeanName, SocketServerStatsMBean.class, true);
39 | return stats;
40 | }
41 |
42 | public String getBrokerStats() throws Exception
43 | {
44 | StringBuffer buf = new StringBuffer();
45 | SocketServerStatsMBean stats = createSocketMbean();
46 |     buf.append(stats.getBytesWrittenPerSecond() / (1024 * 1024) + "," + stats.getBytesReadPerSecond() / (1024 * 1024));
47 | return buf.toString();
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/cluster/Broker.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.cluster
18 |
19 | import java.util.Arrays
20 | import kafka.utils._
21 | import java.net.InetAddress
22 | import kafka.server.KafkaConfig
23 | import util.parsing.json.JSON
24 |
25 | /**
26 | * A Kafka broker
27 | */
28 | private[kafka] object Broker {
29 | def createBroker(id: Int, brokerInfoString: String): Broker = {
30 | val brokerInfo = brokerInfoString.split(":")
31 | new Broker(id, brokerInfo(0), brokerInfo(1), brokerInfo(2).toInt)
32 | }
33 | }
34 |
35 | private[kafka] class Broker(val id: Int, val creatorId: String, val host: String, val port: Int) {
36 |
37 | override def toString(): String = new String("id:" + id + ",creatorId:" + creatorId + ",host:" + host + ",port:" + port)
38 |
39 | def getZKString(): String = new String(creatorId + ":" + host + ":" + port)
40 |
41 | override def equals(obj: Any): Boolean = {
42 | obj match {
43 | case null => false
44 | case n: Broker => id == n.id && host == n.host && port == n.port
45 | case _ => false
46 | }
47 | }
48 |
49 | override def hashCode(): Int = Utils.hashcode(id, host, port)
50 |
51 | }
52 |
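createBroker is the inverse of getZKString: it parses the "creatorId:host:port" value a broker registers in ZooKeeper. A sketch with a made-up registry string (usable only from inside the kafka package, since Broker is private[kafka]):

    val broker = Broker.createBroker(0, "creator-0:localhost:9092")
    // broker.host == "localhost", broker.port == 9092
    // broker.getZKString == "creator-0:localhost:9092"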
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.producer
17 |
18 | import collection.mutable.Map
19 | import collection.SortedSet
20 | import kafka.cluster.{Broker, Partition}
21 |
22 | trait BrokerPartitionInfo {
23 | /**
24 |    * Return the sorted set of broker/partition pairs for the given topic.
25 |    * @param topic the topic for which this information is to be returned
26 |    * @return a sorted set of (brokerId, partitionId) pairs. Returns an empty
27 |    *         set if no brokers are available.
28 | */
29 | def getBrokerPartitionInfo(topic: String = null): SortedSet[Partition]
30 |
31 | /**
32 | * Generate the host and port information for the broker identified
33 | * by the given broker id
34 | * @param brokerId the broker for which the info is to be returned
35 | * @return host and port of brokerId
36 | */
37 | def getBrokerInfo(brokerId: Int): Option[Broker]
38 |
39 | /**
40 | * Generate a mapping from broker id to the host and port for all brokers
41 | * @return mapping from id to host and port of all brokers
42 | */
43 | def getAllBrokerInfo: Map[Int, Broker]
44 |
45 | /**
46 | * Cleanup
47 | */
48 | def close
49 | }
50 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/KafkaScheduler.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import java.util.concurrent._
20 | import java.util.concurrent.atomic._
21 | import kafka.utils._
22 | import org.apache.log4j.Logger
23 |
24 | /**
25 | * A scheduler for running jobs in the background
26 |  * TODO: ScheduledThreadPoolExecutor notoriously swallows exceptions
27 | */
28 | class KafkaScheduler(val numThreads: Int, val baseThreadName: String, isDaemon: Boolean) {
29 | private val logger = Logger.getLogger(getClass())
30 | private val threadId = new AtomicLong(0)
31 | private val executor = new ScheduledThreadPoolExecutor(numThreads, new ThreadFactory() {
32 | def newThread(runnable: Runnable): Thread = {
33 | val t = new Thread(runnable, baseThreadName + threadId.getAndIncrement)
34 | t.setDaemon(isDaemon)
35 | t
36 | }
37 | })
38 |
39 | def scheduleWithRate(fun: () => Unit, delayMs: Long, periodMs: Long) =
40 | executor.scheduleAtFixedRate(Utils.loggedRunnable(fun), delayMs, periodMs, TimeUnit.MILLISECONDS)
41 |
42 | def shutdown() = {
43 | executor.shutdownNow
44 | logger.info("shutdown scheduler " + baseThreadName)
45 | }
46 | }
47 |
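A usage sketch, with made-up names: schedule a periodic task on a daemon thread and shut the pool down when done.

    val scheduler = new KafkaScheduler(1, "kafka-logcleaner-", true)
    scheduler.scheduleWithRate(() => println("cleaning logs..."), 0, 1000) // run once per second
    // ... later, during shutdown:
    scheduler.shutdown()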
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/cluster/Partition.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.cluster
18 |
19 | object Partition {
20 | def parse(s: String): Partition = {
21 | val pieces = s.split("-")
22 | if(pieces.length != 2)
23 | throw new IllegalArgumentException("Expected name in the form x-y.")
24 | new Partition(pieces(0).toInt, pieces(1).toInt)
25 | }
26 | }
27 |
28 | class Partition(val brokerId: Int, val partId: Int) extends Ordered[Partition] {
29 |
30 | def this(name: String) = {
31 |     this(name.split("-")(0).toInt, name.split("-")(1).toInt)
32 | }
33 |
34 | def name = brokerId + "-" + partId
35 |
36 | override def toString(): String = name
37 |
38 | def compare(that: Partition) =
39 | if (this.brokerId == that.brokerId)
40 | this.partId - that.partId
41 | else
42 | this.brokerId - that.brokerId
43 |
44 | override def equals(other: Any): Boolean = {
45 | other match {
46 | case that: Partition =>
47 | (that canEqual this) && brokerId == that.brokerId && partId == that.partId
48 | case _ => false
49 | }
50 | }
51 |
52 | def canEqual(other: Any): Boolean = other.isInstanceOf[Partition]
53 |
54 | override def hashCode: Int = 31 * (17 + brokerId) + partId
55 |
56 | }
57 |
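Partitions order by broker id first and partition id second, which is what makes the sorted sets returned by BrokerPartitionInfo meaningful; a small illustration:

    val parts = List(new Partition(2, 0), new Partition(1, 1), new Partition(1, 0))
    parts.sorted.map(_.name) // List("1-0", "1-1", "2-0")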
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/api/MultiFetchResponse.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.api
18 |
19 | import java.nio._
20 | import collection.mutable
21 | import kafka.utils.IteratorTemplate
22 | import kafka.message._
23 |
24 | class MultiFetchResponse(val buffer: ByteBuffer, val numSets: Int, val offsets: Array[Long]) extends Iterable[ByteBufferMessageSet] {
25 | private val messageSets = new mutable.ListBuffer[ByteBufferMessageSet]
26 |
27 | for(i <- 0 until numSets) {
28 | val size = buffer.getInt()
29 | val errorCode: Int = buffer.getShort()
30 | val copy = buffer.slice()
31 | val payloadSize = size - 2
32 | copy.limit(payloadSize)
33 | buffer.position(buffer.position + payloadSize)
34 | messageSets += new ByteBufferMessageSet(copy, offsets(i), errorCode)
35 | }
36 |
37 | def iterator : Iterator[ByteBufferMessageSet] = {
38 | new IteratorTemplate[ByteBufferMessageSet] {
39 | val iter = messageSets.iterator
40 |
41 | override def makeNext(): ByteBufferMessageSet = {
42 | if(iter.hasNext)
43 | iter.next
44 | else
45 | return allDone
46 | }
47 | }
48 | }
49 |
50 | override def toString() = this.messageSets.toString
51 | }
52 |
--------------------------------------------------------------------------------
/clients/ruby/Rakefile:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | require 'rake/gempackagetask'
3 | require 'rubygems/specification'
4 | require 'date'
5 | require 'rspec/core/rake_task'
6 |
7 | GEM = 'kafka-rb'
8 | GEM_NAME = 'Kafka Client'
9 | GEM_VERSION = '0.0.5'
10 | AUTHORS = ['Alejandro Crosa']
11 | EMAIL = "alejandrocrosa@gmail.com"
12 | HOMEPAGE = "http://github.com/acrosa/kafka-rb"
13 | SUMMARY = "A Ruby client for the Kafka distributed publish/subscribe messaging service"
14 | DESCRIPTION = "kafka-rb allows you to produce and consume messages using the Kafka distributed publish/subscribe messaging service."
15 |
16 | spec = Gem::Specification.new do |s|
17 | s.name = GEM
18 | s.version = GEM_VERSION
19 | s.platform = Gem::Platform::RUBY
20 | s.has_rdoc = true
21 | s.extra_rdoc_files = ["LICENSE"]
22 | s.summary = SUMMARY
23 | s.description = DESCRIPTION
24 | s.authors = AUTHORS
25 | s.email = EMAIL
26 | s.homepage = HOMEPAGE
27 | s.add_development_dependency "rspec"
28 | s.require_path = 'lib'
29 | s.autorequire = GEM
30 | s.files = %w(LICENSE README.md Rakefile) + Dir.glob("{lib,tasks,spec}/**/*")
31 | end
32 |
33 | task :default => :spec
34 |
35 | desc "Run specs"
36 | RSpec::Core::RakeTask.new do |t|
37 | t.pattern = FileList['spec/**/*_spec.rb']
38 | t.rspec_opts = %w(-fs --color)
39 | end
40 |
41 | Rake::GemPackageTask.new(spec) do |pkg|
42 | pkg.gem_spec = spec
43 | end
44 |
45 | desc "install the gem locally"
46 | task :install => [:package] do
47 | sh %{sudo gem install pkg/#{GEM}-#{GEM_VERSION}}
48 | end
49 |
50 | desc "create a gemspec file"
51 | task :make_spec do
52 | File.open("#{GEM}.gemspec", "w") do |file|
53 | file.puts spec.to_ruby
54 | end
55 | end
56 |
57 | desc "Run all examples with RCov"
58 | RSpec::Core::RakeTask.new(:rcov) do |t|
59 | t.pattern = FileList['spec/**/*_spec.rb']
60 | t.rcov = true
61 | end
62 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/integration/ProducerConsumerTestHarness.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.integration
18 |
19 | import kafka.consumer.SimpleConsumer
20 | import org.scalatest.junit.JUnit3Suite
21 | import java.util.Properties
22 | import kafka.producer.{SyncProducerConfig, SyncProducer}
23 |
24 | trait ProducerConsumerTestHarness extends JUnit3Suite {
25 |
26 | val port: Int
27 | val host = "localhost"
28 | var producer: SyncProducer = null
29 | var consumer: SimpleConsumer = null
30 |
31 | override def setUp() {
32 | val props = new Properties()
33 | props.put("host", host)
34 | props.put("port", port.toString)
35 | props.put("buffer.size", "65536")
36 | props.put("connect.timeout.ms", "100000")
37 | props.put("reconnect.interval", "10000")
38 | producer = new SyncProducer(new SyncProducerConfig(props))
39 | consumer = new SimpleConsumer(host,
40 | port,
41 | 1000000,
42 | 64*1024)
43 | super.setUp
44 | }
45 |
46 | override def tearDown() {
47 | super.tearDown
48 | producer.close()
49 | consumer.close()
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/DumpLogSegments.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import java.io._
20 | import kafka.message._
21 | import kafka.utils._
22 |
23 | object DumpLogSegments {
24 |
25 | def main(args: Array[String]) {
26 | var isNoPrint = false;
27 | for(arg <- args)
28 | if ("-noprint".compareToIgnoreCase(arg) == 0)
29 | isNoPrint = true;
30 |
31 | for(arg <- args) {
32 | if (! ("-noprint".compareToIgnoreCase(arg) == 0) ) {
33 | val file = new File(arg)
34 | println("Dumping " + file)
35 | var offset = file.getName().split("\\.")(0).toLong
36 | println("Starting offset: " + offset)
37 | val messageSet = new FileMessageSet(file, false)
38 | for(messageAndOffset <- messageSet) {
39 | println("----------------------------------------------")
40 | if (messageAndOffset.message.isValid)
41 | println("offset:\t" + offset)
42 | else
43 | println("offset:\t %d \t invalid".format(offset))
44 | if (!isNoPrint)
45 | println("payload:\t" + Utils.toString(messageAndOffset.message.payload, "UTF-8"))
46 | offset += messageAndOffset.offset
47 | }
48 | }
49 | }
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/network/BoundedByteBufferSend.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.network
18 |
19 | import java.nio._
20 | import java.nio.channels._
21 | import kafka.utils._
22 |
23 | @nonthreadsafe
24 | private[kafka] class BoundedByteBufferSend(val buffer: ByteBuffer) extends Send {
25 |
26 | private var sizeBuffer = ByteBuffer.allocate(4)
27 |
28 | sizeBuffer.putInt(buffer.limit)
29 | sizeBuffer.rewind()
30 |
31 | var complete: Boolean = false
32 |
33 | def this(size: Int) = this(ByteBuffer.allocate(size))
34 |
35 | def this(request: Request) = {
36 | this(request.sizeInBytes + 2)
37 | buffer.putShort(request.id)
38 | request.writeTo(buffer)
39 | buffer.rewind()
40 | }
41 |
42 | def writeTo(channel: WritableByteChannel): Int = {
43 | expectIncomplete()
44 | var written = 0
45 | // try to write the size if we haven't already
46 | if(sizeBuffer.hasRemaining)
47 | written += channel.write(sizeBuffer)
48 | // try to write the actual buffer itself
49 | if(!sizeBuffer.hasRemaining && buffer.hasRemaining)
50 | written += channel.write(buffer)
51 | // if we are done, mark it off
52 | if(!buffer.hasRemaining)
53 | complete = true
54 |
55 | written
56 | }
57 |
58 | }
59 |
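The class frames a request as [4-byte size][2-byte request id][payload], which is why the request constructor allocates sizeInBytes + 2. A sketch of driving it against a non-blocking channel (the channel itself is assumed):

    val send = new BoundedByteBufferSend(new kafka.api.FetchRequest("test", 0, 0L, 1024))
    while (!send.complete)
      send.writeTo(channel) // each call may flush only part of the size and body buffers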
--------------------------------------------------------------------------------
/clients/go/src/publisher.go:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 NeuStar, Inc.
3 | * All rights reserved.
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | *
17 | * NeuStar, the Neustar logo and related names and logos are registered
18 | * trademarks, service marks or tradenames of NeuStar, Inc. All other
19 | * product names, company names, marks, logos and symbols may be trademarks
20 | * of their respective owners.
21 | */
22 |
23 | package kafka
24 |
25 | import (
26 | "container/list"
27 | "os"
28 | )
29 |
30 |
31 | type BrokerPublisher struct {
32 | broker *Broker
33 | }
34 |
35 | func NewBrokerPublisher(hostname string, topic string, partition int) *BrokerPublisher {
36 | return &BrokerPublisher{broker: newBroker(hostname, topic, partition)}
37 | }
38 |
39 |
40 | func (b *BrokerPublisher) Publish(message *Message) (int, os.Error) {
41 | messages := list.New()
42 | messages.PushBack(message)
43 | return b.BatchPublish(messages)
44 | }
45 |
46 | func (b *BrokerPublisher) BatchPublish(messages *list.List) (int, os.Error) {
47 | conn, err := b.broker.connect()
48 | if err != nil {
49 | return -1, err
50 | }
51 | defer conn.Close()
52 | // TODO: MULTIPRODUCE
53 | num, err := conn.Write(b.broker.EncodePublishRequest(messages))
54 | if err != nil {
55 | return -1, err
56 | }
57 |
58 | return num, err
59 | }
60 |
--------------------------------------------------------------------------------
/core/src/test/scala/unit/kafka/javaapi/integration/ProducerConsumerTestHarness.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.javaapi.integration
18 |
19 | import org.scalatest.junit.JUnit3Suite
20 | import java.util.Properties
21 | import kafka.producer.SyncProducerConfig
22 | import kafka.javaapi.producer.SyncProducer
23 | import kafka.javaapi.consumer.SimpleConsumer
24 |
25 | trait ProducerConsumerTestHarness extends JUnit3Suite {
26 |
27 | val port: Int
28 | val host = "localhost"
29 | var producer: SyncProducer = null
30 | var consumer: SimpleConsumer = null
31 |
32 | override def setUp() {
33 | val props = new Properties()
34 | props.put("host", host)
35 | props.put("port", port.toString)
36 | props.put("buffer.size", "65536")
37 | props.put("connect.timeout.ms", "100000")
38 | props.put("reconnect.interval", "10000")
39 | producer = new SyncProducer(new SyncProducerConfig(props))
40 | consumer = new SimpleConsumer(host,
41 | port,
42 | 1000000,
43 | 64*1024)
44 | super.setUp
45 | }
46 |
47 | override def tearDown() {
48 | super.tearDown
49 | producer.close()
50 | consumer.close()
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/clients/ruby/README.md:
--------------------------------------------------------------------------------
1 | # kafka-rb
2 | kafka-rb allows you to produce and consume messages using the Kafka distributed publish/subscribe messaging service.
3 |
4 | ## Requirements
5 | You need to have access to your Kafka instance and be able to connect through TCP. You can obtain a copy and instructions on how to setup kafka at https://github.com/kafka-dev/kafka
6 |
7 | ## Installation
8 | sudo gem install kafka-rb
9 |
10 | (the code works fine with JRuby, Ruby 1.8.x and Ruby 1.9.x)
11 |
12 | ## Usage
13 |
14 | ### Sending a simple message
15 |
16 | require 'kafka'
17 | producer = Kafka::Producer.new
18 | message = Kafka::Message.new("some random message content")
19 | producer.send(message)
20 |
21 | ### Sending a sequence of messages
22 |
23 | require 'kafka'
24 | producer = Kafka::Producer.new
25 | message1 = Kafka::Message.new("some random message content")
26 | message2 = Kafka::Message.new("some more content")
27 | producer.send([message1, message2])
28 |
29 | ### Batching a bunch of messages using the block syntax
30 |
31 | require 'kafka'
32 | producer = Kafka::Producer.new
33 | producer.batch do |messages|
34 | puts "Batching a send of multiple messages.."
35 | messages << Kafka::Message.new("first message to send")
36 | messages << Kafka::Message.new("second message to send")
37 | end
38 |
39 | * the messages are sent all at once, after the block finishes
40 |
41 | ### Consuming messages one by one
42 |
43 | require 'kafka'
44 | consumer = Kafka::Consumer.new
45 | messages = consumer.consume
46 |
47 |
48 | ### Consuming messages using a block loop
49 |
50 | require 'kafka'
51 | consumer = Kafka::Consumer.new
52 | consumer.loop do |messages|
53 | puts "Received"
54 | puts messages
55 | end
56 |
57 |
58 | ## Contact for questions
59 |
60 | alejandrocrosa at(@) gmail.com
61 |
62 | http://twitter.com/alejandrocrosa
63 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/utils/Pool.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.utils
18 |
19 | import java.util.ArrayList
20 | import java.util.concurrent._
21 | import collection.JavaConversions
22 |
23 | class Pool[K,V] extends Iterable[(K, V)] {
24 |
25 | private val pool = new ConcurrentHashMap[K, V]
26 |
27 | def this(m: collection.Map[K, V]) {
28 | this()
29 | for((k,v) <- m.elements)
30 | pool.put(k, v)
31 | }
32 |
33 | def put(k: K, v: V) = pool.put(k, v)
34 |
35 | def putIfNotExists(k: K, v: V) = pool.putIfAbsent(k, v)
36 |
37 | def contains(id: K) = pool.containsKey(id)
38 |
39 | def get(key: K): V = pool.get(key)
40 |
41 | def remove(key: K): V = pool.remove(key)
42 |
43 | def keys = JavaConversions.asSet(pool.keySet())
44 |
45 | def values: Iterable[V] =
46 | JavaConversions.asIterable(new ArrayList[V](pool.values()))
47 |
48 | def clear: Unit = pool.clear()
49 |
50 | override def size = pool.size
51 |
52 | override def iterator = new Iterator[(K,V)]() {
53 |
54 | private val iter = pool.entrySet.iterator
55 |
56 | def hasNext: Boolean = iter.hasNext
57 |
58 | def next: (K, V) = {
59 | val n = iter.next
60 | (n.getKey, n.getValue)
61 | }
62 |
63 | }
64 |
65 | }
66 |
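Pool is a thin Iterable facade over ConcurrentHashMap; a usage sketch:

    val pool = new Pool[String, Int]
    pool.put("a", 1)
    pool.putIfNotExists("a", 2) // keeps the existing value and returns it
    for ((k, v) <- pool)
      println(k + " -> " + v) // a -> 1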
--------------------------------------------------------------------------------
/clients/clojure/README.md:
--------------------------------------------------------------------------------
1 | # kafka-clj
2 | kafka-clj provides a producer and consumer that supports a basic fetch API as well as a managed sequence interface. Multifetch is not supported yet.
3 |
4 | ## Quick Start
5 |
6 | Download and start [Kafka](http://sna-projects.com/kafka/quickstart.php).
7 |
8 | Pull dependencies with [Leiningen](https://github.com/technomancy/leiningen):
9 |
10 | $ lein deps
11 |
12 | And run the example:
13 |
14 | $ lein run-example
15 |
16 | ## Usage
17 |
18 | ### Sending messages
19 |
20 | (with-open [p (producer "localhost" 9092)]
21 | (produce p "test" 0 "Message 1")
22 | (produce p "test" 0 ["Message 2" "Message 3"]))
23 |
24 | ### Simple consumer
25 |
26 | (with-open [c (consumer "localhost" 9092)]
27 | (let [offs (offsets c "test" 0 -1 10)]
28 | (consume c "test" 0 (last offs) 1000000)))
29 |
30 | ### Consumer sequence
31 |
32 | (with-open [c (consumer "localhost" 9092)]
33 | (doseq [m (consume-seq c "test" 0 {:blocking true})]
34 | (println m)))
35 |
36 | The following options are supported:
37 |
38 | * :blocking _boolean_ default false; the sequence returns nil the first time fetch does not return new messages. If set to true, the sequence retries the fetch :repeat-count times, every :repeat-timeout milliseconds.
39 | * :repeat-count _int_ number of attempts to fetch new messages before terminating, default 10.
40 | * :repeat-timeout _int_ wait time in milliseconds between fetch attempts, default 1000.
41 | * :offset _long_ initialized to highest offset if not provided.
42 | * :max-size _int_ max result message size, default 1000000.
43 |
44 | ### Serialization
45 |
46 | Load the namespace _kafka.print_ for basic print_dup/read-string serialization or _kafka.serializable_ for Java object serialization. For custom serialization, implement the Pack and Unpack protocols.
47 |
48 |
49 | Questions? Email adam.smyczek \_at\_ gmail.com.
50 |
51 |
--------------------------------------------------------------------------------
/system_test/producer_perf/bin/run-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | num_messages=2000000
4 | message_size=200
5 |
6 | base_dir=$(dirname $0)/..
7 |
8 | rm -rf /tmp/zookeeper
9 | rm -rf /tmp/kafka-logs
10 |
11 | echo "start the servers ..."
12 | $base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties > $base_dir/zookeeper.log 2>&1 &
13 | $base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties > $base_dir/kafka.log 2>&1 &
14 |
15 | sleep 4
16 | echo "start producing $num_messages messages ..."
17 | $base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topic test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 --async --delay-btw-batch-ms 10
18 |
19 | echo "wait for data to be persisted"
20 | cur_offset="-1"
21 | quit=0
22 | while [ $quit -eq 0 ]
23 | do
24 | sleep 2
25 | target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
26 | if [ $target_size -eq $cur_offset ]
27 | then
28 | quit=1
29 | fi
30 | cur_offset=$target_size
31 | done
32 |
33 | sleep 2
34 | actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
35 | msg_full_size=`expr $message_size + 10`
36 | expected_size=`expr $num_messages \* $msg_full_size`
37 |
38 | if [ $actual_size != $expected_size ]
39 | then
40 | echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!"
41 | else
42 | echo "test passed"
43 | fi
44 |
45 | ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
46 | sleep 2
47 | ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
48 |
49 |
--------------------------------------------------------------------------------
/clients/ruby/spec/message_spec.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/spec_helper'
2 |
3 | describe Message do
4 |
5 | before(:each) do
6 | @message = Message.new
7 | end
8 |
9 | describe "Kafka Message" do
10 | it "should have a default magic number" do
11 | Message::MAGIC_IDENTIFIER_DEFAULT.should eql(0)
12 | end
13 |
14 | it "should have a magic field, a checksum and a payload" do
15 | [:magic, :checksum, :payload].each do |field|
16 | @message.should respond_to(field.to_sym)
17 | end
18 | end
19 |
20 | it "should set a default value of zero" do
21 | @message.magic.should eql(Kafka::Message::MAGIC_IDENTIFIER_DEFAULT)
22 | end
23 |
24 | it "should allow to set a custom magic number" do
25 | @message = Message.new("ale", 1)
26 | @message.magic.should eql(1)
27 | end
28 |
29 | it "should calculate the checksum (crc32 of a given message)" do
30 | @message.payload = "ale"
31 | @message.calculate_checksum.should eql(1120192889)
32 | @message.payload = "alejandro"
33 | @message.calculate_checksum.should eql(2865078607)
34 | end
35 |
36 | it "should say if the message is valid using the crc32 signature" do
37 | @message.payload = "alejandro"
38 | @message.checksum = 2865078607
39 | @message.valid?.should eql(true)
40 | @message.checksum = 0
41 | @message.valid?.should eql(false)
42 | @message = Message.new("alejandro", 0, 66666666) # 66666666 is a funny checksum
43 | @message.valid?.should eql(false)
44 | end
45 |
46 | it "should parse a message from bytes" do
47 | bytes = [12].pack("N") + [0].pack("C") + [1120192889].pack("N") + "ale"
48 | message = Kafka::Message.parse_from(bytes)
49 | message.valid?.should eql(true)
50 | message.magic.should eql(0)
51 | message.checksum.should eql(1120192889)
52 | message.payload.should eql("ale")
53 | end
54 | end
55 | end
56 |
--------------------------------------------------------------------------------
/system_test/producer_perf/bin/run-compression-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | num_messages=2000000
4 | message_size=200
5 |
6 | base_dir=$(dirname $0)/..
7 |
8 | rm -rf /tmp/zookeeper
9 | rm -rf /tmp/kafka-logs
10 |
11 | echo "start the servers ..."
12 | $base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties > $base_dir/zookeeper.log 2>&1 &
13 | $base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties > $base_dir/kafka.log 2>&1 &
14 |
15 | sleep 4
16 | echo "start producing $num_messages messages ..."
17 | $base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topic test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 --async --delay-btw-batch-ms 10 --compression-codec 1
18 |
19 | echo "wait for data to be persisted"
20 | cur_offset="-1"
21 | quit=0
22 | while [ $quit -eq 0 ]
23 | do
24 | sleep 2
25 | target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
26 | if [ $target_size -eq $cur_offset ]
27 | then
28 | quit=1
29 | fi
30 | cur_offset=$target_size
31 | done
32 |
33 | sleep 2
34 | actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
35 | num_batches=`expr $num_messages \/ $message_size`
36 | expected_size=`expr $num_batches \* 262`
37 |
38 | if [ $actual_size != $expected_size ]
39 | then
40 | echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!"
41 | else
42 | echo "test passed"
43 | fi
44 |
45 | ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
46 | sleep 2
47 | ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
48 |
49 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/api/MultiFetchRequest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.api
18 |
19 | import java.nio._
20 | import kafka.network._
21 | import kafka.utils._
22 | import kafka.api._
23 |
24 | object MultiFetchRequest {
25 | def readFrom(buffer: ByteBuffer): MultiFetchRequest = {
26 | val count = buffer.getShort
27 | val fetches = new Array[FetchRequest](count)
28 | for(i <- 0 until fetches.length)
29 | fetches(i) = FetchRequest.readFrom(buffer)
30 | new MultiFetchRequest(fetches)
31 | }
32 | }
33 |
34 | class MultiFetchRequest(val fetches: Array[FetchRequest]) extends Request(RequestKeys.MultiFetch) {
35 | def writeTo(buffer: ByteBuffer) {
36 | if(fetches.length > Short.MaxValue)
37 | throw new IllegalArgumentException("Number of requests in MultiFetchRequest exceeds " + Short.MaxValue + ".")
38 | buffer.putShort(fetches.length.toShort)
39 | for(fetch <- fetches)
40 | fetch.writeTo(buffer)
41 | }
42 |
43 | def sizeInBytes: Int = {
44 | var size = 2
45 | for(fetch <- fetches)
46 | size += fetch.sizeInBytes
47 | size
48 | }
49 |
50 |
51 | override def toString(): String = {
52 | val buffer = new StringBuffer
53 | for(fetch <- fetches) {
54 | buffer.append(fetch.toString)
55 | buffer.append(",")
56 | }
57 | buffer.toString
58 | }
59 | }
60 |
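A sketch of fetching from two partitions of one topic in a single request; the wire size is the 2-byte fetch count plus the nested requests:

    val fetches = Array(new FetchRequest("events", 0, 0L, 64 * 1024),
                        new FetchRequest("events", 1, 0L, 64 * 1024))
    val multi = new MultiFetchRequest(fetches)
    // multi.sizeInBytes == 2 + fetches.map(_.sizeInBytes).sum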
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/api/MultiProducerRequest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.api
18 |
19 | import java.nio.ByteBuffer
20 | import kafka.network.Request
21 |
22 | object MultiProducerRequest {
23 | def readFrom(buffer: ByteBuffer): MultiProducerRequest = {
24 | val count = buffer.getShort
25 | val produces = new Array[ProducerRequest](count)
26 | for(i <- 0 until produces.length)
27 | produces(i) = ProducerRequest.readFrom(buffer)
28 | new MultiProducerRequest(produces)
29 | }
30 | }
31 |
32 | class MultiProducerRequest(val produces: Array[ProducerRequest]) extends Request(RequestKeys.MultiProduce) {
33 | def writeTo(buffer: ByteBuffer) {
34 | if(produces.length > Short.MaxValue)
35 | throw new IllegalArgumentException("Number of requests in MultiProducerRequest exceeds " + Short.MaxValue + ".")
36 | buffer.putShort(produces.length.toShort)
37 | for(produce <- produces)
38 | produce.writeTo(buffer)
39 | }
40 |
41 | def sizeInBytes: Int = {
42 | var size = 2
43 | for(produce <- produces)
44 | size += produce.sizeInBytes
45 | size
46 | }
47 |
48 | override def toString(): String = {
49 | val buffer = new StringBuffer
50 | for(produce <- produces) {
51 | buffer.append(produce.toString)
52 | buffer.append(",")
53 | }
54 | buffer.toString
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/javaapi/ProducerRequest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.javaapi
17 |
18 | import kafka.network.Request
19 | import kafka.api.RequestKeys
20 | import java.nio.ByteBuffer
21 |
22 | class ProducerRequest(val topic: String,
23 | val partition: Int,
24 | val messages: kafka.javaapi.message.ByteBufferMessageSet) extends Request(RequestKeys.Produce) {
25 | import Implicits._
26 | private val underlying = new kafka.api.ProducerRequest(topic, partition, messages)
27 |
28 | def writeTo(buffer: ByteBuffer) { underlying.writeTo(buffer) }
29 |
30 | def sizeInBytes(): Int = underlying.sizeInBytes
31 |
32 | def getTranslatedPartition(randomSelector: String => Int): Int =
33 | underlying.getTranslatedPartition(randomSelector)
34 |
35 | override def toString: String =
36 | underlying.toString
37 |
38 | override def equals(other: Any): Boolean = {
39 | other match {
40 | case that: ProducerRequest =>
41 | (that canEqual this) && topic == that.topic && partition == that.partition &&
42 | messages.equals(that.messages)
43 | case _ => false
44 | }
45 | }
46 |
47 | def canEqual(other: Any): Boolean = other.isInstanceOf[ProducerRequest]
48 |
49 | override def hashCode: Int = 31 + (17 * partition) + topic.hashCode + messages.hashCode
50 |
51 | }
52 |
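Editor's note: the equals/canEqual pairing above follows the standard Scala recipe for keeping equality symmetric when subclasses add state. A self-contained illustration using invented Point/ColoredPoint classes (not part of the codebase):

class Point(val x: Int) {
  def canEqual(other: Any): Boolean = other.isInstanceOf[Point]
  override def equals(other: Any): Boolean = other match {
    case that: Point => (that canEqual this) && x == that.x
    case _ => false
  }
  override def hashCode: Int = x
}

class ColoredPoint(x: Int, val color: String) extends Point(x) {
  override def canEqual(other: Any): Boolean = other.isInstanceOf[ColoredPoint]
  override def equals(other: Any): Boolean = other match {
    case that: ColoredPoint => (that canEqual this) && super.equals(that) && color == that.color
    case _ => false
  }
  override def hashCode: Int = super.hashCode * 31 + color.hashCode
}

object CanEqualSketch {
  def main(args: Array[String]): Unit = {
    val p = new Point(1)
    val cp = new ColoredPoint(1, "red")
    println(p == cp)   // false: a ColoredPoint refuses equality with a bare Point
    println(cp == p)   // false as well, so equality stays symmetric
  }
}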
--------------------------------------------------------------------------------
/perf/report-html/report.html:
--------------------------------------------------------------------------------
(HTML markup stripped during extraction; the only recoverable content is the page title: "Kafka performance report".)
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/javaapi/message/MessageSet.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package kafka.javaapi.message
18 |
19 | import java.nio.channels.WritableByteChannel
20 | import kafka.message.{MessageAndOffset, InvalidMessageException, Message}
21 |
22 | /**
23 | * A set of messages. A message set has a fixed serialized form, though the container
24 | * for the bytes could be either in-memory or on disk. The format of each message is
25 | * as follows:
26 | * 4 byte size containing an integer N
27 | * N message bytes as described in the message class
28 | */
29 | abstract class MessageSet extends java.lang.Iterable[MessageAndOffset] {
30 |
31 | /**
32 | * Provides an iterator over the messages in this set
33 | */
34 | def iterator: java.util.Iterator[MessageAndOffset]
35 |
36 | /**
37 | * Gives the total size of this message set in bytes
38 | */
39 | def sizeInBytes: Long
40 |
41 | /**
42 | * Validate the checksum of all the messages in the set. Throws an InvalidMessageException if the checksum doesn't
43 | * match the payload for any message.
44 | */
45 | def validate(): Unit = {
46 | val thisIterator = this.iterator
47 | while(thisIterator.hasNext) {
48 | val messageAndOffset = thisIterator.next
49 | if(!messageAndOffset.message.isValid)
50 | throw new InvalidMessageException
51 | }
52 | }
53 | }
54 |
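Editor's note: a hedged usage sketch of validate() as defined above. The ByteBufferMessageSet constructor taking a codec and a java.util.List, and the NoCompressionCodec object, are assumptions about this era of the codebase; verify their actual signatures before relying on them:

import kafka.javaapi.message.ByteBufferMessageSet
import kafka.message.{InvalidMessageException, Message, NoCompressionCodec}

object ValidateSketch {
  def main(args: Array[String]): Unit = {
    // Assumed (codec, java.util.List[Message]) constructor; sketch only.
    val set = new ByteBufferMessageSet(NoCompressionCodec,
      java.util.Arrays.asList(new Message("hello".getBytes)))
    try {
      set.validate()   // throws InvalidMessageException on any checksum mismatch
      println("message set of " + set.sizeInBytes + " bytes is valid")
    } catch {
      case e: InvalidMessageException => println("corrupt message detected")
    }
  }
}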
--------------------------------------------------------------------------------
/core/src/main/scala/kafka/javaapi/producer/SyncProducer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 LinkedIn
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package kafka.javaapi.producer
17 |
18 | import kafka.producer.SyncProducerConfig
19 | import kafka.javaapi.message.ByteBufferMessageSet
20 |
21 | class SyncProducer(syncProducer: kafka.producer.SyncProducer) {
22 |
23 | def this(config: SyncProducerConfig) = this(new kafka.producer.SyncProducer(config))
24 |
25 | val underlying = syncProducer
26 |
27 | def send(topic: String, partition: Int, messages: ByteBufferMessageSet) {
28 | import kafka.javaapi.Implicits._
29 | underlying.send(topic, partition, messages)
30 | }
31 |
32 | def send(topic: String, messages: ByteBufferMessageSet): Unit = send(topic,
33 | kafka.api.ProducerRequest.RandomPartition,
34 | messages)
35 |
36 | def multiSend(produces: Array[kafka.javaapi.ProducerRequest]) {
37 | import kafka.javaapi.Implicits._
38 | val produceRequests = new Array[kafka.api.ProducerRequest](produces.length)
39 | for(i <- 0 until produces.length)
40 | produceRequests(i) = new kafka.api.ProducerRequest(produces(i).topic, produces(i).partition, produces(i).messages)
41 | underlying.multiSend(produceRequests)
42 | }
43 |
44 | def close() {
45 | underlying.close
46 | }
47 | }
48 |
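Editor's note: a hedged end-to-end sketch of the wrapper above: configure a SyncProducer against a local broker and send one message set via the three-argument send shown earlier. The "host"/"port" property names and the ByteBufferMessageSet constructor are assumptions about this era's API, not verified here:

import java.util.Properties
import kafka.javaapi.message.ByteBufferMessageSet
import kafka.javaapi.producer.SyncProducer
import kafka.message.{Message, NoCompressionCodec}
import kafka.producer.SyncProducerConfig

object SyncProducerSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties
    props.put("host", "localhost")   // assumed property names; check SyncProducerConfig
    props.put("port", "9092")
    val producer = new SyncProducer(new SyncProducerConfig(props))
    val messages = new ByteBufferMessageSet(NoCompressionCodec,
      java.util.Arrays.asList(new Message("hello".getBytes)))
    producer.send("test01", 0, messages)   // topic, partition, payload
    producer.close()
  }
}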
--------------------------------------------------------------------------------