├── .gitignore
├── LICENSE
├── README.md
├── bin
│   ├── kafka-endpoint-log4j.properties
│   ├── kafka-http-endpoint.sh
│   ├── kafka-link-http-consumer.sh
│   ├── kafka-link-http-producer.sh
│   └── kafka-run-class.sh
├── lib
│   └── sbt-launch.jar
├── project
│   └── Build.scala
├── sbt
├── scripts
│   ├── benchmark-httperf.sh
│   ├── dev-producer.sh
│   ├── generage.py
│   └── produce.sh
└── src
    └── main
        └── scala
            └── com
                └── rackspace
                    └── kafka
                        └── http
                            ├── Configurator.scala
                            ├── ConsumerServlet.scala
                            ├── ProducerServlet.scala
                            ├── ReplyFormatter.scala
                            ├── RestServer.scala
                            └── ServletErrorHandler.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | *classes
3 | target/
4 | lib_managed/
5 | src_managed/
6 | project/boot/
7 | project/plugins/project/
8 | project/sbt_project_definition.iml
9 | .idea
10 | .svn
11 | .classpath
12 | *~
13 | *#
14 | .#*
15 | rat.out
16 | TAGS
17 | *.json
18 | *.bson
19 | *.log
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | **WARNING**: This project is obsolete. Please consider switching to either [Kafka-Pixy](https://github.com/mailgun/kafka-pixy) by [Mailgun](http://www.mailgun.com) or [Kafka REST Proxy](http://docs.confluent.io/1.0/kafka-rest/docs/index.html) by [Confluent](http://www.confluent.io)
2 |
3 | Kafka HTTP endpoint
4 | -------------------
5 |
6 | Rationale
7 | ---------
 8 | Kafka's high-level Producer and Consumer APIs are very hard to implement correctly.
 9 | This REST endpoint gives access to the native Scala high-level consumer and producer APIs.
10 |
11 |
12 | Producer Endpoint API
13 | ----------------------
14 |
15 | The producer endpoint accepts batches of messages in JSON or BSON format and publishes them to the topic of choice.
16 |
17 | ```bash
18 | curl -X POST -H "Content-Type: application/json" \
19 |      -d '{"messages": [{"key": "key", "value":{"val1":"hello"}}]}' \
20 |      http://localhost:8090/topics/messages
21 | ```
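22 | 
23 | A BSON batch can be posted the same way with `Content-Type: application/bson`; for example (the payload file name here is illustrative, generated with `scripts/generage.py`):
24 | 
25 | ```bash
26 | python scripts/generage.py > batch.bson
27 | curl -X POST -H "Content-Type: application/bson" --data-binary @batch.bson http://localhost:8090/topics/messages
28 | ```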
29 | 
30 | The endpoint can be configured to be synchronous or asynchronous.
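31 | 
32 | For example, a synchronous producer endpoint could be started like this (flags as defined in `Configurator.scala`; the broker address is illustrative):
33 | 
34 | ```bash
35 | ./bin/kafka-http-endpoint.sh producer --broker-list localhost:9092 --sync --request-required-acks 1
36 | ```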
37 | 
38 | 
39 | Consumer Endpoint API
40 | ----------------------
41 | 
42 | The consumer endpoint uses long polling to consume messages in batches in JSON or BSON format.
43 | 
44 | Example request:
45 | 
46 | ```bash
47 | curl -H "Accept:application/json" -v "http://localhost:8091?batchSize=10"
48 | ```
49 | 
50 | The request will block until:
51 | 
52 | * a timeout occurs - in this case the messages consumed during the period are returned, or
53 | * the batch of 10 messages has been consumed.
54 | 
55 | Example response:
56 | 
57 | ```json
58 | {"messages": [{"key": "key" , "value": {"a" : "b"}}, {"key": "key1" , "value": {"c" : "d"}}]}
59 | ```
60 | 
61 | Timeouts and consumer groups are configured per endpoint. It is also possible to commit offsets
62 | explicitly by issuing a POST request to the endpoint:
63 | 
64 | ```bash
65 | curl -X POST http://localhost:8091
66 | ```
67 | 
68 | Access to the consumer endpoint is serialized, so only one client should talk to a given endpoint.
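69 | 
70 | A consumer endpoint could be started like this (flags as defined in `Configurator.scala`; the topic, group, and zookeeper address are illustrative):
71 | 
72 | ```bash
73 | ./bin/kafka-http-endpoint.sh consumer --topic messages --group mygroup --zookeeper localhost:2181 --consumer-timeout-ms 5000
74 | ```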
75 | 
76 | Development
77 | -----------
78 | 
79 | * Dependencies:
80 |   jdk1.6.0_45
81 | 
82 | * Build and release
83 | 
84 | ```bash
85 | ./sbt release
86 | ```
87 | 
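88 | The release task in `project/Build.scala` assembles a deployable directory with jars, config, and scripts under `target/RELEASE/<name>_<scalaVersion>-<version>`, for example:
89 | 
90 | ```bash
91 | ls target/RELEASE/KafkaHttp_2.9.2-0.0.1/bin
92 | ```
93 | 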
--------------------------------------------------------------------------------
/bin/kafka-endpoint-log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | log4j.rootLogger=INFO, stderr
16 |
17 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender
18 | log4j.appender.stderr.target=System.err
19 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
20 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
21 |
22 | log4j.logger.org.eclipse.jetty=DEBUG, stderr
23 |
--------------------------------------------------------------------------------
/bin/kafka-http-endpoint.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | base_dir=$(dirname $0)
 3 | export KAFKA_OPTS="-Xmx2048M -server -Dcom.sun.management.jmxremote -Dlog4j.configuration=file:$base_dir/kafka-endpoint-log4j.properties -Dorg.eclipse.jetty.io.ChannelEndPoint.LEVEL=ALL"
 4 | 
 5 | # Add the endpoint jars next to the bin directory to the classpath
 6 | for file in $base_dir/../*.jar;
 7 | do
 8 |   CLASSPATH=$CLASSPATH:$file
 9 | done
10 | export CLASSPATH="$CLASSPATH"
11 | 
12 | $base_dir/kafka-run-class.sh com.rackspace.kafka.http.RestServer "$@"
13 | 
--------------------------------------------------------------------------------
/bin/kafka-link-http-consumer.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | TOPIC=messages
 3 | ZOOKEEPER_URLS=${ZK_PORT_2181_TCP_ADDR}:${ZK_PORT_2181_TCP_PORT}
 4 | GROUP=hammer
 5 | 
 6 | base_dir=$(dirname $0)
 7 | $base_dir/kafka-http-endpoint.sh consumer --group $GROUP --topic $TOPIC --zookeeper $ZOOKEEPER_URLS --consumer-timeout-ms 5000 --statsd-host $STATSD_PORT_8125_TCP_ADDR --statsd-port $STATSD_PORT_8125_TCP_PORT
 8 | 
--------------------------------------------------------------------------------
/bin/kafka-link-http-producer.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | BROKER_URLS=${KF_PORT_9092_TCP_ADDR}:${KF_PORT_9092_TCP_PORT}
 3 | 
 4 | base_dir=$(dirname $0)
 5 | $base_dir/kafka-http-endpoint.sh producer --broker-list $BROKER_URLS --sync --request-required-acks 1 --statsd-host $STATSD_PORT_8125_UDP_ADDR --statsd-port $STATSD_PORT_8125_UDP_PORT --statsd-prefix kafka.http.producer
 6 | 
--------------------------------------------------------------------------------
/bin/kafka-run-class.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ $# -lt 1 ];
18 | then
19 | echo "USAGE: $0 classname [opts]"
20 | exit 1
21 | fi
22 |
23 | base_dir=$(dirname $0)/..
24 |
25 | SCALA_VERSION=2.9.2
26 |
27 | # classpath addition for release
28 | for file in $base_dir/libs/*.jar;
29 | do
30 | CLASSPATH=$CLASSPATH:$file
31 | done
32 |
33 | for file in $base_dir/kafka*.jar;
34 | do
35 | CLASSPATH=$CLASSPATH:$file
36 | done
37 |
38 | if [ -z "$KAFKA_JMX_OPTS" ]; then
39 | KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
40 | fi
41 |
42 | if [ -z "$KAFKA_OPTS" ]; then
43 | KAFKA_OPTS="-Xmx2048M -server -Dlog4j.configuration=file:$base_dir/config/log4j.properties"
44 | fi
45 |
46 | if [ -n "$JMX_PORT" ]; then
47 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
48 | fi
49 |
50 | if [ -z "$JAVA_HOME" ]; then
51 | JAVA="java"
52 | else
53 | JAVA="$JAVA_HOME/bin/java"
54 | fi
55 | $JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH "$@"
56 |
57 | exitval=$?
58 |
59 | if [ $exitval -eq "1" ] ; then
60 | $JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH "$@" >& exception.txt
61 | exception=`cat exception.txt`
62 | noBuildMessage='Please build the project using sbt. Documentation is available at http://kafka.apache.org/'
63 | pattern="(Could not find or load main class)|(java\.lang\.NoClassDefFoundError)"
64 | match=`echo $exception | grep -E "$pattern"`
65 | if [[ -n "$match" ]]; then
66 | echo $noBuildMessage
67 | fi
68 | rm exception.txt
69 | fi
70 |
71 |
72 |
--------------------------------------------------------------------------------
/lib/sbt-launch.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mailgun/kafka-http/378b309cd9a7ea4892b57bba19f6361657c74220/lib/sbt-launch.jar
--------------------------------------------------------------------------------
/project/Build.scala:
--------------------------------------------------------------------------------
1 | /**
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | import sbt._
19 | import Keys._
20 | import Process._
21 |
22 | import scala.xml.{Node, Elem}
23 | import scala.xml.transform.{RewriteRule, RuleTransformer}
24 |
25 | object KafkaHttpBuild extends Build {
26 | val buildNumber = SettingKey[String]("build-number", "Build number defaults to $BUILD_NUMBER environment variable")
27 | val releaseName = SettingKey[String]("release-name", "the full name of this release")
28 | val commonSettings = Seq(
29 | organization := "com.rackspace",
30 |     pomExtra :=
31 |       <parent>
32 |         <groupId>com.rackspace</groupId>
33 |         <artifactId>rackspace</artifactId>
34 |         <version>10</version>
35 |       </parent>
36 |       <licenses>
37 |         <license>
38 |           <name>Apache 2</name>
39 |           <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
40 |           <distribution>repo</distribution>
41 |         </license>
42 |       </licenses>,
43 | scalacOptions ++= Seq("-deprecation", "-unchecked", "-g:none"),
44 | crossScalaVersions := Seq("2.9.1", "2.9.2"),
45 | scalaVersion := "2.9.2",
46 | version := "0.0.1",
47 | publishTo := Some("Apache Maven Repo" at "https://repository.apache.org/service/local/staging/deploy/maven2"),
48 | credentials += Credentials(Path.userHome / ".m2" / ".credentials"),
49 | buildNumber := System.getProperty("build.number", ""),
50 | version <<= (buildNumber, version) { (build, version) => if (build == "") version else version + "+" + build},
51 | releaseName <<= (name, version, scalaVersion) {(name, version, scalaVersion) => name + "_" + scalaVersion + "-" + version},
52 | javacOptions ++= Seq("-Xlint:unchecked", "-source", "1.5"),
53 | parallelExecution in Test := false, // Prevent tests from overrunning each other
54 |
55 | libraryDependencies ++= Seq(
56 | "log4j" % "log4j" % "1.2.15",
57 | "net.sf.jopt-simple" % "jopt-simple" % "3.2",
58 | "org.slf4j" % "slf4j-simple" % "1.6.4",
59 | "org.eclipse.jetty" % "jetty-server" % "8.1.14.v20131031",
60 | "org.eclipse.jetty" % "jetty-servlet" % "8.1.14.v20131031",
61 | "org.mongodb" % "mongo-java-driver" % "2.11.3",
62 | "org.apache.kafka" % "kafka_2.9.2" % "0.8.0",
63 | "org.slf4j" % "slf4j-api" % "1.6.4",
64 | "org.slf4j" % "slf4j-simple" % "1.6.4"
65 | ),
66 | // The issue is going from log4j 1.2.14 to 1.2.15, the developers added some features which required
67 | // some dependencies on various sun and javax packages.
 68 |     ivyXML :=
 69 |       <dependencies>
 70 |         <exclude module="javax"/>
 71 |         <exclude module="jmxri"/>
 72 |         <exclude module="jmxtools"/>
 73 |         <exclude module="mail"/>
 74 |         <exclude module="jms"/>
 75 |       </dependencies>
 76 |   ) ++ Seq(
 77 |     libraryDependencies <+= scalaVersion("org.scala-lang" % "scala-compiler" % _ ))
 78 | 
 79 |   val release = TaskKey[Unit]("release", "Creates a deployable release directory file with dependencies, config, and scripts.")
 80 |   val releaseTask = release <<= ( packageBin in Compile, dependencyClasspath in Runtime, exportedProducts in Compile,
 81 |     target, releaseName) map { (packageBin, deps, products, target, releaseName) =>
 82 |     // NOTE: explicitly exclude sbt-launch.jar dep here, because it can use different scala version and if copied to jars folder will break the build
 83 |     // Found the hard way :-(
 84 |     val jarFiles = deps.files.filter(f => !products.files.contains(f) && f.getName.endsWith(".jar") && f.getName != "sbt-launch.jar")
 85 |     val destination = target / "RELEASE" / releaseName
 86 |     IO.copyFile(packageBin, destination / packageBin.getName)
 87 |     IO.copy(jarFiles.map { f => (f, destination / "libs" / f.getName) })
 88 |     IO.copyDirectory(file("config"), destination / "config")
 89 |     IO.copyDirectory(file("bin"), destination / "bin")
 90 |     for {file <- (destination / "bin").listFiles} { file.setExecutable(true, true) }
 91 |   }
 92 |   lazy val kafkaHttp = Project(id = "KafkaHttp", base = file(".")).settings((commonSettings ++ releaseTask): _*)
 93 | }
 94 | 
--------------------------------------------------------------------------------
/sbt:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | # Licensed to the Apache Software Foundation (ASF) under one or more
 3 | # contributor license agreements. See the NOTICE file distributed with
 4 | # this work for additional information regarding copyright ownership.
 5 | # The ASF licenses this file to You under the Apache License, Version 2.0
 6 | # (the "License"); you may not use this file except in compliance with
 7 | # the License. You may obtain a copy of the License at
 8 | #
 9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | java -Xmx1024M -XX:MaxPermSize=512m -Dbuild.number="$BUILD_NUMBER" -jar "$(dirname $0)"/lib/sbt-launch.jar "$@"
18 | 
--------------------------------------------------------------------------------
/scripts/benchmark-httperf.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | FILENAME=session${SIZE}.log
 3 | echo "Using session sample: $FILENAME"
 4 | 
 5 | #httperf --hog --client=0/1 --server=localhost --port=8080 --uri=/ --add-header='Content-Type:application/json\nAccept:application/json\n' --method=POST --wsesslog=6000,1,${FILENAME} --max-piped-calls 10 --rate 600
 6 | httperf --hog --client=0/1 --server=localhost --port=8080 --add-header='Content-Type:application/bson\nAccept:application/json\n' --wsesslog=200,1,${FILENAME} --max-piped-calls 10 --rate 200
 7 | 
--------------------------------------------------------------------------------
/scripts/dev-producer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | rm -rf /opt/kafka-http
4 | mv /opt/src/target/RELEASE/KafkaHttp_2.9.2-0.0.1/ /opt/kafka-http
5 | chmod +x /opt/kafka-http/bin/*.sh
6 | mkdir -p /var/log/kafka-http
7 | BROKER_LIST=localhost:9092 /opt/scripts/start-producer.sh
8 |
--------------------------------------------------------------------------------
/scripts/generage.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # Generates a BSON-encoded sample payload (a single ~500KB message) for load testing.
 3 | import sys
 4 | 
 5 | from bson import BSON
 6 | 
 7 | data = {"messages": [{"value": {"a": "b" * 500000}}]}
 8 | # Write raw bytes: print would append a newline to the binary payload.
 9 | sys.stdout.write(BSON.encode(data))
10 | 
--------------------------------------------------------------------------------
/scripts/produce.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | FILENAME=session${SIZE}.bson
4 | echo "Using session sample: $FILENAME"
5 |
6 | #ab -c 10 -n 1000 -p $FILENAME -T 'application/multipart-form-data' http://localhost:8080/topics/messages
7 | #ab -v 3 -c 10 -n 1000 -p $FILENAME -T 'application/bson' http://localhost:8080/topics/messages
8 | ab -v 0 -c 100 -n 1000 -p $FILENAME -T 'application/bson' http://localhost:8080/topics/messages
9 |
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/Configurator.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import java.util.Properties
4 | import java.util.Random
5 |
6 | import javax.servlet.http.HttpServlet
7 |
8 | import joptsimple._
9 | import kafka.utils._
10 | import kafka.serializer._
11 | import kafka.consumer._
12 | import kafka.message._
13 |
14 | import org.apache.log4j.Logger
15 |
16 | object Configurator {
17 | val logger = Logger.getLogger("kafka.http.config")
18 |
19 | def getServlet(args:Array[String]):HttpServlet = {
20 | if(args.length < 1) {
21 | throw new Exception("Provide first parameter as 'consumer' or 'producer'")
22 | }
23 |
24 | var endpoint = args(0)
25 | if(endpoint == "consumer") {
26 | return getConsumerServlet(args)
27 | } else if (endpoint == "producer") {
28 | return getProducerServlet(args)
29 | } else {
30 | throw new Exception("Provide first parameter as 'consumer' or 'producer'")
31 | }
32 | }
33 |
34 | def getConsumerServlet(args:Array[String]):HttpServlet = {
35 | val parser = new OptionParser
36 |
37 | val topic = parser.accepts(
38 | "topic",
39 | "The topic to consume from")
40 | .withRequiredArg
41 | .describedAs("topic")
42 | .ofType(classOf[String])
43 |
44 | val group = parser.accepts(
45 | "group",
46 | "The group id of this consumer")
47 | .withRequiredArg
48 | .describedAs("gid")
49 | .defaultsTo("rest-" + new Random().nextInt(100000))
50 | .ofType(classOf[String])
51 |
52 | val zookeeper = parser.accepts(
53 | "zookeeper",
54 | "Comma separated of zookeeper nodes")
55 | .withRequiredArg
56 | .describedAs("urls")
57 | .ofType(classOf[String])
58 |
59 | val socketBufferSize = parser.accepts(
60 | "socket-buffer-size",
61 | "The size of the tcp RECV size.")
62 | .withRequiredArg
63 | .describedAs("size")
64 | .ofType(classOf[java.lang.Integer])
65 | .defaultsTo(2 * 1024 * 1024)
66 |
67 | val socketTimeoutMs = parser.accepts(
68 | "socket-timeout-ms",
69 | "The socket timeout used for the connection to the broker")
70 | .withRequiredArg
71 | .describedAs("ms")
72 | .ofType(classOf[java.lang.Integer])
73 | .defaultsTo(ConsumerConfig.SocketTimeout)
74 |
75 | val minFetchBytes = parser.accepts(
76 | "min-fetch-bytes",
77 | "The min number of bytes each fetch request waits for.")
78 | .withRequiredArg
79 | .describedAs("bytes")
80 | .ofType(classOf[java.lang.Integer])
81 | .defaultsTo(1)
82 |
83 | val maxWaitMs = parser.accepts(
84 | "max-wait-ms",
85 | "The max amount of time each fetch request waits.")
86 | .withRequiredArg
87 | .describedAs("ms")
88 | .ofType(classOf[java.lang.Integer])
89 | .defaultsTo(100)
90 |
91 | val autoCommit = parser.accepts(
92 | "autocommit",
93 | "If set offsets will be commited automatically during consuming")
94 |
95 | val autoCommitInterval = parser.accepts(
96 | "autocommit-interval-ms",
97 | "The time interval at which to save the current offset in ms")
98 | .withRequiredArg
99 | .describedAs("ms")
100 | .ofType(classOf[java.lang.Integer])
101 | .defaultsTo(ConsumerConfig.AutoCommitInterval)
102 |
103 | val consumerTimeoutMs = parser.accepts(
104 | "consumer-timeout-ms",
105 | "consumer throws timeout exception after waiting this much of time without incoming messages")
106 | .withRequiredArg
107 | .describedAs("prop")
108 | .ofType(classOf[java.lang.Integer])
109 | .defaultsTo(5000)
110 |
111 | val resetBeginning = parser.accepts(
112 | "from-beginning",
113 | "If the consumer does not already have an established offset to consume from, " +
114 | "start with the earliest message present in the log rather than the latest message.")
115 |
116 | val statsdHost = parser.accepts(
117 | "statsd-host",
118 | "Statsd host")
119 | .withRequiredArg
120 | .ofType(classOf[java.lang.String])
121 | .defaultsTo("localhost")
122 |
123 | val statsdPrefix = parser.accepts(
124 | "statsd-prefix",
125 | "Statsd prefix")
126 | .withRequiredArg
127 | .ofType(classOf[java.lang.String])
128 | .defaultsTo("kafka.http.consumer")
129 |
130 | val statsdPort = parser.accepts(
131 | "statsd-port",
132 | "Statsd port")
133 | .withRequiredArg
134 | .ofType(classOf[java.lang.Integer])
135 | .defaultsTo(8125)
136 |
137 | val options: OptionSet = tryParse(parser, args)
138 | CommandLineUtils.checkRequiredArgs(parser, options, topic, group, zookeeper)
139 |
140 | val props = new Properties()
141 |
142 | props.put("zookeeper.connect", options.valueOf(zookeeper).toString)
143 | props.put("zookeeper.session.timeout.ms", "400")
144 | props.put("zookeeper.sync.time.ms", "200")
145 |
146 | props.put("group.id", options.valueOf(group).toString)
147 |
148 | props.put("socket.receive.buffer.bytes", options.valueOf(socketBufferSize).toString)
149 | props.put("socket.timeout.ms", options.valueOf(socketTimeoutMs).toString)
150 |
151 | props.put("auto.commit.enable", if(options.has(autoCommit)) "true" else "false")
152 | props.put("auto.commit.interval.ms", options.valueOf(autoCommitInterval).toString)
153 | props.put("auto.offset.reset", if(options.has(resetBeginning)) "smallest" else "largest")
154 |
155 | props.put("fetch.min.bytes", options.valueOf(minFetchBytes).toString)
156 | props.put("fetch.wait.max.ms", options.valueOf(maxWaitMs).toString)
157 |
158 | props.put("consumer.timeout.ms", options.valueOf(consumerTimeoutMs).toString)
159 |
160 | logger.info("Consumer Properties: %s".format(props.toString))
161 |
162 | val reportingProps = new Properties()
163 |
164 | reportingProps.put("statsd.host", options.valueOf(statsdHost).toString)
165 | reportingProps.put("statsd.port", options.valueOf(statsdPort).toString)
166 | reportingProps.put("statsd.prefix", options.valueOf(statsdPrefix).toString)
167 |
168 | logger.info("Reporting Properties: %s".format(reportingProps.toString))
169 |
170 | return new ConsumerServlet(options.valueOf(topic).toString, props, reportingProps)
171 | }
172 |
173 | def getProducerServlet(args:Array[String]):HttpServlet = {
174 | val parser = new OptionParser
175 |
176 | val brokers = parser.accepts(
177 | "broker-list", "Comma separated of kafka nodes")
178 | .withRequiredArg
179 | .describedAs("brokers")
180 | .ofType(classOf[String])
181 |
182 | val compress = parser.accepts(
183 | "compress",
184 | "If set, messages batches are sent compressed")
185 |
186 | val sync = parser.accepts(
187 | "sync",
188 | "If set message send requests to the brokers are synchronously, one at a time as they arrive.")
189 |
190 | val requestRequiredAcks = parser.accepts(
191 | "request-required-acks",
192 | "The required acks of the producer requests")
193 | .withRequiredArg
194 | .describedAs("request required acks")
195 | .ofType(classOf[java.lang.Integer])
196 | .defaultsTo(0)
197 |
198 | val requestTimeoutMs = parser.accepts(
199 | "request-timeout-ms",
200 | "The ack timeout of the producer requests. Value must be non-negative and non-zero")
201 | .withRequiredArg
202 | .describedAs("request timeout ms")
203 | .ofType(classOf[java.lang.Integer])
204 | .defaultsTo(1500)
205 |
206 | val socketBufferSize = parser.accepts(
207 | "socket-buffer-size",
208 | "The size of the tcp RECV size.")
209 | .withRequiredArg
210 | .describedAs("size")
211 | .ofType(classOf[java.lang.Integer])
212 | .defaultsTo(1024*100)
213 |
214 | val statsdHost = parser.accepts(
215 | "statsd-host",
216 | "Statsd host")
217 | .withRequiredArg
218 | .ofType(classOf[java.lang.String])
219 | .defaultsTo("localhost")
220 |
221 | val statsdPrefix = parser.accepts(
222 | "statsd-prefix",
223 | "Statsd prefix")
224 | .withRequiredArg
225 | .ofType(classOf[java.lang.String])
226 | .defaultsTo("kafka.http.producer")
227 |
228 | val statsdPort = parser.accepts(
229 | "statsd-port",
230 | "Statsd port")
231 | .withRequiredArg
232 | .ofType(classOf[java.lang.Integer])
233 | .defaultsTo(8125)
234 |
235 | val options: OptionSet = tryParse(parser, args)
236 | CommandLineUtils.checkRequiredArgs(parser, options, brokers)
237 |
238 | val props = new Properties()
239 |
240 | props.put("metadata.broker.list", options.valueOf(brokers).toString)
241 | props.put("compression.codec", (if(options.has(compress)) DefaultCompressionCodec.codec else NoCompressionCodec.codec).toString)
242 | props.put("producer.type", if(options.has(sync)) "sync" else "async")
243 | props.put("request.required.acks", options.valueOf(requestRequiredAcks).toString)
244 | props.put("request.timeout.ms", options.valueOf(requestTimeoutMs).toString)
245 | props.put("key.serializer.class", classOf[StringEncoder].getName)
246 | props.put("serializer.class", classOf[DefaultEncoder].getName)
247 | props.put("send.buffer.bytes", options.valueOf(socketBufferSize).toString)
248 |
249 | logger.info("Producer Properties: %s".format(props.toString))
250 |
251 | val reportingProps = new Properties()
252 |
253 | reportingProps.put("statsd.host", options.valueOf(statsdHost).toString)
254 | reportingProps.put("statsd.port", options.valueOf(statsdPort).toString)
255 | reportingProps.put("statsd.prefix", options.valueOf(statsdPrefix).toString)
256 |
257 | logger.info("Reporting Properties: %s".format(reportingProps.toString))
258 |
259 | return new ProducerServlet(props, reportingProps)
260 | }
261 |
262 | def tryParse(parser: OptionParser, args: Array[String]) = {
263 | try {
264 | parser.parse(args : _*)
265 | } catch {
266 | case e: OptionException => {
267 | Utils.croak(e.getMessage)
268 | null
269 | }
270 | }
271 | }
272 | }
273 |
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/ConsumerServlet.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import scala.collection.JavaConversions._
4 |
5 | import java.io.IOException
6 | import java.util.Properties
7 | import java.util.HashMap
8 |
9 | import javax.servlet.ServletException
10 | import javax.servlet.http.HttpServlet
11 | import javax.servlet.http.HttpServletRequest
12 | import javax.servlet.http.HttpServletResponse
13 |
14 | import org.apache.log4j.Logger;
15 |
16 | import kafka.consumer._
17 | import kafka.message._
18 | import kafka.serializer._
19 |
20 | import org.bson.BSONObject
21 | import org.bson.BasicBSONDecoder
22 | import org.bson.BasicBSONEncoder
23 | import org.bson.BasicBSONObject
24 | import org.bson.types.BasicBSONList
25 | import com.mongodb.util.JSON
26 |
27 | class ConsumerServlet(topic:String, properties: Properties, reportingProps: Properties) extends HttpServlet with ReplyFormatter
28 | {
29 | val consumer = Consumer.create(new ConsumerConfig(properties))
30 | val logger = Logger.getLogger("kafka.rest.consumer")
31 | val stream = consumer.createMessageStreamsByFilter(
32 | new Whitelist(topic), 1, new StringDecoder(), new DefaultDecoder()).get(0)
33 |
34 | override def doPost(request:HttpServletRequest, response:HttpServletResponse)
35 | {
36 | logger.info("Started commiting offsets")
37 | this.synchronized {
38 | consumer.commitOffsets
39 | }
40 | logger.info("Done commiting offsets")
41 |
42 | var obj = new BasicBSONObject()
43 | obj.append("commited", "OK")
44 | replyWith(obj, request, response)
45 | }
46 |
47 | def getBatchSize(request:HttpServletRequest):Int = {
48 | val batchSizeParam = request.getParameter("batchSize")
49 | if(batchSizeParam == null) {
50 | return 1
51 | }
52 |
53 | var batchSize = 0
54 | try {
55 | batchSize = batchSizeParam.toInt
56 | } catch {
57 | case _:Exception => throw new Exception("Parameter 'batchSize' should be int")
58 | }
59 |
60 | if(batchSize <=0) {
61 | throw new Exception("Parameter 'batchSize' should be > 0")
62 | } else if (batchSize > 1000) {
63 | throw new Exception("Parameter 'batchSize' should be <= 1000")
64 | }
65 | batchSize
66 | }
67 |
68 | def replyWithMessages(messages:BasicBSONList, request:HttpServletRequest, response:HttpServletResponse) {
69 | var responseObj = new BasicBSONObject()
70 | responseObj.append("messages", messages)
71 | replyWith(responseObj, request, response)
72 | }
73 |
74 | override def doGet(request:HttpServletRequest, response:HttpServletResponse)
75 | {
76 | val batchSize = getBatchSize(request)
77 | this.synchronized {
78 | logger.info("Initiated get, batchSize: %s".format(batchSize))
79 | var messages = new BasicBSONList()
80 | try {
81 | for (item <- stream) {
82 | var key = new String(if (item.key == null) "" else item.key)
83 | var message = new BasicBSONDecoder().readObject(item.message)
84 | var obj = new BasicBSONObject()
85 | obj.append("key", key)
86 | obj.append("value", message)
87 | messages.append(obj)
88 | if(messages.size() >= batchSize) {
89 | logger.info("Collected batch size objects")
90 | replyWithMessages(messages, request, response)
91 | return
92 | }
93 | }
94 | } catch {
95 | case e:kafka.consumer.ConsumerTimeoutException => {
96 | logger.info("Consumer timeout, returning stuff that I have collected")
97 | replyWithMessages(messages, request, response)
98 | }
99 | }
100 | }
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/ProducerServlet.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import scala.collection.JavaConversions._
4 |
5 | import java.io.IOException
6 | import java.util.Properties
7 | import java.util.HashMap
8 | import java.util.concurrent.TimeUnit
9 |
10 | import javax.servlet.ServletException
11 | import javax.servlet.http.HttpServlet
12 | import javax.servlet.http.HttpServletRequest
13 | import javax.servlet.http.HttpServletResponse
14 |
15 | import org.apache.log4j.Logger;
16 |
17 | import kafka.producer._
18 | import kafka.message._
19 | import kafka.serializer._
20 | import scala.collection.mutable._
21 |
22 | import com.mongodb.util.JSON
23 | import com.timgroup.statsd.NonBlockingStatsDClient
24 |
25 | import org.bson.BSON
26 | import org.bson.BSONObject
27 | import org.bson.BasicBSONDecoder
28 | import org.bson.BasicBSONEncoder
29 | import org.bson.BasicBSONObject
30 | import org.bson.types.BasicBSONList
31 |
32 | class ProducerServlet(properties:Properties, reportingProps: Properties) extends HttpServlet with ReplyFormatter
33 | {
34 | val producer = new Producer[String, Array[Byte]](new ProducerConfig(properties))
35 | val logger = Logger.getLogger("kafka.http.producer")
36 |
37 | var statsd = new NonBlockingStatsDClient(
38 | reportingProps.getProperty("statsd.prefix"),
39 | reportingProps.getProperty("statsd.host"),
40 | reportingProps.getProperty("statsd.port").toInt)
41 |
42 | def asList(name: String, o:AnyRef):BasicBSONList = {
43 | try{
44 | return o.asInstanceOf[BasicBSONList]
45 | }
46 | catch {
47 | case e: Exception => {
48 | throw new Exception("Expected list in %s".format(name))
49 | }
50 | }
51 | }
52 |
53 | def asObject(name:String, o:AnyRef):BSONObject = {
54 | try{
55 | return o.asInstanceOf[BSONObject]
56 | }
57 | catch {
58 | case e: Exception => {
59 | throw new Exception("Expected object in %s".format(name))
60 | }
61 | }
62 | }
63 |
64 | def asString(name:String, o:AnyRef):String = {
65 | try{
66 | return o.asInstanceOf[String]
67 | }
68 | catch {
69 | case e: Exception => {
70 | throw new Exception("Expected object in %s".format(name))
71 | }
72 | }
73 | }
74 |
75 | def toKeyedMessage(topic:String, o:BSONObject):KeyedMessage[String, Array[Byte]] = {
76 | var key:String = asString("'key' property", o.get("key"))
77 | var value = asObject("'value' property", o.get("value"))
78 | if(value == null) {
79 | throw new Exception("Expected 'value' property in message")
80 | }
81 | return new KeyedMessage[String, Array[Byte]](topic, key, new BasicBSONEncoder().encode(value))
82 | }
83 |
84 | def getObject(request:HttpServletRequest):MutableList[KeyedMessage[String, Array[Byte]]] = {
85 | var topic = getTopic(request)
86 |
87 | val obj = request.getContentType() match {
88 | case "application/json" => getObjectFromJson(request)
89 | case "application/bson" => getObjectFromBson(request)
90 | case _ => throw new Exception("Unsupported content type: %s".format(request.getContentType()))
91 | }
92 | if(obj == null) {
93 | throw new Exception("Provide a payload for the POST request")
94 | }
95 |
96 | var messagesI = obj.get("messages")
97 | if(messagesI == null) {
98 | throw new Exception("Expected 'messages' list")
99 | }
100 | var messages = asList("'messages' parameter", messagesI)
101 | var list = new MutableList[KeyedMessage[String, Array[Byte]]]()
102 |
103 | for (messageI <- messages) {
104 | var message = asObject("message", messageI)
105 | list += toKeyedMessage(topic, message)
106 | }
107 | list
108 | }
109 |
110 | def getObjectFromBson(request:HttpServletRequest):BSONObject = {
111 | return new BasicBSONDecoder().readObject(request.getInputStream())
112 | }
113 |
114 | def getObjectFromJson(request:HttpServletRequest):BSONObject = {
115 | var body = new StringBuilder
116 | var reader = request.getReader()
117 | var buffer = new Array[Char](4096)
118 | var len:Int = 0
119 |
120 | while ({len = reader.read(buffer, 0, buffer.length); len != -1}) {
121 | body.appendAll(buffer, 0, len);
122 | }
123 | return JSON.parse(body.toString()).asInstanceOf[BSONObject]
124 | }
125 |
126 | def getTopic(request:HttpServletRequest):String = {
127 | var segments = request.getRequestURI().split("/")
128 | if (segments.length != 3 || segments(1) != "topics") {
129 | throw new Exception("Please provide topic /topics/ to post to")
130 | }
131 | return segments(2)
132 | }
133 |
134 | override def doPost(request:HttpServletRequest, response:HttpServletResponse)
135 | {
136 |   // getObject parses the batch and validates the /topics/<topic> URI itself
137 |   val messages = getObject(request)
138 | 
139 |   val start = System.currentTimeMillis()
140 |   producer.send(messages:_*)
141 |   statsd.recordExecutionTime("submitted", (System.currentTimeMillis() - start).toInt)
142 | 
143 |   var obj = new BasicBSONObject()
144 |   obj.append("accepted", "OK")
145 |   replyWith(obj, request, response)
146 | }
147 | }
148 | 
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/ReplyFormatter.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import scala.collection.JavaConversions._
4 |
5 | import javax.servlet.http.HttpServletRequest
6 | import javax.servlet.http.HttpServletResponse
7 |
8 | import org.apache.log4j.Logger;
9 |
10 | import org.bson.BSONObject
11 | import org.bson.BasicBSONDecoder
12 | import org.bson.BasicBSONEncoder
13 | import org.bson.BasicBSONObject
14 | import com.mongodb.util.JSON
15 |
16 | trait ReplyFormatter {
17 | def getReplyContentType(request:HttpServletRequest):String = {
18 | val replyType = request.getHeader("Accept") match {
19 | case "application/json" => "application/json"
20 | case "application/bson" => "application/bson"
21 | case "" | "*/*"=> "application/json"
22 | case _ => throw new Exception("Unsupported content type in Accept: '%s'".format(request.getHeader("Accept")))
23 | }
24 | replyType
25 | }
26 |
27 | def replyWithJson(obj:BSONObject, response:HttpServletResponse, status:Int){
28 | response.setContentType("application/json")
29 | response.setStatus(status)
30 | response.getWriter().print(JSON.serialize(obj))
31 | }
32 |
33 | def replyWithBson(obj:BSONObject, response:HttpServletResponse, status:Int){
34 | response.setContentType("application/bson")
35 | response.setStatus(status)
36 | response.getOutputStream().write(new BasicBSONEncoder().encode(obj))
37 | }
38 |
39 | def replyWithStatus(obj:BSONObject, request:HttpServletRequest, response:HttpServletResponse, status:Int) {
40 | getReplyContentType(request) match {
41 | case "application/json" => replyWithJson(obj, response, status)
42 | case "application/bson" => replyWithBson(obj, response, status)
43 | }
44 | }
45 |
46 | def replyWith(obj:BSONObject, request:HttpServletRequest, response:HttpServletResponse){
47 | replyWithStatus(obj, request, response, HttpServletResponse.SC_OK)
48 | }
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/RestServer.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import org.eclipse.jetty.server.Server
4 | import org.eclipse.jetty.servlet.ServletContextHandler
5 | import org.eclipse.jetty.servlet.ServletHolder
6 | import org.eclipse.jetty.util.thread.QueuedThreadPool
7 |
8 | import kafka.utils._
9 |
10 | import org.apache.log4j.Logger
11 |
12 | object RestServer
13 | {
14 | val logger = Logger.getLogger("kafka.rest.server")
15 |
16 | def main(args:Array[String]){
17 | try{
18 | var servlet = Configurator.getServlet(args)
19 | var server = new Server(8080)
20 |
21 | server.setAttribute("org.eclipse.jetty.server.Request.maxFormContentSize", 35 * 1024 * 1024)
22 |
23 | for(connector <- server.getConnectors()){
24 | connector.setRequestBufferSize(35 * 1024 * 1024);
25 | }
26 |
27 | // Bounded thread pool to limit concurrency; consumer access is additionally serialized via synchronized blocks in ConsumerServlet
28 | var threadPool = new QueuedThreadPool(40)
29 | server.setThreadPool(threadPool)
30 | var context = new ServletContextHandler(ServletContextHandler.SESSIONS)
31 |
32 | context.setContextPath("/")
33 | context.setErrorHandler(new ServletErrorHandler())
34 | server.setHandler(context)
35 |
36 | context.addServlet(new ServletHolder(servlet),"/*")
37 |
38 | server.start()
39 | server.join()
40 |
41 | } catch {
42 | case e: Exception => {
43 | logger.error(e.toString)
44 | System.exit(1)
45 | }
46 | }
47 |
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/src/main/scala/com/rackspace/kafka/http/ServletErrorHandler.scala:
--------------------------------------------------------------------------------
1 | package com.rackspace.kafka.http
2 |
3 | import javax.servlet.ServletException
4 | import javax.servlet.http.HttpServlet
5 | import javax.servlet.http.HttpServletRequest
6 | import javax.servlet.http.HttpServletResponse
7 | import javax.servlet.RequestDispatcher
8 |
9 | import org.eclipse.jetty.server.Request
10 | import org.eclipse.jetty.server.handler.ErrorHandler
11 | import org.eclipse.jetty.http.HttpMethods
12 | import org.eclipse.jetty.server.AbstractHttpConnection
13 |
14 | import org.bson.BasicBSONObject
15 |
16 |
17 | class ServletErrorHandler extends ErrorHandler with ReplyFormatter {
18 | override def handle(target:String, baseRequest:Request, request:HttpServletRequest, response:HttpServletResponse){
19 | var connection = AbstractHttpConnection.getCurrentConnection();
20 | var method = request.getMethod();
21 | if(!method.equals(HttpMethods.GET) && !method.equals(HttpMethods.POST) && !method.equals(HttpMethods.HEAD)){
22 | connection.getRequest().setHandled(true);
23 | return;
24 | }
25 | connection.getRequest().setHandled(true);
26 | var obj = new BasicBSONObject()
27 | var err = request.getAttribute(RequestDispatcher.ERROR_EXCEPTION);
28 | if(err != null) {
29 | obj.append("error", err.asInstanceOf[Throwable].getMessage());
30 | } else {
31 | obj.append("error", "Internal server error");
32 | }
33 |
34 | replyWithStatus(obj, request, response, connection.getResponse().getStatus())
35 | }
36 | }
37 |
--------------------------------------------------------------------------------