├── .gitignore ├── LICENSE ├── NOTICE ├── README.md ├── app ├── Global.scala ├── actors │ ├── ClientNotificationManager.scala │ ├── ConnectionManager.scala │ ├── OffsetHistoryManager.scala │ └── Router.scala ├── assets │ └── stylesheets │ │ └── custom.less ├── common │ ├── Message.scala │ ├── Registry.scala │ └── Util.scala ├── controllers │ ├── ApiConsole.scala │ ├── Application.scala │ ├── Broker.scala │ ├── ConsumerGroup.scala │ ├── Group.scala │ ├── IgnoreParamAssets.scala │ ├── OffsetHistory.scala │ ├── Settings.scala │ ├── Topic.scala │ └── Zookeeper.scala ├── kafka │ ├── consumer │ │ └── async │ │ │ ├── ConsumerConnector.scala │ │ │ ├── ConsumerFetcherManager.scala │ │ │ ├── ConsumerFetcherThread.scala │ │ │ ├── PartitionTopicInfo.scala │ │ │ └── ZookeeperConsumerConnector.scala │ └── javaapi │ │ └── consumer │ │ ├── AsyncConsumerConnector.java │ │ ├── EventHandler.scala │ │ └── ZookeeperAsyncConsumerConnector.scala ├── models │ ├── Database.scala │ ├── Group.scala │ ├── OffsetHistory.scala │ ├── OffsetPoint.scala │ ├── Setting.scala │ ├── Status.scala │ └── Zookeeper.scala └── views │ └── index.scala.html ├── build.sbt ├── conf ├── application.conf ├── evolutions │ └── default │ │ ├── 1.sql │ │ ├── 2.sql │ │ └── 3.sql └── routes ├── img ├── brokers.png ├── offset-history.png ├── register-zookeeper.png ├── topic-feed.png ├── topic.png ├── topics.png └── zookeepers.png ├── lib └── finagle-kafka_2.10-0.1.2-SNAPSHOT.jar ├── project ├── build.properties └── plugins.sbt ├── public ├── api-console │ ├── authentication │ │ ├── oauth1.html │ │ └── oauth2.html │ ├── fonts │ │ ├── FontAwesome.otf │ │ ├── LICENSE-OpenSans.txt │ │ ├── OpenSans-Bold.woff │ │ ├── OpenSans-BoldItalic.woff │ │ ├── OpenSans-Italic.woff │ │ ├── OpenSans-Semibold.woff │ │ ├── OpenSans-SemiboldItalic.woff │ │ ├── OpenSans.woff │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ └── fontawesome-webfont.woff │ ├── index.html │ ├── 
kafka-web-console.raml │ ├── scripts │ │ ├── app.min.js │ │ └── vendor.min.js │ ├── settings.json │ ├── styles │ │ ├── app.css │ │ └── app.min.css │ └── zookeeper.json ├── fonts │ ├── FontAwesome.otf │ ├── fontawesome-webfont.eot │ ├── fontawesome-webfont.svg │ ├── fontawesome-webfont.ttf │ ├── fontawesome-webfont.woff │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.svg │ ├── glyphicons-halflings-regular.ttf │ └── glyphicons-halflings-regular.woff ├── html │ └── partials │ │ ├── broker │ │ └── index.html │ │ ├── offset-history │ │ └── show.html │ │ ├── settings │ │ └── index.html │ │ ├── topic │ │ ├── index.html │ │ └── show.html │ │ └── zookeeper │ │ └── index.html ├── images │ ├── favicon.ico │ ├── kafka_logo.png │ └── zookeeper_small.gif ├── javascripts │ ├── app.js │ ├── brokers-controller.js │ ├── filters.js │ ├── offset-history-controller.js │ ├── services.js │ ├── settings-controller.js │ ├── topic-controller.js │ ├── topics-controller.js │ ├── vendor │ │ ├── angular-animate.min.js │ │ ├── angular-route.min.js │ │ ├── angular.min.js │ │ ├── bootstrap.min.js │ │ ├── d3.v3.min.js │ │ ├── html5shiv.js │ │ ├── jquery-1.9.0.min.js │ │ ├── nv.d3.min.js │ │ ├── respond.min.js │ │ └── underscore-min.js │ └── zookeepers-controller.js └── stylesheets │ ├── bootstrap-theme.min.css │ ├── bootstrap.min.css │ ├── font-awesome.min.css │ └── nv.d3.css └── test ├── ApplicationSpec.scala └── IntegrationSpec.scala /.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | project/project 3 | project/target 4 | target 5 | tmp 6 | .history 7 | dist 8 | /.idea 9 | /*.iml 10 | /out 11 | /.idea_modules 12 | /.classpath 13 | /.project 14 | /RUNNING_PID 15 | /.settings 16 | /*.db 17 | .DS_STORE 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, 
January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Kafka Web Console 2 | Copyright 2014 Claude Mamo -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Retired 2 | ------- 3 | This project is no longer supported. Please consider [Kafka Manager](https://github.com/yahoo/kafka-manager) instead. 4 | 5 | Kafka Web Console 6 | ========= 7 | Kafka Web Console is a Java web application for monitoring [Apache Kafka](http://kafka.apache.org/). 
With a **modern** web browser, you can view from the console: 8 | 9 | - Registered brokers 10 | 11 | ![brokers](/img/brokers.png) 12 | 13 | *** 14 | 15 | - Topics, partitions, log sizes, and partition leaders 16 | 17 | ![topics](/img/topics.png) 18 | 19 | *** 20 | 21 | - Consumer groups, individual consumers, consumer owners, partition offsets and lag 22 | 23 | ![topic](/img/topic.png) 24 | 25 | *** 26 | 27 | - Graphs showing consumer offset and lag history as well as consumer/producer message throughput history. 28 | 29 | ![topic](/img/offset-history.png) 30 | 31 | *** 32 | 33 | - Latest published topic messages (requires web browser support for WebSocket) 34 | 35 | ![topic feed](/img/topic-feed.png) 36 | 37 | *** 38 | 39 | Furthermore, the console provides a JSON API described in [RAML](/public/api-console/kafka-web-console.raml). The API can be tested using the embedded API Console accessible through the URL http://*[hostname]*:*[port]*/api/console. 40 | 41 | Requirements 42 | --- 43 | - Play Framework 2.2.x 44 | - Apache Kafka 0.8.x 45 | - Zookeeper 3.3.3 or 3.3.4 46 | 47 | Deployment 48 | ---- 49 | Consult Play!'s documentation for [deployment options and instructions](http://www.playframework.com/documentation/2.2.x/Production). 50 | 51 | Getting Started 52 | --- 53 | 1. Kafka Web Console requires a relational database. By default, the server connects to an embedded H2 database and no database installation or configuration is needed. Consult Play!'s documentation to [specify a database for the console](http://www.playframework.com/documentation/2.2.x/ScalaDatabase). The following databases are supported: 54 | - H2 (default) 55 | - PostgreSql 56 | - Oracle 57 | - DB2 58 | - MySQL 59 | - Apache Derby 60 | - Microsoft SQL Server 61 | 62 | Changing the database might necessitate making minor modifications to the [DDL](conf/evolutions/default) to accommodate the new database. 63 | 64 | 2. 
Before you can monitor a broker, you need to register the Zookeeper server associated with it: 65 | 66 | ![register zookeeper](/img/register-zookeeper.png) 67 | 68 | Filling in the form and clicking on *Connect* will register the Zookeeper server. Once the console has successfully established a connection with the registered Zookeeper server, it can retrieve all necessary information about brokers, topics, and consumers: 69 | 70 | ![zookeepers](/img/zookeepers.png) 71 | 72 | Support 73 | --- 74 | Please [report](http://github.com/claudemamo/kafka-web-console/issues) any bugs or desired features. 75 | -------------------------------------------------------------------------------- /app/Global.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | import akka.actor.{Terminated, Props} 18 | import common.Registry 19 | import actors._ 20 | import models.{Setting, OffsetHistory} 21 | import org.squeryl.adapters._ 22 | import org.squeryl.internals.DatabaseAdapter 23 | import org.squeryl.{Session, SessionFactory} 24 | import play.api.db.DB 25 | import play.api.libs.concurrent.Akka 26 | import play.api.libs.iteratee.Concurrent 27 | import play.api.{Logger, Application, GlobalSettings} 28 | import Registry.PropertyConstants 29 | import play.api.libs.concurrent.Execution.Implicits.defaultContext 30 | import play.api.Play.current 31 | import scala.Some 32 | 33 | object Global extends GlobalSettings { 34 | 35 | override def onStart(app: Application) { 36 | Registry.registerObject(PropertyConstants.BroadcastChannel, Concurrent.broadcast[String]) 37 | initiateDb(app) 38 | initiateActors() 39 | } 40 | 41 | override def onStop(app: Application) { 42 | Akka.system.actorSelection("akka://application/user/router") ! Terminated 43 | } 44 | 45 | private def initiateDb(app: Application) { 46 | SessionFactory.concreteFactory = app.configuration.getString("db.default.driver") match { 47 | case Some("org.h2.Driver") => Some(() => getSession(new H2Adapter, app)) 48 | case Some("org.postgresql.Driver") => Some(() => getSession(new PostgreSqlAdapter, app)) 49 | case Some("oracle.jdbc.OracleDriver") => Some(() => getSession(new OracleAdapter, app)) 50 | case Some("com.ibm.db2.jcc.DB2Driver") => Some(() => getSession(new DB2Adapter, app)) 51 | case Some("com.mysql.jdbc.Driver") => Some(() => getSession(new MySQLAdapter, app)) 52 | case Some("org.apache.derby.jdbc.EmbeddedDriver") => Some(() => getSession(new DerbyAdapter, app)) 53 | case Some("com.microsoft.sqlserver.jdbc.SQLServerDriver") => Some(() => getSession(new MSSQLServer, app)) 54 | case _ => sys.error("Database driver must be either org.h2.Driver, org.postgresql.Driver, oracle.jdbc.OracleDriver, com.ibm.db2.jcc.DB2Driver, com.mysql.jdbc.Driver, 
org.apache.derby.jdbc.EmbeddedDriver or com.microsoft.sqlserver.jdbc.SQLServerDriver") 55 | } 56 | } 57 | 58 | private def getSession(adapter: DatabaseAdapter, app: Application) = Session.create(DB.getConnection()(app), adapter) 59 | 60 | private def initiateActors() { 61 | Akka.system.actorOf(Props(new Router()), "router") 62 | Akka.system.actorOf(Props(new ConnectionManager())) 63 | Akka.system.actorOf(Props(new ClientNotificationManager())) 64 | Akka.system.actorOf(Props(new OffsetHistoryManager())) 65 | } 66 | 67 | } -------------------------------------------------------------------------------- /app/actors/ClientNotificationManager.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package actors 18 | 19 | import akka.actor.Actor 20 | import common.{Message, Registry} 21 | import Registry.PropertyConstants 22 | import play.api.libs.iteratee.{Concurrent, Enumerator} 23 | import play.api.libs.json.Json 24 | import play.api.Logger 25 | 26 | class ClientNotificationManager extends Actor { 27 | 28 | private val channel = Registry.lookupObject(PropertyConstants.BroadcastChannel) match { 29 | case Some(broadcastChannel: (_, _)) => broadcastChannel._2.asInstanceOf[Concurrent.Channel[String]] 30 | case _ => sys.error("No broadcast channel found.") 31 | } 32 | 33 | override def receive: Actor.Receive = { 34 | case connectNotification: Message.ConnectNotification => channel.push(Json.toJson(connectNotification.zookeeper).toString()) 35 | case _ => 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /app/actors/ConnectionManager.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package actors 18 | 19 | import common.{Message, Registry} 20 | import Registry.PropertyConstants 21 | import models.{Status, Zookeeper} 22 | import akka.actor.Actor 23 | import com.twitter.zk._ 24 | import com.twitter.util.JavaTimer 25 | import com.twitter.conversions.time._ 26 | import play.api.libs.concurrent.Akka 27 | import scala.concurrent.duration.Duration 28 | import java.util.concurrent.TimeUnit 29 | import play.api.Play.current 30 | import play.api.libs.concurrent.Execution.Implicits.defaultContext 31 | import Message.ConnectNotification 32 | import akka.actor.Terminated 33 | import scala.Some 34 | import org.apache.zookeeper.Watcher.Event.KeeperState 35 | import scala.concurrent.Await 36 | import common.Util._ 37 | 38 | class ConnectionManager() extends Actor { 39 | 40 | private val router = Akka.system.actorSelection("akka://application/user/router") 41 | 42 | override def preStart() { 43 | for (zookeeper <- models.Zookeeper.findAll) { 44 | router ! Message.Connect(zookeeper) 45 | } 46 | } 47 | 48 | override def receive: Actor.Receive = { 49 | case connectMessage: Message.Connect => connect(connectMessage.zookeeper) 50 | case connectNotification: Message.ConnectNotification => Zookeeper.update(connectNotification.zookeeper) 51 | case disconnectMessage: Message.Disconnect => disconnect(disconnectMessage.zookeeper) 52 | case Terminated => terminate() 53 | } 54 | 55 | private def connect(zk: Zookeeper) { 56 | val zkClient = getZkClient(zk, lookupZookeeperConnections()) 57 | 58 | val onSessionEvent: PartialFunction[StateEvent, Unit] = { 59 | case s: StateEvent if s.state == KeeperState.SyncConnected => 60 | ConnectNotification(zk, Status.Connected) 61 | router ! ConnectNotification(zk, Status.Connected) 62 | case s: StateEvent if s.state == KeeperState.Disconnected => 63 | router ! ConnectNotification(zk, Status.Connecting) 64 | case s: StateEvent if s.state == KeeperState.Expired => 65 | router ! 
ConnectNotification(zk, Status.Connecting) 66 | } 67 | 68 | zkClient.onSessionEvent(onSessionEvent) 69 | 70 | zkClient().onFailure(_ => { 71 | // attempt re-connection only if the Zookeeper hasn't been deleted by the user 72 | if (Zookeeper.findById(zk.id).isDefined) { 73 | router ! ConnectNotification(zk, Status.Connecting) 74 | Akka.system.scheduler.scheduleOnce( 75 | Duration.create(5, TimeUnit.SECONDS), self, Message.Connect(zk) 76 | ) 77 | } 78 | }) 79 | } 80 | 81 | private def terminate() { 82 | shutdownConnections() 83 | Zookeeper.update(Zookeeper.findAll.map(z => Zookeeper(z.name, z.host, z.port, z.groupId, Status.Disconnected.id, z.chroot))) 84 | } 85 | 86 | private def shutdownConnections() { 87 | Registry.lookupObject(PropertyConstants.ZookeeperConnections) match { 88 | case Some(s: Map[_, _]) => 89 | s.asInstanceOf[Map[String, ZkClient]].map(z => Await.result(twitterToScalaFuture(z._2.release()), Duration.Inf)) 90 | case _ => 91 | } 92 | } 93 | 94 | private def getZkClient(zk: Zookeeper, zkConnections: Map[String, ZkClient]): ZkClient = { 95 | zkConnections.filterKeys(_ == zk.name) match { 96 | case zk if zk.size > 0 => zk.head._2 97 | case _ => 98 | val zkClient = ZkClient(zk.toString, 6000 milliseconds, 6000 milliseconds)(new JavaTimer) 99 | Registry.registerObject(PropertyConstants.ZookeeperConnections, Map(zk.name -> zkClient) ++ zkConnections) 100 | zkClient 101 | } 102 | } 103 | 104 | def lookupZookeeperConnections(): Map[String, ZkClient] = { 105 | Registry.lookupObject(PropertyConstants.ZookeeperConnections) match { 106 | case Some(zkConnections: Map[_, _]) => zkConnections.asInstanceOf[Map[String, ZkClient]] 107 | case _ => Registry.registerObject(PropertyConstants.ZookeeperConnections, Map[String, ZkClient]()) 108 | } 109 | } 110 | 111 | private def disconnect(zk: Zookeeper) { 112 | lookupZookeeperConnections().get(zk.name) match { 113 | case Some(zkClient) => 114 | zkClient.release() 115 | 
Registry.registerObject(PropertyConstants.ZookeeperConnections, lookupZookeeperConnections().filterKeys(_ != zk.name)) 116 | case _ => 117 | } 118 | } 119 | 120 | } -------------------------------------------------------------------------------- /app/actors/OffsetHistoryManager.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | package actors 18 | 19 | import akka.actor.{ActorRef, Cancellable, Actor} 20 | import models._ 21 | import common.Util._ 22 | import common.Message 23 | import play.api.libs.concurrent.Akka 24 | import scala.concurrent.duration.Duration 25 | import java.util.concurrent.TimeUnit 26 | import play.api.libs.concurrent.Execution.Implicits.defaultContext 27 | import play.api.Play.current 28 | import java.sql.Timestamp 29 | import java.util.{Properties, Date} 30 | import scala.Some 31 | import org.quartz._ 32 | import org.quartz.impl.StdSchedulerFactory 33 | 34 | private class Executor() extends Job { 35 | def execute(ctx: JobExecutionContext) { 36 | val actor = ctx.getJobDetail.getJobDataMap().get("actor").asInstanceOf[ActorRef] 37 | actor ! 
Message.Purge 38 | } 39 | } 40 | 41 | class OffsetHistoryManager extends Actor { 42 | 43 | private var fetchOffsetPointsTask: Cancellable = null 44 | private val JobKey = "purge" 45 | private[this] val props = new Properties() 46 | 47 | props.setProperty("org.quartz.scheduler.instanceName", context.self.path.name) 48 | props.setProperty("org.quartz.threadPool.threadCount", "1") 49 | props.setProperty("org.quartz.jobStore.class", "org.quartz.simpl.RAMJobStore") 50 | props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true") 51 | val scheduler = new StdSchedulerFactory(props).getScheduler 52 | 53 | override def preStart() { 54 | scheduler.start() 55 | schedule() 56 | self ! Message.FetchOffsets 57 | } 58 | 59 | override def postStop() { 60 | scheduler.shutdown() 61 | } 62 | 63 | override def receive: Receive = { 64 | case Message.FetchOffsets => { 65 | fetchOffsetPoints() 66 | fetchOffsetPointsTask = Akka.system.scheduler.scheduleOnce(Duration.create(Setting.findByKey(Setting.OffsetFetchInterval.toString).get.value.toLong, TimeUnit.SECONDS), self, Message.FetchOffsets) 67 | } 68 | case Message.SettingsUpdateNotification => { 69 | scheduler.deleteJob(new JobKey(JobKey)) 70 | schedule() 71 | fetchOffsetPointsTask.cancel() 72 | Akka.system.scheduler.scheduleOnce(Duration.create(Setting.findByKey(Setting.OffsetFetchInterval.toString).get.value.toLong, TimeUnit.SECONDS), self, Message.FetchOffsets) 73 | } 74 | case Message.Purge => { 75 | OffsetPoint.truncate() 76 | OffsetHistory.truncate() 77 | } 78 | case _ => 79 | } 80 | 81 | private def getOffsetHistory(zk: Zookeeper, topic: (String, Seq[String])): OffsetHistory = { 82 | OffsetHistory.findByZookeeperIdAndTopic(zk.id, topic._1) match { 83 | case None => OffsetHistory.insert(OffsetHistory(zk.id, topic._1)) 84 | case Some(oH) => oH 85 | } 86 | } 87 | 88 | private def persistOffsetPoint(partitionOffsets: Map[String, Seq[Long]], offsetHistory: OffsetHistory, partitionsLogSize: Seq[Long]) { 89 | val timestamp = new 
Timestamp(new Date().getTime) 90 | for (e <- partitionOffsets) { 91 | for ((p, i) <- e._2.zipWithIndex) { 92 | OffsetPoint.insert(OffsetPoint(e._1, timestamp, offsetHistory.id, i, p, partitionsLogSize(i))) 93 | } 94 | 95 | } 96 | } 97 | 98 | private def schedule() { 99 | val jdm = new JobDataMap() 100 | jdm.put("actor", self) 101 | val job = JobBuilder.newJob(classOf[Executor]).withIdentity(JobKey).usingJobData(jdm).build() 102 | scheduler.scheduleJob(job, TriggerBuilder.newTrigger().startNow().forJob(job).withSchedule(CronScheduleBuilder.cronSchedule(Setting.findByKey(Setting.PurgeSchedule.toString).get.value)).build()) 103 | } 104 | 105 | private def fetchOffsetPoints() { 106 | connectedZookeepers { (zk, zkClient) => 107 | for { 108 | topics <- getTopics(zkClient) 109 | topic = topics.map { t => 110 | for { 111 | partitionLeaders <- getPartitionLeaders(t._1, zkClient) 112 | partitionsLogSize <- getPartitionsLogSize(t._1, partitionLeaders) 113 | partitionOffsets <- getPartitionOffsets(t._1, zkClient) 114 | } yield persistOffsetPoint(partitionOffsets, getOffsetHistory(zk, t), partitionsLogSize) 115 | } 116 | } yield None 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /app/actors/Router.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
 */

package actors

import akka.actor.Actor
import play.api.libs.concurrent.Akka
import play.api.Play.current

/**
 * Broadcast router: forwards every message received from another actor to all
 * top-level actors under /user via a wildcard actor selection.
 */
class Router extends Actor {

  override def receive: Actor.Receive = {
    // Guard (sender != self) prevents the router from re-broadcasting messages
    // the wildcard selection delivers back to itself, which would loop forever.
    case message if (sender != self) => Akka.system.actorSelection("akka://application/user/*") ! message
  }

}
--------------------------------------------------------------------------------
/app/assets/stylesheets/custom.less:
--------------------------------------------------------------------------------
/*
 * Base structure
 */

/* Move down content because we have a fixed navbar that is 50px tall */
body {
  padding-top: 50px;
}

/*
 * Global add-ons
 */

.sub-header {
  padding-bottom: 10px;
  border-bottom: 1px solid #eee;
}

/*
 * Sidebar
 */

/* Hide for mobile, show later */
.sidebar {
  display: none;
}

@media (min-width: 768px) {
  .sidebar {
    position: fixed;
    top: 0;
    left: 0;
    bottom: 0;
    z-index: 1000;
    display: block;
    padding: 70px 20px 20px;
    background-color: #f5f5f5;
    border-right: 2px ridge #eee;

    a {
      border-bottom: 1px solid #dddddd;
    }
  }
}

/* Sidebar navigation */
.nav-sidebar {
  margin-left: -20px;
  margin-right: -21px; /* 20px padding + 1px border */
  margin-bottom: 20px;
  margin-top: 1px;
}

.nav-sidebar > li > a {
  padding-left: 20px;
  padding-right: 20px;
}

.nav-sidebar > .active > a {
  color: #fff;
  background-color: #428bca;
}

/*
 * Main content
 */

.main {
  padding: 20px;
}

@media (min-width: 768px) {
  .main {
    padding-left: 40px;
    padding-right: 40px;
  }
}

.main .page-header {
  margin-top: 0;
}

.tab-content {
  margin-top: 20px;
}
86 | 87 | .clickable { 88 | cursor: pointer; 89 | } 90 | 91 | #consumer-groups a { 92 | cursor: pointer; 93 | font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; 94 | font-size: 14px; 95 | font-weight: normal; 96 | color: #333; 97 | text-decoration: none; 98 | } 99 | 100 | .navbar { 101 | min-height: 71px; 102 | margin-bottom: 0px; 103 | border-bottom: 5px ridge #eee; 104 | } 105 | 106 | .logo { 107 | width: 30px 108 | } 109 | 110 | .title { 111 | font-family: Calibri, Candara, Segoe, "Segoe UI", Optima, Arial, sans-serif; 112 | font-size: 25px; 113 | color: #ffffff; 114 | margin-left: 2px; 115 | } 116 | 117 | .content { 118 | margin-top: 15px; 119 | } 120 | 121 | .section { 122 | i, img { 123 | vertical-align: middle; 124 | margin-right: 5px 125 | } 126 | 127 | a { 128 | font-family: "Arial Rounded MT Bold", "Helvetica Rounded", Arial, sans-serif; 129 | color: #333; 130 | } 131 | } 132 | 133 | .dots { 134 | display: inline 135 | } 136 | 137 | .row-animation.ng-enter { 138 | -webkit-animation: enter_sequence; 139 | animation: enter_sequence; 140 | -webkit-animation-duration: 0.5s; 141 | animation-duration: 0.5s; 142 | -webkit-animation-fill-mode: both; 143 | animation-fill-mode: both; 144 | } 145 | 146 | .row-animation.ng-leave { 147 | -webkit-animation: leave_sequence; 148 | animation: leave_sequence; 149 | -webkit-animation-duration: 0.2s; 150 | animation-duration: 0.2s; 151 | -webkit-animation-fill-mode: both; 152 | animation-fill-mode: both; 153 | } 154 | 155 | @-webkit-keyframes enter_sequence { 156 | 0% { 157 | opacity: 0; 158 | } 159 | 100% { 160 | opacity: 1; 161 | } 162 | } 163 | 164 | @keyframes enter_sequence { 165 | 0% { 166 | opacity: 0; 167 | } 168 | 100% { 169 | opacity: 1; 170 | } 171 | } 172 | 173 | @-webkit-keyframes leave_sequence { 174 | 0% { 175 | opacity: 1; 176 | } 177 | 100% { 178 | opacity: 0; 179 | } 180 | } 181 | 182 | @keyframes leave_sequence { 183 | 0% { 184 | opacity: 1; 185 | } 186 | 100% { 187 | opacity: 0; 188 | } 189 | } 
190 | 191 | .chart svg { 192 | height: 300px; 193 | } 194 | 195 | .table-hover > tbody > tr:hover > td, .table-hover > tbody > tr:hover > th { 196 | background-color: #0088CC; 197 | color: #ffffff; 198 | } 199 | 200 | .nav > li > a { 201 | padding: 20px; 202 | } -------------------------------------------------------------------------------- /app/common/Message.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
 */

package common

import models.{Status, Zookeeper}

/**
 * Message types exchanged between the console's actors
 * (ConnectionManager, OffsetHistoryManager, Router, etc.).
 */
object Message {

  /** Request to establish a connection to the given Zookeeper. */
  case class Connect(zookeeper: Zookeeper)

  /** Request to drop the connection to the given Zookeeper. */
  case class Disconnect(zookeeper: Zookeeper)

  /** Broadcast carrying a Zookeeper whose connection state changed. */
  case class ConnectNotification(zookeeper: Zookeeper)

  /** Broadcast carrying a persisted status change. */
  case class StatusNotification(status: Status)

  // NOTE(review): the zero-argument case classes below are used as singletons —
  // senders write e.g. `self ! Message.FetchOffsets`, which sends the companion
  // object, and receivers match on the companion. `case object` would express
  // that intent directly; confirm no caller instantiates them before changing.
  case class Terminate()

  case class FetchOffsets()

  case class SettingsUpdateNotification()

  case class Purge()

  object ConnectNotification {
    /** Convenience factory: copies `zk` with the new status id applied. */
    def apply(zk: Zookeeper, status: Status.Value): ConnectNotification = {
      ConnectNotification(Zookeeper(zk.name, zk.host, zk.port, zk.groupId, status.id, zk.chroot, zk.id))
    }
  }

}
--------------------------------------------------------------------------------
/app/common/Registry.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package common

import common.Registry.PropertyConstants.PropertyConstants
import java.util.concurrent.atomic.AtomicReference

/**
 * Process-wide, thread-safe key/value registry used to share singletons
 * (e.g. the map of live Zookeeper connections) across actors and controllers.
 */
object Registry {

  /** Well-known registry keys. */
  object PropertyConstants extends Enumeration {
    type PropertyConstants = Value
    val ZookeeperConnections = Value("ZOOKEEPER-CONNECTIONS")
    val BroadcastChannel = Value("BROADCAST-CHANNEL")
  }

  // Immutable map swapped atomically: lock-free reads, CAS-retry writes.
  private val properties = new AtomicReference(Map[String, Any]())

  /** Looks up a registered value by name; None when absent. */
  def lookupObject(propertyName: String): Option[Any] = {
    properties.get().get(propertyName)
  }

  /** Enum-keyed convenience overload of lookupObject. */
  def lookupObject(propertyName: PropertyConstants): Option[Any] = {
    this.lookupObject(propertyName.toString())
  }

  /** Registers `value` under `name` (replacing any previous entry) and returns it. */
  def registerObject[A](name: String, value: A): A = {
    var dirty = true

    // CAS retry loop: rebuild from the latest snapshot until the swap succeeds,
    // so concurrent registrations are never lost.
    while (dirty) {
      val oldProperties = properties.get()
      if (properties.compareAndSet(oldProperties, oldProperties ++ Map(name -> value))) {
        dirty = false
      }
    }

    value
  }

  /** Enum-keyed convenience overload of registerObject. */
  def registerObject[A](name: PropertyConstants, value: A): A = {
    this.registerObject(name.toString, value)
    value
  }

}

--------------------------------------------------------------------------------
/app/common/Util.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package common

import play.api.Logger

import scala.concurrent.{Future, Promise}
import com.twitter.util.{Throw, Return}
import com.twitter.zk.{ZNode, ZkClient}
import common.Registry.PropertyConstants
import models.Zookeeper
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import org.apache.zookeeper.KeeperException.{NotEmptyException, NodeExistsException, NoNodeException}
import okapies.finagle.Kafka
import kafka.api.OffsetRequest

/**
 * Shared helpers for querying Kafka metadata out of Zookeeper and for bridging
 * Twitter futures (finagle/zk client) into Scala futures.
 */
object Util {

  /** Adapts a Twitter Future to a Scala Future, preserving success/failure. */
  def twitterToScalaFuture[A](twitterFuture: com.twitter.util.Future[A]): Future[A] = {
    val promise = Promise[A]()
    twitterFuture respond {
      case Return(a) => promise success a
      case Throw(e) => promise failure e
    }
    promise.future
  }

  /**
   * Resolves each partition of `topicName` to its leader's "host:port" address,
   * ordered by partition id. Partitions with no live leader map to "".
   * NOTE(review): the JSON parsing below uses unchecked `.get`s — malformed
   * state/broker znodes would throw; confirm that is acceptable upstream.
   */
  def getPartitionLeaders(topicName: String, zkClient: ZkClient): Future[Seq[String]] = {
    Logger.debug("Getting partition leaders for topic " + topicName)
    return for {
      partitionStates <- getZChildren(zkClient, "/brokers/topics/" + topicName + "/partitions/*/state")
      // path segment 5 is the partition id; the data is the partition state JSON
      partitionsData <- Future.sequence(partitionStates.map(p => twitterToScalaFuture(p.getData().map(d => (p.path.split("/")(5), new String(d.bytes))))))
      brokerIds = partitionsData.map(d => (d._1, scala.util.parsing.json.JSON.parseFull(d._2).get.asInstanceOf[Map[String, Any]].get("leader").get))
      // JSON numbers parse as Double, hence the toDouble.toInt round-trip
      brokers <- Future.sequence(brokerIds.map(bid => getZChildren(zkClient, "/brokers/ids/" + bid._2.toString.toDouble.toInt).map((bid._1, _))))
      partitionsWithLeaders = brokers.filter(_._2.headOption match {
        case Some(s) => true
        case _ => false
      })
      partitionsWithoutLeaders = brokers.filterNot(b => b._2.headOption match {
        case Some(s) => true
        case _ => Logger.warn("Partition " + b._1 + " in topic " + topicName + " has no leaders"); false
      })
      brokersData <- Future.sequence(partitionsWithLeaders.map(d => twitterToScalaFuture(d._2.head.getData().map((d._1, _)))))
      brokersInfo = brokersData.map(d => (d._1, scala.util.parsing.json.JSON.parseFull(new String(d._2.bytes)).get.asInstanceOf[Map[String, Any]]))
      brokersAddr = brokersInfo.map(bi => (bi._1, bi._2.get("host").get + ":" + bi._2.get("port").get.toString.toDouble.toInt))
      // leaderless partitions contribute an empty address at their position
      pidsAndBrokers = brokersAddr ++ partitionsWithoutLeaders.map(pid => (pid._1, ""))
    } yield pidsAndBrokers.sortBy(pb => pb._1.toInt).map(pb => pb._2)
  }

  /**
   * Fetches the latest log offset of each partition from its leader. A leaderless
   * partition (empty address) or an unreachable leader yields 0.
   */
  def getPartitionsLogSize(topicName: String, partitionLeaders: Seq[String]): Future[Seq[Long]] = {
    Logger.debug("Getting partition log sizes for topic " + topicName + " from partition leaders " + partitionLeaders.mkString(", "))
    return for {
      clients <- Future.sequence(partitionLeaders.map(addr => Future((addr, Kafka.newRichClient(addr)))))
      partitionsLogSize <- Future.sequence(clients.zipWithIndex.map { tu =>
        val addr = tu._1._1
        val client = tu._1._2
        var offset = Future(0L)

        if (!addr.isEmpty) {
          offset = twitterToScalaFuture(client.offset(topicName, tu._2, OffsetRequest.LatestTime)).map(_.offsets.head).recover {
            case e => Logger.warn("Could not connect to partition leader " + addr + ". Error message: " + e.getMessage); 0L
          }
        }

        // NOTE(review): close() is issued right after the async offset request is
        // started, not after it completes — confirm the finagle client drains
        // in-flight requests on close, otherwise this is a race.
        client.close()
        offset
      })
    } yield partitionsLogSize
  }

  /**
   * Returns, per consumer group, the committed offset of each partition of
   * `topicName`, ordered by partition id.
   */
  def getPartitionOffsets(topicName: String, zkClient: ZkClient): Future[Map[String, Seq[Long]]] = {
    Logger.debug("Getting partition offsets for topic " + topicName)
    return for {
      offsetsPartitionsNodes <- getZChildren(zkClient, "/consumers/*/offsets/" + topicName + "/*")
      // tuple: (consumer group from path segment 2, partition node name, offset value)
      partitionOffsets <- Future.sequence(offsetsPartitionsNodes.map(p => twitterToScalaFuture(p.getData().map(d => (p.path.split("/")(2), p.name, new String(d.bytes).toLong)))))
      partitionOffsetsByConsumerGroup = partitionOffsets.groupBy(_._1).map(e1 => e1._1 -> e1._2.map(e2 => (e2._2, e2._3)))
      sortedPartitionOffsetsByConsumerGroup = partitionOffsetsByConsumerGroup.map(e => e._1 -> e._2.sortBy(p => p._1.toInt).map(p => p._2))
    } yield sortedPartitionOffsetsByConsumerGroup
  }

  /** Maps each topic name to its partition node names read from /brokers/topics. */
  def getTopics(zkClient: ZkClient): Future[Map[String, Seq[String]]] = {
    return for {
      allTopicNodes <- getZChildren(zkClient, "/brokers/topics/*")
      allTopics = allTopicNodes.map(p => (p.path.split("/").filter(_ != "")(2), Seq[String]())).toMap
      partitions <- getZChildren(zkClient, "/brokers/topics/*/partitions/*")
      topics = partitions.map(p => (p.path.split("/").filter(_ != "")(2), p.name)).groupBy(_._1).map(e => e._1 -> e._2.map(_._2))
    } yield topics
  }

  /**
   * Runs `block` against every Zookeeper currently in Connected state, pairing
   * each with its live ZkClient from the Registry; empty when none are connected.
   */
  def connectedZookeepers[A](block: (Zookeeper, ZkClient) => A): Seq[A] = {
    val connectedZks = models.Zookeeper.findByStatusId(models.Status.Connected.id)

    val zkConnections: Map[String, ZkClient] = Registry.lookupObject(PropertyConstants.ZookeeperConnections) match {
      case Some(s: Map[_, _]) if connectedZks.size > 0 => s.asInstanceOf[Map[String, ZkClient]]
      case _ => Map()
    }

    zkConnections match {
      case _ if zkConnections.size > 0 => connectedZks.map(zk => block(zk, zkConnections.get(zk.name).get)).toSeq
      case _ => Seq.empty
    }

  }

  /** Resolves a slash-separated path (with "*" wildcards) to matching znodes. */
  def getZChildren(zkClient: ZkClient, path: String): Future[Seq[ZNode]] = {
    val nodes = path.split('/').filter(_ != "").toSeq

    getZChildren(zkClient("/"), nodes)
  }

  /**
   * Recursive worker for the wildcard path walk. "*" expands to all children
   * of the current node; missing nodes resolve to an empty result, not a failure.
   */
  def getZChildren(zNode: ZNode, path: Seq[String]): Future[Seq[ZNode]] = path match {

    case head +: tail if head == "*" => {

      val subtreesFuture = for {
        children <- twitterToScalaFuture(zNode.getChildren()).map(_.children).recover {
          case e: NoNodeException => Nil
        }
        subtrees <- Future.sequence(children.map(getZChildren(_, tail)))

      } yield subtrees

      subtreesFuture.map(_.flatten)
    }
    case head +: Nil => {
      twitterToScalaFuture(zNode(head).exists()).map(_ => Seq(zNode(head))).recover {
        case e: NoNodeException => Nil
      }
    }
    case head +: tail => getZChildren(zNode(head), tail)
    case Nil => Future(Seq(zNode))
  }

  /** Deletes the znode at `path`, recursing into children when non-empty. */
  def deleteZNode(zkClient: ZkClient, path: String): Future[ZNode] = {
    deleteZNode(zkClient(path))
  }

  /**
   * Deletes `zNode` at its current version. A NotEmptyException triggers a
   * recursive delete of the children first; a NoNodeException is treated as done.
   */
  def deleteZNode(zNode: ZNode): Future[ZNode] = {
    val delNode = twitterToScalaFuture(zNode.getData()).flatMap { d =>
      twitterToScalaFuture(zNode.delete(d.stat.getVersion)).recover {
        case e: NotEmptyException => {
          for {
            children <- getZChildren(zNode, Seq("*"))
            delChildren <- Future.sequence(children.map(n => deleteZNode(n)))
          } yield deleteZNode(zNode)
        }
        case e: NoNodeException => Future(ZNode)
      }
    }

    //TODO: investigate why actual type is Future[Object]
    delNode.asInstanceOf[Future[ZNode]]
  }
}
--------------------------------------------------------------------------------
/app/controllers/ApiConsole.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License,
Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc.{AnyContent, Action, Controller}

/** Serves the static API console files by delegating to the Assets controller. */
object ApiConsole extends Controller{
  def at(path: String, file: String): Action[AnyContent] = {
    Assets.at(path, file)
  }
}
--------------------------------------------------------------------------------
/app/controllers/Application.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc._

/** Entry point: renders the single-page application shell (views/index). */
object Application extends Controller {

  def index = Action { implicit request =>
    Ok(views.html.index())
  }
}
--------------------------------------------------------------------------------
/app/controllers/Broker.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
15 | */ 16 | 17 | package controllers 18 | 19 | import play.api.mvc.{Action, Controller} 20 | import scala.concurrent.Future 21 | import play.api.libs.concurrent.Execution.Implicits.defaultContext 22 | import common.Util._ 23 | import play.api.libs.json.{Writes, Json} 24 | import scala.Some 25 | import models.Zookeeper 26 | import com.twitter.zk.ZkClient 27 | 28 | object Broker extends Controller { 29 | 30 | implicit object BrokerWrites extends Writes[Seq[(String, Map[String, Any])]] { 31 | def writes(l: Seq[(String, Map[String, Any])]) = { 32 | val brokers = l.map { i => 33 | 34 | val fields = i._2.map { kv => 35 | kv._2 match { 36 | case v: Double => (kv._1, v.toInt.toString) 37 | case _ => (kv._1, kv._2.toString()) 38 | } 39 | } 40 | 41 | fields + ("zookeeper" -> i._1) 42 | } 43 | Json.toJson(brokers) 44 | } 45 | } 46 | 47 | def index = Action.async { 48 | val brokers = connectedZookeepers { (zk, zkClient) => getBrokers(zk, zkClient)} 49 | Future.sequence(brokers).map(l => Ok(Json.toJson(l.flatten))) 50 | } 51 | 52 | private def getBrokers(zk: Zookeeper, zkClient: ZkClient): Future[Seq[(String, Map[String, Any])]] = { 53 | return for { 54 | brokerIds <- getZChildren(zkClient, "/brokers/ids/*") 55 | brokers <- Future.sequence(brokerIds.map(brokerId => twitterToScalaFuture(brokerId.getData()))) 56 | } yield brokers.map(b => (zk.name, scala.util.parsing.json.JSON.parseFull(new String(b.bytes)).get.asInstanceOf[Map[String, Any]])) 57 | } 58 | 59 | } 60 | -------------------------------------------------------------------------------- /app/controllers/ConsumerGroup.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. 
You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc.{Action, Controller}
import common.Util._
import scala.concurrent.Future
import play.api.libs.json.Json
import play.api.libs.concurrent.Execution.Implicits.defaultContext

/**
 * Lists the consumer ids of a consumer group that are subscribed to a topic,
 * read from /consumers/&lt;group&gt;/ids in the given Zookeeper.
 */
object ConsumerGroup extends Controller {

  def show(consumerGroup: String, topic: String, zookeeper: String) = Action.async {
    // Only query the named Zookeeper, and only if it is currently connected.
    val connectedZks = connectedZookeepers((z, c) => (z, c)).filter(_._1.name == zookeeper)

    if (connectedZks.size > 0) {
      val (_, zkClient) = connectedZks.head
      val consumerIdsFuture = for {
        consumers <- getZChildren(zkClient, "/consumers/" + consumerGroup + "/ids/*")
        consumersData <- Future.sequence(consumers.map(c => twitterToScalaFuture(c.getData())))
        // NOTE(review): assumes each consumer id znode holds well-formed JSON
        // with a "subscription" map — `.get` throws otherwise; confirm.
        consumersMaps = consumersData.map(d => (d.name, scala.util.parsing.json.JSON.parseFull(new String(d.bytes)).get.asInstanceOf[Map[String, Any]]))
        // Keep only consumers whose subscription map mentions this topic.
        topicConsumerMaps = consumersMaps.filter { c =>
          c._2("subscription").asInstanceOf[Map[String, String]].get(topic) match {
            case Some(_) => true
            case _ => false
          }
        }
      } yield topicConsumerMaps.map(_._1)

      consumerIdsFuture.map(consumerIds => Ok(Json.toJson(consumerIds)))
    }
    else {
      // Unknown or disconnected Zookeeper: empty list rather than an error.
      Future(Ok(Json.toJson(List[String]())))
    }
  }

}
--------------------------------------------------------------------------------
/app/controllers/Group.scala:
--------------------------------------------------------------------------------
/*
 *
Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc.{Controller, Action}
import play.api.libs.json._

/** Exposes the Zookeepers of the built-in "ALL" group as JSON. */
object Group extends Controller {

  def index() = Action {
    // NOTE(review): assumes a group named "ALL" always exists in the database;
    // `.get` would throw NoSuchElementException otherwise — confirm seeding.
    val zookeepers = models.Group.findByName("ALL").get.zookeepers

    Ok(Json.toJson(zookeepers))
  }

}
--------------------------------------------------------------------------------
/app/controllers/IgnoreParamAssets.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc.{Controller, AnyContent, Action}

/**
 * Asset-serving shims whose extra parameters exist only so route patterns with
 * additional path segments can bind; the ignored parameters are never used.
 */
object IgnoreParamAssets extends Controller {

  /** Serves `file` under `path`, discarding two route-captured segments. */
  def at(path: String, file: String, ignoreParam: String, ignoreParam2: String): Action[AnyContent] = {
    Assets.at(path, file)
  }

  /** Serves `file` under `path`, discarding three route-captured segments. */
  def at2(path: String, file: String, ignoreParam: String, ignoreParam2: String, ignoreParam3: String): Action[AnyContent] = {
    Assets.at(path, file)
  }

}
--------------------------------------------------------------------------------
/app/controllers/OffsetHistory.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
15 | */ 16 | 17 | package controllers 18 | 19 | import play.api.libs.json.Json 20 | import play.api.mvc._ 21 | import models.OffsetPoint 22 | 23 | object OffsetHistory extends Controller { 24 | 25 | def show(consumerGroup: String, topic: String, zookeeper: String) = Action { 26 | 27 | models.Zookeeper.findByName(zookeeper) match { 28 | case Some(zk) => { 29 | models.OffsetHistory.findByZookeeperIdAndTopic(zk.id, topic) match { 30 | case Some(oH) => Ok(Json.toJson(OffsetPoint.findByOffsetHistoryIdAndConsumerGroup(oH.id, consumerGroup))) 31 | case _ => Ok(Json.toJson(Seq[String]())) 32 | } 33 | } 34 | case _ => Ok(Json.toJson(Seq[String]())) 35 | } 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /app/controllers/Settings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package controllers 18 | 19 | import play.api.mvc.{Controller, Action} 20 | import play.api.libs.json._ 21 | import play.api.data.{Forms, Form} 22 | import play.api.libs.concurrent.Akka 23 | import common.Message 24 | import play.api.data.Forms._ 25 | import scala.Some 26 | import models.Setting 27 | import play.api.Play.current 28 | import play.Logger 29 | 30 | object Settings extends Controller { 31 | 32 | def update() = Action { request => 33 | request.body.asJson match { 34 | case Some(JsArray(settings)) => { 35 | updateSettings(settings) 36 | Ok 37 | } 38 | case _ => BadRequest 39 | } 40 | } 41 | 42 | def index() = Action { 43 | Ok(Json.toJson(Setting.findAll)) 44 | } 45 | 46 | def updateSettings(settings : Seq[JsValue]) { 47 | settings.map { s => 48 | Setting.update(Setting(s.\("key").as[String], s.\("value").as[String])) 49 | Akka.system.actorSelection("akka://application/user/router") ! Message.SettingsUpdateNotification 50 | } 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /app/controllers/Topic.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
 */

package controllers

import play.api.mvc.{WebSocket, Action, Controller}
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import common.Util._
import play.api.libs.json._
import play.api.libs.json.JsObject
import play.api.libs.iteratee.{Concurrent, Iteratee}
import common.Registry
import common.Registry.PropertyConstants
import java.util
import kafka.javaapi.consumer.EventHandler
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.consumer.async.Consumer
import kafka.consumer.ConsumerConfig
import java.util.Properties
import com.twitter.zk.{ZNode, ZkClient}
import scala.util.Random
import okapies.finagle.Kafka
import kafka.api.OffsetRequest

/**
 * Topic endpoints: list all topics, show one topic's consumer groups and
 * partition offsets, and stream a live message feed over a WebSocket.
 */
object Topic extends Controller {

  // NOTE(review): TopicsWrites and TopicWrites below are character-identical
  // implementations — they serialize string values and Seq[Map[String,String]]
  // values of each map into a JSON array of objects. They could share one
  // Writes instance.
  object TopicsWrites extends Writes[Seq[Map[String, Object]]] {
    def writes(l: Seq[Map[String, Object]]) = {
      val topics = l.map { t =>

        val js = t.map { e =>
          val v = e._2 match {
            case v: Seq[_] => Json.toJson(e._2.asInstanceOf[Seq[Map[String, String]]])
            case v => Json.toJson(v.toString)
          }
          (e._1, v)
        }.toSeq

        JsObject(js)
      }
      JsArray(topics)
    }
  }

  object TopicWrites extends Writes[Seq[Map[String, Object]]] {
    def writes(l: Seq[Map[String, Object]]) = {
      val topic = l.map { t =>

        val js = t.map { e =>
          val v = e._2 match {
            case v: Seq[_] => Json.toJson(e._2.asInstanceOf[Seq[Map[String, String]]])
            case v => Json.toJson(v.toString)
          }
          (e._1, v)
        }.toSeq

        JsObject(js)
      }
      JsArray(topic)
    }
  }

  /** GET: all topics of all connected Zookeepers with partitions and log sizes. */
  def index = Action.async {
    val topicsZks = connectedZookeepers { (zk, zkClient) =>
      for {
        // it's possible to have topics without partitions in Zookeeper
        allTopicNodes <- getZChildren(zkClient, "/brokers/topics/*")
        allTopics = allTopicNodes.map(p => (p.path.split("/").filter(_ != "")(2), Seq[String]())).toMap
        partitions <- getZChildren(zkClient, "/brokers/topics/*/partitions/*")

        topics = partitions.map(p => (p.path.split("/").filter(_ != "")(2), p.name)).groupBy(_._1).map(e => e._1 -> e._2.map(_._2))

        // merge so partitionless topics still appear (with an empty partition list)
        topicsAndPartitionsAndZookeeper = (allTopics ++ topics).map(e => Map("name" -> e._1, "partitions" -> e._2, "zookeeper" -> zk.name)).toSeq

        topicsAndPartitionsAndZookeeperAndLogSize <- createTopicsInfo(topicsAndPartitionsAndZookeeper, zkClient)

      } yield topicsAndPartitionsAndZookeeperAndLogSize
    }

    Future.sequence(topicsZks).map(l => Ok(Json.toJson(l.flatten)(TopicsWrites)))
  }

  /** GET: one topic's consumer groups with offsets, lag and partition owners. */
  def show(topic: String, zookeeper: String) = Action.async {
    val connectedZks = connectedZookeepers((z, c) => (z, c)).filter(_._1.name == zookeeper)

    if (connectedZks.size > 0) {
      val (_, zkClient) = connectedZks.head

      val topicInfo = for {
        leaders <- getPartitionLeaders(topic, zkClient)
        partitionsLogSize <- getPartitionsLogSize(topic, leaders)
        owners <- getPartitionOwners(topic, zkClient)
        consumersAndPartitionOffsets <- getPartitionOffsets(topic, zkClient)
      } yield createTopicInfo(consumersAndPartitionOffsets, partitionsLogSize, owners)

      topicInfo.map(poc => Ok(Json.toJson(poc)(TopicWrites)))
    }
    else {
      Future(Ok(Json.toJson(List[String]())))
    }
  }

  /**
   * WebSocket streaming the topic's messages to the browser using a throwaway,
   * randomly-named consumer group.
   * NOTE(review): the consumer is shut down and its group znode deleted only in
   * the `in` iteratee's completion callback, i.e. on a clean socket close —
   * confirm abnormal disconnects also trigger it, otherwise consumers leak.
   */
  def feed(topic: String, zookeeper: String) = WebSocket.using[String] { implicit request =>

    val topicCountMap = new util.HashMap[EventHandler[String, String], Integer]()
    val zk = models.Zookeeper.findByName(zookeeper).get
    // Random suffix keeps concurrent feed sessions in separate consumer groups.
    val consumerGroup = "web-console-consumer-" + Random.nextInt(100000)
    val consumer = Consumer.create(createConsumerConfig(zk.toString, consumerGroup))
    val zkClient = Registry.lookupObject(PropertyConstants.ZookeeperConnections).get.asInstanceOf[Map[String, ZkClient]](zookeeper)

    val out = Concurrent.unicast[String] { channel: Concurrent.Channel[String] =>

      // Push every consumed message straight into the WebSocket channel.
      val cb = (messageHolder: MessageAndMetadata[String, String]) => {
        channel.push(messageHolder.message)
      }

      // One stream sized to the topic's partition count.
      getZChildren(zkClient, "/brokers/topics/" + topic + "/partitions/*").map { p =>
        topicCountMap.put(new EventHandler(topic, cb), p.size)
        consumer.createMessageStreams(topicCountMap, new StringDecoder(), new StringDecoder())
      }

    }

    // Cleanup runs when the client side of the socket completes.
    val in = Iteratee.foreach[String](println).map { _ =>
      consumer.commitOffsets()
      consumer.shutdown()
      deleteZNode(zkClient, "/consumers/" + consumerGroup)
    }

    (in, out)
  }

  /** Builds the consumer configuration for a feed session. */
  private def createConsumerConfig(zookeeperAddress: String, gid: String): ConsumerConfig = {
    val props = new Properties()
    props.put("zookeeper.connect", zookeeperAddress)
    props.put("group.id", gid)
    props.put("zookeeper.session.timeout.ms", "400")
    props.put("zookeeper.sync.time.ms", "200")
    props.put("auto.commit.interval.ms", "1000")

    return new ConsumerConfig(props)
  }

  /** Returns (consumerGroup, partitionId, ownerId) triples for the topic. */
  private def getPartitionOwners(topicName: String, zkClient: ZkClient): Future[Seq[(String, Int, String)]] = {
    return for {
      owners <- getZChildren(zkClient, "/consumers/*/owners/" + topicName + "/" + "*")
      ownerIds <- Future.sequence(owners.map(z => twitterToScalaFuture(z.getData().map(d => (z.path.split("/")(2), z.path.split("/")(5).toInt, new String(d.bytes))))))
    } yield ownerIds
  }

  /** Aggregates, per consumer group, total offset, total lag and per-partition details. */
  private def createTopicInfo(consumersAndPartitionOffsets: Map[String, Seq[Long]], partitionsLogSize: Seq[Long],
                              owners: Seq[(String, Int, String)]): Seq[Map[String, Object]] = {

    consumersAndPartitionOffsets.map { cPO =>
      val offsetSum = cPO._2.map(_.toInt).foldLeft(0)(_ + _)
      val partitionsLogSizeSum = partitionsLogSize.foldLeft(0.0)(_ + _).toInt

      Map("consumerGroup" -> cPO._1,
        "offset" -> offsetSum.toString,
        "lag" -> (partitionsLogSizeSum - offsetSum).toString,
        "partitions" -> createPartitionInfo(cPO, partitionsLogSize, owners))
    }.toSeq
  }

  /** Augments each topic map with per-partition leader/log-size info and a log-size total. */
  private def createTopicsInfo(topics: Seq[Map[String, Object]], zkClient: ZkClient): Future[Seq[Map[String, Object]]] = {
    Future.sequence(topics.map { e =>
      for {
        partitionLeaders <- getPartitionLeaders(e("name").toString, zkClient)
        partitionsLogSize <- getPartitionsLogSize(e("name").toString, partitionLeaders)
        partitions = partitionsLogSize.zipWithIndex.map(pls => Map("id" -> pls._2.toString, "logSize" -> pls._1.toString, "leader" -> partitionLeaders(pls._2)))
        logSizeSum = partitionsLogSize.foldLeft(0.0)(_ + _).toInt.toString
      } yield Map("name" -> e("name"), "partitions" -> partitions, "zookeeper" -> e("zookeeper"), "logSize" -> logSizeSum)

    })
  }

  /** Per-partition offset/lag/owner rows for one consumer group; owner "" when unowned. */
  private def createPartitionInfo(consumerGroupAndPartitionOffsets: (String, Seq[Long]),
                                  partitionsLogSize: Seq[Long],
                                  owners: Seq[(String, Int, String)]): Seq[Map[String, String]] = {

    consumerGroupAndPartitionOffsets._2.zipWithIndex.map { case (pO, i) =>
      Map("id" -> i.toString, "offset" -> pO.toString, "lag" -> (partitionsLogSize(i) - pO).toString,
        "owner" -> {
          owners.find(o => (o._1 == consumerGroupAndPartitionOffsets._1) && (o._2 == i)) match {
            case Some(s) => s._3
            case None => ""
          }
        })
    }
  }

}
--------------------------------------------------------------------------------
/app/controllers/Zookeeper.scala:
--------------------------------------------------------------------------------
/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in
compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package controllers

import play.api.mvc._
import play.api.data.{Form, Forms}
import play.api.data.Forms._
import play.api.libs.json.Json
import common.{Message, Registry}
import Registry.PropertyConstants
import play.api.libs.iteratee.{Enumerator, Iteratee}
import play.api.Play.current
import play.api.libs.concurrent.Akka

/**
 * CRUD controller for registered Zookeeper instances plus a WebSocket feed of
 * connection-status updates. Connect/disconnect work is delegated to the
 * router actor.
 */
object Zookeeper extends Controller {

  // form mapping for create(): (name, host, port, group, optional chroot)
  val zookeeperForm = Forms.tuple(
    "name" -> Forms.text,
    "host" -> Forms.text,
    "port" -> Forms.number,
    "group" -> Forms.text,
    "chroot" -> optional(Forms.text)
  )

  /** GET: zookeepers of one group; the special group "ALL" returns every one. */
  def index(group: String) = Action {
    val groupName = group.toUpperCase

    if (groupName == "ALL") {
      Ok(Json.toJson(models.Zookeeper.findAll.toSeq))
    }
    else {
      models.Group.findByName(groupName) match {
        case Some(z) => Ok(Json.toJson(z.zookeepers))
        case _ => Ok(Json.toJson(List[String]()))
      }
    }
  }

  /** POST: registers a zookeeper and asks the router actor to connect to it. */
  def create() = Action { implicit request =>
    Form(zookeeperForm).bindFromRequest.fold(
      formFailure => BadRequest,
      formSuccess => {

        val (name, host, port, group, chrootOpt) = formSuccess
        val chroot = chrootOpt.getOrElse("")

        val zk = models.Zookeeper.insert(models.Zookeeper(name, host, port,
          models.Group.findByName(group.toUpperCase).get.id, models.Status.Disconnected.id, chroot))

        Akka.system.actorSelection("akka://application/user/router") ! Message.Connect(zk)
        Created
      }
    )
  }

  /**
   * DELETE: unregisters a zookeeper and asks the router actor to disconnect.
   * Returns 404 for an unknown name (the previous .get threw, yielding a 500);
   * also deletes the already-fetched record instead of re-fetching it by id.
   */
  def delete(name: String) = Action {
    models.Zookeeper.findByName(name) match {
      case Some(zk) =>
        models.Zookeeper.delete(zk)
        Akka.system.actorSelection("akka://application/user/router") ! Message.Disconnect(zk)
        NoContent
      case None => NotFound
    }
  }

  /** WebSocket: one-way feed of status broadcasts; client input is ignored. */
  def feed() = WebSocket.using[String] { implicit request =>

    val in = Iteratee.ignore[String]

    // the broadcast (Enumerator, Channel) pair is registered elsewhere at startup
    val out = Registry.lookupObject(PropertyConstants.BroadcastChannel) match {
      case Some(broadcastChannel: (_, _)) => broadcastChannel._1.asInstanceOf[Enumerator[String]]
      case _ => Enumerator.empty[String]
    }

    (in, out)
  }
}

--------------------------------------------------------------------------------
/app/kafka/consumer/async/ConsumerConnector.scala:
--------------------------------------------------------------------------------
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package kafka.consumer.async

import scala.collection._
import kafka.consumer.{ConsumerConfig, TopicFilter}
import kafka.serializer.{DefaultDecoder, Decoder}
import kafka.utils.Logging
import kafka.javaapi.consumer.EventHandler
import kafka.message.MessageAndMetadata

/**
 * Asynchronous variant of Kafka's consumer connector: instead of returning
 * iterable streams, subscriptions carry an EventHandler/callback that is
 * invoked for every consumed message.
 */
trait ConsumerConnector {

  /**
   * Create a list of MessageStreams for each topic.
   *
   * @param topicCountMap a map of (topic, #streams) pair
   * @return a map of (topic, list of  KafkaStream) pairs.
   *         The number of items in the list is #streams. Each stream supports
   *         an iterator over message/metadata pairs.
   */
  def createMessageStreams(topicCountMap: Map[EventHandler[Array[Byte], Array[Byte]], Int])

  /**
   * Create a list of MessageStreams for each topic.
   *
   * @param topicCountMap a map of (topic, #streams) pair
   * @param keyDecoder Decoder to decode the key portion of the message
   * @param valueDecoder Decoder to decode the value portion of the message
   * @return a map of (topic, list of  KafkaStream) pairs.
   *         The number of items in the list is #streams. Each stream supports
   *         an iterator over message/metadata pairs.
   */
  def createMessageStreams[K,V](topicCountMap: Map[EventHandler[K, V], Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])

  /**
   * Create a list of message streams for all topics that match a given filter.
   *
   * @param topicFilter Either a Whitelist or Blacklist TopicFilter object.
   * @param numStreams Number of streams to return
   * @param keyDecoder Decoder to decode the key portion of the message
   * @param valueDecoder Decoder to decode the value portion of the message
   * @param cb callback invoked for every consumed message
   */
  def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter,
                                        numStreams: Int,
                                        keyDecoder: Decoder[K] = new DefaultDecoder(),
                                        valueDecoder: Decoder[V] = new DefaultDecoder(),
                                        cb: MessageAndMetadata[K,V] => Unit)

  /**
   * Commit the offsets of all broker partitions connected by this connector.
   */
  def commitOffsets

  /**
   * Shut down the connector
   */
  def shutdown()
}

/** Factory for async consumer connectors. */
object Consumer extends Logging {
  /**
   * Create a ConsumerConnector
   *
   * @param config at the minimum, need to specify the groupid of the consumer and the zookeeper
   *               connection string zookeeper.connect.
   */
  def create(config: ConsumerConfig): kafka.javaapi.consumer.AsyncConsumerConnector =
    // was a byte-for-byte copy of createJavaAsyncConsumerConnector; delegate instead
    createJavaAsyncConsumerConnector(config)

  /**
   * Create a ConsumerConnector
   *
   * @param config at the minimum, need to specify the groupid of the consumer and the zookeeper
   *               connection string zookeeper.connect.
   */
  def createJavaAsyncConsumerConnector(config: ConsumerConfig): kafka.javaapi.consumer.AsyncConsumerConnector = {
    new kafka.javaapi.consumer.ZookeeperAsyncConsumerConnector(config)
  }

}

--------------------------------------------------------------------------------
/app/kafka/consumer/async/ConsumerFetcherManager.scala:
--------------------------------------------------------------------------------
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
*/

package kafka.consumer.async

import org.I0Itec.zkclient.ZkClient
import kafka.server.{BrokerAndInitialOffset, AbstractFetcherThread, AbstractFetcherManager}
import kafka.cluster.{Cluster, Broker}
import scala.collection.immutable
import scala.collection.Map
import collection.mutable.HashMap
import scala.collection.mutable
import java.util.concurrent.locks.ReentrantLock
import kafka.utils.Utils.inLock
import kafka.utils.ZkUtils._
import kafka.utils.{ShutdownableThread, SystemTime}
import kafka.common.TopicAndPartition
import kafka.client.ClientUtils
import java.util.concurrent.atomic.AtomicInteger
import kafka.consumer.ConsumerConfig

/**
 * Usage:
 * Once ConsumerFetcherManager is created, startConnections() and stopAllConnections() can be called repeatedly
 * until shutdown() is called.
 */
class ConsumerFetcherManager(private val consumerIdString: String,
                             private val config: ConsumerConfig,
                             private val zkClient : ZkClient)
  extends AbstractFetcherManager("ConsumerFetcherManager-%d".format(SystemTime.milliseconds),
    config.clientId, config.numConsumerFetchers) {
  // partitionMap and noLeaderPartitionSet are mutated under `lock`;
  // `cond` wakes the leader-finder thread when leaderless partitions appear
  private var partitionMap: immutable.Map[TopicAndPartition, PartitionTopicInfo[_,_]] = null
  private var cluster: Cluster = null
  private val noLeaderPartitionSet = new mutable.HashSet[TopicAndPartition]
  private val lock = new ReentrantLock
  private val cond = lock.newCondition()
  private var leaderFinderThread: ShutdownableThread = null
  // correlation id attached to outgoing metadata requests
  private val correlationId = new AtomicInteger(0)

  private class LeaderFinderThread(name: String) extends ShutdownableThread(name) {
    // thread responsible for adding the fetcher to the right broker when leader is available
    override def doWork() {
      val leaderForPartitionsMap = new HashMap[TopicAndPartition, Broker]
      lock.lock()
      try {
        // block until some partition needs a leader
        while (noLeaderPartitionSet.isEmpty) {
          trace("No partition for leader election.")
          cond.await()
        }

        trace("Partitions without leader %s".format(noLeaderPartitionSet))
        val brokers = getAllBrokersInCluster(zkClient)
        // fetch metadata for all leaderless topics in one request
        val topicsMetadata = ClientUtils.fetchTopicMetadata(noLeaderPartitionSet.map(m => m.topic).toSet,
          brokers,
          config.clientId,
          config.socketTimeoutMs,
          correlationId.getAndIncrement).topicsMetadata
        if(logger.isDebugEnabled) topicsMetadata.foreach(topicMetadata => debug(topicMetadata.toString()))
        topicsMetadata.foreach { tmd =>
          val topic = tmd.topic
          tmd.partitionsMetadata.foreach { pmd =>
            val topicAndPartition = TopicAndPartition(topic, pmd.partitionId)
            // record any partition whose leader is now known and drop it from the pending set
            if(pmd.leader.isDefined && noLeaderPartitionSet.contains(topicAndPartition)) {
              val leaderBroker = pmd.leader.get
              leaderForPartitionsMap.put(topicAndPartition, leaderBroker)
              noLeaderPartitionSet -= topicAndPartition
            }
          }
        }
      } catch {
        case t: Throwable => {
          if (!isRunning.get())
            throw t /* If this thread is stopped, propagate this exception to kill the thread. */
          else
            warn("Failed to find leader for %s".format(noLeaderPartitionSet), t)
        }
      } finally {
        lock.unlock()
      }

      try {
        // register fetchers for the partitions whose leader was just resolved,
        // starting from each partition's current fetch offset
        addFetcherForPartitions(leaderForPartitionsMap.map{
          case (topicAndPartition, broker) =>
            topicAndPartition -> BrokerAndInitialOffset(broker, partitionMap(topicAndPartition).getFetchOffset())}
        )
      } catch {
        case t: Throwable => {
          if (!isRunning.get())
            throw t /* If this thread is stopped, propagate this exception to kill the thread. */
          else {
            // put the partitions back so the next doWork() iteration retries them
            warn("Failed to add leader for partitions %s; will retry".format(leaderForPartitionsMap.keySet.mkString(",")), t)
            lock.lock()
            // NOTE(review): unlock is not in a finally here — if ++= threw,
            // the lock would leak; confirm this is acceptable
            noLeaderPartitionSet ++= leaderForPartitionsMap.keySet
            lock.unlock()
          }
        }
      }

      shutdownIdleFetcherThreads()
      Thread.sleep(config.refreshLeaderBackoffMs)
    }
  }

  /** Builds the per-broker fetcher thread used by AbstractFetcherManager. */
  override def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread = {
    new ConsumerFetcherThread(
      "ConsumerFetcherThread-%s-%d-%d".format(consumerIdString, fetcherId, sourceBroker.id),
      config, sourceBroker, partitionMap, this)
  }

  /**
   * Starts the leader-finder thread and seeds it with every partition of the
   * given topic infos (all start out leaderless until metadata is fetched).
   */
  def startConnections(topicInfos: Iterable[PartitionTopicInfo[_,_]], cluster: Cluster) {
    leaderFinderThread = new LeaderFinderThread(consumerIdString + "-leader-finder-thread")
    leaderFinderThread.start()

    inLock(lock) {
      partitionMap = topicInfos.map(tpi => (TopicAndPartition(tpi.topic, tpi.partitionId), tpi)).toMap
      this.cluster = cluster
      noLeaderPartitionSet ++= topicInfos.map(tpi => TopicAndPartition(tpi.topic, tpi.partitionId))
      cond.signalAll()
    }
  }

  /** Stops the leader finder, then all fetchers, then clears internal state. */
  def stopConnections() {
    /*
     * Stop the leader finder thread first before stopping fetchers. Otherwise, if there are more partitions without
     * leader, then the leader finder thread will process these partitions (before shutting down) and add fetchers for
     * these partitions.
     */
    info("Stopping leader finder thread")
    if (leaderFinderThread != null) {
      leaderFinderThread.shutdown()
      leaderFinderThread = null
    }

    info("Stopping all fetchers")
    closeAllFetchers()

    // no need to hold the lock for the following since leaderFindThread and all fetchers have been stopped
    partitionMap = null
    noLeaderPartitionSet.clear()

    info("All connections stopped")
  }

  /** Re-queues partitions whose fetch failed so the leader finder retries them. */
  def addPartitionsWithError(partitionList: Iterable[TopicAndPartition]) {
    debug("adding partitions with error %s".format(partitionList))
    inLock(lock) {
      if (partitionMap != null) {
        noLeaderPartitionSet ++= partitionList
        cond.signalAll()
      }
    }
  }
}

--------------------------------------------------------------------------------
/app/kafka/consumer/async/ConsumerFetcherThread.scala:
--------------------------------------------------------------------------------
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

/*
 * Copyright 2014 Claude Mamo
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package kafka.consumer.async

import kafka.cluster.Broker
import kafka.server.AbstractFetcherThread
import kafka.message.ByteBufferMessageSet
import kafka.api.{Request, OffsetRequest, FetchResponsePartitionData}
import kafka.common.TopicAndPartition
import kafka.consumer.ConsumerConfig


/**
 * Fetcher thread for the async consumer: fetched message sets are handed to
 * each partition's PartitionTopicInfo (which invokes the registered callback)
 * instead of being queued for an iterator.
 */
class ConsumerFetcherThread(name: String,
                            val config: ConsumerConfig,
                            sourceBroker: Broker,
                            partitionMap: Map[TopicAndPartition, PartitionTopicInfo[_,_]],
                            val consumerFetcherManager: ConsumerFetcherManager)
  extends AbstractFetcherThread(name = name,
    clientId = config.clientId + "-" + name,
    sourceBroker = sourceBroker,
    socketTimeout = config.socketTimeoutMs,
    socketBufferSize = config.socketReceiveBufferBytes,
    fetchSize = config.fetchMessageMaxBytes,
    fetcherBrokerId = Request.OrdinaryConsumerId,
    maxWait = config.fetchWaitMaxMs,
    minBytes = config.fetchMinBytes,
    isInterruptible = true) {

  // process fetched data
  def processPartitionData(topicAndPartition: TopicAndPartition, fetchOffset: Long, partitionData: FetchResponsePartitionData) {
    val pti = partitionMap(topicAndPartition)
    // sanity check: the fetch must have been issued from the offset we track
    if (pti.getFetchOffset != fetchOffset)
      throw new RuntimeException("Offset doesn't match for partition [%s,%d] pti offset: %d fetch offset: %d"
        .format(topicAndPartition.topic, topicAndPartition.partition, pti.getFetchOffset, fetchOffset))
    // dispatch the messages to the partition's registered callback
    pti.doInvoke(partitionData.messages.asInstanceOf[ByteBufferMessageSet])
  }

  // handle a partition whose offset is out of range and return a new fetch offset
  def handleOffsetOutOfRange(topicAndPartition: TopicAndPartition): Long = {
    var startTimestamp : Long = 0
    // auto.offset.reset decides whether to restart from the earliest or latest offset
    config.autoOffsetReset match {
      case OffsetRequest.SmallestTimeString => startTimestamp = OffsetRequest.EarliestTime
      case OffsetRequest.LargestTimeString => startTimestamp = OffsetRequest.LatestTime
      case _ => startTimestamp = OffsetRequest.LatestTime
    }
    val newOffset = simpleConsumer.earliestOrLatestOffset(topicAndPartition, startTimestamp, Request.OrdinaryConsumerId)
    val pti = partitionMap(topicAndPartition)
    // keep both tracked offsets in sync with the reset position
    pti.resetFetchOffset(newOffset)
    pti.resetConsumeOffset(newOffset)
    newOffset
  }

  // any logic for partitions whose leader has changed
  def handlePartitionsWithErrors(partitions: Iterable[TopicAndPartition]) {
    removePartitions(partitions.toSet)
    // hand the partitions back to the manager so its leader finder retries them
    consumerFetcherManager.addPartitionsWithError(partitions)
  }
}

--------------------------------------------------------------------------------
/app/kafka/consumer/async/PartitionTopicInfo.scala:
--------------------------------------------------------------------------------
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | /* 19 | * Copyright 2014 Claude Mamo 20 | * 21 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 22 | * use this file except in compliance with the License. You may obtain a copy of 23 | * the License at 24 | * 25 | * http://www.apache.org/licenses/LICENSE-2.0 26 | * 27 | * Unless required by applicable law or agreed to in writing, software 28 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 29 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 30 | * License for the specific language governing permissions and limitations under 31 | * the License. 
*/

package kafka.consumer.async

import java.util.concurrent.atomic._
import kafka.message._
import kafka.utils.{Utils, Logging}
import kafka.consumer.ConsumerTopicStatsRegistry
import kafka.common.KafkaException
import kafka.serializer.Decoder

/**
 * Tracks the consume/fetch offsets of one partition for the async consumer
 * and dispatches fetched messages to the registered callback `cb`.
 */
class PartitionTopicInfo[K, V](val topic: String,
                               val partitionId: Int,
                               val cb: MessageAndMetadata[K, V] => Unit,
                               val keyDecoder: Decoder[K],
                               val valueDecoder: Decoder[V],
                               private val consumedOffset: AtomicLong,
                               private val fetchedOffset: AtomicLong,
                               private val fetchSize: AtomicInteger,
                               private val clientId: String) extends Logging {

  debug("initial consumer offset of " + this + " is " + consumedOffset.get)
  debug("initial fetch offset of " + this + " is " + fetchedOffset.get)

  private val consumerTopicStats = ConsumerTopicStatsRegistry.getConsumerTopicStat(clientId)

  /** Offset of the last message handed to the callback. */
  def getConsumeOffset() = consumedOffset.get

  /** Offset the fetcher thread will request next. */
  def getFetchOffset() = fetchedOffset.get

  def resetConsumeOffset(newConsumeOffset: Long) = {
    consumedOffset.set(newConsumeOffset)
    debug("reset consume offset of " + this + " to " + newConsumeOffset)
  }

  def resetFetchOffset(newFetchOffset: Long) = {
    fetchedOffset.set(newFetchOffset)
    debug("reset fetch offset of ( %s ) to %d".format(this, newFetchOffset))
  }

  /**
   * Invoke consumer callback.
   */
  def doInvoke(messages: ByteBufferMessageSet) {
    val size = messages.validBytes

    if (size > 0) {
      // advance the fetch offset past the last complete message in the set
      val next = messages.shallowIterator.toSeq.last.nextOffset
      trace("Updating fetch offset = " + fetchedOffset.get + " to " + next)
      invoke(messages)
      fetchedOffset.set(next)
      debug("updated fetch offset of (%s) to %d".format(this, next))
      consumerTopicStats.getConsumerTopicStats(topic).byteRate.mark(size)
      consumerTopicStats.getConsumerAllTopicStats().byteRate.mark(size)
    } else if (messages.sizeInBytes > 0) {
      // NOTE(review): non-empty set with no valid bytes is still dispatched —
      // presumably so downstream can surface partial/oversized messages; confirm
      invoke(messages)
    }
  }

  override def toString(): String = topic + ":" + partitionId.toString + ": fetched offset = " + fetchedOffset.get +
    ": consumed offset = " + consumedOffset.get

  /** Runs the callback for each message past the current consume offset. */
  def invoke(messages: ByteBufferMessageSet) {

    for (currentMessage <- messages) {
      // process messages that have not been consumed
      if (currentMessage.offset > getConsumeOffset) {
        currentMessage.message.ensureValid()

        if (getConsumeOffset() < 0)
          throw new KafkaException("Offset returned by the message set is invalid %d".format(getConsumeOffset))

        resetConsumeOffset(currentMessage.offset)
        trace("Setting %s consumed offset to %d".format(topic, consumedOffset))

        consumerTopicStats.getConsumerTopicStats(topic).messageRate.mark()
        consumerTopicStats.getConsumerAllTopicStats().messageRate.mark()

        cb(new MessageAndMetadata(topic, partitionId, currentMessage.message, currentMessage.offset, keyDecoder, valueDecoder))
      }
    }
  }
}

object PartitionTopicInfo {
  // sentinel meaning "no valid offset known yet"
  val InvalidOffset = -1L

  def isOffsetInvalid(offset: Long) = offset < 0L
}

--------------------------------------------------------------------------------
/app/kafka/javaapi/consumer/AsyncConsumerConnector.java:
--------------------------------------------------------------------------------
/**
 * Licensed
to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | /* 19 | * Copyright 2014 Claude Mamo 20 | * 21 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 22 | * use this file except in compliance with the License. You may obtain a copy of 23 | * the License at 24 | * 25 | * http://www.apache.org/licenses/LICENSE-2.0 26 | * 27 | * Unless required by applicable law or agreed to in writing, software 28 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 29 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 30 | * License for the specific language governing permissions and limitations under 31 | * the License. 32 | */ 33 | 34 | package kafka.javaapi.consumer; 35 | 36 | import kafka.consumer.TopicFilter; 37 | import kafka.message.MessageAndMetadata; 38 | import kafka.serializer.Decoder; 39 | import org.I0Itec.zkclient.ZkClient; 40 | import scala.Function1; 41 | import scala.Unit; 42 | 43 | import java.util.Map; 44 | 45 | public interface AsyncConsumerConnector { 46 | /** 47 | * Create a list of MessageStreams of type T for each topic. 
48 | * 49 | * @param topicCountMap a map of (topic, callback) pair 50 | * @param keyDecoder a decoder that decodes the message key 51 | * @param valueDecoder a decoder that decodes the message itself 52 | * @return a map of (topic, list of KafkaStream) pairs. 53 | * The number of items in the list is #streams. Each stream supports 54 | * an iterator over message/metadata pairs. 55 | */ 56 | public void 57 | createMessageStreams(Map, Integer> topicCountMap, Decoder keyDecoder, Decoder valueDecoder); 58 | 59 | public void createMessageStreams(Map, Integer> topicCountMap); 60 | 61 | /** 62 | * Create a list of MessageAndTopicStreams containing messages of type T. 63 | * 64 | * @param topicFilter a TopicFilter that specifies which topics to 65 | * subscribe to (encapsulates a whitelist or a blacklist). 66 | * @param numStreams the number of message streams to return. 67 | * @param keyDecoder a decoder that decodes the message key 68 | * @param valueDecoder a decoder that decodes the message itself 69 | * @return a list of KafkaStream. Each stream supports an 70 | * iterator over its MessageAndMetadata elements. 71 | */ 72 | public void 73 | createMessageStreamsByFilter(TopicFilter topicFilter, int numStreams, Decoder keyDecoder, Decoder valueDecoder, 74 | Function1, Unit> cb); 75 | 76 | public void createMessageStreamsByFilter(TopicFilter topicFilter, int numStreams, Function1, Unit> cb); 77 | 78 | public void createMessageStreamsByFilter(TopicFilter topicFilter, Function1, Unit> cb); 79 | 80 | /** 81 | * Commit the offsets of all broker partitions connected by this connector. 
82 | */ 83 | public void commitOffsets(); 84 | 85 | /** 86 | * Shut down the connector 87 | */ 88 | public void shutdown(); 89 | } 90 | -------------------------------------------------------------------------------- /app/kafka/javaapi/consumer/EventHandler.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | /* 19 | * Copyright 2014 Claude Mamo 20 | * 21 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 22 | * use this file except in compliance with the License. You may obtain a copy of 23 | * the License at 24 | * 25 | * http://www.apache.org/licenses/LICENSE-2.0 26 | * 27 | * Unless required by applicable law or agreed to in writing, software 28 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 29 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 30 | * License for the specific language governing permissions and limitations under 31 | * the License. 
32 | */ 33 | 34 | package kafka.javaapi.consumer 35 | 36 | import kafka.message.MessageAndMetadata 37 | 38 | 39 | class EventHandler[K, V](val topic: String, 40 | val cb: MessageAndMetadata[K, V] => Unit) 41 | 42 | -------------------------------------------------------------------------------- /app/kafka/javaapi/consumer/ZookeeperAsyncConsumerConnector.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | /* 19 | * Copyright 2014 Claude Mamo 20 | * 21 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 22 | * use this file except in compliance with the License. You may obtain a copy of 23 | * the License at 24 | * 25 | * http://www.apache.org/licenses/LICENSE-2.0 26 | * 27 | * Unless required by applicable law or agreed to in writing, software 28 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 29 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 30 | * License for the specific language governing permissions and limitations under 31 | * the License. 
32 | */ 33 | 34 | package kafka.javaapi.consumer 35 | 36 | import kafka.serializer._ 37 | import scala.collection.mutable 38 | import scala.collection.JavaConversions 39 | import kafka.consumer.{ConsumerConfig, TopicFilter} 40 | import kafka.message.MessageAndMetadata 41 | import org.I0Itec.zkclient.ZkClient 42 | 43 | 44 | /** 45 | * This class handles the consumers interaction with zookeeper 46 | * 47 | * Directories: 48 | * 1. Consumer id registry: 49 | * /consumers/[group_id]/ids[consumer_id] -> topic1,...topicN 50 | * A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode 51 | * and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone. 52 | * A consumer subscribes to event changes of the consumer id registry within its group. 53 | * 54 | * The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential 55 | * ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out 56 | * whether the creation of a sequential znode has succeeded or not. More details can be found at 57 | * (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling) 58 | * 59 | * 2. Broker node registry: 60 | * /brokers/[0...N] --> { "host" : "host:port", 61 | * "topics" : {"topic1": ["partition1" ... "partitionN"], ..., 62 | * "topicN": ["partition1" ... "partitionN"] } } 63 | * This is a list of all present broker brokers. A unique logical node id is configured on each broker node. A broker 64 | * node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode 65 | * is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that 66 | * the broker serves, (3) a list of logical partitions assigned to each topic on the broker. 
67 | * A consumer subscribes to event changes of the broker node registry. 68 | * 69 | * 3. Partition owner registry: 70 | * /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id 71 | * This stores the mapping before broker partitions and consumers. Each partition is owned by a unique consumer 72 | * within a consumer group. The mapping is reestablished after each rebalancing. 73 | * 74 | * 4. Consumer offset tracking: 75 | * /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value 76 | * Each consumer tracks the offset of the latest message consumed for each partition. 77 | * 78 | */ 79 | 80 | private[kafka] class ZookeeperAsyncConsumerConnector(val config: ConsumerConfig, 81 | val enableFetcher: Boolean) extends AsyncConsumerConnector { 82 | 83 | private val underlying = new kafka.consumer.async.ZookeeperConsumerConnector(config, enableFetcher) 84 | 85 | def this(config: ConsumerConfig) = this(config, true) 86 | 87 | // for java client 88 | override def createMessageStreams[K, V](topicCountMap: java.util.Map[EventHandler[K, V], java.lang.Integer], 89 | keyDecoder: Decoder[K], 90 | valueDecoder: Decoder[V]) { 91 | 92 | val scalaTopicCountMap: Map[EventHandler[K, V], Int] = { 93 | import JavaConversions._ 94 | Map.empty[EventHandler[K, V], Int] ++ (topicCountMap.asInstanceOf[java.util.Map[EventHandler[K, V], Int]]: mutable.Map[EventHandler[K, V], Int]) 95 | } 96 | underlying.consume(scalaTopicCountMap, keyDecoder, valueDecoder) 97 | } 98 | 99 | override def createMessageStreams(topicCountMap: java.util.Map[EventHandler[Array[Byte], Array[Byte]], java.lang.Integer]) { 100 | createMessageStreams(topicCountMap, new DefaultDecoder(), new DefaultDecoder()) 101 | } 102 | 103 | override def createMessageStreamsByFilter[K, V](topicFilter: TopicFilter, numStreams: Int, keyDecoder: Decoder[K], valueDecoder: Decoder[V], 104 | cb: MessageAndMetadata[K, V] => Unit) { 105 | import JavaConversions._ 106 | 
underlying.createMessageStreamsByFilter(topicFilter, numStreams, keyDecoder, valueDecoder, cb) 107 | } 108 | 109 | override def createMessageStreamsByFilter(topicFilter: TopicFilter, numStreams: Int, cb: MessageAndMetadata[Array[Byte], Array[Byte]] => Unit) { 110 | createMessageStreamsByFilter(topicFilter, numStreams, new DefaultDecoder(), new DefaultDecoder(), cb) 111 | } 112 | 113 | override def createMessageStreamsByFilter(topicFilter: TopicFilter, cb: MessageAndMetadata[Array[Byte], Array[Byte]] => Unit) { 114 | createMessageStreamsByFilter(topicFilter, 1, new DefaultDecoder(), new DefaultDecoder(), cb) 115 | } 116 | 117 | override def commitOffsets() { 118 | underlying.commitOffsets 119 | } 120 | 121 | override def shutdown() { 122 | underlying.shutdown 123 | } 124 | 125 | } 126 | -------------------------------------------------------------------------------- /app/models/Database.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.PrimitiveTypeMode._ 20 | import org.squeryl.Schema 21 | 22 | object Database extends Schema { 23 | val zookeepersTable = table[Zookeeper]("zookeepers") 24 | val groupsTable = table[Group]("groups") 25 | val statusTable = table[Status]("status") 26 | val offsetPointsTable = table[OffsetPoint]("offsetPoints") 27 | val offsetHistoryTable = table[OffsetHistory]("offsetHistory") 28 | val settingsTable = table[Setting]("settings") 29 | 30 | val groupToZookeepers = oneToManyRelation(groupsTable, zookeepersTable).via((group, zk) => group.id === zk.groupId) 31 | val statusToZookeepers = oneToManyRelation(statusTable, zookeepersTable).via((status, zk) => status.id === zk.statusId) 32 | val offsetHistoryToOffsetPoints = oneToManyRelation(offsetHistoryTable, offsetPointsTable).via((offsetHistory, offsetPoint) => offsetHistory.id === offsetPoint.offsetHistoryId) 33 | val zookeeperToOffsetHistories = oneToManyRelation(zookeepersTable, offsetHistoryTable).via((zookeeper, offsetHistory) => zookeeper.id === offsetHistory.zookeeperId) 34 | 35 | on(this.zookeepersTable) { 36 | zookeeper => 37 | declare( 38 | zookeeper.id is (primaryKey, autoIncremented), 39 | zookeeper.name is (unique) 40 | ) 41 | } 42 | 43 | on(this.groupsTable) { 44 | group => 45 | declare( 46 | group.id is (primaryKey, autoIncremented), 47 | group.name is (unique) 48 | ) 49 | } 50 | 51 | on(this.statusTable) { 52 | status => 53 | declare( 54 | status.id is (primaryKey, autoIncremented), 55 | status.name is (unique) 56 | ) 57 | } 58 | 59 | on(this.offsetPointsTable) { 60 | offsetPoint => 61 | declare( 62 | offsetPoint.id is (primaryKey, autoIncremented) 63 | ) 64 | } 65 | 66 | on(this.offsetHistoryTable) { 67 | offsetHistory => 68 | declare( 69 | offsetHistory.id is (primaryKey, autoIncremented) 70 | ) 71 | } 72 | 73 | on(this.settingsTable) { 74 | setting => 75 | declare(setting.key is (primaryKey)) 76 | } 77 | 78 | } 79 | 
-------------------------------------------------------------------------------- /app/models/Group.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.PrimitiveTypeMode._ 20 | import collection.Iterable 21 | import org.squeryl.{Query, KeyedEntity} 22 | import org.squeryl.dsl.OneToMany 23 | 24 | object Group extends Enumeration { 25 | 26 | val All = Value("ALL") 27 | val Development = Value("DEVELOPMENT") 28 | val Production = Value("PRODUCTION") 29 | val Staging = Value("STAGING") 30 | val Test = Value("TEST") 31 | 32 | import Database.groupsTable 33 | 34 | def findAll: Iterable[Group] = inTransaction { 35 | from(groupsTable)(group => select(group)) 36 | } 37 | 38 | def findByName(name: String) = inTransaction { 39 | from(groupsTable)(group => where(group.name === name) select group).headOption 40 | } 41 | } 42 | 43 | case class Group(val name: String) extends KeyedEntity[Long] { 44 | override val id = 0L 45 | 46 | lazy val zookeepers: Seq[Zookeeper] = inTransaction { 47 | Database.groupToZookeepers.left(this).toList 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /app/models/OffsetHistory.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * 
Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.annotations._ 20 | import org.squeryl.{Session, KeyedEntity} 21 | import org.squeryl.PrimitiveTypeMode._ 22 | import models.Database._ 23 | 24 | object OffsetHistory { 25 | 26 | def truncate() = inTransaction { 27 | offsetHistoryTable.deleteWhere(r => 1 === 1) 28 | } 29 | 30 | def delete(offsetHistory: OffsetHistory) = inTransaction { 31 | offsetHistoryTable.deleteWhere(oH => oH.id === offsetHistory.id) 32 | } 33 | 34 | def findByZookeeperIdAndTopic(zookeeperId: Long, topic: String): Option[OffsetHistory] = inTransaction { 35 | from(offsetHistoryTable)(oH => where(oH.zookeeperId === zookeeperId and oH.topic === topic) select (oH)).headOption 36 | } 37 | 38 | def findByZookeeperId(zookeeperId: Long): Seq[OffsetHistory] = inTransaction { 39 | from(offsetHistoryTable)(oH => where(oH.zookeeperId === zookeeperId) select (oH)).toList 40 | } 41 | 42 | def insert(offsetHistory: OffsetHistory): OffsetHistory = inTransaction { 43 | offsetHistoryTable.insert(offsetHistory) 44 | } 45 | } 46 | 47 | case class OffsetHistory(zookeeperId: Long, topic: String) extends KeyedEntity[Long] { 48 | override val id: Long = 0 49 | 50 | lazy val offsetPoints: Seq[OffsetPoint] = inTransaction { 51 | Database.offsetHistoryToOffsetPoints.left(this).toList 52 | } 53 | } 
-------------------------------------------------------------------------------- /app/models/OffsetPoint.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.annotations._ 20 | import org.squeryl.{Session, KeyedEntity} 21 | import org.squeryl.PrimitiveTypeMode._ 22 | import models.Database._ 23 | import org.squeryl.dsl.CompositeKey2 24 | import java.sql.Timestamp 25 | import play.api.libs.json.{Json, Writes} 26 | 27 | object OffsetPoint { 28 | 29 | implicit object OffsetPointWrites extends Writes[OffsetPoint] { 30 | def writes(offsetPoint: OffsetPoint) = { 31 | 32 | Json.obj( 33 | "consumerGroup" -> offsetPoint.consumerGroup, 34 | "timestamp" -> offsetPoint.timestamp, 35 | "partition" -> offsetPoint.partition, 36 | "offset" -> offsetPoint.offset, 37 | "logSize" -> offsetPoint.logSize 38 | ) 39 | } 40 | } 41 | 42 | def truncate() = inTransaction { 43 | Session.currentSession.connection.createStatement().executeUpdate("TRUNCATE TABLE offsetPoints;") 44 | } 45 | 46 | def findByOffsetHistoryIdAndConsumerGroup(offsetHistoryId: Long, consumerGroup: String): Seq[OffsetPoint] = inTransaction { 47 | from(offsetPointsTable)(oP => where(oP.offsetHistoryId === offsetHistoryId and oP.consumerGroup === consumerGroup) select (oP)).toList 48 | } 49 | 50 | def 
deleteByOffsetHistoryId(offsetHistoryId: Long) = inTransaction { 51 | offsetPointsTable.deleteWhere(oP => oP.offsetHistoryId === offsetHistoryId) 52 | } 53 | 54 | def delete(offsetPoint: OffsetPoint) = inTransaction { 55 | offsetPointsTable.update(offsetPoint) 56 | } 57 | 58 | def update(offsetPoint: OffsetPoint) = inTransaction { 59 | offsetPointsTable.update(offsetPoint) 60 | } 61 | 62 | def insert(offsetPoint: OffsetPoint): OffsetPoint = inTransaction { 63 | offsetPointsTable.insert(offsetPoint) 64 | } 65 | } 66 | 67 | case class OffsetPoint(consumerGroup: String, timestamp: Timestamp, offsetHistoryId: Long, partition: Int, offset: Long, logSize: Long) extends KeyedEntity[Long] { 68 | override val id: Long = 0 69 | } -------------------------------------------------------------------------------- /app/models/Setting.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.{Session, KeyedEntity} 20 | import org.squeryl.PrimitiveTypeMode._ 21 | import models.Database._ 22 | import org.squeryl.annotations.Column 23 | import play.api.libs.json.{Json, Writes} 24 | 25 | object Setting extends Enumeration { 26 | import Database.settingsTable 27 | 28 | val PurgeSchedule = Value("PURGE_SCHEDULE") 29 | val OffsetFetchInterval = Value("OFFSET_FETCH_INTERVAL") 30 | 31 | implicit object SettingWrites extends Writes[Setting] { 32 | def writes(setting: Setting) = { 33 | Json.obj( 34 | "key" -> setting.key, 35 | "value" -> setting.value 36 | ) 37 | } 38 | } 39 | 40 | def findAll: Seq[Setting] = inTransaction { 41 | from(settingsTable) { 42 | s => select(s) 43 | }.toList 44 | } 45 | 46 | def findByKey(key: String): Option[Setting] = inTransaction { 47 | from(settingsTable)(s => where(s.key === key) select (s)).headOption 48 | } 49 | 50 | def update(setting: Setting) = inTransaction { 51 | settingsTable.update(setting) 52 | } 53 | } 54 | 55 | case class Setting(@Column("key_") id: String, value: String) extends KeyedEntity[String] { 56 | def key = id 57 | } 58 | -------------------------------------------------------------------------------- /app/models/Status.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.{KeyedEntity, Query} 20 | import org.squeryl.PrimitiveTypeMode._ 21 | import scala.collection.Iterable 22 | 23 | object Status extends Enumeration { 24 | type Status = Value 25 | 26 | val Connecting = Value("CONNECTING") 27 | val Connected = Value("CONNECTED") 28 | val Disconnected = Value("DISCONNECTED") 29 | val Deleted = Value("Deleted") 30 | 31 | } 32 | 33 | case class Status(name: String) extends KeyedEntity[Long] { 34 | 35 | override val id = 0L 36 | 37 | lazy val zookeepers: Seq[Zookeeper] = inTransaction { 38 | Database.statusToZookeepers.left(this).toSeq 39 | } 40 | } -------------------------------------------------------------------------------- /app/models/Zookeeper.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | package models 18 | 19 | import org.squeryl.{Query, KeyedEntity} 20 | import org.squeryl.PrimitiveTypeMode._ 21 | import org.squeryl.dsl.{CompositeKey, CompositeKey2, CompositeKey3} 22 | import play.api.libs.json._ 23 | import models.Database._ 24 | import scala.collection.Iterable 25 | import org.squeryl.annotations.Column 26 | 27 | object Zookeeper { 28 | 29 | implicit object ZookeeperWrites extends Writes[Zookeeper] { 30 | def writes(zookeeper: Zookeeper) = { 31 | 32 | Json.obj( 33 | "name" -> zookeeper.name, 34 | "host" -> zookeeper.host, 35 | "port" -> zookeeper.port, 36 | "group" -> Group.apply(zookeeper.groupId.toInt).toString, 37 | "status" -> Status.apply(zookeeper.statusId.toInt).toString, 38 | "chroot" -> zookeeper.chroot 39 | ) 40 | } 41 | } 42 | 43 | def findAll: Seq[Zookeeper] = inTransaction { 44 | from(zookeepersTable) { 45 | zk => select(zk) 46 | }.toList 47 | } 48 | 49 | def findByStatusId(statusId: Long): Seq[Zookeeper] = inTransaction { 50 | from(zookeepersTable)(zk => where(zk.statusId === statusId) select (zk)).toList 51 | } 52 | 53 | def findById(id: Long): Option[Zookeeper] = inTransaction { 54 | zookeepersTable.lookup(id) 55 | } 56 | 57 | def findByName(name: String): Option[Zookeeper] = inTransaction { 58 | from(zookeepersTable)(zk => where(zk.name === name) select (zk)).headOption 59 | } 60 | 61 | def upsert(zookeeper: Zookeeper) = inTransaction { 62 | val zkCount = from(zookeepersTable)(z => where(zookeeper.id === z.id) select (z)).toSeq.size 63 | zkCount match { 64 | case 1 => this.update(zookeeper) 65 | case _ if zkCount < 1 => this.insert(zookeeper) 66 | case _ => 67 | } 68 | } 69 | 70 | def insert(zookeeper: Zookeeper) = inTransaction { 71 | zookeepersTable.insert(zookeeper) 72 | } 73 | 74 | def update(zookeeper: Zookeeper) = inTransaction { 75 | zookeepersTable.update(zookeeper) 76 | } 77 | 78 | def delete(zookeeper: Zookeeper) = inTransaction { 79 | for (offsetHistory <- 
OffsetHistory.findByZookeeperId(zookeeper.id)) { 80 | OffsetPoint.deleteByOffsetHistoryId(offsetHistory.id) 81 | OffsetHistory.delete(offsetHistory) 82 | } 83 | 84 | zookeepersTable.delete(zookeeper.id) 85 | } 86 | 87 | def update(zookeepers: Iterable[Zookeeper]) { 88 | inTransaction { 89 | zookeepersTable.update(zookeepers) 90 | } 91 | } 92 | } 93 | 94 | case class Zookeeper(name: String, host: String, port: Int, groupId: Long, statusId: Long, chroot: String, id: Long = 0) 95 | extends KeyedEntity[Long] { 96 | 97 | override def toString = "%s:%s/%s".format(host, port, chroot) 98 | 99 | lazy val offsetHistories: Seq[OffsetHistory] = inTransaction { 100 | Database.zookeeperToOffsetHistories.left(this).toList 101 | } 102 | } -------------------------------------------------------------------------------- /app/views/index.scala.html: -------------------------------------------------------------------------------- 1 | @()(implicit request: RequestHeader) 2 | @import helper.twitterBootstrap._ 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | Kafka Web Console 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 42 | 43 | 44 | 54 |
55 |
56 | 88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 | 96 | 97 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | name := "kafka-web-console" 2 | 3 | version := "2.1.0-SNAPSHOT" 4 | 5 | libraryDependencies ++= Seq( 6 | jdbc, 7 | cache, 8 | "org.squeryl" % "squeryl_2.10" % "0.9.5-6", 9 | "com.twitter" % "util-zk_2.10" % "6.11.0", 10 | "com.twitter" % "finagle-core_2.10" % "6.15.0", 11 | "org.quartz-scheduler" % "quartz" % "2.2.1", 12 | "org.apache.kafka" % "kafka_2.10" % "0.8.1.1" 13 | exclude("javax.jms", "jms") 14 | exclude("com.sun.jdmk", "jmxtools") 15 | exclude("com.sun.jmx", "jmxri") 16 | ) 17 | 18 | play.Project.playScalaSettings 19 | -------------------------------------------------------------------------------- /conf/application.conf: -------------------------------------------------------------------------------- 1 | # This is the main configuration file for the application. 2 | # ~~~~~ 3 | 4 | # Secret key 5 | # ~~~~~ 6 | # The secret key is used to secure cryptographics functions. 7 | # If you deploy your application to several instances be sure to use the same key! 8 | application.secret="5GE@>>C1;h7lCHG3E0ZmfuppS50i:Jp_P@c>:8fxoJ;n/81fGo2j9jf3yI2RGHQK" 9 | 10 | # The application languages 11 | # ~~~~~ 12 | application.langs="en" 13 | 14 | # Global object class 15 | # ~~~~~ 16 | # Define the Global object class for this application. 17 | # Default to Global in the root package. 18 | # application.global=Global 19 | 20 | # Router 21 | # ~~~~~ 22 | # Define the Router object to use for this application. 23 | # This router will be looked up first when the application is starting up, 24 | # so make sure this is the entry point. 25 | # Furthermore, it's assumed your route file is named properly. 26 | # So for an application router like `my.application.Router`, 27 | # you may need to define a router file `conf/my.application.routes`. 
28 | # Default to Routes in the root package (and conf/routes) 29 | # application.router=my.application.Routes 30 | 31 | # Database configuration 32 | # ~~~~~ 33 | # You can declare as many datasources as you want. 34 | # By convention, the default datasource is named `default` 35 | # 36 | db.default.driver=org.h2.Driver 37 | db.default.url="jdbc:h2:file:play" 38 | # db.default.user=sa 39 | # db.default.password="" 40 | 41 | # Evolutions 42 | # ~~~~~ 43 | # You can disable evolutions if needed 44 | # evolutionplugin=disabled 45 | 46 | # Logger 47 | # ~~~~~ 48 | # You can also configure logback (http://logback.qos.ch/), 49 | # by providing an application-logger.xml file in the conf directory. 50 | 51 | # Root logger: 52 | logger.root=ERROR 53 | 54 | # Logger used by the framework: 55 | logger.play=INFO 56 | 57 | # Logger provided to your application: 58 | logger.application=INFO 59 | 60 | -------------------------------------------------------------------------------- /conf/evolutions/default/1.sql: -------------------------------------------------------------------------------- 1 | # --- !Ups 2 | 3 | CREATE TABLE zookeepers ( 4 | name VARCHAR, 5 | host VARCHAR, 6 | port INT, 7 | statusId LONG, 8 | groupId LONG, 9 | PRIMARY KEY (name) 10 | ); 11 | 12 | CREATE TABLE groups ( 13 | id LONG, 14 | name VARCHAR, 15 | PRIMARY KEY (id) 16 | ); 17 | 18 | CREATE TABLE status ( 19 | id LONG, 20 | name VARCHAR, 21 | PRIMARY KEY (id) 22 | ); 23 | 24 | INSERT INTO groups (id, name) VALUES (0, 'ALL'); 25 | INSERT INTO groups (id, name) VALUES (1, 'DEVELOPMENT'); 26 | INSERT INTO groups (id, name) VALUES (2, 'PRODUCTION'); 27 | INSERT INTO groups (id, name) VALUES (3, 'STAGING'); 28 | INSERT INTO groups (id, name) VALUES (4, 'TEST'); 29 | 30 | INSERT INTO status (id, name) VALUES (0, 'CONNECTING'); 31 | INSERT INTO status (id, name) VALUES (1, 'CONNECTED'); 32 | INSERT INTO status (id, name) VALUES (2, 'DISCONNECTED'); 33 | INSERT INTO status (id, name) VALUES (3, 'DELETED'); 34 | 
35 | # --- !Downs 36 | 37 | DROP TABLE IF EXISTS zookeepers; 38 | DROP TABLE IF EXISTS groups; 39 | DROP TABLE IF EXISTS status; -------------------------------------------------------------------------------- /conf/evolutions/default/2.sql: -------------------------------------------------------------------------------- 1 | # --- !Ups 2 | 3 | ALTER TABLE zookeepers ADD COLUMN chroot VARCHAR; 4 | 5 | # --- !Downs 6 | 7 | ALTER TABLE zookeepers DROP COLUMN chroot; -------------------------------------------------------------------------------- /conf/evolutions/default/3.sql: -------------------------------------------------------------------------------- 1 | # --- !Ups 2 | 3 | ALTER TABLE zookeepers DROP PRIMARY KEY; 4 | ALTER TABLE zookeepers ADD COLUMN id LONG NOT NULL AUTO_INCREMENT; 5 | ALTER TABLE zookeepers ADD PRIMARY KEY (id); 6 | ALTER TABLE zookeepers ALTER COLUMN name SET NOT NULL; 7 | ALTER TABLE zookeepers ALTER COLUMN host SET NOT NULL; 8 | ALTER TABLE zookeepers ALTER COLUMN port SET NOT NULL; 9 | ALTER TABLE zookeepers ALTER COLUMN statusId SET NOT NULL; 10 | ALTER TABLE zookeepers ALTER COLUMN groupId SET NOT NULL; 11 | ALTER TABLE zookeepers ADD UNIQUE (name); 12 | 13 | CREATE TABLE offsetHistory ( 14 | id LONG AUTO_INCREMENT PRIMARY KEY, 15 | zookeeperId LONG, 16 | topic VARCHAR(255), 17 | FOREIGN KEY (zookeeperId) REFERENCES zookeepers(id), 18 | UNIQUE (zookeeperId, topic) 19 | ); 20 | 21 | CREATE TABLE offsetPoints ( 22 | id LONG AUTO_INCREMENT PRIMARY KEY, 23 | consumerGroup VARCHAR(255), 24 | timestamp TIMESTAMP, 25 | offsetHistoryId LONG, 26 | partition INT, 27 | offset LONG, 28 | logSize LONG, 29 | FOREIGN KEY (offsetHistoryId) REFERENCES offsetHistory(id) 30 | ); 31 | 32 | CREATE TABLE settings ( 33 | key_ VARCHAR(255) PRIMARY KEY, 34 | value VARCHAR(255) 35 | ); 36 | 37 | INSERT INTO settings (key_, value) VALUES ('PURGE_SCHEDULE', '0 0 0 ? 
* SUN *'); 38 | INSERT INTO settings (key_, value) VALUES ('OFFSET_FETCH_INTERVAL', '30'); 39 | 40 | # --- !Downs 41 | 42 | DROP TABLE IF EXISTS offsetPoints; 43 | DROP TABLE IF EXISTS offsetHistory; 44 | DROP TABLE IF EXISTS settings; 45 | 46 | ALTER TABLE zookeepers DROP PRIMARY KEY; 47 | ALTER TABLE zookeepers DROP COLUMN id; 48 | ALTER TABLE zookeepers ADD PRIMARY KEY (name); 49 | ALTER TABLE zookeepers ALTER COLUMN host SET NULL; 50 | ALTER TABLE zookeepers ALTER COLUMN port SET NULL; 51 | ALTER TABLE zookeepers ALTER COLUMN statusId SET NULL; 52 | ALTER TABLE zookeepers ALTER COLUMN groupId SET NULL; -------------------------------------------------------------------------------- /conf/routes: -------------------------------------------------------------------------------- 1 | # Routes 2 | # This file defines all application routes (Higher priority routes first) 3 | # ~~~~ 4 | 5 | # Home page 6 | GET / controllers.Application.index() 7 | 8 | GET /zookeepers controllers.Assets.at(path="/public", file="html/partials/zookeeper/index.html") 9 | GET /zookeepers.json/feed controllers.Zookeeper.feed() 10 | GET /zookeepers.json/:group controllers.Zookeeper.index(group) 11 | PUT /zookeepers.json controllers.Zookeeper.create() 12 | DELETE /zookeepers.json/:name controllers.Zookeeper.delete(name) 13 | 14 | GET /topics controllers.Assets.at(path="/public", file="html/partials/topic/index.html") 15 | GET /topics.json controllers.Topic.index() 16 | GET /topics.json/:topic/:zookeeper controllers.Topic.show(topic, zookeeper) 17 | GET /topics.json/:topic/:zookeeper/feed controllers.Topic.feed(topic, zookeeper) 18 | GET /topics/:name/:zookeeper controllers.IgnoreParamAssets.at(path="/public", file="html/partials/topic/show.html", name, zookeeper) 19 | 20 | GET /consumergroups.json/:consumerGroup/:topic/:zookeeper controllers.ConsumerGroup.show(consumerGroup, topic, zookeeper) 21 | 22 | GET /groups controllers.Group.index() 23 | 24 | GET /brokers 
controllers.Assets.at(path="/public", file="html/partials/broker/index.html") 25 | GET /brokers.json controllers.Broker.index() 26 | 27 | GET /offsethistory/:consumerGroup/:topic/:zookeeper controllers.IgnoreParamAssets.at2(path="/public", file="html/partials/offset-history/show.html", consumerGroup, topic, zookeeper) 28 | GET /offsethistory.json/:consumerGroup/:topic/:zookeeper controllers.OffsetHistory.show(consumerGroup, topic, zookeeper) 29 | 30 | GET /settings controllers.Assets.at(path="/public", file="html/partials/settings/index.html") 31 | GET /settings.json controllers.Settings.index() 32 | POST /settings.json controllers.Settings.update() 33 | 34 | # Map static resources from the /public folder to the /assets URL path 35 | GET /assets/*file controllers.Assets.at(path="/public", file) 36 | 37 | GET /api/console controllers.ApiConsole.at(path="/public/api-console", file="index.html") 38 | GET /api/*file controllers.ApiConsole.at(path="/public/api-console", file) 39 | -------------------------------------------------------------------------------- /img/brokers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/brokers.png -------------------------------------------------------------------------------- /img/offset-history.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/offset-history.png -------------------------------------------------------------------------------- /img/register-zookeeper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/register-zookeeper.png -------------------------------------------------------------------------------- 
/img/topic-feed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/topic-feed.png -------------------------------------------------------------------------------- /img/topic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/topic.png -------------------------------------------------------------------------------- /img/topics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/topics.png -------------------------------------------------------------------------------- /img/zookeepers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/img/zookeepers.png -------------------------------------------------------------------------------- /lib/finagle-kafka_2.10-0.1.2-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/lib/finagle-kafka_2.10-0.1.2-SNAPSHOT.jar -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.0 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | // Comment to get more information during initialization 2 | logLevel := Level.Warn 3 | 4 | // The Typesafe repository 5 | resolvers += 
"Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/" 6 | 7 | // Use the Play sbt plugin for Play projects 8 | addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.2.1") -------------------------------------------------------------------------------- /public/api-console/authentication/oauth1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /public/api-console/authentication/oauth2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /public/api-console/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /public/api-console/fonts/LICENSE-OpenSans.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. 
For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans-Bold.woff -------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans-BoldItalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans-BoldItalic.woff -------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans-Italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans-Italic.woff 
-------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans-Semibold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans-Semibold.woff -------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans-SemiboldItalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans-SemiboldItalic.woff -------------------------------------------------------------------------------- /public/api-console/fonts/OpenSans.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/OpenSans.woff -------------------------------------------------------------------------------- /public/api-console/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /public/api-console/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /public/api-console/fonts/fontawesome-webfont.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/api-console/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /public/api-console/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | api:Console 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /public/api-console/kafka-web-console.raml: -------------------------------------------------------------------------------- 1 | #%RAML 0.8 2 | 3 | title: Kafka Web Console API 4 | version: 1.0 5 | baseUri: http://{hostname}:{port} 6 | mediaType: application/json 7 | 8 | /zookeepers.json/{group}: 9 | get: 10 | /zookeepers.json: 11 | put: 12 | body: 13 | schema: !include zookeeper.json 14 | /zookeepers.json/{name}: 15 | delete: 16 | /topics.json: 17 | get: 18 | /topics.json/{topic}/{zookeeper}: 19 | get: 20 | /brokers.json: 21 | get: 22 | /consumergroups.json/{consumerGroup}/{topic}/{zookeeper}: 23 | get: 24 | /offsethistory.json/{consumerGroup}/{topic}/{zookeeper}: 25 | get: 26 | /settings.json: 27 | get: 28 | post: 29 | body: 30 | schema: !include settings.json -------------------------------------------------------------------------------- /public/api-console/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Settings", 3 | "type": "array", 4 | "properties": { 5 | "key": { 6 | "type": "string" 7 | }, 8 | "value": { 9 | "type": "string" 10 | } 11 | }, 12 | "required": ["key", "value"] 13 | } 14 | -------------------------------------------------------------------------------- /public/api-console/zookeeper.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Zookeeper", 3 | "type": "object", 4 | "properties": { 5 | "name": { 6 | "type": "string" 7 | }, 8 | "host": { 9 | "type": "string" 10 | }, 11 | "port": { 12 | "type": "integer", 13 | "minimum": 0 14 | }, 15 | "group": { 16 | "type": "string" 17 | }, 18 | "chroot": { 19 | "type": "string" 20 | } 21 | }, 22 | "required": ["name", "host", "port", "group"] 23 | } 24 | -------------------------------------------------------------------------------- /public/fonts/FontAwesome.otf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /public/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /public/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /public/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /public/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /public/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/glyphicons-halflings-regular.ttf 
-------------------------------------------------------------------------------- /public/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /public/html/partials/broker/index.html: -------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 |
ZookeeperHostPort
{{broker.zookeeper}}{{broker.host}}{{broker.port}}
16 |
-------------------------------------------------------------------------------- /public/html/partials/offset-history/show.html: -------------------------------------------------------------------------------- 1 |
2 | 3 |
4 |
5 | 6 |
-------------------------------------------------------------------------------- /public/html/partials/settings/index.html: -------------------------------------------------------------------------------- 1 |
2 |
4 | 5 | 10 | 11 |

A Cron 13 | expression is required.

14 |
15 |
17 | 18 | 21 | 22 |

Offset fetch 24 | interval is invalid.

25 |
26 |

27 | 28 |

29 |
-------------------------------------------------------------------------------- /public/html/partials/topic/index.html: -------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 |
ZookeeperTopicPartitionLog SizeLeader
14 | 15 | 16 | {{topic.zookeeper}}{{topic.name}}-{{topic.logSize}}-
{{$index}}{{partition.logSize}}{{partition.leader}}
33 |
-------------------------------------------------------------------------------- /public/html/partials/topic/show.html: -------------------------------------------------------------------------------- 1 | 5 | 6 |
7 |
8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 26 | 29 | 30 | 33 | 36 | 37 | 38 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 |
Consumer GroupPartitionOffsetLagOwner
21 | 23 | 25 | 27 | {{consumerGroup.consumerGroup}} 28 | - 31 | {{consumerGroup.offset}} 32 | 34 | {{consumerGroup.lag}} 35 | -
{{$index}}{{partition.offset}}{{partition.lag}}{{partition.owner}}
49 |
50 |
51 |
52 |
53 |
54 |
-------------------------------------------------------------------------------- /public/html/partials/zookeeper/index.html: -------------------------------------------------------------------------------- 1 | 21 | 22 |
23 |
24 |
25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 48 | 49 | 50 |
NameHostPortStatusGroupChroot
{{zookeeper.name}}{{zookeeper.host}}{{zookeeper.port}}{{zookeeper.status}}

...

{{zookeeper.group}}{{zookeeper.chroot}} 45 | 47 |
51 |
52 |
53 |
54 |
55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 67 | 68 | 69 | 70 | 71 | 72 | 76 | 77 | 78 |
NameHostPortStatusChroot
{{zookeeper.name}}{{zookeeper.host}}{{zookeeper.port}}{{zookeeper.status}}

...

{{zookeeper.chroot}} 73 | 75 |
79 |
80 |
81 |
82 |
83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 95 | 96 | 97 | 98 | 99 | 100 | 104 | 105 | 106 |
NameHostPortStatusChroot
{{zookeeper.name}}{{zookeeper.host}}{{zookeeper.port}}{{zookeeper.status}}

...

{{zookeeper.chroot}} 101 | 103 |
107 |
108 |
109 |
110 |
111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 123 | 124 | 125 | 126 | 127 | 128 | 132 | 133 | 134 |
NameHostPortStatusChroot
{{zookeeper.name}}{{zookeeper.host}}{{zookeeper.port}}{{zookeeper.status}}

...

{{zookeeper.chroot}} 129 | 131 |
135 |
136 |
137 |
138 |
139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 151 | 152 | 153 | 154 | 155 | 156 | 160 | 161 | 162 |
NameHostPortStatusChroot
{{zookeeper.name}}{{zookeeper.host}}{{zookeeper.port}}{{zookeeper.status}}

...

{{zookeeper.chroot}} 157 | 159 |
163 |
164 |
165 |
166 |
168 |
170 | 171 | 174 | 175 |

A name 176 | for Zookeeper is required.

177 |
178 |
180 | 181 | 184 | 185 |

Hostname is required.

187 | 188 |

189 | Hostname must be in a valid format.

190 |
191 |
193 | 194 | 196 | 197 |

Port 198 | number is invalid.

199 |
200 |
201 | 202 | 205 |
206 |
207 | 208 | 211 |
212 |

213 | 214 |

215 |
216 |
217 |
218 | 219 | 220 | -------------------------------------------------------------------------------- /public/images/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/images/favicon.ico -------------------------------------------------------------------------------- /public/images/kafka_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/images/kafka_logo.png -------------------------------------------------------------------------------- /public/images/zookeeper_small.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/images/zookeeper_small.gif -------------------------------------------------------------------------------- /public/javascripts/app.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | /* Root Angular module: ngRoute provides the client-side routing configured below, ngAnimate enables view transitions. */ var app = angular.module('app', ['ngRoute', 'ngAnimate']) 18 | .config(function ($locationProvider, $routeProvider) { 19 | /* Each route pairs a controller with the server path that serves its HTML partial (see conf/routes). */ $routeProvider 20 | .when('/settings', { 21 | controller: 'SettingsController', 22 | templateUrl: 'settings' 23 | }) 24 | .when('/zookeepers', { 25 | controller: 'ZookeepersController', 26 | templateUrl: 'zookeepers' 27 | }) 28 | .when('/topics', { 29 | controller: 'TopicsController', 30 | templateUrl: 'topics' 31 | }) 32 | .when('/brokers', { 33 | controller: 'BrokersController', 34 | templateUrl: 'brokers' 35 | }) 36 | /* Parameterized routes build templateUrl from the route params so the server returns the matching partial for that topic/zookeeper. */ .when('/topics/:topic/:zookeeper', { 37 | controller: 'TopicController', 38 | templateUrl: function (params) { 39 | return 'topics/' + params.topic + '/' + params.zookeeper 40 | } 41 | }) 42 | .when('/offsethistory/:consumerGroup/:topic/:zookeeper', { 43 | controller: 'OffsetHistoryController', 44 | templateUrl: function (params) { 45 | return 'offsethistory/' + params.consumerGroup + '/' + params.topic + '/' + params.zookeeper 46 | } 47 | }) 48 | .otherwise({ 49 | redirectTo: 'zookeepers' 50 | }); 51 | 52 | }); 53 | 54 | /* isActive is placed on $rootScope so nav links can highlight the route currently shown. */ app.run(function ($rootScope, $location) { 55 | $rootScope.isActive = function (route) { 56 | return route === $location.path(); 57 | }; 58 | }); -------------------------------------------------------------------------------- /public/javascripts/brokers-controller.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | app.controller("BrokersController", function ($scope, $http) { 18 | $http.get('brokers.json').success(function (data) { 19 | $scope.brokers = data; 20 | }); 21 | }); -------------------------------------------------------------------------------- /public/javascripts/filters.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | app.filter('orderObjectBy', function() { 18 | return function(items, field, reverse) { 19 | var filtered = []; 20 | angular.forEach(items, function(item) { 21 | filtered.push(item); 22 | }); 23 | filtered.sort(function (a, b) { 24 | return ((a[field]) > (b[field])); 25 | }); 26 | if(reverse) filtered.reverse(); 27 | return filtered; 28 | }; 29 | }); -------------------------------------------------------------------------------- /public/javascripts/offset-history-controller.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. 
You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | app.controller("OffsetHistoryController", function ($http, $scope, $location, $routeParams) { 18 | $http.get('offsethistory.json/' + $routeParams.consumerGroup + '/' + $routeParams.topic + '/' + $routeParams.zookeeper).success(function (data) { 19 | 20 | var offsetsGroupedByTimestamp = _.groupBy(data, function (offsetPoint) { 21 | return offsetPoint.consumerGroup.toString() + offsetPoint.timestamp.toString(); 22 | }); 23 | 24 | buildOffsetHistoryGraph(offsetsGroupedByTimestamp) 25 | buildMessageThroughputGraph(offsetsGroupedByTimestamp) 26 | 27 | }); 28 | 29 | function buildMessageThroughputGraph(offsetsGroupedByTimestamp) { 30 | var previousOffsetPoint; 31 | var consumerThroughput = []; 32 | var producerThroughput = []; 33 | 34 | angular.forEach(offsetsGroupedByTimestamp, function (offsetPoint) { 35 | if (previousOffsetPoint !== undefined) { 36 | consumerThroughput.push({y: (offsetPoint[0].offset - previousOffsetPoint[0].offset) / 60, x: offsetPoint[0].timestamp}); 37 | producerThroughput.push({y: (offsetPoint[0].logSize - previousOffsetPoint[0].logSize) / 60, x: offsetPoint[0].timestamp}); 38 | } 39 | previousOffsetPoint = offsetPoint 40 | }); 41 | 42 | var consumerMaxMessages = _.max(consumerThroughput, function (dataPoint) { 43 | return dataPoint.y; 44 | }).y; 45 | 46 | var producerMaxMessages = _.max(producerThroughput, function (dataPoint) { 47 | return dataPoint.y; 48 | }).y; 49 | 50 | nv.addGraph(function () { 51 | var chart = nv.models.lineChart().margin({left: 100, right: 
40}).forceY(Math.ceil(Math.max(consumerMaxMessages, producerMaxMessages))) 52 | 53 | chart.xAxis.tickFormat(function (d) { 54 | return d3.time.format('%H:%M:%S')(new Date(d)); 55 | }).axisLabel('Time'); 56 | 57 | chart.yAxis.tickFormat(d3.format('d')).axisLabel('Messages per second'); 58 | 59 | var dataPoints = [ 60 | { 61 | key: 'Consumer ', 62 | values: consumerThroughput, 63 | color: '#ff7f0e' 64 | }, 65 | { 66 | key: 'Producer/s', 67 | values: producerThroughput, 68 | color: '#2ca02c' 69 | } 70 | ]; 71 | 72 | d3.select('#message-throughput-chart svg').datum(dataPoints).transition().duration(500).call(chart); 73 | nv.utils.windowResize(chart.update); 74 | 75 | return chart; 76 | }); 77 | } 78 | 79 | function buildOffsetHistoryGraph(offsetsGroupedByTimestamp) { 80 | 81 | var chartData = _.map(offsetsGroupedByTimestamp, function (offsetPoint) { 82 | return { 83 | logSize: _(offsetPoint).pluck("logSize").reduce(function (sum, num) { 84 | return sum + num; 85 | }), 86 | offset: _(offsetPoint).pluck("offset").reduce(function (sum, num) { 87 | return sum + num; 88 | }), 89 | timestamp: offsetPoint[0].timestamp 90 | } 91 | }); 92 | 93 | var lagDataPoints = _.map(chartData, function (offsetPoint) { 94 | return { 95 | y: offsetPoint.logSize - offsetPoint.offset, 96 | x: offsetPoint.timestamp 97 | } 98 | }); 99 | 100 | var offsetDataPoints = _.map(chartData, function (offsetPoint) { 101 | return { 102 | y: offsetPoint.offset, 103 | x: offsetPoint.timestamp 104 | } 105 | }); 106 | 107 | nv.addGraph(function () { 108 | 109 | var chart = nv.models.lineChart().margin({left: 100, right: 40}).useInteractiveGuideline(true).forceY(_.max(chartData, function (offsetPoint) { 110 | return offsetPoint.logSize; 111 | })); 112 | 113 | chart.xAxis.tickFormat(function (d) { 114 | return d3.time.format('%H:%M:%S')(new Date(d)); 115 | }).axisLabel('Time'); 116 | 117 | chart.yAxis.tickFormat(d3.format('d')).axisLabel('Messages'); 118 | 119 | var dataPoints = [ 120 | { 121 | key: 'Offset', 
122 | values: offsetDataPoints, 123 | color: '#ff7f0e' 124 | }, 125 | { 126 | key: 'Lag', 127 | values: lagDataPoints, 128 | color: '#2ca02c' 129 | } 130 | ]; 131 | 132 | d3.select('#offset-history-chart svg').datum(dataPoints).transition().duration(500).call(chart); 133 | nv.utils.windowResize(chart.update); 134 | 135 | return chart; 136 | }); 137 | } 138 | }); -------------------------------------------------------------------------------- /public/javascripts/services.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjmamo/kafka-web-console/592cdb35619521092c642a4475b47fbfcf06f096/public/javascripts/services.js -------------------------------------------------------------------------------- /public/javascripts/settings-controller.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | app.controller("SettingsController", function ($http, $scope, $location) { 18 | $('input').tooltip() 19 | 20 | $http.get('settings.json').success(function (data) { 21 | $scope.settings = [] 22 | 23 | angular.forEach(data, function (setting) { 24 | if (setting.key === 'PURGE_SCHEDULE') { 25 | $scope.settings.purgeSchedule = setting.value; 26 | } 27 | else if (setting.key === 'OFFSET_FETCH_INTERVAL') { 28 | $scope.settings.offsetFetchInterval = parseInt(setting.value); 29 | } 30 | }); 31 | }); 32 | 33 | $scope.updateSettings = function (settings) { 34 | $http.post('settings.json', [ 35 | { key: 'PURGE_SCHEDULE', value: settings.purgeSchedule}, 36 | { key: 'OFFSET_FETCH_INTERVAL', value: settings.offsetFetchInterval.toString()} 37 | ]).success(function () { 38 | $location.path("/"); 39 | }); 40 | }; 41 | }); -------------------------------------------------------------------------------- /public/javascripts/topic-controller.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 
15 | */ 16 | 17 | app.controller("TopicController", function ($http, $scope, $location, $routeParams, $filter) { 18 | $http.get('topics.json/' + $routeParams.topic + '/' + $routeParams.zookeeper).success(function (data) { 19 | $scope.topic = data; 20 | angular.forEach($scope.topic, function (consumerGroup) { 21 | angular.forEach(consumerGroup.partitions, function (partition) { 22 | partition.id = parseInt(partition.id) 23 | }); 24 | 25 | consumerGroup.partitions = $filter('orderObjectBy')(consumerGroup.partitions, 'id') 26 | }); 27 | }); 28 | 29 | var ws = new WebSocket('ws://' + $location.host() + ':' + $location.port() + $('base').attr('href') + 'topics.json/' + $routeParams.topic + '/' + $routeParams.zookeeper + '/feed'); 30 | ws.onmessage = function (message) { 31 | var well = angular.element('
'); 32 | well.text(message.data); 33 | $("#messages").append(well); 34 | $scope.$apply(); 35 | }; 36 | 37 | $scope.$on('$destroy', function () { 38 | ws.close(); 39 | }); 40 | 41 | $scope.getConsumerGroup = function (consumerGroup) { 42 | $http.get('consumergroups.json/' + consumerGroup + '/' + $routeParams.topic + '/' + $routeParams.zookeeper).success(function (data) { 43 | angular.forEach($scope.topic, function (consumerGroup_) { 44 | if (consumerGroup === consumerGroup_.consumerGroup) { 45 | consumerGroup_.consumers = data; 46 | } 47 | }); 48 | }); 49 | }; 50 | 51 | $scope.getOffsetHistory = function (consumerGroup) { 52 | $location.path('offsethistory/' + consumerGroup.consumerGroup + '/' + $routeParams.topic + '/' + $routeParams.zookeeper); 53 | }; 54 | 55 | }); -------------------------------------------------------------------------------- /public/javascripts/topics-controller.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014 Claude Mamo 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | * use this file except in compliance with the License. You may obtain a copy of 6 | * the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | * License for the specific language governing permissions and limitations under 14 | * the License. 15 | */ 16 | 17 | app.controller("TopicsController", function ($scope, $location, $http, $filter) { 18 | $http.get('topics.json'). 
19 | success(function (data) { 20 | $scope.topics = data; 21 | 22 | angular.forEach($scope.topics, function (topic) { 23 | angular.forEach(topic.partitions, function (partition) { 24 | partition.id = parseInt(partition.id) 25 | }); 26 | 27 | topic.partitions = $filter('orderObjectBy')(topic.partitions, 'id') 28 | }); 29 | }); 30 | 31 | $scope.getTopic = function (topic) { 32 | $location.path('topics/' + topic.name + '/' + topic.zookeeper); 33 | }; 34 | }); -------------------------------------------------------------------------------- /public/javascripts/vendor/angular-animate.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | AngularJS v1.2.11 3 | (c) 2010-2014 Google, Inc. http://angularjs.org 4 | License: MIT 5 | */ 6 | (function(v,k,t){'use strict';k.module("ngAnimate",["ng"]).factory("$$animateReflow",["$window","$timeout",function(k,B){var d=k.requestAnimationFrame||k.webkitRequestAnimationFrame||function(d){return B(d,10,!1)},q=k.cancelAnimationFrame||k.webkitCancelAnimationFrame||function(d){return B.cancel(d)};return function(p){var k=d(p);return function(){q(k)}}}]).config(["$provide","$animateProvider",function(R,B){function d(d){for(var k=0;k=u&&a>=p&&h()}var f=b.data(n),g=d(b);if(-1!=g.className.indexOf(a)&&f){var l=f.timings,m=f.stagger,p=f.maxDuration,r=f.activeClassName,u=Math.max(l.transitionDelay, 19 | l.animationDelay)*x,w=Date.now(),v=T+" "+S,t=f.itemIndex,q="",s=[];if(0article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}"; 6 | c=d.insertBefore(c.lastChild,d.firstChild);b.hasCSS=!!c}g||t(a,b);return a}var k=l.html5||{},s=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,r=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,j,o="_html5shiv",h=0,n={},g;(function(){try{var a=f.createElement("a");a.innerHTML="";j="hidden"in a;var 
b;if(!(b=1==a.childNodes.length)){f.createElement("a");var c=f.createDocumentFragment();b="undefined"==typeof c.cloneNode|| 7 | "undefined"==typeof c.createDocumentFragment||"undefined"==typeof c.createElement}g=b}catch(d){g=j=!0}})();var e={elements:k.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output progress section summary template time video",version:"3.7.0",shivCSS:!1!==k.shivCSS,supportsUnknownElements:g,shivMethods:!1!==k.shivMethods,type:"default",shivDocument:q,createElement:p,createDocumentFragment:function(a,b){a||(a=f); 8 | if(g)return a.createDocumentFragment();for(var b=b||i(a),c=b.frag.cloneNode(),d=0,e=m(),h=e.length;d #mq-test-1 { width: 42px; }',c.insertBefore(e,d),b=42===f.offsetWidth,c.removeChild(e),{matches:b,media:a}}}(a.document)}(this),function(a){"use strict";function b(){u(!0)}var c={};a.respond=c,c.update=function(){};var d=[],e=function(){var b=!1;try{b=new a.XMLHttpRequest}catch(c){b=new a.ActiveXObject("Microsoft.XMLHTTP")}return function(){return b}}(),f=function(a,b){var c=e();c&&(c.open("GET",a,!0),c.onreadystatechange=function(){4!==c.readyState||200!==c.status&&304!==c.status||b(c.responseText)},4!==c.readyState&&c.send(null))};if(c.ajax=f,c.queue=d,c.regex={media:/@media[^\{]+\{([^\{\}]*\{[^\}\{]*\})+/gi,keyframes:/@(?:\-(?:o|moz|webkit)\-)?keyframes[^\{]+\{(?:[^\{\}]*\{[^\}\{]*\})+[^\}]*\}/gi,urls:/(url\()['"]?([^\/\)'"][^:\)'"]+)['"]?(\))/g,findStyles:/@media *([^\{]+)\{([\S\s]+?)$/,only:/(only\s+)?([a-zA-Z]+)\s?/,minw:/\([\s]*min\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/,maxw:/\([\s]*max\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/},c.mediaQueriesSupported=a.matchMedia&&null!==a.matchMedia("only all")&&a.matchMedia("only all").matches,!c.mediaQueriesSupported){var 
g,h,i,j=a.document,k=j.documentElement,l=[],m=[],n=[],o={},p=30,q=j.getElementsByTagName("head")[0]||k,r=j.getElementsByTagName("base")[0],s=q.getElementsByTagName("link"),t=function(){var a,b=j.createElement("div"),c=j.body,d=k.style.fontSize,e=c&&c.style.fontSize,f=!1;return b.style.cssText="position:absolute;font-size:1em;width:1em",c||(c=f=j.createElement("body"),c.style.background="none"),k.style.fontSize="100%",c.style.fontSize="100%",c.appendChild(b),f&&k.insertBefore(c,k.firstChild),a=b.offsetWidth,f?k.removeChild(c):c.removeChild(b),k.style.fontSize=d,e&&(c.style.fontSize=e),a=i=parseFloat(a)},u=function(b){var c="clientWidth",d=k[c],e="CSS1Compat"===j.compatMode&&d||j.body[c]||d,f={},o=s[s.length-1],r=(new Date).getTime();if(b&&g&&p>r-g)return a.clearTimeout(h),h=a.setTimeout(u,p),void 0;g=r;for(var v in l)if(l.hasOwnProperty(v)){var w=l[v],x=w.minw,y=w.maxw,z=null===x,A=null===y,B="em";x&&(x=parseFloat(x)*(x.indexOf(B)>-1?i||t():1)),y&&(y=parseFloat(y)*(y.indexOf(B)>-1?i||t():1)),w.hasquery&&(z&&A||!(z||e>=x)||!(A||y>=e))||(f[w.media]||(f[w.media]=[]),f[w.media].push(m[w.rules]))}for(var C in n)n.hasOwnProperty(C)&&n[C]&&n[C].parentNode===q&&q.removeChild(n[C]);n.length=0;for(var D in f)if(f.hasOwnProperty(D)){var E=j.createElement("style"),F=f[D].join("\n");E.type="text/css",E.media=D,q.insertBefore(E,o.nextSibling),E.styleSheet?E.styleSheet.cssText=F:E.appendChild(j.createTextNode(F)),n.push(E)}},v=function(a,b,d){var e=a.replace(c.regex.keyframes,"").match(c.regex.media),f=e&&e.length||0;b=b.substring(0,b.lastIndexOf("/"));var g=function(a){return a.replace(c.regex.urls,"$1"+b+"$2$3")},h=!f&&d;b.length&&(b+="/"),h&&(f=1);for(var i=0;f>i;i++){var j,k,n,o;h?(j=d,m.push(g(a))):(j=e[i].match(c.regex.findStyles)&&RegExp.$1,m.push(RegExp.$2&&g(RegExp.$2))),n=j.split(","),o=n.length;for(var 
p=0;o>p;p++)k=n[p],l.push({media:k.split("(")[0].match(c.regex.only)&&RegExp.$2||"all",rules:m.length-1,hasquery:k.indexOf("(")>-1,minw:k.match(c.regex.minw)&&parseFloat(RegExp.$1)+(RegExp.$2||""),maxw:k.match(c.regex.maxw)&&parseFloat(RegExp.$1)+(RegExp.$2||"")})}u()},w=function(){if(d.length){var b=d.shift();f(b.href,function(c){v(c,b.href,b.media),o[b.href]=!0,a.setTimeout(function(){w()},0)})}},x=function(){for(var b=0;b