├── VERSION ├── .travis.yml ├── media ├── treeview.png └── graphview.png ├── src ├── main │ ├── resources │ │ ├── META-INF │ │ │ └── MANIFEST.MF │ │ ├── log4j.properties │ │ └── templates │ │ │ ├── graph.ftl │ │ │ └── tree.ftl │ └── java │ │ └── com │ │ └── applause │ │ └── ConsumerGraph │ │ ├── ConsumerGraph.java │ │ ├── TopicConsumerMapper.java │ │ └── ConsumerGraphServer.java └── test │ └── java │ └── com │ └── applause │ └── ConsumerGraph │ └── ConsumerGraphTest.java ├── scripts └── run.tmpl ├── config.properties ├── conf ├── log4j.tmpl └── config.tmpl ├── Dockerfile.tmpl ├── LICENSE ├── README.md ├── CONTRIBUTING.md └── pom.xml /VERSION: -------------------------------------------------------------------------------- 1 | 0.2 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | -------------------------------------------------------------------------------- /media/treeview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ApplauseOSS/ConsumerGraph/HEAD/media/treeview.png -------------------------------------------------------------------------------- /media/graphview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ApplauseOSS/ConsumerGraph/HEAD/media/graphview.png -------------------------------------------------------------------------------- /src/main/resources/META-INF/MANIFEST.MF: -------------------------------------------------------------------------------- 1 | Manifest-Version: 1.0 2 | Main-Class: com.applause.ConsumerGraph.ConsumerGraph 3 | -------------------------------------------------------------------------------- /scripts/run.tmpl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Terminates 
after first error 4 | set -e 5 | 6 | # NOTE: this tests that _all_ brokers are up. 7 | IFS=',' read -a brokers <<< "${KAFKA_BROKERS}" 8 | for broker in "${brokers[@]}" 9 | do 10 | dockerize -wait "tcp://${broker}" -timeout 300s 11 | done 12 | 13 | dockerize -template /app/conf/config.tmpl:/app/config.properties 14 | dockerize -template /app/conf/log4j.tmpl:/app/log4j.properties 15 | 16 | java -Dlog4j.configuration="file:/app/log4j.properties" -jar consumer-graph-CONSUMERGRAPH_VERSION-jar-with-dependencies.jar --config /app/config.properties 17 | -------------------------------------------------------------------------------- /config.properties: -------------------------------------------------------------------------------- 1 | 2 | # port for the server 3 | port=9913 4 | 5 | # the name of the kafka cluster 6 | cluster.name=kafka1 7 | 8 | # kafka broker(s) 9 | # e.g. kafkaone.server.com:9092,kafkatwo.server.com:9092 10 | bootstrap.servers=192.168.10.162:9092 11 | 12 | # the time the consumer spends waiting in poll() if data is not available, in milliseconds 13 | timeout=30000 14 | 15 | # filter out topics and consumers we don't want to see 16 | filters.topic=_.*|console.* 17 | #filters.consumer=_.*|console.* 18 | 19 | # UI style 20 | # one of [tree|graph] 21 | # 'tree' is a collapsible directory-like listing 22 | # 'graph' is a collapsible graph view 23 | ui.style=tree 24 | -------------------------------------------------------------------------------- /conf/log4j.tmpl: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger={{ .Env.LOG_LEVEL }}, stdout, file 3 | 4 | # Redirect log messages to console 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 9 | 10 | # Redirect log messages to
a log file, support file rolling. 11 | log4j.appender.file=org.apache.log4j.RollingFileAppender 12 | log4j.appender.file.File=consumer-graph.log 13 | log4j.appender.file.MaxFileSize=5MB 14 | log4j.appender.file.MaxBackupIndex=10 15 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 16 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 17 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout, file 3 | 4 | # Redirect log messages to console 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 9 | 10 | # Redirect log messages to a log file, support file rolling. 11 | log4j.appender.file=org.apache.log4j.RollingFileAppender 12 | log4j.appender.file.File=consumer-graph.log 13 | log4j.appender.file.MaxFileSize=5MB 14 | log4j.appender.file.MaxBackupIndex=10 15 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 16 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /conf/config.tmpl: -------------------------------------------------------------------------------- 1 | 2 | # port for the server 3 | port={{ .Env.PORT }} 4 | 5 | # the name of the kafka cluster 6 | cluster.name={{ .Env.CLUSTER_NAME }} 7 | 8 | # kafka broker(s) 9 | # e.g. 
kafkaone.server.com:9092,kafkatwo.server.com:9092 10 | bootstrap.servers={{ .Env.KAFKA_BROKERS }} 11 | 12 | # the time the consumer spends waiting in poll() if data is not available, in milliseconds 13 | timeout={{ .Env.CONSUMER_TIMEOUT }} 14 | 15 | # filter out topics and consumers we don't want to see 16 | {{if .Env.TOPIC_FILTER}} 17 | filters.topic={{ .Env.TOPIC_FILTER}} 18 | {{- end}} 19 | 20 | {{if .Env.CONSUMER_FILTER}} 21 | filters.consumer={{ .Env.CONSUMER_FILTER }} 22 | {{- end}} 23 | 24 | # UI style 25 | # one of [tree|graph] 26 | # 'tree' is a collapsable directory-like listing 27 | # 'graph' is a collapsable graph view 28 | ui.style={{ .Env.UI_STYLE }} -------------------------------------------------------------------------------- /Dockerfile.tmpl: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | RUN apt-get update && \ 4 | apt-get upgrade -y && \ 5 | apt-get install -y software-properties-common && \ 6 | add-apt-repository ppa:webupd8team/java -y && \ 7 | apt-get update && \ 8 | echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections && \ 9 | apt-get install -y wget oracle-java8-installer && \ 10 | apt-get clean 11 | 12 | RUN mkdir -p /app/tools 13 | WORKDIR /app 14 | 15 | ENV DOCKERIZE_VERSION v0.3.0 16 | 17 | RUN wget https://github.com/jwilder/dockerize/releases/download/"${DOCKERIZE_VERSION}"/dockerize-linux-amd64-"${DOCKERIZE_VERSION}".tar.gz \ 18 | && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-"${DOCKERIZE_VERSION}".tar.gz 19 | 20 | ADD VERSION . 21 | ADD LICENSE . 22 | ADD README.md . 
23 | ADD conf /app/conf 24 | ADD scripts /app/scripts 25 | ADD target/consumer-graph-CONSUMERGRAPH_VERSION-jar-with-dependencies.jar /app/consumer-graph-CONSUMERGRAPH_VERSION-jar-with-dependencies.jar 26 | 27 | CMD ./scripts/run.sh 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Applause 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/test/java/com/applause/ConsumerGraph/ConsumerGraphTest.java: -------------------------------------------------------------------------------- 1 | package com.applause.ConsumerGraph; 2 | 3 | 4 | import org.junit.Test; 5 | 6 | import java.util.ArrayList; 7 | import java.util.Properties; 8 | 9 | import static org.junit.Assert.*; 10 | 11 | public class ConsumerGraphTest { 12 | 13 | @Test 14 | public void testIsKafkaAlive() { 15 | String bootstrapServers = "unknownKafkaServer13:9092"; 16 | assertFalse(ConsumerGraph.isKafkaAlive(bootstrapServers)); 17 | } 18 | 19 | @Test 20 | public void testLoadProperties() { 21 | try { 22 | Properties properties = ConsumerGraph.loadProperties("config.properties"); 23 | assertTrue(true); 24 | } catch (Exception ioe) { 25 | fail(); 26 | } 27 | 28 | try { 29 | Properties properties = ConsumerGraph.loadProperties("foo.properties"); 30 | fail(); 31 | } catch (Exception ioe) { 32 | assertTrue(true); 33 | } 34 | } 35 | 36 | @Test 37 | public void testCheckProperties() { 38 | Properties properties = new Properties(); 39 | properties.put("bootstrap.servers", "kafka1"); 40 | properties.put("port", "9092"); 41 | 42 | ArrayList missingProperties = ConsumerGraph.checkProperties(properties); 43 | assertTrue(missingProperties.size() == 0); 44 | 45 | properties.remove("bootstrap.servers"); 46 | missingProperties = ConsumerGraph.checkProperties(properties); 47 | assertTrue(missingProperties.size() == 1); 48 | assertTrue(missingProperties.contains("bootstrap.servers")); 49 | 50 | properties.remove("port"); 51 | missingProperties = ConsumerGraph.checkProperties(properties); 52 | assertTrue(missingProperties.size() == 2); 53 | assertTrue(missingProperties.contains("bootstrap.servers") && missingProperties.contains("port")); 54 | } 55 | 56 | @Test 57 | public void testGetConfig() { 58 | String[] args = new String[]{"--config", "config.properties"}; 59 | String 
configFile = ConsumerGraph.getConfig(args); 60 | assertTrue(configFile.equalsIgnoreCase("config.properties")); 61 | 62 | args = new String[]{"-c", "config.properties"}; 63 | configFile = ConsumerGraph.getConfig(args); 64 | assertTrue(configFile.equalsIgnoreCase("config.properties")); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ConsumerGraph 2 | 3 | [![Build Status](https://travis-ci.org/ApplauseOSS/ConsumerGraph.svg?branch=master)](https://travis-ci.org/ApplauseOSS/ConsumerGraph) 4 | 5 | A small utility to browse Kafka topic-to-consumer relationships. Each node is collapsible. 6 | 7 | ***NOTE*** - This supports installations with offsets stored in Kafka. Offsets stored in Zookeeper are not supported. 8 | 9 | ***Graph View:*** 10 | 11 | ![alt text](media/graphview.png "Graph View") 12 | 13 | ***Tree View:*** 14 | 15 | ![alt text](media/treeview.png "Tree View") 16 | 17 | # Docker Configuration Environment Variables 18 | The following env vars are recognized by the image: 19 | 20 | | Name | Default | Description | Example | 21 | |:-----------------------------|:-----------:|:--------------------------------------------------------------------------------------------|:----------------------------------------:| 22 | |`CLUSTER_NAME` | `Kafka` | The name of this Kafka cluster. Displayed on the root node. | `kafkaone` | 23 | |`PORT` | ***NONE*** | The port the server runs on. | `8080` | 24 | |`KAFKA_BROKERS` | ***NONE*** | A list of Kafka brokers for the cluster to monitor. | `kafkaone.server.com:9092,kafkatwo.server.com:9092` | 25 | |`CONSUMER_TIMEOUT` | ***NONE*** | The time the consumer spends waiting in poll() if data is not available, in milliseconds. | `30000` | 26 | |`TOPIC_FILTER` | ***NONE*** | A regex to filter out topics. 
| _.*|console.\* | 27 | |`CONSUMER_FILTER` | ***NONE*** | A regex to filter out consumers. | _.*|console.\* | 28 | |`LOG_LEVEL` | ***NONE*** | The logging level for log4j, [ALL | DEBUG | ERROR | FATAL | INFO | OFF | TRACE | WARN] | `INFO` | 29 | |`UI_STYLE` | ***NONE*** | The UI style to use, [graph | tree] | `tree` | 30 | 31 | # Building and Running 32 | ConsumerGraph is available on [Docker hub](https://hub.docker.com/search/?q=consumergraph). 33 | 34 | The easiest way to build it is via the build script. You will need Maven and Docker installed. 35 | ~~~~~ 36 | $ ./build.sh 37 | $ docker run -p 9913:9913 \ 38 | -e PORT=9913 \ 39 | -e CLUSTER_NAME=kafka_one \ 40 | -e KAFKA_BROKERS=172.27.24.93:9092 \ 41 | -e TOPIC_FILTER="_.*|console.*" \ 42 | -e LOG_LEVEL=INFO \ 43 | -e UI_STYLE=tree \ 44 | -e CONSUMER_TIMEOUT=30000 \ 45 | consumergraph 46 | ~~~~~ 47 | 48 | # Development 49 | You can build ConsumerGraph with the Maven using the standard lifecycle phases. 50 | 51 | # Version Compatibility 52 | This was built/tested using the following versions. 53 | 54 | | Component | Version | 55 | |:------------------|:-----------| 56 | | Maven | `3.3.9` | 57 | | Docker | `17.03.0-ce, build 60ccb22` | 58 | | Java | `1.8.0_121` | 59 | | Kafka | `0.10.1.0` | 60 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to ConsumerGraph 2 | 3 | Please help make ConsumerGraph better. 4 | 5 | When contributing to this repository, please first discuss the change you wish to make via issue, email, or any other method with the owners of this repository before making a change. 6 | 7 | Please note we have a code of conduct, please follow it in all your interactions with the project. 8 | 9 | ## Pull Request Process 10 | 11 | 1. Have full test coverage on the new or changed code. 12 | 2. Update the README.md with details of changes to any interface. 
13 | 3. Ensure all builds and tests pass before issuing the pull request. 14 | 4. Please add a meaningful description for the proposed change/fix on the pull request description. 15 | 5. Have at least one reviewer approve the pull request. 16 | 6. You may request the repo maintainer to merge the pull request for you. 17 | 7. Bugfixes should target master branch while new features should target devel branch. 18 | 19 | ## Code of Conduct 20 | 21 | ### Our Pledge 22 | 23 | In the interest of fostering an open and welcoming environment, we as 24 | contributors and maintainers pledge to making participation in our project and 25 | our community a harassment-free experience for everyone, regardless of age, body 26 | size, disability, ethnicity, gender identity and expression, level of experience, 27 | nationality, personal appearance, race, religion, or sexual identity and 28 | orientation. 29 | 30 | ### Our Standards 31 | 32 | Examples of behavior that contributes to creating a positive environment 33 | include: 34 | 35 | * Using welcoming and inclusive language 36 | * Being respectful of differing viewpoints and experiences 37 | * Gracefully accepting constructive criticism 38 | * Focusing on what is best for the community 39 | * Showing empathy towards other community members 40 | 41 | Examples of unacceptable behavior by participants include: 42 | 43 | * The use of sexualized language or imagery and unwelcome sexual attention or 44 | advances 45 | * Trolling, insulting/derogatory comments, and personal or political attacks 46 | * Public or private harassment 47 | * Publishing others' private information, such as a physical or electronic 48 | address, without explicit permission 49 | * Other conduct which could reasonably be considered inappropriate in a 50 | professional setting 51 | 52 | ### Our Responsibilities 53 | 54 | Project maintainers are responsible for clarifying the standards of acceptable 55 | behavior and are expected to take appropriate and fair corrective
action in 56 | response to any instances of unacceptable behavior. 57 | 58 | Project maintainers have the right and responsibility to remove, edit, or 59 | reject comments, commits, code, wiki edits, issues, and other contributions 60 | that are not aligned to this Code of Conduct, or to ban temporarily or 61 | permanently any contributor for other behaviors that they deem inappropriate, 62 | threatening, offensive, or harmful. 63 | 64 | ### Scope 65 | 66 | This Code of Conduct applies both within project spaces and in public spaces 67 | when an individual is representing the project or its community. Examples of 68 | representing a project or community include using an official project e-mail 69 | address, posting via an official social media account, or acting as an appointed 70 | representative at an online or offline event. Representation of a project may be 71 | further defined and clarified by project maintainers. 72 | 73 | ### Enforcement 74 | 75 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 76 | reported by contacting the project team at eng@applause.com. All 77 | complaints will be reviewed and investigated and will result in a response that 78 | is deemed necessary and appropriate to the circumstances. The project team is 79 | obligated to maintain confidentiality with regard to the reporter of an incident. 80 | Further details of specific enforcement policies may be posted separately. 81 | 82 | Project maintainers who do not follow or enforce the Code of Conduct in good 83 | faith may face temporary or permanent repercussions as determined by other 84 | members of the project's leadership. 
85 | 86 | ### Attribution 87 | 88 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 89 | available at [http://contributor-covenant.org/version/1/4][version] 90 | 91 | [homepage]: http://contributor-covenant.org 92 | [version]: http://contributor-covenant.org/version/1/4/ -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | com.applause 7 | consumer-graph 8 | 0.2 9 | Consumer Graph 10 | 11 | 12 | 7.2.0.v20101020 13 | 0.10.1.0 14 | 1.7.5 15 | 20160810 16 | 2.5 17 | 1.4 18 | 2.3.25-incubating 19 | 2.4 20 | 4.12 21 | 22 | 23 | 24 | 25 | org.apache.kafka 26 | kafka-clients 27 | ${kafkaVersion} 28 | 29 | 30 | org.apache.kafka 31 | kafka_2.11 32 | ${kafkaVersion} 33 | 34 | 35 | org.eclipse.jetty 36 | jetty-server 37 | ${jettyVersion} 38 | 39 | 40 | org.eclipse.jetty 41 | jetty-servlet 42 | ${jettyVersion} 43 | 44 | 45 | org.json 46 | json 47 | ${jsonVersion} 48 | 49 | 50 | commons-io 51 | commons-io 52 | ${commonsioVersion} 53 | 54 | 55 | commons-cli 56 | commons-cli 57 | ${commonsCLIVersion} 58 | 59 | 60 | org.freemarker 61 | freemarker 62 | ${freemarkerVersion} 63 | 64 | 65 | org.apache.maven.plugins 66 | maven-jar-plugin 67 | ${mavenPluginVersion} 68 | 69 | 70 | org.slf4j 71 | slf4j-api 72 | ${slf4jVersion} 73 | 74 | 75 | org.slf4j 76 | slf4j-simple 77 | ${slf4jVersion} 78 | 79 | 80 | junit 81 | junit 82 | ${junitVersion} 83 | 84 | 85 | 86 | 87 | 88 | 89 | org.mortbay.jetty 90 | jetty-maven-plugin 91 | ${jettyVersion} 92 | 93 | 94 | 95 | maven-assembly-plugin 96 | 97 | 98 | 99 | com.applause.ConsumerGraph.ConsumerGraph 100 | 101 | 102 | 103 | jar-with-dependencies 104 | 105 | 106 | 107 | 108 | make-assembly 109 | package 110 | 111 | single 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- 
/src/main/java/com/applause/ConsumerGraph/ConsumerGraph.java: -------------------------------------------------------------------------------- 1 | package com.applause.ConsumerGraph; 2 | 3 | 4 | import org.apache.commons.cli.*; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.io.FileInputStream; 9 | import java.io.IOException; 10 | import java.io.InputStream; 11 | import java.net.InetSocketAddress; 12 | import java.net.Socket; 13 | import java.util.ArrayList; 14 | import java.util.Properties; 15 | 16 | 17 | /** 18 | * A simple program to graph Kafka topics to their consumers. 19 | */ 20 | public class ConsumerGraph { 21 | private static Logger LOGGER = LoggerFactory.getLogger(ConsumerGraph.class); 22 | 23 | private static final int SERVER_TIMEOUT = 30; 24 | 25 | public static final String DEFAULT_CLUSTER_NAME = "Kafka"; 26 | public static final String DEFAULT_UI_STYLE = "graph"; 27 | 28 | public static final String FILTERS_TOPIC = "filters.topic"; 29 | public static final String FILTERS_CONSUMER = "filters.consumer"; 30 | public static final String CLUSTER_NAME = "cluster.name"; 31 | public static final String BOOTSTRAP_SERVERS = "bootstrap.servers"; 32 | public static final String TIMEOUT = "timeout"; 33 | public static final String PORT = "port"; 34 | public static final String UI_STYLE = "ui.style"; 35 | 36 | public ConsumerGraph() { } 37 | 38 | /** 39 | * Test if the Kafka servers are available. 40 | * Note that this fails if any server is unavailable. 
41 | * 42 | * @param bootstrapServers the list of Kafka servers to test, comma separated 43 | * @return true if all servers are reachable, false otherwise 44 | */ 45 | protected static boolean isKafkaAlive(String bootstrapServers) { 46 | for (String server : bootstrapServers.split(",")) { 47 | try { 48 | String[] hostPort = server.split(":"); 49 | Socket socket = new Socket(); 50 | socket.connect(new InetSocketAddress(hostPort[0], Integer.parseInt(hostPort[1])), SERVER_TIMEOUT); 51 | } catch (IOException e) { 52 | return false; // Either timeout or unreachable or failed DNS lookup. 53 | } 54 | } 55 | 56 | return true; 57 | } 58 | 59 | /** 60 | * Load properties from the specified configuration file. 61 | * 62 | * @param configFile the configuration file to load, including path 63 | * @return a Properties object loaded from the configuration file 64 | * @throws IOException 65 | */ 66 | protected static Properties loadProperties(String configFile) throws IOException { 67 | InputStream input = null; 68 | Properties properties = null; 69 | 70 | try { 71 | input = new FileInputStream(configFile); 72 | properties = new Properties(); 73 | properties.load(input); 74 | } catch (IOException ex) { 75 | ex.printStackTrace(); 76 | } finally { 77 | if (input != null) { 78 | try { 79 | input.close(); 80 | } catch (IOException e) { 81 | e.printStackTrace(); 82 | } 83 | } 84 | } 85 | 86 | if (properties == null) { 87 | throw new RuntimeException("Unable to load properties file " + configFile); 88 | } 89 | 90 | return properties; 91 | } 92 | 93 | /** 94 | * Validates the specified properties contain all required properties. 95 | * If any are missing they are returned in a list. 
96 | * 97 | * @param properties the properties to test 98 | * @return a ArrayList of missing properties 99 | */ 100 | protected static ArrayList checkProperties(Properties properties) { 101 | ArrayList missingProperties = new ArrayList(); 102 | 103 | if (!properties.containsKey("bootstrap.servers")) { 104 | missingProperties.add("bootstrap.servers"); 105 | } 106 | 107 | if (!properties.containsKey("port")) { 108 | missingProperties.add("port"); 109 | } 110 | 111 | return missingProperties; 112 | } 113 | 114 | /** 115 | * Given String[] args, return the path and configuration file specified on the commandline. 116 | * 117 | * @param args the commandline args passed in 118 | * @return a String containing the path and name of the configuration file 119 | */ 120 | protected static String getConfig(String[] args) { 121 | Options options = new Options(); 122 | 123 | Option input = new Option("c", "config", true, "location and name of the config file"); 124 | input.setRequired(true); 125 | options.addOption(input); 126 | 127 | CommandLineParser parser = new DefaultParser(); 128 | HelpFormatter formatter = new HelpFormatter(); 129 | 130 | String configFile = null; 131 | try { 132 | CommandLine cli = parser.parse(options, args); 133 | configFile = cli.getOptionValue("config"); 134 | } catch (ParseException e) { 135 | System.out.println(e.getMessage()); 136 | formatter.printHelp("ConsumerGraph", options); 137 | 138 | System.exit(1); 139 | } 140 | 141 | return configFile; 142 | } 143 | 144 | public static void main(String[] args) { 145 | try { 146 | Properties properties = loadProperties(getConfig(args)); 147 | LOGGER.info("Properties: " + properties.toString()); 148 | 149 | ArrayList missingProperties = checkProperties(properties); 150 | if (missingProperties.size() > 0) { 151 | LOGGER.error("Missing required properties: " + missingProperties.toString()); 152 | System.exit(1); 153 | } 154 | 155 | if (isKafkaAlive(properties.getProperty(BOOTSTRAP_SERVERS))) { 156 | new 
ConsumerGraphServer(properties); 157 | } else { 158 | LOGGER.error("Kafka is not reachable, bootstrap.servers: " + properties.getProperty(BOOTSTRAP_SERVERS)); 159 | } 160 | } catch (Exception e) { 161 | e.printStackTrace(); 162 | } 163 | } 164 | } 165 | 166 | -------------------------------------------------------------------------------- /src/main/java/com/applause/ConsumerGraph/TopicConsumerMapper.java: -------------------------------------------------------------------------------- 1 | package com.applause.ConsumerGraph; 2 | 3 | import kafka.coordinator.BaseKey; 4 | import kafka.coordinator.GroupMetadataManager; 5 | import kafka.coordinator.GroupTopicPartition; 6 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 7 | import org.apache.kafka.clients.consumer.ConsumerRecord; 8 | import org.apache.kafka.clients.consumer.ConsumerRecords; 9 | import org.apache.kafka.clients.consumer.KafkaConsumer; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.apache.kafka.common.errors.WakeupException; 12 | import org.apache.kafka.common.record.TimestampType; 13 | import org.apache.kafka.common.serialization.StringDeserializer; 14 | import org.slf4j.Logger; 15 | import org.slf4j.LoggerFactory; 16 | 17 | import java.nio.ByteBuffer; 18 | import java.util.*; 19 | 20 | 21 | /** 22 | * The Kafka consumer that maps topics to consumers by listening on the internal 23 | * __consumer_offsets topic. 24 | * This does not support offsets stored in Zookeeper. 
25 | */ 26 | public class TopicConsumerMapper implements Runnable { 27 | static final Logger LOGGER = LoggerFactory.getLogger(TopicConsumerMapper.class); 28 | 29 | public static final String GROUPID = "topic-consumer-mapper"; 30 | public static final String TOPIC = "__consumer_offsets"; 31 | 32 | private static final int DEFAULT_TIMEOUT = 5000; 33 | 34 | private KafkaConsumer consumer; 35 | private List topics; 36 | private String topicFilter = null; 37 | private String consumerFilter = null; 38 | private long timeout = 5000; 39 | private long dataLastUpdated; 40 | 41 | private HashMap> topicConsumerGroupMap = new HashMap>(); 42 | 43 | public HashMap> getTopicConsumerGroupMap() { 44 | return this.topicConsumerGroupMap; 45 | } 46 | 47 | /** 48 | * Create the Kafka consumer based on the passed-in parameters. 49 | * 50 | * @param bootstrapServers the Kafka servers 51 | * @param topicFilter the regex filter to apply to topics 52 | * @param consumerFilter the regex filter to apply to consumers 53 | * @param timeout the consumer.poll() timeout 54 | */ 55 | public TopicConsumerMapper(String bootstrapServers, String topicFilter, String consumerFilter, long timeout) { 56 | this.topicFilter = topicFilter == null ? "" : topicFilter; 57 | this.consumerFilter = consumerFilter == null ? "" : consumerFilter; 58 | this.timeout = timeout < 1 ?
this.DEFAULT_TIMEOUT : timeout; 59 | 60 | LOGGER.debug("Bootstrap servers: " + bootstrapServers); 61 | LOGGER.debug("Topic filter: " + this.topicFilter); 62 | LOGGER.debug("Consumer filter: " + this.consumerFilter); 63 | LOGGER.debug("Consumer.poll() timeout: " + this.timeout); 64 | 65 | this.topics = Arrays.asList(this.TOPIC); 66 | Properties props = new Properties(); 67 | props.put("bootstrap.servers", bootstrapServers); 68 | props.put("group.id", this.GROUPID); 69 | props.put("key.deserializer", StringDeserializer.class.getName()); 70 | props.put("value.deserializer", StringDeserializer.class.getName()); 71 | 72 | LOGGER.info("Creating Consumer with properties: " + props.toString()); 73 | this.consumer = new KafkaConsumer(props); 74 | } 75 | 76 | public void run() { 77 | try { 78 | /** 79 | * Subscribe to the internal __consumer_offsets topic. 80 | */ 81 | consumer.subscribe(topics, new ConsumerRebalanceListener() { 82 | public void onPartitionsRevoked(Collection partitions) { 83 | // NoOp 84 | } 85 | 86 | /** 87 | * Always go to the beginning of the partitions to ensure we account for all 88 | * consumers. 89 | */ 90 | public void onPartitionsAssigned(Collection partitions) { 91 | consumer.seekToBeginning(partitions); 92 | } 93 | }); 94 | 95 | while (true) { 96 | ConsumerRecords records = consumer.poll(timeout); 97 | for (ConsumerRecord record : records) { 98 | this.dataLastUpdated = System.currentTimeMillis(); 99 | 100 | // TODO: add message timestamps to output. this requires ui work. 
101 | long recordTimestamp = record.timestamp(); 102 | TimestampType recordTimestampType = record.timestampType(); 103 | 104 | BaseKey baseKey = GroupMetadataManager.readMessageKey(ByteBuffer.wrap(record.key().getBytes())); 105 | if (baseKey.key() instanceof GroupTopicPartition) { 106 | GroupTopicPartition gtp = (GroupTopicPartition) baseKey.key(); 107 | String topic = gtp.topicPartition().topic(); 108 | String group = gtp.group(); 109 | 110 | if (!topic.matches(this.topicFilter)) { 111 | if (!topicConsumerGroupMap.containsKey(topic)) { 112 | topicConsumerGroupMap.put(topic, new ArrayList(Arrays.asList(group))); 113 | } 114 | 115 | ArrayList groupList = topicConsumerGroupMap.get(topic); 116 | if (!group.matches(this.consumerFilter) && !groupList.contains(group)) { 117 | groupList.add(group); 118 | topicConsumerGroupMap.put(topic, groupList); 119 | } 120 | } 121 | } 122 | } 123 | } 124 | } catch (WakeupException e) { 125 | // ignore for shutdown 126 | } finally { 127 | consumer.close(); 128 | } 129 | } 130 | 131 | /** 132 | * Ensure we exit the consumer correctly. 133 | */ 134 | public void shutdown() { 135 | consumer.wakeup(); 136 | } 137 | 138 | /** 139 | * Fetch the last time we updated our data. 140 | * 141 | * @return the epoch millis representing the last time the consumer fetched new data from Kafka. 
142 | */ 143 | public long getDataLastUpdated() { 144 | return this.dataLastUpdated; 145 | } 146 | } -------------------------------------------------------------------------------- /src/main/resources/templates/graph.ftl: -------------------------------------------------------------------------------- 1 | 2 | <#-- mostly from https://bl.ocks.org/mbostock/4339083 --> 3 | 4 | 5 | Kafka ConsumerGraph 6 | 27 | 28 | 29 | 30 | 31 | 169 | 170 | 171 | -------------------------------------------------------------------------------- /src/main/resources/templates/tree.ftl: -------------------------------------------------------------------------------- 1 | 2 | <#-- from https://bl.ocks.org/mbostock/1093025 --> 3 | 4 | 5 | Kafka ConsumerGraph 6 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /src/main/java/com/applause/ConsumerGraph/ConsumerGraphServer.java: -------------------------------------------------------------------------------- 1 | package com.applause.ConsumerGraph; 2 | 3 | import freemarker.template.*; 4 | import org.eclipse.jetty.server.Request; 5 | import org.eclipse.jetty.server.Server; 6 | import org.eclipse.jetty.server.handler.AbstractHandler; 7 | import org.json.JSONArray; 8 | import org.json.JSONObject; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import javax.servlet.ServletException; 13 | import javax.servlet.http.HttpServletRequest; 14 | import javax.servlet.http.HttpServletResponse; 15 | import java.io.IOException; 16 | import java.io.StringWriter; 17 | import java.util.*; 18 | 19 | 20 | /** 21 | * The webserver. This serves the UI, including templating values, and initialization of the Kafka consumer. 
22 | */ 23 | public class ConsumerGraphServer extends AbstractHandler { 24 | private static Logger LOGGER = LoggerFactory.getLogger(ConsumerGraphServer.class); 25 | 26 | private TopicConsumerMapper consumer; 27 | private Template indexTemplate; 28 | 29 | private String topicFilter; 30 | private String consumerFilter; 31 | private String clusterName; 32 | private String bootstrapServers; 33 | private String uiStyle; 34 | private long timeout; 35 | private int port; 36 | 37 | /** 38 | * Create and start the Kafka consumer and webserver based on the passed in properties. 39 | * 40 | * @param properties the properties for this server 41 | * @throws Exception 42 | */ 43 | public ConsumerGraphServer(Properties properties) throws Exception { 44 | try { 45 | this.bootstrapServers = properties.getProperty(ConsumerGraph.BOOTSTRAP_SERVERS); 46 | this.topicFilter = properties.getProperty(ConsumerGraph.FILTERS_TOPIC); 47 | this.consumerFilter = properties.getProperty(ConsumerGraph.FILTERS_CONSUMER); 48 | this.clusterName = properties.getProperty(ConsumerGraph.CLUSTER_NAME) == null ? ConsumerGraph.DEFAULT_CLUSTER_NAME : properties.getProperty(ConsumerGraph.CLUSTER_NAME); 49 | this.uiStyle = properties.getProperty(ConsumerGraph.UI_STYLE) == null ? 
ConsumerGraph.DEFAULT_UI_STYLE : properties.getProperty(ConsumerGraph.UI_STYLE); 50 | this.timeout = Long.parseLong(properties.getProperty(ConsumerGraph.TIMEOUT)); 51 | this.port = Integer.parseInt(properties.getProperty(ConsumerGraph.PORT)); 52 | 53 | createTopicConsumerMapper(); 54 | 55 | loadTemplate(); 56 | 57 | initServer(this.port); 58 | } catch (IOException ioe) { 59 | ioe.printStackTrace(); 60 | } 61 | } 62 | 63 | @Override 64 | public void handle(String target, 65 | Request baseRequest, 66 | HttpServletRequest request, 67 | HttpServletResponse response) throws IOException, 68 | ServletException { 69 | try { 70 | response.setContentType("text/html; charset=utf-8"); 71 | response.setStatus(HttpServletResponse.SC_OK); 72 | response.getWriter().print(getContent()); 73 | } catch (TemplateException te) { 74 | response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); 75 | te.printStackTrace(); 76 | response.getWriter().print(":("); 77 | } 78 | 79 | baseRequest.setHandled(true); 80 | } 81 | 82 | /** 83 | * Prepare the freemarker template for loading. 84 | * Set base path, encoding, locale and exception handling, then set the template. 85 | * 86 | * @throws IOException 87 | */ 88 | private void loadTemplate() throws IOException { 89 | Configuration config = new Configuration(); 90 | config.setClassForTemplateLoading(this.getClass(), "/templates"); 91 | 92 | // recommended settings 93 | config.setIncompatibleImprovements(new Version(2, 3, 20)); 94 | config.setDefaultEncoding("UTF-8"); 95 | config.setLocale(Locale.US); 96 | 97 | config.setTemplateExceptionHandler(TemplateExceptionHandler.RETHROW_HANDLER); 98 | 99 | setTemplate(config); 100 | } 101 | 102 | /** 103 | * Set the template based on the passed-in configuration. 
104 | * 105 | * @param config the configuration containing the type of template to use 106 | * @throws IOException 107 | */ 108 | private void setTemplate(Configuration config) throws IOException { 109 | if (this.uiStyle.equalsIgnoreCase("tree")) { 110 | this.indexTemplate = config.getTemplate("tree.ftl"); 111 | } else { 112 | this.indexTemplate = config.getTemplate("graph.ftl"); 113 | } 114 | } 115 | 116 | /** 117 | * Given a map of JSONArray, populate the freemarker template. 118 | * 119 | * @param map a JSONArray detailing the mappings between Kafka topics and consumers 120 | * @return a String containing the fully-templated text 121 | * @throws IOException 122 | * @throws TemplateException 123 | */ 124 | private String populateTemplate(JSONArray map) throws IOException, TemplateException { 125 | Map input = new HashMap(); 126 | 127 | // TODO: set data-last-refreshed time 128 | input.put("cluster_name", this.clusterName); 129 | input.put("links", map); 130 | 131 | StringWriter sw = new StringWriter(); 132 | indexTemplate.process(input, sw); 133 | return sw.toString(); 134 | } 135 | 136 | /** 137 | * Fetch the latest Kafka topic-to-consumer map, build a JSONArray based on the data, 138 | * template the data and return that String. 139 | * 140 | * @return a String containing the fully-templated text. 
141 | * @throws IOException 142 | * @throws TemplateException 143 | */ 144 | private String getContent() throws IOException, TemplateException { 145 | JSONArray map = new JSONArray(); 146 | HashMap> tcm = this.consumer.getTopicConsumerGroupMap(); 147 | for (String topic : tcm.keySet()) { 148 | JSONObject oneTopic = new JSONObject(); 149 | oneTopic.put("name", topic); 150 | oneTopic.put("parent", this.clusterName); 151 | JSONArray oneChildlist = new JSONArray(); 152 | for (String group : tcm.get(topic)) { 153 | JSONObject oneMap = new JSONObject(); 154 | oneMap.put("name", group); 155 | oneMap.put("parent", topic); 156 | oneChildlist.put(oneMap); 157 | } 158 | oneTopic.put("children", oneChildlist); 159 | map.put(oneTopic); 160 | } 161 | 162 | return populateTemplate(map); 163 | } 164 | 165 | /** 166 | * Create the Kafka consumer that maps topics to consumers. 167 | * This runs in a background thread. 168 | */ 169 | private void createTopicConsumerMapper() { 170 | this.consumer = new TopicConsumerMapper(this.bootstrapServers, this.topicFilter, this.consumerFilter, this.timeout); 171 | 172 | Thread thread = new Thread(this.consumer); 173 | 174 | thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { 175 | public void uncaughtException(Thread t, Throwable e) { 176 | e.printStackTrace(); 177 | System.exit(1); 178 | } 179 | }); 180 | 181 | Runtime.getRuntime().addShutdownHook(new Thread() { 182 | @Override 183 | public void run() { 184 | consumer.shutdown(); 185 | } 186 | }); 187 | 188 | thread.start(); 189 | } 190 | 191 | /** 192 | * Initialize the webserver. 
193 | * 194 | * @param port the port the server should listen on 195 | */ 196 | private void initServer(int port) { 197 | try { 198 | Server server = new Server(port); 199 | 200 | server.setHandler(this); 201 | 202 | server.start(); 203 | server.join(); 204 | } catch (Exception e) { 205 | e.printStackTrace(); 206 | } 207 | } 208 | } 209 | --------------------------------------------------------------------------------