├── .gitattributes ├── .gitignore ├── .pre-commit-config.yaml ├── .requirements-precommit.txt ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── build.rs ├── exampleConfig.yaml ├── examples └── consumer_offsets_reader.rs ├── resources └── web_server │ └── public │ ├── css │ └── font-awesome.min.css │ ├── fonts │ ├── .directory │ ├── FontAwesome.otf │ ├── fontawesome-webfont.eot │ ├── fontawesome-webfont.svg │ ├── fontawesome-webfont.ttf │ ├── fontawesome-webfont.woff │ └── fontawesome-webfont.woff2 │ ├── images │ ├── kafka_logo.png │ ├── kafka_logo_small.png │ ├── kafka_logo_small_white.png │ ├── kafka_logo_white.png │ └── webkafka_favicon.png │ ├── my_css.css │ ├── my_js.js │ └── sb-admin-2 │ ├── dist │ ├── css │ │ ├── sb-admin-2.css │ │ └── sb-admin-2.min.css │ └── js │ │ ├── sb-admin-2.js │ │ └── sb-admin-2.min.js │ └── vendor │ ├── bootstrap │ ├── css │ │ └── bootstrap.min.css │ └── js │ │ └── bootstrap.min.js │ ├── datatables-plugins │ ├── dataTables.bootstrap.css │ └── dataTables.bootstrap.min.js │ ├── datatables-responsive │ ├── dataTables.responsive.css │ └── dataTables.responsive.js │ ├── datatables │ └── js │ │ └── jquery.dataTables.min.js │ ├── jquery │ └── jquery.min.js │ └── metisMenu │ ├── metisMenu.min.css │ └── metisMenu.min.js ├── rust-toolchain ├── screenshots ├── clusters.png ├── combined.png └── consumer.png └── src ├── cache.rs ├── config.rs ├── error.rs ├── live_consumer.rs ├── main.rs ├── metadata.rs ├── metrics.rs ├── offsets.rs ├── utils.rs ├── web_server ├── api.rs ├── mod.rs ├── pages │ ├── cluster.rs │ ├── clusters.rs │ ├── error_defaults.rs │ ├── group.rs │ ├── internals.rs │ ├── mod.rs │ ├── omnisearch.rs │ └── topic.rs ├── server.rs └── view │ ├── layout.rs │ └── mod.rs └── zk.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | resources/web_server/public/css/* linguist-vendored 2 | resources/web_server/public/fonts/* linguist-vendored 3 | resources/web_server/public/sb-admin-2/* linguist-vendored 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | *.bk 3 | .idea 4 | kafka-web.iml 5 | config.yaml 6 | precommit_venv/ 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | - repo: local 2 | hooks: 3 | - id: rust-linting 4 | name: Rust linting 5 | description: Run cargo fmt on files included in the commit. rustfmt should be installed before-hand. 6 | entry: cargo fmt --all -- 7 | pass_filenames: true 8 | types: [file, rust] 9 | language: system 10 | # - id: rust-clippy 11 | # name: Rust clippy 12 | # description: Run cargo clippy on files included in the commit. clippy should be installed before-hand. 
13 | # entry: cargo clippy --all-features -- -A clippy::match_wild_err_arm 14 | # pass_filenames: false 15 | # types: [file, rust] 16 | # language: system 17 | -------------------------------------------------------------------------------- /.requirements-precommit.txt: -------------------------------------------------------------------------------- 1 | pre-commit==1.14.4 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: rust 3 | rust: 4 | - nightly-2017-10-16 5 | before_install: 6 | - sudo apt-get update -q || true 7 | - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce 8 | - rustup component add rustfmt 9 | script: 10 | - cargo build 11 | - cargo fmt -- --check 12 | - export DOCKER_REPO=fede1024/kafka-view 13 | 14 | after_success: 15 | - | 16 | if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" == "false" && "$TRAVIS_BRANCH" == "master" ]]; then 17 | docker build -t $DOCKER_REPO . && docker login -u $DOCKER_USER -p $DOCKER_PASS && docker push $DOCKER_REPO 18 | fi 19 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kafka-view" 3 | version = "0.3.3" 4 | authors = ["Federico Giraud "] 5 | license = "MIT" 6 | build = "build.rs" 7 | 8 | [dependencies] 9 | brotli = "3.3.0" 10 | byteorder = "1.0.0" 11 | chrono = { version = "0.4.0", features = ["serde"] } 12 | clap = "2.0.0" 13 | env_logger = "0.6.1" 14 | error-chain = "0.12.0" 15 | flate2 = "0.2.0" 16 | futures = "0.1.0" 17 | futures-cpupool = "0.1.0" 18 | hyper = "0.10" 19 | lazy_static = "1.0.0" 20 | log = "0.4.6" 21 | maud = { version = "0.20.0", features = ["rocket"] } 22 | rand = "0.6.5" 23 | rdkafka = "0.13.0" 24 | regex = "1.1.5" 25 | rocket = "0.4.0" 26 | rocket_codegen = "0.4.0" 27 | rocket_contrib = "0.4.0" 28 | scheduled-executor = "0.4.0" 29 | serde = "1.0.0" 30 | serde-transcode = "1.0.0" 31 | serde_derive = "1.0.0" 32 | serde_json = "1.0.0" 33 | serde_yaml = "0.8.8" 34 | zookeeper = "0.5.6" 35 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM messense/rust-musl-cross:x86_64-musl as builder 2 | ADD . /home/rust/src 3 | RUN apt-get update && apt-get install -y python && apt-get clean && rm -rf /var/lib/apt/lists/* 4 | RUN rustup update `cat rust-toolchain` && \ 5 | rustup target add --toolchain `cat rust-toolchain` x86_64-unknown-linux-musl 6 | RUN cargo build --release 7 | 8 | FROM alpine:latest 9 | RUN apk --no-cache add ca-certificates 10 | WORKDIR /root/ 11 | RUN mkdir resources 12 | COPY --from=builder /home/rust/src/resources ./resources 13 | COPY --from=builder /home/rust/src/target/x86_64-unknown-linux-musl/release/kafka-view . 
14 | ENTRYPOINT ["./kafka-view"] 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Federico Giraud 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VENV := precommit_venv 2 | HOOKS := .git/hooks/pre-commit 3 | 4 | # PRE-COMMIT HOOKS 5 | 6 | $(VENV): .requirements-precommit.txt 7 | virtualenv -p python3 $(VENV) 8 | $(VENV)/bin/pip install -r .requirements-precommit.txt 9 | 10 | .PHONY: env 11 | env: $(VENV) 12 | 13 | .PHONY: clean-env 14 | clean-env: 15 | rm -rf $(VENV) 16 | 17 | $(HOOKS): $(VENV) .pre-commit-config.yaml 18 | $(VENV)/bin/pre-commit install -f --install-hooks 19 | cargo fmt --help > /dev/null || rustup component add rustfmt 20 | cargo clippy --help > /dev/null || rustup component add clippy 21 | 22 | .PHONY: install-hooks 23 | install-hooks: $(HOOKS) 24 | 25 | .PHONY: clean-hooks 26 | clean-hooks: 27 | rm -rf $(HOOKS) 28 | 29 | # LINTING 30 | 31 | .PHONY: lint 32 | lint: 33 | cargo fmt 34 | 35 | .PHONY: clean-lint 36 | clean-lint: 37 | find . -type f -name '*.rs.bk' -delete 38 | 39 | .PHONY: clippy 40 | clippy: 41 | cargo clippy --all-features -- -A clippy::match_wild_err_arm 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | kafka-view 2 | ========== 3 | 4 | [![Build Status](https://travis-ci.org/fede1024/kafka-view.svg?branch=master)](https://travis-ci.org/fede1024/kafka-view) 5 | [![Docker Image](https://img.shields.io/docker/pulls/fede1024/kafka-view.svg?maxAge=2592000)](https://hub.docker.com/r/fede1024/kafka-view/) 6 | [![Join the chat at https://gitter.im/rust-rdkafka/Lobby](https://badges.gitter.im/rust-rdkafka/Lobby.svg)](https://gitter.im/rust-rdkafka/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 7 | 8 | Kafka-view is an experimental web interface for Kafka written in Rust. 9 | Kafka-view creates and maintains a materialized view of the internal state of 10 | Kafka including cluster metadata, traffic metrics, group membership, consumer 11 | offsets, etc.
It uses the [rdkafka](https://github.com/fede1024/rust-rdkafka) 12 | Kafka client library for Rust, and the [rocket](https://rocket.rs/) web framework. 13 | 14 | Click [here](https://github.com/fede1024/kafka-view#screenshots) for screenshots. 15 | 16 | Kafka-view supports multiple clusters and implements a fast search 17 | functionality to quickly find a topic or consumer group by name or by regex, 18 | across all clusters. 19 | 20 | ### Current features 21 | * Available data: 22 |   * Broker and topic metrics: byte rate and message rate for each broker and 23 |     topic in every cluster. 24 |   * Topic metadata: leader, replicas, ISR, topic health. 25 |   * Group membership: show active consumer groups and members, easily find all 26 |     the consumers for a given cluster or topic. 27 |   * Consumer offsets: show the current consumer offsets, the high watermark and 28 |     the difference between the two. 29 |   * Consume topic content directly from the web UI. 30 | * Search: 31 |   * Omnisearch: search for brokers, topics and consumers in a single query. 32 |   * Search topics in all clusters by name or regex. 33 |   * Search consumers in all clusters by name or regex. 34 | * Sort by any field (traffic, consumer lag, etc.) 35 | 36 | At the moment kafka-view is designed to be read-only. Functionality such as 37 | adding topics or changing consumer offsets is not supported. 38 | 39 | ## Configuring and running kafka-view 40 | 41 | ### Configuration 42 | 43 | First, create a new configuration starting from the [example configuration file]. 44 | The new configuration should contain the list of clusters you want to monitor, 45 | and a special topic in one of the clusters that kafka-view will use for caching. 46 | 47 | The caching topic should be configured to use compaction. Example setup: 48 | 49 | ```bash 50 | # Create topic 51 | kafka-topics.sh --zookeeper <zookeeper_url> --create --topic <topic_name> --partitions 3 --replication-factor 2 52 | # Enable compaction 53 | kafka-topics.sh --zookeeper <zookeeper_url> --alter --topic <topic_name> --config cleanup.policy=compact 54 | # Use small (10MB) segments per partition, so compaction runs frequently 55 | kafka-topics.sh --zookeeper <zookeeper_url> --alter --topic <topic_name> --config segment.bytes=10485760 56 | ``` 57 | 58 | [example configuration file]: https://github.com/fede1024/kafka-view/blob/master/exampleConfig.yaml 59 | 60 | ### Building and running 61 | 62 | To compile and run: 63 | ```bash 64 | rustup override set $(cat rust-toolchain) 65 | cargo run --release -- --conf config.yaml 66 | ``` 67 | 68 | To build the Docker image and run it (assuming `config.yaml` is in the current working directory and sets the listen port to 8080): 69 | ```bash 70 | docker build -t kafka-view . 71 | docker run --rm -p 8080:8080 -v `pwd`/config.yaml:/root/config.yaml kafka-view --conf config.yaml 72 | ``` 73 | 74 | Or use the prebuilt image from Docker Hub: 75 | ```bash 76 | docker pull fede1024/kafka-view 77 | docker run --rm -p 8080:8080 -v `pwd`/config.yaml:/root/config.yaml fede1024/kafka-view --conf config.yaml 78 | ``` 79 | 80 | ### Metrics 81 | 82 | Kafka exports metrics via JMX, which can be accessed via HTTP through [jolokia]. The suggested way 83 | to run Jolokia on your server is using the [JVM agent].
Example: 84 | 85 | ```bash 86 | KAFKA_OPTS="-javaagent:jolokia-jvm-1.3.7-agent.jar=port=8778" ./bin/kafka-server-start.sh config/server.properties 87 | ``` 88 | 89 | To verify that it's correctly running: 90 | 91 | ```bash 92 | curl http://localhost:8778/jolokia/read/java.lang:type=Memory/HeapMemoryUsage/used 93 | ``` 94 | 95 | Once your cluster is running with Jolokia, just add the Jolokia port to the kafka-view configuration 96 | and it will start reading metrics from the cluster. 97 | 98 | [jolokia]: https://jolokia.org 99 | [JVM agent]: https://jolokia.org/agent/jvm.html 100 | 101 | ## Implementation 102 | 103 | ### Information sources 104 | 105 | * **Metadata**: cluster metadata is periodically polled using a background 106 |   thread pool. Cluster metadata contains: topic information (leader, replicas, 107 |   ISR), broker information (broker id, hostname, etc.), group membership (group 108 |   state, members, etc.). 109 | * **Metrics**: metrics such as byte rate and message rate per topic are polled 110 |   in the background using a thread pool. Metrics are read through Jolokia, which 111 |   must be active on the Kafka brokers. 112 | * **Consumer offsets**: Kafka-view consumes the `__consumer_offsets` topic and 113 |   constantly receives the last offset commit for every consumer in every 114 |   cluster. 115 | 116 | ### Data manipulation and storage 117 | 118 | All data is stored internally in a set of in-memory data structures 119 | holding a normalized view of the last available values. When a web page is 120 | loaded, the normalized data is combined to generate the required 121 | representation of the data. 122 | 123 | ### Event caching 124 | 125 | When a new update is received from the background polling threads or the 126 | `__consumer_offsets` topic, a new event is created. Each event updates the 127 | internal memory structures, and is also periodically stored in a compacted 128 | topic in Kafka. Kafka compaction guarantees that the last update for every 129 | key remains available on the topic. 130 | 131 | When kafka-view restarts, the compacted topic is consumed and the internal 132 | memory structures are restored to their previous state. In future versions, this 133 | model will allow kafka-view to run in clustered mode, where multiple kafka-view 134 | instances will work together to poll data from Kafka and share the 135 | information using the compacted topic.
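
The restore step can be pictured as replaying the compacted topic in offset order and keeping only the last value seen for each key, with an empty payload acting as a tombstone that deletes the key. Below is a minimal sketch of that replay logic; the `CacheKey` type and raw byte-vector values are illustrative assumptions, not kafka-view's actual internal types.

```rust
use std::collections::HashMap;

// Hypothetical cache key: kafka-view's real keys identify clusters, topics,
// groups, etc., but any hashable key type works for the replay pattern.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct CacheKey {
    cluster_id: String,
    entity: String,
}

/// Rebuilds the in-memory view by replaying (key, value) events in offset
/// order. Because the backing topic is compacted, keeping the last value per
/// key (and dropping tombstoned keys) reconstructs the previous state.
fn rebuild_cache(
    events: impl IntoIterator<Item = (CacheKey, Option<Vec<u8>>)>,
) -> HashMap<CacheKey, Vec<u8>> {
    let mut cache = HashMap::new();
    for (key, value) in events {
        match value {
            Some(v) => {
                cache.insert(key, v); // a newer event overwrites older state
            }
            None => {
                cache.remove(&key); // tombstone: the key was deleted
            }
        }
    }
    cache
}
```

The same last-write-wins property is what would let multiple clustered instances share state through the topic: each instance can replay it independently and converge on the same view.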
136 | 137 | ## Contributors 138 | 139 | Thanks to: 140 | * [messense](https://github.com/messense) 141 | 142 | ## Screenshots 143 | 144 | ### Multiple cluster support 145 | 146 | ![clusters](/screenshots/clusters.png?raw=true "Clusters") 147 | 148 | ### Cluster level information 149 | 150 | ![combined](/screenshots/combined.png?raw=true "Cluster page") 151 | 152 | ### Consumer group information 153 | 154 | ![consumer](/screenshots/consumer.png?raw=true "Consumer group") 155 | 156 | 157 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::env::var; 2 | use std::fs::File; 3 | use std::io::Write; 4 | use std::path::Path; 5 | use std::process::Command; 6 | 7 | fn main() -> Result<(), std::io::Error> { 8 | let outdir = var("OUT_DIR").unwrap(); 9 | let rust_version_file = Path::new(&outdir).join("rust_version.rs"); 10 | let output = Command::new(var("RUSTC").unwrap()) 11 | .arg("--version") 12 | .output()?; 13 | let version = String::from_utf8_lossy(&output.stdout); 14 | 15 | let mut output_file = File::create(rust_version_file)?; 16 | output_file 17 | .write_all(format!("const RUST_VERSION: &str = \"{}\";", version.trim()).as_bytes())?; 18 | 19 | Ok(()) 20 | } 21 | -------------------------------------------------------------------------------- /exampleConfig.yaml: -------------------------------------------------------------------------------- 1 | # This is the kafka-view configuration file. 2 | --- 3 | # Listen host and port for the HTTP server 4 | listen_host: 0.0.0.0 5 | listen_port: 8080 6 | 7 | # Refresh interval for metadata and group membership in seconds. 8 | # Kafka-view will periodically read the metadata of the cluster, 9 | # including topics and consumer groups. This parameter specifies 10 | # how often each cluster is refreshed. 11 | metadata_refresh: 60 12 | 13 | # Refresh interval for metrics in seconds. 14 | # This parameter will regulate how often kafka-view will read the 15 | # metrics from the Kafka cluster. Remember that to have metrics, you 16 | # must run Jolokia on your cluster. 17 | metrics_refresh: 60 18 | 19 | # Where the cache will be stored. 20 | # Kafka-view will use this cluster and topic to store the cache. 21 | # If auto-topic-creation is disabled in the cluster, the topic should be 22 | # created manually before running kafka-view. The topic should also be 23 | # configured with cleanup.policy=compact. 24 | caching: 25 | cluster: local_cluster # which cluster will be used 26 | topic: replicator_topic # which topic in the cluster will be used 27 | 28 | # How long the consumer offsets will be stored for, in seconds. 29 | offsets_store_duration: 259200 30 | 31 | consumer_offsets_group_id: kafka_view_consumer 32 | 33 | clusters: 34 | # Each cluster is identified by a name, and has a list of parameters, 35 | # such as list of kafka brokers, zookeeper path and metric port. 
36 |   cluster_id_0: 37 |     broker_list:           # List of broker nodes 38 |       - host1:9092 39 |       - host2:9092 40 |       - host3:9092 41 |     zookeeper: zkhost1:2181  # format: "node:port,node:port/chroot" 42 |     jolokia_port: 8778     # optional jolokia port for metrics 43 |   cluster_id_1: 44 |     broker_list: 45 |       - host4:9092 46 |       - host5:9092 47 |       - host6:9092 48 |     zookeeper: zkhost2:2181 49 |     jolokia_port: 8778 50 |   cluster_id_2: 51 |     broker_list: 52 |       - host7:9092 53 |       - host8:9092 54 |       - host9:9092 55 |     zookeeper: zkhost3:2181 56 |     # jolokia_port: 8778   metrics are disabled 57 | -------------------------------------------------------------------------------- /examples/consumer_offsets_reader.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate byteorder; 4 | extern crate clap; 5 | extern crate env_logger; 6 | extern crate futures; 7 | extern crate rdkafka; 8 | 9 | use byteorder::{BigEndian, ReadBytesExt}; 10 | use clap::{App, Arg}; 11 | use futures::stream::Stream; 12 | use rdkafka::config::{ClientConfig, TopicConfig}; 13 | use rdkafka::consumer::stream_consumer::StreamConsumer; 14 | use rdkafka::consumer::Consumer; 15 | use rdkafka::message::Message; 16 | use rdkafka::util::get_rdkafka_version; 17 | 18 | use std::io::{self, BufRead, Cursor}; 19 | use std::str; 20 | 21 | #[derive(Debug)] 22 | enum ParserError { 23 |     Format, 24 |     Io(io::Error), 25 |     Utf8(str::Utf8Error), 26 | } 27 | 28 | impl From<io::Error> for ParserError { 29 |     fn from(err: io::Error) -> ParserError { 30 |         ParserError::Io(err) 31 |     } 32 | } 33 | 34 | impl From<str::Utf8Error> for ParserError { 35 |     fn from(err: str::Utf8Error) -> ParserError { 36 |         ParserError::Utf8(err) 37 |     } 38 | } 39 | 40 | #[derive(Debug)] 41 | enum ConsumerUpdate { 42 |     Metadata, 43 |     SetCommit { 44 |         group: String, 45 |         topic: String, 46 |         partition: i32, 47 |         offset: i64, 48 |     }, 49 |     DeleteCommit { 50 |         group: String, 51 |         topic: String, 52 |         partition: i32, 53 |     }, 54 | } 55 | 56 | fn read_str<'a>(rdr: &'a mut Cursor<&[u8]>) -> Result<&'a str, ParserError> { 57 |     let strlen = rdr.read_i16::<BigEndian>()?
as usize; 58 |     let pos = rdr.position() as usize; 59 |     let slice = str::from_utf8(&rdr.get_ref()[pos..(pos + strlen)])?; 60 |     rdr.consume(strlen); 61 |     Ok(slice) 62 | } 63 | 64 | fn parse_group_offset( 65 |     key_rdr: &mut Cursor<&[u8]>, 66 |     payload_rdr: &mut Cursor<&[u8]>, 67 | ) -> Result<ConsumerUpdate, ParserError> { 68 |     let group = read_str(key_rdr)?.to_owned(); 69 |     let topic = read_str(key_rdr)?.to_owned(); 70 |     let partition = key_rdr.read_i32::<BigEndian>()?; 71 |     if !payload_rdr.get_ref().is_empty() { 72 |         payload_rdr.read_i16::<BigEndian>()?; // skip the payload schema version 73 |         let offset = payload_rdr.read_i64::<BigEndian>()?; 74 |         Ok(ConsumerUpdate::SetCommit { 75 |             group: group, 76 |             topic: topic, 77 |             partition: partition, 78 |             offset: offset, 79 |         }) 80 |     } else { // an empty payload is a tombstone: the offset commit was deleted 81 |         Ok(ConsumerUpdate::DeleteCommit { 82 |             group: group, 83 |             topic: topic, 84 |             partition: partition, 85 |         }) 86 |     } 87 | } 88 | 89 | fn parse_message(key: &[u8], payload: &[u8]) -> Result<ConsumerUpdate, ParserError> { 90 |     let mut key_rdr = Cursor::new(key); 91 |     let mut payload_rdr = Cursor::new(payload); 92 |     let key_version = key_rdr.read_i16::<BigEndian>()?; 93 |     match key_version { 94 |         0 | 1 => Ok(parse_group_offset(&mut key_rdr, &mut payload_rdr)?), 95 |         2 => Ok(ConsumerUpdate::Metadata), 96 |         _ => Err(ParserError::Format), 97 |     } 98 | } 99 | 100 | fn consume_and_print(brokers: &str) { 101 |     let consumer = ClientConfig::new() 102 |         .set("group.id", "consumer_reader_group") 103 |         .set("bootstrap.servers", brokers) 104 |         .set("enable.partition.eof", "false") 105 |         .set("session.timeout.ms", "30000") 106 |         .set("enable.auto.commit", "false") 107 |         .set_default_topic_config( 108 |             TopicConfig::new() 109 |                 .set("auto.offset.reset", "smallest") 110 |                 .finalize(), 111 |         ) 112 |         .create::<StreamConsumer>() 113 |         .expect("Consumer creation failed"); 114 | 115 |     consumer 116 |         .subscribe(&vec!["__consumer_offsets"]) 117 |         .expect("Can't subscribe to specified topics"); 118 | 119 |     for message in consumer.start().wait() { 120 |         match message { 121 |             Err(e) => { 122 |                 warn!("Can't receive data from stream: {:?}", e); 123 |             } 124 |             Ok(Ok(m)) => { 125 |                 let key = match m.key_view::<[u8]>() { 126 |                     None => &[], 127 |                     Some(Ok(s)) => s, 128 |                     Some(Err(e)) => { 129 |                         println!("Error while deserializing message key: {:?}", e); 130 |                         &[] 131 |                     } 132 |                 }; 133 |                 let payload = match m.payload_view::<[u8]>() { 134 |                     None => &[], 135 |                     Some(Ok(s)) => s, 136 |                     Some(Err(e)) => { 137 |                         println!("Error while deserializing message payload: {:?}", e); 138 |                         &[] 139 |                     } 140 |                 }; 141 |                 println!( 142 |                     "\n#### P:{}, o:{}, s:{:.3}KB", 143 |                     m.partition(), 144 |                     m.offset(), 145 |                     (m.payload_len() as f64 / 1000f64) 146 |                 ); 147 | 148 |                 let msg = parse_message(key, payload); 149 |                 println!("{:?}", msg); 150 |             } 151 |             Ok(Err(e)) => { 152 |                 warn!("Kafka error: {:?}", e); 153 |             } 154 |         }; 155 |     } 156 | } 157 | 158 | fn main() { 159 |     let matches = App::new("consumer example") 160 |         .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 161 |         .about("Simple command line consumer") 162 |         .arg( 163 |             Arg::with_name("brokers") 164 |                 .short("b") 165 |                 .long("brokers") 166 |                 .help("Broker list in kafka format") 167 |                 .takes_value(true) 168 |                 .default_value("localhost:9092"), 169 |         ) 170 |         .get_matches(); 171 | 172 |     let (version_n, version_s) = get_rdkafka_version(); 173 |     println!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s); 174 | 175 |     let brokers = matches.value_of("brokers").unwrap(); 176 | 177 |     consume_and_print(brokers); 178 | } 179 | -------------------------------------------------------------------------------- /resources/web_server/public/fonts/.directory:
-------------------------------------------------------------------------------- 1 | [Dolphin] 2 | Timestamp=2017,5,7,14,1,50 3 | Version=3 4 | ViewMode=2 5 | -------------------------------------------------------------------------------- /resources/web_server/public/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /resources/web_server/public/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /resources/web_server/public/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /resources/web_server/public/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /resources/web_server/public/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /resources/web_server/public/images/kafka_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/images/kafka_logo.png -------------------------------------------------------------------------------- /resources/web_server/public/images/kafka_logo_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/images/kafka_logo_small.png -------------------------------------------------------------------------------- /resources/web_server/public/images/kafka_logo_small_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/images/kafka_logo_small_white.png -------------------------------------------------------------------------------- /resources/web_server/public/images/kafka_logo_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/images/kafka_logo_white.png 
-------------------------------------------------------------------------------- /resources/web_server/public/images/webkafka_favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/resources/web_server/public/images/webkafka_favicon.png -------------------------------------------------------------------------------- /resources/web_server/public/my_css.css: -------------------------------------------------------------------------------- 1 | # .dataTables_filter label { width: 100% } 2 | .dataTables_info { margin-top: 12px } 3 | 4 | .flex-container { 5 | display: flex; 6 | flex-flow: column; 7 | } 8 | 9 | .flex-body { 10 | flex: 1 1 auto; 11 | } 12 | 13 | .flex-footer { 14 | flex: 0 1 20pt; 15 | } 16 | 17 | h3 { 18 | margin-top: 30px; 19 | border-bottom: 1px solid #eee; 20 | } 21 | 22 | .message { 23 | border-bottom: 1px solid #eee; 24 | border-top: 1px solid #eee; 25 | padding-top: 3px; 26 | padding-bottom: 3px; 27 | margin-top: 0px; 28 | margin-bottom: 0px; 29 | display: flex; 30 | } 31 | 32 | .message-key, 33 | .message-ts { 34 | padding-right: 5px; 35 | margin-right: 5px; 36 | border-right: 1px solid #eee; 37 | } 38 | 39 | div.topic_tailer { 40 | width: 100%; 41 | min-height: 200px; 42 | max-height: 500px; 43 | overflow: auto; 44 | } 45 | 46 | table.dataTable tbody th.dt-body-right, 47 | table.dataTable tbody td.dt-body-right { 48 | text-align: right; 49 | } 50 | -------------------------------------------------------------------------------- /resources/web_server/public/my_js.js: -------------------------------------------------------------------------------- 1 | jQuery.fn.dataTable.ext.type.order['num-or-str-pre'] = function (data) { 2 | var matches = data.match( /^(\d+(?:\.\d+)?)/ ); 3 | if (matches) { 4 | return parseFloat(matches[1]); 5 | } else { 6 | return -1; 7 | }; 8 | }; 9 | 10 | // jQuery.fn.dataTable.ext.type.order['my-err-pre'] = function (data) { 11 | // if (data.indexOf("times") !== -1) { 12 | // return 2; // Error 13 | // } else { 14 | // return 0; // Ok 15 | // }; 16 | //}; 17 | 18 | function formatToHuman(value, decimals, suffix, k, sizes) { 19 | if (suffix === undefined) { 20 | suffix = ""; 21 | } 22 | if (decimals === undefined) { 23 | decimals = 3; 24 | } 25 | if (value == 0) { 26 | var i = 0; 27 | var result = 0; 28 | } else { 29 | var i = Math.floor(Math.log(value) / Math.log(k)); 30 | var result = parseFloat((value / Math.pow(k, i)).toFixed(decimals)); 31 | } 32 | // return result + sizes[i] + suffix; 33 | return $('', { text: result + sizes[i] + suffix, title: value }).tooltip(); 34 | } 35 | 36 | function _bytes_to_human(value, suffix) { 37 | var bytes = parseInt(value); 38 | var sizes = [' B', ' KiB', ' MiB', ' GiB', ' TiB', ' PiB']; 39 | if (bytes == -1) { 40 | return "Unknown"; 41 | } else { 42 | return formatToHuman(bytes, 1, suffix, 1024, sizes); 43 | } 44 | } 45 | 46 | function bytes_to_human(cell, suffix) { 47 | var values = cell.innerHTML.split(","); 48 | cell.innerHTML = ""; 49 | 50 | values.forEach(function(value, i) { 51 | var bytes = _bytes_to_human(value, suffix); 52 | $(cell).append(bytes); 53 | 54 | if (i < values.length - 1) { 55 | cell.innerHTML += ","; 56 | } 57 | }); 58 | } 59 | 60 | function big_num_to_human(cell, suffix) { 61 | var value = parseInt(cell.innerHTML); 62 | var sizes = [' ', ' K', ' M', ' G']; 63 | if (value == -1) { 64 | $(cell).html("Unknown"); 65 | } else { 66 | $(cell).html(formatToHuman(value, 
1, suffix, 1000, sizes)); 67 |     } 68 | } 69 | 70 | function broker_to_url(cluster_id, cell) { 71 |     var broker_name = cell.innerHTML; 72 |     var url = "/clusters/" + cluster_id + "/brokers/" + broker_name; 73 |     var link = $('<a/>', { text: broker_name, title: 'Broker page', href: url }); 74 |     $(cell).html(link); 75 | } 76 | 77 | function topic_to_url(cluster_id, cell) { 78 |     var topic_name = cell.innerHTML; 79 |     var url = "/clusters/" + cluster_id + "/topics/" + topic_name; 80 |     var link = $('<a/>', { text: topic_name, title: 'Topic page', href: url }); 81 |     $(cell).html(link); 82 | } 83 | 84 | function group_to_url(cluster_id, cell) { 85 |     var group_name = cell.innerHTML; 86 |     var url = "/clusters/" + cluster_id + "/groups/" + group_name; 87 |     var link = $('<a/>', { text: group_name, title: 'Group page', href: url }); 88 |     $(cell).html(link); 89 | } 90 | 91 | function cluster_to_url(cell) { 92 |     var cluster_id = cell.innerHTML; 93 |     var url = "/clusters/" + cluster_id; 94 |     var link = $('<a/>', { text: cluster_id, title: 'Cluster page', href: url }); 95 |     $(cell).html(link); 96 | } 97 | 98 | function error_to_graphic(cell) { 99 |     var error_code = cell.innerHTML; 100 |     if (error_code) { 101 |         var symbol = $('<i/>', { class: 'fa fa-times fa-fw', style: 'color: red', title: error_code }); 102 |     } else { 103 |         var symbol = $('<i/>', { class: 'fa fa-check fa-fw', style: 'color: green', title: 'No error' }); 104 |     } 105 |     symbol.tooltip(); 106 |     $(cell).html(symbol); 107 | } 108 | 109 | function message_to_tailer_entry(msg) { 110 |     var ts_text; 111 |     if (msg["created_at"]) { 112 |         ts_text = (new Date(msg["created_at"])).toISOString() + " Created"; 113 |     } else if (msg["appended_at"]) { 114 |         ts_text = (new Date(msg["appended_at"])).toISOString() + " Appended"; 115 |     } else { 116 |         ts_text = "N/A"; 117 |     } 118 | 119 |     var entry = $("<div/>
", {class: "message"}); 120 | entry.append($("
", { class: "message-key", text: msg["key"] ? msg["key"] : "N/A" })); 121 | entry.append($("
", { class: "message-ts", text: ts_text })); 122 | entry.append($("
", { class: "message-payload", text: msg["payload"] })); 123 | return entry; 124 | } 125 | 126 | $(document).ready(function() { 127 | $('#datatable-brokers-ajax').each(function(index) { 128 | $(this).DataTable({ 129 | "search": { "regex": true}, 130 | "ajax": $(this).attr("data-url"), 131 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 132 | "language": { "search": "Regex search:" }, 133 | "columnDefs": [ 134 | { "className": "dt-body-right", "targets": [ 2, 3 ] } 135 | ], 136 | "processing": true, 137 | "deferRender": true, 138 | "stateSave": true, 139 | "createdRow": function(row, data, index) { 140 | var cluster_id = $(this).attr("data-param"); 141 | // broker_to_url(cluster_id, $(row).children()[0]); 142 | bytes_to_human($(row).children()[2], "/s"); 143 | big_num_to_human($(row).children()[3], "msg/s"); 144 | } 145 | }); 146 | }); 147 | $('#datatable-topics-ajax').each(function(index) { 148 | $(this).DataTable({ 149 | "search": { "regex": true}, 150 | "ajax": $(this).attr("data-url"), 151 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 152 | "language": { "search": "Regex search:" }, 153 | "processing": true, 154 | "columns": [ 155 | { "data": "topic_name" }, 156 | { "data": "partition_count" }, 157 | { "data": "errors" }, 158 | { "data": "b_rate_15" }, 159 | { "data": "m_rate_15" } 160 | ], 161 | "columnDefs": [ 162 | { "className": "dt-body-right", "targets": [ 1, 2, 3, 4 ] } 163 | ], 164 | "deferRender": true, 165 | "stateSave": true, 166 | "createdRow": function(row, data, index) { 167 | var cluster_id = $(this).attr("data-param"); 168 | topic_to_url(cluster_id, $(row).children()[0]); 169 | error_to_graphic($(row).children()[2]); 170 | bytes_to_human($(row).children()[3], "/s"); 171 | big_num_to_human($(row).children()[4], "msg/s"); 172 | } 173 | }); 174 | }); 175 | $('#datatable-groups-ajax').each(function(index) { 176 | $(this).DataTable({ 177 | "search": { "regex": true}, 178 | "ajax": $(this).attr("data-url"), 179 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 180 | "language": { "search": "Regex search:" }, 181 | "columnDefs": [ 182 | { "className": "dt-body-right", "targets": [ 2, 3 ] } 183 | ], 184 | "processing": true, 185 | "deferRender": true, 186 | stateSave: true, 187 | "createdRow": function(row, data, index) { 188 | var cluster_id = $(this).attr("data-param"); 189 | group_to_url(cluster_id, $(row).children()[0]); 190 | } 191 | }); 192 | }); 193 | $('#datatable-reassignment-ajax').each(function(index) { 194 | $(this).DataTable({ 195 | "search": { "regex": true}, 196 | "ajax": $(this).attr("data-url"), 197 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 198 | "language": { "search": "Regex search:" }, 199 | "processing": true, 200 | "deferRender": true, 201 | "stateSave": true, 202 | "createdRow": function(row, data, index) { 203 | var cluster_id = $(this).attr("data-param"); 204 | topic_to_url(cluster_id, $(row).children()[0]); 205 | bytes_to_human($(row).children()[3], ""); 206 | } 207 | }); 208 | }); 209 | $('#datatable-topology-ajax').each(function(index) { 210 | $(this).DataTable({ 211 | "search": { "regex": true}, 212 | "ajax": $(this).attr("data-url"), 213 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 214 | "language": { "search": "Regex search:" }, 215 | "columnDefs": [ ], 216 | "processing": true, 217 | "deferRender": true, 218 | stateSave: true, 219 | "createdRow": function(row, data, index) { 220 | var cluster_id = $(this).attr("data-param"); 221 | // broker_to_url(cluster_id, 
$(row).children()[1]); 222 | bytes_to_human($(row).children()[1], ""); 223 | error_to_graphic($(row).children()[5]); 224 | } 225 | }); 226 | }); 227 | $('#datatable-group-members-ajax').each(function(index) { 228 | $(this).DataTable({ 229 | "search": { "regex": true}, 230 | "ajax": $(this).attr("data-url"), 231 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 232 | "language": { "search": "Regex search:" }, 233 | "columnDefs": [ ], 234 | "processing": true, 235 | "deferRender": true, 236 | stateSave: true 237 | }); 238 | }); 239 | $('#datatable-group-offsets-ajax').each(function(index) { 240 | var table = $(this).DataTable({ 241 | "search": { "regex": true}, 242 | "ajax": $(this).attr("data-url"), 243 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 244 | "language": { "search": "Regex search:" }, 245 | // "columnDefs": [ 246 | // { "className": "dt-body-right", "targets": [ 1, 2, 3, 4, 5 ] }, 247 | // { "type": "num-or-str", "targets": [ 5 ] } 248 | // ], 249 | "processing": true, 250 | "deferRender": true, 251 | stateSave: true, 252 | "createdRow": function(row, data, index) { 253 | var cluster_id = $(this).attr("data-param"); 254 | topic_to_url(cluster_id, $(row).children()[0]); 255 | } 256 | }); 257 | setInterval( function () { 258 | table.ajax.reload(); 259 | }, 20000 ); 260 | }); 261 | $('#datatable-topic-search-ajax').each(function(index) { 262 | $(this).DataTable({ 263 | "searching": false, 264 | "ajax": $(this).attr("data-url"), 265 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 266 | "pageLength": 50, 267 | "language": { "search": "Regex search:" }, 268 | "columnDefs": [ 269 | { "className": "dt-body-right", "targets": [ 2, 3, 4, 5 ] } 270 | ], 271 | "processing": true, 272 | "deferRender": true, 273 | "stateSave": true, 274 | "createdRow": function(row, data, index) { 275 | var cluster_id = $(this).attr("data-param"); 276 | var row = $(row).children(); 277 | topic_to_url(row[0].innerHTML, row[1]); 278 | cluster_to_url(row[0]); 279 | error_to_graphic(row[3]); 280 | bytes_to_human(row[4], "/s"); 281 | big_num_to_human(row[5], "msg/s"); 282 | } 283 | }); 284 | }); 285 | $('#datatable-group-search-ajax').each(function(index) { 286 | $(this).DataTable({ 287 | "searching": false, 288 | "ajax": $(this).attr("data-url"), 289 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 290 | "pageLength": 50, 291 | "columnDefs": [ 292 | { "className": "dt-body-right", "targets": [ 3, 4 ] } 293 | ], 294 | "processing": true, 295 | "deferRender": true, 296 | "stateSave": true, 297 | "createdRow": function(row, data, index) { 298 | var row = $(row).children(); 299 | group_to_url(row[0].innerHTML, row[1]); 300 | cluster_to_url(row[0]); 301 | } 302 | }); 303 | }); 304 | $('#datatable-internals-cache-brokers-ajax').each(function(index) { 305 | var table = $(this).DataTable({ 306 | "ajax": $(this).attr("data-url"), 307 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 308 | "pageLength": 50, 309 | "processing": true, 310 | "deferRender": true, 311 | "stateSave": true 312 | }); 313 | setInterval( function () { 314 | table.ajax.reload(); 315 | }, 20000 ); 316 | }); 317 | $('#datatable-internals-cache-metrics-ajax').each(function(index) { 318 | var table = $(this).DataTable({ 319 | "ajax": $(this).attr("data-url"), 320 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 321 | "pageLength": 10, 322 | "processing": true, 323 | "deferRender": true, 324 | "stateSave": true 325 | }); 326 | setInterval( function () { 327 | table.ajax.reload(); 
328 | }, 20000 ); 329 | }); 330 | $('#datatable-internals-cache-offsets-ajax').each(function(index) { 331 | var table = $(this).DataTable({ 332 | "ajax": $(this).attr("data-url"), 333 | "lengthMenu": [ [10, 50, 200, -1], [10, 50, 200, "All"] ], 334 | "pageLength": 10, 335 | "processing": true, 336 | "deferRender": true, 337 | "stateSave": true 338 | }); 339 | setInterval( function () { 340 | table.ajax.reload(); 341 | }, 20000 ); 342 | }); 343 | $('#datatable-internals-live-consumers-ajax').each(function(index) { 344 | var table = $(this).DataTable({ 345 | "ajax": $(this).attr("data-url"), 346 | "lengthMenu": [ [10, 50, -1], [10, 50, "All"] ], 347 | "pageLength": 10, 348 | "processing": true, 349 | "deferRender": true, 350 | "stateSave": true 351 | }); 352 | setInterval( function () { 353 | table.ajax.reload(); 354 | }, 20000 ); 355 | }); 356 | }); 357 | 358 | function truncate(string, max_len) { 359 | if (string.length > max_len) 360 | return string.substring(0,max_len) + '...'; 361 | else 362 | return string; 363 | } 364 | 365 | function isScrolledToBottom(div) { 366 | var div = div[0]; 367 | return div.scrollHeight - div.clientHeight <= div.scrollTop + 1; 368 | } 369 | 370 | function scroll_to_bottom(div) { 371 | var div = div[0]; 372 | div.scrollTop = div.scrollHeight - div.clientHeight; 373 | } 374 | 375 | var max_msg_count = 1000; 376 | var max_msg_length = 1024; 377 | var poll_interval = 1000; 378 | 379 | var tailer_active = true; 380 | 381 | function background_tailer(cluster_id, topic_name, tailer_id) { 382 | if (!tailer_active) { 383 | setTimeout(function(){background_tailer(cluster_id, topic_name, tailer_id)}, poll_interval); 384 | return 385 | } 386 | var url = '/api/tailer/' + cluster_id + '/' + topic_name + '/' + tailer_id; 387 | $.ajax({ 388 | url: url, 389 | success: function(data) { 390 | var div_tailer = $('div.topic_tailer'); 391 | var bottom = isScrolledToBottom(div_tailer); 392 | messages = JSON.parse(data); 393 | for (var i = 0; i < messages.length; i++) { 394 | var message = messages[i]; 395 | div_tailer.append(message_to_tailer_entry(message)); 396 | } 397 | if (bottom) 398 | scroll_to_bottom(div_tailer); 399 | var message_count = div_tailer.children().length; 400 | if (message_count > max_msg_count) 401 | div_tailer.children().slice(0, message_count - max_msg_count).remove(); 402 | }, 403 | error: function(data) { 404 | console.log("error"); 405 | }, 406 | complete: function() { 407 | // Schedule the next request when the current one's complete 408 | setTimeout(function(){background_tailer(cluster_id, topic_name, tailer_id)}, poll_interval); 409 | } 410 | }); 411 | } 412 | 413 | // Load topic tailers 414 | $(document).ready(function() { 415 | $('.topic_tailer').each(function(index) { 416 | var cluster_id = $(this).attr("data-cluster"); 417 | var topic_name = $(this).attr("data-topic"); 418 | var tailer_id = $(this).attr("data-tailer"); 419 | background_tailer(cluster_id, topic_name, tailer_id); 420 | }); 421 | $('#start_tailer_button').click(function(event) { 422 | event.preventDefault(); 423 | $('#tailer_button_label').html("Topic tailer: active") 424 | tailer_active = true; 425 | }) 426 | $('#stop_tailer_button').click(function(event) { 427 | event.preventDefault(); 428 | $('#tailer_button_label').html("Topic tailer: stopped") 429 | tailer_active = false; 430 | }) 431 | }); 432 | 433 | $(document).ready(function(){ 434 | $('[data-toggle="tooltip"]').tooltip(); 435 | $(window).resize(); 436 | }); 437 | 
-------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/dist/css/sb-admin-2.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Start Bootstrap - SB Admin 2 v3.3.7+1 (http://startbootstrap.com/template-overviews/sb-admin-2) 3 | * Copyright 2013-2016 Start Bootstrap 4 | * Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap/blob/gh-pages/LICENSE) 5 | */ 6 | body { 7 | background-color: #f8f8f8; 8 | } 9 | #wrapper { 10 | width: 100%; 11 | } 12 | #page-wrapper { 13 | padding: 0 15px; 14 | min-height: 568px; 15 | background-color: white; 16 | } 17 | @media (min-width: 768px) { 18 | #page-wrapper { 19 | position: inherit; 20 | margin: 0 0 0 250px; 21 | padding: 0 30px; 22 | border-left: 1px solid #e7e7e7; 23 | } 24 | } 25 | .navbar-top-links { 26 | margin-right: 0; 27 | } 28 | .navbar-top-links li { 29 | display: inline-block; 30 | } 31 | .navbar-top-links li:last-child { 32 | margin-right: 15px; 33 | } 34 | .navbar-top-links li a { 35 | padding: 15px; 36 | min-height: 50px; 37 | } 38 | .navbar-top-links .dropdown-menu li { 39 | display: block; 40 | } 41 | .navbar-top-links .dropdown-menu li:last-child { 42 | margin-right: 0; 43 | } 44 | .navbar-top-links .dropdown-menu li a { 45 | padding: 3px 20px; 46 | min-height: 0; 47 | } 48 | .navbar-top-links .dropdown-menu li a div { 49 | white-space: normal; 50 | } 51 | .navbar-top-links .dropdown-messages, 52 | .navbar-top-links .dropdown-tasks, 53 | .navbar-top-links .dropdown-alerts { 54 | width: 310px; 55 | min-width: 0; 56 | } 57 | .navbar-top-links .dropdown-messages { 58 | margin-left: 5px; 59 | } 60 | .navbar-top-links .dropdown-tasks { 61 | margin-left: -59px; 62 | } 63 | .navbar-top-links .dropdown-alerts { 64 | margin-left: -123px; 65 | } 66 | .navbar-top-links .dropdown-user { 67 | right: 0; 68 | left: auto; 69 | } 70 | .sidebar .sidebar-nav.navbar-collapse { 71 | padding-left: 0; 72 | padding-right: 0; 73 | } 74 | .sidebar .sidebar-search { 75 | padding: 15px; 76 | } 77 | .sidebar ul li { 78 | border-bottom: 1px solid #e7e7e7; 79 | } 80 | .sidebar ul li a.active { 81 | background-color: #eeeeee; 82 | } 83 | .sidebar .arrow { 84 | float: right; 85 | } 86 | .sidebar .fa.arrow:before { 87 | content: "\f104"; 88 | } 89 | .sidebar .active > a > .fa.arrow:before { 90 | content: "\f107"; 91 | } 92 | .sidebar .nav-second-level li, 93 | .sidebar .nav-third-level li { 94 | border-bottom: none !important; 95 | } 96 | .sidebar .nav-second-level li a { 97 | padding-left: 37px; 98 | } 99 | .sidebar .nav-third-level li a { 100 | padding-left: 52px; 101 | } 102 | @media (min-width: 768px) { 103 | .sidebar { 104 | z-index: 1; 105 | position: absolute; 106 | width: 250px; 107 | margin-top: 51px; 108 | } 109 | .navbar-top-links .dropdown-messages, 110 | .navbar-top-links .dropdown-tasks, 111 | .navbar-top-links .dropdown-alerts { 112 | margin-left: auto; 113 | } 114 | } 115 | .btn-outline { 116 | color: inherit; 117 | background-color: transparent; 118 | transition: all .5s; 119 | } 120 | .btn-primary.btn-outline { 121 | color: #428bca; 122 | } 123 | .btn-success.btn-outline { 124 | color: #5cb85c; 125 | } 126 | .btn-info.btn-outline { 127 | color: #5bc0de; 128 | } 129 | .btn-warning.btn-outline { 130 | color: #f0ad4e; 131 | } 132 | .btn-danger.btn-outline { 133 | color: #d9534f; 134 | } 135 | .btn-primary.btn-outline:hover, 136 | .btn-success.btn-outline:hover, 137 | .btn-info.btn-outline:hover, 138 | 
.btn-warning.btn-outline:hover, 139 | .btn-danger.btn-outline:hover { 140 | color: white; 141 | } 142 | .chat { 143 | margin: 0; 144 | padding: 0; 145 | list-style: none; 146 | } 147 | .chat li { 148 | margin-bottom: 10px; 149 | padding-bottom: 5px; 150 | border-bottom: 1px dotted #999999; 151 | } 152 | .chat li.left .chat-body { 153 | margin-left: 60px; 154 | } 155 | .chat li.right .chat-body { 156 | margin-right: 60px; 157 | } 158 | .chat li .chat-body p { 159 | margin: 0; 160 | } 161 | .panel .slidedown .glyphicon, 162 | .chat .glyphicon { 163 | margin-right: 5px; 164 | } 165 | .chat-panel .panel-body { 166 | height: 350px; 167 | overflow-y: scroll; 168 | } 169 | .login-panel { 170 | margin-top: 25%; 171 | } 172 | .flot-chart { 173 | display: block; 174 | height: 400px; 175 | } 176 | .flot-chart-content { 177 | width: 100%; 178 | height: 100%; 179 | } 180 | table.dataTable thead .sorting, 181 | table.dataTable thead .sorting_asc, 182 | table.dataTable thead .sorting_desc, 183 | table.dataTable thead .sorting_asc_disabled, 184 | table.dataTable thead .sorting_desc_disabled { 185 | background: transparent; 186 | } 187 | table.dataTable thead .sorting_asc:after { 188 | content: "\f0de"; 189 | float: right; 190 | font-family: fontawesome; 191 | } 192 | table.dataTable thead .sorting_desc:after { 193 | content: "\f0dd"; 194 | float: right; 195 | font-family: fontawesome; 196 | } 197 | table.dataTable thead .sorting:after { 198 | content: "\f0dc"; 199 | float: right; 200 | font-family: fontawesome; 201 | color: rgba(50, 50, 50, 0.5); 202 | } 203 | .btn-circle { 204 | width: 30px; 205 | height: 30px; 206 | padding: 6px 0; 207 | border-radius: 15px; 208 | text-align: center; 209 | font-size: 12px; 210 | line-height: 1.428571429; 211 | } 212 | .btn-circle.btn-lg { 213 | width: 50px; 214 | height: 50px; 215 | padding: 10px 16px; 216 | border-radius: 25px; 217 | font-size: 18px; 218 | line-height: 1.33; 219 | } 220 | .btn-circle.btn-xl { 221 | width: 70px; 222 | height: 70px; 223 | padding: 10px 16px; 224 | border-radius: 35px; 225 | font-size: 24px; 226 | line-height: 1.33; 227 | } 228 | .show-grid [class^="col-"] { 229 | padding-top: 10px; 230 | padding-bottom: 10px; 231 | border: 1px solid #ddd; 232 | background-color: #eee !important; 233 | } 234 | .show-grid { 235 | margin: 15px 0; 236 | } 237 | .huge { 238 | font-size: 40px; 239 | } 240 | .panel-green { 241 | border-color: #5cb85c; 242 | } 243 | .panel-green > .panel-heading { 244 | border-color: #5cb85c; 245 | color: white; 246 | background-color: #5cb85c; 247 | } 248 | .panel-green > a { 249 | color: #5cb85c; 250 | } 251 | .panel-green > a:hover { 252 | color: #3d8b3d; 253 | } 254 | .panel-red { 255 | border-color: #d9534f; 256 | } 257 | .panel-red > .panel-heading { 258 | border-color: #d9534f; 259 | color: white; 260 | background-color: #d9534f; 261 | } 262 | .panel-red > a { 263 | color: #d9534f; 264 | } 265 | .panel-red > a:hover { 266 | color: #b52b27; 267 | } 268 | .panel-yellow { 269 | border-color: #f0ad4e; 270 | } 271 | .panel-yellow > .panel-heading { 272 | border-color: #f0ad4e; 273 | color: white; 274 | background-color: #f0ad4e; 275 | } 276 | .panel-yellow > a { 277 | color: #f0ad4e; 278 | } 279 | .panel-yellow > a:hover { 280 | color: #df8a13; 281 | } 282 | .timeline { 283 | position: relative; 284 | padding: 20px 0 20px; 285 | list-style: none; 286 | } 287 | .timeline:before { 288 | content: " "; 289 | position: absolute; 290 | top: 0; 291 | bottom: 0; 292 | left: 50%; 293 | width: 3px; 294 | margin-left: -1.5px; 295 | 
background-color: #eeeeee; 296 | } 297 | .timeline > li { 298 | position: relative; 299 | margin-bottom: 20px; 300 | } 301 | .timeline > li:before, 302 | .timeline > li:after { 303 | content: " "; 304 | display: table; 305 | } 306 | .timeline > li:after { 307 | clear: both; 308 | } 309 | .timeline > li:before, 310 | .timeline > li:after { 311 | content: " "; 312 | display: table; 313 | } 314 | .timeline > li:after { 315 | clear: both; 316 | } 317 | .timeline > li > .timeline-panel { 318 | float: left; 319 | position: relative; 320 | width: 46%; 321 | padding: 20px; 322 | border: 1px solid #d4d4d4; 323 | border-radius: 2px; 324 | -webkit-box-shadow: 0 1px 6px rgba(0, 0, 0, 0.175); 325 | box-shadow: 0 1px 6px rgba(0, 0, 0, 0.175); 326 | } 327 | .timeline > li > .timeline-panel:before { 328 | content: " "; 329 | display: inline-block; 330 | position: absolute; 331 | top: 26px; 332 | right: -15px; 333 | border-top: 15px solid transparent; 334 | border-right: 0 solid #ccc; 335 | border-bottom: 15px solid transparent; 336 | border-left: 15px solid #ccc; 337 | } 338 | .timeline > li > .timeline-panel:after { 339 | content: " "; 340 | display: inline-block; 341 | position: absolute; 342 | top: 27px; 343 | right: -14px; 344 | border-top: 14px solid transparent; 345 | border-right: 0 solid #fff; 346 | border-bottom: 14px solid transparent; 347 | border-left: 14px solid #fff; 348 | } 349 | .timeline > li > .timeline-badge { 350 | z-index: 100; 351 | position: absolute; 352 | top: 16px; 353 | left: 50%; 354 | width: 50px; 355 | height: 50px; 356 | margin-left: -25px; 357 | border-radius: 50% 50% 50% 50%; 358 | text-align: center; 359 | font-size: 1.4em; 360 | line-height: 50px; 361 | color: #fff; 362 | background-color: #999999; 363 | } 364 | .timeline > li.timeline-inverted > .timeline-panel { 365 | float: right; 366 | } 367 | .timeline > li.timeline-inverted > .timeline-panel:before { 368 | right: auto; 369 | left: -15px; 370 | border-right-width: 15px; 371 | border-left-width: 0; 372 | } 373 | .timeline > li.timeline-inverted > .timeline-panel:after { 374 | right: auto; 375 | left: -14px; 376 | border-right-width: 14px; 377 | border-left-width: 0; 378 | } 379 | .timeline-badge.primary { 380 | background-color: #2e6da4 !important; 381 | } 382 | .timeline-badge.success { 383 | background-color: #3f903f !important; 384 | } 385 | .timeline-badge.warning { 386 | background-color: #f0ad4e !important; 387 | } 388 | .timeline-badge.danger { 389 | background-color: #d9534f !important; 390 | } 391 | .timeline-badge.info { 392 | background-color: #5bc0de !important; 393 | } 394 | .timeline-title { 395 | margin-top: 0; 396 | color: inherit; 397 | } 398 | .timeline-body > p, 399 | .timeline-body > ul { 400 | margin-bottom: 0; 401 | } 402 | .timeline-body > p + p { 403 | margin-top: 5px; 404 | } 405 | @media (max-width: 767px) { 406 | ul.timeline:before { 407 | left: 40px; 408 | } 409 | ul.timeline > li > .timeline-panel { 410 | width: calc(10%); 411 | width: -moz-calc(10%); 412 | width: -webkit-calc(10%); 413 | } 414 | ul.timeline > li > .timeline-badge { 415 | top: 16px; 416 | left: 15px; 417 | margin-left: 0; 418 | } 419 | ul.timeline > li > .timeline-panel { 420 | float: right; 421 | } 422 | ul.timeline > li > .timeline-panel:before { 423 | right: auto; 424 | left: -15px; 425 | border-right-width: 15px; 426 | border-left-width: 0; 427 | } 428 | ul.timeline > li > .timeline-panel:after { 429 | right: auto; 430 | left: -14px; 431 | border-right-width: 14px; 432 | border-left-width: 0; 433 | } 434 | } 435 | 
-------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/dist/css/sb-admin-2.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Start Bootstrap - SB Admin 2 v3.3.7+1 (http://startbootstrap.com/template-overviews/sb-admin-2) 3 | * Copyright 2013-2016 Start Bootstrap 4 | * Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap/blob/gh-pages/LICENSE) 5 | */.chat,.timeline{list-style:none}body{background-color:#f8f8f8}#wrapper{width:100%}#page-wrapper{padding:0 15px;min-height:568px;background-color:#fff}@media (min-width:768px){#page-wrapper{position:inherit;margin:0 0 0 250px;padding:0 30px;border-left:1px solid #e7e7e7}}.navbar-top-links{margin-right:0}.navbar-top-links li{display:inline-block}.flot-chart,.navbar-top-links .dropdown-menu li{display:block}.navbar-top-links li:last-child{margin-right:15px}.navbar-top-links li a{padding:15px;min-height:50px}.navbar-top-links .dropdown-menu li:last-child{margin-right:0}.navbar-top-links .dropdown-menu li a{padding:3px 20px;min-height:0}.navbar-top-links .dropdown-menu li a div{white-space:normal}.navbar-top-links .dropdown-alerts,.navbar-top-links .dropdown-messages,.navbar-top-links .dropdown-tasks{width:310px;min-width:0}.navbar-top-links .dropdown-messages{margin-left:5px}.navbar-top-links .dropdown-tasks{margin-left:-59px}.navbar-top-links .dropdown-alerts{margin-left:-123px}.navbar-top-links .dropdown-user{right:0;left:auto}.sidebar .sidebar-nav.navbar-collapse{padding-left:0;padding-right:0}.sidebar .sidebar-search{padding:15px}.sidebar ul li{border-bottom:1px solid #e7e7e7}.sidebar ul li a.active{background-color:#eee}.sidebar .arrow{float:right}.sidebar .fa.arrow:before{content:"\f104"}.sidebar .active>a>.fa.arrow:before{content:"\f107"}.sidebar .nav-second-level li,.sidebar .nav-third-level li{border-bottom:none!important}.sidebar .nav-second-level li a{padding-left:37px}.sidebar .nav-third-level li a{padding-left:52px}@media (min-width:768px){.sidebar{z-index:1;position:absolute;width:250px;margin-top:51px}.navbar-top-links .dropdown-alerts,.navbar-top-links .dropdown-messages,.navbar-top-links .dropdown-tasks{margin-left:auto}}.btn-outline{color:inherit;background-color:transparent;transition:all .5s}.btn-primary.btn-outline{color:#428bca}.btn-success.btn-outline{color:#5cb85c}.btn-info.btn-outline{color:#5bc0de}.btn-warning.btn-outline{color:#f0ad4e}.btn-danger.btn-outline{color:#d9534f}.btn-danger.btn-outline:hover,.btn-info.btn-outline:hover,.btn-primary.btn-outline:hover,.btn-success.btn-outline:hover,.btn-warning.btn-outline:hover{color:#fff}.chat{margin:0;padding:0}.chat li{margin-bottom:10px;padding-bottom:5px;border-bottom:1px dotted #999}.chat li.left .chat-body{margin-left:60px}.chat li.right .chat-body{margin-right:60px}.chat li .chat-body p{margin:0}.chat .glyphicon,.panel .slidedown .glyphicon{margin-right:5px}.chat-panel .panel-body{height:350px;overflow-y:scroll}.login-panel{margin-top:25%}.flot-chart{height:400px}.flot-chart-content{width:100%;height:100%}table.dataTable thead .sorting,table.dataTable thead .sorting_asc,table.dataTable thead .sorting_asc_disabled,table.dataTable thead .sorting_desc,table.dataTable thead .sorting_desc_disabled{background:0 0}table.dataTable thead .sorting_asc:after{content:"\f0de";float:right;font-family:fontawesome}table.dataTable thead .sorting_desc:after{content:"\f0dd";float:right;font-family:fontawesome}table.dataTable thead 
.sorting:after{content:"\f0dc";float:right;font-family:fontawesome;color:rgba(50,50,50,.5)}.btn-circle{width:30px;height:30px;padding:6px 0;border-radius:15px;text-align:center;font-size:12px;line-height:1.428571429}.btn-circle.btn-lg{width:50px;height:50px;padding:10px 16px;border-radius:25px;font-size:18px;line-height:1.33}.btn-circle.btn-xl{width:70px;height:70px;padding:10px 16px;border-radius:35px;font-size:24px;line-height:1.33}.show-grid [class^=col-]{padding-top:10px;padding-bottom:10px;border:1px solid #ddd;background-color:#eee!important}.show-grid{margin:15px 0}.huge{font-size:40px}.panel-green{border-color:#5cb85c}.panel-green>.panel-heading{border-color:#5cb85c;color:#fff;background-color:#5cb85c}.panel-green>a{color:#5cb85c}.panel-green>a:hover{color:#3d8b3d}.panel-red{border-color:#d9534f}.panel-red>.panel-heading{border-color:#d9534f;color:#fff;background-color:#d9534f}.panel-red>a{color:#d9534f}.panel-red>a:hover{color:#b52b27}.panel-yellow{border-color:#f0ad4e}.panel-yellow>.panel-heading{border-color:#f0ad4e;color:#fff;background-color:#f0ad4e}.panel-yellow>a{color:#f0ad4e}.panel-yellow>a:hover{color:#df8a13}.timeline{position:relative;padding:20px 0}.timeline:before{content:" ";position:absolute;top:0;bottom:0;left:50%;width:3px;margin-left:-1.5px;background-color:#eee}.timeline>li{position:relative;margin-bottom:20px}.timeline>li:after,.timeline>li:before{content:" ";display:table}.timeline>li:after{clear:both}.timeline>li>.timeline-panel{float:left;position:relative;width:46%;padding:20px;border:1px solid #d4d4d4;border-radius:2px;-webkit-box-shadow:0 1px 6px rgba(0,0,0,.175);box-shadow:0 1px 6px rgba(0,0,0,.175)}.timeline>li>.timeline-panel:before{content:" ";display:inline-block;position:absolute;top:26px;right:-15px;border-top:15px solid transparent;border-right:0 solid #ccc;border-bottom:15px solid transparent;border-left:15px solid #ccc}.timeline>li>.timeline-panel:after{content:" ";display:inline-block;position:absolute;top:27px;right:-14px;border-top:14px solid transparent;border-right:0 solid #fff;border-bottom:14px solid transparent;border-left:14px solid #fff}.timeline>li>.timeline-badge{z-index:100;position:absolute;top:16px;left:50%;width:50px;height:50px;margin-left:-25px;border-radius:50%;text-align:center;font-size:1.4em;line-height:50px;color:#fff;background-color:#999}.timeline>li.timeline-inverted>.timeline-panel{float:right}.timeline>li.timeline-inverted>.timeline-panel:before{right:auto;left:-15px;border-right-width:15px;border-left-width:0}.timeline>li.timeline-inverted>.timeline-panel:after{right:auto;left:-14px;border-right-width:14px;border-left-width:0}.timeline-badge.primary{background-color:#2e6da4!important}.timeline-badge.success{background-color:#3f903f!important}.timeline-badge.warning{background-color:#f0ad4e!important}.timeline-badge.danger{background-color:#d9534f!important}.timeline-badge.info{background-color:#5bc0de!important}.timeline-title{margin-top:0;color:inherit}.timeline-body>p,.timeline-body>ul{margin-bottom:0}.timeline-body>p+p{margin-top:5px}@media (max-width:767px){ul.timeline:before{left:40px}ul.timeline>li>.timeline-panel{width:calc(10%);width:-moz-calc(10%);width:-webkit-calc(10%);float:right}ul.timeline>li>.timeline-badge{top:16px;left:15px;margin-left:0}ul.timeline>li>.timeline-panel:before{right:auto;left:-15px;border-right-width:15px;border-left-width:0}ul.timeline>li>.timeline-panel:after{right:auto;left:-14px;border-right-width:14px;border-left-width:0}} 
-------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/dist/js/sb-admin-2.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * Start Bootstrap - SB Admin 2 v3.3.7+1 (http://startbootstrap.com/template-overviews/sb-admin-2) 3 | * Copyright 2013-2016 Start Bootstrap 4 | * Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap/blob/gh-pages/LICENSE) 5 | */ 6 | $(function() { 7 | $('#side-menu').metisMenu(); 8 | }); 9 | 10 | //Loads the correct sidebar on window load, 11 | //collapses the sidebar on window resize. 12 | // Sets the min-height of #page-wrapper to window size 13 | $(function() { 14 | $(window).bind("load resize", function() { 15 | var topOffset = 50; 16 | var width = (this.window.innerWidth > 0) ? this.window.innerWidth : this.screen.width; 17 | if (width < 768) { 18 | $('div.navbar-collapse').addClass('collapse'); 19 | topOffset = 100; // 2-row-menu 20 | } else { 21 | $('div.navbar-collapse').removeClass('collapse'); 22 | } 23 | 24 | var height = ((this.window.innerHeight > 0) ? this.window.innerHeight : this.screen.height) - 1; 25 | height = height - topOffset; 26 | if (height < 1) height = 1; 27 | if (height > topOffset) { 28 | $("#page-wrapper").css("min-height", (height) + "px"); 29 | } 30 | }); 31 | 32 | var url = window.location; 33 | // var element = $('ul.nav a').filter(function() { 34 | // return this.href == url; 35 | // }).addClass('active').parent().parent().addClass('in').parent(); 36 | var element = $('ul.nav a').filter(function() { 37 | return this.href == url; 38 | }).addClass('active').parent(); 39 | 40 | while (true) { 41 | if (element.is('li')) { 42 | element = element.parent().addClass('in').parent(); 43 | } else { 44 | break; 45 | } 46 | } 47 | }); 48 | -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/dist/js/sb-admin-2.min.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Start Bootstrap - SB Admin 2 v3.3.7+1 (http://startbootstrap.com/template-overviews/sb-admin-2) 3 | * Copyright 2013-2016 Start Bootstrap 4 | * Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap/blob/gh-pages/LICENSE) 5 | */ 6 | $(function(){$("#side-menu").metisMenu()}),$(function(){$(window).bind("load resize",function(){var i=50,n=this.window.innerWidth>0?this.window.innerWidth:this.screen.width;n<768?($("div.navbar-collapse").addClass("collapse"),i=100):$("div.navbar-collapse").removeClass("collapse");var e=(this.window.innerHeight>0?this.window.innerHeight:this.screen.height)-1;e-=i,e<1&&(e=1),e>i&&$("#page-wrapper").css("min-height",e+"px")});for(var i=window.location,n=$("ul.nav a").filter(function(){return this.href==i}).addClass("active").parent();;){if(!n.is("li"))break;n=n.parent().addClass("in").parent()}}); -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/vendor/datatables-plugins/dataTables.bootstrap.css: -------------------------------------------------------------------------------- 1 | div.dataTables_length label { 2 | font-weight: normal; 3 | text-align: left; 4 | white-space: nowrap; 5 | } 6 | 7 | div.dataTables_length select { 8 | width: 75px; 9 | display: inline-block; 10 | } 11 | 12 | div.dataTables_filter { 13 | text-align: right; 14 | } 15 | 16 | div.dataTables_filter label { 17 | font-weight: normal; 18 | white-space: nowrap; 19 | text-align: left; 20 | } 21 | 22 | div.dataTables_filter input { 23 | margin-left: 0.5em; 24 | display: inline-block; 25 | } 26 | 27 | div.dataTables_info { 28 | padding-top: 8px; 29 | white-space: nowrap; 30 | } 31 | 32 | div.dataTables_paginate { 33 | margin: 0; 34 | white-space: nowrap; 35 | text-align: right; 36 | } 37 | 38 | div.dataTables_paginate ul.pagination { 39 | margin: 2px 0; 40 | white-space: nowrap; 41 | } 42 | 43 | @media screen and (max-width: 767px) { 44 | div.dataTables_length, 45 | div.dataTables_filter, 46 | div.dataTables_info, 47 | div.dataTables_paginate { 48 | text-align: center; 49 | } 50 | } 51 | 52 | 53 | table.dataTable td, 54 | table.dataTable th { 55 | -webkit-box-sizing: content-box; 56 | -moz-box-sizing: content-box; 57 | box-sizing: content-box; 58 | } 59 | 60 | 61 | table.dataTable { 62 | clear: both; 63 | margin-top: 6px !important; 64 | margin-bottom: 6px !important; 65 | max-width: none !important; 66 | } 67 | 68 | table.dataTable thead .sorting, 69 | table.dataTable thead .sorting_asc, 70 | table.dataTable thead .sorting_desc, 71 | table.dataTable thead .sorting_asc_disabled, 72 | table.dataTable thead .sorting_desc_disabled { 73 | cursor: pointer; 74 | } 75 | 76 | table.dataTable thead .sorting { background: url('../images/sort_both.png') no-repeat center right; } 77 | table.dataTable thead .sorting_asc { background: url('../images/sort_asc.png') no-repeat center right; } 78 | table.dataTable thead .sorting_desc { background: url('../images/sort_desc.png') no-repeat center right; } 79 | 80 | table.dataTable thead .sorting_asc_disabled { background: url('../images/sort_asc_disabled.png') no-repeat center right; } 81 | table.dataTable thead .sorting_desc_disabled { background: url('../images/sort_desc_disabled.png') no-repeat center right; } 82 | 83 | table.dataTable thead > tr > th { 84 | padding-left: 18px; 85 | padding-right: 18px; 86 | } 87 | 88 | table.dataTable th:active { 89 | outline: none; 90 | } 91 | 92 | /* Scrolling */ 93 | div.dataTables_scrollHead table { 94 | margin-bottom: 0 !important; 95 
| border-bottom-left-radius: 0; 96 | border-bottom-right-radius: 0; 97 | } 98 | 99 | div.dataTables_scrollHead table thead tr:last-child th:first-child, 100 | div.dataTables_scrollHead table thead tr:last-child td:first-child { 101 | border-bottom-left-radius: 0 !important; 102 | border-bottom-right-radius: 0 !important; 103 | } 104 | 105 | div.dataTables_scrollBody table { 106 | border-top: none; 107 | margin-top: 0 !important; 108 | margin-bottom: 0 !important; 109 | } 110 | 111 | div.dataTables_scrollBody tbody tr:first-child th, 112 | div.dataTables_scrollBody tbody tr:first-child td { 113 | border-top: none; 114 | } 115 | 116 | div.dataTables_scrollFoot table { 117 | margin-top: 0 !important; 118 | border-top: none; 119 | } 120 | 121 | /* Frustratingly the border-collapse:collapse used by Bootstrap makes the column 122 | width calculations when using scrolling impossible to align columns. We have 123 | to use separate 124 | */ 125 | table.table-bordered.dataTable { 126 | border-collapse: separate !important; 127 | } 128 | table.table-bordered thead th, 129 | table.table-bordered thead td { 130 | border-left-width: 0; 131 | border-top-width: 0; 132 | } 133 | table.table-bordered tbody th, 134 | table.table-bordered tbody td { 135 | border-left-width: 0; 136 | border-bottom-width: 0; 137 | } 138 | table.table-bordered th:last-child, 139 | table.table-bordered td:last-child { 140 | border-right-width: 0; 141 | } 142 | div.dataTables_scrollHead table.table-bordered { 143 | border-bottom-width: 0; 144 | } 145 | 146 | 147 | 148 | 149 | /* 150 | * TableTools styles 151 | */ 152 | .table.dataTable tbody tr.active td, 153 | .table.dataTable tbody tr.active th { 154 | background-color: #08C; 155 | color: white; 156 | } 157 | 158 | .table.dataTable tbody tr.active:hover td, 159 | .table.dataTable tbody tr.active:hover th { 160 | background-color: #0075b0 !important; 161 | } 162 | 163 | .table.dataTable tbody tr.active th > a, 164 | .table.dataTable tbody tr.active td > a { 165 | color: white; 166 | } 167 | 168 | .table-striped.dataTable tbody tr.active:nth-child(odd) td, 169 | .table-striped.dataTable tbody tr.active:nth-child(odd) th { 170 | background-color: #017ebc; 171 | } 172 | 173 | table.DTTT_selectable tbody tr { 174 | cursor: pointer; 175 | } 176 | 177 | div.DTTT .btn:hover { 178 | text-decoration: none !important; 179 | } 180 | 181 | ul.DTTT_dropdown.dropdown-menu { 182 | z-index: 2003; 183 | } 184 | 185 | ul.DTTT_dropdown.dropdown-menu a { 186 | color: #333 !important; /* needed only when demo_page.css is included */ 187 | } 188 | 189 | ul.DTTT_dropdown.dropdown-menu li { 190 | position: relative; 191 | } 192 | 193 | ul.DTTT_dropdown.dropdown-menu li:hover a { 194 | background-color: #0088cc; 195 | color: white !important; 196 | } 197 | 198 | div.DTTT_collection_background { 199 | z-index: 2002; 200 | } 201 | 202 | /* TableTools information display */ 203 | div.DTTT_print_info { 204 | position: fixed; 205 | top: 50%; 206 | left: 50%; 207 | width: 400px; 208 | height: 150px; 209 | margin-left: -200px; 210 | margin-top: -75px; 211 | text-align: center; 212 | color: #333; 213 | padding: 10px 30px; 214 | opacity: 0.95; 215 | 216 | background-color: white; 217 | border: 1px solid rgba(0, 0, 0, 0.2); 218 | border-radius: 6px; 219 | 220 | -webkit-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5); 221 | box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5); 222 | } 223 | 224 | div.DTTT_print_info h6 { 225 | font-weight: normal; 226 | font-size: 28px; 227 | line-height: 28px; 228 | margin: 1em; 229 | } 230 | 231 | 
div.DTTT_print_info p { 232 | font-size: 14px; 233 | line-height: 20px; 234 | } 235 | 236 | div.dataTables_processing { 237 | position: absolute; 238 | top: 50%; 239 | left: 50%; 240 | width: 100%; 241 | height: 60px; 242 | margin-left: -50%; 243 | margin-top: -25px; 244 | padding-top: 20px; 245 | padding-bottom: 20px; 246 | text-align: center; 247 | font-size: 1.2em; 248 | background-color: white; 249 | background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255,255,255,0)), color-stop(25%, rgba(255,255,255,0.9)), color-stop(75%, rgba(255,255,255,0.9)), color-stop(100%, rgba(255,255,255,0))); 250 | background: -webkit-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%); 251 | background: -moz-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%); 252 | background: -ms-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%); 253 | background: -o-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%); 254 | background: linear-gradient(to right, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%); 255 | } 256 | 257 | 258 | 259 | /* 260 | * FixedColumns styles 261 | */ 262 | div.DTFC_LeftHeadWrapper table, 263 | div.DTFC_LeftFootWrapper table, 264 | div.DTFC_RightHeadWrapper table, 265 | div.DTFC_RightFootWrapper table, 266 | table.DTFC_Cloned tr.even { 267 | background-color: white; 268 | margin-bottom: 0; 269 | } 270 | 271 | div.DTFC_RightHeadWrapper table , 272 | div.DTFC_LeftHeadWrapper table { 273 | border-bottom: none !important; 274 | margin-bottom: 0 !important; 275 | border-top-right-radius: 0 !important; 276 | border-bottom-left-radius: 0 !important; 277 | border-bottom-right-radius: 0 !important; 278 | } 279 | 280 | div.DTFC_RightHeadWrapper table thead tr:last-child th:first-child, 281 | div.DTFC_RightHeadWrapper table thead tr:last-child td:first-child, 282 | div.DTFC_LeftHeadWrapper table thead tr:last-child th:first-child, 283 | div.DTFC_LeftHeadWrapper table thead tr:last-child td:first-child { 284 | border-bottom-left-radius: 0 !important; 285 | border-bottom-right-radius: 0 !important; 286 | } 287 | 288 | div.DTFC_RightBodyWrapper table, 289 | div.DTFC_LeftBodyWrapper table { 290 | border-top: none; 291 | margin: 0 !important; 292 | } 293 | 294 | div.DTFC_RightBodyWrapper tbody tr:first-child th, 295 | div.DTFC_RightBodyWrapper tbody tr:first-child td, 296 | div.DTFC_LeftBodyWrapper tbody tr:first-child th, 297 | div.DTFC_LeftBodyWrapper tbody tr:first-child td { 298 | border-top: none; 299 | } 300 | 301 | div.DTFC_RightFootWrapper table, 302 | div.DTFC_LeftFootWrapper table { 303 | border-top: none; 304 | margin-top: 0 !important; 305 | } 306 | 307 | 308 | /* 309 | * FixedHeader styles 310 | */ 311 | div.FixedHeader_Cloned table { 312 | margin: 0 !important 313 | } 314 | 315 | -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/vendor/datatables-plugins/dataTables.bootstrap.min.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | DataTables Bootstrap 3 integration 3 | ©2011-2014 SpryMedia Ltd - datatables.net/license 4 | */ 5 | (function(){var f=function(c,b){c.extend(!0,b.defaults,{dom:"<'row'<'col-sm-6'l><'col-sm-6'f>><'row'<'col-sm-12'tr>><'row'<'col-sm-6'i><'col-sm-6'p>>",renderer:"bootstrap"});c.extend(b.ext.classes,{sWrapper:"dataTables_wrapper form-inline dt-bootstrap",sFilterInput:"form-control input-sm",sLengthSelect:"form-control input-sm"});b.ext.renderer.pageButton.bootstrap=function(g,f,p,k,h,l){var q=new b.Api(g),r=g.oClasses,i=g.oLanguage.oPaginate,d,e,o=function(b,f){var j,m,n,a,k=function(a){a.preventDefault(); 6 | c(a.currentTarget).hasClass("disabled")||q.page(a.data.action).draw(!1)};j=0;for(m=f.length;j",{"class":r.sPageButton+" "+ 7 | e,"aria-controls":g.sTableId,tabindex:g.iTabIndex,id:0===p&&"string"===typeof a?g.sTableId+"_"+a:null}).append(c("",{href:"#"}).html(d)).appendTo(b),g.oApi._fnBindAction(n,{action:a},k))}};o(c(f).empty().html('
    ').children("ul"),k)};b.TableTools&&(c.extend(!0,b.TableTools.classes,{container:"DTTT btn-group",buttons:{normal:"btn btn-default",disabled:"disabled"},collection:{container:"DTTT_dropdown dropdown-menu",buttons:{normal:"",disabled:"disabled"}},print:{info:"DTTT_print_info"}, 8 | select:{row:"active"}}),c.extend(!0,b.TableTools.DEFAULTS.oTags,{collection:{container:"ul",button:"li",liner:"a"}}))};"function"===typeof define&&define.amd?define(["jquery","datatables"],f):"object"===typeof exports?f(require("jquery"),require("datatables")):jQuery&&f(jQuery,jQuery.fn.dataTable)})(window,document); 9 | -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/vendor/datatables-responsive/dataTables.responsive.css: -------------------------------------------------------------------------------- 1 | table.dataTable.dtr-inline.collapsed > tbody > tr > td:first-child, 2 | table.dataTable.dtr-inline.collapsed > tbody > tr > th:first-child { 3 | position: relative; 4 | padding-left: 30px; 5 | cursor: pointer; 6 | } 7 | table.dataTable.dtr-inline.collapsed > tbody > tr > td:first-child:before, 8 | table.dataTable.dtr-inline.collapsed > tbody > tr > th:first-child:before { 9 | top: 8px; 10 | left: 4px; 11 | height: 16px; 12 | width: 16px; 13 | display: block; 14 | position: absolute; 15 | color: white; 16 | border: 2px solid white; 17 | border-radius: 16px; 18 | text-align: center; 19 | line-height: 14px; 20 | box-shadow: 0 0 3px #444; 21 | box-sizing: content-box; 22 | content: '+'; 23 | background-color: #31b131; 24 | } 25 | table.dataTable.dtr-inline.collapsed > tbody > tr > td:first-child.dataTables_empty:before, 26 | table.dataTable.dtr-inline.collapsed > tbody > tr > th:first-child.dataTables_empty:before { 27 | display: none; 28 | } 29 | table.dataTable.dtr-inline.collapsed > tbody > tr.parent > td:first-child:before, 30 | table.dataTable.dtr-inline.collapsed > tbody > tr.parent > th:first-child:before { 31 | content: '-'; 32 | background-color: #d33333; 33 | } 34 | table.dataTable.dtr-inline.collapsed > tbody > tr.child td:before { 35 | display: none; 36 | } 37 | table.dataTable.dtr-inline.collapsed.compact > tbody > tr > td:first-child, 38 | table.dataTable.dtr-inline.collapsed.compact > tbody > tr > th:first-child { 39 | padding-left: 27px; 40 | } 41 | table.dataTable.dtr-inline.collapsed.compact > tbody > tr > td:first-child:before, 42 | table.dataTable.dtr-inline.collapsed.compact > tbody > tr > th:first-child:before { 43 | top: 5px; 44 | left: 4px; 45 | height: 14px; 46 | width: 14px; 47 | border-radius: 14px; 48 | line-height: 12px; 49 | } 50 | table.dataTable.dtr-column > tbody > tr > td.control, 51 | table.dataTable.dtr-column > tbody > tr > th.control { 52 | position: relative; 53 | cursor: pointer; 54 | } 55 | table.dataTable.dtr-column > tbody > tr > td.control:before, 56 | table.dataTable.dtr-column > tbody > tr > th.control:before { 57 | top: 50%; 58 | left: 50%; 59 | height: 16px; 60 | width: 16px; 61 | margin-top: -10px; 62 | margin-left: -10px; 63 | display: block; 64 | position: absolute; 65 | color: white; 66 | border: 2px solid white; 67 | border-radius: 16px; 68 | text-align: center; 69 | line-height: 14px; 70 | box-shadow: 0 0 3px #444; 71 | box-sizing: content-box; 72 | content: '+'; 73 | background-color: #31b131; 74 | } 75 | table.dataTable.dtr-column > tbody > tr.parent td.control:before, 76 | table.dataTable.dtr-column > tbody > tr.parent th.control:before { 77 | content: '-'; 78 | background-color: 
#d33333; 79 | } 80 | table.dataTable > tbody > tr.child { 81 | padding: 0.5em 1em; 82 | } 83 | table.dataTable > tbody > tr.child:hover { 84 | background: transparent !important; 85 | } 86 | table.dataTable > tbody > tr.child ul { 87 | display: inline-block; 88 | list-style-type: none; 89 | margin: 0; 90 | padding: 0; 91 | } 92 | table.dataTable > tbody > tr.child ul li { 93 | border-bottom: 1px solid #efefef; 94 | padding: 0.5em 0; 95 | } 96 | table.dataTable > tbody > tr.child ul li:first-child { 97 | padding-top: 0; 98 | } 99 | table.dataTable > tbody > tr.child ul li:last-child { 100 | border-bottom: none; 101 | } 102 | table.dataTable > tbody > tr.child span.dtr-title { 103 | display: inline-block; 104 | min-width: 75px; 105 | font-weight: bold; 106 | } 107 | -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/vendor/metisMenu/metisMenu.min.css: -------------------------------------------------------------------------------- 1 | /* 2 | * metismenu - v1.1.3 3 | * Easy menu jQuery plugin for Twitter Bootstrap 3 4 | * https://github.com/onokumus/metisMenu 5 | * 6 | * Made by Osman Nuri Okumus 7 | * Under MIT License 8 | */ 9 | 10 | .arrow{float:right;line-height:1.42857}.glyphicon.arrow:before{content:"\e079"}.active>a>.glyphicon.arrow:before{content:"\e114"}.fa.arrow:before{content:"\f104"}.active>a>.fa.arrow:before{content:"\f107"}.plus-times{float:right}.fa.plus-times:before{content:"\f067"}.active>a>.fa.plus-times{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(45deg);-moz-transform:rotate(45deg);-ms-transform:rotate(45deg);-o-transform:rotate(45deg);transform:rotate(45deg)}.plus-minus{float:right}.fa.plus-minus:before{content:"\f067"}.active>a>.fa.plus-minus:before{content:"\f068"} -------------------------------------------------------------------------------- /resources/web_server/public/sb-admin-2/vendor/metisMenu/metisMenu.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | * metismenu - v1.1.3 3 | * Easy menu jQuery plugin for Twitter Bootstrap 3 4 | * https://github.com/onokumus/metisMenu 5 | * 6 | * Made by Osman Nuri Okumus 7 | * Under MIT License 8 | */ 9 | !function(a,b,c){function d(b,c){this.element=a(b),this.settings=a.extend({},f,c),this._defaults=f,this._name=e,this.init()}var e="metisMenu",f={toggle:!0,doubleTapToGo:!1};d.prototype={init:function(){var b=this.element,d=this.settings.toggle,f=this;this.isIE()<=9?(b.find("li.active").has("ul").children("ul").collapse("show"),b.find("li").not(".active").has("ul").children("ul").collapse("hide")):(b.find("li.active").has("ul").children("ul").addClass("collapse in"),b.find("li").not(".active").has("ul").children("ul").addClass("collapse")),f.settings.doubleTapToGo&&b.find("li.active").has("ul").children("a").addClass("doubleTapToGo"),b.find("li").has("ul").children("a").on("click."+e,function(b){return b.preventDefault(),f.settings.doubleTapToGo&&f.doubleTapToGo(a(this))&&"#"!==a(this).attr("href")&&""!==a(this).attr("href")?(b.stopPropagation(),void(c.location=a(this).attr("href"))):(a(this).parent("li").toggleClass("active").children("ul").collapse("toggle"),void(d&&a(this).parent("li").siblings().removeClass("active").children("ul.in").collapse("hide")))})},isIE:function(){for(var a,b=3,d=c.createElement("div"),e=d.getElementsByTagName("i");d.innerHTML="",e[0];)return b>4?b:a},doubleTapToGo:function(a){var b=this.element;return 
a.hasClass("doubleTapToGo")?(a.removeClass("doubleTapToGo"),!0):a.parent().children("ul").length?(b.find(".doubleTapToGo").removeClass("doubleTapToGo"),a.addClass("doubleTapToGo"),!1):void 0},remove:function(){this.element.off("."+e),this.element.removeData(e)}},a.fn[e]=function(b){return this.each(function(){var c=a(this);c.data(e)&&c.data(e).remove(),c.data(e,new d(this,b))}),this}}(jQuery,window,document); -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2019-03-22 2 | -------------------------------------------------------------------------------- /screenshots/clusters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/screenshots/clusters.png -------------------------------------------------------------------------------- /screenshots/combined.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/screenshots/combined.png -------------------------------------------------------------------------------- /screenshots/consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fede1024/kafka-view/6dfb045117ed5f0f571ccb7e8e7f99f1950ffc95/screenshots/consumer.png -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | use futures::stream::Stream; 2 | use rand::random; 3 | use rdkafka::client::EmptyContext; 4 | use rdkafka::config::{ClientConfig, TopicConfig}; 5 | use rdkafka::consumer::stream_consumer::StreamConsumer; 6 | use rdkafka::consumer::{Consumer, EmptyConsumerContext}; 7 | use rdkafka::error::KafkaError; 8 | use rdkafka::message::{BorrowedMessage, Message, OwnedMessage}; 9 | use rdkafka::producer::FutureProducer; 10 | use rdkafka::util::{duration_to_millis, millis_to_epoch}; 11 | use serde::de::{Deserialize, DeserializeOwned}; 12 | use serde::ser::Serialize; 13 | use serde_json; 14 | 15 | use std::borrow::Borrow; 16 | use std::collections::hash_map; 17 | use std::collections::{HashMap, HashSet}; 18 | use std::hash::Hash; 19 | use std::sync::{Arc, RwLock}; 20 | use std::time::{Duration, SystemTime}; 21 | 22 | use error::*; 23 | use metadata::{Broker, ClusterId, Group, Partition, TopicName}; 24 | use metrics::TopicMetrics; 25 | 26 | #[derive(Serialize, Deserialize, Debug, Hash, Eq, PartialEq)] 27 | struct WrappedKey(String, String); 28 | 29 | impl WrappedKey { 30 | fn new<'de, K>(cache_name: String, key: &'de K) -> WrappedKey 31 | where 32 | K: Serialize + Deserialize<'de>, 33 | { 34 | WrappedKey(cache_name, serde_json::to_string(key).unwrap()) //TODO: error handling 35 | } 36 | 37 | pub fn cache_name(&self) -> &str { 38 | &self.0 39 | } 40 | 41 | pub fn serialized_key(&self) -> &str { 42 | &self.1 43 | } 44 | } 45 | 46 | // 47 | // ********* REPLICA WRITER ********** 48 | // 49 | 50 | pub struct ReplicaWriter { 51 | topic_name: String, 52 | producer: FutureProducer, 53 | } 54 | 55 | impl ReplicaWriter { 56 | pub fn new(brokers: &str, topic_name: &str) -> Result { 57 | let producer = ClientConfig::new() 58 | .set("bootstrap.servers", brokers) 59 | .set("compression.codec", "gzip") 60 | 
.set("message.max.bytes", "10000000") 61 | .set("api.version.request", "true") 62 | .create::>() 63 | .expect("Producer creation error"); 64 | 65 | let writer = ReplicaWriter { 66 | topic_name: topic_name.to_owned(), 67 | producer, 68 | }; 69 | 70 | Ok(writer) 71 | } 72 | 73 | // TODO: use structure for value 74 | /// Writes a new update into the topic. The name of the replicated map and the key will be 75 | /// serialized together as key of the message, and the value will be serialized in the payload. 76 | pub fn update<'de, K, V>(&self, name: &str, key: &'de K, value: &'de V) -> Result<()> 77 | where 78 | K: Serialize + Deserialize<'de> + Clone, 79 | V: Serialize + Deserialize<'de>, 80 | { 81 | let serialized_key = serde_json::to_vec(&WrappedKey::new(name.to_owned(), key)) 82 | .chain_err(|| "Failed to serialize key")?; 83 | let serialized_value = 84 | serde_json::to_vec(&value).chain_err(|| "Failed to serialize value")?; 85 | trace!( 86 | "Serialized update size: key={:.3}KB value={:.3}KB", 87 | (serialized_key.len() as f64 / 1000f64), 88 | (serialized_value.len() as f64 / 1000f64) 89 | ); 90 | let ts = millis_to_epoch(SystemTime::now()); 91 | let _f = self.producer.send_copy( 92 | self.topic_name.as_str(), 93 | None, 94 | Some(&serialized_value), 95 | Some(&serialized_key), 96 | Some(ts), 97 | 1000, 98 | ); 99 | // _f.wait(); // Uncomment to make production synchronous 100 | Ok(()) 101 | } 102 | 103 | /// Deletes an element from the specified cache 104 | pub fn delete<'de, K>(&self, name: &str, key: &'de K) -> Result<()> 105 | where 106 | K: Serialize + Deserialize<'de> + Clone, 107 | { 108 | let serialized_key = serde_json::to_vec(&WrappedKey::new(name.to_owned(), key)) 109 | .chain_err(|| "Failed to serialize key")?; 110 | self.write_tombstone(&serialized_key) 111 | } 112 | 113 | /// Writes a tombstone for the specified message key. 
114 | fn write_tombstone(&self, message_key: &[u8]) -> Result<()> { 115 | let ts = millis_to_epoch(SystemTime::now()); 116 | let _f = self.producer.send_copy::<[u8], [u8]>( 117 | self.topic_name.as_str(), 118 | None, 119 | None, 120 | Some(&message_key), 121 | Some(ts), 122 | 1000, 123 | ); 124 | Ok(()) 125 | } 126 | } 127 | 128 | // 129 | // ********* REPLICA READER ********** 130 | // 131 | 132 | #[derive(Debug)] 133 | pub enum ReplicaCacheUpdate<'a> { 134 | Set { 135 | key: &'a str, 136 | payload: &'a [u8], 137 | timestamp: u64, 138 | }, 139 | Delete { 140 | key: &'a str, 141 | }, 142 | } 143 | 144 | pub trait UpdateReceiver: Send + 'static { 145 | fn receive_update(&self, name: &str, update: ReplicaCacheUpdate) -> Result<()>; 146 | } 147 | 148 | type ReplicaConsumer = StreamConsumer<EmptyConsumerContext>; 149 | 150 | pub struct ReplicaReader { 151 | consumer: ReplicaConsumer, 152 | brokers: String, 153 | topic_name: String, 154 | processed_messages: i64, 155 | } 156 | 157 | impl ReplicaReader { 158 | pub fn new(brokers: &str, topic_name: &str) -> Result<ReplicaReader> { 159 | let consumer: ReplicaConsumer = ClientConfig::new() 160 | .set( 161 | "group.id", 162 | &format!("kafka_web_cache_reader_{}", random::<u64>()), 163 | ) 164 | .set("bootstrap.servers", brokers) 165 | .set("session.timeout.ms", "6000") 166 | .set("enable.auto.commit", "false") 167 | .set("queued.min.messages", "10000") // Reduce memory usage 168 | //.set("fetch.message.max.bytes", "102400") 169 | .set("api.version.request", "true") 170 | .set_default_topic_config( 171 | TopicConfig::new() 172 | .set("auto.offset.reset", "smallest") 173 | .finalize(), 174 | ) 175 | .create() 176 | .chain_err(|| "Consumer creation failed")?; 177 | 178 | //let topic_partition = TopicPartitionList::with_topics(&vec![topic_name]); 179 | // consumer.assign(&topic_partition) 180 | consumer 181 | .subscribe(&[topic_name]) 182 | .chain_err(|| "Can't subscribe to specified topics")?; 183 | 184 | Ok(ReplicaReader { 185 | consumer, 186 | brokers: brokers.to_owned(), 187 | topic_name: topic_name.to_owned(), 188 | processed_messages: 0, 189 | }) 190 | } 191 | 192 | pub fn processed_messages(&self) -> i64 { 193 | self.processed_messages 194 | } 195 | 196 | pub fn load_state<R: UpdateReceiver>(&mut self, receiver: R) -> Result<()> { 197 | info!("Started creating state"); 198 | match self.last_message_per_key() { 199 | Err(e) => format_error_chain!(e), 200 | Ok(state) => { 201 | for (w_key, message) in state { 202 | let update = match message.payload() { 203 | Some(payload) => ReplicaCacheUpdate::Set { 204 | key: w_key.serialized_key(), 205 | payload, 206 | timestamp: message 207 | .timestamp() 208 | .to_millis() 209 | .unwrap_or_else(|| millis_to_epoch(SystemTime::now())) 210 | as u64, 211 | }, 212 | None => ReplicaCacheUpdate::Delete { 213 | key: w_key.serialized_key(), 214 | }, 215 | }; 216 | if let Err(e) = receiver.receive_update(w_key.cache_name(), update) { 217 | format_error_chain!(e); 218 | } 219 | } 220 | } 221 | } 222 | info!("State creation terminated"); 223 | Ok(()) 224 | } 225 |
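// `load_state` above replays the whole replica topic via `last_message_per_key`
// below, keeping only the newest record per key, so a restarted instance rebuilds
// every cache from Kafka alone, without contacting other kafka-view instances.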
226 | fn last_message_per_key(&mut self) -> Result<HashMap<WrappedKey, OwnedMessage>> { 227 | let mut eof_set = HashSet::new(); 228 | let mut borrowed_state = HashMap::new(); 229 | let mut state = HashMap::new(); 230 | 231 | let topic_name = &self.topic_name; 232 | let metadata = self 233 | .consumer 234 | .fetch_metadata(Some(topic_name), 30000) 235 | .chain_err(|| "Failed to fetch metadata")?; 236 | 237 | if metadata.topics().is_empty() { 238 | warn!( 239 | "No replicator topic found ({} {})", 240 | self.brokers, self.topic_name 241 | ); 242 | return Ok(HashMap::new()); 243 | } 244 | let topic_metadata = &metadata.topics()[0]; 245 | if topic_metadata.partitions().is_empty() { 246 | return Ok(state); // Topic is empty and auto created 247 | } 248 | 249 | let message_stream = self.consumer.start(); 250 | 251 | for message in message_stream.wait() { 252 | match message { 253 | Ok(Ok(m)) => { 254 | self.processed_messages += 1; 255 | match parse_message_key(&m).chain_err(|| "Failed to parse message key") { 256 | Ok(wrapped_key) => { 257 | borrowed_state.insert(wrapped_key, m); 258 | } 259 | Err(e) => format_error_chain!(e), 260 | }; 261 | } 262 | Ok(Err(KafkaError::PartitionEOF(p))) => { 263 | eof_set.insert(p); 264 | } 265 | Ok(Err(e)) => error!("Error while reading from Kafka: {}", e), 266 | Err(_) => error!("Stream receive error"), 267 | }; 268 | if borrowed_state.len() >= 10000 { 269 | for (key, message) in borrowed_state { 270 | state.insert(key, message.detach()); 271 | } 272 | borrowed_state = HashMap::new(); 273 | } 274 | if eof_set.len() == topic_metadata.partitions().len() { 275 | break; 276 | } 277 | } 278 | for (key, message) in borrowed_state { 279 | state.insert(key, message.detach()); 280 | } 281 | self.consumer.stop(); 282 | info!("Total unique items in caches: {}", state.len()); 283 | Ok(state) 284 | } 285 | } 286 | 287 | fn parse_message_key(message: &BorrowedMessage) -> Result<WrappedKey> { 288 | let key_bytes = match message.key() { 289 | Some(k) => k, 290 | None => bail!("Empty key found"), 291 | }; 292 | 293 | let wrapped_key = serde_json::from_slice::<WrappedKey>(key_bytes) 294 | .chain_err(|| "Failed to decode wrapped key")?; 295 | Ok(wrapped_key) 296 | } 297 | 298 | // 299 | // ********** REPLICATED MAP ********** 300 | // 301 | 302 | #[derive(Clone)] 303 | struct ValueContainer<V> { 304 | value: V, 305 | updated: u64, // millis since epoch 306 | } 307 | 308 | impl<V> ValueContainer<V> { 309 | fn new(value: V) -> ValueContainer<V> { 310 | ValueContainer { 311 | value, 312 | updated: millis_to_epoch(SystemTime::now()) as u64, 313 | } 314 | } 315 | 316 | fn new_with_timestamp(value: V, timestamp: u64) -> ValueContainer<V> { 317 | ValueContainer { 318 | value, 319 | updated: timestamp, 320 | } 321 | } 322 | } 323 | 324 | pub struct ReplicatedMap<K, V> 325 | where 326 | K: Eq + Hash + Clone + Serialize + DeserializeOwned, 327 | V: Clone + PartialEq + Serialize + DeserializeOwned, 328 | { 329 | name: String, 330 | map: Arc<RwLock<HashMap<K, ValueContainer<V>>>>, 331 | replica_writer: Arc<ReplicaWriter>, 332 | } 333 | 334 | impl<K, V> ReplicatedMap<K, V> 335 | where 336 | K: Eq + Hash + Clone + Serialize + DeserializeOwned, 337 | V: Clone + PartialEq + Serialize + DeserializeOwned, 338 | { 339 | pub fn new(name: &str, replica_writer: Arc<ReplicaWriter>) -> ReplicatedMap<K, V> { 340 | ReplicatedMap { 341 | name: name.to_owned(), 342 | map: Arc::new(RwLock::new(HashMap::new())), 343 | replica_writer, 344 | } 345 | } 346 | 347 | pub fn alias(&self) -> ReplicatedMap<K, V> { 348 | ReplicatedMap { 349 | name: self.name.clone(), 350 | map: self.map.clone(), 351 | replica_writer: self.replica_writer.clone(), 352 | } 353 | } 354 | 355 | pub fn keys(&self) -> Vec<K> { 356 | match self.map.read() { 357 | Ok(ref cache) => (*cache).keys().cloned().collect::<Vec<_>>(), 358 | Err(_) => panic!("Poison error"), 359 | } 360 | } 361 | 362 | fn receive_update(&self, update: ReplicaCacheUpdate) -> Result<()> { 363 | match update { 364 | ReplicaCacheUpdate::Set { 365 | key, 366 | payload, 367 | timestamp, 368 | } => { 369 | let key = serde_json::from_str::<K>(key).chain_err(|| "Failed to parse key")?; 370 | let value = 371 | serde_json::from_slice::<V>(payload).chain_err(|| "Failed to parse payload")?; 372 | self.local_update(key, value, Some(timestamp)); 373 | } 374 | ReplicaCacheUpdate::Delete { key } => { 375 | let key = serde_json::from_str::<K>(key).chain_err(|| "Failed to parse key")?; 376 | self.local_remove(&key); 377 | } 378 | } 379 | Ok(()) 380 | } 381 | 382 | fn local_update(&self, key: K, value: V, timestamp: Option<u64>) { 383 | let value = if let Some(ts) = timestamp { 384 | ValueContainer::new_with_timestamp(value, ts) 385 | } else { 386 | ValueContainer::new(value) 387 | }; 388 | match self.map.write() { 389 | Ok(mut cache) => (*cache).insert(key, value), 390 | Err(_) => panic!("Poison error"), 391 | }; 392 | } 393 | 394 | fn local_remove(&self, key: &K) { 395 | match self.map.write() { 396 | Ok(mut cache) => (*cache).remove(key), 397 | Err(_) => panic!("Poison error"), 398 | }; 399 | } 400 |
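// `insert` below is write-through but change-aware: the update is published to
// the replica topic only when the value actually changed, which avoids
// re-broadcasting identical entries on every periodic refresh.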
payload")?; 372 | self.local_update(key, value, Some(timestamp)); 373 | } 374 | ReplicaCacheUpdate::Delete { key } => { 375 | let key = serde_json::from_str::(key).chain_err(|| "Failed to parse key")?; 376 | self.local_remove(&key); 377 | } 378 | } 379 | Ok(()) 380 | } 381 | 382 | fn local_update(&self, key: K, value: V, timestamp: Option) { 383 | let value = if let Some(ts) = timestamp { 384 | ValueContainer::new_with_timestamp(value, ts) 385 | } else { 386 | ValueContainer::new(value) 387 | }; 388 | match self.map.write() { 389 | Ok(mut cache) => (*cache).insert(key, value), 390 | Err(_) => panic!("Poison error"), 391 | }; 392 | } 393 | 394 | fn local_remove(&self, key: &K) { 395 | match self.map.write() { 396 | Ok(mut cache) => (*cache).remove(key), 397 | Err(_) => panic!("Poison error"), 398 | }; 399 | } 400 | 401 | pub fn insert(&self, key: K, new_value: V) -> Result<()> { 402 | let current_value = self.get(&key); 403 | if current_value.is_none() || current_value.unwrap() != new_value { 404 | self.replica_writer 405 | .update(&self.name, &key, &new_value) 406 | .chain_err(|| "Failed to write cache update")?; 407 | } 408 | self.local_update(key, new_value, None); 409 | Ok(()) 410 | } 411 | 412 | pub fn remove(&self, key: &K) -> Result<()> { 413 | self.replica_writer 414 | .delete(&self.name, key) 415 | .chain_err(|| "Failed to write cache delete")?; 416 | self.local_remove(key); 417 | Ok(()) 418 | } 419 | 420 | pub fn remove_expired(&self, max_age: Duration) -> Vec { 421 | let to_remove = { 422 | let cache = self.map.read().unwrap(); 423 | let max_ms = duration_to_millis(max_age) as i64; 424 | let current_ms = millis_to_epoch(SystemTime::now()); 425 | cache 426 | .iter() 427 | .filter(|&(_, v)| (current_ms as i64) - (v.updated as i64) > max_ms) 428 | .map(|(k, _)| k.clone()) 429 | .collect::>() 430 | }; 431 | for k in &to_remove { 432 | if let Err(e) = self.remove(k) { 433 | format_error_chain!(e); 434 | } 435 | } 436 | to_remove 437 | } 438 | 439 | pub fn get(&self, key: &Q) -> Option 440 | where 441 | K: Borrow, 442 | Q: Hash + Eq, 443 | { 444 | match self.map.read() { 445 | Ok(cache) => (*cache).get(key).map(|v| v.value.clone()), 446 | Err(_) => panic!("Poison error"), 447 | } 448 | } 449 | 450 | // TODO: add doc 451 | pub fn lock_iter(&self, f: F) -> R 452 | where 453 | for<'a> F: Fn(ReplicatedMapIter<'a, K, V>) -> R, 454 | { 455 | match self.map.read() { 456 | Ok(cache) => { 457 | let iter = ReplicatedMapIter::new(cache.iter()); 458 | f(iter) 459 | } 460 | Err(_) => panic!("Poison error"), 461 | } 462 | } 463 | 464 | pub fn count(&self, f: F) -> usize 465 | where 466 | F: Fn(&K) -> bool, 467 | { 468 | self.lock_iter(|iter| iter.filter(|&(k, _)| f(k)).count()) 469 | } 470 | 471 | pub fn filter_clone(&self, f: F) -> Vec<(K, V)> 472 | where 473 | F: Fn(&K) -> bool, 474 | { 475 | self.lock_iter(|iter| { 476 | iter.filter(|&(k, _)| f(k)) 477 | .map(|(k, v)| (k.clone(), v.clone())) 478 | .collect::>() 479 | }) 480 | } 481 | 482 | // pub fn filter_clone_v(&self, f: F) -> Vec 483 | // where F: Fn(&K) -> bool { 484 | // self.lock_iter(|iter| { 485 | // iter.filter(|&(k, _)| f(k)) 486 | // .map(|(_, v)| v.clone()) 487 | // .collect::>() 488 | // }) 489 | // } 490 | 491 | pub fn filter_clone_k(&self, f: F) -> Vec 492 | where 493 | F: Fn(&K) -> bool, 494 | { 495 | self.lock_iter(|iter| { 496 | iter.filter(|&(k, _)| f(k)) 497 | .map(|(k, _)| k.clone()) 498 | .collect::>() 499 | }) 500 | } 501 | } 502 | 503 | pub struct ReplicatedMapIter<'a, K, V> 504 | where 505 | K: 'a, 506 | V: 'a, 507 | { 
503 | pub struct ReplicatedMapIter<'a, K, V> 504 | where 505 | K: 'a, 506 | V: 'a, 507 | { 508 | inner: hash_map::Iter<'a, K, ValueContainer<V>>, 509 | } 510 | 511 | impl<'a, K, V> ReplicatedMapIter<'a, K, V> 512 | where 513 | K: 'a, 514 | V: 'a, 515 | { 516 | fn new(inner: hash_map::Iter<'a, K, ValueContainer<V>>) -> ReplicatedMapIter<'a, K, V> { 517 | ReplicatedMapIter { inner } 518 | } 519 | } 520 | 521 | impl<'a, K, V> Iterator for ReplicatedMapIter<'a, K, V> { 522 | type Item = (&'a K, &'a V); 523 | 524 | fn next(&mut self) -> Option<Self::Item> { 525 | self.inner.next().map(|(k, v)| (k, &v.value)) 526 | } 527 | } 528 | 529 | // 530 | // ********** CACHE ********** 531 | // 532 | 533 | /// Metrics for a specific topic 534 | pub type MetricsCache = ReplicatedMap<(ClusterId, TopicName), TopicMetrics>; 535 | 536 | /// Broker information 537 | pub type BrokerCache = ReplicatedMap<ClusterId, Vec<Broker>>; 538 | 539 | /// Topic and partition information 540 | pub type TopicCache = ReplicatedMap<(ClusterId, TopicName), Vec<Partition>>; 541 | 542 | /// Groups 543 | pub type GroupCache = ReplicatedMap<(ClusterId, String), Group>; 544 | 545 | /// Consumer group offsets per topic 546 | pub type OffsetsCache = ReplicatedMap<(ClusterId, String, TopicName), Vec<i64>>; 547 | 548 | /// Offsets for the internal consumers of the __consumer_offsets topic 549 | pub type InternalConsumerOffsetCache = ReplicatedMap<ClusterId, Vec<i64>>; 550 | 551 | pub struct Cache { 552 | pub metrics: MetricsCache, 553 | pub offsets: OffsetsCache, 554 | pub brokers: BrokerCache, 555 | pub topics: TopicCache, 556 | pub groups: GroupCache, 557 | pub internal_offsets: InternalConsumerOffsetCache, 558 | } 559 | 560 | impl Cache { 561 | pub fn new(replica_writer: ReplicaWriter) -> Cache { 562 | let replica_writer_arc = Arc::new(replica_writer); 563 | Cache { 564 | metrics: ReplicatedMap::new("metrics", replica_writer_arc.clone()), 565 | offsets: ReplicatedMap::new("offsets", replica_writer_arc.clone()), 566 | brokers: ReplicatedMap::new("brokers", replica_writer_arc.clone()), 567 | topics: ReplicatedMap::new("topics", replica_writer_arc.clone()), 568 | groups: ReplicatedMap::new("groups", replica_writer_arc.clone()), 569 | internal_offsets: ReplicatedMap::new("internal_offsets", replica_writer_arc), 570 | } 571 | } 572 | 573 | pub fn alias(&self) -> Cache { 574 | Cache { 575 | metrics: self.metrics.alias(), 576 | offsets: self.offsets.alias(), 577 | brokers: self.brokers.alias(), 578 | topics: self.topics.alias(), 579 | groups: self.groups.alias(), 580 | internal_offsets: self.internal_offsets.alias(), 581 | } 582 | } 583 | } 584 | 585 | impl UpdateReceiver for Cache { 586 | fn receive_update(&self, cache_name: &str, update: ReplicaCacheUpdate) -> Result<()> { 587 | match cache_name { 588 | "metrics" => self.metrics.receive_update(update), 589 | "offsets" => self.offsets.receive_update(update), 590 | "brokers" => self.brokers.receive_update(update), 591 | "topics" => self.topics.receive_update(update), 592 | "groups" => self.groups.receive_update(update), 593 | "internal_offsets" => self.internal_offsets.receive_update(update), 594 | _ => bail!("Unknown cache name: {}", cache_name), 595 | } 596 | } 597 | } 598 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use serde_yaml; 2 | 3 | use error::*; 4 | use metadata::ClusterId; 5 | 6 | use std::collections::HashMap; 7 | use std::fs::File; 8 | use std::io::prelude::*; 9 | 10 | fn default_true() -> bool { 11 | true 12 | }
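// serde calls `default_true` through `#[serde(default = "default_true")]` below, so
// `enable_tailing` and `show_zk_reassignments` are on unless the YAML disables them.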
13 | 14 | #[derive(Serialize, Deserialize, Debug, Clone)] 15 | pub struct ClusterConfig { 16 | pub cluster_id: Option<ClusterId>, // This will always be available after load 17 | pub broker_list: Vec<String>, 18 | pub zookeeper: String, 19 | pub jolokia_port: Option<i32>, 20 | pub graph_url: Option<String>, 21 | #[serde(default = "default_true")] 22 | pub enable_tailing: bool, 23 | #[serde(default = "default_true")] 24 | pub show_zk_reassignments: bool, 25 | } 26 | 27 | impl ClusterConfig { 28 | pub fn bootstrap_servers(&self) -> String { 29 | self.broker_list.join(",") 30 | } 31 | } 32 | 33 | #[derive(Serialize, Deserialize, Debug, Clone)] 34 | pub struct CachingConfig { 35 | pub cluster: ClusterId, 36 | pub topic: String, 37 | } 38 | 39 | #[derive(Serialize, Deserialize, Debug, Clone)] 40 | pub struct Config { 41 | pub listen_port: u16, 42 | pub listen_host: String, 43 | pub metadata_refresh: u64, 44 | pub metrics_refresh: u64, 45 | pub offsets_store_duration: u64, 46 | pub consumer_offsets_group_id: String, 47 | pub clusters: HashMap<ClusterId, ClusterConfig>, 48 | pub caching: CachingConfig, 49 | } 50 | 51 | impl Config { 52 | pub fn cluster(&self, cluster_id: &ClusterId) -> Option<&ClusterConfig> { 53 | self.clusters.get(cluster_id) 54 | } 55 | } 56 | 57 | pub fn read_config(path: &str) -> Result<Config> { 58 | let mut f = File::open(path).chain_err(|| "Unable to open configuration file")?; 59 | let mut s = String::new(); 60 | f.read_to_string(&mut s) 61 | .chain_err(|| "Unable to read configuration file")?; 62 | 63 | let mut config: Config = 64 | serde_yaml::from_str(&s).chain_err(|| "Unable to parse configuration file")?; 65 | 66 | for (cluster_id, cluster) in &mut config.clusters { 67 | cluster.cluster_id = Some(cluster_id.clone()); 68 | } 69 | 70 | info!("Configuration: {:?}", config); 71 | 72 | Ok(config) 73 | } 74 |
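// Illustrative YAML matching the structs above (all values are made up, not defaults):
//
//     listen_port: 8080
//     listen_host: "localhost"
//     metadata_refresh: 60
//     metrics_refresh: 60
//     offsets_store_duration: 3600
//     consumer_offsets_group_id: "kafka_view_offsets"
//     caching:
//       cluster: local
//       topic: kafka_view_cache
//     clusters:
//       local:
//         broker_list: ["localhost:9092"]
//         zookeeper: "localhost:2181"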
-------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use metadata::ClusterId; 2 | 3 | error_chain! { 4 | errors { 5 | MissingConsumerError(cluster: ClusterId) { 6 | description("consumer is missing from cache") 7 | display("consumer is missing from cache for cluster {}", cluster) 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/live_consumer.rs: -------------------------------------------------------------------------------- 1 | use rdkafka::config::ClientConfig; 2 | use rdkafka::consumer::{BaseConsumer, Consumer, EmptyConsumerContext}; 3 | use rdkafka::message::BorrowedMessage; 4 | use rdkafka::message::Timestamp::*; 5 | use rdkafka::Message; 6 | use rocket::http::RawStr; 7 | use rocket::State; 8 | use scheduled_executor::ThreadPoolExecutor; 9 | 10 | use config::{ClusterConfig, Config}; 11 | use error::*; 12 | use metadata::ClusterId; 13 | 14 | use std::borrow::Cow; 15 | use std::collections::HashMap; 16 | use std::sync::atomic::{AtomicBool, Ordering}; 17 | use std::sync::{Arc, RwLock}; 18 | use std::time::{Duration, Instant}; 19 | 20 | pub struct LiveConsumer { 21 | id: u64, 22 | cluster_id: ClusterId, 23 | topic: String, 24 | last_poll: RwLock<Instant>, 25 | consumer: BaseConsumer<EmptyConsumerContext>, 26 | active: AtomicBool, 27 | } 28 | 29 | impl LiveConsumer { 30 | fn new(id: u64, cluster_config: &ClusterConfig, topic: &str) -> Result<LiveConsumer> { 31 | let consumer = ClientConfig::new() 32 | .set("bootstrap.servers", &cluster_config.bootstrap_servers()) 33 | .set("group.id", &format!("kafka_view_live_consumer_{}", id)) 34 | .set("enable.partition.eof", "false") 35 | .set("api.version.request", "true") 36 | .set("enable.auto.commit", "false") 37 | .set("queued.max.messages.kbytes", "100") // Reduce memory usage 38 | .set("fetch.message.max.bytes", "102400") 39 | //.set("debug", "all") 40 | .create::<BaseConsumer<_>>() 41 | .chain_err(|| "Failed to create rdkafka consumer")?; 42 | 43 | debug!("Creating live consumer for {}", topic); 44 | 45 | Ok(LiveConsumer { 46 | id, 47 | cluster_id: cluster_config.cluster_id.clone().unwrap(), 48 | consumer, 49 | active: AtomicBool::new(false), 50 | last_poll: RwLock::new(Instant::now()), 51 | topic: topic.to_owned(), 52 | }) 53 | } 54 | 55 | fn activate(&self) -> Result<()> { 56 | // TODO: start from the past 57 | debug!("Activating live consumer for {}", self.topic); 58 | 59 | // TODO: use assign instead 60 | self.consumer 61 | .subscribe(vec![self.topic.as_str()].as_slice()) 62 | .chain_err(|| "Can't subscribe to specified topics")?; 63 | self.active.store(true, Ordering::Relaxed); 64 | Ok(()) 65 | } 66 | 67 | pub fn is_active(&self) -> bool { 68 | self.active.load(Ordering::Relaxed) 69 | } 70 | 71 | pub fn last_poll(&self) -> Instant { 72 | *self.last_poll.read().unwrap() 73 | } 74 | 75 | pub fn id(&self) -> u64 { 76 | self.id 77 | } 78 | 79 | pub fn cluster_id(&self) -> &ClusterId { 80 | &self.cluster_id 81 | } 82 | 83 | pub fn topic(&self) -> &str { 84 | &self.topic 85 | } 86 | 87 | fn poll(&self, max_msg: usize, timeout: Duration) -> Vec<BorrowedMessage> { 88 | let start_time = Instant::now(); 89 | let mut buffer = Vec::new(); 90 | *self.last_poll.write().unwrap() = Instant::now(); 91 | 92 | while Instant::elapsed(&start_time) < timeout && buffer.len() < max_msg { 93 | match self.consumer.poll(100) { 94 | None => {} 95 | Some(Ok(m)) => buffer.push(m), 96 | Some(Err(e)) => { 97 | error!("Error while receiving message {:?}", e); 98 | } 99 | }; 100 | } 101 | 102 | debug!( 103 | "{} messages received in {:?}", 104 | buffer.len(), 105 | Instant::elapsed(&start_time) 106 | ); 107 | buffer 108 | } 109 | } 110 |
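// `poll` above drains up to `max_msg` messages or waits until `timeout` elapses,
// whichever comes first, and refreshes `last_poll`; `LiveConsumerStore` below uses
// that timestamp to drop consumers that have been idle for more than 20 seconds.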
111 | impl Drop for LiveConsumer { 112 | fn drop(&mut self) { 113 | debug!("Dropping consumer {}", self.id); 114 | } 115 | } 116 | 117 | type LiveConsumerMap = HashMap<u64, Arc<LiveConsumer>>; 118 | 119 | fn remove_idle_consumers(consumers: &mut LiveConsumerMap) { 120 | consumers.retain(|_, ref consumer| consumer.last_poll().elapsed() < Duration::from_secs(20)); 121 | } 122 | 123 | pub struct LiveConsumerStore { 124 | consumers: Arc<RwLock<LiveConsumerMap>>, 125 | _executor: ThreadPoolExecutor, 126 | } 127 | 128 | impl LiveConsumerStore { 129 | pub fn new(executor: ThreadPoolExecutor) -> LiveConsumerStore { 130 | let consumers = Arc::new(RwLock::new(HashMap::new())); 131 | let consumers_clone = Arc::clone(&consumers); 132 | executor.schedule_fixed_rate( 133 | Duration::from_secs(10), 134 | Duration::from_secs(10), 135 | move |_handle| { 136 | let mut consumers = consumers_clone.write().unwrap(); 137 | remove_idle_consumers(&mut *consumers); 138 | }, 139 | ); 140 | LiveConsumerStore { 141 | consumers, 142 | _executor: executor, 143 | } 144 | } 145 | 146 | fn get_consumer(&self, id: u64) -> Option<Arc<LiveConsumer>> { 147 | let consumers = self.consumers.read().expect("Poison error"); 148 | (*consumers).get(&id).cloned() 149 | } 150 | 151 | fn add_consumer( 152 | &self, 153 | id: u64, 154 | cluster_config: &ClusterConfig, 155 | topic: &str, 156 | ) -> Result<Arc<LiveConsumer>> { 157 | let live_consumer = LiveConsumer::new(id, cluster_config, topic) 158 | .chain_err(|| "Failed to create live consumer")?; 159 | 160 | let live_consumer_arc = Arc::new(live_consumer); 161 | 162 | // Add consumer immediately to the store, to prevent other threads from adding it again. 163 | match self.consumers.write() { 164 | Ok(mut consumers) => (*consumers).insert(id, live_consumer_arc.clone()), 165 | Err(_) => panic!("Poison error while writing consumer to cache"), 166 | }; 167 | 168 | live_consumer_arc 169 | .activate() 170 | .chain_err(|| "Failed to activate live consumer")?; 171 | 172 | Ok(live_consumer_arc) 173 | } 174 | 175 | pub fn consumers(&self) -> Vec<Arc<LiveConsumer>> { 176 | self.consumers 177 | .read() 178 | .unwrap() 179 | .iter() 180 | .map(|(_, consumer)| consumer.clone()) 181 | .collect::<Vec<_>>() 182 | } 183 | } 184 | 185 | // TODO: check log in case of error 186 | 187 | #[derive(Serialize)] 188 | struct TailedMessage { 189 | partition: i32, 190 | offset: i64, 191 | key: Option<String>, 192 | created_at: Option<i64>, 193 | appended_at: Option<i64>, 194 | payload: String, 195 | } 196 |
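// The endpoint below returns a JSON array of TailedMessage values, e.g. (illustrative):
// [{"partition":0,"offset":42,"key":"k1","created_at":1553212800000,"appended_at":null,"payload":"..."}]
// and an empty array ("[]") while the consumer is still being set up.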
197 | #[get("/api/tailer/<cluster_id>/<topic>/<id>")] 198 | pub fn topic_tailer_api( 199 | cluster_id: ClusterId, 200 | topic: &RawStr, 201 | id: u64, 202 | config: State<Config>, 203 | live_consumers_store: State<LiveConsumerStore>, 204 | ) -> Result<String> { 205 | let cluster_config = config.clusters.get(&cluster_id); 206 | 207 | if cluster_config.is_none() || !cluster_config.unwrap().enable_tailing { 208 | return Ok("[]".to_owned()); 209 | } 210 | let cluster_config = cluster_config.unwrap(); 211 | 212 | let consumer = match live_consumers_store.get_consumer(id) { 213 | Some(consumer) => consumer, 214 | None => live_consumers_store 215 | .add_consumer(id, cluster_config, topic) 216 | .chain_err(|| { 217 | format!( 218 | "Error while creating live consumer for {} {}", 219 | cluster_id, topic 220 | ) 221 | })?, 222 | }; 223 | 224 | if !consumer.is_active() { 225 | // Consumer is still being activated, no results for now. 226 | return Ok("[]".to_owned()); 227 | } 228 | 229 | let mut output = Vec::new(); 230 | for message in consumer.poll(100, Duration::from_secs(3)) { 231 | let key = message 232 | .key() 233 | .map(|bytes| String::from_utf8_lossy(bytes)) 234 | .map(|cow_str| cow_str.into_owned()); 235 | 236 | let mut created_at = None; 237 | let mut appended_at = None; 238 | match message.timestamp() { 239 | CreateTime(ctime) => created_at = Some(ctime), 240 | LogAppendTime(atime) => appended_at = Some(atime), 241 | NotAvailable => (), 242 | } 243 | 244 | let original_payload = message 245 | .payload() 246 | .map(|bytes| String::from_utf8_lossy(bytes)) 247 | .unwrap_or(Cow::Borrowed("")); 248 | let payload = if original_payload.len() > 1024 { 249 | format!( 250 | "{}...", 251 | original_payload.chars().take(1024).collect::<String>() 252 | ) 253 | } else { 254 | original_payload.into_owned() 255 | }; 256 | 257 | output.push(TailedMessage { 258 | partition: message.partition(), 259 | offset: message.offset(), 260 | key, 261 | created_at, 262 | appended_at, 263 | payload, 264 | }) 265 | } 266 | 267 | Ok(json!(output).to_string()) 268 | } 269 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(plugin, proc_macro_hygiene, decl_macro)] 2 | 3 | #[macro_use] 4 | extern crate error_chain; 5 | #[macro_use] 6 | extern crate log; 7 | #[macro_use] 8 | extern crate serde_derive; 9 | #[macro_use] 10 | extern crate serde_json; 11 | #[macro_use] 12 | extern crate lazy_static; 13 | extern crate brotli; 14 | extern crate byteorder; 15 | extern crate chrono; 16 | extern crate clap; 17 | extern crate env_logger; 18 | extern crate flate2; 19 | extern crate futures; 20 | extern crate futures_cpupool; 21 | extern crate hyper; 22 | extern crate maud; 23 | extern crate rand; 24 | extern crate rdkafka; 25 | extern crate regex; 26 | #[macro_use] 27 | extern crate rocket; 28 | extern crate scheduled_executor; 29 | extern crate serde; 30 | extern crate serde_yaml; 31 | extern crate zookeeper; 32 | 33 | #[macro_use] 34 | mod utils; 35 | mod cache; 36 | mod config; 37 | mod error; 38 | mod live_consumer; 39 | mod metadata; 40 | mod metrics; 41 | mod offsets; 42 | mod web_server; 43 | mod zk; 44 | 45 | use clap::{App, Arg, ArgMatches}; 46 | use scheduled_executor::{TaskGroupScheduler, ThreadPoolExecutor}; 47 | use std::time::Duration; 48 | 49 | use cache::{Cache, ReplicaReader, ReplicaWriter}; 50 | use error::*; 51 | use metadata::MetadataFetchTaskGroup; 52 | use metrics::MetricsFetchTaskGroup; 53 | use offsets::run_offset_consumer; 54 | 55 | include!(concat!(env!("OUT_DIR"), "/rust_version.rs")); 56 | 57 | fn run_kafka_web(config_path: &str) -> Result<()> { 58 | let config = config::read_config(config_path) 59 | .chain_err(|| format!("Unable to load configuration from '{}'", config_path))?; 60 | 61 | let replicator_bootstrap_servers = match config.cluster(&config.caching.cluster) { 62 | Some(cluster) => cluster.bootstrap_servers(), 63 | None => bail!("Can't find cache cluster {}", config.caching.cluster), 64 | }; 65 | let topic_name = &config.caching.topic; 66 | let replica_writer = 67 | ReplicaWriter::new(&replicator_bootstrap_servers, topic_name).chain_err(|| { 68 | format!( 69 | "Replica writer creation failed (brokers: {}, topic: {})", 70 | replicator_bootstrap_servers, topic_name 71 | ) 72 | })?; 73 | let mut replica_reader = ReplicaReader::new(&replicator_bootstrap_servers, topic_name) 74 |
.chain_err(|| { 75 | format!( 76 | "Replica reader creation failed (brokers: {}, topic: {})", 77 | replicator_bootstrap_servers, topic_name 78 | ) 79 | })?; 80 | 81 | let cache = Cache::new(replica_writer); 82 | 83 | // Load all the state from Kafka 84 | let start_time = chrono::Utc::now(); 85 | replica_reader.load_state(cache.alias()).chain_err(|| { 86 | format!( 87 | "State load failed (brokers: {}, topic: {})", 88 | replicator_bootstrap_servers, topic_name 89 | ) 90 | })?; 91 | let elapsed_sec = chrono::Utc::now() 92 | .signed_duration_since(start_time) 93 | .num_milliseconds() as f32 94 | / 1000f32; 95 | info!( 96 | "Processed {} messages in {:.3} seconds ({:.0} msg/s).", 97 | replica_reader.processed_messages(), 98 | elapsed_sec, 99 | replica_reader.processed_messages() as f32 / elapsed_sec 100 | ); 101 | 102 | let executor = 103 | ThreadPoolExecutor::new(4).chain_err(|| "Failed to start thread pool executor")?; 104 | 105 | // Metadata fetch 106 | executor.schedule( 107 | MetadataFetchTaskGroup::new(&cache, &config), 108 | Duration::from_secs(0), 109 | Duration::from_secs(config.metadata_refresh), 110 | ); 111 | 112 | // Metrics fetch 113 | executor.schedule( 114 | MetricsFetchTaskGroup::new(&cache, &config), 115 | Duration::from_secs(0), 116 | Duration::from_secs(config.metrics_refresh), 117 | ); 118 | 119 | // Consumer offsets 120 | for (cluster_id, cluster_config) in &config.clusters { 121 | if let Err(e) = run_offset_consumer(cluster_id, cluster_config, &config, &cache) { 122 | format_error_chain!(e); 123 | } 124 | } 125 | 126 | // CACHE EXPIRATION 127 | let cache_clone = cache.alias(); 128 | let metadata_expiration = config.metadata_refresh * 3; 129 | executor.schedule_fixed_rate( 130 | Duration::from_secs(config.metadata_refresh * 2), 131 | Duration::from_secs(config.metadata_refresh), 132 | move |_| { 133 | cache_clone 134 | .topics 135 | .remove_expired(Duration::from_secs(metadata_expiration)); 136 | cache_clone 137 | .brokers 138 | .remove_expired(Duration::from_secs(metadata_expiration)); 139 | cache_clone 140 | .groups 141 | .remove_expired(Duration::from_secs(metadata_expiration)); 142 | }, 143 | ); 144 | 145 | let cache_clone = cache.alias(); 146 | let metrics_expiration = config.metrics_refresh * 3; 147 | executor.schedule_fixed_rate( 148 | Duration::from_secs(config.metrics_refresh * 2), 149 | Duration::from_secs(config.metrics_refresh), 150 | move |_| { 151 | cache_clone 152 | .metrics 153 | .remove_expired(Duration::from_secs(metrics_expiration)); 154 | }, 155 | ); 156 | 157 | let cache_clone = cache.alias(); 158 | let offsets_store_duration = config.offsets_store_duration; 159 | executor.schedule_fixed_rate( 160 | Duration::from_secs(10), 161 | Duration::from_secs(120), 162 | move |_| { 163 | cache_clone 164 | .offsets 165 | .remove_expired(Duration::from_secs(offsets_store_duration)); 166 | }, 167 | ); 168 | 169 | web_server::server::run_server(&executor, cache.alias(), &config) 170 | .chain_err(|| "Server initialization failed")?; 171 | 172 | Ok(()) 173 | } 174 | 175 | fn setup_args<'a>() -> ArgMatches<'a> { 176 | App::new("kafka web interface") 177 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 178 | .about("Kafka web interface") 179 | .arg( 180 | Arg::with_name("conf") 181 | .short("c") 182 | .long("conf") 183 | .help("Configuration file") 184 | .takes_value(true) 185 | .required(true), 186 | ) 187 | .arg( 188 | Arg::with_name("log-conf") 189 | .long("log-conf") 190 | .help("Configure the logging format (example: 'rdkafka=trace')") 191 | 
174 | 
175 | fn setup_args<'a>() -> ArgMatches<'a> {
176 |     App::new("kafka web interface")
177 |         .version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
178 |         .about("Kafka web interface")
179 |         .arg(
180 |             Arg::with_name("conf")
181 |                 .short("c")
182 |                 .long("conf")
183 |                 .help("Configuration file")
184 |                 .takes_value(true)
185 |                 .required(true),
186 |         )
187 |         .arg(
188 |             Arg::with_name("log-conf")
189 |                 .long("log-conf")
190 |                 .help("Configure the logging format (example: 'rdkafka=trace')")
191 |                 .takes_value(true),
192 |         )
193 |         .get_matches()
194 | }
195 | 
196 | fn main() {
197 |     let matches = setup_args();
198 | 
199 |     utils::setup_logger(true, matches.value_of("log-conf"), "%F %T%z");
200 | 
201 |     let config_path = matches.value_of("conf").unwrap();
202 | 
203 |     info!("Kafka-view is starting up!");
204 |     if let Err(e) = run_kafka_web(config_path) {
205 |         format_error_chain!(e);
206 |         std::process::exit(1);
207 |     }
208 | }
209 | 
--------------------------------------------------------------------------------
/src/metadata.rs:
--------------------------------------------------------------------------------
1 | use byteorder::{BigEndian, ReadBytesExt};
2 | use rdkafka::config::ClientConfig;
3 | use rdkafka::consumer::{BaseConsumer, Consumer, EmptyConsumerContext};
4 | use rdkafka::error as rderror;
5 | use scheduled_executor::TaskGroup;
6 | 
7 | use cache::Cache;
8 | use config::{ClusterConfig, Config};
9 | use error::*;
10 | use utils::read_str;
11 | 
12 | use std::collections::HashMap;
13 | use std::error::Error;
14 | use std::fmt;
15 | use std::io::Cursor;
16 | use std::sync::{Arc, RwLock};
17 | 
18 | pub type MetadataConsumer = BaseConsumer<EmptyConsumerContext>;
19 | 
20 | lazy_static! {
21 |     pub static ref CONSUMERS: MetadataConsumerCache = MetadataConsumerCache::new();
22 | }
23 | 
24 | pub struct MetadataConsumerCache {
25 |     consumers: RwLock<HashMap<ClusterId, Arc<MetadataConsumer>>>,
26 | }
27 | 
28 | impl MetadataConsumerCache {
29 |     pub fn new() -> MetadataConsumerCache {
30 |         MetadataConsumerCache {
31 |             consumers: RwLock::new(HashMap::new()),
32 |         }
33 |     }
34 | 
35 |     pub fn get(&self, cluster_id: &ClusterId) -> Option<Arc<MetadataConsumer>> {
36 |         match self.consumers.read() {
37 |             Ok(consumers) => (*consumers).get(cluster_id).cloned(),
38 |             Err(_) => panic!("Poison error while reading consumer from cache"),
39 |         }
40 |     }
41 | 
42 |     pub fn get_err(&self, cluster_id: &ClusterId) -> Result<Arc<MetadataConsumer>> {
43 |         self.get(cluster_id)
44 |             .ok_or_else(|| ErrorKind::MissingConsumerError(cluster_id.clone()).into())
45 |     }
46 | 
47 |     pub fn get_or_init(
48 |         &self,
49 |         cluster_id: &ClusterId,
50 |         config: &ClusterConfig,
51 |     ) -> Result<Arc<MetadataConsumer>> {
52 |         if let Some(consumer) = self.get(cluster_id) {
53 |             return Ok(consumer);
54 |         }
55 | 
56 |         debug!("Creating metadata consumer for {}", cluster_id);
57 |         let consumer = ClientConfig::new()
58 |             .set("bootstrap.servers", &config.bootstrap_servers())
59 |             .set("api.version.request", "true")
60 |             .create::<MetadataConsumer>()
61 |             .chain_err(|| format!("Consumer creation failed for {}", cluster_id))?;
62 | 
63 |         let consumer_arc = Arc::new(consumer);
64 |         match self.consumers.write() {
65 |             Ok(mut consumers) => (*consumers).insert(cluster_id.clone(), consumer_arc.clone()),
66 |             Err(_) => panic!("Poison error while writing consumer to cache"),
67 |         };
68 | 
69 |         Ok(consumer_arc)
70 |     }
71 | }
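
As a usage note, here is a minimal sketch of how this consumer cache is meant to be driven; the cluster id below is made up, `get_or_init` is what the metadata task group calls on every run, and `get_err` is the fail-fast accessor used by the API layer:

```rust
// Sketch only; "example-cluster" is a hypothetical cluster id.
fn fetch_example(config: &ClusterConfig) -> Result<()> {
    let cluster_id = ClusterId::from("example-cluster");
    // First call creates and caches the consumer; later calls clone the Arc.
    let consumer = CONSUMERS.get_or_init(&cluster_id, config)?;
    let _metadata = consumer
        .fetch_metadata(None, 10_000)
        .chain_err(|| "Metadata fetch failed")?;
    // Elsewhere, fail fast when the cluster was never initialized:
    let _same_consumer = CONSUMERS.get_err(&cluster_id)?;
    Ok(())
}
```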
72 | 
73 | // TODO: Use structs?
74 | pub type BrokerId = i32;
75 | pub type TopicName = String;
76 | pub type TopicPartition = (TopicName, i32);
77 | 
78 | #[derive(Eq, PartialEq, Hash, Clone, Debug, PartialOrd, Ord, Serialize, Deserialize)]
79 | pub struct ClusterId(String);
80 | 
81 | impl ClusterId {
82 |     pub fn name(&self) -> &str {
83 |         &self.0
84 |     }
85 | }
86 | 
87 | impl<'a> From<&'a str> for ClusterId {
88 |     fn from(id: &'a str) -> ClusterId {
89 |         ClusterId(id.to_owned())
90 |     }
91 | }
92 | 
93 | impl From<String> for ClusterId {
94 |     fn from(id: String) -> ClusterId {
95 |         ClusterId(id)
96 |     }
97 | }
98 | 
99 | impl fmt::Display for ClusterId {
100 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
101 |         write!(f, "{}", self.0)
102 |     }
103 | }
104 | 
105 | //
106 | // ********** METADATA **********
107 | //
108 | 
109 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
110 | pub struct Partition {
111 |     pub id: i32,
112 |     pub leader: BrokerId,
113 |     pub replicas: Vec<BrokerId>,
114 |     pub isr: Vec<BrokerId>,
115 |     pub error: Option<String>,
116 | }
117 | 
118 | impl Partition {
119 |     fn new(
120 |         id: i32,
121 |         leader: BrokerId,
122 |         mut replicas: Vec<BrokerId>,
123 |         mut isr: Vec<BrokerId>,
124 |         error: Option<String>,
125 |     ) -> Partition {
126 |         replicas.sort();
127 |         isr.sort();
128 |         Partition {
129 |             id,
130 |             leader,
131 |             replicas,
132 |             isr,
133 |             error,
134 |         }
135 |     }
136 | }
137 | 
138 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
139 | pub struct Broker {
140 |     pub id: BrokerId,
141 |     pub hostname: String,
142 |     pub port: i32,
143 | }
144 | 
145 | impl Broker {
146 |     fn new(id: BrokerId, hostname: String, port: i32) -> Broker {
147 |         Broker { id, hostname, port }
148 |     }
149 | }
150 | 
151 | //
152 | // ********** REASSIGNMENT **********
153 | //
154 | 
155 | #[derive(Debug, Deserialize, Serialize)]
156 | pub struct Reassignment {
157 |     pub partitions: Vec<PartitionReassignment>,
158 | }
159 | 
160 | #[derive(Debug, Deserialize, Serialize)]
161 | pub struct PartitionReassignment {
162 |     pub topic: String,
163 |     pub partition: i32,
164 |     pub replicas: Vec<BrokerId>,
165 |     // replicas: &'a [BrokerId],  // This cannot be deserialized with zero-copy :(
166 | }
167 | 
168 | //
169 | // ********** GROUPS **********
170 | //
171 | 
172 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
173 | pub struct MemberAssignment {
174 |     pub topic: String,
175 |     pub partitions: Vec<i32>,
176 | }
177 | 
178 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
179 | pub struct GroupMember {
180 |     pub id: String,
181 |     pub client_id: String,
182 |     pub client_host: String,
183 |     #[serde(default)]
184 |     pub assignments: Vec<MemberAssignment>,
185 | }
186 | 
187 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
188 | pub struct Group {
189 |     pub name: String,
190 |     pub state: String,
191 |     pub members: Vec<GroupMember>,
192 | }
193 | 
194 | fn parse_member_assignment(payload_rdr: &mut Cursor<&[u8]>) -> Result<Vec<MemberAssignment>> {
195 |     let _version = payload_rdr
196 |         .read_i16::<BigEndian>()
197 |         .chain_err(|| "Failed to parse value version")?;
198 |     let assign_len = payload_rdr
199 |         .read_i32::<BigEndian>()
200 |         .chain_err(|| "Failed to parse assignment length")?;
201 |     let mut assigns = Vec::with_capacity(assign_len as usize);
202 |     for _ in 0..assign_len {
203 |         let topic = read_str(payload_rdr)
204 |             .chain_err(|| "Failed to parse assignment topic name")?
205 |             .to_owned();
206 |         let partition_len = payload_rdr
207 |             .read_i32::<BigEndian>()
208 |             .chain_err(|| "Failed to parse assignment partition length")?;
209 |         let mut partitions = Vec::with_capacity(partition_len as usize);
210 |         for _ in 0..partition_len {
211 |             let partition = payload_rdr
212 |                 .read_i32::<BigEndian>()
213 |                 .chain_err(|| "Failed to parse assignment partition")?;
214 |             partitions.push(partition);
215 |         }
216 |         assigns.push(MemberAssignment { topic, partitions })
217 |     }
218 |     Ok(assigns)
219 | }
220 | 
221 | fn fetch_groups(consumer: &MetadataConsumer, timeout_ms: i32) -> Result<Vec<Group>> {
222 |     let group_list = consumer
223 |         .fetch_group_list(None, timeout_ms)
224 |         .chain_err(|| "Failed to fetch consumer group list")?;
225 | 
226 |     let mut groups = Vec::new();
227 |     for rd_group in group_list.groups() {
228 |         let members = rd_group
229 |             .members()
230 |             .iter()
231 |             .map(|m| {
232 |                 let mut assigns = Vec::new();
233 |                 if rd_group.protocol_type() == "consumer" {
234 |                     if let Some(assignment) = m.assignment() {
235 |                         let mut payload_rdr = Cursor::new(assignment);
236 |                         assigns = parse_member_assignment(&mut payload_rdr)
237 |                             .expect("Parse member assignment failed");
238 |                     }
239 |                 }
240 |                 GroupMember {
241 |                     id: m.id().to_owned(),
242 |                     client_id: m.client_id().to_owned(),
243 |                     client_host: m.client_host().to_owned(),
244 |                     assignments: assigns,
245 |                 }
246 |             })
247 |             .collect::<Vec<_>>();
248 |         groups.push(Group {
249 |             name: rd_group.name().to_owned(),
250 |             state: rd_group.state().to_owned(),
251 |             members,
252 |         })
253 |     }
254 |     Ok(groups)
255 | }
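
The blob handed to `parse_member_assignment` is the consumer protocol's binary assignment layout: a big-endian `i16` version, an `i32` topic count, then per topic a length-prefixed string followed by an `i32`-counted partition list. A hand-built buffer makes that concrete; the topic name and partitions below are invented:

```rust
// Sketch: encode one assignment ("logs" -> partitions 0 and 3) and parse it back.
use byteorder::WriteBytesExt;
use std::io::Write;

let mut buf = Vec::new();
buf.write_i16::<BigEndian>(0).unwrap(); // version
buf.write_i32::<BigEndian>(1).unwrap(); // number of topic assignments
buf.write_i16::<BigEndian>(4).unwrap(); // topic name length
buf.write_all(b"logs").unwrap();        // hypothetical topic name
buf.write_i32::<BigEndian>(2).unwrap(); // number of partitions
buf.write_i32::<BigEndian>(0).unwrap();
buf.write_i32::<BigEndian>(3).unwrap();

let assigns = parse_member_assignment(&mut Cursor::new(buf.as_slice())).unwrap();
assert_eq!(assigns[0].topic, "logs");
assert_eq!(assigns[0].partitions, vec![0, 3]);
```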
256 | 
257 | pub struct MetadataFetchTaskGroup {
258 |     cache: Cache,
259 |     config: Config,
260 | }
261 | 
262 | impl MetadataFetchTaskGroup {
263 |     pub fn new(cache: &Cache, config: &Config) -> MetadataFetchTaskGroup {
264 |         MetadataFetchTaskGroup {
265 |             cache: cache.alias(),
266 |             config: config.clone(),
267 |         }
268 |     }
269 | 
270 |     fn fetch_data(&self, consumer: Arc<MetadataConsumer>, cluster_id: &ClusterId) -> Result<()> {
271 |         let metadata = consumer
272 |             .fetch_metadata(None, 120_000)
273 |             .chain_err(|| format!("Failed to fetch metadata from {}", cluster_id))?;
274 | 
275 |         // Brokers
276 |         let mut brokers = Vec::new();
277 |         for broker in metadata.brokers() {
278 |             brokers.push(Broker::new(
279 |                 broker.id(),
280 |                 broker.host().to_owned(),
281 |                 broker.port(),
282 |             ));
283 |         }
284 |         self.cache
285 |             .brokers
286 |             .insert(cluster_id.to_owned(), brokers)
287 |             .chain_err(|| "Failed to insert broker information in cache")?;
288 | 
289 |         // Topics
290 |         for topic in metadata.topics() {
291 |             let mut partitions = Vec::with_capacity(topic.partitions().len());
292 |             for p in topic.partitions() {
293 |                 let err_descr = p
294 |                     .error()
295 |                     .map(|e| rderror::RDKafkaError::from(e).description().to_owned());
296 |                 let partition = Partition::new(
297 |                     p.id(),
298 |                     p.leader(),
299 |                     p.replicas().to_owned(),
300 |                     p.isr().to_owned(),
301 |                     err_descr,
302 |                 );
303 |                 partitions.push(partition);
304 |             }
305 |             partitions.sort_by(|a, b| a.id.cmp(&b.id));
306 |             // TODO: do not update if it's already there?
307 |             self.cache
308 |                 .topics
309 |                 .insert((cluster_id.clone(), topic.name().to_owned()), partitions)
310 |                 .chain_err(|| "Failed to insert topic information in cache")?;
311 |         }
312 | 
313 |         // Groups
314 |         for group in fetch_groups(consumer.as_ref(), 30000)? {
315 |             self.cache
316 |                 .groups
317 |                 .insert((cluster_id.clone(), group.name.to_owned()), group)?;
318 |         }
319 | 
320 |         Ok(())
321 |     }
322 | }
323 | 
324 | impl TaskGroup for MetadataFetchTaskGroup {
325 |     type TaskId = ClusterId;
326 | 
327 |     fn get_tasks(&self) -> Vec<ClusterId> {
328 |         self.config.clusters.keys().cloned().collect::<Vec<_>>()
329 |     }
330 | 
331 |     fn execute(&self, cluster_id: ClusterId) {
332 |         match CONSUMERS.get_or_init(&cluster_id, self.config.cluster(&cluster_id).unwrap()) {
333 |             Ok(consumer) => {
334 |                 if let Err(e) = self.fetch_data(consumer, &cluster_id) {
335 |                     format_error_chain!(e);
336 |                 }
337 |             }
338 |             Err(e) => format_error_chain!(e),
339 |         }
340 |     }
341 | }
342 | 
--------------------------------------------------------------------------------
/src/metrics.rs:
--------------------------------------------------------------------------------
1 | use chrono::{DateTime, Utc};
2 | use hyper::Client;
3 | use regex::Regex;
4 | use scheduled_executor::TaskGroup;
5 | use serde_json;
6 | use serde_json::Value;
7 | 
8 | use std::collections::{HashMap, HashSet};
9 | use std::f64;
10 | use std::io::Read;
11 | 
12 | use cache::Cache;
13 | use config::Config;
14 | use error::*;
15 | use metadata::{Broker, ClusterId, TopicName};
16 | use utils::insert_at;
17 | 
18 | #[derive(PartialEq, Serialize, Deserialize, Debug, Copy, Clone)]
19 | pub struct PartitionMetrics {
20 |     pub size_bytes: f64,
21 | }
22 | 
23 | impl Default for PartitionMetrics {
24 |     fn default() -> PartitionMetrics {
25 |         PartitionMetrics { size_bytes: 0f64 }
26 |     }
27 | }
28 | 
29 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
30 | pub struct TopicBrokerMetrics {
31 |     pub m_rate_15: f64,
32 |     pub b_rate_15: f64,
33 |     pub partitions: Vec<PartitionMetrics>,
34 | }
35 | 
36 | impl Default for TopicBrokerMetrics {
37 |     fn default() -> Self {
38 |         TopicBrokerMetrics {
39 |             m_rate_15: 0f64,
40 |             b_rate_15: 0f64,
41 |             partitions: Vec::new(),
42 |         }
43 |     }
44 | }
45 | 
46 | #[derive(PartialEq, Serialize, Deserialize, Debug, Clone)]
47 | pub struct TopicMetrics {
48 |     pub brokers: HashMap<i32, TopicBrokerMetrics>,
49 | }
50 | 
51 | impl TopicMetrics {
52 |     pub fn new() -> TopicMetrics {
53 |         TopicMetrics {
54 |             brokers: HashMap::new(),
55 |         }
56 |     }
57 | 
58 |     pub fn aggregate_broker_metrics(&self) -> TopicBrokerMetrics {
59 |         self.brokers.iter().fold(
60 |             TopicBrokerMetrics::default(),
61 |             |mut acc, (_, broker_metrics)| {
62 |                 acc.m_rate_15 += broker_metrics.m_rate_15;
63 |                 acc.b_rate_15 += broker_metrics.b_rate_15;
64 |                 acc
65 |             },
66 |         )
67 |     }
68 | }
69 | 
70 | impl Default for TopicMetrics {
71 |     fn default() -> Self {
72 |         TopicMetrics::new()
73 |     }
74 | }
75 | 
76 | fn format_jolokia_path(hostname: &str, port: i32, filter: &str) -> String {
77 |     format!("http://{}:{}/jolokia/read/{}?ignoreErrors=true&includeStackTrace=false&maxCollectionSize=0",
78 |             hostname, port, filter)
79 | }
80 | 
81 | fn fetch_metrics_json(hostname: &str, port: i32, filter: &str) -> Result<Value> {
82 |     let client = Client::new();
83 |     let url = format_jolokia_path(hostname, port, filter);
84 |     let mut response = client.get(&url).send().chain_err(|| "Connection error")?;
85 | 
86 |     let mut body = String::new();
87 |     response
88 |         .read_to_string(&mut body)
89 |         .chain_err(|| "Could not read response to string")?;
90 | 
91 |     let value = serde_json::from_str(&body).chain_err(|| "Failed to parse JSON")?;
92 | 
93 |     Ok(value)
94 | }
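
To see what `fetch_metrics_json` actually requests: with a hypothetical broker running a Jolokia agent on port 8778, the byte-rate filter used later in this file expands to the following URL (host and port are invented):

```rust
let url = format_jolokia_path(
    "kafka-1.example.com", // hypothetical hostname
    8778,                  // hypothetical jolokia_port from the cluster config
    "kafka.server:name=BytesInPerSec,*,type=BrokerTopicMetrics/FifteenMinuteRate",
);
assert_eq!(
    url,
    "http://kafka-1.example.com:8778/jolokia/read/kafka.server:name=BytesInPerSec,*,\
     type=BrokerTopicMetrics/FifteenMinuteRate\
     ?ignoreErrors=true&includeStackTrace=false&maxCollectionSize=0"
);
```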
bail!("The provided Value is not a JSON object"), 100 | }; 101 | if let Some(v) = obj.get("value") { 102 | if let Some(value_obj) = v.as_object() { 103 | return Ok(value_obj); 104 | } else { 105 | bail!("'value' is not a JSON object"); 106 | } 107 | } else { 108 | bail!("Missing value"); 109 | } 110 | } 111 | 112 | fn parse_broker_rate_metrics(jolokia_json_response: &Value) -> Result> { 113 | let value_map = jolokia_response_get_value(jolokia_json_response) 114 | .chain_err(|| "Failed to extract 'value' from jolokia response.")?; 115 | let mut metrics = HashMap::new(); 116 | let re = Regex::new(r"topic=([^,]+),").unwrap(); 117 | 118 | for (mbean_name, value) in value_map.iter() { 119 | let topic = match re.captures(mbean_name) { 120 | Some(cap) => cap.get(1).unwrap().as_str(), 121 | None => "__TOTAL__", 122 | }; 123 | if let Value::Object(ref obj) = *value { 124 | match obj.get("FifteenMinuteRate") { 125 | Some(&Value::Number(ref rate)) => { 126 | metrics.insert(topic.to_owned(), rate.as_f64().unwrap_or(-1f64)) 127 | } 128 | None => bail!("Can't find key in metric"), 129 | _ => bail!("Unexpected metric type"), 130 | }; 131 | } 132 | } 133 | Ok(metrics) 134 | } 135 | 136 | fn parse_partition_size_metrics( 137 | jolokia_json_response: &Value, 138 | ) -> Result>> { 139 | let value_map = jolokia_response_get_value(jolokia_json_response) 140 | .chain_err(|| "Failed to extract 'value' from jolokia response.")?; 141 | let topic_re = Regex::new(r"topic=([^,]+),").unwrap(); 142 | let partition_re = Regex::new(r"partition=([^,]+),").unwrap(); 143 | 144 | let mut metrics = HashMap::new(); 145 | for (mbean_name, value) in value_map.iter() { 146 | let topic = topic_re 147 | .captures(mbean_name) 148 | .and_then(|cap| cap.get(1).map(|m| m.as_str())); 149 | let partition = partition_re 150 | .captures(mbean_name) 151 | .and_then(|cap| cap.get(1).map(|m| m.as_str())) 152 | .and_then(|p_str| p_str.parse::().ok()); 153 | if topic.is_none() || partition.is_none() { 154 | bail!("Can't parse topic and partition metadata from metric name"); 155 | } 156 | if let Value::Object(ref obj) = *value { 157 | match obj.get("Value") { 158 | Some(&Value::Number(ref size)) => { 159 | let partition_metrics = PartitionMetrics { 160 | size_bytes: size.as_f64().unwrap_or(-1f64), 161 | }; 162 | insert_at( 163 | metrics 164 | .entry(topic.unwrap().to_owned()) 165 | .or_insert_with(Vec::new), 166 | partition.unwrap() as usize, 167 | partition_metrics, 168 | PartitionMetrics::default(), 169 | ); 170 | } 171 | None => bail!("Can't find key in metric"), 172 | _ => bail!("Unexpected metric type"), 173 | }; 174 | } 175 | } 176 | Ok(metrics) 177 | } 178 | 179 | fn log_elapsed_time(task_name: &str, start: DateTime) { 180 | debug!( 181 | "{} completed in: {:.3}ms", 182 | task_name, 183 | Utc::now() 184 | .signed_duration_since(start) 185 | .num_microseconds() 186 | .unwrap() as f64 187 | / 1000f64 188 | ); 189 | } 190 | 191 | pub struct MetricsFetchTaskGroup { 192 | cache: Cache, 193 | config: Config, 194 | } 195 | 196 | impl MetricsFetchTaskGroup { 197 | pub fn new(cache: &Cache, config: &Config) -> MetricsFetchTaskGroup { 198 | MetricsFetchTaskGroup { 199 | cache: cache.alias(), 200 | config: config.clone(), 201 | } 202 | } 203 | 204 | fn fetch_metrics(&self, cluster_id: &ClusterId, broker: &Broker, port: i32) -> Result<()> { 205 | let start = Utc::now(); 206 | let byte_rate_json = fetch_metrics_json( 207 | &broker.hostname, 208 | port, 209 | "kafka.server:name=BytesInPerSec,*,type=BrokerTopicMetrics/FifteenMinuteRate", 210 | ) 211 | 
.chain_err(|| format!("Failed to fetch byte rate metrics from {}", broker.hostname))?; 212 | 213 | let byte_rate_metrics = parse_broker_rate_metrics(&byte_rate_json) 214 | .chain_err(|| "Failed to parse byte rate broker metrics")?; 215 | 216 | let msg_rate_json = fetch_metrics_json( 217 | &broker.hostname, 218 | port, 219 | "kafka.server:name=MessagesInPerSec,*,type=BrokerTopicMetrics/FifteenMinuteRate", 220 | ) 221 | .chain_err(|| { 222 | format!( 223 | "Failed to fetch message rate metrics from {}", 224 | broker.hostname 225 | ) 226 | })?; 227 | 228 | let msg_rate_metrics = parse_broker_rate_metrics(&msg_rate_json) 229 | .chain_err(|| "Failed to parse message rate broker metrics")?; 230 | let partition_metrics_json = fetch_metrics_json( 231 | &broker.hostname, 232 | port, 233 | "kafka.log:name=Size,*,type=Log/Value", 234 | ) 235 | .chain_err(|| { 236 | format!( 237 | "Failed to fetch partition size metrics from {}", 238 | broker.hostname 239 | ) 240 | })?; 241 | 242 | let pt_size_metrics = parse_partition_size_metrics(&partition_metrics_json) 243 | .chain_err(|| "Failed to parse partition size broker metrics")?; 244 | 245 | let topics = byte_rate_metrics 246 | .keys() 247 | .chain(msg_rate_metrics.keys()) 248 | .chain(pt_size_metrics.keys()) 249 | .collect::>(); 250 | 251 | for topic in topics { 252 | let mut topic_metrics = self 253 | .cache 254 | .metrics 255 | .get(&(cluster_id.clone(), topic.clone())) 256 | .unwrap_or_default(); 257 | 258 | let b_rate_15 = *byte_rate_metrics.get(topic).unwrap_or(&-1f64); 259 | let m_rate_15 = *msg_rate_metrics.get(topic).unwrap_or(&-1f64); 260 | let partitions = pt_size_metrics.get(topic).cloned().unwrap_or_else(Vec::new); 261 | topic_metrics.brokers.insert( 262 | broker.id, 263 | TopicBrokerMetrics { 264 | m_rate_15, 265 | b_rate_15, 266 | partitions, 267 | }, 268 | ); 269 | 270 | self.cache 271 | .metrics 272 | .insert((cluster_id.clone(), topic.clone()), topic_metrics) 273 | .chain_err(|| "Failed to insert to metrics")?; 274 | } 275 | log_elapsed_time("metrics fetch", start); 276 | Ok(()) 277 | } 278 | } 279 | 280 | impl TaskGroup for MetricsFetchTaskGroup { 281 | type TaskId = (ClusterId, Broker, i32); 282 | 283 | fn get_tasks(&self) -> Vec { 284 | self.cache.brokers.lock_iter(|iter| { 285 | let mut tasks = Vec::new(); 286 | for (cluster_id, brokers) in iter { 287 | let port = self 288 | .config 289 | .cluster(cluster_id) 290 | .and_then(|cluster_config| cluster_config.jolokia_port); 291 | if port.is_some() { 292 | for broker in brokers { 293 | tasks.push((cluster_id.clone(), broker.clone(), port.unwrap())); 294 | } 295 | } 296 | } 297 | debug!("New metrics tasks: {:?}", tasks); 298 | tasks 299 | }) 300 | } 301 | 302 | fn execute(&self, task_id: (ClusterId, Broker, i32)) { 303 | debug!("Starting fetch for {}: {}", task_id.0, task_id.1.id); 304 | if let Err(e) = self.fetch_metrics(&task_id.0, &task_id.1, task_id.2) { 305 | format_error_chain!(e); 306 | } 307 | } 308 | } 309 | -------------------------------------------------------------------------------- /src/offsets.rs: -------------------------------------------------------------------------------- 1 | use byteorder::{BigEndian, ReadBytesExt}; 2 | use futures::Stream; 3 | use rdkafka::config::{ClientConfig, TopicConfig}; 4 | use rdkafka::consumer::stream_consumer::StreamConsumer; 5 | use rdkafka::consumer::{Consumer, EmptyConsumerContext}; 6 | use rdkafka::error::KafkaError; 7 | use rdkafka::{Message, Offset, TopicPartitionList}; 8 | 9 | use cache::{Cache, OffsetsCache}; 10 | use 
1 | use byteorder::{BigEndian, ReadBytesExt};
2 | use futures::Stream;
3 | use rdkafka::config::{ClientConfig, TopicConfig};
4 | use rdkafka::consumer::stream_consumer::StreamConsumer;
5 | use rdkafka::consumer::{Consumer, EmptyConsumerContext};
6 | use rdkafka::error::KafkaError;
7 | use rdkafka::{Message, Offset, TopicPartitionList};
8 | 
9 | use cache::{Cache, OffsetsCache};
10 | use config::{ClusterConfig, Config};
11 | use error::*;
12 | use metadata::{ClusterId, TopicName};
13 | use utils::{insert_at, read_string};
14 | 
15 | use std::cmp;
16 | use std::collections::HashMap;
17 | use std::io::Cursor;
18 | use std::str;
19 | use std::thread;
20 | use std::time::{Duration, Instant};
21 | 
22 | #[derive(Debug)]
23 | enum ConsumerUpdate {
24 |     Metadata,
25 |     OffsetCommit {
26 |         group: String,
27 |         topic: String,
28 |         partition: i32,
29 |         offset: i64,
30 |     },
31 |     OffsetTombstone {
32 |         group: String,
33 |         topic: String,
34 |         partition: i32,
35 |     },
36 | }
37 | 
38 | fn parse_group_offset(
39 |     key_rdr: &mut Cursor<&[u8]>,
40 |     payload_rdr: &mut Cursor<&[u8]>,
41 | ) -> Result<ConsumerUpdate> {
42 |     let group = read_string(key_rdr).chain_err(|| "Failed to parse group name from key")?;
43 |     let topic = read_string(key_rdr).chain_err(|| "Failed to parse topic name from key")?;
44 |     let partition = key_rdr
45 |         .read_i32::<BigEndian>()
46 |         .chain_err(|| "Failed to parse partition from key")?;
47 |     if !payload_rdr.get_ref().is_empty() {
48 |         // payload is not empty
49 |         let _version = payload_rdr
50 |             .read_i16::<BigEndian>()
51 |             .chain_err(|| "Failed to parse value version")?;
52 |         let offset = payload_rdr
53 |             .read_i64::<BigEndian>()
54 |             .chain_err(|| "Failed to parse offset from value")?;
55 |         Ok(ConsumerUpdate::OffsetCommit {
56 |             group,
57 |             topic,
58 |             partition,
59 |             offset,
60 |         })
61 |     } else {
62 |         Ok(ConsumerUpdate::OffsetTombstone {
63 |             group,
64 |             topic,
65 |             partition,
66 |         })
67 |     }
68 | }
69 | 
70 | fn parse_message(key: &[u8], payload: &[u8]) -> Result<ConsumerUpdate> {
71 |     let mut key_rdr = Cursor::new(key);
72 |     let key_version = key_rdr
73 |         .read_i16::<BigEndian>()
74 |         .chain_err(|| "Failed to parse key version")?;
75 |     match key_version {
76 |         0 | 1 => parse_group_offset(&mut key_rdr, &mut Cursor::new(payload))
77 |             .chain_err(|| "Failed to parse group offset update"),
78 |         2 => Ok(ConsumerUpdate::Metadata),
79 |         _ => bail!("Key version not recognized"),
80 |     }
81 | }
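
For reference, here is a `__consumer_offsets` record in the version-1 layout handled above — key: (version: i16, group, topic, partition: i32); value: (version: i16, offset: i64, …) — built by hand with a made-up group, topic and offset:

```rust
// Sketch: encode an offset commit and decode it with parse_message.
use byteorder::WriteBytesExt;
use std::io::Write;

let mut key = Vec::new();
key.write_i16::<BigEndian>(1).unwrap(); // key schema version
key.write_i16::<BigEndian>(8).unwrap(); // group name length
key.write_all(b"my-group").unwrap();    // hypothetical group
key.write_i16::<BigEndian>(4).unwrap(); // topic name length
key.write_all(b"logs").unwrap();        // hypothetical topic
key.write_i32::<BigEndian>(0).unwrap(); // partition

let mut value = Vec::new();
value.write_i16::<BigEndian>(1).unwrap();  // value schema version
value.write_i64::<BigEndian>(42).unwrap(); // committed offset

match parse_message(&key, &value).unwrap() {
    ConsumerUpdate::OffsetCommit { group, topic, partition, offset } => {
        assert_eq!((group.as_str(), topic.as_str(), partition, offset),
                   ("my-group", "logs", 0, 42));
    }
    other => panic!("unexpected update: {:?}", other),
}
```

An empty value for the same key would instead decode to `ConsumerUpdate::OffsetTombstone`.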
82 | 
83 | fn create_consumer(
84 |     brokers: &str,
85 |     group_id: &str,
86 |     start_offsets: Option<Vec<i64>>,
87 | ) -> Result<StreamConsumer<EmptyConsumerContext>> {
88 |     let consumer = ClientConfig::new()
89 |         .set("group.id", group_id)
90 |         .set("bootstrap.servers", brokers)
91 |         .set("enable.partition.eof", "false")
92 |         .set("enable.auto.commit", "false")
93 |         .set("session.timeout.ms", "30000")
94 |         .set("api.version.request", "true")
95 |         //.set("fetch.message.max.bytes", "1024000") // Reduce memory usage
96 |         .set("queued.min.messages", "10000") // Reduce memory usage
97 |         .set("message.max.bytes", "10485760")
98 |         .set_default_topic_config(
99 |             TopicConfig::new()
100 |                 .set("auto.offset.reset", "smallest")
101 |                 .finalize(),
102 |         )
103 |         .create::<StreamConsumer<EmptyConsumerContext>>()
104 |         .chain_err(|| format!("Consumer creation failed: {}", brokers))?;
105 | 
106 |     match start_offsets {
107 |         Some(pos) => {
108 |             let mut tp_list = TopicPartitionList::new();
109 |             for (partition, &offset) in pos.iter().enumerate() {
110 |                 tp_list.add_partition_offset(
111 |                     "__consumer_offsets",
112 |                     partition as i32,
113 |                     Offset::Offset(offset),
114 |                 );
115 |             }
116 |             debug!(
117 |                 "Previous offsets found, assigning offsets explicitly: {:?}",
118 |                 tp_list
119 |             );
120 |             consumer
121 |                 .assign(&tp_list)
122 |                 .chain_err(|| "Failure during consumer assignment")?;
123 |         }
124 |         None => {
125 |             debug!("No previous offsets found, subscribing to topic");
126 |             consumer.subscribe(&["__consumer_offsets"]).chain_err(|| {
127 |                 format!("Can't subscribe to topic __consumer_offsets ({})", brokers)
128 |             })?;
129 |         }
130 |     }
131 | 
132 |     Ok(consumer)
133 | }
134 | 
135 | // we should really have some tests here
136 | fn update_global_cache(
137 |     cluster_id: &ClusterId,
138 |     local_cache: &HashMap<(String, String), Vec<i64>>,
139 |     cache: &OffsetsCache,
140 | ) {
141 |     for (&(ref group, ref topic), new_offsets) in local_cache {
142 |         // Consider a consuming iterator
143 |         // This logic is not needed if I store the consumer offset, right? Wrong!
144 |         if new_offsets.iter().any(|&offset| offset == -1) {
145 |             if let Some(mut existing_offsets) =
146 |                 cache.get(&(cluster_id.to_owned(), group.to_owned(), topic.to_owned()))
147 |             {
148 |                 // If the new offsets are not complete and old ones exist, do the merge
149 |                 vec_merge_in_place(&mut existing_offsets, &new_offsets, -1, cmp::max);
150 |                 // for i in 0..(cmp::max(offsets.len(), existing_offsets.len())) {
151 |                 //     let new_offset = cmp::max(
152 |                 //         offsets.get(i).cloned().unwrap_or(-1),
153 |                 //         existing_offsets.get(i).cloned().unwrap_or(-1));
154 |                 //     insert_at(&mut existing_offsets, i, new_offset, -1);
155 |                 // }
156 |                 let _ = cache.insert(
157 |                     (cluster_id.to_owned(), group.to_owned(), topic.to_owned()),
158 |                     existing_offsets,
159 |                 );
160 |                 continue;
161 |             }
162 |         }
163 |         // TODO: log errors
164 |         let _ = cache.insert(
165 |             (cluster_id.to_owned(), group.to_owned(), topic.to_owned()),
166 |             new_offsets.clone(),
167 |         );
168 |     }
169 | }
170 | 
171 | fn commit_offset_position_to_array(tp_list: TopicPartitionList) -> Vec<i64> {
172 |     let tp_elements = tp_list.elements_for_topic("__consumer_offsets");
173 |     let mut offsets = vec![0; tp_elements.len()];
174 |     for tp in &tp_elements {
175 |         offsets[tp.partition() as usize] = tp.offset().to_raw();
176 |     }
177 |     offsets
178 | }
179 | 
180 | fn consume_offset_topic(
181 |     cluster_id: ClusterId,
182 |     consumer: StreamConsumer<EmptyConsumerContext>,
183 |     cache: &Cache,
184 | ) -> Result<()> {
185 |     let mut local_cache = HashMap::new();
186 |     let mut last_dump = Instant::now();
187 | 
188 |     debug!("Starting offset consumer loop for {:?}", cluster_id);
189 | 
190 |     for message in consumer.start_with(Duration::from_millis(200), true).wait() {
191 |         match message {
192 |             Ok(Ok(m)) => {
193 |                 let key = m.key().unwrap_or(&[]);
194 |                 let payload = m.payload().unwrap_or(&[]);
195 |                 match parse_message(key, payload) {
196 |                     Ok(ConsumerUpdate::OffsetCommit {
197 |                         group,
198 |                         topic,
199 |                         partition,
200 |                         offset,
201 |                     }) => {
202 |                         let mut offsets = local_cache
203 |                             .entry((group.to_owned(), topic.to_owned()))
204 |                             .or_insert_with(Vec::new);
205 |                         insert_at(&mut offsets, partition as usize, offset, -1);
206 |                     }
207 |                     Ok(_) => {}
208 |                     Err(e) => format_error_chain!(e),
209 |                 };
210 |             }
211 |             Ok(Err(KafkaError::NoMessageReceived)) => {}
212 |             Ok(Err(e)) => warn!("Kafka error: {} {:?}", cluster_id, e),
213 |             Err(e) => warn!("Can't receive data from stream: {:?}", e),
214 |         };
215 |         // Update the cache if needed
216 |         if (Instant::now() - last_dump) > Duration::from_secs(10) {
217 |             trace!(
218 |                 "Dumping local offset cache ({}: {} updates)",
219 |                 cluster_id,
220 |                 local_cache.len()
221 |             );
222 |             update_global_cache(&cluster_id, &local_cache, &cache.offsets);
223 |             // Consumer position is not up to date after start, so we have to merge with the
224 |             // existing offsets and take the largest.
225 |             let res = consumer
226 |                 .position()
227 |                 .map(|current_position| {
228 |                     let previous_position_vec = cache
229 |                         .internal_offsets
230 |                         .get(&cluster_id)
231 |                         .unwrap_or_else(Vec::new);
232 |                     let mut current_position_vec =
233 |                         commit_offset_position_to_array(current_position);
234 |                     vec_merge_in_place(
235 |                         &mut current_position_vec,
236 |                         &previous_position_vec,
237 |                         Offset::Invalid.to_raw(),
238 |                         cmp::max,
239 |                     );
240 |                     cache
241 |                         .internal_offsets
242 |                         .insert(cluster_id.clone(), current_position_vec)
243 |                 })
244 |                 .chain_err(|| "Failed to store consumer offset position")?;
245 |             if let Err(e) = res {
246 |                 format_error_chain!(e);
247 |             }
248 |             local_cache = HashMap::with_capacity(local_cache.len());
249 |             last_dump = Instant::now();
250 |         }
251 |     }
252 |     Ok(())
253 | }
254 | 
255 | pub fn vec_merge_in_place<T: Copy, F>(vec1: &mut Vec<T>, vec2: &[T], default: T, merge_fn: F)
256 | where
257 |     F: Fn(T, T) -> T,
258 | {
259 |     let new_len = cmp::max(vec1.len(), vec2.len());
260 |     vec1.resize(new_len, default);
261 |     for i in 0..vec1.len() {
262 |         vec1[i] = merge_fn(
263 |             vec1.get(i).unwrap_or(&default).to_owned(),
264 |             vec2.get(i).unwrap_or(&default).to_owned(),
265 |         );
266 |     }
267 | }
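
The merge semantics matter here: slots missing on either side are treated as the default (-1, or `Offset::Invalid`), and each position keeps the larger value, so a partial update can never move a known offset backwards. With made-up numbers:

```rust
use std::cmp;

let mut stored = vec![10, -1, 7]; // offsets known so far (hypothetical)
let update = [12, 5];             // partial update, covers partitions 0 and 1 only
vec_merge_in_place(&mut stored, &update, -1, cmp::max);
assert_eq!(stored, vec![12, 5, 7]); // per-position maximum, length preserved
```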
268 | 
269 | //pub fn vec_merge<T: Copy, F>(vec1: Vec<T>, vec2: Vec<T>, default: T, merge: F) -> Vec<T>
270 | //    where F: Fn(T, T) -> T
271 | //{
272 | //    (0..cmp::max(vec1.len(), vec2.len()))
273 | //        .map(|index|
274 | //            merge(
275 | //                vec1.get(index).unwrap_or(&default).to_owned(),
276 | //                vec2.get(index).unwrap_or(&default).to_owned()))
277 | //        .collect::<Vec<T>>()
278 | //}
279 | 
280 | pub fn run_offset_consumer(
281 |     cluster_id: &ClusterId,
282 |     cluster_config: &ClusterConfig,
283 |     config: &Config,
284 |     cache: &Cache,
285 | ) -> Result<()> {
286 |     let start_position = cache.internal_offsets.get(cluster_id);
287 |     let consumer = create_consumer(
288 |         &cluster_config.bootstrap_servers(),
289 |         &config.consumer_offsets_group_id,
290 |         start_position,
291 |     )
292 |     .chain_err(|| format!("Failed to create offset consumer for {}", cluster_id))?;
293 | 
294 |     let cluster_id_clone = cluster_id.clone();
295 |     let cache_alias = cache.alias();
296 |     let _ = thread::Builder::new()
297 |         .name("offset-consumer".to_owned())
298 |         .spawn(move || {
299 |             if let Err(e) = consume_offset_topic(cluster_id_clone, consumer, &cache_alias) {
300 |                 format_error_chain!(e);
301 |             }
302 |         })
303 |         .chain_err(|| "Failed to start offset consumer thread")?;
304 | 
305 |     Ok(())
306 | }
307 | 
308 | pub trait OffsetStore {
309 |     fn offsets_by_cluster(
310 |         &self,
311 |         cluster_id: &ClusterId,
312 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)>;
313 |     fn offsets_by_cluster_topic(
314 |         &self,
315 |         cluster_id: &ClusterId,
316 |         topic_name: &str,
317 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)>;
318 |     fn offsets_by_cluster_group(
319 |         &self,
320 |         cluster_id: &ClusterId,
321 |         group_name: &str,
322 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)>;
323 | }
324 | 
325 | impl OffsetStore for Cache {
326 |     fn offsets_by_cluster(
327 |         &self,
328 |         cluster: &ClusterId,
329 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)> {
330 |         self.offsets.filter_clone(|&(ref c, _, _)| c == cluster)
331 |     }
332 | 
333 |     fn offsets_by_cluster_topic(
334 |         &self,
335 |         cluster: &ClusterId,
336 |         topic: &str,
337 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)> {
338 |         self.offsets
339 |             .filter_clone(|&(ref c, _, ref t)| c == cluster && t == topic)
340 |     }
341 | 
342 |     fn offsets_by_cluster_group(
343 |         &self,
344 |         cluster: &ClusterId,
345 |         group: &str,
346 |     ) -> Vec<((ClusterId, String, TopicName), Vec<i64>)> {
347 |         self.offsets
348 |             .filter_clone(|&(ref c, ref g, _)| c == cluster && g == group)
349 |     }
350 | }
351 | 
--------------------------------------------------------------------------------
/src/utils.rs:
--------------------------------------------------------------------------------
1 | use brotli;
2 | use byteorder::{BigEndian, ReadBytesExt};
3 | use chrono::Local;
4 | use env_logger::Builder;
5 | use log::{LevelFilter, Record};
6 | use rocket::http::{ContentType, Status};
7 | use rocket::response::{self, Responder};
8 | use rocket::{fairing, Data, Request, Response};
9 | use serde_json;
10 | 
11 | use std::env;
12 | use std::io::{self, BufRead, Cursor, Write};
13 | use std::str;
14 | use std::thread;
15 | 
16 | use env_logger::fmt::Formatter;
17 | use error::*;
18 | 
19 | pub fn setup_logger(log_thread: bool, rust_log: Option<&str>, date_format: &str) {
20 |     let date_format = date_format.to_owned();
21 |     let output_format = move |buffer: &mut Formatter, record: &Record| {
22 |         let thread_name = if log_thread {
23 |             format!("({}) ", thread::current().name().unwrap_or("unknown"))
24 |         } else {
25 |             "".to_string()
26 |         };
27 |         let date = Local::now().format(&date_format).to_string();
28 |         writeln!(
29 |             buffer,
30 |             "{}: {}{} - {} - {}",
31 |             date,
32 |             thread_name,
33 |             record.level(),
34 |             record.target(),
35 |             record.args()
36 |         )
37 |     };
38 | 
39 |     let mut builder = Builder::new();
40 |     builder
41 |         .format(output_format)
42 |         .filter(None, LevelFilter::Info);
43 |     if env::var("ROCKET_ENV")
44 |         .map(|var| !var.starts_with("dev"))
45 |         .unwrap_or(false)
46 |     {
47 |         // _ is used in Rocket as a special target for debugging purposes
48 |         builder.filter(Some("_"), LevelFilter::Error);
49 |     }
50 | 
51 |     rust_log.map(|conf| builder.parse_filters(conf));
52 | 
53 |     builder.init();
54 | }
55 | 
56 | macro_rules! format_error_chain {
57 |     ($err: expr) => {{
58 |         error!("error: {}", $err);
59 |         for e in $err.iter().skip(1) {
60 |             error!("caused by: {}", e);
61 |         }
62 |         if let Some(backtrace) = $err.backtrace() {
63 |             error!("backtrace: {:?}", backtrace);
64 |         }
65 |     }};
66 | }
67 | 
68 | macro_rules! time {
69 |     ($title:expr, $msg:expr) => {{
70 |         use chrono;
71 |         let start_time = chrono::Utc::now();
72 |         let ret = $msg;
73 |         let elapsed_micros = chrono::Utc::now()
74 |             .signed_duration_since(start_time)
75 |             .num_microseconds()
76 |             .unwrap() as f32;
77 |         debug!(
78 |             "Elapsed time while {}: {:.3}ms",
79 |             $title,
80 |             elapsed_micros / 1000f32
81 |         );
82 |         ret
83 |     }};
84 | }
85 | 
86 | /// Given a vector, inserts `value` at the desired position `pos`, filling the gap
87 | /// with `default`s if needed.
88 | pub fn insert_at<T: Copy>(vector: &mut Vec<T>, pos: usize, value: T, default: T) {
89 |     for _ in vector.len()..=pos {
90 |         vector.push(default);
91 |     }
92 |     vector[pos] = value;
93 | }
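
Since both the metrics and the offsets code lean on this helper, a quick illustration of the gap-filling behavior (the values are arbitrary):

```rust
let mut v = vec![1, 2];
insert_at(&mut v, 4, 9, -1); // pads indices 2..4 with -1, then writes index 4
assert_eq!(v, vec![1, 2, -1, -1, 9]);

let mut w = vec![1, 2, 3];
insert_at(&mut w, 1, 9, -1); // in-range positions are simply overwritten
assert_eq!(w, vec![1, 9, 3]);
```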
94 | 
95 | /// Wraps a JSON value and implements a responder for it, with support for brotli compression.
96 | #[allow(dead_code)]
97 | pub struct CompressedJSON(pub serde_json::Value);
98 | 
99 | impl Responder<'static> for CompressedJSON {
100 |     fn respond_to(self, req: &Request) -> response::Result<'static> {
101 |         let json = serde_json::to_vec(&self.0).unwrap();
102 |         let reader = io::Cursor::new(json);
103 |         let headers = req.headers();
104 |         if headers.contains("Accept-Encoding") && headers.get("Accept-Encoding").any(|e| e.contains("br")) {
105 |             Ok(Response::build()
106 |                 .status(Status::Ok)
107 |                 .header(ContentType::JSON)
108 |                 .raw_header("Content-Encoding", "br")
109 |                 .streamed_body(brotli::CompressorReader::new(reader, 4096, 3, 20))
110 |                 .finalize())
111 |         } else {
112 |             Ok(Response::build()
113 |                 .status(Status::Ok)
114 |                 .header(ContentType::JSON)
115 |                 .streamed_body(reader)
116 |                 .finalize())
117 |         }
118 |     }
119 | }
120 | 
121 | pub fn read_str<'a>(rdr: &'a mut Cursor<&[u8]>) -> Result<&'a str> {
122 |     let len = (rdr.read_i16::<BigEndian>()).chain_err(|| "Failed to parse string len")? as usize;
123 |     let pos = rdr.position() as usize;
124 |     let slice = str::from_utf8(&rdr.get_ref()[pos..(pos + len)])
125 |         .chain_err(|| "String is not valid UTF-8")?;
126 |     rdr.consume(len);
127 |     Ok(slice)
128 | }
129 | 
130 | pub fn read_string(rdr: &mut Cursor<&[u8]>) -> Result<String> {
131 |     read_str(rdr).map(str::to_string)
132 | }
133 | 
134 | // GZip compression fairing
135 | pub struct GZip;
136 | 
137 | impl fairing::Fairing for GZip {
138 |     fn info(&self) -> fairing::Info {
139 |         fairing::Info {
140 |             name: "GZip compression",
141 |             kind: fairing::Kind::Response,
142 |         }
143 |     }
144 | 
145 |     fn on_response(&self, request: &Request, response: &mut Response) {
146 |         use flate2::{Compression, FlateReadExt};
147 |         use std::io::{Cursor, Read};
148 |         let headers = request.headers();
149 |         if headers
150 |             .get("Accept-Encoding")
151 |             .any(|e| e.to_lowercase().contains("gzip"))
152 |         {
153 |             response.body_bytes().and_then(|body| {
154 |                 let mut enc = body.gz_encode(Compression::Default);
155 |                 let mut buf = Vec::with_capacity(body.len());
156 |                 enc.read_to_end(&mut buf)
157 |                     .map(|_| {
158 |                         response.set_sized_body(Cursor::new(buf));
159 |                         response.set_raw_header("Content-Encoding", "gzip");
160 |                     })
161 |                     .map_err(|e| eprintln!("{}", e))
162 |                     .ok()
163 |             });
164 |         }
165 |     }
166 | }
167 | 
168 | // Request logging fairing
169 | pub struct RequestLogger;
170 | 
171 | impl fairing::Fairing for RequestLogger {
172 |     fn info(&self) -> fairing::Info {
173 |         fairing::Info {
174 |             name: "User request logger",
175 |             kind: fairing::Kind::Request,
176 |         }
177 |     }
178 | 
179 |     fn on_request(&self, request: &mut Request, _: &Data) {
180 |         let uri = request.uri().path();
181 |         if !uri.starts_with("/api") && !uri.starts_with("/public") {
182 |             info!("User request: {}", uri);
183 |         }
184 |     }
185 | }
--------------------------------------------------------------------------------
/src/web_server/api.rs:
--------------------------------------------------------------------------------
1 | use futures::{future, Future};
2 | use futures_cpupool::Builder;
3 | use rdkafka::consumer::Consumer;
4 | use rdkafka::error::KafkaResult;
5 | use regex::Regex;
6 | use rocket::http::RawStr;
7 | use rocket::State;
8 | 
9 | use cache::Cache;
10 | use config::Config;
11 | use error::*;
12 | use live_consumer::LiveConsumerStore;
13 | use metadata::{ClusterId, TopicName, TopicPartition, CONSUMERS};
14 | use offsets::OffsetStore;
15 | use web_server::pages::omnisearch::OmnisearchFormParams;
16 | use zk::ZK;
17 | 
18 | use std::collections::{HashMap, HashSet};
19 | 
20 | //
21 | // ********** TOPICS LIST **********
22 | //
23 | 
24 | #[derive(Serialize)]
25 | struct TopicDetails {
26 |     topic_name: String,
27 |     partition_count: usize,
28 |     errors: String,
29 |     b_rate_15: f64,
30 |     m_rate_15: f64,
31 | }
32 | 
33 | #[get("/api/clusters/<cluster_id>/topics")]
34 | pub fn cluster_topics(cluster_id: ClusterId, cache: State<Cache>) -> String {
35 |     let brokers = cache.brokers.get(&cluster_id);
36 |     if brokers.is_none() {
37 |         // TODO: Improve here
38 |         return empty();
39 |     }
40 | 
41 |     let result_data = cache
42 |         .topics
43 |         .filter_clone(|&(ref c, _)| c == &cluster_id)
44 |         .into_iter()
45 |         .map(|((_, topic_name), partitions)| {
46 |             let metrics = cache
47 |                 .metrics
48 |                 .get(&(cluster_id.clone(), topic_name.to_owned()))
49 |                 .unwrap_or_default()
50 |                 .aggregate_broker_metrics();
51 |             TopicDetails {
52 |                 topic_name,
53 |                 partition_count: partitions.len(),
54 |                 errors: partitions
55 |                     .into_iter()
56 |                     .filter_map(|p| p.error)
57 |                     .collect::<Vec<_>>()
58 |                     .join(","),
59 |                 b_rate_15: metrics.b_rate_15.round(),
60 |                 m_rate_15: metrics.m_rate_15.round(),
61 |             }
62 |         })
63 |         .collect::<Vec<_>>();
64 | 
65 |     json!({ "data": result_data }).to_string()
66 | }
67 | 
68 | //
69 | // ********** BROKERS LIST **********
70 | //
71 | 
72 | #[get("/api/clusters/<cluster_id>/brokers")]
73 | pub fn brokers(cluster_id: ClusterId, cache: State<Cache>) -> String {
74 |     let brokers = cache.brokers.get(&cluster_id);
75 |     if brokers.is_none() {
76 |         // TODO: Improve here
77 |         return empty();
78 |     }
79 | 
80 |     let brokers = brokers.unwrap();
81 |     let broker_metrics = cache
82 |         .metrics
83 |         .get(&(cluster_id.to_owned(), "__TOTAL__".to_owned()))
84 |         .unwrap_or_default();
85 |     let mut result_data = Vec::with_capacity(brokers.len());
86 |     for broker in brokers {
87 |         let metric = broker_metrics
88 |             .brokers
89 |             .get(&broker.id)
90 |             .cloned()
91 |             .unwrap_or_default();
92 |         result_data.push(json!((
93 |             broker.id,
94 |             broker.hostname,
95 |             metric.b_rate_15.round(),
96 |             metric.m_rate_15.round()
97 |         )));
98 |     }
99 | 
100 |     json!({ "data": result_data }).to_string()
101 | }
102 | 
103 | //
104 | // ********** GROUP **********
105 | //
106 | 
107 | #[derive(Debug)]
108 | struct GroupInfo {
109 |     state: String,
110 |     members: usize,
111 |     topics: HashSet<TopicName>,
112 | }
113 | 
114 | impl GroupInfo {
115 |     fn new(state: String, members: usize) -> GroupInfo {
116 |         GroupInfo {
117 |             state,
118 |             members,
119 |             topics: HashSet::new(),
120 |         }
121 |     }
122 | 
123 |     fn new_empty() -> GroupInfo {
124 |         GroupInfo {
125 |             state: "Offsets only".to_owned(),
126 |             members: 0,
127 |             topics: HashSet::new(),
128 |         }
129 |     }
130 | 
131 |     fn add_topic(&mut self, topic_name: TopicName) {
132 |         self.topics.insert(topic_name);
133 |     }
134 | }
135 | 
136 | // Builds the list of consumer groups matching `filter`, merging groups registered
137 | // on the brokers with groups that only have stored offsets.
138 | // TODO: add limit
139 | fn build_group_list<F>(cache: &Cache, filter: F) -> HashMap<(ClusterId, String), GroupInfo>
140 | where
141 |     F: Fn(&ClusterId, &String) -> bool,
142 | {
143 |     let mut groups: HashMap<(ClusterId, String), GroupInfo> = cache.groups.lock_iter(|iter| {
144 |         iter.filter(|&(&(ref c, ref g), _)| filter(c, g))
145 |             .map(|(&(ref c, _), g)| {
146 |                 (
147 |                     (c.clone(), g.name.clone()),
148 |                     GroupInfo::new(g.state.clone(), g.members.len()),
149 |                 )
150 |             })
151 |             .collect()
152 |     });
153 | 
154 |     let offsets = cache
155 |         .offsets
156 |         .filter_clone_k(|&(ref c, ref g, _)| filter(c, g));
157 |     for (cluster_id, group, t) in offsets {
158 |         groups
159 |             .entry((cluster_id, group))
160 |             .or_insert_with(GroupInfo::new_empty)
161 |             .add_topic(t);
162 |     }
163 | 
164 |     groups
165 | }
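
The endpoints below all reuse this helper and differ only in the filter closure; for instance (the cluster id and group prefix here are hypothetical):

```rust
// Per-cluster listing, as in cluster_groups below:
let by_cluster = build_group_list(cache.inner(), |c, _| c == &cluster_id);
// Name search across all clusters, as in consumer_search further down:
let by_prefix = build_group_list(cache.inner(), |_, g| g.starts_with("billing-"));
```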
166 | 
167 | #[get("/api/clusters/<cluster_id>/groups")]
168 | pub fn cluster_groups(cluster_id: ClusterId, cache: State<Cache>) -> String {
169 |     let brokers = cache.brokers.get(&cluster_id);
170 |     if brokers.is_none() {
171 |         // TODO: Improve here
172 |         return empty();
173 |     }
174 | 
175 |     let groups = build_group_list(cache.inner(), |c, _| c == &cluster_id);
176 | 
177 |     let mut result_data = Vec::with_capacity(groups.len());
178 |     for ((_cluster_id, group_name), info) in groups {
179 |         result_data.push(json!((
180 |             group_name,
181 |             info.state,
182 |             info.members,
183 |             info.topics.len()
184 |         )));
185 |     }
186 | 
187 |     json!({ "data": result_data }).to_string()
188 | }
189 | 
190 | #[get("/api/clusters/<cluster_id>/topics/<topic_name>/groups")]
191 | pub fn topic_groups(cluster_id: ClusterId, topic_name: &RawStr, cache: State<Cache>) -> String {
192 |     let brokers = cache.brokers.get(&cluster_id);
193 |     if brokers.is_none() {
194 |         // TODO: Improve here
195 |         return empty();
196 |     }
197 | 
198 |     let groups = build_group_list(cache.inner(), |c, _| c == &cluster_id);
199 | 
200 |     let mut result_data = Vec::with_capacity(groups.len());
201 |     for ((_cluster_id, group_name), info) in groups {
202 |         if !info.topics.contains(&topic_name.to_string()) {
203 |             continue;
204 |         }
205 |         result_data.push(json!((
206 |             group_name,
207 |             info.state,
208 |             info.members,
209 |             info.topics.len()
210 |         )));
211 |     }
212 | 
213 |     json!({ "data": result_data }).to_string()
214 | }
215 | 
216 | #[get("/api/clusters/<cluster_id>/groups/<group_name>/members")]
217 | pub fn group_members(cluster_id: ClusterId, group_name: &RawStr, cache: State<Cache>) -> String {
218 |     let group = cache
219 |         .groups
220 |         .get(&(cluster_id.clone(), group_name.to_string()));
221 |     if group.is_none() {
222 |         // TODO: Improve here
223 |         return empty();
224 |     }
225 | 
226 |     let group = group.unwrap();
227 | 
228 |     let mut result_data = Vec::with_capacity(group.members.len());
229 |     for member in group.members {
230 |         let assigns = member
231 |             .assignments
232 |             .iter()
233 |             .map(|assign| {
234 |                 format!(
235 |                     "{}/{}",
236 |                     assign.topic,
237 |                     assign
238 |                         .partitions
239 |                         .iter()
240 |                         .map(i32::to_string)
241 |                         .collect::<Vec<_>>()
242 |                         .join(",")
243 |                 )
244 |             })
245 |             .collect::<Vec<_>>()
246 |             .join("\n");
247 |         result_data.push(json!((
248 |             member.id,
249 |             member.client_id,
250 |             member.client_host,
251 |             assigns
252 |         )));
253 |     }
254 | 
255 |     json!({ "data": result_data }).to_string()
256 | }
257 | 
258 | #[get("/api/clusters/<cluster_id>/groups/<group_name>/offsets")]
259 | pub fn group_offsets(cluster_id: ClusterId, group_name: &RawStr, cache: State<Cache>) -> String {
260 |     let offsets = cache.offsets_by_cluster_group(&cluster_id, group_name.as_str());
261 | 
262 |     let wms = time!("fetching wms", fetch_watermarks(&cluster_id, &offsets));
263 |     let wms = match wms {
264 |         Ok(wms) => wms,
265 |         Err(e) => {
266 |             error!("Error while fetching watermarks: {}", e);
267 |             return empty();
268 |         }
269 |     };
270 | 
271 |     let mut result_data = Vec::with_capacity(offsets.len());
272 |     for ((_cluster_id, _group, topic), partitions) in offsets {
273 |         for (partition_id, &curr_offset) in partitions.iter().enumerate() {
274 |             let (low, high) = match wms.get(&(topic.clone(), partition_id as i32)) {
275 |                 Some(&Ok((low_mark, high_mark))) => (low_mark, high_mark),
276 |                 _ => (-1, -1),
277 |             };
278 |             let (lag_shown, percentage_shown) = match (high - low, high - curr_offset) {
279 |                 (0, _) => ("Empty topic".to_owned(), "0.0%".to_owned()),
280 |                 (size, lag) if lag > size => ("Out of retention".to_owned(), "".to_owned()),
281 |                 (size, lag) => (
282 |                     lag.to_string(),
283 |                     format!("{:.1}%", (lag as f64) / (size as f64) * 100.0),
284 |                 ),
285 |             };
286 |             result_data.push(json!((
287 |                 topic.clone(),
288 |                 partition_id,
289 |                 high - low,
290 |                 low,
291 |                 high,
292 |                 curr_offset,
293 |                 lag_shown,
294 |                 percentage_shown
295 |             )));
296 |         }
297 |     }
298 | 
299 |     json!({ "data": result_data }).to_string()
300 | }
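
To make the lag arithmetic above concrete, here it is with hypothetical watermarks and a hypothetical committed offset:

```rust
let (low, high, curr) = (100i64, 600, 450);
let (size, lag) = (high - low, high - curr);
assert_eq!((size, lag), (500, 150)); // 500 messages retained, 150 still to read
assert_eq!(format!("{:.1}%", lag as f64 / size as f64 * 100.0), "30.0%");

let stale = 50; // committed offset below the low watermark
assert!(high - stale > size); // rendered as "Out of retention"
```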
301 | 
302 | type ClusterGroupOffsets = ((ClusterId, String, TopicName), Vec<i64>);
303 | 
304 | fn fetch_watermarks(
305 |     cluster_id: &ClusterId,
306 |     offsets: &[ClusterGroupOffsets],
307 | ) -> Result<HashMap<TopicPartition, KafkaResult<(i64, i64)>>> {
308 |     let consumer = CONSUMERS.get_err(cluster_id)?;
309 | 
310 |     let cpu_pool = Builder::new().pool_size(32).create();
311 | 
312 |     let mut futures = Vec::new();
313 | 
314 |     for &((_, _, ref topic), ref partitions) in offsets {
315 |         for partition_id in 0..partitions.len() {
316 |             let consumer_clone = consumer.clone();
317 |             let topic_clone = topic.clone();
318 |             let wm_future = cpu_pool.spawn_fn(move || {
319 |                 let wms = consumer_clone.fetch_watermarks(&topic_clone, partition_id as i32, 10000);
320 |                 Ok::<_, ()>(((topic_clone, partition_id as i32), wms)) // never fails
321 |             });
322 |             futures.push(wm_future);
323 |         }
324 |     }
325 | 
326 |     let watermarks = future::join_all(futures)
327 |         .wait()
328 |         .unwrap()
329 |         .into_iter()
330 |         .collect::<HashMap<_, _>>();
331 | 
332 |     Ok(watermarks)
333 | }
334 | 
335 | //
336 | // ********** TOPIC TOPOLOGY **********
337 | //
338 | 
339 | #[get("/api/clusters/<cluster_id>/topics/<topic_name>/topology")]
340 | pub fn topic_topology(cluster_id: ClusterId, topic_name: &RawStr, cache: State<Cache>) -> String {
341 |     let partitions = cache
342 |         .topics
343 |         .get(&(cluster_id.to_owned(), topic_name.to_string()));
344 |     if partitions.is_none() {
345 |         return empty();
346 |     }
347 | 
348 |     let topic_metrics = cache
349 |         .metrics
350 |         .get(&(cluster_id.clone(), topic_name.to_string()))
351 |         .unwrap_or_default();
352 |     let partitions = partitions.unwrap();
353 | 
354 |     let mut result_data = Vec::with_capacity(partitions.len());
355 |     for p in partitions {
356 |         let partition_metrics = topic_metrics
357 |             .brokers
358 |             .get(&p.leader)
359 |             .and_then(|broker_metrics| broker_metrics.partitions.get(p.id as usize))
360 |             .cloned()
361 |             .unwrap_or_default();
362 |         result_data.push(json!((
363 |             p.id,
364 |             partition_metrics.size_bytes,
365 |             p.leader,
366 |             p.replicas,
367 |             p.isr,
368 |             p.error
369 |         )));
370 |     }
371 | 
372 |     json!({ "data": result_data }).to_string()
373 | }
374 | 
375 | //
376 | // ********** SEARCH **********
377 | //
378 | 
379 | #[get("/api/search/consumer?<search..>")]
380 | pub fn consumer_search(search: OmnisearchFormParams, cache: State<Cache>) -> String {
381 |     let groups = if search.regex {
382 |         Regex::new(&search.string)
383 |             .map(|r| build_group_list(&cache, |_, g| r.is_match(g)))
384 |             .unwrap_or_default()
385 |     } else {
386 |         build_group_list(&cache, |_, g| g.contains(&search.string))
387 |     };
388 | 
389 |     let mut result_data = Vec::with_capacity(groups.len());
390 |     for ((cluster_id, group_name), info) in groups {
391 |         result_data.push(json!((
392 |             cluster_id,
393 |             group_name,
394 |             info.state,
395 |             info.members,
396 |             info.topics.len()
397 |         )));
398 |     }
399 | 
400 |     json!({ "data": result_data }).to_string()
401 | }
402 | 
403 | #[get("/api/search/topic?<search..>")]
404 | pub fn topic_search(search: OmnisearchFormParams, cache: State<Cache>) -> String {
405 |     let topics = if search.regex {
406 |         Regex::new(&search.string)
407 |             .map(|r| cache.topics.filter_clone(|&(_, ref name)| r.is_match(name)))
408 |             .unwrap_or_default()
409 |     } else {
410 |         cache
411 |             .topics
412 |             .filter_clone(|&(_, ref name)| name.contains(&search.string))
413 |     };
414 | 
415 |     let mut result_data = Vec::new();
416 |     for ((cluster_id, topic_name), partitions) in topics {
417 |         let metrics = cache
418 |             .metrics
419 |             .get(&(cluster_id.clone(), topic_name.clone()))
420 |             .unwrap_or_default()
421 |             .aggregate_broker_metrics();
422 |         let errors = partitions.iter().find(|p| p.error.is_some());
423 |         result_data.push(json!((
424 |             cluster_id,
425 |             topic_name,
426 |             partitions.len(),
427 |             errors,
428 |             metrics.b_rate_15,
429 |             metrics.m_rate_15
430 |         )));
431 |     }
432 | 
433 |     json!({ "data": result_data }).to_string()
434 | }
435 | 
436 | //
437 | // ********** ZOOKEEPER **********
438 | //
439 | 
440 | #[get("/api/clusters/<cluster_id>/reassignment")]
441 | pub fn cluster_reassignment(
442 |     cluster_id: ClusterId,
443 |     cache: State<Cache>,
444 |     config: State<Config>,
445 | ) -> String {
446 |     if cache.brokers.get(&cluster_id).is_none() {
447 |         return empty();
448 |     }
449 | 
450 |     let zk_url = &config.clusters.get(&cluster_id).unwrap().zookeeper;
451 | 
452 |     let zk = match ZK::new(zk_url) {
453 |         // TODO: cache ZK clients
454 |         Ok(zk) => zk,
455 |         Err(_) => {
456 |             error!("Error connecting to {:?}", zk_url);
457 |             return empty();
458 |         }
459 |     };
460 | 
461 |     let reassignment = match zk.pending_reassignment() {
462 |         Some(reassignment) => reassignment,
463 |         None => return empty(),
464 |     };
465 | 
466 |     let result_data = reassignment
467 |         .partitions
468 |         .into_iter()
469 |         .map(|p| {
470 |             let topic_metrics = cache
471 |                 .metrics
472 |                 .get(&(cluster_id.clone(), p.topic.to_owned()))
473 |                 .unwrap_or_default();
474 | 
475 |             let replica_metrics = p
476 |                 .replicas
477 |                 .iter()
478 |                 .map(|r| {
479 |                     topic_metrics
480 |                         .brokers
481 |                         .get(r)
482 |                         .and_then(|b| b.partitions.get(p.partition as usize))
483 |                         .cloned()
484 |                         .unwrap_or_default()
485 |                         .size_bytes
486 |                 })
487 |                 .collect::<Vec<_>>();
488 | 
489 |             json!((p.topic, p.partition, p.replicas, replica_metrics))
490 |         })
491 |         .collect::<Vec<_>>();
492 | 
493 |     json!({ "data": result_data }).to_string()
494 | }
495 | 
496 | //
497 | // ********** INTERNALS **********
498 | //
499 | 
500 | #[get("/api/internals/cache/brokers")]
501 | pub fn cache_brokers(cache: State<Cache>) -> String {
502 |     let result_data = cache.brokers.lock_iter(|brokers_cache_entry| {
503 |         brokers_cache_entry
504 |             .map(|(cluster_id, brokers)| {
505 |                 (
506 |                     cluster_id.clone(),
507 |                     brokers.iter().map(|b| b.id).collect::<Vec<_>>(),
508 |                 )
509 |             })
510 |             .collect::<Vec<_>>()
511 |     });
512 | 
513 |     json!({ "data": result_data }).to_string()
514 | }
515 | 
516 | #[get("/api/internals/cache/metrics")]
517 | pub fn cache_metrics(cache: State<Cache>) -> String {
518 |     let result_data = cache.metrics.lock_iter(|metrics_cache_entry| {
519 |         metrics_cache_entry
520 |             .map(|(&(ref cluster_id, ref topic_id), metrics)| {
521 |                 (cluster_id.clone(), topic_id.clone(), metrics.brokers.len())
522 |             })
523 |             .collect::<Vec<_>>()
524 |     });
525 | 
526 |     json!({ "data": result_data }).to_string()
527 | }
528 | 
529 | #[get("/api/internals/cache/offsets")]
530 | pub fn cache_offsets(cache: State<Cache>) -> String {
531 |     let result_data = cache.offsets.lock_iter(|offsets_cache_entry| {
532 |         offsets_cache_entry
533 |             .map(
534 |                 |(&(ref cluster_id, ref group_name, ref topic_id), partitions)| {
535 |                     (
536 |                         cluster_id.clone(),
537 |                         group_name.clone(),
538 |                         topic_id.clone(),
539 |                         format!("{:?}", partitions),
540 |                     )
541 |                 },
542 |             )
543 |             .collect::<Vec<_>>()
544 |     });
545 | 
546 |     json!({ "data": result_data }).to_string()
547 | }
548 | 
549 | #[get("/api/internals/live_consumers")]
550 | pub fn live_consumers(live_consumers: State<LiveConsumerStore>) -> String {
551 |     let result_data = live_consumers
552 |         .consumers()
553 |         .iter()
554 |         .map(|consumer| {
555 |             (
556 |                 consumer.id(),
557 |                 consumer.cluster_id().to_owned(),
558 |                 consumer.topic().to_owned(),
559 |                 consumer.last_poll().elapsed().as_secs(),
560 |             )
561 |         })
562 |         .collect::<Vec<_>>();
563 |     json!({ "data": result_data }).to_string()
564 | }
565 | 
566 | fn empty() -> String {
567 |     json!({"data": []}).to_string()
568 | }
--------------------------------------------------------------------------------
/src/web_server/mod.rs:
--------------------------------------------------------------------------------
1 | mod api;
2 | mod pages;
3 | mod view;
4 | 
5 | pub mod server;
--------------------------------------------------------------------------------
/src/web_server/pages/cluster.rs:
--------------------------------------------------------------------------------
1 | use maud::{html, Markup, PreEscaped};
2 | 
3 | use metadata::{BrokerId, ClusterId};
4 | use web_server::pages;
5 | use web_server::view::layout;
6 | 
7 | use cache::Cache;
8 | use config::Config;
9 | 
10 | use rocket::State;
11 | 
12 | fn broker_table(cluster_id: &ClusterId) -> PreEscaped<String> {
13 |     let api_url = format!("/api/clusters/{}/brokers", cluster_id);
14 |     layout::datatable_ajax(
15 |         "brokers-ajax",
16 |         &api_url,
17 |         cluster_id.name(),
18 |         html! { tr { th { "Broker id" } th { "Hostname" }
19 |             th data-toggle="tooltip" data-container="body"
20 |                 title="Total average over the last 15 minutes" { "Total byte rate" }
21 |             th data-toggle="tooltip" data-container="body"
22 |                 title="Total average over the last 15 minutes" { "Total msg rate" }
23 |         }
24 |         },
25 |     )
26 | }
{ tr { th { "Topic name" } th { "#Partitions" } th { "Status" } 35 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Byte rate" } 36 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Msg rate" } 37 | } 38 | }, 39 | ) 40 | } 41 | 42 | fn groups_table(cluster_id: &ClusterId) -> PreEscaped { 43 | let api_url = format!("/api/clusters/{}/groups", cluster_id); 44 | layout::datatable_ajax( 45 | "groups-ajax", 46 | &api_url, 47 | cluster_id.name(), 48 | html! { tr { th { "Group name" } th { "Status" } th { "Registered members" } th { "Stored topic offsets" } } }, 49 | ) 50 | } 51 | 52 | fn reassignment_table(cluster_id: &ClusterId) -> PreEscaped { 53 | let api_url = format!("/api/clusters/{}/reassignment", cluster_id); 54 | layout::datatable_ajax( 55 | "reassignment-ajax", 56 | &api_url, 57 | cluster_id.name(), 58 | html! { tr { th { "Topic" } th { "Partition" } th { "Reassigned replicas" } th { "Replica sizes" } } }, 59 | ) 60 | } 61 | 62 | #[get("/clusters/")] 63 | pub fn cluster_page(cluster_id: ClusterId, cache: State, config: State) -> Markup { 64 | if cache.brokers.get(&cluster_id).is_none() { 65 | return pages::warning_page( 66 | &format!("Cluster: {}", cluster_id), 67 | "The specified cluster doesn't exist.", 68 | ); 69 | } 70 | 71 | let cluster_config = config.clusters.get(&cluster_id); 72 | let content = html! { 73 | h3 style="margin-top: 0px" { "Information" } 74 | dl class="dl-horizontal" { 75 | dt { "Cluster name: " } dd { (cluster_id.name()) } 76 | @if cluster_config.is_some() { 77 | dt { "Bootstrap list: " } dd { (cluster_config.unwrap().broker_list.join(", ")) } 78 | dt { "Zookeeper: " } dd { (cluster_config.unwrap().zookeeper) } 79 | } @else { 80 | dt { "Bootstrap list: " } dd { "Cluster configuration is missing" } 81 | dt { "Zookeeper: " } dd { "Cluster configuration is missing" } 82 | } 83 | } 84 | h3 { "Brokers" } 85 | div { (broker_table(&cluster_id)) } 86 | h3 { "Topics" } 87 | (topic_table(&cluster_id)) 88 | h3 { "Consumer groups" } 89 | (groups_table(&cluster_id)) 90 | 91 | @if cluster_config.map(|c| c.show_zk_reassignments).unwrap_or(false) { 92 | h3 { "Reassignment" } 93 | (reassignment_table(&cluster_id)) 94 | } 95 | }; 96 | layout::page(&format!("Cluster: {}", cluster_id), content) 97 | } 98 | 99 | #[get("/clusters//brokers/")] 100 | pub fn broker_page( 101 | cluster_id: ClusterId, 102 | broker_id: BrokerId, 103 | cache: State, 104 | config: State, 105 | ) -> Markup { 106 | let broker = cache 107 | .brokers 108 | .get(&cluster_id) 109 | .and_then(|brokers| brokers.iter().find(|b| b.id == broker_id).cloned()); 110 | let cluster_config = config.clusters.get(&cluster_id); 111 | 112 | if broker.is_none() || cluster_config.is_none() { 113 | return pages::warning_page( 114 | &format!("Broker: {}", broker_id), 115 | "The specified broker doesn't exist.", 116 | ); 117 | } 118 | 119 | let broker = broker.unwrap(); 120 | let metrics = cache 121 | .metrics 122 | .get(&(cluster_id.to_owned(), "__TOTAL__".to_owned())) 123 | .unwrap_or_default() 124 | .aggregate_broker_metrics(); 125 | let content = html! 
98 | 
99 | #[get("/clusters/<cluster_id>/brokers/<broker_id>")]
100 | pub fn broker_page(
101 |     cluster_id: ClusterId,
102 |     broker_id: BrokerId,
103 |     cache: State<Cache>,
104 |     config: State<Config>,
105 | ) -> Markup {
106 |     let broker = cache
107 |         .brokers
108 |         .get(&cluster_id)
109 |         .and_then(|brokers| brokers.iter().find(|b| b.id == broker_id).cloned());
110 |     let cluster_config = config.clusters.get(&cluster_id);
111 | 
112 |     if broker.is_none() || cluster_config.is_none() {
113 |         return pages::warning_page(
114 |             &format!("Broker: {}", broker_id),
115 |             "The specified broker doesn't exist.",
116 |         );
117 |     }
118 | 
119 |     let broker = broker.unwrap();
120 |     let metrics = cache
121 |         .metrics
122 |         .get(&(cluster_id.to_owned(), "__TOTAL__".to_owned()))
123 |         .unwrap_or_default()
124 |         .aggregate_broker_metrics();
125 |     let content = html! {
126 |         h3 style="margin-top: 0px" { "Information" }
127 |         dl class="dl-horizontal" {
128 |             dt { "Cluster name: " } dd { (cluster_id.name()) }
129 |             dt { "Bootstrap list: " } dd { (cluster_config.unwrap().broker_list.join(", ")) }
130 |             dt { "Zookeeper: " } dd { (cluster_config.unwrap().zookeeper) }
131 |             dt { "Hostname" } dd { (broker.hostname) }
132 |             dt { "Traffic" } dd { (format!("{:.1} KB/s {:.0} msg/s", metrics.b_rate_15 / 1000f64, metrics.m_rate_15)) }
133 |         }
134 |     };
135 |     layout::page(&format!("Broker: {}", broker_id), content)
136 | }
--------------------------------------------------------------------------------
/src/web_server/pages/clusters.rs:
--------------------------------------------------------------------------------
1 | use maud::{html, Markup, PreEscaped};
2 | use rocket::State;
3 | 
4 | use cache::{BrokerCache, Cache, TopicCache};
5 | use metadata::ClusterId;
6 | use web_server::view::layout;
7 | 
8 | fn cluster_pane_layout(
9 |     cluster_id: &ClusterId,
10 |     brokers: usize,
11 |     topics: usize,
12 | ) -> PreEscaped<String> {
13 |     let link = format!("/clusters/{}/", cluster_id.name());
14 |     html! {
15 |         div class="col-lg-4 col-md-6" {
16 |             div class="panel panel-primary" {
17 |                 div class="panel-heading" {
18 |                     div class="row" {
19 |                         // div class="col-xs-3" i class="fa fa-server fa-5x" {}
20 |                         div class="col-xs-3" { img style="height: 64px" src="/public/images/kafka_logo_white.png" {} }
21 |                         div class="col-xs-9 text-right" {
22 |                             div style="font-size: 24px" {
23 |                                 a href=(link) style="color: inherit; text-decoration: inherit;" { (cluster_id.name()) }
24 |                             }
25 |                             div { (brokers) " brokers" }
26 |                             div { (topics) " topics" }
27 |                         }
28 |                     }
29 |                 }
30 |                 a href=(link) {
31 |                     div class="panel-footer" {
32 |                         span class="pull-left" { "View Details" }
33 |                         span class="pull-right" { i class="fa fa-arrow-circle-right" {} }
34 |                         div class="clearfix" {}
35 |                     }
36 |                 }
37 |             }
38 |         }
39 |     }
40 | }
41 | 
42 | fn cluster_pane(
43 |     cluster_id: &ClusterId,
44 |     broker_cache: &BrokerCache,
45 |     topic_cache: &TopicCache,
46 | ) -> PreEscaped<String> {
47 |     let broker_count = broker_cache.get(cluster_id).unwrap_or_default().len();
48 |     let topics_count = topic_cache.count(|&(ref c, _)| c == cluster_id);
49 |     cluster_pane_layout(cluster_id, broker_count, topics_count)
50 | }
51 | 
52 | #[get("/clusters")]
53 | pub fn clusters_page(cache: State<Cache>) -> Markup {
54 |     let mut cluster_ids = cache.brokers.keys();
55 |     cluster_ids.sort();
56 | 
57 |     let content = html! {
58 |         @for cluster_id in &cluster_ids {
59 |             (cluster_pane(cluster_id, &cache.brokers, &cache.topics))
60 |         }
61 |     };
62 | 
63 |     layout::page("Clusters", content)
64 | }
--------------------------------------------------------------------------------
/src/web_server/pages/error_defaults.rs:
--------------------------------------------------------------------------------
1 | use web_server::view::layout;
2 | 
3 | use maud::{html, Markup};
4 | 
5 | pub fn warning_page(title: &str, message: &str) -> Markup {
6 |     let content = layout::notification(
7 |         "warning",
8 |         html! {
{ 9 | div class="flex-container" { 10 | span class="flex-item" style="padding: 0.3in; font-size: 16pt" { 11 | i class="fa fa-frown-o fa-3x" style="vertical-align: middle;" { "" } 12 | " " (message) 13 | } 14 | } 15 | }, 16 | ); 17 | layout::page(title, content) 18 | } 19 | -------------------------------------------------------------------------------- /src/web_server/pages/group.rs: -------------------------------------------------------------------------------- 1 | use maud::{html, Markup, PreEscaped}; 2 | use rocket::http::RawStr; 3 | 4 | use cache::Cache; 5 | use metadata::ClusterId; 6 | use web_server::pages; 7 | use web_server::view::layout; 8 | 9 | use rocket::State; 10 | 11 | fn group_members_table(cluster_id: &ClusterId, group_name: &str) -> PreEscaped { 12 | let api_url = format!("/api/clusters/{}/groups/{}/members", cluster_id, group_name); 13 | layout::datatable_ajax( 14 | "group-members-ajax", 15 | &api_url, 16 | cluster_id.name(), 17 | html! { tr { th { "Member id" } th { "Client id" } th { "Hostname" } th { "Assignments" } } }, 18 | ) 19 | } 20 | 21 | fn group_offsets_table(cluster_id: &ClusterId, group_name: &str) -> PreEscaped { 22 | let api_url = format!("/api/clusters/{}/groups/{}/offsets", cluster_id, group_name); 23 | layout::datatable_ajax( 24 | "group-offsets-ajax", 25 | &api_url, 26 | cluster_id.name(), 27 | html! { tr { th { "Topic" } th { "Partition" } th { "Size" } th { "Low mark" } th { "High mark" } 28 | th { "Current offset" } th { "Lag" } th { "Lag %" }} }, 29 | ) 30 | } 31 | 32 | #[get("/clusters//groups/")] 33 | pub fn group_page(cluster_id: ClusterId, group_name: &RawStr, cache: State) -> Markup { 34 | if cache.brokers.get(&cluster_id).is_none() { 35 | return pages::warning_page(group_name, "The specified cluster doesn't exist."); 36 | } 37 | 38 | let group_state = match cache 39 | .groups 40 | .get(&(cluster_id.to_owned(), group_name.to_string())) 41 | { 42 | Some(group) => group.state, 43 | None => "Not registered".to_string(), 44 | }; 45 | 46 | let cluster_link = format!("/clusters/{}/", cluster_id.name()); 47 | let content = html! { 48 | h3 style="margin-top: 0px" { "Information" } 49 | dl class="dl-horizontal" { 50 | dt { "Cluster name:" } dd { a href=(cluster_link) { (cluster_id) } } 51 | dt { "Group name: " } dd { (group_name) } 52 | dt { "Group state: " } dd { (group_state) } 53 | } 54 | h3 { "Members" } 55 | div { (group_members_table(&cluster_id, group_name)) } 56 | h3 { "Offsets" } 57 | div { (group_offsets_table(&cluster_id, group_name)) } 58 | }; 59 | 60 | layout::page(&format!("Group: {}", group_name), content) 61 | } 62 | -------------------------------------------------------------------------------- /src/web_server/pages/internals.rs: -------------------------------------------------------------------------------- 1 | use maud::{html, Markup, PreEscaped}; 2 | use rocket::State; 3 | 4 | use cache::Cache; 5 | use web_server::view::layout; 6 | 7 | fn broker_table() -> PreEscaped { 8 | layout::datatable_ajax( 9 | "internals-cache-brokers-ajax", 10 | "/api/internals/cache/brokers", 11 | "", 12 | html! { tr { th { "Cluster id" } th { "Broker ids" } } }, 13 | ) 14 | } 15 | 16 | fn metrics_table() -> PreEscaped { 17 | layout::datatable_ajax( 18 | "internals-cache-metrics-ajax", 19 | "/api/internals/cache/metrics", 20 | "", 21 | html! 
{ tr { th { "Cluster id" } th { "Topic name" } th { "Brokers" } } }, 22 | ) 23 | } 24 | 25 | fn offsets_table() -> PreEscaped { 26 | layout::datatable_ajax( 27 | "internals-cache-offsets-ajax", 28 | "/api/internals/cache/offsets", 29 | "", 30 | html! { tr { th { "Cluster id" } th { "Consumer group" } th { "Topic name" } th { "Offsets" } } }, 31 | ) 32 | } 33 | 34 | fn cache_description_table(name: &str, key: &str, value: &str, count: usize) -> PreEscaped { 35 | html! { 36 | table style="margin-top: 10px; margin-bottom: 10px" { 37 | tr { 38 | td style="font-weight: bold" { "Name:" } 39 | td style="font-family: monospace; padding-left: 20px" { (name) } 40 | } 41 | tr { 42 | td style="font-weight: bold" { "Key:" } 43 | td style="font-family: monospace; padding-left: 20px" { (key) } 44 | } 45 | tr { 46 | td style="font-weight: bold" { "Value:" } 47 | td style="font-family: monospace; padding-left: 20px" { (value) } 48 | } 49 | tr { 50 | td style="font-weight: bold" { "Items count:" } 51 | td style="font-family: monospace; padding-left: 20px" { (count) } 52 | } 53 | } 54 | } 55 | } 56 | 57 | #[get("/internals/caches")] 58 | pub fn caches_page(cache: State) -> Markup { 59 | let content = html! { 60 | h3 style="margin-top: 0px" { "Information" } 61 | h3 { "Brokers" } 62 | (cache_description_table("BrokerCache", "ClusterId", "Vec", cache.brokers.keys().len())) 63 | div { (broker_table()) } 64 | h3 { "Metrics" } 65 | (cache_description_table("MetricsCache", "(ClusterId, TopicName)", "TopicMetrics", cache.metrics.keys().len())) 66 | div { (metrics_table()) } 67 | h3 { "Offsets" } 68 | (cache_description_table("OffsetsCache", "(ClusterId, GroupName, TopicName)", "Vec", cache.offsets.keys().len())) 69 | div { (offsets_table()) } 70 | }; 71 | layout::page("Caches", content) 72 | } 73 | 74 | fn live_consumers_table() -> PreEscaped { 75 | layout::datatable_ajax( 76 | "internals-live-consumers-ajax", 77 | "/api/internals/live_consumers", 78 | "", 79 | html! { tr { th { "Id" } th { "Cluster id" } th { "Topic name" } th { "Last poll" } } }, 80 | ) 81 | } 82 | 83 | #[get("/internals/live_consumers")] 84 | pub fn live_consumers_page() -> Markup { 85 | let content = html! 
{ 86 | h3 style="margin-top: 0px" { "Active instances" } 87 | div { (live_consumers_table()) } 88 | }; 89 | layout::page("Live consumers", content) 90 | } 91 | -------------------------------------------------------------------------------- /src/web_server/pages/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cluster; 2 | pub mod clusters; 3 | pub mod error_defaults; 4 | pub mod group; 5 | pub mod internals; 6 | pub mod omnisearch; 7 | pub mod topic; 8 | 9 | pub use self::cluster::cluster_page; 10 | pub use self::clusters::clusters_page; 11 | pub use self::error_defaults::warning_page; 12 | pub use self::group::group_page; 13 | pub use self::topic::topic_page; 14 | 15 | pub use self::omnisearch::{consumer_search, topic_search}; 16 | -------------------------------------------------------------------------------- /src/web_server/pages/omnisearch.rs: -------------------------------------------------------------------------------- 1 | use maud::{html, Markup}; 2 | use rocket::http::uri::Uri; 3 | use rocket::request::{FromQuery, Query}; 4 | 5 | use web_server::view::layout; 6 | 7 | #[derive(Debug)] 8 | pub struct OmnisearchFormParams { 9 | pub string: String, 10 | pub regex: bool, 11 | } 12 | 13 | impl<'q> FromQuery<'q> for OmnisearchFormParams { 14 | type Error = (); 15 | 16 | fn from_query(query: Query<'q>) -> Result { 17 | let mut params = OmnisearchFormParams { 18 | string: "".to_owned(), 19 | regex: false, 20 | }; 21 | for item in query { 22 | let (key, value) = item.key_value_decoded(); 23 | match key.as_str() { 24 | "string" => params.string = Uri::percent_decode_lossy(value.as_bytes()).to_string(), 25 | "regex" => params.regex = value == "on" || value == "true", 26 | _ => {} 27 | } 28 | } 29 | Ok(params) 30 | } 31 | } 32 | 33 | #[get("/omnisearch")] 34 | pub fn omnisearch() -> Markup { 35 | omnisearch_p(OmnisearchFormParams { 36 | string: "".to_owned(), 37 | regex: false, 38 | }) 39 | } 40 | 41 | #[get("/omnisearch?")] 42 | pub fn omnisearch_p(search: OmnisearchFormParams) -> Markup { 43 | let search_form = 44 | layout::search_form("/omnisearch", "Omnisearch", &search.string, search.regex); 45 | let api_url = format!( 46 | "/api/search/topic?string={}®ex={}", 47 | &search.string, search.regex 48 | ); 49 | let topics = layout::datatable_ajax( 50 | "topic-search-ajax", 51 | &api_url, 52 | "", 53 | html! { tr { th { "Cluster name" } th { "Topic name" } th { "#Partitions" } th { "Status" } 54 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Byte rate" } 55 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Msg rate" } 56 | }}, 57 | ); 58 | let api_url = format!( 59 | "/api/search/consumer?string={}®ex={}", 60 | &search.string, search.regex 61 | ); 62 | let consumers = layout::datatable_ajax( 63 | "group-search-ajax", 64 | &api_url, 65 | "", 66 | html! { tr { th { "Cluster" } th { "Group name" } th { "Status" } th { "Registered members" } th { "Stored topic offsets" } } }, 67 | ); 68 | 69 | layout::page( 70 | "Omnisearch", 71 | html! 
{ 72 | (search_form) 73 | @if !search.string.is_empty() { 74 | h3 { "Topics" } 75 | (topics) 76 | } 77 | @if !search.string.is_empty() { 78 | h3 { "Consumers" } 79 | (consumers) 80 | } 81 | }, 82 | ) 83 | } 84 | 85 | #[get("/consumers")] 86 | pub fn consumer_search() -> Markup { 87 | consumer_search_p(OmnisearchFormParams { 88 | string: "".to_owned(), 89 | regex: false, 90 | }) 91 | } 92 | 93 | #[get("/consumers?<search>")] 94 | pub fn consumer_search_p(search: OmnisearchFormParams) -> Markup { 95 | let search_form = 96 | layout::search_form("/consumers", "Consumer name", &search.string, search.regex); 97 | let api_url = format!( 98 | "/api/search/consumer?string={}&regex={}", 99 | &search.string, search.regex 100 | ); 101 | let results = layout::datatable_ajax( 102 | "group-search-ajax", 103 | &api_url, 104 | "", 105 | html! { tr { th { "Cluster" } th { "Group name" } th { "Status" } th { "Registered members" } th { "Stored topic offsets" } } }, 106 | ); 107 | 108 | layout::page( 109 | "Consumer search", 110 | html! { 111 | (search_form) 112 | @if !search.string.is_empty() { 113 | h3 { "Search results" } 114 | (results) 115 | } 116 | }, 117 | ) 118 | } 119 | 120 | #[get("/topics")] 121 | pub fn topic_search() -> Markup { 122 | topic_search_p(OmnisearchFormParams { 123 | string: "".to_owned(), 124 | regex: false, 125 | }) 126 | } 127 | 128 | #[get("/topics?<search>")] 129 | pub fn topic_search_p(search: OmnisearchFormParams) -> Markup { 130 | let search_form = layout::search_form("/topics", "Topic name", &search.string, search.regex); 131 | let api_url = format!( 132 | "/api/search/topic?string={}&regex={}", 133 | &search.string, search.regex 134 | ); 135 | let results = layout::datatable_ajax( 136 | "topic-search-ajax", 137 | &api_url, 138 | "", 139 | html! { tr { th { "Cluster name" } th { "Topic name" } th { "#Partitions" } th { "Status" } 140 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Byte rate" } 141 | th data-toggle="tooltip" data-container="body" title="Average over the last 15 minutes" { "Msg rate" } 142 | }}, 143 | ); 144 | 145 | layout::page( 146 | "Topic search", 147 | html! { 148 | (search_form) 149 | @if !search.string.is_empty() { 150 | h3 { "Search results" } 151 | (results) 152 | } 153 | }, 154 | ) 155 | } 156 | -------------------------------------------------------------------------------- /src/web_server/pages/topic.rs: -------------------------------------------------------------------------------- 1 | use maud::{html, Markup, PreEscaped}; 2 | use rand::random; 3 | use rocket::http::RawStr; 4 | 5 | use cache::Cache; 6 | use config::Config; 7 | use metadata::ClusterId; 8 | use web_server::pages; 9 | use web_server::view::layout; 10 | 11 | use rocket::State; 12 | 13 | fn topic_table(cluster_id: &ClusterId, topic_name: &str) -> PreEscaped<String> { 14 | let api_url = format!( 15 | "/api/clusters/{}/topics/{}/topology", 16 | cluster_id, topic_name 17 | ); 18 | layout::datatable_ajax( 19 | "topology-ajax", 20 | &api_url, 21 | cluster_id.name(), 22 | html! { tr { th { "Id" } th { "Size" } th { "Leader" } th { "Replicas" } th { "ISR" } th { "Status" } } }, 23 | ) 24 | } 25 | 26 | fn consumer_groups_table(cluster_id: &ClusterId, topic_name: &str) -> PreEscaped<String> { 27 | let api_url = format!("/api/clusters/{}/topics/{}/groups", cluster_id, topic_name); 28 | layout::datatable_ajax( 29 | "groups-ajax", 30 | &api_url, 31 | cluster_id.name(), 32 | html!
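// As with every table on these pages, only the <thead> is rendered server-side:
// datatable_ajax() in view/layout.rs stamps the API URL into a data-url attribute, and the
// client-side DataTables setup (presumably in public/my_js.js) fetches the rows from it.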
{ tr { th { "Group name" } th { "Status" } th { "Registered members" } th { "Stored topic offsets" } } }, 33 | ) 34 | } 35 | 36 | fn graph_link(graph_url: &str, topic: &str) -> PreEscaped { 37 | let url = graph_url.replace("{%s}", topic); 38 | html! { 39 | a href=(url) { "link" } 40 | } 41 | } 42 | 43 | fn topic_tailer_panel(cluster_id: &ClusterId, topic: &str, tailer_id: u64) -> PreEscaped { 44 | let panel_head = html! { 45 | i class="fa fa-align-left fa-fw" {} "Messages" 46 | }; 47 | let panel_body = html! { 48 | div class="topic_tailer" data-cluster=(cluster_id) data-topic=(topic) data-tailer=(tailer_id) { 49 | "Tailing recent messages..." 50 | } 51 | }; 52 | layout::panel(panel_head, panel_body) 53 | } 54 | 55 | #[get("/clusters//topics/")] 56 | pub fn topic_page( 57 | cluster_id: ClusterId, 58 | topic_name: &RawStr, 59 | cache: State, 60 | config: State, 61 | ) -> Markup { 62 | let partitions = match cache 63 | .topics 64 | .get(&(cluster_id.clone(), topic_name.to_string())) 65 | { 66 | Some(partitions) => partitions, 67 | None => { 68 | return pages::warning_page( 69 | &format!("Topic: {}", cluster_id), 70 | "The specified cluster doesn't exist.", 71 | ) 72 | } 73 | }; 74 | 75 | let cluster_config = config.clusters.get(&cluster_id).unwrap(); 76 | let _ = cache 77 | .brokers 78 | .get(&cluster_id) 79 | .expect("Cluster should exist"); // TODO: handle better 80 | 81 | let metrics = cache 82 | .metrics 83 | .get(&(cluster_id.clone(), topic_name.to_string())) 84 | .unwrap_or_default() 85 | .aggregate_broker_metrics(); 86 | 87 | let cluster_link = format!("/clusters/{}/", cluster_id.name()); 88 | let content = html! { 89 | h3 style="margin-top: 0px" {"General information"} 90 | dl class="dl-horizontal" { 91 | dt { "Cluster name " dd { a href=(cluster_link) { (cluster_id) } } } 92 | dt { "Topic name " dd { (topic_name) } } 93 | dt { "Number of partitions " dd { (partitions.len()) } } 94 | dt { "Number of replicas " dd { (partitions[0].replicas.len()) } } 95 | dt { "Traffic last 15 minutes" } 96 | dd { ( format!("{:.1} KB/s {:.0} msg/s", metrics.b_rate_15 / 1000f64, metrics.m_rate_15)) } 97 | @if cluster_config.graph_url.is_some() { 98 | dt { "Traffic chart" } dd { (graph_link(cluster_config.graph_url.as_ref().unwrap(), topic_name)) } 99 | } 100 | } 101 | h3 { "Topology" } 102 | (topic_table(&cluster_id, topic_name)) 103 | h3 {"Consumer groups"} 104 | (consumer_groups_table(&cluster_id, topic_name)) 105 | h3 { "Tailer" } 106 | @if cluster_config.enable_tailing { 107 | (topic_tailer_panel(&cluster_id, topic_name, random::())) 108 | } @else { 109 | p { "Topic tailing is disabled in this cluster." 
} 110 | } 111 | }; 112 | 113 | layout::page(&format!("Topic: {}", topic_name), content) 114 | } 115 | -------------------------------------------------------------------------------- /src/web_server/server.rs: -------------------------------------------------------------------------------- 1 | use rocket; 2 | use rocket::http::RawStr; 3 | use rocket::request::{FromParam, Request}; 4 | use rocket::response::{self, NamedFile, Redirect, Responder}; 5 | use scheduled_executor::ThreadPoolExecutor; 6 | 7 | use cache::Cache; 8 | use config::Config; 9 | use error::*; 10 | use live_consumer::{self, LiveConsumerStore}; 11 | use metadata::ClusterId; 12 | use utils::{GZip, RequestLogger}; 13 | use web_server::api; 14 | use web_server::pages; 15 | 16 | use std; 17 | use std::path::{Path, PathBuf}; 18 | 19 | #[get("/")] 20 | fn index() -> Redirect { 21 | Redirect::to("/clusters") 22 | } 23 | 24 | // Make ClusterId a valid parameter 25 | impl<'a> FromParam<'a> for ClusterId { 26 | type Error = (); 27 | 28 | fn from_param(param: &'a RawStr) -> std::result::Result<Self, Self::Error> { 29 | Ok(param.as_str().into()) 30 | } 31 | } 32 | 33 | #[get("/public/<file..>")] 34 | fn files(file: PathBuf) -> Option<CachedFile> { 35 | NamedFile::open(Path::new("resources/web_server/public/").join(file)) 36 | .map(CachedFile::from) 37 | .ok() 38 | } 39 | 40 | #[get("/public/<file..>?<version>")] 41 | fn files_v(file: PathBuf, version: &RawStr) -> Option<CachedFile> { 42 | let _ = version; // just ignore version 43 | NamedFile::open(Path::new("resources/web_server/public/").join(file)) 44 | .map(CachedFile::from) 45 | .ok() 46 | } 47 | 48 | pub struct CachedFile { 49 | ttl: usize, 50 | file: NamedFile, 51 | } 52 | 53 | impl CachedFile { 54 | pub fn from(file: NamedFile) -> CachedFile { 55 | CachedFile::with_ttl(1800, file) 56 | } 57 | 58 | pub fn with_ttl(ttl: usize, file: NamedFile) -> CachedFile { 59 | CachedFile { ttl, file } 60 | } 61 | } 62 | 63 | impl<'a> Responder<'a> for CachedFile { 64 | fn respond_to(self, request: &Request) -> response::Result<'a> { 65 | let inner_response = self.file.respond_to(request).unwrap(); // fixme 66 | response::Response::build_from(inner_response) 67 | .raw_header( 68 | "Cache-Control", 69 | format!("max-age={}, must-revalidate", self.ttl), 70 | ) 71 | .ok() 72 | } 73 | } 74 | 75 | pub fn run_server(executor: &ThreadPoolExecutor, cache: Cache, config: &Config) -> Result<()> { 76 | let version = option_env!("CARGO_PKG_VERSION").unwrap_or("?"); 77 | info!( 78 | "Starting kafka-view v{}, listening on {}:{}.", 79 | version, config.listen_host, config.listen_port 80 | ); 81 | 82 | let rocket_env = rocket::config::Environment::active() 83 | .chain_err(|| "Invalid ROCKET_ENV environment variable")?; 84 | let rocket_config = rocket::config::Config::build(rocket_env) 85 | .address(config.listen_host.to_owned()) 86 | .port(config.listen_port) 87 | .workers(4) 88 | .finalize() 89 | .chain_err(|| "Invalid rocket configuration")?; 90 | 91 | rocket::custom(rocket_config) 92 | .attach(GZip) 93 | .attach(RequestLogger) 94 | .manage(cache) 95 | .manage(config.clone()) 96 | .manage(LiveConsumerStore::new(executor.clone())) 97 | .mount( 98 | "/", 99 | routes![ 100 | index, 101 | files, 102 | files_v, 103 | pages::cluster::cluster_page, 104 | pages::cluster::broker_page, 105 | pages::clusters::clusters_page, 106 | pages::group::group_page, 107 | pages::internals::caches_page, 108 | pages::internals::live_consumers_page, 109 | pages::omnisearch::consumer_search, 110 | pages::omnisearch::consumer_search_p, 111 | pages::omnisearch::omnisearch, 112 |
pages::omnisearch::omnisearch_p, 113 | pages::omnisearch::topic_search, 114 | pages::omnisearch::topic_search_p, 115 | pages::topic::topic_page, 116 | api::brokers, 117 | api::cache_brokers, 118 | api::cache_metrics, 119 | api::cache_offsets, 120 | api::cluster_reassignment, 121 | api::live_consumers, 122 | api::cluster_groups, 123 | api::cluster_topics, 124 | api::consumer_search, 125 | api::group_members, 126 | api::group_offsets, 127 | api::topic_groups, 128 | api::topic_search, 129 | api::topic_topology, 130 | live_consumer::topic_tailer_api, 131 | ], 132 | ) 133 | .launch(); 134 | 135 | Ok(()) 136 | } 137 | -------------------------------------------------------------------------------- /src/web_server/view/layout.rs: -------------------------------------------------------------------------------- 1 | use crate::RUST_VERSION; 2 | use maud::{self, html, PreEscaped}; 3 | 4 | pub fn search_form( 5 | action: &str, 6 | placeholder: &str, 7 | value: &str, 8 | regex: bool, 9 | ) -> PreEscaped<String> { 10 | html! { 11 | form action=(action) { 12 | div class="row" { 13 | div class="col-md-12" style="margin-top: 20pt" {} 14 | } 15 | div class="row" { 16 | div class="col-md-2" { "" } 17 | div class="col-md-8" { 18 | div class="input-group custom-search-form" { 19 | input class="form-control" type="text" name="string" style="font-size: 18pt; height: 30pt" 20 | placeholder=(placeholder) value=(value) { 21 | span class="input-group-btn" { 22 | button class="btn btn-default" style="height: 30pt" type="submit" { 23 | i class="fa fa-search fa-2x" {} 24 | } 25 | } 26 | } 27 | } 28 | } 29 | div class="col-md-2" {} 30 | } 31 | div class="row" { 32 | div class="col-md-2" { "" } 33 | div class="col-md-8" style="margin-top: 10pt" { 34 | strong { "Search options:" } 35 | label class="checkbox-inline" style="margin-left: 10pt" { 36 | @if regex { 37 | input type="checkbox" name="regex" checked="" {} 38 | } @else { 39 | input type="checkbox" name="regex" {} 40 | } 41 | "Regex" 42 | } 43 | } 44 | div class="col-md-2" { "" } 45 | } 46 | div class="row" { 47 | div class="col-md-12" style="margin-top: 20pt" {} 48 | } 49 | } 50 | } 51 | } 52 | 53 | pub fn notification(n_type: &str, content: PreEscaped<String>) -> PreEscaped<String> { 54 | let alert_class = format!("alert alert-{}", n_type); 55 | html! { 56 | div class=(alert_class) { 57 | (content) 58 | } 59 | } 60 | } 61 | 62 | pub fn datatable_ajax( 63 | id: &str, 64 | url: &str, 65 | param: &str, 66 | table_header: PreEscaped<String>, 67 | ) -> PreEscaped<String> { 68 | let table_id = format!("datatable-{}", id); 69 | html! { 70 | table id=(table_id) data-url=(url) data-param=(param) width="100%" class="table table-striped table-bordered table-hover" { 71 | thead { (table_header) } 72 | } 73 | } 74 | } 75 | 76 | pub fn panel(heading: PreEscaped<String>, body: PreEscaped<String>) -> PreEscaped<String> { 77 | html!
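// The start/stop dropdown below is hard-wired to the topic tailer (note the
// tailer_button_label / start_tailer_button / stop_tailer_button ids), which matches
// panel()'s only caller in this tree: topic_tailer_panel() in pages/topic.rs. The actual
// button wiring presumably lives in public/my_js.js.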
{ 78 | div class="panel panel-default" { 79 | div class="panel-heading" { 80 | (heading) 81 | div class="pull-right" { 82 | div class="btn-group" { 83 | button id="tailer_button_label" type="button" 84 | class="btn btn-default btn-xs dropdown-toggle" data-toggle="dropdown" { 85 | "Topic tailer: active" span class="caret" {} 86 | } 87 | ul class="dropdown-menu pull-right" role="menu" { 88 | li id="start_tailer_button" { a href="#" { "Start" } } 89 | li id="stop_tailer_button" { a href="#" { "Stop" } } 90 | // li a href="#" "Action" 91 | // li a href="#" "Action" 92 | // li class="divider" {} 93 | // li a href="#" "Action" 94 | } 95 | } 96 | } 97 | } 98 | div class="panel-body" { (body) } 99 | } 100 | } 101 | } 102 | 103 | fn html_head(title: &str) -> PreEscaped { 104 | html! { 105 | head profile="http://www.w3.org/2005/10/profile" { 106 | link rel="icon" type="image/png" href="/public/images/webkafka_favicon.png" {} 107 | meta charset="utf-8" {} 108 | meta http-equiv="X-UA-Compatible" content="IE=edge" {} 109 | meta name="viewport" content="width=device-width, initial-scale=1" {} 110 | title { (title) } 111 | link href="/public/sb-admin-2/vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet" {} 112 | link href="/public/sb-admin-2/vendor/metisMenu/metisMenu.min.css" rel="stylesheet" {} 113 | link href="/public/sb-admin-2/vendor/datatables-plugins/dataTables.bootstrap.css" rel="stylesheet" {} 114 | // link href="/public/sb-admin-2/vendor/datatables/css/jquery.dataTables.min.css" rel="stylesheet" {} 115 | // link href="/public/sb-admin-2/vendor/datatables/css/dataTables.jqueryui.min.css" rel="stylesheet" {} 116 | link href="/public/sb-admin-2/dist/css/sb-admin-2.css" rel="stylesheet" {} 117 | link href="/public/css/font-awesome.min.css" rel="stylesheet" type="text/css" {} 118 | link href="/public/my_css.css" rel="stylesheet" type="text/css" {} 119 | script async="" defer="" src="https://buttons.github.io/buttons.js" {} 120 | } 121 | } 122 | } 123 | 124 | fn navbar_header() -> PreEscaped { 125 | html! { 126 | div class="navbar-header" { 127 | button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse" { 128 | span class="sr-only" { "Toggle navigation" } 129 | span class="icon-bar" {} 130 | span class="icon-bar" {} 131 | span class="icon-bar" {} 132 | } 133 | a class="navbar-brand" href="/" { 134 | img src="/public/images/kafka_logo.png" 135 | style="float:left;max-width:160%;max-height:160%; margin-top: -0.06in; margin-right: 0.07in" 136 | align="bottom" 137 | { "Kafka-view" } 138 | } 139 | } 140 | } 141 | } 142 | 143 | fn navbar_top() -> PreEscaped { 144 | html! { 145 | ul class="nav navbar-top-links navbar-right" { 146 | li class="dropdown" { 147 | a class="dropdown-toggle" style="font-size: 12pt" data-toggle="dropdown" href="#" { 148 | i class="fa fa-question-circle-o fa-fw" {} 149 | i class="fa fa-caret-down" {} 150 | } 151 | ul class="dropdown-menu dropdown-user" { 152 | li { a href="https://github.com/fede1024/kafka-view" { 153 | i class="fa fa-github fa-fw" {} "GitHub" } 154 | } 155 | // li class="divider" {} 156 | // li { a href="#" {i class="fa fa-sign-out fa-fw" {} "Logout" } } 157 | } 158 | } 159 | } 160 | } 161 | } 162 | 163 | fn navbar_side() -> PreEscaped { 164 | html! 
{ 165 | div class="navbar-default sidebar" role="navigation" { 166 | div class="sidebar-nav navbar-collapse" { 167 | ul class="nav" id="side-menu" { 168 | li class="sidebar-search" { 169 | form action="/omnisearch" { 170 | div class="input-group custom-search-form" { 171 | input type="text" name="string" class="form-control" placeholder="Omnisearch..." { 172 | span class="input-group-btn" { 173 | button class="btn btn-default" type="submit" { 174 | i class="fa fa-search" {} 175 | } 176 | } 177 | } 178 | } 179 | } 180 | } 181 | // li a href="/" { i class="fa fa-dashboard fa-fw" {} " Home" } 182 | //li a href="/" style="font-size: 12pt" { i class="fa fa-info-circle fa-fw" {} " Home" } 183 | li { a href="/clusters/" style="font-size: 12pt" { i class="fa fa-server fa-fw" {} " Clusters" } } 184 | li { a href="/topics/" style="font-size: 12pt" { i class="fa fa-cubes fa-fw" {} " Topics" } } 185 | li { a href="/consumers/" style="font-size: 12pt" { i class="fa fa-exchange fa-fw" {} " Consumers" } } 186 | li { 187 | a href="#" style="font-size: 12pt" { 188 | i class="fa fa-gear fa-fw" {} " Internals" 189 | span class="fa arrow" {} 190 | } 191 | ul class="nav nav-second-level" { 192 | li { 193 | a href="/internals/caches" { 194 | i class="fa fa-microchip fa-fw" { {} " Caches" } 195 | } 196 | } 197 | li { 198 | a href="/internals/live_consumers" { 199 | i class="fa fa-microchip fa-fw" {} " Live consumers" 200 | } 201 | } 202 | // li { 203 | // a href="#" { "Third Level" span class="fa arrow" {} } 204 | // ul class="nav nav-third-level" { 205 | // li a href="#" "Third Level Item" 206 | // li a href="#" "Third Level Item" 207 | // li a href="#" "Third Level Item" 208 | // li a href="#" "Third Level Item" 209 | // } 210 | // } 211 | } 212 | } 213 | } 214 | } 215 | } 216 | } 217 | } 218 | 219 | fn body(page_title: &str, content: PreEscaped) -> PreEscaped { 220 | html! 
{ 221 | div id="wrapper" { 222 | // Navigation 223 | nav class="navbar navbar-default navbar-static-top" role="navigation" style="margin-bottom: 0" { 224 | (navbar_header()) 225 | (navbar_top()) 226 | (navbar_side()) 227 | } 228 | 229 | div id="page-wrapper" class="flex-container" { 230 | div class="row" { 231 | div class="col-md-12" { 232 | h1 class="page-header" { (page_title) } 233 | } 234 | } 235 | div class="row flex-body" { 236 | div class="col-md-12" { 237 | (content) 238 | } 239 | } 240 | div class="row" { 241 | div class="col-md-12" {} 242 | } 243 | div class="row flex-footer" style="border-top: 1px solid #eee; margin-top: 0.2in; padding-top: 0.05in" { 244 | div class="col-md-4" style="text-align: center;" { 245 | a href="https://github.com/fede1024/kafka-view" { 246 | "kafka-view " (option_env!("CARGO_PKG_VERSION").unwrap_or("")) } 247 | } 248 | div class="col-md-4" style="text-align: center;" { 249 | a href="https://www.rust-lang.org" { (RUST_VERSION) } 250 | } 251 | div class="col-md-4" style="text-align: center;" { 252 | a class="github-button" href="https://github.com/fede1024/kafka-view" 253 | data-icon="octicon-star" data-count-href="/fede1024/kafka-view/stargazers" 254 | data-show-count="true" 255 | data-count-aria-label="# stargazers on GitHub" // data-style="mega" 256 | aria-label="Star fede1024/kafka-view on GitHub" { "Star" } 257 | } 258 | } 259 | } 260 | } 261 | 262 | script src="/public/sb-admin-2/vendor/jquery/jquery.min.js" {} 263 | script src="/public/sb-admin-2/vendor/bootstrap/js/bootstrap.min.js" {} 264 | script src="/public/sb-admin-2/vendor/metisMenu/metisMenu.min.js" {} 265 | script src="/public/sb-admin-2/vendor/datatables/js/jquery.dataTables.min.js" {} 266 | script src="/public/sb-admin-2/vendor/datatables-plugins/dataTables.bootstrap.min.js" {} 267 | script src="/public/sb-admin-2/vendor/datatables-responsive/dataTables.responsive.js" {} 268 | script src="/public/sb-admin-2/dist/js/sb-admin-2.js" {} 269 | // (PreEscaped("