├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── build.gradle ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── kafka-mesos.properties ├── kafka-mesos.sh ├── lib └── util-mesos-0.1.0.0.jar ├── license.hdr ├── quickstart.sh ├── src ├── docker │ ├── Dockerfile │ ├── README.md │ ├── build-image.sh │ └── docker-entrypoint.sh ├── scala │ ├── iface │ │ ├── 0_10 │ │ │ └── ly │ │ │ │ └── stealth │ │ │ │ └── mesos │ │ │ │ └── kafka │ │ │ │ └── interface │ │ │ │ └── impl │ │ │ │ ├── AdminUtils.scala │ │ │ │ └── ZkUtils.scala │ │ ├── 0_11 │ │ │ └── ly │ │ │ │ └── stealth │ │ │ │ └── mesos │ │ │ │ └── kafka │ │ │ │ └── interface │ │ │ │ └── impl │ │ │ │ ├── AdminUtils.scala │ │ │ │ └── ZkUtils.scala │ │ ├── 0_8 │ │ │ └── ly │ │ │ │ └── stealth │ │ │ │ └── mesos │ │ │ │ └── kafka │ │ │ │ └── interface │ │ │ │ └── impl │ │ │ │ ├── AdminUtils.scala │ │ │ │ └── ZkUtils.scala │ │ ├── 0_9 │ │ │ └── ly │ │ │ │ └── stealth │ │ │ │ └── mesos │ │ │ │ └── kafka │ │ │ │ └── interface │ │ │ │ └── impl │ │ │ │ ├── AdminUtils.scala │ │ │ │ └── ZkUtils.scala │ │ ├── 1_1 │ │ │ └── ly │ │ │ │ └── stealth │ │ │ │ └── mesos │ │ │ │ └── kafka │ │ │ │ └── interface │ │ │ │ └── impl │ │ │ │ ├── AdminUtils.scala │ │ │ │ └── ZkUtils.scala │ │ └── base │ │ │ └── ly │ │ │ └── stealth │ │ │ └── mesos │ │ │ └── kafka │ │ │ └── interface │ │ │ ├── AdminUtilsProxy.scala │ │ │ └── ZkUtilsProxy.scala │ └── main │ │ └── ly │ │ └── stealth │ │ └── mesos │ │ └── kafka │ │ ├── ApiModel.scala │ │ ├── Broker.scala │ │ ├── Cluster.scala │ │ ├── Config.scala │ │ ├── Message.scala │ │ ├── Util.scala │ │ ├── cli │ │ ├── BrokerCli.scala │ │ ├── Cli.scala │ │ ├── SchedulerCli.scala │ │ └── TopicCli.scala │ │ ├── executor │ │ ├── BrokerServer.scala │ │ ├── Executor.scala │ │ └── MetricCollectorProxy.scala │ │ ├── json │ │ ├── JsonUtil.scala │ │ └── Model.scala │ │ └── scheduler │ │ ├── BrokerLifecycleManager.scala │ │ ├── BrokerLogManager.scala │ │ ├── Expr.scala │ │ ├── Extractors.scala │ │ ├── KafkaDistribution.scala │ │ ├── Quotas.scala │ │ ├── Rebalancer.scala │ │ ├── Registry.scala │ │ ├── Topics.scala │ │ ├── ZkUtilsWrapper.scala │ │ ├── http │ │ ├── BothParam.java │ │ ├── BothParamValueFactoryProvider.scala │ │ ├── HttpServer.scala │ │ └── api │ │ │ ├── BrokerApi.scala │ │ │ ├── Model.scala │ │ │ ├── PartitionApi.scala │ │ │ ├── QuotaApi.scala │ │ │ └── TopicApi.scala │ │ └── mesos │ │ ├── BrokerTaskManager.scala │ │ ├── MesosTaskFactory.scala │ │ ├── OfferManager.scala │ │ ├── Scheduler.scala │ │ └── TaskReconciler.scala └── test │ ├── ly │ └── stealth │ │ └── mesos │ │ └── kafka │ │ ├── BrokerTest.scala │ │ ├── CliTest.scala │ │ ├── ClusterTest.scala │ │ ├── ExecutorTest.scala │ │ ├── ExprTest.scala │ │ ├── HttpServerTest.scala │ │ ├── JsonTest.scala │ │ ├── KafkaBrokerServerTest.scala │ │ ├── KafkaMesosTestCase.scala │ │ ├── RebalancerTest.scala │ │ ├── SchedulerTest.scala │ │ ├── TopicsTest.scala │ │ └── UtilTest.scala │ └── resources │ ├── broker.json │ └── topic.json └── vagrant ├── README.md ├── Vagrantfile └── init.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | out/* 3 | project/* 4 | .gradle 5 | *.iml 6 | *.ipr 7 | .idea/* 8 | todo.txt 9 | 10 | src/docker/.docker 11 | vagrant/.vagrant 12 | 13 | kafka-mesos*.json 14 | kafka-mesos*.properties 15 | kafka-mesos*.jar 16 | jre*.tar.gz 17 | kafka*.tgz 18 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | language: java 17 | sudo: false 18 | dist: precise 19 | jdk: 20 | - oraclejdk8 21 | - oraclejdk7 22 | env: 23 | - KAFKA_VER=1.1.0 24 | - KAFKA_VER=0.11.0.0 25 | - KAFKA_VER=0.10.2.0 26 | - KAFKA_VER=0.9.0.1 27 | - KAFKA_VER=0.8.2.2 28 | matrix: 29 | exclude: 30 | - jdk: oraclejdk7 31 | env: KAFKA_VER=1.1.0 32 | - jdk: oraclejdk7 33 | env: KAFKA_VER=0.9.0.1 34 | - jdk: oraclejdk7 35 | env: KAFKA_VER=0.8.2.2 36 | 37 | before_cache: 38 | - rm -f $HOME/.gradle/caches/modules-2/modules-2.lock 39 | - rm -fr $HOME/.gradle/caches/*/plugin-resolution/ 40 | cache: 41 | directories: 42 | - $HOME/.m2/repository 43 | - $HOME/.gradle/caches/ 44 | - $HOME/.gradle/wrapper/ 45 | 46 | install: true 47 | before_script: 48 | - 'export JAVA_OPTS="-XX:MaxPermSize=512m -Xms1g -Xmx3g -Xss2m -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC"' 49 | script: ./gradlew jar -PkafkaVersion=$KAFKA_VER 50 | -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id 'scala' 3 | id 'idea' 4 | id 'distribution' 5 | id 'de.undercouch.download' version '3.2.0' 6 | id "com.github.hierynomus.license" version "0.13.1" 7 | } 8 | 9 | def scalaVersion = "2.11.8" 10 | def mesosVersion = "0.28.0" 11 | ext.kafkaVersion = project.hasProperty("kafkaVersion") ? project.getProperty("kafkaVersion") : "1.1.0" 12 | def kafkaMajorVersion="${kafkaVersion.split("\\.").take(2).join("_")}" 13 | def kafkaScalaVersion=scalaVersion.split("\\.").take(2).join(".") 14 | def jettyVersion = "9.0.4.v20130625" 15 | def jerseyVersion = "2.24" 16 | def jacksonVersion = "2.9.5" 17 | 18 | version = new File('src/scala/main/ly/stealth/mesos/kafka/Config.scala') 19 | .readLines() 20 | .findResult({ x -> x.contains("val value = \"") ? 
x.substring(x.indexOf("\"") + 1, x.lastIndexOf("\"")) : null }) 21 | 22 | jar.archiveName = "kafka-mesos-${version}-kafka_$kafkaScalaVersion-${kafkaVersion.replace('_', '.')}.jar" 23 | 24 | repositories { 25 | mavenCentral() 26 | flatDir { dirs 'lib' } 27 | } 28 | 29 | license { 30 | header rootProject.file('license.hdr') 31 | exclude "**/*.json" 32 | } 33 | 34 | buildDir = 'out/gradle' 35 | libsDirName = '../../' 36 | distsDirName = '../../' 37 | 38 | sourceSets { 39 | main { 40 | scala { 41 | srcDirs = [ 42 | 'src/scala/main', 43 | 'src/scala/iface/base/', 44 | "src/scala/iface/$kafkaMajorVersion/" 45 | ] 46 | } 47 | } 48 | test { 49 | scala { srcDirs = ['src/test'] } 50 | } 51 | } 52 | 53 | dependencies { 54 | compile "org.scala-lang:scala-library:$scalaVersion" 55 | compile "org.apache.mesos:mesos:$mesosVersion" 56 | compile name: "util-mesos-0.1.0.0" 57 | compile "com.google.protobuf:protobuf-java:2.5.0" 58 | compile "log4j:log4j:1.2.17" 59 | compile "net.sf.jopt-simple:jopt-simple:4.8" 60 | compile "org.eclipse.jetty:jetty-http:$jettyVersion" 61 | compile "org.eclipse.jetty:jetty-io:$jettyVersion" 62 | compile "org.eclipse.jetty:jetty-security:$jettyVersion" 63 | compile "org.eclipse.jetty:jetty-server:$jettyVersion" 64 | compile "org.eclipse.jetty:jetty-servlet:$jettyVersion" 65 | compile "org.eclipse.jetty:jetty-util:$jettyVersion" 66 | compile "org.glassfish.jersey.core:jersey-server:$jerseyVersion" 67 | compile "com.google.guava:guava:14.0.1" 68 | compile "org.glassfish.jersey.containers:jersey-container-servlet:$jerseyVersion" 69 | compile "org.glassfish.jersey.containers:jersey-container-servlet-core:$jerseyVersion" 70 | compile "org.glassfish.jersey.containers:jersey-container-jetty-http:$jerseyVersion" 71 | compile "org.glassfish.jersey.media:jersey-media-json-jackson:$jerseyVersion" 72 | compile "javax.servlet:javax.servlet-api:3.0.1" 73 | 74 | compile "com.fasterxml.jackson.core:jackson-core:$jacksonVersion" 75 | compile "com.fasterxml.jackson.core:jackson-databind:$jacksonVersion" 76 | compile "com.fasterxml.jackson.module:jackson-module-scala_$kafkaScalaVersion:$jacksonVersion" 77 | 78 | testCompile 'junit:junit:4.12' 79 | testCompile 'com.101tec:zkclient:0.7' 80 | 81 | compile ("org.apache.kafka:kafka_$kafkaScalaVersion:$kafkaVersion") { 82 | exclude group: "javax.jms" 83 | exclude group: "com.sun.jdmk" 84 | exclude group: "com.sun.jmx" 85 | exclude group: "junit" 86 | exclude group: "jline" 87 | exclude module: "com.yammer.metrics", group: "metrics-core" 88 | } 89 | 90 | runtime "com.yammer.metrics:metrics-core:2.2.0" 91 | } 92 | 93 | jar { 94 | dependsOn 'test' 95 | 96 | 97 | from(configurations.compile.collect { it.isDirectory() ? 
it : zipTree(it) }) { 98 | exclude "*" 99 | exclude "about_files/*" 100 | exclude "META-INF/*.SF" 101 | exclude "META-INF/*.DSA" 102 | exclude "META-INF/*.RSA" 103 | } 104 | 105 | manifest.attributes("Main-Class": "ly.stealth.mesos.kafka.cli.Cli") 106 | } 107 | 108 | task wrapper(type: Wrapper) { 109 | gradleVersion = '4.1' 110 | } 111 | 112 | distributions.main { 113 | contents { 114 | from(rootDir) { 115 | include jar.archiveName 116 | include "README.md" 117 | include "LICENSE" 118 | include "kafka-mesos.properties" 119 | include "kafka-mesos.sh" 120 | } 121 | } 122 | } 123 | 124 | distTar { 125 | dependsOn jar 126 | compression Compression.GZIP 127 | } 128 | 129 | import de.undercouch.gradle.tasks.download.Download 130 | task downloadKafka(type: Download) { 131 | src "https://archive.apache.org/dist/kafka/${kafkaVersion}/kafka_${kafkaScalaVersion}-${kafkaVersion}.tgz" 132 | dest rootDir 133 | } 134 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mesos/kafka/ffbf162530653fde4628edf266856a4e2faf3e4b/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | #Tue Jan 24 23:20:19 EST 2017 2 | distributionBase=GRADLE_USER_HOME 3 | distributionPath=wrapper/dists 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.7-bin.zip 7 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 10 | DEFAULT_JVM_OPTS="" 11 | 12 | APP_NAME="Gradle" 13 | APP_BASE_NAME=`basename "$0"` 14 | 15 | # Use the maximum available, or set MAX_FD != -1 to use that value. 16 | MAX_FD="maximum" 17 | 18 | warn ( ) { 19 | echo "$*" 20 | } 21 | 22 | die ( ) { 23 | echo 24 | echo "$*" 25 | echo 26 | exit 1 27 | } 28 | 29 | # OS specific support (must be 'true' or 'false'). 30 | cygwin=false 31 | msys=false 32 | darwin=false 33 | case "`uname`" in 34 | CYGWIN* ) 35 | cygwin=true 36 | ;; 37 | Darwin* ) 38 | darwin=true 39 | ;; 40 | MINGW* ) 41 | msys=true 42 | ;; 43 | esac 44 | 45 | # Attempt to set APP_HOME 46 | # Resolve links: $0 may be a link 47 | PRG="$0" 48 | # Need this for relative symlinks. 49 | while [ -h "$PRG" ] ; do 50 | ls=`ls -ld "$PRG"` 51 | link=`expr "$ls" : '.*-> \(.*\)$'` 52 | if expr "$link" : '/.*' > /dev/null; then 53 | PRG="$link" 54 | else 55 | PRG=`dirname "$PRG"`"/$link" 56 | fi 57 | done 58 | SAVED="`pwd`" 59 | cd "`dirname \"$PRG\"`/" >/dev/null 60 | APP_HOME="`pwd -P`" 61 | cd "$SAVED" >/dev/null 62 | 63 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 64 | 65 | # Determine the Java command to use to start the JVM. 
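# The block below resolves JAVACMD in order of preference: $JAVA_HOME/jre/sh/java
# (the layout IBM's JDK uses on AIX), then $JAVA_HOME/bin/java, and finally a bare
# `java` from the PATH, dying with a diagnostic if no usable executable is found.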
66 | if [ -n "$JAVA_HOME" ] ; then 67 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 68 | # IBM's JDK on AIX uses strange locations for the executables 69 | JAVACMD="$JAVA_HOME/jre/sh/java" 70 | else 71 | JAVACMD="$JAVA_HOME/bin/java" 72 | fi 73 | if [ ! -x "$JAVACMD" ] ; then 74 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 75 | 76 | Please set the JAVA_HOME variable in your environment to match the 77 | location of your Java installation." 78 | fi 79 | else 80 | JAVACMD="java" 81 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 82 | 83 | Please set the JAVA_HOME variable in your environment to match the 84 | location of your Java installation." 85 | fi 86 | 87 | # Increase the maximum file descriptors if we can. 88 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then 89 | MAX_FD_LIMIT=`ulimit -H -n` 90 | if [ $? -eq 0 ] ; then 91 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 92 | MAX_FD="$MAX_FD_LIMIT" 93 | fi 94 | ulimit -n $MAX_FD 95 | if [ $? -ne 0 ] ; then 96 | warn "Could not set maximum file descriptor limit: $MAX_FD" 97 | fi 98 | else 99 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 100 | fi 101 | fi 102 | 103 | # For Darwin, add options to specify how the application appears in the dock 104 | if $darwin; then 105 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 106 | fi 107 | 108 | # For Cygwin, switch paths to Windows format before running java 109 | if $cygwin ; then 110 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 111 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 112 | JAVACMD=`cygpath --unix "$JAVACMD"` 113 | 114 | # We build the pattern for arguments to be converted via cygpath 115 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 116 | SEP="" 117 | for dir in $ROOTDIRSRAW ; do 118 | ROOTDIRS="$ROOTDIRS$SEP$dir" 119 | SEP="|" 120 | done 121 | OURCYGPATTERN="(^($ROOTDIRS))" 122 | # Add a user-defined pattern to the cygpath arguments 123 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 124 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 125 | fi 126 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 127 | i=0 128 | for arg in "$@" ; do 129 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 130 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 131 | 132 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 133 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 134 | else 135 | eval `echo args$i`="\"$arg\"" 136 | fi 137 | i=$((i+1)) 138 | done 139 | case $i in 140 | (0) set -- ;; 141 | (1) set -- "$args0" ;; 142 | (2) set -- "$args0" "$args1" ;; 143 | (3) set -- "$args0" "$args1" "$args2" ;; 144 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 145 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 146 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 147 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 148 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 149 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 150 | esac 151 | fi 152 | 153 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules 154 | function splitJvmOpts() { 155 | JVM_OPTS=("$@") 156 | } 157 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS 
$GRADLE_OPTS 158 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" 159 | 160 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" 161 | -------------------------------------------------------------------------------- /kafka-mesos.properties: -------------------------------------------------------------------------------- 1 | # Scheduler options defaults. See `./kafka-mesos.sh help scheduler` for more details 2 | debug=true 3 | 4 | user=vagrant 5 | 6 | storage=zk:/mesos-kafka-scheduler 7 | 8 | master=master:5050 9 | 10 | zk=master:2181/chroot 11 | 12 | #for testing on the vagrant master via ./kafka-mesos.sh scheduler 13 | #you will eventually want to run this on a scheduler i.e marathon 14 | #change the IP to what is service discoverable & routable for your setup 15 | api=http://192.168.3.5:7000 16 | -------------------------------------------------------------------------------- /kafka-mesos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | jar='kafka-mesos*.jar' 3 | 4 | check_jar() { 5 | jars=$(find . -maxdepth 1 -name "$jar" | wc -l) 6 | 7 | if [ $jars -eq 0 ] 8 | then 9 | echo "$jar not found" 10 | exit 1 11 | elif [ $jars -gt 1 ] 12 | then 13 | echo "More than one $jar found" 14 | exit 1 15 | fi 16 | } 17 | 18 | check_jar 19 | exec java -jar $jar "$@" 20 | -------------------------------------------------------------------------------- /lib/util-mesos-0.1.0.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mesos/kafka/ffbf162530653fde4628edf266856a4e2faf3e4b/lib/util-mesos-0.1.0.0.jar -------------------------------------------------------------------------------- /license.hdr: -------------------------------------------------------------------------------- 1 | Licensed to the Apache Software Foundation (ASF) under one or more 2 | contributor license agreements. See the NOTICE file distributed with 3 | this work for additional information regarding copyright ownership. 4 | The ASF licenses this file to You under the Apache License, Version 2.0 5 | (the "License"); you may not use this file except in compliance with 6 | the License. You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | -------------------------------------------------------------------------------- /quickstart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # This script deploys Mesos Kafka on a minimesos cluster and adds a single broker 5 | # 6 | 7 | command -v minimesos >/dev/null 2>&1 || { echo "minimesos is not found. You can install it by running 'curl -sSL https://minimesos.org/install | sh'. For more info see https://www.minimesos.org" >&2; exit 1; } 8 | 9 | cp kafka-mesos.sh /tmp 10 | cp kafka-mesos*.jar /tmp 11 | 12 | if [ "$1" == "" ]; then 13 | echo "Usage: quickstart.sh " 14 | exit 1 15 | fi; 16 | 17 | IP=$1 18 | 19 | echo "Launching minimesos cluster..." 20 | minimesos up 21 | 22 | echo "Exporting minimesos environment variables..." 
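# `minimesos` with no arguments is expected to print `export MINIMESOS_*=...` statements;
# the command substitution below executes them in this shell so that $MINIMESOS_MASTER
# and $MINIMESOS_ZOOKEEPER are set for the steps that follow.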
23 | $(minimesos) 24 | 25 | MASTER_HOST_PORT=$(echo $MINIMESOS_MASTER | cut -d/ -f3) 26 | 27 | echo "Creating kafka-mesos.properties file in /tmp..." 28 | cat << EOF > /tmp/kafka-mesos.properties 29 | storage=file:kafka-mesos.json 30 | master=$MINIMESOS_ZOOKEEPER/mesos 31 | zk=$MASTER_HOST_PORT 32 | api=http://$IP:7000 33 | EOF 34 | 35 | echo "Installing Mesos Kafka..." 36 | ZK_HOST_PORT=$(echo $MINIMESOS_ZOOKEEPER | cut -d/ -f3) 37 | docker run -d -p 7000:7000 `whoami`/kafka-mesos --master=$MINIMESOS_ZOOKEEPER/mesos --api=http://$IP:7000 --zk=$ZK_HOST_PORT > /dev/null 2>&1 38 | 39 | echo "Waiting until the Mesos Kafka API is available..." 40 | while ! nc -z $IP 7000; do 41 | sleep 0.1 # wait a tenth of a second before checking again 42 | done 43 | echo "Mesos Kafka API is available..." 44 | 45 | echo "Adding broker 0..." 46 | cd /tmp 47 | ./kafka-mesos.sh broker add 0 48 | cd - > /dev/null 2>&1 49 | 50 | echo "Starting broker 0..." 51 | cd /tmp 52 | ./kafka-mesos.sh broker start 0 53 | cd - > /dev/null 2>&1 54 | 55 | minimesos state > state.json 56 | BROKER_IP=$(cat state.json | jq -r ".frameworks[0].tasks[] | select(.name=\"broker-0\").statuses[0].container_status.network_infos[0].ip_address") 57 | echo "broker-0 IP is $BROKER_IP" 58 | -------------------------------------------------------------------------------- /src/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Add mesos repo 4 | RUN \ 5 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E56151BF && \ 6 | echo deb http://repos.mesosphere.io/ubuntu trusty main > /etc/apt/sources.list.d/mesosphere.list 7 | 8 | # Install deps 9 | RUN \ 10 | apt-get update && \ 11 | apt-get install -qy \ 12 | git zip mc curl \ 13 | openjdk-8-jdk mesos libc6 14 | 15 | # Add kafka-mesos & kafka 16 | COPY .docker/kafka* /opt/kafka-mesos/ 17 | COPY ./docker-entrypoint.sh /docker-entrypoint.sh 18 | 19 | WORKDIR /opt/kafka-mesos 20 | EXPOSE 7000 21 | 22 | ENTRYPOINT ["/docker-entrypoint.sh"] 23 | 24 | CMD ["scheduler"] 25 | -------------------------------------------------------------------------------- /src/docker/README.md: -------------------------------------------------------------------------------- 1 | # Intro 2 | A Docker image can be built with the `build-image.sh` script. 3 | 4 | The image will have the following pre-installed components: 5 | - openjdk-8-jdk; 6 | - mesos native libs; 7 | 8 | The `/opt/kafka-mesos` folder will contain the following content: 9 | - kafka*.tgz - kafka distro; 10 | - kafka-mesos*.jar - kafka-mesos distro; 11 | - kafka-mesos.sh - shell script to run kafka-mesos; 12 | 13 | No default configuration file for kafka-mesos is provided. All configuration parameters should be 14 | specified via CLI options. 15 | 16 | # Building image 17 | The image is built by the `build-image.sh` script. See its help via `./build-image.sh -h`. 18 | 19 | Example: 20 | ``` 21 | # ./build-image.sh 22 | ``` 23 | Note: this does not push the image. Pushing should be done manually after testing. 24 | 25 | ## Using docker-machine 26 | If you have docker-machine installed with VirtualBox, you can run the following commands: 27 | ``` 28 | $ docker-machine create --driver virtualbox dev 29 | $ eval "$(docker-machine env dev)" 30 | $ ./build-image.sh 31 | ``` 32 | 33 | # Running image 34 | Run the image using Docker, providing the required networking parameters. The entry point passes the command through, 35 | so ./kafka-mesos.sh should be specified explicitly.
36 | 37 | Example: 38 | ``` 39 | # sudo docker run -it -p 7000:7000 --add-host=master:192.168.3.5 `whoami`/kafka-mesos ./kafka-mesos.sh scheduler \ 40 | --master=master:5050 --zk=master:2181 --api=http://<accessible-ip>:7000 --storage=zk:/kafka-mesos 41 | ``` 42 | where `<accessible-ip>` is the IP address of the running host, accessible from the Mesos nodes. 43 | 44 | Note: to inspect the image content, pass a shell (e.g. `bash`) as the command instead of `./kafka-mesos.sh`; 45 | the entry point will then give you shell access. 46 | 47 | # Running image in Marathon 48 | To run the image in Marathon it is better to pre-pull the image on all required nodes using: 49 | ``` 50 | # sudo docker pull <image-name> 51 | ``` 52 | 53 | After that, the scheduler process can be created through the Marathon API with a POST /v2/apps call. 54 | Example: 55 | ``` 56 | { 57 | "container": { 58 | "type": "DOCKER", 59 | "docker": { 60 | "network": "HOST", 61 | "image": "$imageName" 62 | } 63 | }, 64 | "id":"kafka-mesos-scheduler", 65 | "cpus": 0.5, 66 | "mem": 256, 67 | "ports": [7000], 68 | "cmd": "./kafka-mesos.sh scheduler --master=master:5050 --zk=master:2181 --api=http://master:7000 --storage=zk:/kafka-mesos", 69 | "instances": 1, 70 | "constraints": [["hostname", "LIKE", "master"]] 71 | } 72 | ``` 73 | 74 | The CLI should then be able to connect to the URL http://master:7000 from your API client machine 75 | (typically different from the Mesos slaves). Example: 76 | ``` 77 | # ./kafka-mesos.sh broker list --api=http://master:7000 78 | no brokers 79 | ``` 80 | Now you can configure and start brokers. 81 | -------------------------------------------------------------------------------- /src/docker/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ue 3 | 4 | base_dir=../.. 5 | tmp_dir=.docker 6 | mkdir -p $tmp_dir 7 | 8 | kafka_version=1.1.0 9 | scala_version=2.11 10 | docker_tag=`whoami`/kafka-mesos 11 | 12 | print_help() { 13 | echo "Usage: build-image.sh [options]" 14 | echo "Options:" 15 | echo " -t <tag> - Docker tag name. Default $docker_tag" 16 | echo " -k <version> - Kafka version. Default $kafka_version" 17 | echo " -h - Print this help" 18 | } 19 | 20 | # parse args 21 | while getopts ":t:k:h" opt; do 22 | case $opt in 23 | t) 24 | docker_tag=$OPTARG 25 | ;; 26 | k) 27 | kafka_version=$OPTARG 28 | ;; 29 | h) 30 | print_help 31 | exit 0 32 | ;; 33 | \?) 34 | echo "Invalid option: -$OPTARG" 35 | exit 1 36 | ;; 37 | :) 38 | echo "Option -$OPTARG requires an argument" 39 | exit 1 40 | esac 41 | done 42 | 43 | # download kafka 44 | kafka_file=kafka_$scala_version-$kafka_version.tgz 45 | wget -nc -P $tmp_dir https://archive.apache.org/dist/kafka/$kafka_version/$kafka_file 46 | find $tmp_dir -type f -name 'kafka*.tgz' -not -name "$kafka_file" -delete 47 | 48 | # build kafka-mesos-jar 49 | find $tmp_dir -type f -name 'kafka-mesos*.jar' -delete 50 | cd $base_dir && ./gradlew jar && cd - 51 | cp $base_dir/kafka-mesos*.jar $tmp_dir 52 | cp $base_dir/kafka-mesos*.sh $tmp_dir 53 | 54 | # build docker image 55 | sudo="sudo" 56 | if docker info &> /dev/null; then sudo=""; fi # skip sudo if possible 57 | $sudo docker build -t $docker_tag . 58 | -------------------------------------------------------------------------------- /src/docker/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | 4 | if [ "$1" == "scheduler" ]; then 5 | trap 'echo trap; kill -TERM $PID' TERM INT 6 | /usr/bin/java -jar /opt/kafka-mesos/kafka-mesos-*.jar "$@" & 7 | PID=$!
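# Signal-forwarding pattern: the trap above relays TERM/INT to the backgrounded java child.
# The first `wait` below returns when the child exits or a trapped signal fires; once the
# trap is reset, the second `wait` reaps the child so its exit status can be read.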
8 | wait $PID 9 | trap - TERM INT 10 | wait $PID 11 | EXIT_STATUS=$? 12 | else 13 | exec "$@" 14 | fi 15 | -------------------------------------------------------------------------------- /src/scala/iface/0_10/ly/stealth/mesos/kafka/interface/impl/AdminUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.admin.{BrokerMetadata, AdminUtils => KafkaAdminUtils} 21 | import java.util.Properties 22 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, FeatureSupport} 23 | import scala.collection.Map 24 | 25 | 26 | class AdminUtils(zkUrl: String) extends AdminUtilsProxy { 27 | private val DEFAULT_TIMEOUT_MS = 30000 28 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 29 | 30 | override def fetchAllTopicConfigs(): Map[String, Properties] = KafkaAdminUtils.fetchAllTopicConfigs(zkUtils) 31 | 32 | override def createOrUpdateTopicPartitionAssignmentPathInZK( 33 | topic: String, 34 | partitionReplicaAssignment: Map[Int, Seq[Int]], 35 | config: Properties, 36 | update: Boolean 37 | ): Unit = KafkaAdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, partitionReplicaAssignment, config, update) 38 | 39 | override def changeTopicConfig( 40 | topic: String, 41 | configs: Properties 42 | ): Unit = KafkaAdminUtils.changeTopicConfig(zkUtils, topic, configs) 43 | 44 | override def deleteTopic(topicToDelete: String): Unit = 45 | KafkaAdminUtils.deleteTopic(zkUtils, topicToDelete) 46 | 47 | override def fetchEntityConfig( 48 | entityType: String, 49 | entity: String 50 | ): Properties = KafkaAdminUtils.fetchEntityConfig(zkUtils, entityType, entity) 51 | 52 | override def changeClientIdConfig( 53 | clientId: String, 54 | configs: Properties 55 | ): Unit = KafkaAdminUtils.changeClientIdConfig(zkUtils, clientId, configs) 56 | 57 | override def fetchAllEntityConfigs(entityType: String): Map[String, Properties] 58 | = KafkaAdminUtils.fetchAllEntityConfigs(zkUtils, entityType) 59 | 60 | override def assignReplicasToBrokers( 61 | ids: Seq[Int], 62 | nPartitions: Int, 63 | replicationFactor: Int, 64 | fixedStartIndex: Int, 65 | startPartitionId: Int 66 | ): Map[Int, Seq[Int]] = { 67 | val md = ids.map(BrokerMetadata(_, None)) 68 | KafkaAdminUtils.assignReplicasToBrokers(md, nPartitions, replicationFactor, fixedStartIndex, startPartitionId) 69 | } 70 | 71 | override val features: FeatureSupport = FeatureSupport(quotas = true, genericEntityConfigs = true) 72 | } 73 | 
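Each Kafka release line gets its own `AdminUtils`/`ZkUtils` implementation under `src/scala/iface/<version>/`, and `build.gradle` compiles exactly one of them (selected via `kafkaMajorVersion`) together with `src/scala/main`, so scheduler code stays version-agnostic by going through the factories in `iface/base`. A minimal usage sketch of the proxy API declared in `AdminUtilsProxy.scala` (the ZooKeeper URL, broker ids, and topic name are made-up illustration values):
```scala
import ly.stealth.mesos.kafka.interface.AdminUtilsProxy

object CreateTopicSketch extends App {
  // Resolves to whichever AdminUtils impl was compiled in for the chosen Kafka version.
  val admin = AdminUtilsProxy("master:2181")

  // Spread 4 partitions with replication factor 2 over brokers 0..2,
  // then write the assignment to ZooKeeper to create the topic.
  val assignment = admin.assignReplicasToBrokers(Seq(0, 1, 2), nPartitions = 4, replicationFactor = 2)
  admin.createOrUpdateTopicPartitionAssignmentPathInZK("example-topic", assignment)

  // Feature flags let callers degrade gracefully on 0.8, which lacks quota support.
  if (admin.features.quotas) println("quota configs are supported on this Kafka version")
}
```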
-------------------------------------------------------------------------------- /src/scala/iface/0_10/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import ly.stealth.mesos.kafka.interface.ZkUtilsProxy 23 | import scala.collection.{Map, Set, mutable} 24 | 25 | class ZkUtils(zkUrl: String) extends ZkUtilsProxy { 26 | private val DEFAULT_TIMEOUT_MS = 30000 27 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 28 | 29 | override def getAllTopics(): Seq[String] = zkUtils.getAllTopics() 30 | 31 | override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 32 | = zkUtils.getReplicaAssignmentForTopics(topics) 33 | 34 | override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 35 | = zkUtils.getPartitionsBeingReassigned() 36 | 37 | override def getReplicasForPartition( 38 | topic: String, 39 | partition: Int 40 | ): Seq[Int] = zkUtils.getReplicasForPartition(topic, partition) 41 | 42 | override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 43 | = zkUtils.updatePartitionReassignmentData(partitionsToBeReassigned) 44 | 45 | override def createPersistentPath( 46 | path: String, 47 | data: String 48 | ): Unit = zkUtils.createPersistentPath(path, data) 49 | 50 | override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]] 51 | = zkUtils.getPartitionAssignmentForTopics(topics) 52 | 53 | override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 54 | = zkUtils.getPartitionLeaderAndIsrForTopics(null, topicAndPartitions) 55 | 56 | override def getSortedBrokerList(): Seq[Int] = zkUtils.getSortedBrokerList() 57 | } 58 | -------------------------------------------------------------------------------- /src/scala/iface/0_11/ly/stealth/mesos/kafka/interface/impl/AdminUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.admin.{BrokerMetadata, AdminUtils => KafkaAdminUtils} 21 | import java.util.Properties 22 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, FeatureSupport} 23 | import scala.collection.Map 24 | 25 | 26 | class AdminUtils(zkUrl: String) extends AdminUtilsProxy { 27 | private val DEFAULT_TIMEOUT_MS = 30000 28 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 29 | 30 | override def fetchAllTopicConfigs(): Map[String, Properties] = KafkaAdminUtils.fetchAllTopicConfigs(zkUtils) 31 | 32 | override def createOrUpdateTopicPartitionAssignmentPathInZK( 33 | topic: String, 34 | partitionReplicaAssignment: Map[Int, Seq[Int]], 35 | config: Properties, 36 | update: Boolean 37 | ): Unit = KafkaAdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, partitionReplicaAssignment, config, update) 38 | 39 | override def changeTopicConfig( 40 | topic: String, 41 | configs: Properties 42 | ): Unit = KafkaAdminUtils.changeTopicConfig(zkUtils, topic, configs) 43 | 44 | override def deleteTopic(topicToDelete: String): Unit = 45 | KafkaAdminUtils.deleteTopic(zkUtils, topicToDelete) 46 | 47 | override def fetchEntityConfig( 48 | entityType: String, 49 | entity: String 50 | ): Properties = KafkaAdminUtils.fetchEntityConfig(zkUtils, entityType, entity) 51 | 52 | override def changeClientIdConfig( 53 | clientId: String, 54 | configs: Properties 55 | ): Unit = KafkaAdminUtils.changeClientIdConfig(zkUtils, clientId, configs) 56 | 57 | override def fetchAllEntityConfigs(entityType: String): Map[String, Properties] 58 | = KafkaAdminUtils.fetchAllEntityConfigs(zkUtils, entityType) 59 | 60 | override def assignReplicasToBrokers( 61 | ids: Seq[Int], 62 | nPartitions: Int, 63 | replicationFactor: Int, 64 | fixedStartIndex: Int, 65 | startPartitionId: Int 66 | ): Map[Int, Seq[Int]] = { 67 | val md = ids.map(BrokerMetadata(_, None)) 68 | KafkaAdminUtils.assignReplicasToBrokers(md, nPartitions, replicationFactor, fixedStartIndex, startPartitionId) 69 | } 70 | 71 | override val features: FeatureSupport = FeatureSupport(quotas = true, genericEntityConfigs = true) 72 | } 73 | -------------------------------------------------------------------------------- /src/scala/iface/0_11/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. 
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import ly.stealth.mesos.kafka.interface.ZkUtilsProxy 23 | import scala.collection.{Map, Set, mutable} 24 | 25 | class ZkUtils(zkUrl: String) extends ZkUtilsProxy { 26 | private val DEFAULT_TIMEOUT_MS = 30000 27 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 28 | 29 | override def getAllTopics(): Seq[String] = zkUtils.getAllTopics() 30 | 31 | override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 32 | = zkUtils.getReplicaAssignmentForTopics(topics) 33 | 34 | override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 35 | = zkUtils.getPartitionsBeingReassigned() 36 | 37 | override def getReplicasForPartition( 38 | topic: String, 39 | partition: Int 40 | ): Seq[Int] = zkUtils.getReplicasForPartition(topic, partition) 41 | 42 | override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 43 | = zkUtils.updatePartitionReassignmentData(partitionsToBeReassigned) 44 | 45 | override def createPersistentPath( 46 | path: String, 47 | data: String 48 | ): Unit = zkUtils.createPersistentPath(path, data) 49 | 50 | override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]] 51 | = zkUtils.getPartitionAssignmentForTopics(topics) 52 | 53 | override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 54 | = zkUtils.getPartitionLeaderAndIsrForTopics(topicAndPartitions) 55 | 56 | override def getSortedBrokerList(): Seq[Int] = zkUtils.getSortedBrokerList() 57 | } 58 | -------------------------------------------------------------------------------- /src/scala/iface/0_8/ly/stealth/mesos/kafka/interface/impl/AdminUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import java.util.Properties 20 | import kafka.admin.{AdminUtils => KafkaAdminUtils} 21 | import kafka.utils.ZKStringSerializer 22 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, FeatureSupport} 23 | import org.I0Itec.zkclient.ZkClient 24 | import scala.collection.Map 25 | 26 | class AdminUtils(zkUrl: String) extends AdminUtilsProxy { 27 | private val DEFAULT_TIMEOUT_MS = 30000 28 | private val zkClient = new ZkClient(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, ZKStringSerializer) 29 | 30 | override def fetchAllTopicConfigs(): Map[String, Properties] = KafkaAdminUtils.fetchAllTopicConfigs(zkClient) 31 | 32 | override def createOrUpdateTopicPartitionAssignmentPathInZK( 33 | topic: String, 34 | partitionReplicaAssignment: Map[Int, Seq[Int]], 35 | config: Properties, 36 | update: Boolean 37 | ): Unit = KafkaAdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, partitionReplicaAssignment, config, update) 38 | 39 | override def changeTopicConfig( 40 | topic: String, 41 | configs: Properties 42 | ): Unit = KafkaAdminUtils.changeTopicConfig(zkClient, topic, configs) 43 | 44 | override def deleteTopic(topicToDelete: String): Unit = 45 | KafkaAdminUtils.deleteTopic(zkClient, topicToDelete) 46 | 47 | override def fetchEntityConfig( 48 | entityType: String, 49 | entity: String 50 | ): Properties = { 51 | if (entityType == "topics") 52 | KafkaAdminUtils.fetchTopicConfig(zkClient, entity) 53 | else 54 | new Properties() 55 | } 56 | 57 | override def changeClientIdConfig( 58 | clientId: String, 59 | configs: Properties 60 | ): Unit = {} 61 | 62 | override def fetchAllEntityConfigs(entityType: String): Map[String, Properties] = { 63 | if (entityType == "topics") 64 | KafkaAdminUtils.fetchAllTopicConfigs(zkClient) 65 | else 66 | Map() 67 | } 68 | 69 | override def assignReplicasToBrokers( 70 | ids: Seq[Int], 71 | nPartitions: Int, 72 | replicationFactor: Int, 73 | fixedStartIndex: Int, 74 | startPartitionId: Int 75 | ): Map[Int, Seq[Int]] = KafkaAdminUtils.assignReplicasToBrokers(ids, nPartitions, replicationFactor, fixedStartIndex, startPartitionId) 76 | 77 | override val features: FeatureSupport = FeatureSupport(quotas = false, genericEntityConfigs = false) 78 | } 79 | -------------------------------------------------------------------------------- /src/scala/iface/0_8/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils, ZKStringSerializer} 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import ly.stealth.mesos.kafka.interface.ZkUtilsProxy 23 | import org.I0Itec.zkclient.ZkClient 24 | import scala.collection.{Map, Set, mutable} 25 | 26 | class ZkUtils(zkUrl: String) extends ZkUtilsProxy { 27 | private val DEFAULT_TIMEOUT_MS = 30000 28 | private val zkClient = new ZkClient(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, ZKStringSerializer) 29 | 30 | override def getAllTopics(): Seq[String] = KafkaZkUtils.getAllTopics(zkClient) 31 | 32 | override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 33 | = KafkaZkUtils.getReplicaAssignmentForTopics(zkClient, topics) 34 | 35 | override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 36 | = KafkaZkUtils.getPartitionsBeingReassigned(zkClient) 37 | 38 | override def getReplicasForPartition( 39 | topic: String, 40 | partition: Int 41 | ): Seq[Int] = KafkaZkUtils.getReplicasForPartition(zkClient, topic, partition) 42 | 43 | override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 44 | = KafkaZkUtils.updatePartitionReassignmentData(zkClient, partitionsToBeReassigned) 45 | 46 | override def createPersistentPath( 47 | path: String, 48 | data: String 49 | ): Unit = KafkaZkUtils.createPersistentPath(zkClient, path, data) 50 | 51 | override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]] 52 | = KafkaZkUtils.getPartitionAssignmentForTopics(zkClient, topics) 53 | 54 | override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 55 | = KafkaZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, topicAndPartitions) 56 | 57 | override def getSortedBrokerList(): Seq[Int] 58 | = KafkaZkUtils.getSortedBrokerList(zkClient) 59 | } 60 | -------------------------------------------------------------------------------- /src/scala/iface/0_9/ly/stealth/mesos/kafka/interface/impl/AdminUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.admin.{AdminUtils => KafkaAdminUtils} 21 | import java.util.Properties 22 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, FeatureSupport} 23 | import scala.collection.Map 24 | 25 | 26 | class AdminUtils(zkUrl: String) extends AdminUtilsProxy { 27 | private val DEFAULT_TIMEOUT_MS = 30000 28 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 29 | 30 | override def fetchAllTopicConfigs(): Map[String, Properties] = KafkaAdminUtils.fetchAllTopicConfigs(zkUtils) 31 | 32 | override def createOrUpdateTopicPartitionAssignmentPathInZK( 33 | topic: String, 34 | partitionReplicaAssignment: Map[Int, Seq[Int]], 35 | config: Properties, 36 | update: Boolean 37 | ): Unit = KafkaAdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, partitionReplicaAssignment, config, update) 38 | 39 | override def changeTopicConfig( 40 | topic: String, 41 | configs: Properties 42 | ): Unit = KafkaAdminUtils.changeTopicConfig(zkUtils, topic, configs) 43 | 44 | override def deleteTopic(topicToDelete: String): Unit = 45 | KafkaAdminUtils.deleteTopic(zkUtils, topicToDelete) 46 | 47 | override def fetchEntityConfig( 48 | entityType: String, 49 | entity: String 50 | ): Properties = KafkaAdminUtils.fetchEntityConfig(zkUtils, entityType, entity) 51 | 52 | override def changeClientIdConfig( 53 | clientId: String, 54 | configs: Properties 55 | ): Unit = KafkaAdminUtils.changeClientIdConfig(zkUtils, clientId, configs) 56 | 57 | override def fetchAllEntityConfigs(entityType: String): Map[String, Properties] 58 | = KafkaAdminUtils.fetchAllEntityConfigs(zkUtils, entityType) 59 | 60 | override def assignReplicasToBrokers( 61 | ids: Seq[Int], 62 | nPartitions: Int, 63 | replicationFactor: Int, 64 | fixedStartIndex: Int, 65 | startPartitionId: Int 66 | ): Map[Int, Seq[Int]] = { 67 | KafkaAdminUtils.assignReplicasToBrokers(ids, nPartitions, replicationFactor, fixedStartIndex, startPartitionId) 68 | } 69 | 70 | override val features: FeatureSupport = FeatureSupport(quotas = true, genericEntityConfigs = true) 71 | } 72 | -------------------------------------------------------------------------------- /src/scala/iface/0_9/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import ly.stealth.mesos.kafka.interface.ZkUtilsProxy 23 | import scala.collection.{Map, Set, mutable} 24 | 25 | class ZkUtils(zkUrl: String) extends ZkUtilsProxy { 26 | private val DEFAULT_TIMEOUT_MS = 30000 27 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 28 | 29 | override def getAllTopics(): Seq[String] = zkUtils.getAllTopics() 30 | 31 | override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 32 | = zkUtils.getReplicaAssignmentForTopics(topics) 33 | 34 | override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 35 | = zkUtils.getPartitionsBeingReassigned() 36 | 37 | override def getReplicasForPartition( 38 | topic: String, 39 | partition: Int 40 | ): Seq[Int] = zkUtils.getReplicasForPartition(topic, partition) 41 | 42 | override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 43 | = zkUtils.updatePartitionReassignmentData(partitionsToBeReassigned) 44 | 45 | override def createPersistentPath( 46 | path: String, 47 | data: String 48 | ): Unit = zkUtils.createPersistentPath(path, data) 49 | 50 | override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]] 51 | = zkUtils.getPartitionAssignmentForTopics(topics) 52 | 53 | override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 54 | = zkUtils.getPartitionLeaderAndIsrForTopics(null, topicAndPartitions) 55 | 56 | override def getSortedBrokerList(): Seq[Int] = zkUtils.getSortedBrokerList() 57 | } 58 | -------------------------------------------------------------------------------- /src/scala/iface/1_1/ly/stealth/mesos/kafka/interface/impl/AdminUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.admin.{BrokerMetadata, AdminUtils => KafkaAdminUtils} 21 | import kafka.zk.AdminZkClient 22 | import java.util.Properties 23 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, FeatureSupport} 24 | import scala.collection.Map 25 | 26 | 27 | class AdminUtils(zkUrl: String) extends AdminUtilsProxy { 28 | private val DEFAULT_TIMEOUT_MS = 30000 29 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 30 | 31 | override def fetchAllTopicConfigs(): Map[String, Properties] = KafkaAdminUtils.fetchAllTopicConfigs(zkUtils) 32 | 33 | override def createOrUpdateTopicPartitionAssignmentPathInZK( 34 | topic: String, 35 | partitionReplicaAssignment: Map[Int, Seq[Int]], 36 | config: Properties, 37 | update: Boolean 38 | ): Unit = KafkaAdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, partitionReplicaAssignment, config, update) 39 | 40 | override def changeTopicConfig( 41 | topic: String, 42 | configs: Properties 43 | ): Unit = KafkaAdminUtils.changeTopicConfig(zkUtils, topic, configs) 44 | 45 | override def deleteTopic(topicToDelete: String): Unit = 46 | KafkaAdminUtils.deleteTopic(zkUtils, topicToDelete) 47 | 48 | override def fetchEntityConfig( 49 | entityType: String, 50 | entity: String 51 | ): Properties = KafkaAdminUtils.fetchEntityConfig(zkUtils, entityType, entity) 52 | 53 | override def changeClientIdConfig( 54 | clientId: String, 55 | configs: Properties 56 | ): Unit = KafkaAdminUtils.changeClientIdConfig(zkUtils, clientId, configs) 57 | 58 | override def fetchAllEntityConfigs(entityType: String): Map[String, Properties] 59 | = KafkaAdminUtils.fetchAllEntityConfigs(zkUtils, entityType) 60 | 61 | override def assignReplicasToBrokers( 62 | ids: Seq[Int], 63 | nPartitions: Int, 64 | replicationFactor: Int, 65 | fixedStartIndex: Int, 66 | startPartitionId: Int 67 | ): Map[Int, Seq[Int]] = { 68 | val md = ids.map(BrokerMetadata(_, None)) 69 | KafkaAdminUtils.assignReplicasToBrokers(md, nPartitions, replicationFactor, fixedStartIndex, startPartitionId) 70 | } 71 | 72 | override val features: FeatureSupport = FeatureSupport(quotas = true, genericEntityConfigs = true) 73 | } 74 | -------------------------------------------------------------------------------- /src/scala/iface/1_1/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface.impl 18 | 19 | import kafka.utils.{ZkUtils => KafkaZkUtils} 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import ly.stealth.mesos.kafka.interface.ZkUtilsProxy 23 | import scala.collection.{Map, Set, mutable} 24 | 25 | class ZkUtils(zkUrl: String) extends ZkUtilsProxy { 26 | private val DEFAULT_TIMEOUT_MS = 30000 27 | private val zkUtils = KafkaZkUtils(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, isZkSecurityEnabled = false) 28 | 29 | override def getAllTopics(): Seq[String] = zkUtils.getAllTopics() 30 | 31 | override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 32 | = zkUtils.getReplicaAssignmentForTopics(topics) 33 | 34 | override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 35 | = zkUtils.getPartitionsBeingReassigned() 36 | 37 | override def getReplicasForPartition( 38 | topic: String, 39 | partition: Int 40 | ): Seq[Int] = zkUtils.getReplicasForPartition(topic, partition) 41 | 42 | override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 43 | = zkUtils.updatePartitionReassignmentData(partitionsToBeReassigned) 44 | 45 | override def createPersistentPath( 46 | path: String, 47 | data: String 48 | ): Unit = zkUtils.createPersistentPath(path, data) 49 | 50 | override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]] 51 | = zkUtils.getPartitionAssignmentForTopics(topics) 52 | 53 | override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 54 | = zkUtils.getPartitionLeaderAndIsrForTopics(topicAndPartitions) 55 | 56 | override def getSortedBrokerList(): Seq[Int] = zkUtils.getSortedBrokerList() 57 | } 58 | -------------------------------------------------------------------------------- /src/scala/iface/base/ly/stealth/mesos/kafka/interface/AdminUtilsProxy.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface 18 | 19 | import ly.stealth.mesos.kafka.interface.impl.AdminUtils 20 | import java.util.Properties 21 | import scala.collection.Map 22 | 23 | case class FeatureSupport( 24 | quotas: Boolean, 25 | genericEntityConfigs: Boolean 26 | ) 27 | 28 | trait AdminUtilsProxy { 29 | def fetchAllTopicConfigs(): Map[String, Properties] 30 | 31 | def createOrUpdateTopicPartitionAssignmentPathInZK(topic: String, 32 | partitionReplicaAssignment: Map[Int, Seq[Int]], 33 | config: Properties = new Properties, 34 | update: Boolean = false) 35 | 36 | def changeTopicConfig(topic: String, configs: Properties) 37 | 38 | def deleteTopic(topic: String) 39 | 40 | def fetchEntityConfig(entityType: String, entity: String): Properties 41 | 42 | def changeClientIdConfig(clientId: String, configs: Properties) 43 | 44 | def fetchAllEntityConfigs(entityType: String): Map[String, Properties] 45 | 46 | def assignReplicasToBrokers(ids: Seq[Int], nPartitions: Int, 47 | replicationFactor: Int, 48 | fixedStartIndex: Int = -1, 49 | startPartitionId: Int = -1): Map[Int, Seq[Int]] 50 | 51 | val features: FeatureSupport 52 | } 53 | 54 | object AdminUtilsProxy { 55 | def apply(zkUrl: String) = new AdminUtils(zkUrl) 56 | } 57 | -------------------------------------------------------------------------------- /src/scala/iface/base/ly/stealth/mesos/kafka/interface/ZkUtilsProxy.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.interface 18 | 19 | import ly.stealth.mesos.kafka.interface.impl.ZkUtils 20 | import kafka.common.TopicAndPartition 21 | import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext} 22 | import scala.collection.{Map, Set, mutable} 23 | 24 | trait ZkUtilsProxy { 25 | def getAllTopics(): Seq[String] 26 | def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] 27 | def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext] 28 | def getReplicasForPartition(topic: String, partition: Int): Seq[Int] 29 | def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit 30 | def createPersistentPath(path: String, data: String = ""): Unit 31 | def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, collection.Map[Int, Seq[Int]]] 32 | def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] 33 | def getSortedBrokerList(): Seq[Int] 34 | } 35 | 36 | object ZkUtilsProxy { 37 | def apply(zkUrl: String) = new ZkUtils(zkUrl) 38 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/ApiModel.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | import ly.stealth.mesos.kafka.Broker.Metrics 20 | import scala.collection.{Map, Seq} 21 | 22 | case class BrokerStatusResponse(brokers: Seq[Broker]) 23 | case class BrokerStartResponse(brokers: Seq[Broker], status: String, message: Option[String] = None) 24 | case class BrokerRemoveResponse(ids: Seq[String]) 25 | 26 | case class FrameworkMessage(metrics: Option[Metrics] = None, log: Option[LogResponse] = None) 27 | case class ListTopicsResponse(topics: Seq[Topic]) 28 | 29 | case class HttpLogResponse(status: String, content: String) 30 | case class RebalanceStartResponse(status: String, state: String, error: Option[String]) 31 | case class HttpErrorResponse(code: Int, error: String) 32 | 33 | 34 | case class Partition( 35 | id: Int = 0, 36 | replicas: Seq[Int] = null, 37 | isr: Seq[Int] = null, 38 | leader: Int = 0, 39 | expectedLeader: Int = 0) 40 | 41 | case class Topic( 42 | name: String = null, 43 | partitions: Map[Int, Seq[Int]] = Map(), 44 | options: Map[String, String] = Map() 45 | ) { 46 | def partitionsState: String = { 47 | var s: String = "" 48 | for ((partition, brokers) <- partitions.toSeq.sortBy(_._1)) { 49 | if (!s.isEmpty) s += ", " 50 | s += partition + ":[" + brokers.mkString(",") + "]" 51 | } 52 | s 53 | } 54 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/Cluster.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import java.util 21 | import scala.collection.JavaConversions._ 22 | import java.util.Collections 23 | import java.io.{File, FileWriter} 24 | import ly.stealth.mesos.kafka.json.JsonUtil 25 | import ly.stealth.mesos.kafka.scheduler.{Quotas, Rebalancer, Topics, ZKStringSerializer} 26 | import ly.stealth.mesos.kafka.scheduler.mesos.KafkaMesosScheduler 27 | import org.I0Itec.zkclient.ZkClient 28 | import org.I0Itec.zkclient.exception.ZkNodeExistsException 29 | 30 | class Cluster { 31 | val version: String = SchedulerVersion.value 32 | private val brokers: util.List[Broker] = new util.concurrent.CopyOnWriteArrayList[Broker]() 33 | private[kafka] var rebalancer: Rebalancer = new Rebalancer() 34 | private[kafka] var topics: Topics = new Topics() 35 | private[kafka] var quotas: Quotas = new Quotas() 36 | private[kafka] var frameworkId: String = null 37 | 38 | def getBrokers:util.List[Broker] = Collections.unmodifiableList(brokers) 39 | 40 | def getBroker(id: Int): Broker = { 41 | for (broker <- brokers) 42 | if (broker.id == id) return broker 43 | null 44 | } 45 | 46 | def getBrokerByTaskId(taskId: String): Option[Broker] = { 47 | for (broker <- brokers) 48 | if (broker.task != null && broker.task.id == taskId) 49 | return Some(broker) 50 | None 51 | } 52 | 53 | def addBroker(broker: Broker): Broker = { 54 | brokers.add(broker) 55 | broker 56 | } 57 | 58 | def removeBroker(broker: Broker): Unit = brokers.remove(broker) 59 | 60 | def clear(): Unit = brokers.clear() 61 | def save() = Cluster.storage.save(this) 62 | } 63 | 64 | object Cluster { 65 | var storage: Storage = newStorage(Config.storage) 66 | def load() = storage.load() 67 | 68 | def newStorage(s: String): Storage = { 69 | if (s.startsWith("file:")) return new FsStorage(new File(s.substring("file:".length))) 70 | else if (s.startsWith("zk:")) return new ZkStorage(s.substring("zk:".length)) 71 | throw new IllegalStateException("Unsupported storage " + s) 72 | } 73 | 74 | abstract class Storage { 75 | def load(): Cluster = { 76 | val json: String = loadJson 77 | if (json == null) return new Cluster() 78 | 79 | val cluster = JsonUtil.fromJson[Cluster](json) 80 | save(cluster) 81 | 82 | cluster 83 | } 84 | 85 | def save(cluster: Cluster): Unit = { 86 | saveJson(JsonUtil.toJson(cluster)) 87 | } 88 | 89 | protected def loadJson: String 90 | protected def saveJson(json: String): Unit 91 | } 92 | 93 | class FsStorage(val file: File) extends Storage { 94 | protected def loadJson: String = { 95 | if (!file.exists) return null 96 | val source = scala.io.Source.fromFile(file) 97 | try source.mkString finally source.close() 98 | } 99 | 100 | protected def saveJson(json: String): Unit = { 101 | val writer = new FileWriter(file) 102 | try { writer.write(json) } 103 | finally { writer.close() } 104 | } 105 | } 106 | 107 | object FsStorage { 108 | val DEFAULT_FILE: File = new File("kafka-mesos.json") 109 | } 110 | 111 | class ZkStorage(val path: String) extends Storage { 112 | createChrootIfRequired() 113 | val zkClient = new ZkClient(Config.zk, 30000, 30000, ZKStringSerializer) 114 | 115 | private def createChrootIfRequired(): Unit = { 116 | val slashIdx: Int = Config.zk.indexOf('/') 117 | if (slashIdx == -1) return 118 | 119 | val chroot = Config.zk.substring(slashIdx) 120 | val zkConnect = Config.zk.substring(0, slashIdx) 121 | 122 | val client = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer) 123 | try { client.createPersistent(chroot, true) } 124 | finally { client.close() } 125 | } 
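// Example (illustrative values, not from the original source): given Config.zk = "master:2181/kafka-mesos", the code above splits at the first '/' into zkConnect = "master:2181" and chroot = "/kafka-mesos", then pre-creates the chroot with a short-lived un-chrooted client so that the main ZkClient, which connects with the full chrooted connect string, finds its root path already present.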
126 | 127 | protected def loadJson: String = { 128 | zkClient.readData(path, true).asInstanceOf[String] 129 | } 130 | 131 | protected def saveJson(json: String): Unit = { 132 | if (zkClient.exists(path)) { 133 | zkClient.writeData(path, json) 134 | } 135 | else { 136 | try { zkClient.createPersistent(path, json) } 137 | catch { case e: ZkNodeExistsException => zkClient.writeData(path, json) } 138 | } 139 | } 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/Config.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import java.io.{File, FileInputStream} 21 | import java.util.Properties 22 | import java.net.URI 23 | import ly.stealth.mesos.kafka.Util.BindAddress 24 | import net.elodina.mesos.util.{Period, Version} 25 | 26 | object SchedulerVersion { 27 | val value = "0.10.1.0-SNAPSHOT" 28 | } 29 | 30 | object Config { 31 | val DEFAULT_FILE = new File("kafka-mesos.properties") 32 | 33 | var debug: Boolean = false 34 | var storage: String = "file:kafka-mesos.json" 35 | 36 | var master: String = null 37 | var principal: String = null 38 | var secret: String = null 39 | var user: String = null 40 | 41 | var frameworkName: String = "kafka" 42 | var frameworkRole: String = "*" 43 | var frameworkTimeout: Period = new Period("30d") 44 | 45 | var reconciliationTimeout = new Period("5m") 46 | var reconciliationAttempts = 6 47 | var reconciliationInterval = new Period((reconciliationTimeout.ms() * reconciliationAttempts) + "ms") 48 | 49 | var jre: File = null 50 | var log: File = null 51 | var api: String = null 52 | var bindAddress: BindAddress = null 53 | var zk: String = null 54 | 55 | def apiPort: Int = { 56 | val port = new URI(api).getPort 57 | if (port == -1) 80 else port 58 | } 59 | 60 | def replaceApiPort(port: Int): Unit = { 61 | val prev: URI = new URI(api) 62 | api = "" + new URI( 63 | prev.getScheme, prev.getUserInfo, 64 | prev.getHost, port, 65 | prev.getPath, prev.getQuery, prev.getFragment 66 | ) 67 | } 68 | 69 | private[kafka] def load(file: File): Unit = { 70 | val props: Properties = new Properties() 71 | val stream: FileInputStream = new FileInputStream(file) 72 | 73 | props.load(stream) 74 | stream.close() 75 | 76 | if (props.containsKey("debug")) debug = java.lang.Boolean.valueOf(props.getProperty("debug")) 77 | if (props.containsKey("storage")) storage = props.getProperty("storage") 78 | 79 | if (props.containsKey("master")) master = props.getProperty("master") 80 | if (props.containsKey("user")) user = props.getProperty("user") 81 | if 
(props.containsKey("principal")) principal = props.getProperty("principal") 82 | if (props.containsKey("secret")) secret = props.getProperty("secret") 83 | 84 | if (props.containsKey("framework-name")) frameworkName = props.getProperty("framework-name") 85 | if (props.containsKey("framework-role")) frameworkRole = props.getProperty("framework-role") 86 | if (props.containsKey("framework-timeout")) frameworkTimeout = new Period(props.getProperty("framework-timeout")) 87 | 88 | if (props.containsKey("reconciliation-timeout")) reconciliationTimeout = new Period(props.getProperty("reconciliation-timeout")) 89 | if (props.containsKey("reconciliation-attempts")) reconciliationAttempts = Integer.valueOf("reconciliation-attempts") 90 | if (props.containsKey("reconciliation-interval")) reconciliationInterval = new Period(props.getProperty("reconciliation-interval")) 91 | 92 | if (props.containsKey("jre")) jre = new File(props.getProperty("jre")) 93 | if (props.containsKey("log")) log = new File(props.getProperty("log")) 94 | if (props.containsKey("api")) api = props.getProperty("api") 95 | if (props.containsKey("bind-address")) bindAddress = new BindAddress(props.getProperty("bind-address")) 96 | if (props.containsKey("zk")) zk = props.getProperty("zk") 97 | } 98 | 99 | override def toString: String = { 100 | s""" 101 | |debug: $debug, storage: $storage 102 | |mesos: master=$master, user=${if (user == null || user.isEmpty) "" else user}, principal=${if (principal != null) principal else ""}, secret=${if (secret != null) "*****" else ""} 103 | |framework: name=$frameworkName, role=$frameworkRole, timeout=$frameworkTimeout 104 | |api: $api, bind-address: ${if (bindAddress != null) bindAddress else ""}, zk: $zk, jre: ${if (jre == null) "" else jre} 105 | """.stripMargin.trim 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/Message.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | case class LogRequest(requestId: Long, lines: Int, name: String) { 20 | override def toString: String = s"log,$requestId,$lines,$name" 21 | } 22 | 23 | object LogRequest { 24 | def parse(message: String): LogRequest = { 25 | val Array(_, rid, numLines, name) = message.split(",") 26 | val requestId = rid.toLong 27 | val lines = numLines.toInt 28 | 29 | LogRequest(requestId, lines, name) 30 | } 31 | } 32 | 33 | case class LogResponse(requestId: Long, content: String) 34 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/Util.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import java.util 21 | import scala.collection.JavaConversions._ 22 | import java.io.{File, IOException} 23 | import java.net.{Inet4Address, InetAddress, NetworkInterface} 24 | import java.util.Date 25 | 26 | trait Clock { 27 | def now(): Date 28 | } 29 | 30 | trait ClockComponent { 31 | val clock: Clock 32 | } 33 | 34 | trait WallClockComponent extends ClockComponent { 35 | val clock: Clock = new WallClock 36 | 37 | class WallClock extends Clock { 38 | def now(): Date = new Date() 39 | } 40 | } 41 | 42 | object RunnableConversions { 43 | implicit def fnToRunnable[T](fn: () => T): Runnable = new Runnable { 44 | override def run(): Unit = fn() 45 | } 46 | } 47 | 48 | object Util { 49 | var terminalWidth: Int = getTerminalWidth 50 | private def getTerminalWidth: Int = { 51 | // No tty, don't bother trying to find the terminal width 52 | if (System.console() == null) { 53 | return 80 54 | } 55 | 56 | val file: File = File.createTempFile("getTerminalWidth", null) 57 | file.delete() 58 | 59 | var width = 80 60 | try { 61 | new ProcessBuilder(List("bash", "-c", "tput cols")) 62 | .inheritIO().redirectOutput(file).start().waitFor() 63 | 64 | val source = scala.io.Source.fromFile(file) 65 | width = try Integer.valueOf(source.mkString.trim) finally source.close() 66 | } catch { 67 | case e: IOException => /* ignore */ 68 | case e: NumberFormatException => /* ignore */ 69 | } 70 | 71 | file.delete() 72 | width 73 | } 74 | 75 | class BindAddress(s: String) { 76 | private var _source: String = null 77 | private var _value: String = null 78 | 79 | def source: String = _source 80 | def value: String = _value 81 | 82 | parse 83 | def parse { 84 | val idx = s.indexOf(":") 85 | if (idx != -1) { 86 | _source = s.substring(0, idx) 87 | _value = s.substring(idx + 1) 88 | } else 89 | _value = s 90 | 91 | if (source != null && source != "if") 92 | throw new IllegalArgumentException(s) 93 | } 94 | 
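// Examples (illustrative values, not from the original source) of inputs accepted by parse above:
//   "0.0.0.0"      -> _source = null, _value = "0.0.0.0"       (plain address)
//   "192.168.50.*" -> _source = null, _value = "192.168.50.*"  (prefix mask, see resolveAddress below)
//   "if:eth1"      -> _source = "if", _value = "eth1"          (resolved from the named interface)
// Any other source prefix, e.g. "ip:1.2.3.4", is rejected with IllegalArgumentException.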
95 | def resolve(): String = { 96 | _source match { 97 | case null => resolveAddress(_value) 98 | case "if" => resolveInterfaceAddress(_value) 99 | case _ => throw new IllegalStateException("Failed to resolve " + s) 100 | } 101 | } 102 | 103 | def resolveAddress(addressOrMask: String): String = { 104 | if (!addressOrMask.endsWith("*")) return addressOrMask 105 | val prefix = addressOrMask.substring(0, addressOrMask.length - 1) 106 | 107 | for (ni <- NetworkInterface.getNetworkInterfaces) { 108 | val address = ni.getInetAddresses.find(_.getHostAddress.startsWith(prefix)).getOrElse(null) 109 | if (address != null) return address.getHostAddress 110 | } 111 | 112 | throw new IllegalStateException("Failed to resolve " + s) 113 | } 114 | 115 | def resolveInterfaceAddress(name: String): String = { 116 | val ni = NetworkInterface.getNetworkInterfaces.find(_.getName == name).getOrElse(null) 117 | if (ni == null) throw new IllegalStateException("Failed to resolve " + s) 118 | 119 | val addresses: util.Enumeration[InetAddress] = ni.getInetAddresses 120 | val address = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(null) 121 | if (address != null) return address.getHostAddress 122 | 123 | throw new IllegalStateException("Failed to resolve " + s) 124 | } 125 | 126 | 127 | override def hashCode(): Int = 31 * (if (_source != null) _source.hashCode else 0) + _value.hashCode // _source may be null for plain addresses; guard avoids an NPE 128 | 129 | override def equals(o: scala.Any): Boolean = { 130 | if (!o.isInstanceOf[BindAddress]) return false 131 | val address = o.asInstanceOf[BindAddress] 132 | _source == address._source && _value == address._value 133 | } 134 | 135 | override def toString: String = s 136 | } 137 | 138 | def readLastLines(file: File, n: Int, maxBytes: Int = 102400): String = { 139 | require(n > 0) 140 | 141 | val raf = new java.io.RandomAccessFile(file, "r") 142 | val fileLength: Long = raf.length() 143 | var line = 0 144 | var pos: Long = fileLength - 1 145 | var found = false 146 | var nlPos = pos 147 | 148 | try { 149 | while (pos != -1 && !found) { 150 | raf.seek(pos) 151 | 152 | if (raf.readByte() == 10) { 153 | if (pos != fileLength - 1) { 154 | line += 1 155 | } 156 | nlPos = pos 157 | } 158 | 159 | if (line == n) found = true 160 | if (fileLength - pos > maxBytes) found = true 161 | 162 | if (!found) pos -= 1 163 | } 164 | 165 | if (pos == -1) pos = 0 166 | 167 | if (line == n) { 168 | pos = pos + 1 169 | } else if (fileLength - pos > maxBytes) { 170 | pos = nlPos + 1 171 | } 172 | 173 | raf.seek(pos) 174 | 175 | val buffer = new Array[Byte]((fileLength - pos).toInt) 176 | 177 | raf.read(buffer, 0, buffer.length) 178 | 179 | val str = new String(buffer, "UTF-8") 180 | 181 | str 182 | } finally { 183 | raf.close() 184 | } 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/cli/SchedulerCli.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License.
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.cli 18 | 19 | import java.io.File 20 | import ly.stealth.mesos.kafka.cli.Cli.Error 21 | import ly.stealth.mesos.kafka.Config 22 | import ly.stealth.mesos.kafka.Util.BindAddress 23 | import ly.stealth.mesos.kafka.scheduler.ProductionRegistry 24 | import ly.stealth.mesos.kafka.scheduler.mesos.KafkaMesosScheduler 25 | import net.elodina.mesos.util.Period 26 | 27 | trait SchedulerCli { 28 | this: CliUtils => 29 | 30 | val schedulerCli: CliHandler = new SchedulerCliImpl 31 | 32 | class SchedulerCliImpl extends CliHandler { 33 | def handle(cmd: String, args: Array[String], help: Boolean = false): Unit = { 34 | val parser = newParser() 35 | parser.accepts("debug", "Debug mode. Default - " + Config.debug) 36 | .withRequiredArg().ofType(classOf[java.lang.Boolean]) 37 | 38 | parser.accepts("storage", 39 | """Storage for cluster state. Examples: 40 | | - file:kafka-mesos.json 41 | | - zk:/kafka-mesos 42 | |Default - """.stripMargin + Config.storage) 43 | .withRequiredArg().ofType(classOf[String]) 44 | 45 | 46 | parser.accepts("master", 47 | """Master connection settings. Examples: 48 | | - master:5050 49 | | - master:5050,master2:5050 50 | | - zk://master:2181/mesos 51 | | - zk://username:password@master:2181 52 | | - zk://master:2181,master2:2181/mesos""".stripMargin) 53 | .withRequiredArg().ofType(classOf[String]) 54 | 55 | parser.accepts("user", "Mesos user to run tasks. Default - none") 56 | .withRequiredArg().ofType(classOf[String]) 57 | 58 | parser.accepts("principal", "Principal (username) used to register framework. Default - none") 59 | .withRequiredArg().ofType(classOf[String]) 60 | 61 | parser.accepts("secret", "Secret (password) used to register framework. Default - none") 62 | .withRequiredArg().ofType(classOf[String]) 63 | 64 | 65 | parser.accepts("framework-name", "Framework name. Default - " + Config.frameworkName) 66 | .withRequiredArg().ofType(classOf[String]) 67 | 68 | parser.accepts("framework-role", "Framework role. Default - " + Config.frameworkRole) 69 | .withRequiredArg().ofType(classOf[String]) 70 | 71 | parser.accepts("framework-timeout", "Framework timeout (30s, 1m, 1h). Default - " + Config.frameworkTimeout) 72 | .withRequiredArg().ofType(classOf[String]) 73 | 74 | 75 | parser.accepts("reconciliation-timeout", "Reconciliation timeout (5m, 1h). Default - " + Config.reconciliationTimeout) 76 | .withRequiredArg().ofType(classOf[String]) 77 | 78 | parser.accepts("reconciliation-attempts", "Number of reconciliation attempts before giving up. Default - " + Config.reconciliationAttempts) 79 | .withRequiredArg().ofType(classOf[Integer]) 80 | 81 | 82 | parser.accepts("api", "Api url. Example: http://master:7000") 83 | .withRequiredArg().ofType(classOf[String]) 84 | 85 | parser.accepts("bind-address", "Scheduler bind address (master, 0.0.0.0, 192.168.50.*, if:eth1). Default - all") 86 | .withRequiredArg().ofType(classOf[String]) 87 | 88 | parser.accepts("zk", 89 | """Kafka zookeeper.connect. 
Examples: 90 | | - master:2181 91 | | - master:2181,master2:2181""".stripMargin) 92 | .withRequiredArg().ofType(classOf[String]) 93 | 94 | parser.accepts("jre", "JRE zip-file (jre-7-openjdk.zip). Default - none.") 95 | .withRequiredArg().ofType(classOf[String]) 96 | 97 | parser.accepts("log", "Log file to use. Default - stdout.") 98 | .withRequiredArg().ofType(classOf[String]) 99 | 100 | 101 | val configArg = parser.nonOptions() 102 | 103 | if (help) { 104 | printLine("Start scheduler \nUsage: scheduler [options] [config.properties]\n") 105 | parser.printHelpOn(out) 106 | return 107 | } 108 | 109 | val options = parseOptions(parser, args) 110 | 111 | var configFile = if (options.valueOf(configArg) != null) new File(options.valueOf(configArg)) else null 112 | if (configFile != null && !configFile.exists()) throw new Error(s"config-file $configFile not found") 113 | 114 | if (configFile == null && Config.DEFAULT_FILE.exists()) configFile = Config.DEFAULT_FILE 115 | 116 | if (configFile != null) { 117 | printLine("Loading config defaults from " + configFile) 118 | Config.load(configFile) 119 | } 120 | 121 | val debug = options.valueOf("debug").asInstanceOf[java.lang.Boolean] 122 | if (debug != null) Config.debug = debug 123 | 124 | val storage = options.valueOf("storage").asInstanceOf[String] 125 | if (storage != null) Config.storage = storage 126 | 127 | val provideOption = "Provide either cli option or config default value" 128 | 129 | val master = options.valueOf("master").asInstanceOf[String] 130 | if (master != null) Config.master = master 131 | else if (Config.master == null) throw new Error(s"Undefined master. $provideOption") 132 | 133 | val user = options.valueOf("user").asInstanceOf[String] 134 | if (user != null) Config.user = user 135 | 136 | val principal = options.valueOf("principal").asInstanceOf[String] 137 | if (principal != null) Config.principal = principal 138 | 139 | val secret = options.valueOf("secret").asInstanceOf[String] 140 | if (secret != null) Config.secret = secret 141 | 142 | 143 | val frameworkName = options.valueOf("framework-name").asInstanceOf[String] 144 | if (frameworkName != null) Config.frameworkName = frameworkName 145 | 146 | val frameworkRole = options.valueOf("framework-role").asInstanceOf[String] 147 | if (frameworkRole != null) Config.frameworkRole = frameworkRole 148 | 149 | val frameworkTimeout = options.valueOf("framework-timeout").asInstanceOf[String] 150 | if (frameworkTimeout != null) 151 | try { Config.frameworkTimeout = new Period(frameworkTimeout) } 152 | catch { case e: IllegalArgumentException => throw new Error("Invalid framework-timeout") } 153 | 154 | val reconciliationTimeout = options.valueOf("reconciliation-timeout").asInstanceOf[String] 155 | if (reconciliationTimeout != null) 156 | try { Config.reconciliationTimeout = new Period(reconciliationTimeout) } 157 | catch { case e: IllegalArgumentException => throw new Error("Invalid reconciliation-timeout") } 158 | 159 | val reconciliationAttempts = options.valueOf("reconciliation-attempts").asInstanceOf[Integer] 160 | if (reconciliationAttempts != null) Config.reconciliationAttempts = reconciliationAttempts 161 | 162 | val api = options.valueOf("api").asInstanceOf[String] 163 | if (api != null) Config.api = api 164 | else if (Config.api == null) throw new Error(s"Undefined api. 
$provideOption") 165 | 166 | val bindAddress = options.valueOf("bind-address").asInstanceOf[String] 167 | if (bindAddress != null) 168 | try { Config.bindAddress = new BindAddress(bindAddress) } 169 | catch { case e: IllegalArgumentException => throw new Error("Invalid bind-address") } 170 | 171 | val zk = options.valueOf("zk").asInstanceOf[String] 172 | if (zk != null) Config.zk = zk 173 | else if (Config.zk == null) throw new Error(s"Undefined zk. $provideOption") 174 | 175 | val jre = options.valueOf("jre").asInstanceOf[String] 176 | if (jre != null) Config.jre = new File(jre) 177 | if (Config.jre != null && !Config.jre.exists()) throw new Error("JRE file doesn't exists") 178 | 179 | val log = options.valueOf("log").asInstanceOf[String] 180 | if (log != null) Config.log = new File(log) 181 | if (Config.log != null) printLine(s"Logging to ${Config.log}") 182 | 183 | val registry = new ProductionRegistry() 184 | KafkaMesosScheduler.start(registry) 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/executor/Executor.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package ly.stealth.mesos.kafka.executor 19 | 20 | import com.google.protobuf.ByteString 21 | import java.io._ 22 | import ly.stealth.mesos.kafka.json.JsonUtil 23 | import ly.stealth.mesos.kafka.{Broker, FrameworkMessage, LogRequest, LogResponse, Util} 24 | import net.elodina.mesos.util.Repr 25 | import org.apache.log4j._ 26 | import org.apache.log4j.net.SyslogAppender 27 | import org.apache.mesos.Protos._ 28 | import org.apache.mesos.{ExecutorDriver, MesosExecutorDriver} 29 | 30 | object Executor extends org.apache.mesos.Executor { 31 | val logger: Logger = Logger.getLogger("Executor") 32 | var server: BrokerServer = new KafkaServer() 33 | 34 | def registered(driver: ExecutorDriver, executor: ExecutorInfo, framework: FrameworkInfo, slave: SlaveInfo): Unit = { 35 | logger.info("[registered] framework:" + Repr.framework(framework) + " slave:" + Repr.slave(slave)) 36 | } 37 | 38 | def reregistered(driver: ExecutorDriver, slave: SlaveInfo): Unit = { 39 | logger.info("[reregistered] " + Repr.slave(slave)) 40 | } 41 | 42 | def disconnected(driver: ExecutorDriver): Unit = { 43 | logger.info("[disconnected]") 44 | } 45 | 46 | def launchTask(driver: ExecutorDriver, task: TaskInfo): Unit = { 47 | logger.info("[launchTask] " + Repr.task(task)) 48 | startBroker(driver, task) 49 | } 50 | 51 | def killTask(driver: ExecutorDriver, id: TaskID): Unit = { 52 | logger.info("[killTask] " + id.getValue) 53 | killExecutor(driver, id) 54 | } 55 | 56 | def frameworkMessage(driver: ExecutorDriver, data: Array[Byte]): Unit = { 57 | logger.info("[frameworkMessage] " + new String(data)) 58 | handleMessage(driver, new String(data)) 59 | } 60 | 61 | def shutdown(driver: ExecutorDriver): Unit = { 62 | logger.info("[shutdown]") 63 | stopExecutor(driver) 64 | } 65 | 66 | def error(driver: ExecutorDriver, message: String): Unit = { 67 | logger.info("[error] " + message) 68 | } 69 | 70 | private[kafka] def startBroker(driver: ExecutorDriver, task: TaskInfo): Unit = { 71 | val brokerThread = new Thread { 72 | override def run(): Unit = { 73 | try { 74 | if (task.getData == null) { 75 | throw new IllegalArgumentException("No task data received") 76 | } 77 | val config = JsonUtil.fromJson[LaunchConfig](task.getData.toByteArray) 78 | 79 | def send(metrics: Broker.Metrics): Unit = { 80 | try { 81 | driver.sendFrameworkMessage( 82 | JsonUtil.toJsonBytes(FrameworkMessage(metrics = Some(metrics)))) 83 | } 84 | catch { 85 | case t: Exception => 86 | logger.error("Error posting metrics", t) 87 | } 88 | } 89 | 90 | val startingStatus = TaskStatus.newBuilder 91 | .setTaskId(task.getTaskId).setState(TaskState.TASK_STARTING) 92 | driver.sendStatusUpdate(startingStatus.build()) 93 | 94 | val endpoint = server.start(config, send) 95 | 96 | val runningStatus = TaskStatus.newBuilder 97 | .setTaskId(task.getTaskId).setState(TaskState.TASK_RUNNING) 98 | .setData(ByteString.copyFromUtf8("" + endpoint)) 99 | driver.sendStatusUpdate(runningStatus.build()) 100 | 101 | server.waitFor() 102 | val finishedStatus = TaskStatus.newBuilder 103 | .setTaskId(task.getTaskId) 104 | .setState(TaskState.TASK_FINISHED) 105 | driver.sendStatusUpdate(finishedStatus.build()) 106 | } catch { 107 | case t: Throwable => 108 | logger.warn("", t) 109 | sendTaskFailed(driver, task, t) 110 | } finally { 111 | stopExecutor(driver) 112 | } 113 | } 114 | } 115 | brokerThread.setContextClassLoader(server.getClassLoader) 116 | brokerThread.setName("BrokerServer") 117 | brokerThread.start() 118 | } 119 | 120 | private def killExecutor(driver: ExecutorDriver, 
taskId: TaskID): Unit = { 121 | val killThread = new Thread() { 122 | override def run(): Unit = { 123 | logger.info("Sending TASK_KILLING status update") 124 | driver.sendStatusUpdate(TaskStatus.newBuilder 125 | .setTaskId(taskId) 126 | .setState(TaskState.TASK_KILLING) 127 | .setMessage("Asked to shut down") 128 | .clearReason() 129 | .build) 130 | 131 | if (server.isStarted) 132 | server.stop() 133 | 134 | driver.sendStatusUpdate(TaskStatus.newBuilder 135 | .setTaskId(taskId) 136 | .setState(TaskState.TASK_KILLED) 137 | .setMessage("Asked to shut down") 138 | .clearReason() 139 | .build) 140 | 141 | stopExecutor(driver) 142 | } 143 | } 144 | killThread.setName("ExecutorStopper") 145 | killThread.start() 146 | } 147 | 148 | private[kafka] def stopExecutor(driver: ExecutorDriver): Unit = { 149 | if (server.isStarted) 150 | server.stop() 151 | driver.stop() 152 | } 153 | 154 | private[kafka] def handleMessage(driver: ExecutorDriver, message: String): Unit = { 155 | if (message == "stop") driver.stop() 156 | if (message.startsWith("log,")) handleLogRequest(driver, message) 157 | } 158 | 159 | private[kafka] def handleLogRequest(driver: ExecutorDriver, message: String) { 160 | val logRequest = LogRequest.parse(message) 161 | val name = logRequest.name 162 | 163 | val cwd = new File(".") 164 | val path = if (name == "stdout" || name == "stderr") { 165 | cwd.toPath.resolve(name).toFile 166 | } else if (name.endsWith(".log")) { 167 | new File(BrokerServer.distro.dir, "log/" + name) 168 | } else { 169 | null 170 | } 171 | 172 | val content = if (path == null) { 173 | s"$name doesn't end with .log" 174 | } else if (path.exists()) { 175 | if (path.canRead) Util.readLastLines(path, logRequest.lines) 176 | else s"$name not readable" 177 | } else { 178 | s"$name doesn't exist" 179 | } 180 | 181 | val response = LogResponse(logRequest.requestId, content) 182 | driver.sendFrameworkMessage(JsonUtil.toJsonBytes(FrameworkMessage(log = Some(response)))) 183 | } 184 | 185 | private def sendTaskFailed(driver: ExecutorDriver, task: TaskInfo, t: Throwable) { 186 | val stackTrace = new StringWriter() 187 | t.printStackTrace(new PrintWriter(stackTrace, true)) 188 | 189 | driver.sendStatusUpdate(TaskStatus.newBuilder 190 | .setTaskId(task.getTaskId).setState(TaskState.TASK_FAILED) 191 | .setMessage("" + stackTrace) 192 | .build 193 | ) 194 | } 195 | 196 | def main(args: Array[String]) { 197 | configureLogging() 198 | 199 | val driver = new MesosExecutorDriver(Executor) 200 | val status = if (driver.run eq Status.DRIVER_STOPPED) 0 else 1 201 | 202 | System.exit(status) 203 | } 204 | 205 | private def configureLogging() { 206 | System.setProperty("log4j.ignoreTCL", "true") // fix log4j class loading issue 207 | BasicConfigurator.resetConfiguration() 208 | 209 | val root = Logger.getRootLogger 210 | root.setLevel(Level.INFO) 211 | 212 | val logger = Logger.getLogger("Executor") 213 | logger.setLevel(if (System.getProperty("debug") != null) Level.DEBUG else Level.INFO) 214 | 215 | val pattern = "%d [%t] %-5p %c %x - %m%n" 216 | 217 | if (System.getenv("MESOS_SYSLOG") != null) { 218 | val frameworkName = System.getenv("MESOS_SYSLOG_TAG") 219 | val appender = new SyslogAppender(new PatternLayout(frameworkName + ": " + pattern.substring(3)), "localhost", SyslogAppender.LOG_USER) 220 | 221 | appender.setHeader(true) 222 | root.addAppender(appender) 223 | } 224 | 225 | root.addAppender(new ConsoleAppender(new PatternLayout(pattern))) 226 | } 227 | } 228 | 229 |
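// Note on the message formats handled above (illustrative example values): handleMessage reacts
// to exactly two framework messages -- the literal string "stop", and log requests of the form
// "log,<requestId>,<lines>,<name>" as produced by LogRequest.toString in Message.scala, e.g.
// "log,1523968532000,100,server.log" requests the last 100 lines of the broker's server.log.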
-------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/json/JsonUtil.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.json 18 | 19 | import com.fasterxml.jackson.annotation.JsonInclude 20 | import java.text.SimpleDateFormat 21 | import java.util.TimeZone 22 | import com.fasterxml.jackson.databind._ 23 | import com.fasterxml.jackson.module.scala.DefaultScalaModule 24 | import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper 25 | 26 | object JsonUtil { 27 | val mapper = new ObjectMapper() with ScalaObjectMapper 28 | mapper.registerModule(DefaultScalaModule) 29 | mapper.registerModule(KafkaObjectModel) 30 | mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) 31 | mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL) 32 | 33 | private[this] def dateFormat = { 34 | val format: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS") 35 | format.setTimeZone(TimeZone.getTimeZone("UTC")) // was "UTC-0", an unrecognized ID that TimeZone silently maps to GMT 36 | format 37 | } 38 | mapper.setDateFormat(dateFormat) 39 | 40 | def toJson(value: Any): String = mapper.writeValueAsString(value) 41 | def toJsonBytes(value: Any): Array[Byte] = mapper.writeValueAsBytes(value) 42 | def fromJson[T](str: String)(implicit m: Manifest[T]): T = mapper.readValue[T](str) 43 | def fromJson[T](bytes: Array[Byte])(implicit m: Manifest[T]) = mapper.readValue[T](bytes) 44 | } 45 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/BrokerLogManager.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeoutException} 20 | 21 | import scala.collection.concurrent.TrieMap 22 | import scala.concurrent.ExecutionContext.Implicits.global 23 | import scala.concurrent.duration.Duration 24 | import scala.concurrent.{Future, Promise} 25 | import scala.util.Success 26 | 27 | trait BrokerLogManagerComponent { 28 | val brokerLogManager: BrokerLogManager 29 | 30 | trait BrokerLogManager { 31 | def initLogRequest(timeout: Duration): (Long, Future[String]) 32 | def putLog(requestId: Long, content: String): Unit 33 | } 34 | } 35 | 36 | trait BrokerLogManagerComponentImpl extends BrokerLogManagerComponent { 37 | val brokerLogManager = new BrokerLogManagerImpl 38 | 39 | class BrokerLogManagerImpl extends BrokerLogManager { 40 | private[this] val pendingLogs = TrieMap[Long, Promise[String]]() 41 | private[this] val scheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1); 42 | 43 | def putLog(requestId: Long, content: String): Unit = 44 | pendingLogs.get(requestId).foreach { log => log.complete(Success(content)) } 45 | 46 | private def scheduleTimeout(promise: Promise[String], timeout: Duration): ScheduledFuture[_] = { 47 | scheduler.schedule(new Runnable { 48 | override def run() = promise.tryFailure(new TimeoutException()) 49 | }, timeout.length, timeout.unit) 50 | } 51 | 52 | def initLogRequest(timeout: Duration): (Long, Future[String]) = { 53 | val requestId = System.currentTimeMillis() 54 | val promise = Promise[String]() 55 | 56 | pendingLogs.put(requestId, promise) 57 | 58 | val timer = scheduleTimeout(promise, timeout) 59 | promise.future.onComplete { _ => 60 | timer.cancel(false) // false: probably too late to interrupt the failing promise anyway 61 | pendingLogs.remove(requestId) 62 | } 63 | 64 | (requestId, promise.future) 65 | } 66 | } 67 | } 68 | 69 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/Expr.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import ly.stealth.mesos.kafka.{Broker, Cluster} 20 | import net.elodina.mesos.util.Strings 21 | import scala.collection.JavaConversions._ 22 | 23 | object Expr { 24 | object any { 25 | def unapply(arg: String): Option[String] = if (arg.equals("*")) Some(arg) else None 26 | } 27 | object wildcard { 28 | def unapply(arg: String): Option[String] = 29 | if (arg.endsWith("*")) 30 | Some(arg.substring(0, arg.length - 1)) 31 | else None 32 | } 33 | object range { 34 | def unapply(part: String): Option[(Int, Int)] = 35 | part.indexOf("..") match { 36 | case v if v == -1 => None 37 | case v if v == 0 => throw new IllegalArgumentException("Invalid expr " + part) 38 | case idx => Some( 39 | try { 40 | (part.substring(0, idx).toInt, part.substring(idx + 2, part.length).toInt) 41 | } catch { 42 | case _: NumberFormatException => throw new IllegalArgumentException("Invalid expr " + part) 43 | }) 44 | } 45 | } 46 | object parsable { 47 | def unapply(arg: String): Option[Int] = try { 48 | Some(arg.toInt) 49 | } catch { 50 | case _: NumberFormatException => None 51 | } 52 | } 53 | 54 | private def parseAttrs(expr: String): (String, Option[Map[String, Option[String]]]) = { 55 | if (expr.endsWith("]")) { 56 | val idx = expr.lastIndexOf("[") 57 | if (idx == -1) 58 | throw new IllegalArgumentException("Invalid expr " + expr) 59 | 60 | (expr.substring(0, idx), 61 | Some(Strings.parseMap(expr.substring(idx + 1, expr.length - 1), true) 62 | .toMap.mapValues(Option.apply))) 63 | } else { 64 | (expr, None) 65 | } 66 | } 67 | 68 | def expandBrokers(cluster: Cluster, _expr: String, sortByAttrs: Boolean = false): Seq[Int] = { 69 | val (expr, attributes) = parseAttrs(_expr) 70 | val ids = expr.split(",").map(_.trim).flatMap { 71 | case wildcard(i) if i.isEmpty => cluster.getBrokers.map(_.id) 72 | case range((start, end)) => start until end + 1 73 | case parsable(i) => Seq(i) 74 | case _ => throw new IllegalArgumentException("Invalid expr " + expr) 75 | }.distinct 76 | 77 | attributes 78 | .map(attrs => filterAndSortBrokersByAttrs(cluster, ids, attrs, sortByAttrs)) 79 | .getOrElse(ids.sorted) 80 | } 81 | 82 | private def brokerAttr(broker: Broker, name: String): Option[String] = { 83 | Option(broker) 84 | .flatMap(b => Option(b.task)) 85 | .flatMap { t => 86 | name match { 87 | case "hostname" => Some(t.hostname) 88 | case attr => t.attributes.get(attr) 89 | } 90 | } 91 | } 92 | 93 | private def filterAndSortBrokersByAttrs( 94 | cluster: Cluster, 95 | ids: Iterable[Int], 96 | attributes: Map[String, Option[String]], 97 | sortByAttrs: Boolean 98 | ): Seq[Int] = { 99 | val filtered = ids.map(id => cluster.getBroker(id)).filter(_ != null).filter { broker => 100 | attributes.forall { case (key, expected) => 101 | (brokerAttr(broker, key), expected) match { 102 | case (Some(a), Some(e)) if e.endsWith("*") => a.startsWith(e.substring(0, e.length - 1)) 103 | case (Some(a), Some(e)) => a == e 104 | case (Some(_), None) => true 105 | case _ => false 106 | } 107 | } 108 | } 109 | if (sortByAttrs) 110 | sortBrokers(cluster, attributes.keys, filtered) 111 | else 112 | filtered.map(_.id).toSeq 113 | } 114 | 115 | // Group passed in brokers by attribute, and then round robin across the groups. 116 | // All brokers in the broker list must have all attributes in the passed attribute list. 
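// Worked example (illustrative, not from the original source): for attribute "rack" with brokers
// 0,1 on rack "a" and brokers 2,3 on rack "b", grouping yields [a -> (0, 1), b -> (2, 3)];
// zipWithIndex tags them (0,#0),(1,#1),(2,#0),(3,#1), and the stable sort by index interleaves
// the racks, producing the round-robin order 0, 2, 1, 3.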
117 | private def sortBrokers( 118 | cluster: Cluster, 119 | attributes: Iterable[String], 120 | brokers: Iterable[Broker] 121 | ): Seq[Int] = { 122 | brokers 123 | .groupBy(b => attributes 124 | .map(a => a -> brokerAttr(b, a).get)) // Group by unique attribute (k,v) tuples 125 | .toSeq 126 | .sortBy(_._1) // Sort them (this will be by the key and value combo) 127 | .flatMap(_._2.zipWithIndex) // Add an index to each element, and merge all lists together 128 | .sortBy(_._2) // Sort by the index added from above 129 | .map(_._1.id) // Pick the id off of the broker 130 | } 131 | 132 | def expandTopics(expr: String): Seq[String] = { 133 | val allTopics = ZkUtilsWrapper().getAllTopics() 134 | 135 | expr.split(",").map(_.trim).filter(!_.isEmpty).flatMap { 136 | case wildcard(part) => allTopics.filter(topic => topic.startsWith(part)) 137 | case p => Seq(p) 138 | } 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/Extractors.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import ly.stealth.mesos.kafka.Broker 20 | import org.apache.mesos.Protos.{TaskState, TaskStatus} 21 | 22 | 23 | abstract class TaskStatusExtractor(states: Set[TaskState]) { 24 | def this(state: TaskState) = this(Set(state)) 25 | 26 | def unapply(status: TaskStatus) = 27 | if (states.contains(status.getState)) Some(status) else None 28 | } 29 | 30 | object TaskStaging extends TaskStatusExtractor(TaskState.TASK_STAGING) {} 31 | object TaskStarting extends TaskStatusExtractor(TaskState.TASK_STARTING) {} 32 | object TaskRunning extends TaskStatusExtractor(TaskState.TASK_RUNNING) {} 33 | object TaskKilling extends TaskStatusExtractor(TaskState.TASK_KILLING) {} 34 | object TaskLost extends TaskStatusExtractor(TaskState.TASK_LOST) {} 35 | object TaskExited extends TaskStatusExtractor(Set( 36 | TaskState.TASK_ERROR, TaskState.TASK_FAILED, TaskState.TASK_FINISHED, 37 | TaskState.TASK_KILLED, TaskState.TASK_LOST 38 | )) {} 39 | 40 | object Reconciling { 41 | def unapply(tup: (Option[Broker], TaskStatus)) = tup match { 42 | case (ReconcilingBroker(broker), status) 43 | if status.getReason == TaskStatus.Reason.REASON_RECONCILIATION => 44 | Some((broker, status)) 45 | case _ => None 46 | } 47 | } 48 | 49 | 50 | abstract class BrokerTaskExtractor(test: (Broker.Task => Boolean)) { 51 | def unapply(maybeBroker: Option[Broker]) = maybeBroker match { 52 | case b@Some(broker) if broker.task != null && test(broker.task) => b 53 | case _ => None 54 | } 55 | } 56 | 57 | object PendingBroker extends BrokerTaskExtractor(_.pending) {} 58 | object StartingBroker extends BrokerTaskExtractor(_.starting) {} 59 | object RunningBroker extends BrokerTaskExtractor(_.running) {} 60 | object StoppingBroker extends BrokerTaskExtractor(_.stopping) {} 61 | object ReconcilingBroker extends BrokerTaskExtractor(_.reconciling) {} 62 | object StoppedBroker { 63 | def unapply(maybeBroker: Option[Broker]) = maybeBroker match { 64 | case b@Some(broker) if broker.task == null => b 65 | case _ => None 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/KafkaDistribution.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */
17 | package ly.stealth.mesos.kafka.scheduler
18 | 
19 | import java.io.File
20 | import net.elodina.mesos.util.Version
21 | 
22 | case class KafkaDistributionInfo(jar: File, kafkaVersion: Version, kafkaDist: File)
23 | 
24 | trait KafkaDistributionComponent {
25 |   val kafkaDistribution: KafkaDistribution
26 | 
27 |   trait KafkaDistribution {
28 |     val distInfo: KafkaDistributionInfo
29 |   }
30 | }
31 | 
32 | trait KafkaDistributionComponentImpl extends KafkaDistributionComponent {
33 |   val kafkaDistribution: KafkaDistribution = new KafkaDistributionImpl
34 | 
35 |   class KafkaDistributionImpl extends KafkaDistribution {
36 |     lazy val distInfo: KafkaDistributionInfo = {
37 |       var jar: File = null
38 |       var kafkaDist: File = null
39 |       var kafkaVersion: Version = null
40 | 
41 |       val jarMask: String = "kafka-mesos.*\\.jar"
42 |       val kafkaMask: String = "kafka.*\\.tgz"
43 | 
44 |       for (file <- new File(".").listFiles()) {
45 |         if (file.getName.matches(jarMask)) jar = file
46 |         if (file.getName.matches(kafkaMask)) kafkaDist = file
47 |       }
48 | 
49 |       if (jar == null) throw new IllegalStateException(jarMask + " not found in current dir")
50 |       if (kafkaDist == null) throw new IllegalStateException(kafkaMask + " not found in current dir")
51 | 
52 |       // extract version: "kafka-dist-1.2.3.tgz" => "1.2.3"
53 |       val distName: String = kafkaDist.getName
54 |       val tgzIdx = distName.lastIndexOf(".tgz")
55 |       val hIdx = distName.lastIndexOf("-")
56 |       if (tgzIdx == -1 || hIdx == -1) throw new IllegalStateException("Can't extract version number from " + distName)
57 |       kafkaVersion = new Version(distName.substring(hIdx + 1, tgzIdx))
58 | 
59 |       KafkaDistributionInfo(jar, kafkaVersion, kafkaDist)
60 |     }
61 |   }
62 | }
--------------------------------------------------------------------------------
/src/scala/main/ly/stealth/mesos/kafka/scheduler/Quotas.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
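// Worked example of the version extraction above (editor's note), assuming the
// standard kafka_<scala-version>-<kafka-version>.tgz naming:
//   distName = "kafka_2.11-1.1.0.tgz"
//   hIdx   -> last '-' (just before "1.1.0")
//   tgzIdx -> start of ".tgz"
//   => new Version("1.1.0")
// This relies on the last '-' immediately preceding the version number.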
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import java.util.Properties 20 | 21 | case class Quota(producerByteRate: Option[Integer], consumerByteRate: Option[Integer]) 22 | 23 | object Quotas { 24 | val PRODUCER_BYTE_RATE = "producer_byte_rate" 25 | val CONSUMER_BYTE_RATE = "consumer_byte_rate" 26 | } 27 | 28 | class Quotas { 29 | 30 | private[this] def propToInt(properties: Properties, key: String): Option[Integer] = { 31 | val propVal = properties.get(key) 32 | if (propVal == null) { 33 | None 34 | } else { 35 | Some(propVal.asInstanceOf[String].toInt) 36 | } 37 | } 38 | 39 | def getClientQuotas(): Map[String, Quota] = { 40 | val quotas = AdminUtilsWrapper().fetchAllEntityConfigs("clients") 41 | quotas.mapValues(p => Quota(propToInt(p, Quotas.PRODUCER_BYTE_RATE), propToInt(p, Quotas.CONSUMER_BYTE_RATE))).toMap 42 | } 43 | 44 | def getClientConfig(clientId: String) = 45 | AdminUtilsWrapper().fetchEntityConfig("clients", clientId) 46 | 47 | def setClientConfig(clientId: String, configs: Properties) = 48 | AdminUtilsWrapper().changeClientIdConfig(clientId, configs) 49 | } 50 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/Registry.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
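// Hypothetical usage of the Quotas API above ("my-client" is an example id):
//
//   val quotas = new Quotas
//   val all: Map[String, Quota] = quotas.getClientQuotas()   // clientId -> Quota
//   val cfg = quotas.getClientConfig("my-client")            // existing Properties
//   cfg.setProperty(Quotas.PRODUCER_BYTE_RATE, "1048576")    // cap producer at 1 MB/s
//   quotas.setClientConfig("my-client", cfg)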
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import ly.stealth.mesos.kafka.scheduler.http.api._ 20 | import ly.stealth.mesos.kafka.scheduler.http.{HttpServerComponent, HttpServerComponentImpl} 21 | import ly.stealth.mesos.kafka.scheduler.mesos._ 22 | import ly.stealth.mesos.kafka.{ClockComponent, Cluster, WallClockComponent} 23 | 24 | trait HttpApiComponent 25 | extends BrokerApiComponent 26 | with TopicApiComponent 27 | with PartitionApiComponent 28 | with QuotaApiComponent 29 | 30 | trait Registry 31 | extends ClusterComponent 32 | with TaskReconcilerComponent 33 | with SchedulerComponent 34 | with OfferManagerComponent 35 | with HttpServerComponent 36 | with KafkaDistributionComponent 37 | with MesosTaskFactoryComponent 38 | with BrokerLogManagerComponent 39 | with SchedulerDriverComponent 40 | with BrokerLifecycleManagerComponent 41 | with ClockComponent 42 | with BrokerTaskManagerComponent 43 | with HttpApiComponent 44 | with EventLoopComponent 45 | { 46 | 47 | } 48 | 49 | class ProductionRegistry 50 | extends Registry 51 | with ClusterComponent 52 | with OfferManagerComponentImpl 53 | with TaskReconcilerComponentImpl 54 | with HttpServerComponentImpl 55 | with SchedulerComponentImpl 56 | with KafkaDistributionComponentImpl 57 | with MesosTaskFactoryComponentImpl 58 | with BrokerLogManagerComponentImpl 59 | with BrokerLifecycleManagerComponentImpl 60 | with WallClockComponent 61 | with BrokerTaskManagerComponentImpl 62 | with BrokerApiComponentImpl 63 | with TopicApiComponentImpl 64 | with PartitionApiComponentImpl 65 | with QuotaApiComponentImpl 66 | with DefaultEventLoopComponent 67 | { 68 | val cluster = Cluster.load() 69 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/Topics.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
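// The traits above follow the cake pattern: Registry declares one trait per
// component and ProductionRegistry picks the *Impl mixins, so a test can swap
// any single slice. A hypothetical sketch (FakeClockComponent would be a
// test-side implementation of ClockComponent, not part of this repo):
//
//   class TestRegistry extends ProductionRegistry with FakeClockComponent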
16 | */ 17 | 18 | package ly.stealth.mesos.kafka.scheduler 19 | 20 | import java.util 21 | import java.util.Properties 22 | import kafka.api.LeaderAndIsr 23 | import kafka.common.TopicAndPartition 24 | import kafka.controller.LeaderIsrAndControllerEpoch 25 | import kafka.log.LogConfig 26 | import ly.stealth.mesos.kafka.{Partition, Topic} 27 | import org.apache.kafka.common.KafkaException 28 | import scala.collection.JavaConversions._ 29 | import scala.collection.{Map, Seq, mutable} 30 | 31 | class Topics { 32 | def getTopic(name: String): Topic = { 33 | if (name == null) return null 34 | val topics: util.List[Topic] = getTopics.filter(_.name == name) 35 | if (topics.nonEmpty) topics(0) else null 36 | } 37 | 38 | def getTopics: Seq[Topic] = { 39 | val names = ZkUtilsWrapper().getAllTopics() 40 | 41 | val assignments: mutable.Map[String, Map[Int, Seq[Int]]] = ZkUtilsWrapper().getPartitionAssignmentForTopics(names) 42 | val configs = AdminUtilsWrapper().fetchAllTopicConfigs() 43 | 44 | names.sorted.map(t => Topic( 45 | t, 46 | assignments.getOrElse(t, null), 47 | propertiesAsScalaMap(configs.getOrElse(t, null))) 48 | ) 49 | } 50 | 51 | private val NoLeader = LeaderIsrAndControllerEpoch(LeaderAndIsr(LeaderAndIsr.NoLeader, -1, List(), -1), -1) 52 | 53 | def getPartitions(topics: Seq[String]): Map[String, Set[Partition]] = { 54 | // returns topic name -> (partition -> brokers) 55 | val assignments = ZkUtilsWrapper().getPartitionAssignmentForTopics(topics) 56 | val topicAndPartitions = assignments.flatMap { 57 | case (topic, partitions) => partitions.map { 58 | case (partition, _) => TopicAndPartition(topic, partition) 59 | } 60 | }.toSet 61 | val leaderAndisr = ZkUtilsWrapper().getPartitionLeaderAndIsrForTopics(topicAndPartitions) 62 | 63 | topicAndPartitions.map(tap => { 64 | val replicas = assignments(tap.topic).getOrElse(tap.partition, Seq()) 65 | val partitionLeader = leaderAndisr.getOrElse(tap, NoLeader) 66 | tap.topic -> Partition( 67 | tap.partition, 68 | replicas, 69 | partitionLeader.leaderAndIsr.isr, 70 | partitionLeader.leaderAndIsr.leader, 71 | replicas.headOption.getOrElse(-1) 72 | ) 73 | }).groupBy(_._1).mapValues(v => v.map(_._2)) 74 | } 75 | 76 | def fairAssignment(partitions: Int = 1, replicas: Int = 1, brokers: Seq[Int] = null, fixedStartIndex: Int = -1, startPartitionId: Int = -1): Map[Int, Seq[Int]] = { 77 | var brokers_ = brokers 78 | 79 | if (brokers_ == null) { 80 | brokers_ = ZkUtilsWrapper().getSortedBrokerList() 81 | } 82 | 83 | AdminUtilsWrapper().assignReplicasToBrokers(brokers_, partitions, replicas, fixedStartIndex, startPartitionId) 84 | } 85 | 86 | def deleteTopic(name: String): Topic = { 87 | val topicToDelete = getTopic(name) 88 | if(topicToDelete != null) AdminUtilsWrapper().deleteTopic(topicToDelete.name) 89 | topicToDelete 90 | } 91 | 92 | def addTopic(name: String, assignment: Map[Int, Seq[Int]] = null, options: Map[String, String] = null): Topic = { 93 | val config = new Properties() 94 | if (options != null) 95 | for ((k, v) <- options) config.setProperty(k, v) 96 | 97 | AdminUtilsWrapper().createOrUpdateTopicPartitionAssignmentPathInZK( 98 | name, 99 | Option(assignment).getOrElse(fairAssignment(1, 1, null)), 100 | config) 101 | getTopic(name) 102 | } 103 | 104 | def updateTopic(topic: Topic, options: util.Map[String, String]): Topic = { 105 | val config: Properties = new Properties() 106 | for ((k, v) <- options) config.setProperty(k, v) 107 | 108 | AdminUtilsWrapper().changeTopicConfig(topic.name, config) 109 | topic 110 | } 111 | 112 | def 
validateOptions(options: Map[String, String]): String = { 113 | val config: Properties = new Properties() 114 | for ((k, v) <- options) config.setProperty(k, v) 115 | 116 | try { LogConfig.validate(config) } 117 | catch { 118 | case e: IllegalArgumentException => return e.getMessage 119 | case e: KafkaException => return e.getMessage 120 | } 121 | null 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/ZkUtilsWrapper.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.scheduler 18 | 19 | import ly.stealth.mesos.kafka.Config 20 | import ly.stealth.mesos.kafka.interface.{AdminUtilsProxy, ZkUtilsProxy} 21 | import org.I0Itec.zkclient.exception.ZkMarshallingError 22 | import org.I0Itec.zkclient.serialize.ZkSerializer 23 | 24 | object ZKStringSerializer extends ZkSerializer { 25 | 26 | @throws(classOf[ZkMarshallingError]) 27 | def serialize(data : Object) : Array[Byte] = data.asInstanceOf[String].getBytes("UTF-8") 28 | 29 | @throws(classOf[ZkMarshallingError]) 30 | def deserialize(bytes : Array[Byte]) : Object = { 31 | if (bytes == null) 32 | null 33 | else 34 | new String(bytes, "UTF-8") 35 | } 36 | } 37 | 38 | abstract class LazyWrapper[I](fact: (String) => I) { 39 | private def getInstance() = { 40 | fact(Config.zk) 41 | } 42 | 43 | private var instance: Option[I] = None 44 | 45 | def apply(): I = instance match { 46 | case Some(i) => i 47 | case None => 48 | instance = Some(getInstance()) 49 | instance.get 50 | } 51 | def reset() = instance = None 52 | } 53 | 54 | 55 | object ZkUtilsWrapper extends LazyWrapper(ZkUtilsProxy.apply) {} 56 | 57 | object AdminUtilsWrapper extends LazyWrapper(AdminUtilsProxy.apply) { } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/http/BothParam.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. 
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.http; 18 | 19 | import java.lang.annotation.ElementType; 20 | import java.lang.annotation.Retention; 21 | import java.lang.annotation.RetentionPolicy; 22 | import java.lang.annotation.Target; 23 | 24 | @Target({ElementType.PARAMETER, ElementType.METHOD, ElementType.FIELD}) 25 | @Retention(RetentionPolicy.RUNTIME) 26 | public @interface BothParam { 27 | 28 | /** 29 | * Defines the name of the HTTP query parameter whose value will be used 30 | * to initialize the value of the annotated method argument, class field or 31 | * bean property. The name is specified in decoded form, any percent encoded 32 | * literals within the value will not be decoded and will instead be 33 | * treated as literal text. E.g. if the parameter name is "a b" then the 34 | * value of the annotation is "a b", not "a+b" or "a%20b". 35 | */ 36 | String value(); 37 | } 38 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/http/BothParamValueFactoryProvider.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
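// Sketch of the annotation in use (mirrors QuotaApi/TopicApi later in this
// package): the bound value comes from the query string when present and
// falls back to the POST form body otherwise.
//
//   @POST
//   def frob(@BothParam("name") name: String): String = s"hello $name"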
16 | */
17 | package ly.stealth.mesos.kafka.scheduler.http
18 | 
19 | import javax.inject.{Inject, Singleton}
20 | import javax.ws.rs.core.{Feature, FeatureContext, Form, MediaType}
21 | import org.glassfish.hk2.api.{Factory, InjectionResolver, ServiceLocator, TypeLiteral}
22 | import org.glassfish.hk2.utilities.binding.AbstractBinder
23 | import org.glassfish.jersey.message.internal.MediaTypes
24 | import org.glassfish.jersey.server.ContainerRequest
25 | import org.glassfish.jersey.server.internal.inject.{AbstractContainerRequestValueFactory, AbstractValueFactoryProvider, MultivaluedParameterExtractor, MultivaluedParameterExtractorProvider, ParamInjectionResolver}
26 | import org.glassfish.jersey.server.model.Parameter
27 | import org.glassfish.jersey.server.spi.internal.ValueFactoryProvider
28 | 
29 | class BothParamFeature extends Feature {
30 |   class BothParamResolverBinder extends AbstractBinder {
31 |     override def configure(): Unit = {
32 |       bind(classOf[BothParamInjectionResolver]).to(
33 |         new TypeLiteral[InjectionResolver[BothParam]] {}).in(classOf[Singleton])
34 |       bind(classOf[BothParamValueFactoryProvider]).to(
35 |         classOf[ValueFactoryProvider]).in(classOf[Singleton])
36 |     }
37 |   }
38 | 
39 |   override def configure(context: FeatureContext): Boolean = {
40 |     context.register(new BothParamResolverBinder(), 0)
41 |     true
42 |   }
43 | }
44 | 
45 | class BothParamInjectionResolver
46 |   extends ParamInjectionResolver[BothParam](classOf[BothParamValueFactoryProvider])
47 | 
48 | 
49 | class BothParamValueFactoryProvider @Inject() (
50 |   mepe: MultivaluedParameterExtractorProvider, locator: ServiceLocator
51 | ) extends AbstractValueFactoryProvider(mepe, locator, Parameter.Source.UNKNOWN) {
52 | 
53 |   class BothParamValueFactory(
54 |     extractor: MultivaluedParameterExtractor[_],
55 |     decode: Boolean
56 |   ) extends AbstractContainerRequestValueFactory[Object] {
57 |     private val FORM_CACHE_KEY = "ly.stealth.mesos.kafka.scheduler.http.FORM_CACHE_KEY"
58 | 
59 |     override def provide(): Object = {
60 |       val request = getContainerRequest
61 |       if (request.getUriInfo.getQueryParameters.containsKey(extractor.getName)) {
62 |         extractor.extract(request.getUriInfo.getQueryParameters(decode)).asInstanceOf[Object]
63 |       } else {
64 |         val cachedForm = request.getProperty(FORM_CACHE_KEY).asInstanceOf[Form]
65 |         val form =
66 |           if (cachedForm != null)
67 |             cachedForm
68 |           else {
69 |             val newForm = readForm(request)
70 |             request.setProperty(FORM_CACHE_KEY, newForm)
71 |             newForm
72 |           }
73 |         extractor.extract(form.asMap()).asInstanceOf[Object]
74 |       }
75 |     }
76 | 
77 |     private def readForm(request: ContainerRequest): Form = {
78 |       if (MediaTypes.typeEqual(MediaType.APPLICATION_FORM_URLENCODED_TYPE, request.getMediaType)) {
79 |         request.bufferEntity
80 |         // Note: both decoded and encoded parameters read the same (decoded) form
81 |         // entity; no separately-encoded variant of the form is produced here.
82 |         val form: Form = request.readEntity(classOf[Form])
83 |         if (form == null)
84 |           new Form
85 |         else form
86 |       }
87 |       else
88 |         new Form
89 |     }
90 |   }
91 | 
92 |   override def createValueFactory(parameter: Parameter): Factory[_] = {
93 |     val paramName = parameter.getSourceName
94 |     if (paramName == null || paramName.length == 0)
95 |       null
96 |     else {
97 |       val e = get(parameter)
98 |       if (e == null) {
99 |         null
100 |       } else {
101 |         new BothParamValueFactory(e, !parameter.isEncoded)
102 |       }
103 |     }
104 |   }
105 | }
106 | 
107 | 
--------------------------------------------------------------------------------
/src/scala/main/ly/stealth/mesos/kafka/scheduler/http/HttpServer.scala:
-------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka.scheduler.http 19 | 20 | import com.fasterxml.jackson.databind.ObjectMapper 21 | import java.io._ 22 | import javax.ws.rs.core.Response 23 | import javax.ws.rs.ext.ContextResolver 24 | import javax.ws.rs.{GET, POST, Path, QueryParam} 25 | import ly.stealth.mesos.kafka._ 26 | import ly.stealth.mesos.kafka.json.JsonUtil 27 | import ly.stealth.mesos.kafka.scheduler.mesos.{ClusterComponent, SchedulerComponent} 28 | import ly.stealth.mesos.kafka.scheduler.{BrokerLifecycleManagerComponent, BrokerLogManagerComponent, HttpApiComponent, KafkaDistributionComponent} 29 | import org.apache.log4j.{Level, Logger} 30 | import org.eclipse.jetty.server.handler.HandlerList 31 | import org.eclipse.jetty.server.{Server, ServerConnector} 32 | import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder} 33 | import org.eclipse.jetty.util.thread.QueuedThreadPool 34 | import org.glassfish.jersey.jackson.JacksonFeature 35 | import org.glassfish.jersey.server.ResourceConfig 36 | import org.glassfish.jersey.servlet.ServletContainer 37 | 38 | trait HttpServerComponent { 39 | val httpServer: HttpServer 40 | 41 | trait HttpServer { 42 | def start(): Unit 43 | def initLogging(): Unit 44 | def stop(): Unit 45 | } 46 | } 47 | 48 | trait HttpServerComponentImpl extends HttpServerComponent { 49 | this: ClusterComponent 50 | with SchedulerComponent 51 | with KafkaDistributionComponent 52 | with BrokerLogManagerComponent 53 | with BrokerLifecycleManagerComponent 54 | with HttpApiComponent => 55 | 56 | val httpServer = new HttpServerImpl 57 | 58 | class ApiObjectMapperProvider extends ContextResolver[ObjectMapper] { 59 | override def getContext(`type`: Class[_]): ObjectMapper = { 60 | JsonUtil.mapper 61 | } 62 | } 63 | 64 | class HttpServerImpl extends HttpServer { 65 | private val logger = Logger.getLogger("HttpServerImpl") 66 | private var server: Server = _ 67 | 68 | @Path("/") 69 | class FileServer { 70 | private def fileResponse(file: File): Response = { 71 | Response 72 | .ok(new FileInputStream(file)) 73 | .header("Content-Disposition", "attachment; filename=\"" + file.getName + "\"") 74 | .header("Content-Length", "" + file.length()) 75 | .`type`("application/zip") 76 | .build() 77 | } 78 | 79 | @Path("/jar/{file}") 80 | @GET 81 | def downloadJar(): Response = fileResponse(kafkaDistribution.distInfo.jar) 82 | 83 | @Path("/kafka/{file}") 84 | @GET 85 | def downloadKafka(): Response = fileResponse(kafkaDistribution.distInfo.kafkaDist) 86 | 87 | @Path("/jre/{file}") 88 | @GET 89 | def downloadJre(): Response = { 90 | if (Config.jre == null) 91 | 
Response.status(Response.Status.NOT_FOUND).build() 92 | else 93 | fileResponse(Config.jre) 94 | } 95 | } 96 | 97 | @Path("/") 98 | class AdminServer { 99 | @Path("/quitquitquit") 100 | @POST 101 | def qqq(): Response = { 102 | scheduler.stop() 103 | Response.ok().build() 104 | } 105 | 106 | @Path("/quitquitquit") 107 | @GET 108 | def qqq_get(): Response = Response.status(Response.Status.METHOD_NOT_ALLOWED).build() 109 | 110 | @Path("/abortabortabort") 111 | @POST 112 | def aaa(): Response = { 113 | scheduler.kill() 114 | Response.ok().build() 115 | } 116 | 117 | @Path("/abortabortabort") 118 | @GET 119 | def aaa_get(): Response = Response.status(Response.Status.METHOD_NOT_ALLOWED).build() 120 | 121 | @Path("/health") 122 | @GET 123 | def health(): String = "ok" 124 | 125 | @Path("/health") 126 | @POST 127 | def health_post(): String = health() 128 | 129 | @Path("/loglevel") 130 | @POST 131 | def setLogLevel( 132 | @QueryParam("logger") inLogger: String, 133 | @QueryParam("level") level: String 134 | ): Response = { 135 | val logger = inLogger match { 136 | case "root" => Logger.getRootLogger 137 | case "scheduler" => Logger.getLogger("KafkaMesosScheduler") 138 | case "brokerManager" => Logger.getLogger("BrokerLifecycleManager") 139 | case l => Logger.getLogger(l) 140 | } 141 | logger.setLevel(Level.toLevel(level)) 142 | Response.ok.build() 143 | } 144 | } 145 | 146 | private def makeContext(resources: Any*): ServletContextHandler = { 147 | val ctx = new ServletContextHandler() 148 | val config = new ResourceConfig() 149 | resources.foreach({ 150 | case r: Class[_] => config.register(r) 151 | case r => config.register(r, 0) 152 | }) 153 | 154 | ctx.addServlet(new ServletHolder(new ServletContainer(config)), "/*") 155 | ctx 156 | } 157 | 158 | def start() { 159 | if (server != null) throw new IllegalStateException("started") 160 | 161 | val threadPool = new QueuedThreadPool(Runtime.getRuntime.availableProcessors() * 16) 162 | threadPool.setName("Jetty") 163 | 164 | server = new Server(threadPool) 165 | val connector = new ServerConnector(server) 166 | connector.setPort(Config.apiPort) 167 | if (Config.bindAddress != null) connector.setHost(Config.bindAddress.resolve()) 168 | connector.setIdleTimeout(60 * 1000) 169 | 170 | val api = makeContext( 171 | brokerApi, 172 | topicApi, 173 | partitionApi, 174 | quotaApi, 175 | new ApiObjectMapperProvider, 176 | classOf[BothParamFeature], 177 | classOf[JacksonFeature] 178 | ) 179 | api.setContextPath("/api") 180 | val adminServer = makeContext(new FileServer, new AdminServer) 181 | 182 | val handlers = new HandlerList 183 | handlers.setHandlers(Seq(api, adminServer).toArray) 184 | server.setHandler(handlers) 185 | server.addConnector(connector) 186 | server.start() 187 | 188 | if (Config.apiPort == 0) Config.replaceApiPort(connector.getLocalPort) 189 | logger.info("started on port " + connector.getLocalPort) 190 | } 191 | 192 | def stop() { 193 | if (server == null) throw new IllegalStateException("!started") 194 | 195 | server.stop() 196 | server.join() 197 | server = null 198 | 199 | logger.info("stopped") 200 | } 201 | 202 | def initLogging(): Unit = { 203 | System.setProperty("org.eclipse.jetty.util.log.class", classOf[JettyLog4jLogger].getName) 204 | Logger.getLogger("org.eclipse.jetty").setLevel(Level.WARN) 205 | Logger.getLogger("Jetty").setLevel(Level.WARN) 206 | } 207 | 208 | class JettyLog4jLogger extends org.eclipse.jetty.util.log.Logger { 209 | private var logger: Logger = Logger.getLogger("Jetty") 210 | 211 | def this(logger: Logger) { 
212 |       this()
213 |       this.logger = logger
214 |     }
215 | 
216 |     def isDebugEnabled: Boolean = logger.isDebugEnabled
217 | 
218 |     def setDebugEnabled(enabled: Boolean) = logger
219 |       .setLevel(if (enabled) Level.DEBUG else Level.INFO)
220 | 
221 |     def getName: String = logger.getName
222 | 
223 |     def getLogger(name: String): org.eclipse.jetty.util.log.Logger = new JettyLog4jLogger(Logger
224 |       .getLogger(name))
225 | 
226 |     def info(s: String, args: AnyRef*) = logger.info(format(s, args: _*))
227 | 
228 |     def info(s: String, t: Throwable) = logger.info(s, t)
229 | 
230 |     def info(t: Throwable) = logger.info("", t)
231 | 
232 |     def debug(s: String, args: AnyRef*) = logger.debug(format(s, args: _*))
233 | 
234 |     def debug(s: String, t: Throwable) = logger.debug(s, t)
235 | 
236 |     def debug(msg: String, value: Long): Unit = logger.debug(format(msg, Long.box(value)))
237 | 
238 |     def debug(t: Throwable) = logger.debug("", t)
239 | 
240 |     def warn(s: String, args: AnyRef*) = logger.warn(format(s, args: _*))
241 | 
242 |     def warn(s: String, t: Throwable) = logger.warn(s, t)
243 | 
244 |     def warn(s: String) = logger.warn(s)
245 | 
246 |     def warn(t: Throwable) = logger.warn("", t)
247 | 
248 |     def ignore(t: Throwable) = logger.info("Ignored", t)
249 | 
250 |   }
251 | 
252 |   private def format(s: String, args: AnyRef*): String = {
253 |     var result: String = ""
254 |     var i: Int = 0
255 | 
256 |     for (token <- s.split("\\{\\}")) {
257 |       result += token
258 |       if (args.length > i) result += args(i)
259 |       i += 1
260 |     }
261 | 
262 |     result
263 |   }
264 | }
265 | 
266 | }
--------------------------------------------------------------------------------
/src/scala/main/ly/stealth/mesos/kafka/scheduler/http/api/Model.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
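// Worked examples for the format helper above (editor's note):
//   format("started on port {}", "8080")      => "started on port 8080"
//   format("broker {} on {}", "0", "host1")   => "broker 0 on host1"
// A "{}" with no matching argument is simply dropped from the output.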
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.http.api 18 | 19 | import javax.ws.rs.core.Response 20 | import javax.ws.rs.core.Response.StatusType 21 | import net.elodina.mesos.util.{Constraint, Strings} 22 | import scala.collection.JavaConverters._ 23 | import scala.collection.mutable 24 | 25 | class StringMap(value: String) extends mutable.HashMap[String, String] { 26 | this ++= Strings.parseMap(value).asScala 27 | } 28 | 29 | class ConstraintMap(value: String) extends mutable.HashMap[String, Constraint] { 30 | this ++= Strings.parseMap(value).asScala.mapValues(new Constraint(_)) 31 | } 32 | 33 | object Status { 34 | class BadRequest(reason: String) extends StatusType { 35 | override def getStatusCode: Int = Response.Status.BAD_REQUEST.getStatusCode 36 | override def getReasonPhrase: String = reason 37 | override def getFamily: Response.Status.Family = Response.Status.BAD_REQUEST.getFamily 38 | } 39 | object BadRequest { 40 | def apply(reason: String) = Response.status(new BadRequest(reason)).build() 41 | } 42 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/http/api/PartitionApi.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
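// These single-String constructors are what lets Jersey build map-typed
// parameters from "k=v,k2=v2" values, e.g. (assuming net.elodina's
// Strings.parseMap splits entries on ',' and pairs on '='):
//
//   new StringMap("retention.ms=500,flush.ms=100")  // {retention.ms -> 500, flush.ms -> 100}
//   new ConstraintMap("rack=like:r1.*")             // {rack -> Constraint("like:r1.*")}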
16 | */
17 | package ly.stealth.mesos.kafka.scheduler.http.api
18 | 
19 | import javax.ws.rs.{GET, POST, Path, Produces}
20 | import javax.ws.rs.core.{MediaType, Response}
21 | import ly.stealth.mesos.kafka.scheduler.Expr
22 | import ly.stealth.mesos.kafka.scheduler.http.BothParam
23 | import ly.stealth.mesos.kafka.scheduler.mesos.ClusterComponent
24 | 
25 | trait PartitionApiComponent {
26 |   val partitionApi: PartitionApi
27 |   trait PartitionApi {}
28 | }
29 | 
30 | trait PartitionApiComponentImpl extends PartitionApiComponent {
31 |   this: ClusterComponent =>
32 | 
33 |   val partitionApi = new PartitionApiImpl
34 | 
35 |   @Path("partition")
36 |   @Produces(Array(MediaType.APPLICATION_JSON))
37 |   class PartitionApiImpl extends PartitionApi {
38 | 
39 |     @Path("list")
40 |     @POST
41 |     @Produces(Array(MediaType.APPLICATION_JSON))
42 |     def list(@BothParam("topic") topicExpr: String): Response = {
43 |       if (topicExpr == null)
44 |         return Status.BadRequest("invalid topic expression")
45 | 
46 |       val topics = Expr.expandTopics(topicExpr)
47 |       val topicsAndPartitions = cluster.topics.getPartitions(topics)
48 |       Response.ok(topicsAndPartitions).build()
49 |     }
50 | 
51 |     @Path("list")
52 |     @GET
53 |     @Produces(Array(MediaType.APPLICATION_JSON))
54 |     def listGet(@BothParam("topic") topicExpr: String) = list(topicExpr)
55 | 
56 |     @Path("add")
57 |     @POST
58 |     @Produces(Array(MediaType.APPLICATION_JSON))
59 |     def add(@BothParam("topic") topic: String): Response = {
60 |       // TODO: partition expansion is not implemented; the statement below is a
61 |       // no-op reference and the endpoint currently returns 200 without acting.
62 |       cluster.rebalancer
63 |       Response.ok().build()
64 |     }
65 |   }
66 | }
67 | 
--------------------------------------------------------------------------------
/src/scala/main/ly/stealth/mesos/kafka/scheduler/http/api/QuotaApi.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.http.api 18 | 19 | import javax.ws.rs.core.{MediaType, Response} 20 | import javax.ws.rs.{GET, POST, Path, Produces} 21 | import ly.stealth.mesos.kafka.scheduler.Quotas 22 | import ly.stealth.mesos.kafka.scheduler.http.BothParam 23 | import ly.stealth.mesos.kafka.scheduler.mesos.ClusterComponent 24 | 25 | trait QuotaApiComponent { 26 | val quotaApi: QuotaApi 27 | trait QuotaApi {} 28 | } 29 | 30 | trait QuotaApiComponentImpl extends QuotaApiComponent { 31 | this: ClusterComponent => 32 | 33 | val quotaApi = new QuotaApiImpl 34 | 35 | @Path("quota") 36 | class QuotaApiImpl extends QuotaApi { 37 | 38 | @Path("list") 39 | @POST 40 | @Produces(Array(MediaType.APPLICATION_JSON)) 41 | def list(): Response = { 42 | val quotas = cluster.quotas.getClientQuotas() 43 | val response = quotas.mapValues( 44 | v => Map( 45 | "producer_byte_rate" -> v.producerByteRate, 46 | "consumer_byte_rate" -> v.consumerByteRate 47 | ).filter({ 48 | case (_, Some(_)) => true 49 | case _ => false 50 | }).mapValues(v => v.get) 51 | ) 52 | Response.ok(response).build() 53 | } 54 | 55 | @Path("list") 56 | @GET 57 | @Produces(Array(MediaType.APPLICATION_JSON)) 58 | def listGet() = list() 59 | 60 | @Path("set") 61 | @POST 62 | def set( 63 | @BothParam("entityType") entityType: String, 64 | @BothParam("entity") entityName: String, 65 | @BothParam("producerByteRate") producerQuota: String, 66 | @BothParam("consumerByteRate") consumerQuota: String 67 | ): Response = { 68 | if (entityType == null || entityType != "clients") { 69 | Status.BadRequest("invalid entity type") 70 | } 71 | else if (entityName == null) { 72 | Status.BadRequest("invalid entity") 73 | } 74 | else { 75 | if (producerQuota == null && consumerQuota == null) { 76 | Status.BadRequest("invalid quotas") 77 | } 78 | else { 79 | val existingConfig = cluster.quotas.getClientConfig(entityName) 80 | if (producerQuota != null) { 81 | existingConfig.setProperty(Quotas.PRODUCER_BYTE_RATE, producerQuota.toInt.toString) 82 | } 83 | if (consumerQuota != null) { 84 | existingConfig.setProperty(Quotas.CONSUMER_BYTE_RATE, consumerQuota.toInt.toString) 85 | } 86 | cluster.quotas.setClientConfig(entityName, existingConfig) 87 | Response.ok().build() 88 | } 89 | } 90 | } 91 | } 92 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/http/api/TopicApi.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
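// Example request against the endpoint above (query and form parameters are
// interchangeable via @BothParam); the handler merges the new rate into the
// client's existing config and returns 200, or 400 "invalid quotas" when
// neither rate is supplied:
//
//   POST /api/quota/set?entityType=clients&entity=my-client&producerByteRate=1048576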
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.http.api 18 | 19 | import java.lang.{Integer => JInt} 20 | import javax.ws.rs.core.{MediaType, Response} 21 | import javax.ws.rs.{DefaultValue, FormParam, GET, POST, Path, PathParam, Produces} 22 | import ly.stealth.mesos.kafka.scheduler.{Expr, Rebalancer} 23 | import ly.stealth.mesos.kafka.scheduler.http.BothParam 24 | import ly.stealth.mesos.kafka.{ListTopicsResponse, RebalanceStartResponse} 25 | import ly.stealth.mesos.kafka.scheduler.mesos.ClusterComponent 26 | import net.elodina.mesos.util.Period 27 | import scala.collection.JavaConversions._ 28 | import scala.util.Try 29 | 30 | trait TopicApiComponent { 31 | val topicApi: TopicApi 32 | trait TopicApi {} 33 | } 34 | 35 | trait TopicApiComponentImpl extends TopicApiComponent { 36 | this: ClusterComponent => 37 | 38 | val topicApi: TopicApi = new TopicApiImpl 39 | 40 | @Path("/topic") 41 | class TopicApiImpl extends TopicApi { 42 | 43 | @Path("list") 44 | @GET 45 | @Produces(Array(MediaType.APPLICATION_JSON)) 46 | def list( 47 | @DefaultValue("*") @BothParam("topic") expr: String 48 | ): Response = { 49 | val names = Expr.expandTopics(expr).toSet 50 | Response.ok(ListTopicsResponse( 51 | cluster.topics.getTopics.filter(t => names.contains(t.name)) 52 | )).build() 53 | } 54 | 55 | @Path("list") 56 | @POST 57 | @Produces(Array(MediaType.APPLICATION_JSON)) 58 | def listPost( 59 | @DefaultValue("*") @BothParam("topic") expr: String 60 | ) = list(expr) 61 | 62 | @Path("delete") 63 | @POST 64 | @Produces(Array(MediaType.APPLICATION_JSON)) 65 | def deletePost( 66 | @FormParam("topic") topicExpr: String 67 | ): Response = { 68 | if (topicExpr == null) 69 | return Status.BadRequest("topic required") 70 | 71 | val names = Expr.expandTopics(topicExpr).toSet 72 | 73 | val missing = names.filter(cluster.topics.getTopic(_) == null) 74 | if(missing.nonEmpty) { 75 | Status.BadRequest(s"topic${if (missing.size > 1) "s" else ""} not found ${missing.mkString(",")}") 76 | } else { 77 | Response.ok(ListTopicsResponse( 78 | names.toSeq.sorted.map(cluster.topics.deleteTopic(_)) 79 | )).build() 80 | } 81 | } 82 | 83 | @Path("{op: (add|update)}") 84 | @POST 85 | @Produces(Array(MediaType.APPLICATION_JSON)) 86 | def addTopic( 87 | @PathParam("op") operation: String, 88 | @BothParam("topic") topicExpr: String, 89 | @BothParam("broker") brokerExpr: String, 90 | @DefaultValue("1") @BothParam("partitions") partitions: JInt, 91 | @DefaultValue("1") @BothParam("replicas") replicas: JInt, 92 | @DefaultValue("-1") @BothParam("fixedStartIndex") fixedStartIndex: Int, 93 | @DefaultValue("-1") @BothParam("startPartitionId") startPartitionId: Int, 94 | @BothParam("options") options: StringMap 95 | ): Response = { 96 | val add = operation == "add" 97 | if (topicExpr == null) 98 | return Status.BadRequest("topic required") 99 | 100 | val brokerIds = Option(brokerExpr).map(b => Expr.expandBrokers(cluster, b, sortByAttrs = true)) 101 | if (!add && options == null) 102 | return Status.BadRequest("options required") 103 | if (options != null) 104 | Option(cluster.topics.validateOptions(options)) match { 105 | case Some(err) => return Status.BadRequest(err) 106 | case _ => 107 | } 108 | 109 | val topics = Expr.expandTopics(topicExpr) 110 | val topicErrors = topics.flatMap(topic => Option(cluster.topics.getTopic(topic)) match { 111 | case Some(_) if add => Some(s"Topic $topic already exists") 112 | case None if !add => Some(s"Topic $topic not found") 113 | case _ => None 114 | }) 115 | if (topicErrors.nonEmpty) 116 | return 
Status.BadRequest(topicErrors.mkString("; "))
117 | 
118 |       val result = if (add) {
119 |         topics.map(t => cluster.topics.addTopic(
120 |           t,
121 |           cluster.topics.fairAssignment(
122 |             partitions,
123 |             replicas,
124 |             brokerIds.orNull,
125 |             fixedStartIndex,
126 |             startPartitionId),
127 |           options
128 |         ))
129 |       } else {
130 |         topics.map(t => cluster.topics.updateTopic(cluster.topics.getTopic(t), options))
131 |       }
132 |       Response.ok(ListTopicsResponse(result)).build()
133 |     }
134 | 
135 |     @Path("rebalance")
136 |     @POST
137 |     @Produces(Array(MediaType.APPLICATION_JSON))
138 |     def rebalance(
139 |       @BothParam("topic") topicExpr: String,
140 |       @DefaultValue("*") @BothParam("broker") brokerExpr: String,
141 |       @DefaultValue("-1") @BothParam("replicas") replicas: Int,
142 |       @DefaultValue("-1") @BothParam("fixedStartIndex") fixedStartIndex: Int,
143 |       @DefaultValue("-1") @BothParam("startPartitionId") startPartitionId: Int,
144 |       @BothParam("timeout") timeout: Period,
145 |       @DefaultValue("false") @BothParam("realign") realign: Boolean
146 |     ): Response = {
147 |       val topics = if (topicExpr != null) Expr.expandTopics(topicExpr) else null
148 |       if (topics != null && cluster.rebalancer.running)
149 |         return Status.BadRequest("rebalance is already running")
150 |       if (topics != null && topics.isEmpty)
151 |         return Status.BadRequest("no topics specified")
152 | 
153 |       val brokers = Expr.expandBrokers(cluster, brokerExpr, sortByAttrs = true)
154 |       if (realign && startPartitionId != -1) {
155 |         return Status.BadRequest("startPartitionId is incompatible with realign")
156 |       }
157 | 
158 |       val (status, error) =
159 |         if (topics != null) {
160 |           Try(
161 |             cluster.rebalancer
162 |               .start(topics, brokers, replicas, fixedStartIndex, startPartitionId, realign)
163 |           ).map(_ => {
164 |             if (timeout != null) {
165 |               if (!cluster.rebalancer.waitFor(running = false, timeout))
166 |                 "timeout" -> None
167 |               else
168 |                 "completed" -> None
169 |             } else {
170 |               if (realign && !cluster.rebalancer.running)
171 |                 ("idle (no-op)", None)
172 |               else ("started", None)
173 |             }
174 |           }).recover({
175 |             case e: Rebalancer.Exception => ("failed", Some(e.getMessage))
176 |           }).get
177 |         }
178 |         else
179 |           (if (cluster.rebalancer.running) "running" else "idle") -> None
180 | 
181 |       Response.ok(RebalanceStartResponse(status, cluster.rebalancer.state, error)).build()
182 |     }
183 |   }
184 | }
185 | 
--------------------------------------------------------------------------------
/src/scala/main/ly/stealth/mesos/kafka/scheduler/mesos/BrokerTaskManager.scala:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
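// The "status" field returned by rebalance above, by branch (editor's summary):
//   topics given, start ok, no timeout  -> "started" ("idle (no-op)" if a realign had nothing to move)
//   topics given, timeout supplied      -> "completed" or "timeout"
//   start throws Rebalancer.Exception   -> "failed" plus the error message
//   no topic expression                 -> "running" / "idle" (pure status query)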
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.mesos 18 | 19 | import ly.stealth.mesos.kafka.Broker 20 | import ly.stealth.mesos.kafka.scheduler.mesos.OfferResult.Accept 21 | import net.elodina.mesos.util.Repr 22 | import org.apache.log4j.Logger 23 | import org.apache.mesos.Protos.{ExecutorID, SlaveID, TaskID} 24 | import scala.collection.JavaConverters._ 25 | 26 | trait BrokerTaskManagerComponent { 27 | 28 | val brokerTaskManager: BrokerTaskManager 29 | 30 | trait BrokerTaskManager { 31 | def forceStopBroker(broker: Broker): Unit 32 | def killBroker(broker: Broker): Unit 33 | def killTask(taskId: TaskID): Unit 34 | def launchBroker(offerResult: OfferResult.Accept): Broker.Task 35 | } 36 | } 37 | 38 | trait BrokerTaskManagerComponentImpl extends BrokerTaskManagerComponent { 39 | this: SchedulerDriverComponent 40 | with MesosTaskFactoryComponent => 41 | 42 | val brokerTaskManager: BrokerTaskManager = new BrokerTaskManagerImpl 43 | 44 | class BrokerTaskManagerImpl extends BrokerTaskManager { 45 | private[this] val logger = Logger.getLogger("BrokerTaskManager") 46 | 47 | def forceStopBroker(broker: Broker): Unit = { 48 | if (driver != null && broker.task != null) { 49 | logger.info(s"Stopping broker ${ broker.id } forcibly: sending 'stop' message") 50 | Driver.call(_.sendFrameworkMessage( 51 | ExecutorID.newBuilder().setValue(broker.task.executorId).build(), 52 | SlaveID.newBuilder().setValue(broker.task.slaveId).build(), 53 | "stop".getBytes 54 | )) 55 | } 56 | } 57 | 58 | def killBroker(broker: Broker): Unit = 59 | if (broker.task != null && broker.task.id != null) 60 | Driver.call(_.killTask(TaskID.newBuilder.setValue(broker.task.id).build)) 61 | 62 | def killTask(taskId: TaskID): Unit = Driver.call(_.killTask(taskId)) 63 | 64 | def launchBroker(offerResult: Accept): Broker.Task = { 65 | val broker = offerResult.broker 66 | val offer = offerResult.offer 67 | 68 | broker.needsRestart = false 69 | val reservation = broker.getReservation(offer) 70 | val task_ = taskFactory.newTask(broker, offer, reservation) 71 | val id = task_.getTaskId.getValue 72 | 73 | val attributes = offer.getAttributesList 74 | .asScala 75 | .filter(_.hasText) 76 | .map(a => a.getName -> a.getText.getValue) 77 | .toMap 78 | 79 | Driver.call(_.launchTasks(Seq(offer.getId).asJava, Seq(task_).asJava)) 80 | logger 81 | .info(s"Starting broker ${broker.id}: launching task $id by offer " + 82 | s"${offer.getHostname + Repr.id(offer.getId.getValue)}\n ${Repr.task(task_)}") 83 | 84 | Broker.Task( 85 | id, 86 | task_.getSlaveId.getValue, 87 | task_.getExecutor.getExecutorId.getValue, 88 | offer.getHostname, 89 | attributes) 90 | } 91 | } 92 | 93 | } 94 | 95 | -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/mesos/MesosTaskFactory.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. 
You may obtain a copy of the License at
8 |  *
9 |  *    http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 | */
17 | package ly.stealth.mesos.kafka.scheduler.mesos
18 | 
19 | import com.google.protobuf.ByteString
20 | import ly.stealth.mesos.kafka.Broker.{Container, ContainerType, MountMode}
21 | import ly.stealth.mesos.kafka._
22 | import ly.stealth.mesos.kafka.executor.LaunchConfig
23 | import ly.stealth.mesos.kafka.json.JsonUtil
24 | import ly.stealth.mesos.kafka.scheduler.KafkaDistributionComponent
25 | import net.elodina.mesos.util.Version
26 | import org.apache.mesos.Protos.ContainerInfo.{DockerInfo, MesosInfo}
27 | import org.apache.mesos.Protos.Environment.Variable
28 | import org.apache.mesos.Protos._
29 | import scala.collection.mutable
30 | import scala.collection.JavaConversions._
31 | 
32 | trait MesosTaskFactoryComponent {
33 |   val taskFactory: MesosTaskFactory
34 | 
35 |   trait MesosTaskFactory {
36 |     def newTask(broker: Broker, offer: Offer, reservation: Broker.Reservation): TaskInfo
37 |   }
38 | }
39 | 
40 | trait MesosTaskFactoryComponentImpl extends MesosTaskFactoryComponent {
41 |   this: KafkaDistributionComponent =>
42 | 
43 |   val taskFactory: MesosTaskFactory = new MesosTaskFactoryImpl
44 | 
45 |   class MesosTaskFactoryImpl extends MesosTaskFactory {
46 |     private[kafka] def newExecutor(broker: Broker): ExecutorInfo = {
47 |       val distInfo = kafkaDistribution.distInfo
48 |       var cmd = ""
49 |       if (broker.executionOptions.container.isDefined) {
50 |         cmd += "cd $MESOS_SANDBOX && "
51 |       }
52 |       // Run java from the bundled JRE when one is configured; prefixing just the java command keeps a leading "cd ... && " intact.
53 |       val javaCmd = if (Config.jre != null) "jre/bin/" + broker.executionOptions.javaCmd else broker.executionOptions.javaCmd
54 |       cmd += s"$javaCmd -cp ${distInfo.jar.getName} -Xmx${broker.heap}m"
55 |       if (broker.executionOptions.jvmOptions != null)
56 |         cmd += " " + broker.executionOptions.jvmOptions.replace("$id", broker.id.toString)
57 | 
58 |       if (Config.debug) cmd += " -Ddebug"
59 |       cmd += " ly.stealth.mesos.kafka.executor.Executor"
60 | 
61 |       val commandBuilder = CommandInfo.newBuilder
62 |       if (Config.jre != null) {
63 |         commandBuilder.addUris(CommandInfo.URI.newBuilder().setValue(Config.api + "/jre/" + Config.jre.getName))
64 |       }
65 | 
66 |       val env = new mutable.HashMap[String, String]()
67 |       env("MESOS_SYSLOG_TAG") = Config.frameworkName + "-" + broker.id
68 |       if (broker.syslog) env("MESOS_SYSLOG") = "true"
69 | 
70 |       val envBuilder = Environment.newBuilder()
71 |       for ((name, value) <- env)
72 |         envBuilder.addVariables(Variable.newBuilder().setName(name).setValue(value))
73 |       commandBuilder.setEnvironment(envBuilder)
74 | 
75 |       commandBuilder
76 |         .addUris(CommandInfo.URI.newBuilder()
77 |           .setValue(Config.api + "/jar/" + distInfo.jar.getName).setExtract(false))
78 |         .addUris(CommandInfo.URI.newBuilder()
79 |           .setValue(Config.api + "/kafka/" + distInfo.kafkaDist.getName))
80 |         .setValue(cmd)
81 | 
82 |       val executor = ExecutorInfo.newBuilder()
83 |         .setExecutorId(ExecutorID.newBuilder.setValue(Broker.nextExecutorId(broker)))
84 |         .setCommand(commandBuilder)
85 |         .setName("broker-" + broker.id)
86 | 
87 |       broker.executionOptions.container.foreach { c =>
88 |         executor.setContainer(createContainerInfo(c, broker.id))
89 |       }
90 | 
91 |       executor.build()
92 |     }
93 | 
94 |     private def createContainerInfo(c: Container, brokerId: Int):
ContainerInfo = { 95 | val containerName = c.name.replace("$id", brokerId.toString) 96 | val containerInfo = 97 | if (c.ctype == ContainerType.Docker) { 98 | ContainerInfo.newBuilder() 99 | .setDocker(DockerInfo.newBuilder() 100 | .setImage(containerName) 101 | .setNetwork(DockerInfo.Network.HOST)) 102 | .setType(ContainerInfo.Type.DOCKER) 103 | } else if (c.ctype == ContainerType.Mesos) { 104 | ContainerInfo.newBuilder() 105 | .setMesos(MesosInfo.newBuilder() 106 | .setImage(Image.newBuilder() 107 | .setDocker(Image.Docker.newBuilder().setName(containerName)) 108 | .setType(Image.Type.DOCKER))) 109 | .setType(ContainerInfo.Type.MESOS) 110 | } else { 111 | throw new IllegalArgumentException("unsupported type") 112 | } 113 | 114 | containerInfo 115 | .addAllVolumes( 116 | c.mounts.map(m => 117 | Volume.newBuilder() 118 | .setHostPath(m.hostPath.replace("$id", brokerId.toString)) 119 | .setContainerPath(m.containerPath.replace("$id", brokerId.toString)) 120 | .setMode(m.mode match { 121 | case MountMode.ReadWrite => Volume.Mode.RW 122 | case MountMode.ReadOnly => Volume.Mode.RO 123 | }) 124 | .build() 125 | )) 126 | .build() 127 | } 128 | 129 | def newTask(broker: Broker, offer: Offer, reservation: Broker.Reservation): TaskInfo = { 130 | val taskData = { 131 | var defaults: Map[String, String] = Map( 132 | "broker.id" -> broker.id.toString, 133 | "port" -> ("" + reservation.port), 134 | "log.dirs" -> "kafka-logs", 135 | "log.retention.bytes" -> ("" + 10l * 1024 * 1024 * 1024), 136 | 137 | "zookeeper.connect" -> Config.zk, 138 | "host.name" -> offer.getHostname 139 | ) 140 | 141 | if (kafkaDistribution.distInfo.kafkaVersion.compareTo(new Version("0.9")) >= 0) 142 | defaults += ("listeners" -> s"PLAINTEXT://:${ reservation.port }") 143 | 144 | if (reservation.volume != null) 145 | defaults += ("log.dirs" -> "data/kafka-logs") 146 | 147 | val launchConfig = LaunchConfig( 148 | broker.id, 149 | broker.options, 150 | broker.syslog, 151 | broker.log4jOptions, 152 | broker.bindAddress, 153 | defaults) 154 | ByteString.copyFrom(JsonUtil.toJsonBytes(launchConfig)) 155 | } 156 | 157 | val taskBuilder: TaskInfo.Builder = TaskInfo.newBuilder 158 | .setName(Config.frameworkName + "-" + broker.id) 159 | .setTaskId(TaskID.newBuilder.setValue(Broker.nextTaskId(broker)).build) 160 | .setSlaveId(offer.getSlaveId) 161 | .setData(taskData) 162 | .setExecutor(newExecutor(broker)) 163 | 164 | taskBuilder.addAllResources(reservation.toResources) 165 | taskBuilder.build 166 | } 167 | } 168 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/mesos/OfferManager.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.mesos 18 | 19 | import java.util.Date 20 | import ly.stealth.mesos.kafka.Broker 21 | import org.apache.log4j.Logger 22 | import org.apache.mesos.Protos.{Filters, Offer, OfferID} 23 | import scala.collection.JavaConversions._ 24 | import scala.collection.JavaConverters._ 25 | import scala.collection.mutable 26 | import scala.util.Random 27 | 28 | abstract class OfferResult { 29 | def offer: Offer 30 | } 31 | object OfferResult { 32 | abstract class Decline extends OfferResult { 33 | def reason: String 34 | def duration: Int 35 | } 36 | 37 | case class DeclineBroker( 38 | offer: Offer, 39 | broker: Broker, 40 | reason: String, 41 | duration: Int = 5 42 | ) extends Decline 43 | case class NoWork(offer: Offer) extends Decline { 44 | val reason = "no work to do" 45 | val duration: Int = 60 * 60 46 | } 47 | case class Accept(offer: Offer, broker: Broker) extends OfferResult 48 | 49 | def neverMatch(offer: Offer, broker: Broker, reason: String) = 50 | DeclineBroker(offer, broker, reason, 60 * 60) 51 | def eventuallyMatch(offer: Offer, broker: Broker, reason: String, howLong: Int) = 52 | DeclineBroker(offer, broker, reason, howLong) 53 | } 54 | 55 | object OfferManager { 56 | def otherTasksAttributes(name: String, brokers: Iterable[Broker]): Iterable[String] = { 57 | def value(task: Broker.Task, name: String): Option[String] = { 58 | if (name == "hostname") return Some(task.hostname) 59 | task.attributes.get(name) 60 | } 61 | 62 | brokers.flatMap(b => Option(b.task).flatMap(t => value(t, name))) 63 | } 64 | } 65 | 66 | trait OfferManagerComponent { 67 | val offerManager: OfferManager 68 | 69 | trait OfferManager { 70 | def enableOfferSuppression() 71 | def tryAcceptOffer( 72 | offer: Offer, 73 | brokers: Iterable[Broker] 74 | ): Either[OfferResult.Accept, Seq[OfferResult.Decline]] 75 | def pauseOrResumeOffers(forceRevive: Boolean = false): Unit 76 | def declineOffer(offer: OfferID): Unit 77 | def declineOffer(offer: OfferID, filters: Filters): Unit 78 | } 79 | } 80 | 81 | trait OfferManagerComponentImpl extends OfferManagerComponent { 82 | this: ClusterComponent with SchedulerDriverComponent => 83 | 84 | val offerManager: OfferManager = new OfferManagerImpl 85 | 86 | class OfferManagerImpl extends OfferManager { 87 | private[this] var offersAreSuppressed: Boolean = false 88 | private[this] val logger = Logger.getLogger("OfferManager") 89 | private[this] var canSuppressOffers = true 90 | 91 | def enableOfferSuppression(): Unit = canSuppressOffers = true 92 | 93 | def tryAcceptOffer( 94 | offer: Offer, 95 | brokers: Iterable[Broker] 96 | ): Either[OfferResult.Accept, Seq[OfferResult.Decline]] = { 97 | val now = new Date() 98 | val declines = mutable.Buffer[OfferResult.Decline]() 99 | 100 | for (broker <- brokers.filter(_.shouldStart(offer.getHostname))) { 101 | broker.matches(offer, now, name => OfferManager.otherTasksAttributes(name, brokers)) match { 102 | case accept: OfferResult.Accept => return Left(accept) 103 | case reason: OfferResult.Decline => declines.append(reason) 104 | } 105 | } 106 | 107 | // if we got here we're declining the offer, 108 | // if there's no reason it just meant we had nothing to do 109 | if (declines.isEmpty) 110 | declines.append(OfferResult.NoWork(offer)) 111 | 112 | val maxDuration = declines.map(_.duration).max.toDouble 113 | val jitter = (maxDuration / 3) * (Random.nextDouble() - .5) 114 | val fb = 
Filters.newBuilder().setRefuseSeconds(maxDuration + jitter) 115 | declineOffer(offer.getId, fb.build()) 116 | Right(declines) 117 | } 118 | 119 | def declineOffer(offer: OfferID): Unit = Driver.call(_.declineOffer(offer)) 120 | def declineOffer(offer: OfferID, filters: Filters): Unit = 121 | Driver.call(_.declineOffer(offer, filters)) 122 | 123 | def pauseOrResumeOffers(forceRevive: Boolean = false): Unit = { 124 | if (forceRevive) { 125 | driver.reviveOffers() 126 | logger.info("Re-requesting previously suppressed offers.") 127 | this.offersAreSuppressed = false 128 | return 129 | } 130 | 131 | val clusterIsSteadyState = cluster.getBrokers.asScala.forall(_.isSteadyState) 132 | // If all brokers are steady state we can request mesos to stop sending offers. 133 | if (!this.offersAreSuppressed && clusterIsSteadyState) { 134 | if (canSuppressOffers) { 135 | // Our version of mesos supports suppressOffers, so use it. 136 | Driver.call(_.suppressOffers()) 137 | logger.info("Cluster is now stable, offers are suppressed") 138 | this.offersAreSuppressed = true 139 | } 140 | else { 141 | // No support for suppress offers, noop it. 142 | this.offersAreSuppressed = true 143 | } 144 | } 145 | // Else, if offers are suppressed, and we are no longer steady-state, resume offers. 146 | else if (!clusterIsSteadyState && offersAreSuppressed) { 147 | Driver.call(_.reviveOffers()) 148 | logger.info("Cluster is no longer stable, resuming offers.") 149 | this.offersAreSuppressed = false 150 | } 151 | } 152 | } 153 | 154 | } -------------------------------------------------------------------------------- /src/scala/main/ly/stealth/mesos/kafka/scheduler/mesos/TaskReconciler.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka.scheduler.mesos 18 | 19 | import java.util.Date 20 | import java.util.concurrent.atomic.AtomicReference 21 | import java.util.concurrent.{Future, ScheduledFuture, TimeUnit} 22 | import ly.stealth.mesos.kafka.{Broker, ClockComponent, Cluster, Config} 23 | import org.apache.log4j.Logger 24 | import org.apache.mesos.Protos.{TaskID, TaskState, TaskStatus} 25 | import scala.collection.JavaConversions._ 26 | 27 | trait TaskReconcilerComponent { 28 | val taskReconciler : TaskReconciler 29 | 30 | trait TaskReconciler { 31 | def start(): Future[_] 32 | def onBrokerReconciled(broker: Broker): Unit 33 | def isReconciling: Boolean 34 | 35 | def attempts: Int 36 | def lastReconcile: Date 37 | } 38 | } 39 | 40 | trait ClusterComponent { 41 | def cluster: Cluster 42 | } 43 | 44 | trait TaskReconcilerComponentImpl extends TaskReconcilerComponent { 45 | this: ClusterComponent 46 | with SchedulerDriverComponent 47 | with ClockComponent 48 | with EventLoopComponent => 49 | 50 | val taskReconciler = new TaskReconcilerImpl 51 | 52 | class TaskReconcilerImpl extends TaskReconciler { 53 | import ly.stealth.mesos.kafka.RunnableConversions._ 54 | 55 | private[this] val logger = Logger.getLogger("TaskReconciler") 56 | private[this] val retryFuture: AtomicReference[ScheduledFuture[_]] = new AtomicReference[ScheduledFuture[_]]() 57 | 58 | private[this] var _attempts: Int = 1 59 | private[this] var _lastAttempt: Date = new Date(0) 60 | 61 | def attempts: Int = _attempts 62 | def lastReconcile: Date = _lastAttempt 63 | 64 | def onBrokerReconciled(broker: Broker): Unit = { 65 | if (!isReconciling) { 66 | logger.info("Reconciliation complete") 67 | Option(retryFuture.getAndSet(null)).foreach { _.cancel(false) } 68 | } 69 | } 70 | 71 | def isReconciling: Boolean = 72 | cluster.getBrokers.exists(b => b.task != null && b.task.reconciling) 73 | 74 | private def scheduleRetry() = { 75 | retryFuture.set(eventLoop.schedule( 76 | retryReconciliation _, 77 | Config.reconciliationTimeout.ms, 78 | TimeUnit.MILLISECONDS)) 79 | } 80 | 81 | def retryReconciliation(): Unit = { 82 | if (!isReconciling) { 83 | return 84 | } 85 | 86 | _attempts += 1 87 | _lastAttempt = clock.now() 88 | 89 | val brokersReconciling = cluster.getBrokers.filter(b => b.task != null && b.task.reconciling) 90 | brokersReconciling.foreach(broker => { 91 | logger.info( 92 | s"Reconciling $attempts/${ Config.reconciliationAttempts } state of broker" + 93 | s" ${ broker.id }, task ${ broker.task.id }") 94 | }) 95 | 96 | if (attempts > Config.reconciliationAttempts) { 97 | for (broker <- brokersReconciling) { 98 | logger.info( 99 | s"Reconciling exceeded ${ Config.reconciliationAttempts } tries " + 100 | s"for broker ${ broker.id }, sending killTask for task ${ broker.task.id }") 101 | Driver.call(_.killTask(TaskID.newBuilder().setValue(broker.task.id).build())) 102 | broker.task.state = Broker.State.STOPPING 103 | } 104 | cluster.save() 105 | } 106 | else { 107 | val statuses = brokersReconciling 108 | .map(broker => { 109 | TaskStatus.newBuilder() 110 | .setTaskId(TaskID.newBuilder().setValue(broker.task.id)) 111 | .setState(TaskState.TASK_STAGING) 112 | .build() 113 | }) 114 | 115 | if (statuses.nonEmpty) { 116 | Driver.call(_.reconcileTasks(statuses)) 117 | scheduleRetry() 118 | } 119 | } 120 | } 121 | 122 | def start(): Future[_] = { 123 | eventLoop.submit(() => 124 | if (isReconciling) { 125 | logger.warn("Reconcile already in progress, skipping.") 126 | } else { 127 | startImpl() 128 | } 129 | ) 130 | } 131 | 132 | 
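// Begin a fresh reconciliation pass: mark every broker that has a task as
// RECONCILING, issue an implicit reconciliation (an empty status list asks the
// Mesos master to re-send the state of all of this framework's tasks), and
// schedule the first retry.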
private def startImpl() = { 133 | retryFuture.set(null) 134 | _attempts = 1 135 | logger.info("Starting reconciliation") 136 | 137 | val brokersToReconcile = cluster.getBrokers.filter(_.task != null) 138 | brokersToReconcile.foreach(broker => { 139 | broker.task.state = Broker.State.RECONCILING 140 | logger.info(s"Reconciling $attempts/${ Config.reconciliationAttempts } " + 141 | s"state of broker ${ broker.id }, task ${ broker.task.id }") 142 | }) 143 | 144 | Driver.call(_.reconcileTasks(Seq())) 145 | cluster.save() 146 | 147 | scheduleRetry() 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/ClusterTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import org.junit.{Before, Test} 21 | import java.util 22 | import org.junit.Assert._ 23 | import ly.stealth.mesos.kafka.Broker.{Metrics, State} 24 | import ly.stealth.mesos.kafka.json.JsonUtil 25 | 26 | class ClusterTest extends KafkaMesosTestCase { 27 | var cluster: Cluster = new Cluster() 28 | 29 | @Before 30 | override def before { 31 | super.before 32 | cluster.clear() 33 | } 34 | 35 | @Test 36 | def addBroker_removeBroker_getBrokers { 37 | assertTrue(cluster.getBrokers.isEmpty) 38 | 39 | val broker0 = cluster.addBroker(new Broker(0)) 40 | val broker1 = cluster.addBroker(new Broker(1)) 41 | assertEquals(util.Arrays.asList(broker0, broker1), cluster.getBrokers) 42 | 43 | cluster.removeBroker(broker0) 44 | assertEquals(util.Arrays.asList(broker1), cluster.getBrokers) 45 | 46 | cluster.removeBroker(broker1) 47 | assertTrue(cluster.getBrokers.isEmpty) 48 | } 49 | 50 | @Test 51 | def getBroker { 52 | assertNull(cluster.getBroker(0)) 53 | 54 | val broker0 = cluster.addBroker(new Broker(0)) 55 | assertSame(broker0, cluster.getBroker(0)) 56 | } 57 | 58 | @Test 59 | def save_load { 60 | cluster.addBroker(new Broker(0)) 61 | cluster.addBroker(new Broker(1)) 62 | cluster.save() 63 | 64 | val read = Cluster.load() 65 | assertEquals(2, read.getBrokers.size()) 66 | } 67 | 68 | @Test 69 | def load_empty_cluster: Unit = { 70 | val jsData = "{\"version\":\"0.10.1.0-SNAPSHOT\"}" 71 | val cluster = JsonUtil.fromJson[Cluster](jsData) 72 | assertNotNull(cluster.getBrokers) 73 | assertEquals(0, cluster.getBrokers.size()) 74 | } 75 | 76 | @Test 77 | def toJson_fromJson { 78 | val broker0 = cluster.addBroker(new Broker(0)) 79 | broker0.task = Broker.Task("1", "slave", "executor", "host") 80 | broker0.task.state = State.RUNNING 81 | cluster.addBroker(new Broker(1)) 82 | cluster.frameworkId = "id" 83 | 84 | val read = 
JsonUtil.fromJson[Cluster](JsonUtil.toJson(cluster))
85 |
86 | assertEquals(cluster.frameworkId, read.frameworkId)
87 | assertEquals(2, read.getBrokers.size())
88 | BrokerTest.assertBrokerEquals(broker0, read.getBroker(0))
89 | }
90 |
91 | @Test
92 | def toJsonExcludesMetrics: Unit = {
93 | val broker0 = cluster.addBroker(new Broker(0))
94 | broker0.metrics = Metrics(Map("test" -> 0), System.currentTimeMillis())
95 |
96 | val read = JsonUtil.fromJson[Cluster](JsonUtil.toJson(cluster))
97 | assertEquals(0, read.getBroker(0).metrics.timestamp) // metrics are excluded from cluster JSON, so the timestamp deserializes to its default
98 |
99 | // Make sure a broker itself still works normally
100 | val readBroker = JsonUtil.fromJson[Broker](JsonUtil.toJson(broker0))
101 | assertEquals(broker0.metrics.timestamp, readBroker.metrics.timestamp)
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/src/test/ly/stealth/mesos/kafka/ExecutorTest.scala:
--------------------------------------------------------------------------------
1 | /**
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import ly.stealth.mesos.kafka.executor.{Executor, LaunchConfig} 21 | import ly.stealth.mesos.kafka.json.JsonUtil 22 | import org.junit.Test 23 | import org.junit.Assert._ 24 | import org.apache.mesos.Protos.{Status, TaskState} 25 | import scala.collection.JavaConversions._ 26 | 27 | class ExecutorTest extends KafkaMesosTestCase { 28 | @Test(timeout = 5000) 29 | def startBroker_success { 30 | val data = JsonUtil.toJson(LaunchConfig(0)) 31 | Executor.startBroker(executorDriver, task("id", "task", "slave", data)) 32 | executorDriver.waitForStatusUpdates(2) 33 | assertEquals(2, executorDriver.statusUpdates.size()) 34 | 35 | assertEquals( 36 | Seq(TaskState.TASK_STARTING, TaskState.TASK_RUNNING), 37 | executorDriver.statusUpdates.map(_.getState)) 38 | assertTrue(Executor.server.isStarted) 39 | 40 | Executor.server.stop() 41 | executorDriver.waitForStatusUpdates(3) 42 | 43 | assertEquals(3, executorDriver.statusUpdates.size()) 44 | val status = executorDriver.statusUpdates.get(2) 45 | assertEquals(TaskState.TASK_FINISHED, status.getState) 46 | assertFalse(Executor.server.isStarted) 47 | } 48 | 49 | @Test(timeout = 5000) 50 | def startBroker_failure { 51 | Executor.server.asInstanceOf[TestBrokerServer].failOnStart = true 52 | Executor.startBroker(executorDriver, task()) 53 | 54 | executorDriver.waitForStatusUpdates(1) 55 | assertEquals(1, executorDriver.statusUpdates.size()) 56 | 57 | val status = executorDriver.statusUpdates.get(0) 58 | assertEquals(TaskState.TASK_FAILED, status.getState) 59 | assertFalse(Executor.server.isStarted) 60 | } 61 | 62 | @Test 63 | def stopExecutor { 64 | Executor.server.start(null, null) 65 | assertTrue(Executor.server.isStarted) 66 | assertEquals(Status.DRIVER_RUNNING, executorDriver.status) 67 | 68 | Executor.stopExecutor(executorDriver) 69 | assertFalse(Executor.server.isStarted) 70 | assertEquals(Status.DRIVER_STOPPED, executorDriver.status) 71 | 72 | Executor.stopExecutor(executorDriver) // no error 73 | assertEquals(Status.DRIVER_STOPPED, executorDriver.status) 74 | } 75 | 76 | @Test(timeout = 5000) 77 | def launchTask { 78 | val data = JsonUtil.toJson(LaunchConfig(0)) 79 | Executor.launchTask(executorDriver, task("id", "task", "slave", data)) 80 | 81 | executorDriver.waitForStatusUpdates(1) 82 | assertTrue(Executor.server.isStarted) 83 | } 84 | 85 | @Test(timeout = 5000) 86 | def killTask { 87 | Executor.server.start(null, null) 88 | Executor.killTask(executorDriver, taskId()) 89 | 90 | Executor.server.waitFor() 91 | assertFalse(Executor.server.isStarted) 92 | } 93 | 94 | @Test 95 | def shutdown { 96 | Executor.server.start(null, null) 97 | Executor.shutdown(executorDriver) 98 | assertFalse(Executor.server.isStarted) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/ExprTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. 
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | import org.junit.{After, Before, Test} 20 | import org.junit.Assert._ 21 | import java.util 22 | import ly.stealth.mesos.kafka.scheduler.{Expr, Topics} 23 | import net.elodina.mesos.util.Strings.parseMap 24 | import scala.collection.JavaConversions._ 25 | 26 | 27 | class ExprTest extends KafkaMesosTestCase { 28 | @Before 29 | override def before { 30 | super.before 31 | startZkServer() 32 | } 33 | 34 | @After 35 | override def after { 36 | super.after 37 | stopZkServer() 38 | } 39 | 40 | @Test 41 | def expandBrokers { 42 | val cluster = registry.cluster 43 | 44 | for (i <- 0 until 5) 45 | cluster.addBroker(new Broker(i)) 46 | 47 | try { 48 | assertEquals(util.Arrays.asList(), Expr.expandBrokers(cluster, "")) 49 | fail() 50 | } catch { case e: IllegalArgumentException => } 51 | 52 | assertEquals(Seq(0), Expr.expandBrokers(cluster, "0")) 53 | assertEquals(Seq(0, 2, 4), Expr.expandBrokers(cluster, "0,2,4")) 54 | assertEquals(Seq(1, 2, 3), Expr.expandBrokers(cluster, "1..3")) 55 | assertEquals(Seq(0, 1, 3, 4), Expr.expandBrokers(cluster, "0..1,3..4")) 56 | assertEquals(Seq(0, 1, 2, 3, 4), Expr.expandBrokers(cluster, "*")) 57 | 58 | // duplicates 59 | assertEquals(Seq(0, 1, 2, 3, 4), Expr.expandBrokers(cluster, "0..3,2..4")) 60 | 61 | // sorting 62 | assertEquals(Seq(2, 3, 4), Expr.expandBrokers(cluster, "4,3,2")) 63 | 64 | // not-existent brokers 65 | assertEquals(Seq(5, 6, 7), Expr.expandBrokers(cluster, "5,6,7")) 66 | } 67 | 68 | @Test 69 | def expandBrokers_attributes { 70 | val cluster = registry.cluster 71 | val b0 = cluster.addBroker(new Broker(0)) 72 | val b1 = cluster.addBroker(new Broker(1)) 73 | val b2 = cluster.addBroker(new Broker(2)) 74 | cluster.addBroker(new Broker(3)) 75 | 76 | b0.task = Broker.Task(hostname = "master", attributes = parseMap("a=1").toMap) 77 | b1.task = Broker.Task(hostname = "slave0", attributes = parseMap("a=2,b=2").toMap) 78 | b2.task = Broker.Task(hostname = "slave1", attributes = parseMap("b=2").toMap) 79 | 80 | // exact match 81 | assertEquals(Seq(0, 1, 2, 3), Expr.expandBrokers(cluster, "*")) 82 | assertEquals(Seq(0), Expr.expandBrokers(cluster, "*[a=1]")) 83 | assertEquals(Seq(1, 2), Expr.expandBrokers(cluster, "*[b=2]")) 84 | 85 | // attribute present 86 | assertEquals(Seq(0, 1), Expr.expandBrokers(cluster, "*[a]")) 87 | assertEquals(Seq(1, 2), Expr.expandBrokers(cluster, "*[b]")) 88 | 89 | // hostname 90 | assertEquals(Seq(0), Expr.expandBrokers(cluster, "*[hostname=master]")) 91 | assertEquals(Seq(1, 2), Expr.expandBrokers(cluster, "*[hostname=slave*]")) 92 | 93 | // not existent broker 94 | assertEquals(Seq(), Expr.expandBrokers(cluster, "5[a]")) 95 | assertEquals(Seq(), Expr.expandBrokers(cluster, "5[]")) 96 | } 97 | 98 | @Test 99 | def expandBrokers_sortByAttrs { 100 | val cluster = registry.cluster 101 | val b0 = cluster.addBroker(new Broker(0)) 102 | val b1 = cluster.addBroker(new Broker(1)) 103 | val b2 = cluster.addBroker(new Broker(2)) 104 | val b3 = cluster.addBroker(new Broker(3)) 105 | val b4 = cluster.addBroker(new Broker(4)) 106 | val b5 = 
cluster.addBroker(new Broker(5)) 107 | val b6 = cluster.addBroker(new Broker(6)) 108 | 109 | 110 | b0.task = Broker.Task(attributes = parseMap("r=2,a=1").toMap) 111 | b1.task = Broker.Task(attributes = parseMap("r=0,a=1").toMap) 112 | b2.task = Broker.Task(attributes = parseMap("r=1,a=1").toMap) 113 | b3.task = Broker.Task(attributes = parseMap("r=1,a=2").toMap) 114 | b4.task = Broker.Task(attributes = parseMap("r=0,a=2").toMap) 115 | b5.task = Broker.Task(attributes = parseMap("r=0,a=2").toMap) 116 | b6.task = Broker.Task(attributes = parseMap("a=2").toMap) 117 | 118 | assertEquals(Seq(0, 1, 2, 3, 4, 5, 6), Expr.expandBrokers(cluster, "*", sortByAttrs = true)) 119 | assertEquals(Seq(1, 2, 0, 4, 3, 5), Expr.expandBrokers(cluster, "*[r]", sortByAttrs = true)) 120 | assertEquals(Seq(1, 4, 2, 3, 0, 5), Expr.expandBrokers(cluster, "*[r,a]", sortByAttrs = true)) 121 | 122 | assertEquals(Seq(1, 2, 0), Expr.expandBrokers(cluster, "*[r=*,a=1]", sortByAttrs = true)) 123 | assertEquals(Seq(4, 3, 5), Expr.expandBrokers(cluster, "*[r,a=2]", sortByAttrs = true)) 124 | } 125 | 126 | @Test 127 | def expandTopics { 128 | val cluster = registry.cluster 129 | val topics: Topics = cluster.topics 130 | 131 | topics.addTopic("t0") 132 | topics.addTopic("t1") 133 | topics.addTopic("x") 134 | 135 | assertEquals(Seq(), Expr.expandTopics("")) 136 | assertEquals(Seq("t5", "t6"), Expr.expandTopics("t5,t6")) 137 | assertEquals(Seq("t0"), Expr.expandTopics("t0")) 138 | assertEquals(Seq("t0", "t1"), Expr.expandTopics("t0, t1")) 139 | assertEquals(Set("t0", "t1", "x"), Expr.expandTopics("*").toSet) 140 | assertEquals(Set("t0", "t1"), Expr.expandTopics("t*").toSet) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/JsonTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import org.junit.Test 21 | import org.junit.Assert._ 22 | import net.elodina.mesos.util.{Constraint, Period, Range} 23 | import ly.stealth.mesos.kafka.Broker.{Endpoint, ExecutionOptions, Failover, Stickiness, Task} 24 | import scala.io.Source 25 | 26 | class JsonTest { 27 | 28 | def getResourceJson[T](file: String)(implicit m: Manifest[T]): T = { 29 | val fileData = this.getClass.getResource(file) 30 | val txt = Source.fromFile(fileData.getFile) 31 | json.JsonUtil.fromJson[T](txt.mkString) 32 | } 33 | 34 | @Test 35 | def broker_legacy(): Unit = { 36 | val broker = getResourceJson[Broker]("/broker.json") 37 | 38 | val b = new Broker(1) 39 | b.task = Task( 40 | id = "kafka-general-0-705d56e2-7d62-4d7e-b033-74ea5526ed82", 41 | hostname = "host1", 42 | executorId = "kafka-general-0-ff258207-18f3-4fd2-9028-5f4c4143f84d", 43 | attributes = Map( 44 | "ip" -> "10.253.166.214", 45 | "host" -> "host1", 46 | "ami" -> "ami-d0232eba", 47 | "cluster" -> "us-east-1", 48 | "dedicated" -> "kafka/general", 49 | "zone" -> "us-east-1d", 50 | "instance_type" -> "i2.2xlarge" 51 | ), 52 | slaveId = "1fbd3a0d-a685-47e6-8066-01be06d68fac-S821" 53 | ) 54 | b.task.state = Broker.State.RUNNING 55 | b.task.endpoint = new Endpoint("host1:9092") 56 | b.syslog = false 57 | b.stickiness = new Stickiness(new Period("10m")) 58 | b.stickiness.registerStart("host1") 59 | b.log4jOptions = Map("k1" -> "v1", "k2" -> "v2") 60 | b.options = Map("a" -> "1", "b" -> "2") 61 | b.active = true 62 | b.port = new Range(9092) 63 | b.constraints = Map("dedicated" -> new Constraint("like:kafka/general")) 64 | b.mem = 56320 65 | b.executionOptions = ExecutionOptions(jvmOptions = "-server") 66 | b.cpus = 7 67 | b.heap = 5120 68 | b.failover = new Failover(new Period("1m"), new Period("10m")) 69 | 70 | BrokerTest.assertBrokerEquals(b, broker) 71 | } 72 | 73 | @Test 74 | def topic_legacy(): Unit = { 75 | val topic = getResourceJson[Topic]("/topic.json") 76 | val t = Topic( 77 | "__consumer_offsets", 78 | Map( 79 | 45 -> Seq(5,3,4), 80 | 34 -> Seq(2,7,0) 81 | ), 82 | Map( 83 | "cleanup.policy" -> "compact", 84 | "compression.type" -> "uncompressed", 85 | "segment.bytes" -> "104857600" 86 | ) 87 | ) 88 | assertEquals(t, topic) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/KafkaBrokerServerTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | import ly.stealth.mesos.kafka.executor.KafkaServer 20 | import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} 21 | import org.apache.kafka.common.serialization.ByteArraySerializer 22 | import org.junit.Test 23 | import org.junit.Assert._ 24 | import scala.collection.JavaConversions._ 25 | 26 | class KafkaBrokerServerTest extends KafkaMesosTestCase { 27 | //@Test 28 | def launchKafkaBroker { 29 | 30 | Thread.currentThread().setContextClassLoader(KafkaServer.Distro.loader) 31 | 32 | val client = startZkServer() 33 | client.delete("/brokers/ids/0") 34 | 35 | val defaults: Map[String, String] = Map( 36 | "broker.id" -> "0", 37 | "port" -> ("" + 9092), 38 | "log.dirs" -> "kafka-logs", 39 | "log.retention.bytes" -> ("" + 10l * 1024 * 1024 * 1024), 40 | 41 | "zookeeper.connect" -> Config.zk, 42 | "host.name" -> "localhost", 43 | "listeners" -> s"PLAINTEXT://:9092", 44 | "log.dirs" -> "data/kafka-logs", 45 | "auto.create.topics.enable" -> "true" 46 | ) 47 | val server = KafkaServer.Distro.newServer(defaults) 48 | server.getClass.getMethod("startup").invoke(server) 49 | 50 | val producer = new KafkaProducer[Array[Byte], Array[Byte]]( 51 | Map[String, Object]( 52 | "bootstrap.servers" -> "localhost:9092" 53 | ), 54 | new ByteArraySerializer(), 55 | new ByteArraySerializer() 56 | ) 57 | producer.send(new ProducerRecord[Array[Byte], Array[Byte]]("test", new Array[Byte](100))) 58 | 59 | var gotMetrics: Broker.Metrics = null 60 | KafkaServer.Distro.startCollector(server, m => { gotMetrics = m }) 61 | 62 | assertNotNull(gotMetrics) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/KafkaMesosTestCase.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | import java.io.{File, FileWriter} 20 | import org.I0Itec.zkclient.{IDefaultNameSpace, ZkClient, ZkServer} 21 | import org.apache.log4j.{Appender, BasicConfigurator, ConsoleAppender, Level, Logger, PatternLayout} 22 | import ly.stealth.mesos.kafka.Cluster.FsStorage 23 | import net.elodina.mesos.util.{IO, Net, Period, Version} 24 | import org.junit.{After, Before, Ignore} 25 | import scala.concurrent.duration.Duration 26 | import scala.collection.JavaConversions._ 27 | import java.util.concurrent.atomic.AtomicBoolean 28 | import java.util 29 | import java.util.Date 30 | import ly.stealth.mesos.kafka.executor.{BrokerServer, Executor, KafkaServer, LaunchConfig} 31 | import ly.stealth.mesos.kafka.scheduler._ 32 | import net.elodina.mesos.test.TestSchedulerDriver 33 | import org.apache.mesos.Protos.{Status, TaskState} 34 | import org.junit.Assert._ 35 | 36 | @Ignore 37 | class KafkaMesosTestCase extends net.elodina.mesos.test.MesosTestCase { 38 | var zkDir: File = null 39 | var zkServer: ZkServer = null 40 | 41 | trait MockKafkaDistribution extends KafkaDistributionComponent { 42 | def createTempFile(name: String, content: String): File = { 43 | val file = File.createTempFile(getClass.getSimpleName, name) 44 | IO.writeFile(file, content) 45 | 46 | file.deleteOnExit() 47 | file 48 | } 49 | 50 | override val kafkaDistribution: KafkaDistribution = new KafkaDistribution { 51 | override val distInfo: KafkaDistributionInfo = KafkaDistributionInfo( 52 | jar = createTempFile("executor.jar", "executor"), 53 | kafkaDist = createTempFile("kafka-0.9.3.0.tgz", "kafka"), 54 | kafkaVersion = new Version("0.9.3.0") 55 | ) 56 | } 57 | } 58 | 59 | object MockWallClock extends Clock { 60 | private[this] var mockDate: Option[Date] = None 61 | 62 | def now(): Date = mockDate.getOrElse(new Date()) 63 | def overrideNow(now: Option[Date]): Unit = mockDate = now 64 | } 65 | 66 | trait MockWallClockComponent extends ClockComponent { 67 | override val clock: Clock = MockWallClock 68 | } 69 | 70 | schedulerDriver = new TestSchedulerDriver() { 71 | override def suppressOffers() = Status.DRIVER_RUNNING 72 | override def reviveOffers(): Status = Status.DRIVER_RUNNING 73 | } 74 | 75 | var registry: Registry = _ 76 | 77 | def started(broker: Broker) { 78 | registry.scheduler.resourceOffers(schedulerDriver, Seq(offer("slave" + broker.id, "cpus:2.0;mem:2048;ports:9042..65000"))) 79 | broker.waitFor(Broker.State.PENDING, new Period("1s"), 1) 80 | registry.scheduler.statusUpdate(schedulerDriver, taskStatus(broker.task.id, TaskState.TASK_STARTING, "slave" + broker.id + ":9042")) 81 | registry.scheduler.statusUpdate(schedulerDriver, taskStatus(broker.task.id, TaskState.TASK_RUNNING, "slave" + broker.id + ":9042")) 82 | broker.waitFor(Broker.State.RUNNING, new Period("1s"), 1) 83 | assertEquals(Broker.State.RUNNING, broker.task.state) 84 | assertTrue(broker.active) 85 | } 86 | 87 | def stopped(broker: Broker): Unit = { 88 | assertTrue(broker.waitFor(Broker.State.STOPPING, new Period("1s"), 1)) 89 | registry.scheduler.statusUpdate(schedulerDriver, taskStatus(broker.task.id, TaskState.TASK_FINISHED)) 90 | assertTrue(broker.waitFor(null, new Period("1s"), 1)) 91 | assertNull(broker.task) 92 | } 93 | 94 | @Before 95 | def before { 96 | BasicConfigurator.resetConfiguration() 97 | val layout = new PatternLayout("%d %-5p %23c] %m%n") 98 | val appender: Appender = new ConsoleAppender(layout) 99 | 100 | Logger.getRootLogger.addAppender(appender) 101 | 
Logger.getLogger("org.apache.zookeeper").setLevel(Level.FATAL) 102 | Logger.getLogger("org.I0Itec.zkclient").setLevel(Level.FATAL) 103 | 104 | val storageFile = File.createTempFile(getClass.getSimpleName, null) 105 | storageFile.delete() 106 | Cluster.storage = new FsStorage(storageFile) 107 | 108 | Config.api = "http://localhost:7000" 109 | Config.zk = "localhost" 110 | 111 | MockWallClock.overrideNow(None) 112 | registry = new ProductionRegistry() with MockKafkaDistribution with MockWallClockComponent 113 | registry.cluster.clear() 114 | registry.cluster.rebalancer = new TestRebalancer() 115 | 116 | registry.scheduler.registered(schedulerDriver, frameworkId(), master()) 117 | Executor.server = new TestBrokerServer() 118 | } 119 | 120 | @After 121 | def after { 122 | val storage = Cluster.storage.asInstanceOf[FsStorage] 123 | storage.file.delete() 124 | Cluster.storage = new FsStorage(FsStorage.DEFAULT_FILE) 125 | 126 | Executor.server.stop() 127 | Executor.server = new KafkaServer() 128 | BasicConfigurator.resetConfiguration() 129 | ZkUtilsWrapper.reset() 130 | AdminUtilsWrapper.reset() 131 | } 132 | 133 | def startZkServer(): ZkClient = { 134 | val port = Net.findAvailPort 135 | Config.zk = s"localhost:$port" 136 | 137 | zkDir = File.createTempFile(getClass.getName, null) 138 | zkDir.delete() 139 | 140 | val defaultNamespace = new IDefaultNameSpace { def createDefaultNameSpace(zkClient: ZkClient): Unit = {} } 141 | zkServer = new ZkServer("" + zkDir, "" + zkDir, defaultNamespace, port) 142 | zkServer.start() 143 | 144 | val zkClient: ZkClient = zkServer.getZkClient 145 | zkClient.createPersistent("/brokers/ids/0", true) 146 | zkClient.createPersistent("/config/changes", true) 147 | zkClient 148 | } 149 | 150 | def stopZkServer() { 151 | if (zkDir == null) return 152 | 153 | zkServer.shutdown() 154 | def delete(dir: File) { 155 | val children: Array[File] = dir.listFiles() 156 | if (children != null) children.foreach(delete) 157 | dir.delete() 158 | } 159 | delete(zkDir) 160 | 161 | zkDir = null 162 | zkServer = null 163 | } 164 | 165 | def startHttpServer() { 166 | registry.httpServer.initLogging() 167 | Config.api = "http://localhost:0" 168 | registry.httpServer.start() 169 | } 170 | 171 | def stopHttpServer() { 172 | registry.httpServer.stop() 173 | } 174 | 175 | def delay(duration: String = "100ms")(f: => Unit) = new Thread { 176 | override def run(): Unit = { 177 | Thread.sleep(Duration(duration).toMillis) 178 | f 179 | } 180 | }.start() 181 | } 182 | 183 | class TestBrokerServer extends BrokerServer { 184 | var failOnStart: Boolean = false 185 | private val started: AtomicBoolean = new AtomicBoolean(false) 186 | 187 | def isStarted: Boolean = started.get() 188 | 189 | def start(config: LaunchConfig, send: Broker.Metrics => Unit): Broker.Endpoint = { 190 | if (failOnStart) throw new RuntimeException("failOnStart") 191 | started.set(true) 192 | new Broker.Endpoint("localhost", 9092) 193 | } 194 | 195 | def stop(): Unit = { 196 | started.set(false) 197 | started.synchronized { started.notify() } 198 | } 199 | 200 | def waitFor(): Unit = { 201 | started.synchronized { 202 | while (started.get) 203 | started.wait() 204 | } 205 | } 206 | 207 | def getClassLoader: ClassLoader = getClass.getClassLoader 208 | } 209 | 210 | class TestRebalancer extends Rebalancer { 211 | var _running: Boolean = false 212 | var _failOnStart: Boolean = false 213 | 214 | override def running: Boolean = _running 215 | 216 | override def start(topics: Seq[String], brokers: Seq[Int], replicas: Int = -1, 
fixedStartIndex: Int = -1, startPartitionId: Int = -1, realignment: Boolean = false): Unit = { 217 | if (_failOnStart) throw new Rebalancer.Exception("failOnStart") 218 | _running = true 219 | } 220 | 221 | override def state: String = if (running) "running" else "" 222 | } 223 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/RebalancerTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import org.junit.{After, Before, Test} 21 | import org.junit.Assert._ 22 | import org.I0Itec.zkclient.ZkClient 23 | import kafka.utils.ZkUtils 24 | import scala.collection.JavaConversions._ 25 | import java.util 26 | import ly.stealth.mesos.kafka.scheduler.{Rebalancer, ZKStringSerializer, ZkUtilsWrapper} 27 | 28 | class RebalancerTest extends KafkaMesosTestCase { 29 | var rebalancer: Rebalancer = _ 30 | var zkClient: ZkClient = _ 31 | 32 | @Before 33 | override def before { 34 | super.before 35 | rebalancer = new Rebalancer() 36 | 37 | val port = 56789 38 | Config.zk = s"localhost:$port" 39 | 40 | startZkServer() 41 | zkClient = zkServer.getZkClient 42 | zkClient.setZkSerializer(ZKStringSerializer) 43 | } 44 | 45 | @After 46 | override def after { 47 | super.after 48 | stopZkServer() 49 | } 50 | 51 | @Test 52 | def start { 53 | val cluster = registry.cluster 54 | cluster.addBroker(new Broker(0)) 55 | cluster.addBroker(new Broker(1)) 56 | 57 | cluster.topics.addTopic("topic", Map(0 -> Seq(0), 1 -> Seq(0))) 58 | assertFalse(rebalancer.running) 59 | rebalancer.start(Seq("topic"), Seq(0, 1)) 60 | 61 | assertTrue(rebalancer.running) 62 | assertFalse(rebalancer.state.isEmpty) 63 | } 64 | 65 | // This test no longer applies in kafka 0.10.0 66 | //@Test 67 | def start_in_progress { 68 | registry.cluster.topics.addTopic("topic", Map(0 -> Seq(0), 1 -> Seq(0))) 69 | ZkUtilsWrapper().createPersistentPath(ZkUtils.ReassignPartitionsPath) 70 | 71 | registry.cluster.addBroker(new Broker(2)) 72 | try { rebalancer.start(Seq("topic"), Seq(1, 2)); fail() } 73 | catch { 74 | case e: Rebalancer.Exception => assertTrue(e.getMessage, e.getMessage.contains("in progress")) 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/TopicsTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package ly.stealth.mesos.kafka 18 | 19 | import scala.collection.JavaConversions._ 20 | import scala.collection.JavaConverters._ 21 | import org.junit.{After, Before, Test} 22 | import org.junit.Assert._ 23 | import java.util 24 | import ly.stealth.mesos.kafka.json.JsonUtil 25 | import ly.stealth.mesos.kafka.scheduler.Topics 26 | import net.elodina.mesos.util.Strings.{formatMap, parseMap} 27 | 28 | class TopicsTest extends KafkaMesosTestCase { 29 | var topics: Topics = null 30 | 31 | @Before 32 | override def before { 33 | super.before 34 | startZkServer() 35 | topics = registry.cluster.topics 36 | } 37 | 38 | @After 39 | override def after { 40 | super.after 41 | stopZkServer() 42 | } 43 | 44 | @Test 45 | def getTopic { 46 | assertNull(topics.getTopic("t")) 47 | 48 | topics.addTopic("t") 49 | assertNotNull(topics.getTopic("t")) 50 | } 51 | 52 | @Test 53 | def getTopics { 54 | assertEquals(0, topics.getTopics.size) 55 | 56 | topics.addTopic("t0") 57 | topics.addTopic("t1") 58 | assertEquals(2, topics.getTopics.size) 59 | } 60 | 61 | @Test 62 | def deleteTopics { 63 | assertEquals(0, topics.getTopics.size) 64 | 65 | topics.addTopic("t0") 66 | topics.addTopic("t1") 67 | 68 | assertEquals(2, topics.getTopics.size) 69 | 70 | topics.deleteTopic("t0") 71 | topics.deleteTopic("t1") 72 | 73 | // delete is done asynchronously 74 | delay("500ms") { 75 | assertEquals(0, topics.getTopics.size) 76 | } 77 | } 78 | 79 | @Test 80 | def fairAssignment { 81 | val assignment = topics.fairAssignment(3, 2, Seq(0, 1, 2), 0, 0) 82 | assertEquals(3, assignment.size) 83 | assertEquals(Seq(0, 1), assignment(0)) 84 | assertEquals(Seq(1, 2), assignment(1)) 85 | assertEquals(Seq(2, 0), assignment(2)) 86 | } 87 | 88 | @Test 89 | def addTopic { 90 | topics.addTopic("t0", topics.fairAssignment(2, 1), options = parseMap("flush.ms=1000")) 91 | topics.addTopic("t1") 92 | 93 | val _topics: util.List[Topic] = topics.getTopics 94 | assertEquals(2, _topics.size()) 95 | 96 | val t0: Topic = _topics.get(0) 97 | assertEquals("t0", t0.name) 98 | assertEquals("flush.ms=1000", formatMap(t0.options)) 99 | 100 | assertEquals(2, t0.partitions.size) 101 | assertEquals(Seq(0), t0.partitions(0)) 102 | assertEquals(Seq(0), t0.partitions(1)) 103 | } 104 | 105 | @Test 106 | def updateTopic { 107 | var t: Topic = topics.addTopic("t") 108 | topics.updateTopic(t, parseMap("flush.ms=1000")) 109 | 110 | t = topics.getTopic(t.name) 111 | assertEquals("flush.ms=1000", formatMap(t.options)) 112 | } 113 | 114 | @Test 115 | def validateOptions { 116 | assertNull(topics.validateOptions(parseMap("flush.ms=1000"))) 117 | assertNotNull(topics.validateOptions(parseMap("invalid=1000"))) 118 | } 119 | 120 | // Topic 121 | @Test 122 | def Topic_toJson_fromJson() { 123 | val topic = Topic( 124 | "name", 125 | Map( 126 | 0 -> Seq(0, 1), 127 | 1 -> Seq(1, 2), 128 | 2 -> Seq(0, 2) 129 | ), 130 | parseMap("a=1,b=2") 131 | ) 132 | 133 | val read = 
JsonUtil.fromJson[Topic](JsonUtil.toJson(topic)) 134 | assertEquals(topic.name, read.name) 135 | assertEquals(topic.partitions(0), read.partitions(0)) 136 | assertEquals(topic.options, read.options) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /src/test/ly/stealth/mesos/kafka/UtilTest.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package ly.stealth.mesos.kafka 19 | 20 | import org.junit.Test 21 | import org.junit.Assert._ 22 | import ly.stealth.mesos.kafka.Util.BindAddress 23 | import java.util 24 | 25 | class UtilTest { 26 | // BindAddress 27 | @Test 28 | def BindAddress_init { 29 | new BindAddress("broker0") 30 | new BindAddress("192.168.*") 31 | new BindAddress("if:eth1") 32 | 33 | // unknown source 34 | try { new BindAddress("unknown:value"); fail() } 35 | catch { case e: IllegalArgumentException => } 36 | } 37 | 38 | @Test 39 | def BindAddress_resolve { 40 | // address without mask 41 | assertEquals("host", new BindAddress("host").resolve()) 42 | 43 | // address with mask 44 | assertEquals("127.0.0.1", new BindAddress("127.0.0.*").resolve()) 45 | 46 | // unresolvable 47 | try { new BindAddress("255.255.*").resolve(); fail() } 48 | catch { case e: IllegalStateException => } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/test/resources/broker.json: -------------------------------------------------------------------------------- 1 | { 2 | "task": { 3 | "hostname": "host1", 4 | "state": "running", 5 | "slaveId": "1fbd3a0d-a685-47e6-8066-01be06d68fac-S821", 6 | "executorId": "kafka-general-0-ff258207-18f3-4fd2-9028-5f4c4143f84d", 7 | "attributes": { 8 | "ip": "10.253.166.214", 9 | "host": "host1", 10 | "ami": "ami-d0232eba", 11 | "cluster": "us-east-1", 12 | "dedicated": "kafka\/general", 13 | "zone": "us-east-1d", 14 | "instance_type": "i2.2xlarge" 15 | }, 16 | "id": "kafka-general-0-705d56e2-7d62-4d7e-b033-74ea5526ed82", 17 | "endpoint": "host1:9092" 18 | }, 19 | "syslog": false, 20 | "stickiness": { 21 | "period": "10m", 22 | "hostname": "host1" 23 | }, 24 | "log4jOptions": "k1=v1,k2=v2", 25 | "options": "a=1,b=2", 26 | "id": "1", 27 | "port": "9092", 28 | "constraints": "dedicated=like:kafka\/general", 29 | "mem": 56320, 30 | "jvmOptions": "-server", 31 | "cpus": 7, 32 | "heap": 5120, 33 | "failover": { 34 | "delay": "1m", 35 | "maxDelay": "10m" 36 | }, 37 | "active": true 38 | } 39 | -------------------------------------------------------------------------------- /src/test/resources/topic.json: -------------------------------------------------------------------------------- 1 | 
{"name" : "__consumer_offsets", "partitions" : {"45" : [5, 3, 4], "34" : [2, 7, 0]}, "options" : {"cleanup.policy" : "compact", "compression.type" : "uncompressed", "segment.bytes" : "104857600"}} 2 | -------------------------------------------------------------------------------- /vagrant/README.md: -------------------------------------------------------------------------------- 1 | # Vagrant VMs for Mesos cluster 2 | Vagrantfile creates mesos cluster with following nodes: 3 | - master; 4 | - slave0..slave(N-1) (N is specified in vagrantfile); 5 | 6 | Master provides web ui listening on http://master:5050 7 | Both master and slave nodes runs mesos slave daemons. 8 | 9 | Every node has pre-installed docker. Master node has pre-installed 10 | marathon scheduler. 11 | 12 | Host's public key is copied to `authorized_hosts`, 13 | so direct access like `ssh vagrant@master|slaveX` should work. 14 | 15 | For general mesos overview please refer to 16 | http://mesos.apache.org/documentation/latest/mesos-architecture/ 17 | 18 | ## Node Names 19 | During first run vagrantfile creates `hosts` file which 20 | contains host names for cluster nodes. It is recommended 21 | to append its content to `/etc/hosts` (or other OS-specific 22 | location) of the running (hosting) OS to be able to refer 23 | master and slaves by logical names. 24 | 25 | ## Startup 26 | Mesos master and slaves daemons are started automatically. 27 | 28 | Each slave node runs 'mesos-slave' daemon while master runs both 29 | 'mesos-master' and 'mesos-slave' daemons. 30 | 31 | Daemons could be controlled by using: 32 | `/etc/init.d/mesos-{master|slave} {start|stop|status|restart}` 33 | 34 | ## Configuration 35 | Configuration is read from the following locations: 36 | - `/etc/mesos`, `/etc/mesos-{master|slave}` 37 | for general or master|slave specific CLI options; 38 | - `/etc/default/mesos`, `/etc/default/mesos-{master|slave}` 39 | for general or master|slave specific environment vars; 40 | 41 | Please refer to CLI of 'mesos-master|slave' daemons and `/usr/bin/mesos-init-wrapper` 42 | for details. 43 | 44 | ## Logs 45 | Logs are written to `/var/log/mesos/mesos-{master|slave}.*` -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # -*- mode: ruby -*- 16 | # vi: set ft=ruby : 17 | 18 | SLAVES=1 19 | NET_PREFIX="192.168.3." 
20 | 21 | NODES={"master" => NET_PREFIX + "5"} 22 | (0..SLAVES-1).each do |i| NODES["slave#{i}"] = NET_PREFIX + (6 + i).to_s end 23 | 24 | # create hosts 25 | File.open('.vagrant/hosts', 'w') do |file| 26 | file.write("127.0.0.1\tlocalhost\n") 27 | file.write("\n# cluster nodes\n") 28 | NODES.each do |name, ip| file.write("#{ip}\t#{name}\n") end 29 | end 30 | 31 | Vagrant.configure(2) do |config| 32 | config.vm.box = "ubuntu/trusty64" 33 | config.vm.synced_folder "../", "/vagrant" 34 | 35 | config.vm.define "master" do |master| 36 | master.vm.provider "virtualbox" do |v| 37 | v.memory = 1024 38 | end 39 | 40 | master.vm.hostname = "master" 41 | master.vm.network :private_network, ip: NODES["master"] 42 | 43 | master.vm.provision "shell", path: "init.sh", args: "master" 44 | end 45 | 46 | (0..SLAVES-1).each do |i| 47 | config.vm.define "slave#{i}" do |slave| 48 | slave.vm.provider "virtualbox" do |v| 49 | v.memory = 2048 50 | v.cpus = 2 51 | end 52 | 53 | slave.vm.hostname = "slave#{i}" 54 | slave.vm.network :private_network, ip: NODES[slave.vm.hostname] 55 | 56 | slave.vm.provision "shell", path: "init.sh", args: "slave" 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /vagrant/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
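# Usage: this script is run by the Vagrant shell provisioner (see Vagrantfile):
#   ./init.sh master   # provisions mesos-master, mesos-slave and marathon
#   ./init.sh slave    # provisions mesos-slave only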
17 | 18 | install_mesos() { 19 | mode=$1 # master | slave 20 | apt-get -qy install mesos=0.28.0* 21 | 22 | echo "zk://master:2181/mesos" > /etc/mesos/zk 23 | echo '5mins' > /etc/mesos-slave/executor_registration_timeout 24 | 25 | ip=$(cat /etc/hosts | grep `hostname` | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}") 26 | echo $ip > "/etc/mesos-$mode/ip" 27 | 28 | if [ $mode == "master" ]; then 29 | ln -s /lib/init/upstart-job /etc/init.d/mesos-master 30 | service mesos-master start 31 | else 32 | apt-get -qy remove zookeeper 33 | fi 34 | 35 | ln -s /lib/init/upstart-job /etc/init.d/mesos-slave 36 | service mesos-slave start 37 | } 38 | 39 | install_marathon() { 40 | apt-get install -qy marathon=0.10.0* 41 | service marathon start 42 | } 43 | 44 | install_docker() { 45 | apt-get install -qy lxc-docker 46 | echo 'docker,mesos' > /etc/mesos-slave/containerizers 47 | service mesos-slave restart 48 | } 49 | 50 | if [[ $1 != "master" && $1 != "slave" ]]; then 51 | echo "Usage: $0 master|slave" 52 | exit 1 53 | fi 54 | mode=$1 55 | 56 | cd /vagrant/vagrant 57 | 58 | # name resolution 59 | cp .vagrant/hosts /etc/hosts 60 | 61 | # ssh key 62 | key=".vagrant/ssh_key.pub" 63 | if [ -f $key ]; then 64 | cat $key >> /home/vagrant/.ssh/authorized_keys 65 | fi 66 | 67 | # disable ipv6 68 | echo -e "\nnet.ipv6.conf.all.disable_ipv6 = 1\n" >> /etc/sysctl.conf 69 | sysctl -p 70 | 71 | # use apt-proxy if present 72 | if [ -f ".vagrant/apt-proxy" ]; then 73 | apt_proxy=$(cat ".vagrant/apt-proxy") 74 | echo "Using apt-proxy: $apt_proxy"; 75 | echo "Acquire::http::Proxy \"$apt_proxy\";" > /etc/apt/apt.conf.d/90-apt-proxy.conf 76 | fi 77 | 78 | # add mesosphere repo 79 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E56151BF 80 | DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') 81 | CODENAME=$(lsb_release -cs) 82 | echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | tee /etc/apt/sources.list.d/mesosphere.list 83 | 84 | # add docker repo 85 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 86 | echo "deb http://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list 87 | 88 | apt-get -qy update 89 | 90 | # install deps 91 | apt-get install -qy vim zip mc curl wget openjdk-7-jre scala git 92 | 93 | install_mesos $mode 94 | if [ $mode == "master" ]; then install_marathon; fi 95 | #install_docker 96 | 97 | --------------------------------------------------------------------------------