├── .gitignore ├── LICENSE ├── README.markdown ├── project.clj └── src ├── clj └── kafka │ └── deploy │ ├── crate │ └── zookeeper.clj │ ├── deploy_util.clj │ ├── provision.clj │ └── security.clj └── resource ├── log4j.properties └── server.properties /.gitignore: -------------------------------------------------------------------------------- 1 | .lein-deps-sum 2 | lib 3 | logs 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2012 Nathan Marz 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | Automated deploy for a Kafka cluster on AWS. Can be used with [Storm](https://github.com/nathanmarz/storm) or standalone. 2 | 3 | The deploy provisions and configures both Zookeeper and Kafka. 4 | 5 | ## Usage 6 | 7 | Install [leiningen](https://github.com/technomancy/leiningen). 8 | 9 | Set up your `~/.pallet/config.clj` as described [here](https://github.com/nathanmarz/storm-deploy/wiki) 10 | 11 | Start a cluster: 12 | 13 | ``` 14 | lein deps 15 | lein run :deploy --start --name mykafkacluster --kn 8 --zn 2 16 | ``` 17 | 18 | This creates a cluster called "mykafkacluster" with 8 kafka nodes and 2 zookeeper nodes. 
19 | 20 | 21 | Stop a cluster: 22 | 23 | ``` 24 | lein run :deploy --stop --name mykafkacluster 25 | ``` 26 | -------------------------------------------------------------------------------- /project.clj: -------------------------------------------------------------------------------- 1 | (defproject kafka-deploy "0.0.1-SNAPSHOT" 2 | :source-path "src/clj" 3 | :test-path "test/clj" 4 | :resources-path "src/resource" 5 | :run-aliases {:deploy kafka.deploy.provision} 6 | 7 | :repositories { 8 | "sonatype" "https://oss.sonatype.org/content/repositories/releases" 9 | "jclouds-snapshot" "https://oss.sonatype.org/content/repositories/snapshots" 10 | } 11 | 12 | :dependencies [ 13 | [storm "0.5.4"] 14 | [commons-codec "1.4"] 15 | [org.cloudhoist/pallet "0.6.1"] 16 | [org.cloudhoist/java "0.5.0"] 17 | [org.cloudhoist/ssh-key "0.5.0"] 18 | [org.cloudhoist/automated-admin-user "0.5.0"] 19 | [org.cloudhoist/iptables "0.5.0"] 20 | [org.cloudhoist/zookeeper "0.5.1"] 21 | [org.cloudhoist/crontab "0.5.0"] 22 | 23 | [org.jclouds.provider/aws-ec2 "1.0.0"] 24 | [org.jclouds.provider/aws-s3 "1.0.0"] 25 | [com.jcraft/jsch "0.1.44-1"] ; is this necessary? 
26 | 27 | [log4j/log4j "1.2.14"]] 28 | 29 | :dev-dependencies [[swank-clojure "1.2.1"] 30 | [org.cloudhoist/pallet-lein "0.2.0"]]) 31 | 32 | 33 | -------------------------------------------------------------------------------- /src/clj/kafka/deploy/crate/zookeeper.clj: -------------------------------------------------------------------------------- 1 | (ns kafka.deploy.crate.zookeeper 2 | (:require 3 | [pallet.action.directory :as directory] 4 | [pallet.action.file :as file] 5 | [pallet.action.remote-directory :as remote-directory] 6 | [pallet.action.remote-file :as remote-file] 7 | [pallet.action.service :as service] 8 | [pallet.action.user :as user] 9 | [pallet.argument :as argument] 10 | [pallet.compute :as compute] 11 | [pallet.parameter :as parameter] 12 | [pallet.session :as session] 13 | [pallet.stevedore :as stevedore] 14 | [clojure.string :as string] 15 | [pallet.resource.package :as package] 16 | [pallet.resource.exec-script :as exec-script] 17 | [pallet.crate.crontab :as crontab] 18 | ) 19 | (:use 20 | pallet.thread-expr)) 21 | 22 | (def install-path "/usr/local/zookeeper") 23 | (def log-path "/var/log/zookeeper") 24 | (def tx-log-path "/mnt/zookeeper") 25 | (def config-path "/etc/zookeeper") 26 | (def data-path "/var/zookeeper") 27 | (def zookeeper-home install-path) 28 | (def zookeeper-user "zookeeper") 29 | (def zookeeper-group "zookeeper") 30 | (def default-config 31 | {:dataDir data-path 32 | :tickTime 2000 33 | :clientPort 2181 34 | :initLimit 10 35 | :syncLimit 5 36 | :dataLogDir tx-log-path}) 37 | 38 | (defn url "Download url" 39 | [version] 40 | (format 41 | "http://www.apache.org/dist/zookeeper/zookeeper-%s/zookeeper-%s.tar.gz" 42 | version version)) 43 | 44 | (defn install 45 | "Install Zookeeper" 46 | [session & {:keys [user group version home] 47 | :or {user zookeeper-user 48 | group zookeeper-group 49 | version "3.3.3"} 50 | :as options}] 51 | (let [url (url version) 52 | home (or home (format "%s-%s" install-path version))] 53 | (-> 54 | 
session 55 | (package/package "daemontools") 56 | 57 | (parameter/assoc-for 58 | [:zookeeper :home] home 59 | [:zookeeper :owner] user 60 | [:zookeeper :group] group) 61 | (user/group group :system true) 62 | (user/user user :system true :group group) 63 | (remote-directory/remote-directory 64 | home 65 | :url url :md5-url (str url ".md5") 66 | :unpack :tar :tar-options "xz" 67 | :owner user :group group) 68 | (directory/directory log-path :owner user :group group :mode "0755") 69 | (directory/directory tx-log-path :owner user :group group :mode "0755") 70 | (directory/directory config-path :owner user :group group :mode "0755") 71 | (directory/directory data-path :owner user :group group :mode "0755") 72 | (directory/directory (format "/home/%s" user) :owner user :group group :mode "0755") 73 | (directory/directory (format "%s/supervise" home) :owner user :group group :mode "0755") 74 | (remote-file/remote-file 75 | (format "%s/purge" home) 76 | :content 77 | (format 78 | "#!/bin/bash 79 | cd %s && export ZOOBINDIR=\"bin\" && . 
bin/zkEnv.sh && echo $CLASSPATH && java -cp $CLASSPATH org.apache.zookeeper.server.PurgeTxnLog %s %s -n 3 80 | " 81 | home 82 | tx-log-path 83 | data-path 84 | ) 85 | :overwrite-changes true 86 | :literal true 87 | :mode 755) 88 | (remote-file/remote-file 89 | (format "%s/run" home) 90 | :content 91 | "#!/bin/bash 92 | 93 | export ZOOBINDIR=\".\" 94 | 95 | if [ \"x$JMXLOCALONLY\" = \"x\" ] 96 | then 97 | JMXLOCALONLY=false 98 | fi 99 | 100 | if [ \"x$JMXDISABLE\" = \"x\" ] 101 | then 102 | echo \"JMX enabled by default\" 103 | # for some reason these two options are necessary on jdk6 on Ubuntu 104 | # accord to the docs they are not necessary, but otw jconsole cannot 105 | # do a local attach 106 | ZOOMAIN=\"-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain\" 107 | else 108 | echo \"JMX disabled by user request\" 109 | ZOOMAIN=\"org.apache.zookeeper.server.quorum.QuorumPeerMain\" 110 | fi 111 | 112 | if [ \"x$2\" != \"x\" ] 113 | then 114 | ZOOCFG=\"$ZOOCFGDIR/$2\" 115 | fi 116 | 117 | 118 | cd bin && . 
./zkEnv.sh && java \"-Dzookeeper.log.dir=${ZOO_LOG_DIR}\" \"-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}\" -cp \"$CLASSPATH\" $JVMFLAGS $ZOOMAIN \"$ZOOCFG\" 119 | " 120 | :overwrite-changes true 121 | :literal true 122 | :mode 755) 123 | 124 | (remote-file/remote-file 125 | (format "%s/log4j.properties" config-path) 126 | :remote-file (format "%s/conf/log4j.properties" home) 127 | :owner user :group group :mode "0644") 128 | (file/sed 129 | (format "%s/bin/zkServer.sh" home) 130 | {"# chkconfig:.*" "" 131 | "# description:.*" "" 132 | "# by default we allow local JMX connections" 133 | "# by default we allow local JMX connections\\n# chkconfig: 2345 20 80\\n# description: zookeeper"}) 134 | (file/sed 135 | (format "%s/log4j.properties" config-path) 136 | {"log4j.rootLogger=INFO, CONSOLE" 137 | "log4j.rootLogger=INFO, ROLLINGFILE" 138 | "log4j.appender.ROLLINGFILE.File=zookeeper.log" 139 | (format "log4j.appender.ROLLINGFILE.File=%s/zookeeper.log" log-path)} 140 | :seperator "|") 141 | ))) 142 | 143 | (defn init [session] 144 | (-> session 145 | (exec-script/exec-script 146 | (cd ~(parameter/get-for session [:zookeeper :home])) 147 | "sudo -u " ~(parameter/get-for session [:zookeeper :owner]) " nohup supervise . &") 148 | (crontab/crontab "root" 149 | :content (format "@daily sh %s/purge" (parameter/get-for session [:zookeeper :home]))) 150 | )) 151 | 152 | (defn config-files 153 | "Create a zookeeper configuration file. We sort by name to preserve sequence 154 | across invocations." 
155 | [session] 156 | (let [target-name (session/target-name session) 157 | target-ip (session/target-ip session) 158 | nodes (sort-by compute/hostname (session/nodes-in-group session)) 159 | configs (parameter/get-for 160 | session 161 | [:zookeper (keyword (session/group-name session))]) 162 | config (configs (keyword target-name)) 163 | owner (parameter/get-for session [:zookeeper :owner]) 164 | group (parameter/get-for session [:zookeeper :group])] 165 | (-> 166 | session 167 | (remote-file/remote-file 168 | (format "%s/zoo.cfg" config-path) 169 | :content (str (string/join 170 | \newline 171 | (map #(format "%s=%s" (name (first %)) (second %)) 172 | (merge 173 | default-config 174 | (dissoc config :electionPort :quorumPort)))) 175 | \newline 176 | (when (> (count nodes) 1) 177 | (string/join 178 | \newline 179 | (map #(let [config (configs 180 | (keyword (compute/hostname %1)))] 181 | (format "server.%s=%s:%s:%s" 182 | %2 183 | (compute/private-ip %1) 184 | (:quorumPort config 2888) 185 | (:electionPort config 3888))) 186 | nodes 187 | (range 1 (inc (count nodes))))))) 188 | :owner owner :group group :mode "0644") 189 | 190 | (remote-file/remote-file 191 | (format "%s/myid" data-path) 192 | :content (str (some #(and (= target-ip (second %)) (first %)) 193 | (map #(vector %1 (compute/primary-ip %2)) 194 | (range 1 (inc (count nodes))) 195 | nodes))) 196 | :owner owner :group group :mode "0644")))) 197 | 198 | (defn store-configuration 199 | "Capture zookeeper configuration" 200 | [session options] 201 | (parameter/update-for 202 | session 203 | [:zookeper (keyword (session/group-name session))] 204 | (fn [m] 205 | (assoc m (session/target-name session) options)))) 206 | 207 | (defn configure 208 | "Configure zookeeper instance" 209 | [session & {:keys [dataDir tickTime clientPort initLimit syncLimit dataLogDir 210 | electionPort quorumPort] 211 | :or {client-port 2181 quorumPort 2888 electionPort 3888} 212 | :as options}] 213 | (-> 214 | session 215 | 
(store-configuration 216 | (assoc options :quorumPort quorumPort :electionPort electionPort)) 217 | (config-files))) 218 | 219 | #_ 220 | (pallet.core/defnode zk 221 | {} 222 | :bootstrap (pallet.action/phase 223 | (pallet.crate.automated-admin-user/automated-admin-user)) 224 | :configure (pallet.action/phase 225 | (pallet.crate.java/java :openjdk :jdk) 226 | (pallet.crate.zookeeper/install) 227 | (pallet.crate.zookeeper/configure) 228 | (pallet.crate.zookeeper/init)) 229 | :restart-zookeeper (pallet.action/phase 230 | (pallet.action.service/service 231 | "zookeeper" :action :restart))) -------------------------------------------------------------------------------- /src/clj/kafka/deploy/deploy_util.clj: -------------------------------------------------------------------------------- 1 | (ns kafka.deploy.deploy-util 2 | (:require [clojure.string :as s] 3 | [pallet.execute :as execute])) 4 | 5 | (def env-keys-to-resolve [:username :public-key-path :private-key-path]) 6 | 7 | (defn resolve-path [path] 8 | (s/trim (:out (execute/local-script (echo ~path))))) 9 | 10 | (defn resolve-keypaths 11 | [user-map] 12 | (reduce #(%2 %1) 13 | user-map 14 | (for [kwd env-keys-to-resolve] 15 | #(if (kwd %) 16 | (update-in % [kwd] resolve-path) 17 | %)))) 18 | 19 | (defn set-var-root* [avar val] 20 | (alter-var-root avar (fn [avar] val))) 21 | 22 | (defmacro set-var-root [var-sym val] 23 | `(set-var-root* (var ~var-sym) ~val)) 24 | 25 | (defmacro with-var-roots [bindings & body] 26 | (let [settings (partition 2 bindings) 27 | tmpvars (repeatedly (count settings) (partial gensym "old")) 28 | vars (map first settings) 29 | savevals (vec (mapcat (fn [t v] [t v]) tmpvars vars)) 30 | setters (for [[v s] settings] `(set-var-root ~v ~s)) 31 | restorers (map (fn [v s] `(set-var-root ~v ~s)) vars tmpvars) 32 | ] 33 | `(let ~savevals 34 | ~@setters 35 | (try 36 | ~@body 37 | (finally 38 | ~@restorers)) 39 | ))) -------------------------------------------------------------------------------- 
/src/clj/kafka/deploy/provision.clj:
--------------------------------------------------------------------------------
(ns kafka.deploy.provision
  (:use [pallet compute configure core resource]
        [clojure.contrib command-line]
        [kafka.deploy security]
        [org.jclouds.compute :only [nodes-with-tag]])
  ;; NOTE: a duplicate [org.jclouds.compute :only [nodes-with-tag]] entry was
  ;; removed from :require -- :only is a `use` filter, not a :require option,
  ;; and the :use clause above already refers nodes-with-tag.
  (:require [kafka.deploy.crate [zookeeper :as zookeeper]]
            [pallet.crate.java :as java]
            [pallet.crate.automated-admin-user :as automated-admin-user]
            [kafka.deploy.deploy-util :as util]
            [pallet.resource.remote-file :as remote-file]
            [pallet.resource.exec-script :as exec-script]
            [pallet.resource.package :as package]
            [clojure.contrib [str-utils2 :as str]]
            [pallet [session :as session]]))

(defn target-node-index
  "Returns the target node's index within its group. For a group of size 3, returns either 0, 1 or 2."
  [request]
  (let [nodes (sort-by private-ip (session/nodes-in-group request))
        node->idx (zipmap nodes (iterate inc 0))]
    (node->idx (session/target-node request))))

(defn my-region
  "Region configured for the default provider in ~/.pallet/config.clj."
  []
  (-> (pallet-config) :services :default :jclouds.regions))

(defn jclouds-group
  "Full jclouds security-group name: jclouds#<pieces>#<region>."
  [& group-pieces]
  (str "jclouds#"
       (apply str group-pieces)
       "#"
       (my-region)))

(defn zookeeper-ips
  "Private IPs of the running zookeeper nodes of cluster `name`."
  [compute name]
  (let [running-nodes (filter running?
                              (nodes-with-tag (str "kafka-zookeeper-" name) compute))]
    (map private-ip running-nodes)))

;; Pallet admin user for the cluster. Rebound at the root (not thread-locally)
;; for the duration of -main via util/with-var-roots.
(def *USER* nil)

(defn base-server-spec
  "Common spec: admin user at bootstrap, Sun JDK at configure."
  []
  (server-spec
   :phases {:bootstrap (fn [req] (automated-admin-user/automated-admin-user
                                  req
                                  (:username *USER*)
                                  (:public-key-path *USER*)))
            :configure (phase
                        (java/java :sun :jdk))}))

(def ZK-VERSION "3.3.4")

(defn zookeeper-server-spec []
  (server-spec
   :extends (base-server-spec)
   :phases {:configure (phase
                        (zookeeper/install :version ZK-VERSION)
                        (zookeeper/configure
                         :clientPort 2181
                         :maxClientCnxns 0)
                        (zookeeper/init))
            ;; create the /kafka chroot once zookeeper is running
            :post-configure (phase
                             (exec-script/exec-script
                              (cd ~(str zookeeper/install-path "-" ZK-VERSION))
                              (sh "bin/zkCli.sh create /kafka 1")))}))

(def RELEASE-URL "http://people.apache.org/~nehanarkhede/kafka-0.7.0-incubating/kafka-0.7.0-incubating-src.tar.gz")

(defn download-release
  "Fetch the kafka source release tarball to $HOME/kafka.tar.gz."
  [request]
  (-> request
      (remote-file/remote-file
       "$HOME/kafka.tar.gz"
       :url RELEASE-URL
       :no-versioning true)))

(defn mk-zk-str
  "Zookeeper connect string with /kafka chroot, e.g. \"ip1:2181,ip2:2181/kafka\"."
  [compute name]
  (->> (zookeeper-ips compute name)
       (map #(str % ":2181"))
       (str/join ",")
       (#(str % "/kafka"))))

(defn kafka-server-spec
  "Spec for kafka broker nodes of cluster `name`: build from source at
   configure, write configs at post-configure, start via supervise at exec."
  [name]
  (server-spec
   :extends (base-server-spec)
   :phases {:configure (phase
                        (package/package "daemontools")
                        (download-release)
                        (exec-script/exec-checked-script
                         "build kafka"
                         (cd "$HOME")
                         (tar "-xzf kafka.tar.gz")
                         (mv "kafka-0.7.0-incubating-src" "kafka")
                         (cd "kafka")
                         (sh "sbt update")
                         (sh "sbt package")
                         (mkdir "logs")))
            :post-configure
            (fn [request]
              (-> request
                  ;; server.properties template is filled with this node's
                  ;; broker id and the zk connect string
                  (remote-file/remote-file "$HOME/kafka/config/server.properties"
                                           :template "server.properties"
                                           :values {'zk-str (mk-zk-str (:compute request) name)
                                                    'id (target-node-index request)}
                                           :owner (:username *USER*))
                  (remote-file/remote-file "$HOME/kafka/config/log4j.properties"
                                           :template "log4j.properties"
                                           :owner (:username *USER*))
                  (remote-file/remote-file
                   "$HOME/kafka/run"
                   :content (str
                             "#!/bin/bash\n\n
sh bin/kafka-server-start.sh config/server.properties")
                   :overwrite-changes true
                   :literal true
                   :mode 755)))
            :exec (phase
                   (exec-script/exec-script
                    (cd "$HOME/kafka")
                    "nohup supervise . &"))}))

(defn zookeeper [name]
  (group-spec
   (str "kafka-zookeeper-" name)
   :node-spec (node-spec
               :image {:inbound-ports [2181 22]
                       :image-id "us-east-1/ami-08f40561"
                       :hardware-id "m1.large"})
   :extends (zookeeper-server-spec)))

(defn kafka [name]
  (group-spec
   (str "kafka-" name)
   :node-spec (node-spec
               :image {:inbound-ports [2181 22]
                       :image-id "us-east-1/ami-08f40561"
                       :hardware-id "m1.large"})
   :extends (kafka-server-spec name)))

(defn converge!
  "Converge the cluster to kn kafka nodes and zn zookeeper nodes."
  [name aws kn zn]
  (converge {(kafka name) kn
             (zookeeper name) zn}
            :compute aws))

(defn kafka-config
  ([] (pallet-config "default"))
  ([conf-name] (compute-service-properties (pallet-config) [conf-name])))

(defn- print-ips-for-tag! [aws tag-str]
  (let [running-node (filter running? (nodes-with-tag tag-str aws))]
    (println "TAG: " tag-str)
    (println "PUBLIC: " (map primary-ip running-node))
    (println "PRIVATE: " (map private-ip running-node))))

(defn print-all-ips! [aws name]
  (let [all-tags [(str "kafka-zookeeper-" name) (str "kafka-" name)]]
    (doseq [tag all-tags]
      (print-ips-for-tag! aws tag))))

(defn start!
  "Provision the cluster, open the kafka/zookeeper security groups to each
   other, run post-configure phases and print the node IPs."
  [aws name kn zn]
  (println "Starting cluster")
  (println (format "Provisioning nodes [zn=%d, kn=%d]" zn kn))
  (converge! name aws kn zn)
  (authorize-group aws (my-region) (jclouds-group "kafka-" name) (jclouds-group "kafka-zookeeper-" name))
  (authorize-group aws (my-region) (jclouds-group "kafka-zookeeper-" name) (jclouds-group "kafka-" name))

  (lift (zookeeper name) :compute aws :phase [:post-configure])
  (lift (kafka name) :compute aws :phase [:post-configure :exec])
  (println "Provisioning Complete.")
  (print-all-ips! aws name))

(defn stop!
  "Converge both groups to zero nodes, terminating the cluster."
  [aws name]
  (println "Shutting Down nodes...")
  (converge! name aws 0 0)
  (println "Shutdown Finished."))

(defn mk-aws []
  (let [conf (-> (kafka-config "default")
                 (update-in [:environment :user] util/resolve-keypaths))]
    (compute-service-from-map conf)))

(defn -main [& args]
  (let [aws (mk-aws)
        user (-> (kafka-config "default")
                 :environment
                 :user
                 util/resolve-keypaths)]
    (util/with-var-roots [*USER* user]
      (with-command-line args
        "Provisioning tool for Kafka Clusters"
        [[start? "Start Cluster?"]
         [stop? "Shutdown Cluster?"]
         [ips? "Print Cluster IP Addresses?"]
         [name "Cluster name" "dev"]
         [kn "Number of Kafka nodes" "1"]
         [zn "Number of Zookeeper nodes" "1"]]

        (cond
         stop? (stop! aws name)
         start? (start! aws name (Integer/parseInt kn) (Integer/parseInt zn))
         ips? (print-all-ips! aws name)
         :else (println "Must pass --start or --stop or --ips"))))))
--------------------------------------------------------------------------------
/src/clj/kafka/deploy/security.clj:
--------------------------------------------------------------------------------
;
;
; Copyright (C) 2011 Cloud Conscious, LLC.
;
; ====================================================================
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
; ====================================================================
;

(ns
  #^{:author "Juergen Hoetzel, juergen@archlinux.org"
     :doc "A clojure binding for the jclouds AWS security group interface."}
  kafka.deploy.security
  (:require (org.jclouds [compute2 :as compute])
            [org.jclouds.ec2.ebs :as ebs])
  (:import org.jclouds.ec2.domain.IpProtocol
           org.jclouds.ec2.domain.SecurityGroup
           org.jclouds.ec2.services.SecurityGroupClient
           org.jclouds.ec2.domain.UserIdGroupPair
           java.io.DataInputStream
           java.net.URL))

(defn #^SecurityGroupClient
  sg-service
  "Returns the SecurityGroup Client associated with the specified compute service."
  [compute]
  (-> compute .getContext .getProviderSpecificContext .getApi .getSecurityGroupServices))

(defn create-group
  "Creates a new security group.

   e.g. (create-group compute \"Database Server\" \"Description for group\" :region :us-west-1)"
  [compute name & {:keys [description region]}]
  (.createSecurityGroupInRegion (sg-service compute) (ebs/get-region region) name (or description name)))

(defn delete-group
  "Deletes a security group.

   e.g. (delete-group compute \"Database Server\" :region :us-west-1)"
  [compute name & {:keys [region]}]
  (.deleteSecurityGroupInRegion (sg-service compute) (ebs/get-region region) name))

(defn groups
  "Returns a map of GroupName -> org.jclouds.ec2.domain.SecurityGroup instances.

   e.g. (groups compute :region :us-east-1)"
  [compute & {:keys [region]}]
  (into {} (for [#^SecurityGroup group (.describeSecurityGroupsInRegion
                                        (sg-service compute)
                                        (ebs/get-region region)
                                        (into-array String '()))]
             [(.getName group) group])))

(defn get-protocol
  "Coerce argument to a IP Protocol."
  ;; NOTE: the docstring previously sat *after* the arg vector, where it was
  ;; a discarded expression rather than documentation.
  [v]
  (cond
   (instance? IpProtocol v) v
   (keyword? v) (if-let [p (get {:tcp IpProtocol/TCP
                                 :udp IpProtocol/UDP
                                 :icmp IpProtocol/ICMP}
                                v)]
                  p
                  (throw (IllegalArgumentException.
                          (str "Can't obtain IP protocol from " v " (valid :tcp, :udp and :icmp)"))))
   (nil? v) IpProtocol/TCP
   :else (throw (IllegalArgumentException.
                 (str "Can't obtain IP protocol from argument of type " (type v))))))

(defn authorize
  "Adds permissions to a security group.

   e.g. (authorize compute \"jclouds#webserver#us-east-1\" 80 :ip-range \"0.0.0.0/0\")
        (authorize compute \"jclouds#webserver#us-east-1\" [1000,2000] :protocol :udp)"
  [compute group-name port & {:keys [protocol ip-range region]}]
  (let [group ((groups compute :region region) group-name)
        [from-port to-port] (if (number? port) [port port] port)]
    (if group
      (.authorizeSecurityGroupIngressInRegion
       (sg-service compute) (ebs/get-region region) (.getName group) (get-protocol protocol) from-port to-port (or ip-range "0.0.0.0/0"))
      (throw (IllegalArgumentException.
              (str "Can't find security group for name " group-name))))))

;; This machine's public IP as reported by an external service (memoized).
;; NOTE(review): relies on a third-party endpoint staying up and honest --
;; consider a trusted service such as http://checkip.amazonaws.com instead.
(def my-ip
  (memoize
   (fn []
     (let [is (DataInputStream. (.openStream (URL. "http://demontunes.com/api/?g=ip")))
           ret (.readLine is)]
       (.close is)
       ret))))

(defn authorizeme
  "Open `port` on `group-name` for this machine's public IP only. Rules that
   already exist (IllegalStateException) are silently ignored."
  [compute group-name port]
  (try
    (authorize compute group-name port :ip-range (str (my-ip) "/32"))
    (catch IllegalStateException _)))

(defn authorize-group
  "Allow all traffic from `from-group` into `to-group`. Rules that already
   exist (IllegalStateException) are silently ignored."
  ([compute region to-group from-group]
     (authorize-group compute region to-group from-group (:aws-user-id (. compute environment))))
  ([compute region to-group from-group user-id]
     (try
       (.authorizeSecurityGroupIngressInRegion
        (sg-service compute)
        region
        to-group
        (UserIdGroupPair. user-id from-group))
       (catch IllegalStateException _))))

(defn revoke
  "Revokes permissions from a security group.

   e.g. (revoke compute \"jclouds#webserver#us-east-1\" 80 :protocol :tcp :ip-range \"0.0.0.0/0\")"
  [compute group-name port & {:keys [protocol ip-range region]}]
  (let [group ((groups compute :region region) group-name)
        [from-port to-port] (if (number? port) [port port] port)]
    (if group
      (.revokeSecurityGroupIngressInRegion
       (sg-service compute) (ebs/get-region region) (.getName group) (get-protocol protocol) from-port to-port (or ip-range "0.0.0.0/0"))
      (throw (IllegalArgumentException.
              (str "Can't find security group for name " group-name))))))
--------------------------------------------------------------------------------
/src/resource/log4j.properties:
--------------------------------------------------------------------------------
log4j.rootLogger=INFO, A1


log4j.appender.A1 = org.apache.log4j.DailyRollingFileAppender
log4j.appender.A1.File = logs/kafka.log
log4j.appender.A1.Append = true
# NOTE: was '.'yyy-MM-dd; four-digit year is the intended daily-roll pattern
log4j.appender.A1.DatePattern = '.'yyyy-MM-dd
log4j.appender.A1.layout = org.apache.log4j.PatternLayout
log4j.appender.A1.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n
--------------------------------------------------------------------------------
/src/resource/server.properties:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
20 | brokerid=~{id} 21 | 22 | # Hostname the broker will advertise to consumers. If not set, kafka will use the value returned 23 | # from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost 24 | # may not be what you want. 25 | #hostname= 26 | 27 | 28 | ############################# Socket Server Settings ############################# 29 | 30 | # The port the socket server listens on 31 | port=9092 32 | 33 | # The number of processor threads the socket server uses for receiving and answering requests. 34 | # Defaults to the number of cores on the machine 35 | num.threads=8 36 | 37 | # The send buffer (SO_SNDBUF) used by the socket server 38 | socket.send.buffer=1048576 39 | 40 | # The receive buffer (SO_RCVBUF) used by the socket server 41 | socket.receive.buffer=1048576 42 | 43 | # The maximum size of a request that the socket server will accept (protection against OOM) 44 | max.socket.request.bytes=104857600 45 | 46 | 47 | ############################# Log Basics ############################# 48 | 49 | # The directory under which to store log files 50 | log.dir=/mnt/kafka-logs 51 | 52 | # The number of logical partitions per topic per server. More partitions allow greater parallelism 53 | # for consumption, but also mean more files. 54 | num.partitions=8 55 | 56 | # Overrides for for the default given by num.partitions on a per-topic basis 57 | #topic.partition.count.map=topic1:3, topic2:4 58 | 59 | ############################# Log Flush Policy ############################# 60 | 61 | # The following configurations control the flush of data to disk. This is the most 62 | # important performance knob in kafka. 63 | # There are a few important trade-offs here: 64 | # 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. 65 | # 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). 66 | # 3. Throughput: The flush is generally the most expensive operation. 
67 | # The settings below allow one to configure the flush policy to flush data after a period of time or 68 | # every N messages (or both). This can be done globally and overridden on a per-topic basis. 69 | 70 | # The number of messages to accept before forcing a flush of data to disk 71 | log.flush.interval=10000 72 | 73 | # The maximum amount of time a message can sit in a log before we force a flush 74 | log.default.flush.interval.ms=1000 75 | 76 | # Per-topic overrides for log.default.flush.interval.ms 77 | #topic.flush.intervals.ms=topic1:1000, topic2:3000 78 | 79 | # The interval (in ms) at which logs are checked to see if they need to be flushed to disk. 80 | log.default.flush.scheduler.interval.ms=1000 81 | 82 | ############################# Log Retention Policy ############################# 83 | 84 | # The following configurations control the disposal of log segments. The policy can 85 | # be set to delete segments after a period of time, or after a given size has accumulated. 86 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens 87 | # from the end of the log. 88 | 89 | # The minimum age of a log file to be eligible for deletion 90 | log.retention.hours=168 91 | 92 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining 93 | # segments don't drop below log.retention.size. 94 | #log.retention.size=1073741824 95 | 96 | # The maximum size of a log segment file. When this size is reached a new log segment will be created. 97 | log.file.size=536870912 98 | 99 | # The interval at which log segments are checked to see if they can be deleted according 100 | # to the retention policies 101 | log.cleanup.interval.mins=1 102 | 103 | ############################# Zookeeper ############################# 104 | 105 | # Enable connecting to zookeeper 106 | enable.zookeeper=true 107 | 108 | # Zk connection string (see zk docs for details). 
109 | # This is a comma separated list of host:port pairs, each corresponding to a zk 110 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 111 | # You can also append an optional chroot string to the urls to specify the 112 | # root directory for all kafka znodes. 113 | zk.connect=~{zk-str} 114 | 115 | # Timeout in ms for connecting to zookeeper 116 | zk.connectiontimeout.ms=1000000 117 | --------------------------------------------------------------------------------