├── ebin
│   ├── erlkafka_app.beam
│   ├── kafka_server.beam
│   ├── kafka_protocol.beam
│   ├── erlkafka_root_sup.beam
│   ├── kafka_server_sup.beam
│   ├── kafka_simple_api.beam
│   └── erlkafka_app.app
├── src
│   ├── erlkafka.hrl
│   ├── erlkafka_app.erl
│   ├── erlkafka_root_sup.erl
│   ├── erlkafka_app.app.src
│   ├── kafka_stream_consumer_sup.erl
│   ├── erlkafka.erl
│   ├── kafka_sequential_reader.erl
│   ├── uuid.erl
│   ├── kafka_server.erl
│   ├── kafka_stream_consumer.erl
│   ├── kafka_server_sup.erl
│   ├── kafka_simple_api.erl
│   └── kafka_protocol.erl
├── LICENSE
├── BSD_LICENSE
└── README

/ebin/erlkafka_app.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/erlkafka_app.beam
--------------------------------------------------------------------------------
/ebin/kafka_server.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/kafka_server.beam
--------------------------------------------------------------------------------
/ebin/kafka_protocol.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/kafka_protocol.beam
--------------------------------------------------------------------------------
/ebin/erlkafka_root_sup.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/erlkafka_root_sup.beam
--------------------------------------------------------------------------------
/ebin/kafka_server_sup.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/kafka_server_sup.beam
--------------------------------------------------------------------------------
/ebin/kafka_simple_api.beam:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/b/erlkafka/master/ebin/kafka_simple_api.beam
--------------------------------------------------------------------------------
/src/erlkafka.hrl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : erlkafka.hrl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------
-author("Milind Parikh [http://www.milindparikh.com]").

-ifndef(erlkafka).
-define(MAX_MSG_SIZE, 1048576).
-endif.
--------------------------------------------------------------------------------
/ebin/erlkafka_app.app:
--------------------------------------------------------------------------------
{application, erlkafka_app,
 [{vsn, "0.5.0"},
  {modules, [erlkafka_app, erlkafka_root_sup, kafka_protocol,
             kafka_server, kafka_server_sup, kafka_simple_api]},
  {registered, [erlkafka_app]},
  {mod, {erlkafka_app, []}},
  {applications, [kernel, stdlib]},
  {env, [{enable_kafka_autodiscovery, false},
         {kafka_brokers, [{0, '127.0.1.1', 9092}]},
         {kafka_prefix, []}]}]}.
--------------------------------------------------------------------------------
/src/erlkafka_app.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : erlkafka_app.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(erlkafka_app).
-author("Milind Parikh [http://www.milindparikh.com]").
-behaviour(application).

-export([start/2, stop/1]).

start(normal, _Args) ->
    erlkafka_root_sup:start_link(1).

stop(_State) ->
    ok.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
erlkafka - a client to kafka written in erlang
Copyright (C) 2012 Milind Parikh

This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License, or (at your option)
any later version.

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
--------------------------------------------------------------------------------
/src/erlkafka_root_sup.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : erlkafka_root_sup.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(erlkafka_root_sup).
-author("Milind Parikh [http://www.milindparikh.com]").
-behaviour(supervisor).

-export([start_link/1]).
-export([init/1]).

start_link(_Params) ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    RestartStrategy = {one_for_one, 0, 1},
    Children = [
        {
            kafka_server_sup,
            {kafka_server_sup, start_link, []},
            permanent,
            infinity,
            supervisor,
            [kafka_server_sup]
        },
        {
            kafka_stream_consumer_sup,
            {kafka_stream_consumer_sup, start_link, []},
            permanent,
            infinity,
            supervisor,
            [kafka_stream_consumer_sup]
        }
    ],
    {ok, {RestartStrategy, Children}}.
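
%% The resulting supervision tree (a summary of the child specs above, not
%% additional configuration):
%%
%%   erlkafka_root_sup
%%   ├── kafka_server_sup           (a pool of kafka_server workers per broker)
%%   └── kafka_stream_consumer_sup  (kafka_sequential_reader/kafka_stream_consumer
%%                                   pairs, added dynamically per consumer)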
--------------------------------------------------------------------------------
/src/erlkafka_app.app.src:
--------------------------------------------------------------------------------
{application, erlkafka_app,
 [{vsn, "0.6.0"},
  {modules, [
      erlkafka_app,
      erlkafka,
      %----------------------------------------------------------------
      % Supervisor Hierarchy
      %----------------------------------------------------------------
      erlkafka_root_sup,           % RootSup
      kafka_server_sup,            % ServerSup
      kafka_server,
      kafka_stream_consumer_sup,   % ConsumerSup
      kafka_sequential_reader,
      kafka_stream_consumer,
      %----------------------------------------------------------------
      % Library Modules
      %----------------------------------------------------------------
      kafka_simple_api, kafka_protocol,
      uuid
  ]},
  {registered, [erlkafka_app]},
  {mod, {erlkafka_app, []}},
  {applications, [kernel, stdlib]},
  {env, [
      {enable_kafka_autodiscovery, false},
      {kafka_brokers, [{0, '127.0.1.1', 9092}]},
      {kafka_prefix, ""}
  ]}
 ]}.
--------------------------------------------------------------------------------
/BSD_LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2012, Milind Parikh
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
erlkafka is a Kafka client written in Erlang.

erlkafka provides seven core functions:

    produce                        %% native kafka produce request
    multi_produce                  %% native kafka multi_produce request
    fetch                          %% native kafka fetch request
    multi_fetch                    %% native kafka multi_fetch request
    offset                         %% native kafka offset request
    get_list_of_brokers            %% conditional zookeeper-dependent list of brokers
    get_list_of_broker_partitions  %% conditional zookeeper-dependent list of broker
                                   %% partitions for a topic

erlkafka is available under two licenses: the LGPL or the BSD license.
erlkafka current version: 0.5.0

It requires ezk (https://github.com/infinipool/ezk.git) for auto discovery.
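
A minimal produce/fetch session from the Erlang shell might look as follows
(broker id 0, partition 0 and topic <<"test">> are illustrative values taken
from the bundled tests, not defaults of the API):

    1> application:start(erlkafka_app).
    2> kafka_simple_api:produce(0, <<"test">>, 0, [<<"hello">>]).
    3> kafka_simple_api:fetch(0, <<"test">>, 0, 0).
    %% returns {ok, {Messages, ByteSize}} on success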
{enable_kafka_autodiscovery, true} in erlkafka_app.app is the switch to
turn auto discovery on.

If {enable_kafka_autodiscovery, false}, then
    application:start(erlkafka_app)
is sufficient.

If {enable_kafka_autodiscovery, true}, then
    application:start(ezk),
    application:start(erlkafka_app)
are required.
--------------------------------------------------------------------------------
/src/kafka_stream_consumer_sup.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_stream_consumer_sup.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_stream_consumer_sup).
-author("Milind Parikh [http://www.milindparikh.com]").
-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-define(DEFAULT_POOL_COUNT, 5).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%%%-------------------------------------------------------------------
%%% SUPERVISOR CB FUNCTIONS
%%%-------------------------------------------------------------------

init([]) ->
    RestartStrategy = {one_for_one, 0, 1},
    Children = [],
    {ok, {RestartStrategy, Children}}.

%%%-------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/erlkafka.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : erlkafka.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(erlkafka).
-author('Milind Parikh ').
-include("erlkafka.hrl").

-export([get_kafka_stream_consumer/4, uuid/0]).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

uuid() ->
    uuid:v4().
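
%% get_kafka_stream_consumer/4 below returns {StreamFun, TerminateFun, Pid}.
%% A consumption sketch (broker 0 and topic <<"test">> are illustrative
%% values, not defaults):
%%
%%   {Stream, Stop, Pid} =
%%       erlkafka:get_kafka_stream_consumer(0, <<"test">>, 0, 0),
%%   {ok, Result} = Stream(Pid),    % Result is {Messages, Size} or no_data
%%   Stop(Pid).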
get_kafka_stream_consumer(Broker, Topic, Partition, Offset) ->
    {A1, A2, A3} = now(),
    random:seed(A1, A2, A3),

    UuidKSR_1 = uuid(),
    UuidKSR = uuid:to_string(UuidKSR_1),

    UuidKSC_1 = uuid(),
    UuidKSC = uuid:to_string(UuidKSC_1),

    {ok, KsrPid} =
        supervisor:start_child(
            kafka_stream_consumer_sup,
            {
                UuidKSR,
                {
                    kafka_sequential_reader,
                    start_link,
                    [[Broker, Topic, Partition, Offset]]
                },
                temporary,      % never restart
                brutal_kill,
                worker,
                [kafka_sequential_reader]
            }),

    {ok, KscPid} =
        supervisor:start_child(
            kafka_stream_consumer_sup,
            {
                UuidKSC,
                {
                    kafka_stream_consumer,
                    start_link,
                    [KsrPid]
                },
                temporary,      % never restart
                brutal_kill,
                worker,
                [kafka_stream_consumer]
            }),

    {kafka_stream_consumer:get_stream_function(),
     kafka_stream_consumer:get_terminate_function(),
     KscPid}.
--------------------------------------------------------------------------------
/src/kafka_sequential_reader.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_sequential_reader.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_sequential_reader).
-behaviour(gen_server).

-export([start_link/1, next_messages/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-record(state, {conn_pid, broker, partition, topic, offset,
                maxsize = 1048576}).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

start_link([Broker, Topic, Partition, Offset]) ->
    gen_server:start_link(?MODULE, [Broker, Topic, Partition, Offset], []).

next_messages(ConnPid) ->
    gen_server:call(ConnPid, next_messages).

%%%-------------------------------------------------------------------
%%% GEN_SERVER CB FUNCTIONS
%%%-------------------------------------------------------------------

init([Broker, Topic, Partition, Offset]) ->
    {ok, #state{broker=Broker, topic=Topic, partition=Partition,
                offset=Offset}, 0}.

handle_call(next_messages, _From,
            #state{broker=Broker, topic=Topic, partition=Partition,
                   offset=Offset} = State) ->
    Resp = kafka_simple_api:fetch(Broker, Topic, Partition, Offset),
    case Resp of
        {ok, []} ->
            {reply, Resp, State};
        {ok, {[], _}} ->
            {reply, Resp, State};
        {ok, {_Messages, Size}} ->
            NewState = State#state{offset=Offset+Size},
            {reply, Resp, NewState}
    end.

handle_cast(stop_link, State) ->
    {stop, normal, State}.

handle_info(timeout, #state{conn_pid=undefined} = State) ->
    NewState = State#state{conn_pid=self()},
    {noreply, NewState};

handle_info(_, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
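
%% Usage sketch: the reader advances its offset by the byte size of each
%% fetched batch (see handle_call(next_messages, ...) above), so repeated
%% calls walk the log sequentially. Broker 0 and topic <<"test">> are
%% illustrative values:
%%
%%   {ok, Pid} = kafka_sequential_reader:start_link([0, <<"test">>, 0, 0]),
%%   {ok, {Batch1, _Size1}} = kafka_sequential_reader:next_messages(Pid),
%%   {ok, {Batch2, _Size2}} = kafka_sequential_reader:next_messages(Pid).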
--------------------------------------------------------------------------------
/src/uuid.erl:
--------------------------------------------------------------------------------
% Copyright (c) 2008, Travis Vachon
% All rights reserved.
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
%     * Redistributions of source code must retain the above copyright
%       notice, this list of conditions and the following disclaimer.
%
%     * Redistributions in binary form must reproduce the above copyright
%       notice, this list of conditions and the following disclaimer in the
%       documentation and/or other materials provided with the distribution.
%
%     * Neither the name of the author nor the names of its contributors
%       may be used to endorse or promote products derived from this
%       software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
% "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
% LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
% A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
% OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
% SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
% TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%
-module(uuid).
-export([v4/0, to_string/1, get_parts/1, to_binary/1]).

% Generates a random binary UUID.
v4() ->
    v4(random:uniform(round(math:pow(2, 48))) - 1,
       random:uniform(round(math:pow(2, 12))) - 1,
       random:uniform(round(math:pow(2, 32))) - 1,
       random:uniform(round(math:pow(2, 30))) - 1).
v4(R1, R2, R3, R4) ->
    <<R1:48, 4:4, R2:12, 2:2, R3:32, R4:30>>.

% Returns a string representation of a binary UUID.
to_string(U) ->
    lists:flatten(io_lib:format("~8.16.0b-~4.16.0b-~4.16.0b-~2.16.0b~2.16.0b-~12.16.0b", get_parts(U))).

% Returns the 32, 16, 16, 8, 8, 48 parts of a binary UUID.
get_parts(<<TL:32, TM:16, THV:16, CSR:8, CSL:8, N:48>>) ->
    [TL, TM, THV, CSR, CSL, N].

% Converts a UUID string in the format of 550e8400-e29b-41d4-a716-446655440000
% (with or without the dashes) to binary.
to_binary(U) ->
    convert(lists:filter(fun(Elem) -> Elem /= $- end, U), []).

% Converts a list of pairs of hex characters (00-ff) to bytes.
convert([], Acc) ->
    list_to_binary(lists:reverse(Acc));
convert([X, Y | Tail], Acc) ->
    {ok, [Byte], _} = io_lib:fread("~16u", [X, Y]),
    convert(Tail, [Byte | Acc]).
--------------------------------------------------------------------------------
/src/kafka_server.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_server.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_server).
-author('Milind Parikh ').
-behaviour(gen_server).
-include("erlkafka.hrl").
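
%% Kafka 0.7 frames every response as <<Length:32, ErrorCode:16, Payload/binary>>,
%% where Length counts the error code plus the payload. The handlers below
%% therefore read 6 bytes (length + error code) first and then Length - 2
%% bytes of payload; <<2:32, 0:16>> is an empty, error-free response.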
-export([start_link/0, start_link/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-record(state, {socket, maxsize = ?MAX_MSG_SIZE}).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

start_link() ->
    start_link(['127.0.0.1', 9092]).

start_link([Host, Port]) ->
    gen_server:start_link(?MODULE, [Host, Port], []).

%%%-------------------------------------------------------------------
%%% GEN_SERVER CB FUNCTIONS
%%%-------------------------------------------------------------------

init([Host, Port]) ->
    {ok, Socket} = gen_tcp:connect(Host, Port,
                                   [binary, {active, false}, {packet, raw}]),
    {ok, #state{socket=Socket}, 0}.

handle_call({request_with_response, Req}, _From, State) ->
    ok = gen_tcp:send(State#state.socket, Req),
    case gen_tcp:recv(State#state.socket, 6) of
        {ok, <<2:32/integer, 0:16/integer>>} ->
            {reply, {ok, []}, State};
        {ok, <<L:32/integer, 0:16/integer>>} ->
            {ok, Data} = gen_tcp:recv(State#state.socket, L-2),
            {Messages, Size} = kafka_protocol:parse_messages(Data),
            {reply, {ok, {Messages, Size}}, State};
        {ok, B} ->
            {reply, {error, B}, State}
    end;

handle_call({request_with_response_offset, Req}, _From, State) ->
    ok = gen_tcp:send(State#state.socket, Req),
    case gen_tcp:recv(State#state.socket, 6) of
        {ok, <<2:32/integer, 0:16/integer>>} ->
            {reply, {ok, []}, State};
        {ok, <<L:32/integer, 0:16/integer>>} ->
            {ok, Data} = gen_tcp:recv(State#state.socket, L-2),
            Offsets = kafka_protocol:parse_offsets(Data),
            {reply, {ok, Offsets}, State};
        {ok, B} ->
            {reply, {error, B}, State}
    end;

handle_call({request, Req}, _From, State) ->
    ok = gen_tcp:send(State#state.socket, Req),
    {reply, ok, State}.

handle_cast(stop_link, State) ->
    {stop, normal, State}.

handle_info(_, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
--------------------------------------------------------------------------------
/src/kafka_stream_consumer.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_stream_consumer.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_stream_consumer).
-behaviour(gen_server).

-export([start_link/1, get_stream_function/0, get_terminate_function/0]).

-export([stream_messages/1, stop_link/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-record(state, {conn_pid, ksr_pid, count=5, time=5000}).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

start_link(KsrPid) ->
    start_link(KsrPid, 5, 5000).

get_stream_function() ->
    fun stream_messages/1.

get_terminate_function() ->
    fun stop_link/1.
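
%% stream_messages/1 (wrapped by get_stream_function/0 above) polls the
%% sequential reader up to Count times, sleeping Time milliseconds after each
%% empty batch, and replies {ok, {Messages, Size}} as soon as data arrives,
%% or {ok, no_data} otherwise.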
%%%-------------------------------------------------------------------
%%% INTERNAL API FUNCTIONS
%%%-------------------------------------------------------------------

stream_messages(ConnPid) ->
    gen_server:call(ConnPid, stream_messages).

stop_link(ConnPid) ->
    gen_server:cast(ConnPid, stop_link).

%%%-------------------------------------------------------------------
%%% GEN_SERVER CB FUNCTIONS
%%%-------------------------------------------------------------------

init([KsrPid, Count, Time]) ->
    {ok, #state{ksr_pid=KsrPid, count=Count, time=Time}, 0}.

handle_call(get_state, _From, State) ->
    {reply, {ok, State}, State};

handle_call(stream_messages, _From,
            #state{ksr_pid=KsrPid, count=Count, time=Time} = State) ->
    try
        lists:foreach(fun(_X) ->
                          Resp = kafka_sequential_reader:next_messages(KsrPid),
                          case Resp of
                              {ok, []} ->
                                  throw(no_data);
                              {ok, {[], _}} ->
                                  ok;
                              {ok, {Messages, Size}} ->
                                  throw({Messages, Size})
                          end,
                          receive
                          after Time ->
                              ok    % wait Time milliseconds between polls
                          end
                      end,
                      lists:seq(1, Count)),    % poll at most Count times

        {reply, {ok, no_data}, State}
    catch
        throw:{Messages, Size} -> {reply, {ok, {Messages, Size}}, State};
        throw:no_data          -> {reply, {ok, no_data}, State}
    end.

handle_cast(stop_link, State) ->
    {stop, normal, State}.

handle_info(timeout, #state{conn_pid=undefined, ksr_pid=KsrPid} = State) ->
    NewState = State#state{conn_pid=self()},
    link(KsrPid),
    {noreply, NewState};

handle_info(_, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%%%-------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------

start_link(KsrPid, Count, Time) ->
    gen_server:start_link(?MODULE, [KsrPid, Count, Time], []).
--------------------------------------------------------------------------------
/src/kafka_server_sup.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_server_sup.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_server_sup).
-author("Milind Parikh [http://www.milindparikh.com]").
-behaviour(supervisor).

-export([start_link/1, start_link/0,
         get_ids/0,
         get_random_broker_instance_from_pool/1]).
-export([init/1]).

-define(DEFAULT_POOL_COUNT, 5).
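
%% Child ids in this supervisor are allocated as Broker * PoolCount + N with
%% N in 1..PoolCount: with the default pool of 5, broker 0 owns ids 1..5,
%% broker 1 owns ids 6..10, and so on.
%% get_random_broker_instance_from_pool/1 picks one of a broker's ids
%% uniformly at random.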
%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

start_link() ->
    case application:get_env(erlkafka_app, enable_kafka_autodiscovery) of
        undefined ->
            start_link([{0, '127.0.0.1', 9092}]);
        {ok, false} ->
            case application:get_env(erlkafka_app, kafka_brokers) of
                undefined ->
                    % This is the default; if it does not work,
                    % change the application env
                    start_link([{0, '127.0.0.1', 9092}]);
                {ok, Brokers} ->
                    start_link(Brokers)
            end;
        {ok, true} ->
            start_link(kafka_protocol:get_list_of_brokers())
    end.

start_link(Params) ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, [Params]).

get_random_broker_instance_from_pool(Broker) ->
    BrokerPoolCount = param(broker_pool_count, ?DEFAULT_POOL_COUNT),
    Pids = get_ids(),
    BrokerInstance = Broker*BrokerPoolCount + random:uniform(BrokerPoolCount),
    lists:nth(1,
              lists:filter(fun({_Child, Id}) ->
                               Id =:= BrokerInstance
                           end,
                           Pids)).

get_ids() ->
    [{Child, Id} ||
        {Id, Child, _Type, _Modules} <- supervisor:which_children(?MODULE),
        Child /= undefined, Id /= 0].

%%%-------------------------------------------------------------------
%%% SUPERVISOR CB FUNCTIONS
%%%-------------------------------------------------------------------

init([Params]) ->
    BrokerPoolCount = param(broker_pool_count, ?DEFAULT_POOL_COUNT),
    RestartStrategy = {one_for_one, 0, 1},
    Children =
        lists:flatten(
            lists:map(fun({Broker, Host, Port}) ->
                          lists:map(fun(X) ->
                                        {Broker*BrokerPoolCount + X,
                                         {kafka_server, start_link, [[Host, Port]]},
                                         transient,
                                         brutal_kill,
                                         worker,
                                         [kafka_server]}
                                    end,
                                    lists:seq(1, BrokerPoolCount))
                      end,
                      Params)),
    {ok, {RestartStrategy, Children}}.

%%%-------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------

param(Name, Default) ->
    case application:get_env(erlkafka_app, Name) of
        {ok, Value} -> Value;
        _ -> Default
    end.
--------------------------------------------------------------------------------
/src/kafka_simple_api.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_simple_api.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_simple_api).
-author('Milind Parikh ').

-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.

-include("erlkafka.hrl").

-export([produce/4, multi_produce/2, fetch/4, multi_fetch/2, offset/5]).
-export([get_list_of_brokers/0, get_list_of_broker_partitions/1]).
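
%% Example calls (broker id 0 and the topics are the same illustrative
%% values used by the tests at the bottom of this file):
%%
%%   ok = kafka_simple_api:produce(0, <<"test">>, 0, [<<"hi">>, <<"there">>]),
%%   {ok, {Messages, _Size}} = kafka_simple_api:fetch(0, <<"test">>, 0, 0),
%%   {ok, Offsets} = kafka_simple_api:offset(0, <<"test">>, 0, -1, 10).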
%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

produce(Broker, Topic, Partition, Messages) ->
    Req = kafka_protocol:produce_request(Topic, Partition, Messages),
    call({Broker, request, Req}).    % a produce in Kafka 0.7 has no response

multi_produce(Broker, TopicPartitionMessages) ->
    Req = kafka_protocol:multi_produce_request(TopicPartitionMessages),
    call({Broker, request, Req}).    % a produce in Kafka 0.7 has no response

fetch(Broker, Topic, Partition, Offset) ->
    Req = kafka_protocol:fetch_request(Topic, Offset, Partition, ?MAX_MSG_SIZE),
    call({Broker, request_with_response, Req}).

multi_fetch(Broker, TopicPartitionOffsets) ->
    Req = kafka_protocol:multi_fetch_request(TopicPartitionOffsets),
    call({Broker, request_with_response, Req}).

offset(Broker, Topic, Partition, Time, MaxNumberOfOffsets) ->
    Req = kafka_protocol:offset_request(Topic, Partition, Time, MaxNumberOfOffsets),
    call({Broker, request_with_response_offset, Req}).

get_list_of_brokers() ->
    kafka_protocol:get_list_of_brokers().

get_list_of_broker_partitions(Topic) ->
    kafka_protocol:get_list_of_broker_partitions(Topic).

%%%-------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------

call({Broker, request, Req}) ->
    case kafka_server_sup:get_random_broker_instance_from_pool(Broker) of
        {error, _} ->
            {error, unable_to_get_broker_instance_from_pool};
        {BrokerInstancePid, _BrokerInstanceId} ->
            gen_server:call(BrokerInstancePid, {request, Req})
    end;

call({Broker, request_with_response_offset, Req}) ->
    case kafka_server_sup:get_random_broker_instance_from_pool(Broker) of
        {error, _} ->
            {error, unable_to_get_broker_instance_from_pool};
        {BrokerInstancePid, _BrokerInstanceId} ->
            gen_server:call(BrokerInstancePid, {request_with_response_offset, Req})
    end;

call({Broker, request_with_response, Req}) ->
    case kafka_server_sup:get_random_broker_instance_from_pool(Broker) of
        {error, _} ->
            {error, unable_to_get_broker_instance_from_pool};
        {BrokerInstancePid, _BrokerInstanceId} ->
            gen_server:call(BrokerInstancePid, {request_with_response, Req})
    end.

%%%-------------------------------------------------------------------
%%% TEST FUNCTIONS
%%%-------------------------------------------------------------------

-ifdef(TEST).

get_list_of_brokers_test() ->
    get_list_of_brokers().

produce_test() ->
    BrokerId = 0,
    Topic = <<"test">>,
    Partition = 0,
    Messages = [<<"hi">>, <<"there">>],
    produce(BrokerId, Topic, Partition, Messages).
multi_produce_test() ->
    BrokerId = 0,
    TopicPartitionMessages = [
        {
            <<"test1">>,            % Topic
            0,                      % Partition
            [{1, 0, <<"hi">>},      % {Magic, Compression, Msg}
             {1, 0, <<"there">>}]
        },
        {
            <<"test2">>,            % Topic
            0,                      % Partition
            [{1, 0, <<"hello">>},   % {Magic, Compression, Msg}
             {1, 0, <<"world">>}]
        }
    ],
    multi_produce(BrokerId, TopicPartitionMessages).

fetch_test() ->
    BrokerId = 0,
    Topic = <<"test">>,
    Partition = 0,
    Offset = 0,
    fetch(BrokerId, Topic, Partition, Offset).

multi_fetch_test() ->
    BrokerId = 0,
    TopicPartitionOffsets = [{<<"test">>, 0, 0, ?MAX_MSG_SIZE},
                             {<<"test2">>, 0, 0, ?MAX_MSG_SIZE}],
    multi_fetch(BrokerId, TopicPartitionOffsets).

offset_test() ->
    BrokerId = 0,
    Topic = <<"test">>,
    Partition = 0,
    Time = -1,
    MaxNumberOfOffsets = 10,
    offset(BrokerId, Topic, Partition, Time, MaxNumberOfOffsets).

-endif.
--------------------------------------------------------------------------------
/src/kafka_protocol.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File    : kafka_protocol.erl
%%% Author  : Milind Parikh
%%%-------------------------------------------------------------------

-module(kafka_protocol).
-author('Milind Parikh ').

%% Initial philosophy is derived from
%% https://github.com/wooga/kafka-erlang.git
%% The kafka protocol is tested against kafka 0.7.1
%% It requires ezk (https://github.com/infinipool/ezk.git) for dynamic discovery

-export([fetch_request/3, fetch_request/4]).
-export([multi_fetch_request/1]).
-export([parse_messages/1]).

-export([produce_request/2, produce_request/3, produce_request/5]).
-export([multi_produce_request/1]).

-export([offset_request/4]).
-export([parse_offsets/1]).

-export([get_list_of_brokers/0]).
-export([get_list_of_broker_partitions/1]).

-define(RQ_TYPE_PRODUCE, 0).
-define(RQ_TYPE_FETCH, 1).
-define(RQ_TYPE_MULTIFETCH, 2).
-define(RQ_TYPE_MULTIPRODUCE, 3).
-define(RQ_TYPE_OFFSETS, 4).

%%%-------------------------------------------------------------------
%%% API FUNCTIONS
%%%-------------------------------------------------------------------

%%%-------------------------------------------------------------------
%%% API/FETCH FUNCTIONS
%%%-------------------------------------------------------------------

%% @doc The default fetch request, which assumes a partition of 0.

-spec fetch_request(Topic::binary(),
                    Offset::integer(),
                    MaxSize::integer()) -> binary().

fetch_request(Topic, Offset, MaxSize) ->
    fetch_request(Topic, Offset, 0, MaxSize).

%% @doc The fetch request with the partition also passed in.

-spec fetch_request(Topic::binary(),
                    Offset::integer(),
                    Partition::integer(),
                    MaxSize::integer()) -> binary().

fetch_request(Topic, Offset, Partition, MaxSize) ->
    TopicSize = size(Topic),
    RequestSize = 2 + 2 + TopicSize + 4 + 8 + 4,
    <<RequestSize:32/integer,
      ?RQ_TYPE_FETCH:16/integer,
      TopicSize:16/integer,
      Topic/binary,
      Partition:32/integer,
      Offset:64/integer,
      MaxSize:32/integer>>.
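
%% On the wire (all integers big-endian) a fetch request is therefore:
%%
%%   4 bytes   total length (excluding this field)
%%   2 bytes   request type  (?RQ_TYPE_FETCH = 1)
%%   2 bytes   topic length
%%   N bytes   topic
%%   4 bytes   partition
%%   8 bytes   offset
%%   4 bytes   max size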
%% @doc The multi-fetch request, with the partition also passed in.
%% Each TopicPartitionOffset is {Topic, Partition, Offset, MaxSize}, e.g.
%% [{<<"test">>, 0, 0, 100}, {<<"test2">>, 0, 0, 200}]

-spec multi_fetch_request(TopicPartitionOffsets::list()) -> binary().

multi_fetch_request(TopicPartitionOffsets) ->
    TPOsSize = size_multi_fetch_tpos(TopicPartitionOffsets),
    TopicPartitionCount = length(TopicPartitionOffsets),
    RequestLength = 2 + 2 + TPOsSize,

    RequestHeader = <<RequestLength:32/integer,
                      ?RQ_TYPE_MULTIFETCH:16/integer,
                      TopicPartitionCount:16/integer>>,

    RequestBody = lists:foldr(fun({Topic, Partition, Offset, MaxSize}, Acc) ->
                                  TopicLength = size(Topic),
                                  <<TopicLength:16/integer,
                                    Topic/binary,
                                    Partition:32/integer,
                                    Offset:64/integer,
                                    MaxSize:32/integer,
                                    Acc/binary>>
                              end,
                              <<"">>,
                              TopicPartitionOffsets),
    <<RequestHeader/binary, RequestBody/binary>>.

%% @doc Parse the fetched messages.

-spec parse_messages(Bs::binary()) -> {list(), integer()}.

parse_messages(Bs) ->
    parse_messages(Bs, [], 0).

%%%-------------------------------------------------------------------
%%% API/PRODUCE FUNCTIONS
%%%-------------------------------------------------------------------

%% @doc The default produce request, with the default partition.

-spec produce_request(Topic::binary(), Messages::list(binary())) -> binary().

produce_request(Topic, Messages) ->
    produce_request(Topic, 0, 1, 0, Messages).

%% @doc The default produce request.

-spec produce_request(Topic::binary(),
                      Partition::integer(),
                      Messages::list(binary())) -> binary().

produce_request(Topic, Partition, Messages) ->
    produce_request(Topic, Partition, 1, 0, Messages).

%% @doc The produce request with passed-in Magic and Compression.

-spec produce_request(Topic::binary(),
                      Partition::integer(),
                      Magic::integer(),
                      Compression::integer(),
                      Messages::list()) -> binary().

produce_request(Topic, Partition, Magic, Compression, Messages) ->
    MessagesLength = size_of_produce_messages(Messages),
    io:format("Messages_Length = ~w~n", [MessagesLength]),
    TopicSize = size(Topic),
    RequestSize = 2 + 2 + TopicSize + 4 + 4 + MessagesLength,

    ProducedMessages = lists:foldr(fun(X, A) ->
                                       KafkaMessage = produce_message(X, Magic, Compression),
                                       <<KafkaMessage/binary, A/binary>>
                                   end,
                                   <<"">>,
                                   Messages),

    <<RequestSize:32/integer,
      ?RQ_TYPE_PRODUCE:16/integer,
      TopicSize:16/integer,
      Topic/binary,
      Partition:32/integer,
      MessagesLength:32/integer,
      ProducedMessages/binary>>.

%% @doc The multi-produce request, with the partition also passed in, e.g.
%% [{<<"topic1">>, 0, [{Magic, Compression, <<"hi">>}, {Magic, Compression, <<"second hihi">>}]},
%%  {<<"topic2">>, 0, [{Magic, Compression, <<"hi2">>}, {Magic, Compression, <<"second hihi2">>}]}]

-spec multi_produce_request(TopicPartitionMessages::list()) -> binary().
multi_produce_request(TopicPartitionMessages) ->
    TPMSize = size_multi_produce_tpms(TopicPartitionMessages),
    RequestLength = 2 + 2 + TPMSize,
    TopicPartitionCount = length(TopicPartitionMessages),

    RequestHeader = <<RequestLength:32/integer,
                      ?RQ_TYPE_MULTIPRODUCE:16/integer,
                      TopicPartitionCount:16/integer>>,

    RequestBody =
        lists:foldr(fun({Topic, Partition, Messages}, Acc1) ->
                        TopicLength = size(Topic),

                        {MessagesLength, MessagesBin} =
                            lists:foldr(fun({Magic, Compression, MsgBin}, {Count, Bin}) ->
                                            KafkaMessage = produce_message(MsgBin, Magic, Compression),
                                            {size(KafkaMessage) + Count,
                                             <<KafkaMessage/binary, Bin/binary>>}
                                        end,
                                        {0, <<"">>},
                                        Messages),

                        <<TopicLength:16/integer,
                          Topic/binary,
                          Partition:32/integer,
                          MessagesLength:32/integer,
                          MessagesBin/binary,
                          Acc1/binary>>
                    end,
                    <<"">>,
                    TopicPartitionMessages),

    <<RequestHeader/binary, RequestBody/binary>>.

%%%-------------------------------------------------------------------
%%% API/OFFSETS FUNCTIONS
%%%-------------------------------------------------------------------

%% @doc The offset request for a given time.

-spec offset_request(Topic::binary(),
                     Partition::integer(),
                     Time::integer(),
                     MaxNumberOfOffsets::integer()) -> binary().

offset_request(Topic, Partition, Time, MaxNumberOfOffsets) ->
    TopicSize = size(Topic),
    RequestLength = 2 + 2 + TopicSize + 4 + 8 + 4,
    <<RequestLength:32/integer,
      ?RQ_TYPE_OFFSETS:16/integer,
      TopicSize:16/integer,
      Topic/binary,
      Partition:32/integer,
      Time:64/integer,
      MaxNumberOfOffsets:32/integer>>.

%% @doc Parsing the results of the offset request.

-spec parse_offsets(binary()) -> {list()}.

parse_offsets(<<NumOffsets:32/integer, Ds/binary>>) ->
    parse_offsets(Ds, [], NumOffsets).

%%%-------------------------------------------------------------------
%%% API/BROKER FUNCTIONS
%%%-------------------------------------------------------------------

%% If enable_kafka_autodiscovery is set in the erlkafka_app environment,
%% looks for the zookeeper-based broker registry; if not, falls back to the
%% static definition in kafka_brokers.

get_list_of_brokers() ->
    case application:get_env(erlkafka_app, enable_kafka_autodiscovery) of
        undefined -> [];
        {ok, false} ->
            case application:get_env(erlkafka_app, kafka_brokers) of
                undefined -> [];
                {ok, X} -> X
            end;
        {ok, true} ->
            get_dynamic_list_of_brokers()
    end.

%% Gets all possible broker-partition combinations hosting a specific topic.
%% Currently only implemented through auto discovery in zookeeper (and
%% therefore requires ezk).

get_list_of_broker_partitions(Topic) ->
    get_dynamic_list_of_broker_partitions(Topic).
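
%% The zookeeper layout assumed by the discovery code below follows the
%% Kafka 0.7 registry, optionally under kafka_prefix. The first
%% colon-separated field of the broker value (the creator id, by this
%% reading of the code) is skipped:
%%
%%   <prefix>/brokers/ids/<broker_id>            -> "<creator>:<host>:<port>"
%%   <prefix>/brokers/topics/<topic>/<broker_id> -> number of partitions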
%%%-------------------------------------------------------------------
%%% END API FUNCTIONS
%%%-------------------------------------------------------------------

%%%-------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------

get_dynamic_list_of_broker_partitions(Topic) ->
    DynList =
        lists:flatten(
            lists:foldr(fun({Broker, Partitions}, Acc1) ->
                            [lists:foldr(fun(Partition, Acc2) ->
                                             [{Broker, Partition} | Acc2]
                                         end,
                                         [],
                                         Partitions)
                             | Acc1]
                        end,
                        [],
                        lists:foldr(fun({BrokerId, NumPartitions}, Acc3) ->
                                        [{BrokerId, lists:seq(0, NumPartitions)} | Acc3]
                                    end,
                                    [],
                                    lists:foldr(fun({BrokerId, _, _}, Acc4) ->
                                                    [{BrokerId,
                                                      get_num_partitions_topic_broker(Topic, BrokerId)}
                                                     | Acc4]
                                                end,
                                                [],
                                                kafka_protocol:get_list_of_brokers())))),
    DynList.

get_num_partitions_topic_broker(Topic, Broker) ->
    NewTopic = binary_to_list(Topic),
    {ok, Conn} = ezk:start_connection(),
    case ezk:get(Conn, get_path_for_broker_topics() ++ "/" ++ NewTopic
                       ++ "/" ++ integer_to_list(Broker)) of
        {ok, {X, _}} -> NumPartitions = list_to_integer(binary_to_list(X));
        {error, no_dir} -> NumPartitions = 0
    end,
    ezk:end_connection(Conn, ""),
    NumPartitions.

get_dynamic_list_of_brokers() ->
    {ok, Conn} = ezk:start_connection(),
    {ok, RawListBrokers} = ezk:ls(Conn, get_path_for_broker_ids()),
    ListBrokers =
        lists:foldr(fun(X, Acc) ->
                        {ok, {B1, _}} = ezk:get(Conn, get_path_for_broker_ids() ++ "/" ++ X),
                        [{
                            list_to_integer(binary_to_list(X)),
                            list_to_atom(lists:nth(2, string:tokens(binary_to_list(B1), ":"))),
                            list_to_integer(lists:nth(3, string:tokens(binary_to_list(B1), ":")))
                         }
                         | Acc]
                    end,
                    [],
                    RawListBrokers),
    ezk:end_connection(Conn, ""),
    ListBrokers.

get_path_for_broker_ids() ->
    case application:get_env(erlkafka_app, kafka_prefix) of
        undefined -> "/brokers/ids";
        {ok, KafkaPrefix} -> KafkaPrefix ++ "/brokers/ids"
    end.

get_path_for_broker_topics() ->
    case application:get_env(erlkafka_app, kafka_prefix) of
        undefined -> "/brokers/topics";
        {ok, KafkaPrefix} -> KafkaPrefix ++ "/brokers/topics"
    end.

produce_message(X, Magic, Compression) ->
    MessageLength = 1 + 1 + 4 + size(X),
    CheckSum = erlang:crc32(X),
    <<
      MessageLength:32/integer,
      Magic:8/integer,
      Compression:8/integer,
      CheckSum:32/integer,
      X/binary
    >>.

size_multi_fetch_tpos(TPOs) ->
    lists:foldl(fun({Topic, _, _, _}, A) ->
                    2 + size(Topic) + 4 + 8 + 4 + A
                end,
                0,
                TPOs).

size_multi_produce_tpms(TopicPartitionMessages) ->
    lists:foldl(fun({Topic, _, Messages}, Acc1) ->
                    2 + size(Topic) + 4 + 4 +
                    lists:foldl(fun({_Magic, _Compression, X}, Acc2) ->
                                    4 + 1 + 1 + 4 + size(X) + Acc2
                                end,
                                0,
                                Messages)
                    + Acc1
                end,
                0,
                TopicPartitionMessages).
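
%% Worked example of the per-message accounting above: a payload <<"hi">>
%% (2 bytes) costs 4 (length) + 1 (magic) + 1 (compression) + 4 (crc32) + 2
%% (payload) = 12 bytes on the wire, matching produce_message/3.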
size_of_produce_messages(Messages) ->
    lists:foldl(fun(X, Size) ->
                    Size + 4 + 1 + 1 + 4 + size(X)
                end,
                0,
                Messages).

parse_offsets(<<"">>, Offsets, _) ->
    {lists:reverse(Offsets)};

parse_offsets(_, Offsets, 0) ->
    {lists:reverse(Offsets)};

parse_offsets(<<Offset:64/integer, Rest/binary>>, Offsets, NumOffsets) ->
    parse_offsets(Rest, [Offset | Offsets], NumOffsets - 1).

parse_messages(<<>>, Acc, Size) ->
    {lists:reverse(Acc), Size};

parse_messages(<<L:32/integer, _/binary>> = B, Acc, Size) when size(B) >= L + 4 ->
    MsgLengthOfPayload = L - 1 - 1 - 4,
    <<_:32/integer, _M:8/integer, _C:8/integer, _Check:32/integer,
      Msg:MsgLengthOfPayload/binary,
      Rest/bitstring>> = B,
    parse_messages(Rest, [Msg | Acc], Size + L + 4);

parse_messages(_B, Acc, Size) ->
    {lists:reverse(Acc), Size}.

%%%-------------------------------------------------------------------
%%% END INTERNAL FUNCTIONS
%%%-------------------------------------------------------------------
--------------------------------------------------------------------------------