├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── apps └── ddb_proxy │ ├── include │ └── .gitignore │ ├── proto │ └── remote.proto │ ├── rebar.config │ └── src │ ├── .gitignore │ ├── ddb_proxy.app.src │ ├── ddb_proxy_app.erl │ ├── ddb_proxy_prom_scraper.erl │ ├── ddb_proxy_prom_sup.erl │ ├── ddb_proxy_sup.erl │ ├── ddb_proxy_udp.erl │ ├── dp_binary_proto.erl │ ├── dp_http_listener.erl │ ├── dp_index.erl │ ├── dp_line_proto.erl │ ├── dp_multiline_proto.erl │ ├── dp_prom_writer.erl │ ├── dp_tcp_listener.erl │ └── dp_util.erl ├── config.mk ├── elvis.config ├── example ├── diamond.conf └── telegraf.conf ├── fifo.mk ├── hooks └── pre-commit ├── rebar.config ├── rebar.lock ├── rebar3 ├── rel ├── deb │ ├── .gitignore │ ├── Makefile │ ├── control │ ├── copyright │ ├── postinst │ ├── preinst │ ├── systemd │ │ └── dalmatinerpx.service │ └── upstart │ │ └── ddb_proxy ├── files │ ├── erl │ └── nodetool ├── freebsd │ ├── dalmatinerpx │ └── vars.config ├── pkg │ ├── .gitignore │ ├── Makefile │ ├── comment │ ├── deinstall.sh │ ├── deploy │ │ ├── .gitignore │ │ └── sbin │ │ │ └── dpx │ ├── description │ ├── displayfile │ ├── install.sh │ ├── post_pkg │ └── pre_pkg ├── pkgng │ ├── +MANIFEST.in │ ├── +POST_DEINSTALL │ ├── +POST_INSTALL │ ├── +PRE_DEINSTALL │ ├── +PRE_INSTALL │ ├── .gitignore │ └── Makefile ├── vars.config └── vars │ └── deb.config ├── schema ├── dpx.schema ├── erlang_vm.schema └── lager.schema ├── share ├── .gitignore └── dpx.xml └── tree /.gitignore: -------------------------------------------------------------------------------- 1 | .rebar3 2 | _* 3 | .eunit 4 | *.o 5 | *.beam 6 | *.plt 7 | *.swp 8 | *.swo 9 | .erlang.cookie 10 | ebin 11 | log 12 | erl_crash.dump 13 | rebar3.crashdump 14 | .rebar 15 | logs 16 | _build 17 | apps/esyslog/src/esyslog_message_lexer.erl 18 | apps/esyslog/src/esyslog_message_parser.erl 19 | *~ 20 | rel/deb/*.deb 21 | compile_commands.json 22 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: erlang 2 | otp_release: 3 | - 18.3 4 | 5 | sudo: false 6 | install: true 7 | script: 8 | - ./rebar3 xref 9 | #- ./rebar3 dialyzer # need to disable this for the damn protobuff 10 | 11 | branches: 12 | only: 13 | - master 14 | - dev 15 | - test 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Heinz N. Gies 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
-------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
1 | APP=dpx
2 | include fifo.mk
3 |
4 | all:
5 | 	$(REBAR) compile
6 |
7 | clean:
8 | 	$(REBAR) clean
9 | 	$(MAKE) -C rel/deb clean
10 |
11 | rel: FORCE
12 | 	$(REBAR) as prod release
13 |
14 | deb-clean:
15 | 	$(MAKE) -C rel/deb clean
16 |
17 | deb-prepare:
18 | 	$(REBAR) as deb compile
19 | 	$(REBAR) as deb release
20 | 	$(MAKE) -C rel/deb prepare
21 |
22 | deb-package: deb-prepare
23 | 	$(MAKE) -C rel/deb package
24 |
25 | package: rel
26 | 	$(MAKE) -C rel/pkg package
27 |
28 | version_header:
29 | 	true
30 |
31 | FORCE:
32 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # DalmatinerDB Protocol X(-translator)
2 |
3 | Official Site: https://dalmatiner.io/
4 |
5 | A multi-metric and event-log protocol translator for use with DalmatinerDB.
6 |
7 | ## Supported metric formats
8 | Supports Graphite, Metrics 2.0, Influx, Prometheus and OpenTSDB.
9 |
10 | ## Supported log formats
11 | Currently only Syslog is supported.
12 |
13 | ![dalmatiner architecture](http://cdn2.hubspot.net/hubfs/528953/dalmatiner.png "Dalmatiner Architecture")
14 |
15 | This protocol translator connects by default to DalmatinerDB Storage on localhost:5555 and to DalmatinerDB Metadata (Postgres) on localhost:5432.
16 |
17 | Requirements: As per the diagram, you will need both DalmatinerDB and Postgres running.
18 |
19 | # Metric Listener Configuration
20 |
21 | Settings are configured in dpx.conf.
22 |
23 | ## DQE Indexer backend
24 |
25 | First, configure the dqe indexer module that you intend to use.
26 |
27 | For example, for the Postgres indexer, configure your dpx.conf as follows:
28 | ```
29 | idx.backend = dqe_idx_pg
30 | ```
31 |
32 | ## Graphite
33 |
34 | Enable the Graphite listener with the following config lines.
35 |
36 | ```
37 | listeners.dp_graphite.bucket = graphite
38 | listeners.dp_graphite.port = 2003
39 | ```
40 |
41 | ### Note (Graphite Optional Extras):
42 |
43 | You can optionally include tags on Graphite metrics by including an = sign in a segment. Such a segment is handled as a key/value pair for a tag.
44 |
45 | Ordering is important! Example:
46 |
47 | `a.tag1=value.tag2=value` and `a.tag2=value.tag1=value` are different metrics!
48 |
49 | Metadata is supported by inserting an 'empty' segment. Example:
50 |
51 | `a.tag=value..metadata=value` and `a.tag=value..metadata=other_value` end up in the same metric, just with a different value for metadata.
52 |
53 | When using tags, the base of the metric has to be provided first, followed by the tags; in the examples above, `a` is the base metric.
54 |
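For illustration, here is a hypothetical set of lines sent to the Graphite listener on port 2003, assuming the usual Graphite plaintext `<path> <value> <epoch-seconds>` form; the metric name `web.requests`, the tag values and the timestamps are made up, and the `#` lines are annotations, not part of the protocol:

```
# base metric "web.requests" with two tags; both lines update the SAME metric
web.requests.region=eu.status=200 17 1474903200
web.requests.region=eu.status=200 23 1474903260

# same tags in a different order => a DIFFERENT metric
web.requests.status=200.region=eu 23 1474903260

# an empty segment ('..') starts the metadata part; these two lines hit the
# same metric and only change the value of the "owner" metadata
web.requests.region=eu..owner=team-a 42 1474903320
web.requests.region=eu..owner=team-b 42 1474903380
```
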
55 | ## InfluxDB
56 |
57 | Enable the Influx listener with the following config lines.
58 |
59 | ```
60 | listeners.dp_influx.bucket = influx
61 | listeners.dp_influx.protocol = http
62 | listeners.dp_influx.port = 8086
63 | ```
64 |
65 | ## Metrics 2.0
66 |
67 | Enable the Metrics 2.0 listener with the following config lines.
68 |
69 | ```
70 | listeners.dp_metrics2.bucket = metrics2.0
71 | listeners.dp_metrics2.protocol = tcp
72 | listeners.dp_metrics2.port = 2004
73 | ```
74 |
75 | The Metrics 2.0 protocol is fully supported; all Metrics 2.0 metrics use the base metric `metric`, with the data carried entirely in tags.
76 |
77 | ## Prometheus Scraper
78 |
79 | Enable the Prometheus scraper with the following config lines.
80 | ```
81 | prometheus_scrapers.node_exporter.bucket = prom
82 | prometheus_scrapers.node_exporter.url = http://localhost:9100/metrics
83 | prometheus_scrapers.node_exporter.frequency = 10000
84 | ```
85 |
86 | ## Prometheus Remote Writer
87 |
88 | To enable the Prometheus remote write API:
89 | ```
90 | listeners.dp_prom_writer.bucket = promwriter
91 | listeners.dp_prom_writer.port = 1234
92 | listeners.dp_prom_writer.protocol = http
93 | ```
94 |
95 | And configure the remote write target in the Prometheus config:
96 | ```yaml
97 | remote_write:
98 |   url: "http://:1234/receive"
99 | ```
100 |
101 |
102 | ## OpenTSDB
103 |
104 | Enable the OpenTSDB listener with the following config lines.
105 |
106 | ```
107 | listeners.dp_otsdb.bucket = OpenTSDB
108 | listeners.dp_otsdb.protocol = tcp
109 | listeners.dp_otsdb.port = 4242
110 | ```
111 |
112 | # Log Listener Configuration
113 |
114 | Settings are configured in dpx.conf.
115 |
116 | ## Syslog
117 |
118 | Enable the Syslog listener with the following config lines.
119 |
120 | ```
121 | listeners.dp_syslog.bucket = syslog
122 | listeners.dp_syslog.port = 514
123 | listeners.dp_syslog.protocol = udp
124 | ```
125 |
126 | Build
127 | -----
128 |
129 | ```bash
130 | $ rebar3 compile
131 | ```
132 |
133 | Release
134 | -------
135 |
136 | ```bash
137 | $ rebar3 release
138 | ```
139 |
-------------------------------------------------------------------------------- /apps/ddb_proxy/include/.gitignore: --------------------------------------------------------------------------------
1 | remote_pb.hrl
2 |
-------------------------------------------------------------------------------- /apps/ddb_proxy/proto/remote.proto: --------------------------------------------------------------------------------
1 | // Copyright 2016 Prometheus Team
2 | // Licensed under the Apache License, Version 2.0 (the "License");
3 | // you may not use this file except in compliance with the License.
4 | // You may obtain a copy of the License at
5 | //
6 | // http://www.apache.org/licenses/LICENSE-2.0
7 | //
8 | // Unless required by applicable law or agreed to in writing, software
9 | // distributed under the License is distributed on an "AS IS" BASIS,
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | // See the License for the specific language governing permissions and
12 | // limitations under the License.
13 |
14 | syntax = "proto3";
15 |
16 | package remote;
17 |
18 | message Sample {
19 | double value = 1;
20 | int64 timestamp_ms = 2;
21 | }
22 |
23 | message LabelPair {
24 | string name = 1;
25 | string value = 2;
26 | }
27 |
28 | message TimeSeries {
29 | repeated LabelPair labels = 1;
30 | // Sorted by time, oldest sample first.
31 | repeated Sample samples = 2; 32 | } 33 | 34 | message WriteRequest { 35 | repeated TimeSeries timeseries = 1; 36 | } 37 | -------------------------------------------------------------------------------- /apps/ddb_proxy/rebar.config: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | 3 | %%------------------------------------------------------------------- 4 | %% GPB 5 | %%------------------------------------------------------------------- 6 | 7 | {gpb_opts, [ 8 | {i, "proto"}, 9 | {module_name_suffix, "_pb"}, 10 | {o_erl, "src"}, 11 | {o_hrl, "include"}, 12 | {strings_as_binaries, true}, 13 | type_specs]}. 14 | {provider_hooks, 15 | [{pre, [ 16 | {compile, {protobuf, compile}}, 17 | {clean, {protobuf, clean}} 18 | ]} 19 | ]}. 20 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/.gitignore: -------------------------------------------------------------------------------- 1 | remote_pb.erl 2 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy.app.src: -------------------------------------------------------------------------------- 1 | {application, ddb_proxy, 2 | [{description, "Dalmatiner proxy"}, 3 | {vsn, "0.3.3"}, 4 | {registered, []}, 5 | {mod, { ddb_proxy_app, []}}, 6 | {applications, 7 | [kernel, 8 | lager, 9 | stdlib, 10 | ranch, 11 | hackney, 12 | cowboy, 13 | dp_decoder, 14 | dqe_idx, 15 | dqe_idx_pg, 16 | ddb_client, 17 | ddb_connection, 18 | trie, 19 | snappiest, 20 | jsone 21 | ]}, 22 | {env,[]}, 23 | {modules, []}, 24 | 25 | {maintainers, []}, 26 | {licenses, []}, 27 | {links, []} 28 | ]}. 29 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy_app.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %% @doc ddb_proxy public API 3 | %% @end 4 | %%%------------------------------------------------------------------- 5 | 6 | -module(ddb_proxy_app). 7 | 8 | -behaviour(application). 9 | 10 | %% Application callbacks 11 | -export([start/2, stop/1]). 12 | 13 | %%==================================================================== 14 | %% API 15 | %%==================================================================== 16 | 17 | start(_StartType, _StartArgs) -> 18 | ddb_proxy_sup:start_link(). 19 | 20 | %%-------------------------------------------------------------------- 21 | stop(_State) -> 22 | ok. 23 | 24 | %%==================================================================== 25 | %% Internal functions 26 | %%==================================================================== 27 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy_prom_scraper.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author Heinz Nikolaus Gies 3 | %%% @copyright (C) 2016, Heinz Nikolaus Gies 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 7 Jun 2016 by Heinz Nikolaus Gies 8 | %%%------------------------------------------------------------------- 9 | -module(ddb_proxy_prom_scraper). 10 | 11 | -behaviour(gen_server). 12 | 13 | %% API 14 | -export([start_link/4]). 
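%% start_link(Name, Bucket, URL, Freq) starts one scraper process per
%% configured Prometheus target; every Freq milliseconds it fetches URL and
%% streams the parsed samples into Bucket (see handle_info/2 below).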
15 | 16 | %% gen_server callbacks 17 | -export([init/1, handle_call/3, handle_cast/2, handle_info/2, 18 | terminate/2, code_change/3]). 19 | 20 | -ignore_xref([start_link/4]). 21 | 22 | -define(SERVER, ?MODULE). 23 | 24 | -record(state, {url :: string(), 25 | freq :: pos_integer(), 26 | bucket :: binary(), 27 | %% this looks silly but the prom data 28 | %% already arrives in 1s resolution 29 | res = 1000 div 1000 :: pos_integer(), 30 | ddb}). 31 | 32 | %%%=================================================================== 33 | %%% API 34 | %%%=================================================================== 35 | 36 | %%-------------------------------------------------------------------- 37 | %% @doc 38 | %% Starts the server 39 | %% 40 | %% @spec start_link() -> {ok, Pid} | ignore | {error, Error} 41 | %% @end 42 | %%-------------------------------------------------------------------- 43 | start_link(Name, Bucket, URL, Freq) -> 44 | gen_server:start_link(?MODULE, [Name, Bucket, URL, Freq], []). 45 | 46 | %%%=================================================================== 47 | %%% gen_server callbacks 48 | %%%=================================================================== 49 | 50 | %%-------------------------------------------------------------------- 51 | %% @private 52 | %% @doc 53 | %% Initializes the server 54 | %% 55 | %% @spec init(Args) -> {ok, State} | 56 | %% {ok, State, Timeout} | 57 | %% ignore | 58 | %% {stop, Reason} 59 | %% @end 60 | %%-------------------------------------------------------------------- 61 | init([Name, Bucket, URL, Freq]) -> 62 | lager:info("[prom:~s] Adding scraper on ~s with an interval of ~p", 63 | [Name, URL, Freq]), 64 | erlang:send_after(Freq, self(), scrape), 65 | {Host, Port} = dp_util:ddb_config(), 66 | C = dp_util:ddb_c(ddb_tcp:connect(Host,Port)), 67 | {ok, #{resolution := Res}, C1} = ddb_tcp:bucket_info(Bucket, C), 68 | %% Prom data already arrives in 1s resolution. 69 | Res1 = Res div 1000, 70 | C2 = dp_util:ddb_c(ddb_tcp:stream_mode(Bucket, 5, C1)), 71 | {ok, #state{bucket = Bucket, url = URL, freq = Freq, ddb = C2, res = Res1}}. 72 | 73 | %%-------------------------------------------------------------------- 74 | %% @private 75 | %% @doc 76 | %% Handling call messages 77 | %% 78 | %% @spec handle_call(Request, From, State) -> 79 | %% {reply, Reply, State} | 80 | %% {reply, Reply, State, Timeout} | 81 | %% {noreply, State} | 82 | %% {noreply, State, Timeout} | 83 | %% {stop, Reason, Reply, State} | 84 | %% {stop, Reason, State} 85 | %% @end 86 | %%-------------------------------------------------------------------- 87 | handle_call(_Request, _From, State) -> 88 | Reply = ok, 89 | {reply, Reply, State}. 90 | 91 | %%-------------------------------------------------------------------- 92 | %% @private 93 | %% @doc 94 | %% Handling cast messages 95 | %% 96 | %% @spec handle_cast(Msg, State) -> {noreply, State} | 97 | %% {noreply, State, Timeout} | 98 | %% {stop, Reason, State} 99 | %% @end 100 | %%-------------------------------------------------------------------- 101 | handle_cast(_Msg, State) -> 102 | {noreply, State}. 
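%% Scrape cycle: on each 'scrape' message the target URL is fetched with
%% hackney; a 200 response body is split into lines, each line is parsed with
%% dp_prometheus:parse/1, and every resulting metric is forwarded to
%% DalmatinerDB via do_send/2. The timer is then re-armed with
%% erlang:send_after/3, so a failed scrape only skips one interval.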
103 | 104 | %%-------------------------------------------------------------------- 105 | %% @private 106 | %% @doc 107 | %% Handling all non call/cast messages 108 | %% 109 | %% @spec handle_info(Info, State) -> {noreply, State} | 110 | %% {noreply, State, Timeout} | 111 | %% {stop, Reason, State} 112 | %% @end 113 | %%-------------------------------------------------------------------- 114 | 115 | handle_info(scrape, State = #state{url = URL, freq = F}) -> 116 | State1 = case hackney:get(URL) of 117 | {ok, 200, _Hdrs, Client} -> 118 | {ok, Body} = hackney:body(Client), 119 | L = binary:split(Body, [<<"\r\n">>, <<"\n">>], [global]), 120 | Metrics = [dp_prometheus:parse(E) || E <- L], 121 | Metrics2 = lists:flatten([M || {ok, M} <- Metrics]), 122 | lager:info("scrape: ~s -> ~p~n", [URL, length(Metrics2)]), 123 | lists:foldl(fun do_send/2, State, Metrics2); 124 | _ -> 125 | lager:error("scrape error on: ~s~n", [URL]), 126 | State 127 | end, 128 | erlang:send_after(F, self(), scrape), 129 | {noreply, State1}; 130 | 131 | handle_info(_Info, State) -> 132 | {noreply, State}. 133 | 134 | %%-------------------------------------------------------------------- 135 | %% @private 136 | %% @doc 137 | %% This function is called by a gen_server when it is about to 138 | %% terminate. It should be the opposite of Module:init/1 and do any 139 | %% necessary cleaning up. When it returns, the gen_server terminates 140 | %% with Reason. The return value is ignored. 141 | %% 142 | %% @spec terminate(Reason, State) -> void() 143 | %% @end 144 | %%-------------------------------------------------------------------- 145 | terminate(_Reason, _State) -> 146 | ok. 147 | 148 | %%-------------------------------------------------------------------- 149 | %% @private 150 | %% @doc 151 | %% Convert process state when code is changed 152 | %% 153 | %% @spec code_change(OldVsn, State, Extra) -> {ok, NewState} 154 | %% @end 155 | %%-------------------------------------------------------------------- 156 | code_change(_OldVsn, State, _Extra) -> 157 | {ok, State}. 158 | 159 | %%%=================================================================== 160 | %%% Internal functions 161 | %%%=================================================================== 162 | do_send(Decoded = #{time := Time, key := Key, value := Value}, 163 | State = #state{bucket = Bucket, ddb = C, res = R}) -> 164 | KeyBin = dproto:metric_from_list(Key), 165 | Points = mmath_bin:from_list([Value]), 166 | C1 = dp_util:ddb_c(ddb_tcp:send(KeyBin, Time div R, Points, C)), 167 | dp_index:add(Bucket, Decoded, Time), 168 | State#state{ddb = C1}. 169 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy_prom_sup.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author Heinz Nikolaus Gies 3 | %%% @copyright (C) 2016, Heinz Nikolaus Gies 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 7 Jun 2016 by Heinz Nikolaus Gies 8 | %%%------------------------------------------------------------------- 9 | -module(ddb_proxy_prom_sup). 10 | 11 | -behaviour(supervisor). 12 | 13 | %% API 14 | -export([start_link/0]). 15 | 16 | %% Supervisor callbacks 17 | -export([init/1]). 18 | 19 | -ignore_xref([start_link/0]). 20 | 21 | -define(SERVER, ?MODULE). 
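%% This supervisor builds one permanent worker per scrape target configured in
%% the ddb_proxy application's prom_scrapers environment; each entry is a
%% {Name, Bucket, URL, Frequency} tuple (see child/4 and init/1 below).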
22 | 23 | %%%=================================================================== 24 | %%% API functions 25 | %%%=================================================================== 26 | 27 | %%-------------------------------------------------------------------- 28 | %% @doc 29 | %% Starts the supervisor 30 | %% 31 | %% @spec start_link() -> {ok, Pid} | ignore | {error, Error} 32 | %% @end 33 | %%-------------------------------------------------------------------- 34 | start_link() -> 35 | supervisor:start_link({local, ?SERVER}, ?MODULE, []). 36 | 37 | %%%=================================================================== 38 | %%% Supervisor callbacks 39 | %%%=================================================================== 40 | 41 | child(Name, Bucket, URL, Frequency) -> 42 | Restart = permanent, 43 | Shutdown = 2000, 44 | Type = worker, 45 | {Name, {dqe_proxy_prom_scraper, start_link, [Name, Bucket, URL, Frequency]}, 46 | Restart, Shutdown, Type, [dqe_proxy_prom_scraper]}. 47 | %%-------------------------------------------------------------------- 48 | %% @private 49 | %% @doc 50 | %% Whenever a supervisor is started using supervisor:start_link/[2,3], 51 | %% this function is called by the new process to find out about 52 | %% restart strategy, maximum restart frequency and child 53 | %% specifications. 54 | %% 55 | %% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} | 56 | %% ignore | 57 | %% {error, Reason} 58 | %% @end 59 | %%-------------------------------------------------------------------- 60 | init([]) -> 61 | RestartStrategy = one_for_one, 62 | MaxRestarts = 1000, 63 | MaxSecondsBetweenRestarts = 3600, 64 | SupFlags = {RestartStrategy, MaxRestarts, MaxSecondsBetweenRestarts}, 65 | Targets = application:get_env(ddb_proxy, prom_scrapers, []), 66 | Children = [child(Name, Bucket, URL, Frequency) 67 | || {Name, Bucket, URL, Frequency} <- Targets], 68 | {ok, {SupFlags, Children}}. 69 | 70 | %%%=================================================================== 71 | %%% Internal functions 72 | %%%=================================================================== 73 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy_sup.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %% @doc ddb_proxy top level supervisor. 3 | %% @end 4 | %%%------------------------------------------------------------------- 5 | 6 | -module(ddb_proxy_sup). 7 | 8 | -behaviour(supervisor). 9 | 10 | %% API 11 | -export([start_link/0]). 12 | 13 | %% Supervisor callbacks 14 | -export([init/1]). 15 | 16 | -define(SERVER, ?MODULE). 17 | -define(ACCEPTOR_COUNT, 100). 18 | 19 | %%==================================================================== 20 | %% API functions 21 | %%==================================================================== 22 | 23 | listener_name({Decoder, Bucket, Port, Protocol}) -> 24 | DecoderB = atom_to_binary(Decoder, utf8), 25 | PortB = integer_to_binary(Port), 26 | ProtocolB = atom_to_binary(Protocol, utf8), 27 | BName = <>, 29 | binary_to_atom(BName, utf8). 
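%% listener_name/1 derives a unique registered name for each listener from its
%% decoder module, bucket, protocol and port; listener/1 below then starts the
%% matching ranch acceptor (tcp), cowboy handler (http) or ddb_proxy_udp
%% server (udp) for that configuration.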
30 | 31 | listener({Decoder, Bucket, Port, Protocol} = L) 32 | when is_atom(Decoder), 33 | is_binary(Bucket), 34 | is_integer(Port), 35 | Port > 0 -> 36 | Name = listener_name(L), 37 | State = #{bucket => Bucket, decoder => Decoder, 38 | proto => Decoder:protocol()}, 39 | lager:info("[listener:~s] Adding listener on bucket: ~p and port ~p~n", 40 | [Name, Bucket, Port]), 41 | start_listener(Protocol, Name, Port, State). 42 | 43 | start_listener(tcp, Name, Port, State) -> 44 | Proto = dp_tcp_listener, 45 | {ok, _} = ranch:start_listener(Name, ?ACCEPTOR_COUNT, 46 | ranch_tcp, [{port, Port}], 47 | Proto, State), 48 | ok; 49 | 50 | start_listener(udp, Name, Port, State) -> 51 | {ok, _} = ddb_proxy_udp:start_listener(Name, Port, State), 52 | ok; 53 | 54 | start_listener(http, Name, Port, State) -> 55 | Proto = dp_http_listener, 56 | Dispatch = cowboy_router:compile([{'_', [{"/[...]", Proto, State}]}]), 57 | {ok, _} = cowboy:start_http(Name, ?ACCEPTOR_COUNT, 58 | [{port, Port}], 59 | [{env, [{dispatch, Dispatch}]}, 60 | {max_keepalive, 5}, 61 | {timeout, 50000}]), 62 | ok. 63 | 64 | start_link() -> 65 | R = supervisor:start_link({local, ?SERVER}, ?MODULE, []), 66 | {ok, Listeners} = application:get_env(ddb_proxy, listeners), 67 | dqe_idx_pg:init(), 68 | [listener(L) || L <- Listeners], 69 | R. 70 | 71 | %%==================================================================== 72 | %% Supervisor callbacks 73 | %%==================================================================== 74 | 75 | %% Child :: {Id,StartFunc,Restart,Shutdown,Type,Modules} 76 | init([]) -> 77 | Restart = permanent, 78 | Shutdown = 2000, 79 | Type = supervisor, 80 | Prom = {ddb_proxy_prom_sup, 81 | {ddb_proxy_prom_sup, start_link, []}, 82 | Restart, Shutdown, Type, [ddb_proxy_prom_sup]}, 83 | Idx = {dp_index, 84 | {dp_index, start_link, []}, 85 | Restart, Shutdown, worker, [dp_index]}, 86 | {ok, { {one_for_one, 0, 1}, [Prom, Idx]} }. 87 | 88 | %%==================================================================== 89 | %% Internal functions 90 | %%==================================================================== 91 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/ddb_proxy_udp.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author Heinz Nikolaus Gies 3 | %%% @copyright (C) 2016, Heinz Nikolaus Gies 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 19 Sep 2016 by Heinz Nikolaus Gies 8 | %%%------------------------------------------------------------------- 9 | -module(ddb_proxy_udp). 10 | 11 | -behaviour(gen_server). 12 | 13 | %% API 14 | -export([start_listener/3, start_link/3]). 15 | 16 | %% gen_server callbacks 17 | -export([init/1, handle_call/3, handle_cast/2, handle_info/2, 18 | terminate/2, code_change/3]). 19 | 20 | -define(SERVER, ?MODULE). 21 | 22 | -record(state, 23 | { 24 | port, 25 | socket, 26 | ds, 27 | name 28 | }). 29 | 30 | %%%=================================================================== 31 | %%% API 32 | %%%=================================================================== 33 | 34 | start_listener(Name, Port, State) -> 35 | start_link(Name, Port, State). 
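%% Unlike the TCP and HTTP listeners there is no acceptor pool here: a single
%% gen_server owns the UDP socket, decodes every datagram with the configured
%% decoder module and forwards the resulting events to ddb_connection:events/2
%% (see handle_info/2 below).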
36 | 37 | %%-------------------------------------------------------------------- 38 | %% @doc 39 | %% Starts the server 40 | %% 41 | %% @spec start_link() -> {ok, Pid} | ignore | {error, Error} 42 | %% @end 43 | %%-------------------------------------------------------------------- 44 | start_link(Name, Port, State) -> 45 | gen_server:start_link({local, Name}, ?MODULE, [Name, Port, State], []). 46 | 47 | %%%=================================================================== 48 | %%% gen_server callbacks 49 | %%%=================================================================== 50 | 51 | %%-------------------------------------------------------------------- 52 | %% @private 53 | %% @doc 54 | %% Initializes the server 55 | %% 56 | %% @spec init(Args) -> {ok, State} | 57 | %% {ok, State, Timeout} | 58 | %% ignore | 59 | %% {stop, Reason} 60 | %% @end 61 | %%-------------------------------------------------------------------- 62 | init([Name, Port, State]) -> 63 | {ok, Socket} = gen_udp:open(Port, [binary, {active, true}]), 64 | {ok, #state{ 65 | socket = Socket, 66 | port = Port, 67 | ds = State, 68 | name = Name 69 | }}. 70 | 71 | %%-------------------------------------------------------------------- 72 | %% @private 73 | %% @doc 74 | %% Handling call messages 75 | %% 76 | %% @spec handle_call(Request, From, State) -> 77 | %% {reply, Reply, State} | 78 | %% {reply, Reply, State, Timeout} | 79 | %% {noreply, State} | 80 | %% {noreply, State, Timeout} | 81 | %% {stop, Reason, Reply, State} | 82 | %% {stop, Reason, State} 83 | %% @end 84 | %%-------------------------------------------------------------------- 85 | handle_call(_Request, _From, State) -> 86 | Reply = ok, 87 | {reply, Reply, State}. 88 | 89 | %%-------------------------------------------------------------------- 90 | %% @private 91 | %% @doc 92 | %% Handling cast messages 93 | %% 94 | %% @spec handle_cast(Msg, State) -> {noreply, State} | 95 | %% {noreply, State, Timeout} | 96 | %% {stop, Reason, State} 97 | %% @end 98 | %%-------------------------------------------------------------------- 99 | handle_cast(_Msg, State) -> 100 | {noreply, State}. 101 | 102 | %%-------------------------------------------------------------------- 103 | %% @private 104 | %% @doc 105 | %% Handling all non call/cast messages 106 | %% 107 | %% @spec handle_info(Info, State) -> {noreply, State} | 108 | %% {noreply, State, Timeout} | 109 | %% {stop, Reason, State} 110 | %% @end 111 | %%-------------------------------------------------------------------- 112 | handle_info({udp, _Socket, IP, _InPortNo, Packet}, 113 | State = #state{ds = #{decoder := Decoder, bucket := Bucket}}) -> 114 | case Decoder:parse(Packet) of 115 | {ok, Es} -> 116 | %% TODO, how to handled metrics 117 | Events = 118 | [{T, D#{ 119 | <<"src_ip">> => ip2str(IP) 120 | }} || #{time := T, type := event, data := D} <- Es], 121 | io:format("~p~n", [Events]), 122 | ddb_connection:events(Bucket, Events); 123 | Er -> 124 | io:format("bad packet: ~p~n~n ==> ~p~n", [Packet, Er]) 125 | end, 126 | {noreply, State}; 127 | 128 | handle_info(Info, State) -> 129 | io:format("nknown udp message: ~p~n", [Info]), 130 | {noreply, State}. 131 | 132 | 133 | 134 | %%-------------------------------------------------------------------- 135 | %% @private 136 | %% @doc 137 | %% This function is called by a gen_server when it is about to 138 | %% terminate. It should be the opposite of Module:init/1 and do any 139 | %% necessary cleaning up. When it returns, the gen_server terminates 140 | %% with Reason. 
The return value is ignored. 141 | %% 142 | %% @spec terminate(Reason, State) -> void() 143 | %% @end 144 | %%-------------------------------------------------------------------- 145 | terminate(_Reason, _State) -> 146 | ok. 147 | 148 | %%-------------------------------------------------------------------- 149 | %% @private 150 | %% @doc 151 | %% Convert process state when code is changed 152 | %% 153 | %% @spec code_change(OldVsn, State, Extra) -> {ok, NewState} 154 | %% @end 155 | %%-------------------------------------------------------------------- 156 | code_change(_OldVsn, State, _Extra) -> 157 | {ok, State}. 158 | 159 | %%%=================================================================== 160 | %%% Internal functions 161 | %%%=================================================================== 162 | 163 | ip2str({A, B, C, D}) -> 164 | list_to_binary(io_lib:format("~p.~p.~p.~p", [A, B, C, D])). 165 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_binary_proto.erl: -------------------------------------------------------------------------------- 1 | -module(dp_binary_proto). 2 | 3 | -export([send/3]). 4 | 5 | send(Data, _, State) -> 6 | State1 = decode_metrics(Data, State), 7 | {<<>>, State1}. 8 | 9 | decode_metrics(Line, State = #{decoder := Decoder}) -> 10 | {ok, Decoded} = Decoder:parse(Line), 11 | lists:foldl(fun decode_metric/2, State, Decoded). 12 | 13 | decode_metric(Metric, State = #{bucket := Bucket, ddb := C, 14 | res := Res}) -> 15 | #{time := Time, key := Key, value := Value} = Metric, 16 | KeyBin = dproto:metric_from_list(Key), 17 | Points = mmath_bin:from_list([Value]), 18 | C1 = dp_util:ddb_c(ddb_tcp:send(KeyBin, Time div Res, Points, C)), 19 | dp_index:add(Bucket, Metric, Time), 20 | State#{ddb => C1}. 21 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_http_listener.erl: -------------------------------------------------------------------------------- 1 | -module(dp_http_listener). 2 | -behaviour(cowboy_http_handler). 3 | 4 | -export([init/3, handle/2, terminate/3]). 5 | 6 | -ignore_xref([init/3, handle/2, terminate/3]). 7 | 8 | init(_Transport, Req, State = #{bucket := Bucket}) -> 9 | {Host, Port} = dp_util:ddb_config(), 10 | C = dp_util:ddb_c(ddb_tcp:connect(Host, Port)), 11 | {ok, #{resolution := Res}, C1} = ddb_tcp:bucket_info(Bucket, C), 12 | Res1 = Res div 1000, 13 | C2 = dp_util:ddb_c(ddb_tcp:stream_mode(Bucket, 5 , C1)), 14 | {ok, Req, State#{ddb => C2, res => Res1}}. 15 | 16 | -dialyzer({no_opaque, handle/2}). 17 | handle(Req, State = #{proto := Proto}) -> 18 | case cowboy_req:body_length(Req) of 19 | {_L, Req1} when _L > 0 -> 20 | {ok, Body, Req2} = cowboy_req:body(Req1), 21 | {_Acc, State1} = Proto:send(Body, <<>>, State), 22 | {ok, Req2, State1}; 23 | %% This is dirty - some output plugins such as telegraf expect to be 24 | %% able to create a database. The success result is a lie. 25 | {_, Req1} -> 26 | ReqX = 27 | case cowboy_req:qs(Req1) of 28 | {<<"q=show%20databases">>, Req2} -> 29 | S = #{name => <<"databases">>, 30 | columns => [<<"name">>], 31 | values => []}, 32 | D = #{results => 33 | [#{series => [S]}] 34 | }, 35 | json_reply(D, Req2); 36 | {S, Req2} -> 37 | lager:info("Query: ~s", [S]), 38 | D = [{<<"results">>, [#{}]}], 39 | json_reply(D, Req2) 40 | end, 41 | {ok, ReqX, State} 42 | end. 43 | 44 | terminate(_Reason, _Req, _State = #{ddb := C}) -> 45 | ddb_tcp:close(C), 46 | ok. 
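%% json_reply/2 below encodes the given term with jsone and replies with a 200
%% and an application/json content type; it is only used for the placeholder
%% query responses in handle/2 above.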
47 | 48 | json_reply(JSON, Req) -> 49 | {ok, Req1} = 50 | cowboy_req:reply( 51 | 200, [{<<"content-type">>, <<"application/json">>}], 52 | jsone:encode(JSON), Req), 53 | Req1. 54 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_index.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author Heinz Nikolaus Gies 3 | %%% @copyright (C) 2016, Heinz Nikolaus Gies 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 4 Sep 2016 by Heinz Nikolaus Gies 8 | %%%------------------------------------------------------------------- 9 | -module(dp_index). 10 | 11 | -behaviour(gen_server). 12 | 13 | %% API 14 | -export([start_link/0, add/3]). 15 | 16 | %% gen_server callbacks 17 | -export([init/1, handle_call/3, handle_cast/2, handle_info/2, 18 | terminate/2, code_change/3]). 19 | 20 | -ignore_xref([start_link/0]). 21 | 22 | -define(SERVER, ?MODULE). 23 | 24 | -record(state, { 25 | seen = btrie:new(), 26 | last_seen_update = 10*60 % 10m 27 | }). 28 | 29 | %%%=================================================================== 30 | %%% API 31 | %%%=================================================================== 32 | 33 | %%-------------------------------------------------------------------- 34 | %% @doc 35 | %% Starts the server 36 | %% 37 | %% @spec start_link() -> {ok, Pid} | ignore | {error, Error} 38 | %% @end 39 | %%--------------------------------------------------------------------x 40 | start_link() -> 41 | gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). 42 | 43 | add(Bucket, Metric, Time) -> 44 | case erlang:process_info(whereis(?SERVER), message_queue_len) of 45 | {message_queue_len, N} when N > 100 -> 46 | gen_server:call(?SERVER, {tags, Bucket, Metric, Time}); 47 | _ -> 48 | gen_server:cast(?SERVER, {tags, Bucket, Metric, Time}) 49 | end. 50 | 51 | %%%=================================================================== 52 | %%% gen_server callbacks 53 | %%%=================================================================== 54 | 55 | %%-------------------------------------------------------------------- 56 | %% @private 57 | %% @doc 58 | %% Initializes the server 59 | %% 60 | %% @spec init(Args) -> {ok, State} | 61 | %% {ok, State, Timeout} | 62 | %% ignore | 63 | %% {stop, Reason} 64 | %% @end 65 | %%-------------------------------------------------------------------- 66 | init([]) -> 67 | {ok, #state{}}. 68 | 69 | %%-------------------------------------------------------------------- 70 | %% @private 71 | %% @doc 72 | %% Handling call messages 73 | %% 74 | %% @spec handle_call(Request, From, State) -> 75 | %% {reply, Reply, State} | 76 | %% {reply, Reply, State, Timeout} | 77 | %% {noreply, State} | 78 | %% {noreply, State, Timeout} | 79 | %% {stop, Reason, Reply, State} | 80 | %% {stop, Reason, State} 81 | %% @end 82 | %%-------------------------------------------------------------------- 83 | handle_call({tags, Bucket, Metric, Time}, _From, State) -> 84 | State1 = do_add(Bucket, Metric, Time, State), 85 | {reply, ok, State1}; 86 | handle_call(_Request, _From, State) -> 87 | Reply = ok, 88 | {reply, Reply, State}. 
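%% Both the call and the cast path end up in do_add/4 (see the internal
%% functions below), which keeps a btrie of Bucket/Key pairs that have already
%% been indexed and only re-touches or re-adds an entry once the
%% last_seen_update TTL has expired, so the indexer is not hit for every
%% single data point.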
89 | 90 | %%-------------------------------------------------------------------- 91 | %% @private 92 | %% @doc 93 | %% Handling cast messages 94 | %% 95 | %% @spec handle_cast(Msg, State) -> {noreply, State} | 96 | %% {noreply, State, Timeout} | 97 | %% {stop, Reason, State} 98 | %% @end 99 | %%-------------------------------------------------------------------- 100 | handle_cast({tags, Bucket, Metric, Time}, State) -> 101 | State1 = do_add(Bucket, Metric, Time, State), 102 | {noreply, State1}; 103 | handle_cast(_Msg, State) -> 104 | {noreply, State}. 105 | 106 | 107 | %%-------------------------------------------------------------------- 108 | %% @private 109 | %% @doc 110 | %% Handling all non call/cast messages 111 | %% 112 | %% @spec handle_info(Info, State) -> {noreply, State} | 113 | %% {noreply, State, Timeout} | 114 | %% {stop, Reason, State} 115 | %% @end 116 | %%-------------------------------------------------------------------- 117 | handle_info(_Info, State) -> 118 | {noreply, State}. 119 | 120 | %%-------------------------------------------------------------------- 121 | %% @private 122 | %% @doc 123 | %% This function is called by a gen_server when it is about to 124 | %% terminate. It should be the opposite of Module:init/1 and do any 125 | %% necessary cleaning up. When it returns, the gen_server terminates 126 | %% with Reason. The return value is ignored. 127 | %% 128 | %% @spec terminate(Reason, State) -> void() 129 | %% @end 130 | %%-------------------------------------------------------------------- 131 | terminate(_Reason, _State) -> 132 | ok. 133 | 134 | %%-------------------------------------------------------------------- 135 | %% @private 136 | %% @doc 137 | %% Convert process state when code is changed 138 | %% 139 | %% @spec code_change(OldVsn, State, Extra) -> {ok, NewState} 140 | %% @end 141 | %%-------------------------------------------------------------------- 142 | code_change(_OldVsn, State, _Extra) -> 143 | {ok, State}. 144 | 145 | %%%=================================================================== 146 | %%% Internal functions 147 | %%%=================================================================== 148 | 149 | do_add(Bucket, Metric = #{key := Key}, Time, State = #state{seen = Seen, last_seen_update = TTL}) -> 150 | KeyBin = dproto:metric_from_list(Key), 151 | K = <>, 152 | case btrie:find(K, Seen) of 153 | {ok, Last} when Time - Last < TTL -> 154 | State; 155 | {ok, _Last} -> 156 | dqe_idx:touch([{Bucket, Key, Time}]), 157 | State#state{seen = btrie:store(K, Time, Seen)}; 158 | error -> 159 | #{metric := MetricParts, tags := Tags} = 160 | dp_util:expand_tags(Metric), 161 | dqe_idx:add(Bucket, MetricParts, Bucket, Key, Time, Tags), 162 | State#state{seen = btrie:store(K, Time, Seen)} 163 | end. 164 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_line_proto.erl: -------------------------------------------------------------------------------- 1 | -module(dp_line_proto). 2 | 3 | -export([send/3]). 4 | 5 | send(Acc, Line, State) -> 6 | fold_lines(Acc, Line, State). 7 | 8 | fold_lines(<<"\r\n", R/binary>>, Line, State) -> 9 | State1 = decode_metrics(Line, State), 10 | fold_lines(R, <<>>, State1); 11 | fold_lines(<<"\n", R/binary>>, Line, State) -> 12 | State1 = decode_metrics(Line, State), 13 | fold_lines(R, <<>>, State1); 14 | fold_lines(<<>>, Line, State) -> 15 | {Line, State}; 16 | fold_lines(<>, Line, State) -> 17 | fold_lines(R, <>, State). 
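%% fold_lines/3 walks the incoming buffer byte by byte, decoding a metric for
%% every complete \r\n- or \n-terminated line and returning the trailing
%% partial line, so the caller (dp_tcp_listener) can prepend it to the next
%% TCP chunk.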
18 | 19 | decode_metrics(Line, State = #{decoder := Decoder}) -> 20 | {ok, Decoded} = Decoder:parse(Line), 21 | lists:foldl(fun decode_metric/2, State, Decoded). 22 | 23 | decode_metric(Metric, State = #{bucket := Bucket, ddb := C}) -> 24 | #{time := Time, key := Key, value := Value} = Metric, 25 | KeyBin = dproto:metric_from_list(Key), 26 | Points = mmath_bin:from_list([Value]), 27 | C1 = dp_util:ddb_c(ddb_tcp:send(KeyBin, Time, Points, C)), 28 | dp_index:add(Bucket, Metric, Time), 29 | State#{ddb => C1}. 30 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_multiline_proto.erl: -------------------------------------------------------------------------------- 1 | -module(dp_multiline_proto). 2 | 3 | -export([send/3]). 4 | 5 | -ignore_xref([send/3]). 6 | 7 | send(Line, Acc, State) -> 8 | send1(<>, State). 9 | 10 | send1(<<>>, State) -> 11 | {<<>>, State}; 12 | 13 | send1(Line, State = #{decoder := Decoder}) -> 14 | case Decoder:parse(Line) of 15 | {ok, Decoded, R} -> 16 | State1 = send_metrics(Decoded, State), 17 | send1(R, State1); 18 | {ok, Decoded} -> 19 | State1 = send_metrics(Decoded, State), 20 | send1(<<>>, State1); 21 | {incomplete, R} -> 22 | {R, State} 23 | end. 24 | 25 | send_metrics([], State) -> 26 | State; 27 | send_metrics([#{ time := T} | _ ] = Ms, State = #{ddb := C, res := R}) -> 28 | C1 = dp_util:ddb_c(ddb_tcp:batch_start(T, C)), 29 | send_metrics(Ms, T div R, [], State#{ddb => C1}). 30 | 31 | send_metrics([M = #{time := Tin, key := Key, value := Value} | Ms], 32 | T, Acc, State = #{bucket := Bucket, res := R}) 33 | when Tin div R =:= T -> 34 | dp_index:add(Bucket, M, Tin), 35 | KeyBin = dproto:metric_from_list(Key), 36 | Points = mmath_bin:from_list([Value]), 37 | send_metrics(Ms, T, [{KeyBin, Points} | Acc], State); 38 | send_metrics([M = #{time := T, key := Key, value := Value} | Ms], 39 | _, Acc, State = #{ddb := C, bucket := Bucket, res := R}) -> 40 | %%lager:info("Batch size: ~p", [length(Acc)]), 41 | C1 = dp_util:ddb_c(ddb_tcp:batch(Acc, C)), 42 | C2 = dp_util:ddb_c(ddb_tcp:batch_end(C1)), 43 | C3 = dp_util:ddb_c(ddb_tcp:batch_start(T, C2)), 44 | KeyBin = dproto:metric_from_list(Key), 45 | Points = mmath_bin:from_list([Value]), 46 | dp_index:add(Bucket, M, T), 47 | send_metrics(Ms, T div R, [{KeyBin, Points}], State#{ddb => C3}); 48 | send_metrics([], _, Acc, State = #{ddb := C}) -> 49 | %%lager:info("Batch size: ~p", [length(Acc)]), 50 | C1 = dp_util:ddb_c(ddb_tcp:batch(Acc, C)), 51 | C2 = dp_util:ddb_c(ddb_tcp:batch_end(C1)), 52 | State#{ddb => C2}. 53 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_prom_writer.erl: -------------------------------------------------------------------------------- 1 | -module(dp_prom_writer). 2 | 3 | -export([protocol/0, parse/1]). 4 | -include("remote_pb.hrl"). 5 | 6 | parse(Data) -> 7 | Uncompressed = unsnap(Data), 8 | Msg = remote_pb:decode_msg(Uncompressed, 'WriteRequest'), 9 | 10 | Metrics = lists:flatten([decode(TS) || TS <- Msg#'WriteRequest'.timeseries]), 11 | 12 | {ok, Metrics}. 13 | 14 | protocol() -> 15 | dp_binary_proto. 16 | 17 | 18 | unsnap(<<255,6,0,0, "sNaPpY", Data/binary>>) -> 19 | << << (unsnap_chunk(T, E))/binary >> || 20 | <> <= Data >>; 21 | 22 | unsnap(Compressed) -> 23 | {ok, C} = snappiest:decompress(Compressed), 24 | C. 25 | 26 | -define(SNAPPY_CHUNK, 16#00). 27 | -define(UNCOMPRESSED_CHUNK, 16#01). 28 | -define(PADDING_CHUNK, 16#fe). 29 | 30 | -define(UNSKIPPLE_START, 16#02). 
31 | -define(UNSKIPPLE_END, 16#7f). 32 | 33 | -define(SKIPPLE_START, 16#80). 34 | -define(SKIPPLE_END, 16#fd). 35 | 36 | unsnap_chunk(?SNAPPY_CHUNK, <<_CrC:32, Compressed/binary>>) -> 37 | {ok, C} = snappiest:decompress(Compressed), 38 | C; 39 | unsnap_chunk(?UNCOMPRESSED_CHUNK, D) -> 40 | D; 41 | unsnap_chunk(?PADDING_CHUNK, _) -> 42 | <<>>; 43 | unsnap_chunk(T, _) when T >= ?SKIPPLE_START, 44 | T =< ?SKIPPLE_END -> 45 | <<>>; 46 | unsnap_chunk(T, _) when T >= ?UNSKIPPLE_START, 47 | T =< ?UNSKIPPLE_END -> 48 | error(badarg). 49 | 50 | decode(#'TimeSeries'{labels = Labels, samples = Samples}) -> 51 | Tags = lists:sort([decode_label(Label) || Label <- Labels]), 52 | Metric = get_name(Tags), 53 | Key = make_key(Tags), 54 | Base = #{metric => Metric, key => Key, tags => Tags}, 55 | apply_samples(Base, Samples). 56 | 57 | 58 | decode_label(#'LabelPair'{name = Name, value = Value}) -> 59 | {<<>>, Name, Value}. 60 | 61 | get_name(Tags) -> 62 | {_, _, Name} = lists:keyfind(<<"__name__">>, 2, Tags), 63 | [Name]. 64 | 65 | make_key(Tags) -> 66 | [<> || {_, N, V} <- Tags]. 67 | 68 | 69 | apply_samples(Base, Samples) -> 70 | [Base#{value => V, time => T div 1000} || 71 | #'Sample'{value = V, timestamp_ms = T} <- Samples, 72 | is_number(V)]. 73 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_tcp_listener.erl: -------------------------------------------------------------------------------- 1 | -module(dp_tcp_listener). 2 | -behaviour(ranch_protocol). 3 | 4 | -export([start_link/4]). 5 | -export([init/4]). 6 | 7 | start_link(Ref, Socket, Transport, Opts) -> 8 | Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]), 9 | {ok, Pid}. 10 | 11 | init(Ref, Socket, Transport, M = #{bucket := Bucket}) -> 12 | ok = ranch:accept_ack(Ref), 13 | {Host, Port} = dp_util:ddb_config(), 14 | C = dp_util:ddb_c(ddb_tcp:connect(Host,Port)), 15 | C1 = dp_util:ddb_c(ddb_tcp:stream_mode(Bucket, 5, C)), 16 | loop(Socket, Transport, <<>>, M#{ddb => C1}). 17 | 18 | loop(Socket, Transport, Acc, State) -> 19 | case Transport:recv(Socket, 0, 5000) of 20 | {ok, Data} -> 21 | Acc1 = <>, 22 | {Acc2, State1} = dp_line_proto:send(Acc1, <<>>, State), 23 | loop(Socket, Transport, Acc2, State1); 24 | {error, timeout} -> 25 | loop(Socket, Transport, Acc, State); 26 | _ -> 27 | ok = Transport:close(Socket) 28 | end. 29 | -------------------------------------------------------------------------------- /apps/ddb_proxy/src/dp_util.erl: -------------------------------------------------------------------------------- 1 | -module(dp_util). 2 | 3 | -export([expand_tags/1, ddb_config/0, ddb_c/1]). 4 | 5 | expand_tags(M = #{tags := Tags, key := Metric}) -> 6 | L = {<<"ddb">>, <<"key_length">>, 7 | integer_to_binary(length(Metric))}, 8 | M#{tags => add_tags(1, Metric, [L | Tags])}. 9 | 10 | add_tags(_, [], Tags) -> 11 | Tags; 12 | add_tags(N, [E | R], Tags) -> 13 | PosBin = integer_to_binary(N), 14 | T = {<<"ddb">>, <<"part_", PosBin/binary>>, E}, 15 | add_tags(N + 1, R, [T | Tags]). 16 | 17 | 18 | ddb_config() -> 19 | ddb_connection_sup:endpoint(). 20 | 21 | ddb_c({ok, C}) -> 22 | C; 23 | ddb_c({error, _, C}) -> 24 | C. 
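%% expand_tags/1 adds synthetic ddb:key_length and ddb:part_N tags for every
%% segment of the metric key, so individual key parts can be queried as tags;
%% ddb_c/1 simply unwraps the {ok, Conn} / {error, _, Conn} tuples returned by
%% ddb_tcp so callers always get the connection back.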
25 | -------------------------------------------------------------------------------- /config.mk: -------------------------------------------------------------------------------- 1 | VERSION=0.3.3 2 | COMPONENT_INTERNAL=dpx 3 | COMPONENT=dalmatinerpx 4 | -------------------------------------------------------------------------------- /elvis.config: -------------------------------------------------------------------------------- 1 | % -*- erlang -*- 2 | [ 3 | { 4 | elvis, 5 | [ 6 | {config, 7 | %% We can't lint right now thanks to lexer/parser modules 8 | [#{dirs => ["src_ignore"], 9 | filter => "*.erl", 10 | ignore => [dql_parser, dql_lexer], 11 | rules => [{elvis_style, line_length, 12 | #{ignore => [], 13 | limit => 80, 14 | skip_comments => false}}, 15 | {elvis_style, no_tabs}, 16 | {elvis_style, no_trailing_whitespace}, 17 | {elvis_style, macro_names, #{ignore => []}}, 18 | {elvis_style, macro_module_names}, 19 | {elvis_style, operator_spaces, #{rules => [{right, ","}, 20 | {right, "++"}, 21 | {left, "++"}]}}, 22 | {elvis_style, nesting_level, #{level => 3}}, 23 | {elvis_style, god_modules, #{limit => 25, ignore => []}}, 24 | {elvis_style, no_if_expression}, 25 | {elvis_style, invalid_dynamic_call, 26 | #{ignore => []}}, 27 | {elvis_style, used_ignored_variable, #{ignore => [dql_parser]}}, 28 | {elvis_style, no_behavior_info}, 29 | { 30 | elvis_style, 31 | module_naming_convention, 32 | #{regex => "^[a-z]([a-z0-9]*_?)*(_SUITE)?$", 33 | ignore => [dql_parser]} 34 | }, 35 | { 36 | elvis_style, 37 | function_naming_convention, 38 | #{regex => "^([a-z][a-z0-9]*_?)*$"} 39 | }, 40 | {elvis_style, state_record_and_type}, 41 | {elvis_style, no_spec_with_records}, 42 | {elvis_style, dont_repeat_yourself, #{min_complexity => 11, 43 | ignore => [dql_parser] }}, 44 | {elvis_style, no_debug_call, 45 | #{ignore => []}} 46 | ] 47 | }, 48 | #{dirs => ["."], 49 | filter => "Makefile", 50 | rules => [{elvis_project, no_deps_master_erlang_mk, #{ignore => []}}, 51 | {elvis_project, protocol_for_deps_erlang_mk, #{ignore => []}}] 52 | }, 53 | #{dirs => ["."], 54 | filter => "rebar.config", 55 | %% on the test branch master deps are OK 56 | rules => [%{elvis_project, no_deps_master_rebar, #{ignore => []}}, 57 | {elvis_project, protocol_for_deps_rebar, #{ignore => []}}] 58 | }, 59 | #{dirs => ["."], 60 | filter => "elvis.config", 61 | rules => [{elvis_project, old_configuration_format}] 62 | } 63 | ] 64 | }]}]. 65 | -------------------------------------------------------------------------------- /example/diamond.conf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Diamond Configuration File 3 | ################################################################################ 4 | 5 | ################################################################################ 6 | ### Options for the server 7 | [server] 8 | 9 | # Handlers for published metrics. 
10 | #handlers = diamond.handler.graphite.GraphiteHandler 11 | handlers = diamond.handler.tsdb.TSDBHandler 12 | 13 | # User diamond will run as 14 | # Leave empty to use the current user 15 | user = 16 | 17 | # Group diamond will run as 18 | # Leave empty to use the current group 19 | group = 20 | 21 | # Pid file 22 | pid_file = /data/run/diamond.pid 23 | 24 | # Directory to load collector modules from 25 | collectors_path = /data/diamond/collectors/ 26 | 27 | # Directory to load collector configs from 28 | collectors_config_path = /etc/diamond/collectors/ 29 | 30 | # Number of seconds between each collector load 31 | # collectors_load_delay = 1.0 32 | 33 | # Directory to load handler configs from 34 | handlers_config_path = /etc/diamond/handlers/ 35 | 36 | # Directory to load handler modules from 37 | handlers_path = /data/diamond/handlers/ 38 | 39 | # Maximum number of metrics waiting to be processed by handlers. 40 | # When metric queue is full, new metrics are dropped. 41 | metric_queue_size = 16384 42 | 43 | 44 | ################################################################################ 45 | ### Options for handlers 46 | [handlers] 47 | 48 | # daemon logging handler(s) 49 | keys = rotated_file 50 | 51 | ### Defaults options for all Handlers 52 | [[default]] 53 | 54 | [[ArchiveHandler]] 55 | 56 | # File to write archive log files 57 | log_file = /data/log/diamond/archive.log 58 | 59 | # Number of days to keep archive log files 60 | days = 7 61 | 62 | [[GraphiteHandler]] 63 | ### Options for GraphiteHandler 64 | 65 | # Graphite server host 66 | host = 127.0.0.1 67 | 68 | # Port to send metrics to 69 | port = 2003 70 | 71 | # Socket timeout (seconds) 72 | timeout = 15 73 | 74 | # Batch size for metrics 75 | batch = 1 76 | 77 | [[GraphitePickleHandler]] 78 | ### Options for GraphitePickleHandler 79 | 80 | # Graphite server host 81 | host = 127.0.0.1 82 | 83 | # Port to send metrics to 84 | port = 2004 85 | 86 | # Socket timeout (seconds) 87 | timeout = 15 88 | 89 | # Batch size for pickled metrics 90 | batch = 256 91 | 92 | [[MySQLHandler]] 93 | ### Options for MySQLHandler 94 | 95 | # MySQL Connection Info 96 | hostname = 127.0.0.1 97 | port = 3306 98 | username = root 99 | password = 100 | database = diamond 101 | table = metrics 102 | # INT UNSIGNED NOT NULL 103 | col_time = timestamp 104 | # VARCHAR(255) NOT NULL 105 | col_metric = metric 106 | # VARCHAR(255) NOT NULL 107 | col_value = value 108 | 109 | [[StatsdHandler]] 110 | host = 127.0.0.1 111 | port = 8125 112 | 113 | [[TSDBHandler]] 114 | host = 127.0.0.1 115 | port = 4242 116 | timeout = 15 117 | 118 | [[LibratoHandler]] 119 | user = user@example.com 120 | apikey = abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 121 | 122 | [[HostedGraphiteHandler]] 123 | apikey = abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 124 | timeout = 15 125 | batch = 1 126 | 127 | [[SignalfxHandler]] 128 | auth_token = abcdefghijklmnopqrstuvwxyz 129 | 130 | # And any other config settings from GraphiteHandler are valid here 131 | 132 | [[HttpPostHandler]] 133 | 134 | ### Urp to post the metrics 135 | url = http://localhost:8888/ 136 | ### Metrics batch size 137 | batch = 100 138 | 139 | 140 | ################################################################################ 141 | ### Options for collectors 142 | [collectors] 143 | 144 | [[default]] 145 | ### Defaults options for all Collectors 146 | 147 | # Uncomment and set to hardcode a hostname for the collector path 148 | # Keep in mind, periods are seperators 
in graphite 149 | # hostname = my_custom_hostname 150 | 151 | # If you prefer to just use a different way of calculating the hostname 152 | # Uncomment and set this to one of these values: 153 | 154 | # smart = Default. Tries fqdn_short. If that's localhost, uses hostname_short 155 | 156 | # fqdn_short = Default. Similar to hostname -s 157 | # fqdn = hostname output 158 | # fqdn_rev = hostname in reverse (com.example.www) 159 | 160 | # uname_short = Similar to uname -n, but only the first part 161 | # uname_rev = uname -r in reverse (com.example.www) 162 | 163 | # hostname_short = `hostname -s` 164 | # hostname = `hostname` 165 | # hostname_rev = `hostname` in reverse (com.example.www) 166 | 167 | # shell = Run the string set in hostname as a shell command and use its 168 | # output(with spaces trimmed off from both ends) as the hostname. 169 | 170 | # hostname_method = smart 171 | 172 | # Path Prefix and Suffix 173 | # you can use one or both to craft the path where you want to put metrics 174 | # such as: %(path_prefix)s.$(hostname)s.$(path_suffix)s.$(metric)s 175 | # path_prefix = servers 176 | # path_suffix = 177 | 178 | # Path Prefix for Virtual Machines 179 | # If the host supports virtual machines, collectors may report per 180 | # VM metrics. Following OpenStack nomenclature, the prefix for 181 | # reporting per VM metrics is "instances", and metric foo for VM 182 | # bar will be reported as: instances.bar.foo... 183 | # instance_prefix = instances 184 | 185 | # Default Poll Interval (seconds) 186 | interval = 30 187 | 188 | ################################################################################ 189 | # Default enabled collectors 190 | ################################################################################ 191 | 192 | [[CPUCollector]] 193 | enabled = False 194 | 195 | [[DiskSpaceCollector]] 196 | enabled = False 197 | 198 | [[DiskUsageCollector]] 199 | enabled = False 200 | 201 | [[LoadAverageCollector]] 202 | enabled = False 203 | 204 | [[MemoryCollector]] 205 | enabled = True 206 | 207 | [[VMStatCollector]] 208 | enabled = False 209 | 210 | ################################################################################ 211 | ### Options for logging 212 | # for more information on file format syntax: 213 | # http://docs.python.org/library/logging.config.html#configuration-file-format 214 | 215 | [loggers] 216 | 217 | keys = root 218 | 219 | # handlers are higher in this config file, in: 220 | # [handlers] 221 | # keys = ... 
222 | 223 | [formatters] 224 | 225 | keys = default 226 | 227 | [logger_root] 228 | 229 | # to increase verbosity, set DEBUG 230 | level = INFO 231 | handlers = rotated_file 232 | propagate = 1 233 | 234 | [handler_rotated_file] 235 | 236 | class = handlers.TimedRotatingFileHandler 237 | level = DEBUG 238 | formatter = default 239 | # rotate at midnight, each day and keep 7 days 240 | args = ('/data/log/diamond/diamond.log', 'midnight', 1, 7) 241 | 242 | [formatter_default] 243 | 244 | format = [%(asctime)s] [%(threadName)s] %(message)s 245 | datefmt = 246 | 247 | ################################################################################ 248 | ### Options for config merging 249 | # [configs] 250 | # path = "/etc/diamond/configs/" 251 | # extension = ".conf" 252 | #------------------------------------------------------------------------------- 253 | # Example: 254 | # /etc/diamond/configs/net.conf 255 | # [collectors] 256 | # 257 | # [[NetworkCollector]] 258 | # enabled = True 259 | -------------------------------------------------------------------------------- /example/telegraf.conf: -------------------------------------------------------------------------------- 1 | # Telegraf Configuration 2 | # 3 | # Telegraf is entirely plugin driven. All metrics are gathered from the 4 | # declared inputs, and sent to the declared outputs. 5 | # 6 | # Plugins must be declared in here to be active. 7 | # To deactivate a plugin, comment out the name and any variables. 8 | # 9 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config 10 | # file would generate. 11 | # 12 | # Environment variables can be used anywhere in this config file, simply prepend 13 | # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), 14 | # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) 15 | 16 | 17 | # Global tags can be specified here in key="value" format. 18 | [global_tags] 19 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1 20 | # rack = "1a" 21 | ## Environment variables can be used as tags, and throughout the config file 22 | # user = "$USER" 23 | 24 | 25 | # Configuration for telegraf agent 26 | [agent] 27 | ## Default data collection interval for all inputs 28 | interval = "10s" 29 | ## Rounds collection interval to 'interval' 30 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 31 | round_interval = true 32 | 33 | ## Telegraf will cache metric_buffer_limit metrics for each output, and will 34 | ## flush this buffer on a successful write. 35 | metric_buffer_limit = 1000 36 | ## Flush the buffer whenever full, regardless of flush_interval. 37 | flush_buffer_when_full = true 38 | 39 | ## Collection jitter is used to jitter the collection by a random amount. 40 | ## Each plugin will sleep for a random time within jitter before collecting. 41 | ## This can be used to avoid many plugins querying things like sysfs at the 42 | ## same time, which can have a measurable effect on the system. 43 | collection_jitter = "0s" 44 | 45 | ## Default flushing interval for all outputs. You shouldn't set this below 46 | ## interval. Maximum flush_interval will be flush_interval + flush_jitter 47 | flush_interval = "10s" 48 | ## Jitter the flush interval by a random amount. This is primarily to avoid 49 | ## large write spikes for users running a large number of telegraf instances. 
50 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s 51 | flush_jitter = "0s" 52 | 53 | ## Run telegraf in debug mode 54 | debug = false 55 | ## Run telegraf in quiet mode 56 | quiet = false 57 | ## Override default hostname, if empty use os.Hostname() 58 | hostname = "" 59 | ## If set to true, do no set the "host" tag in the telegraf agent. 60 | omit_hostname = false 61 | 62 | 63 | ############################################################################### 64 | # OUTPUT PLUGINS # 65 | ############################################################################### 66 | 67 | # Configuration for OpenTSDB server to send metrics to 68 | [[outputs.opentsdb]] 69 | ## prefix for metrics keys 70 | prefix = "telegraf.test." 71 | 72 | ## Telnet Mode ## 73 | ## DNS name of the OpenTSDB server in telnet mode 74 | host = "localhost" 75 | 76 | ## Port of the OpenTSDB server in telnet mode 77 | port = 4243 78 | 79 | ## Debug true - Prints OpenTSDB communication 80 | debug = false 81 | 82 | # Configuration for influxdb server to send metrics to 83 | [[outputs.influxdb]] 84 | ## The full HTTP or UDP endpoint URL for your InfluxDB instance. 85 | ## Multiple urls can be specified as part of the same cluster, 86 | ## this means that only ONE of the urls will be written to each interval. 87 | # urls = ["udp://localhost:8089"] # UDP endpoint example 88 | urls = ["http://localhost:8087"] # required 89 | ## The target database for metrics (telegraf will create it if not exists). 90 | database = "telegraf" # required 91 | ## Retention policy to write to. 92 | retention_policy = "default" 93 | ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". 94 | ## note: using "s" precision greatly improves InfluxDB compression. 95 | precision = "s" 96 | 97 | ## Write timeout (for the InfluxDB client), formatted as a string. 98 | ## If not provided, will default to 5s. 0s means no timeout (not recommended). 99 | timeout = "5s" 100 | # username = "telegraf" 101 | # password = "metricsmetricsmetricsmetrics" 102 | ## Set the user agent for HTTP POSTs (can be useful for log differentiation) 103 | # user_agent = "telegraf" 104 | ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) 105 | # udp_payload = 512 106 | 107 | ## Optional SSL Config 108 | # ssl_ca = "/etc/telegraf/ca.pem" 109 | # ssl_cert = "/etc/telegraf/cert.pem" 110 | # ssl_key = "/etc/telegraf/key.pem" 111 | ## Use SSL but skip chain & host verification 112 | # insecure_skip_verify = false 113 | 114 | 115 | ############################################################################### 116 | # INPUT PLUGINS # 117 | ############################################################################### 118 | 119 | # Read metrics about cpu usage 120 | [[inputs.cpu]] 121 | ## Whether to report per-cpu stats or not 122 | percpu = true 123 | ## Whether to report total system cpu stats or not 124 | totalcpu = true 125 | ## Comment this line if you want the raw CPU time metrics 126 | fielddrop = ["time_*"] 127 | 128 | 129 | # Read metrics about disk usage by mount point 130 | [[inputs.disk]] 131 | ## By default, telegraf gather stats for all mountpoints. 132 | ## Setting mountpoints will restrict the stats to the specified mountpoints. 133 | mount_points = ["/", "/Users/heinz/Projects", "/Users/heinz/Projects/fifo", "/Users/heinz/Projects/dalmatiner"] 134 | 135 | ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually 136 | ## present on /run, /var/run, /dev/shm or /dev). 
137 | ignore_fs = ["tmpfs", "devtmpfs"] 138 | 139 | 140 | # Read metrics about disk IO by device 141 | [[inputs.diskio]] 142 | ## By default, telegraf will gather stats for all devices including 143 | ## disk partitions. 144 | ## Setting devices will restrict the stats to the specified devices. 145 | # devices = ["sda", "sdb"] 146 | ## Uncomment the following line if you do not need disk serial numbers. 147 | # skip_serial_number = true 148 | 149 | 150 | # Get kernel statistics from /proc/stat 151 | [[inputs.kernel]] 152 | # no configuration 153 | 154 | 155 | # Read metrics about memory usage 156 | [[inputs.mem]] 157 | # no configuration 158 | 159 | 160 | # Get the number of processes and group them by status 161 | [[inputs.processes]] 162 | # no configuration 163 | 164 | 165 | # Read metrics about swap memory usage 166 | [[inputs.swap]] 167 | # no configuration 168 | 169 | 170 | # Read metrics about system load & uptime 171 | [[inputs.system]] 172 | # no configuration 173 | 174 | 175 | # # Read stats from an aerospike server 176 | # [[inputs.aerospike]] 177 | # ## Aerospike servers to connect to (with port) 178 | # ## This plugin will query all namespaces the aerospike 179 | # ## server has configured and get stats for them. 180 | # servers = ["localhost:3000"] 181 | 182 | 183 | # # Read Apache status information (mod_status) 184 | # [[inputs.apache]] 185 | # ## An array of Apache status URI to gather stats. 186 | # urls = ["http://localhost/server-status?auto"] 187 | 188 | 189 | # # Read metrics of bcache from stats_total and dirty_data 190 | # [[inputs.bcache]] 191 | # ## Bcache sets path 192 | # ## If not specified, then default is: 193 | # bcachePath = "/sys/fs/bcache" 194 | # 195 | # ## By default, telegraf gather stats for all bcache devices 196 | # ## Setting devices will restrict the stats to the specified 197 | # ## bcache devices. 198 | # bcacheDevs = ["bcache0"] 199 | 200 | 201 | # # Read Cassandra metrics through Jolokia 202 | # [[inputs.cassandra]] 203 | # # This is the context root used to compose the jolokia url 204 | # context = "/jolokia/read" 205 | # ## List of cassandra servers exposing jolokia read service 206 | # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] 207 | # ## List of metrics collected on above servers 208 | # ## Each metric consists of a jmx path. 209 | # ## This will collect all heap memory usage metrics from the jvm and 210 | # ## ReadLatency metrics for all keyspaces and tables. 211 | # ## "type=Table" in the query works with Cassandra3.0. 
Older versions might 212 | # ## need to use "type=ColumnFamily" 213 | # metrics = [ 214 | # "/java.lang:type=Memory/HeapMemoryUsage", 215 | # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" 216 | # ] 217 | 218 | 219 | # # Pull Metric Statistics from Amazon CloudWatch 220 | # [[inputs.cloudwatch]] 221 | # ## Amazon Region 222 | # region = 'us-east-1' 223 | # 224 | # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) 225 | # period = '1m' 226 | # 227 | # ## Collection Delay (required - must account for metrics availability via CloudWatch API) 228 | # delay = '1m' 229 | # 230 | # ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid 231 | # ## gaps or overlap in pulled data 232 | # interval = '1m' 233 | # 234 | # ## Metric Statistic Namespace (required) 235 | # namespace = 'AWS/ELB' 236 | # 237 | # ## Metrics to Pull (optional) 238 | # ## Defaults to all Metrics in Namespace if nothing is provided 239 | # ## Refreshes Namespace available metrics every 1h 240 | # #[[inputs.cloudwatch.metrics]] 241 | # # names = ['Latency', 'RequestCount'] 242 | # # 243 | # # ## Dimension filters for Metric (optional) 244 | # # [[inputs.cloudwatch.metrics.dimensions]] 245 | # # name = 'LoadBalancerName' 246 | # # value = 'p-example' 247 | 248 | 249 | # # Read metrics from one or many couchbase clusters 250 | # [[inputs.couchbase]] 251 | # ## specify servers via a url matching: 252 | # ## [protocol://][:password]@address[:port] 253 | # ## e.g. 254 | # ## http://couchbase-0.example.com/ 255 | # ## http://admin:secret@couchbase-0.example.com:8091/ 256 | # ## 257 | # ## If no servers are specified, then localhost is used as the host. 258 | # ## If no protocol is specifed, HTTP is used. 259 | # ## If no port is specified, 8091 is used. 260 | # servers = ["http://localhost:8091"] 261 | 262 | 263 | # # Read CouchDB Stats from one or more servers 264 | # [[inputs.couchdb]] 265 | # ## Works with CouchDB stats endpoints out of the box 266 | # ## Multiple HOSTs from which to read CouchDB stats: 267 | # hosts = ["http://localhost:8086/_stats"] 268 | 269 | 270 | # # Read metrics from one or many disque servers 271 | # [[inputs.disque]] 272 | # ## An array of URI to gather stats about. Specify an ip or hostname 273 | # ## with optional port and password. 274 | # ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. 275 | # ## If no servers are specified, then localhost is used as the host. 276 | # servers = ["localhost"] 277 | 278 | 279 | # # Query given DNS server and gives statistics 280 | # [[inputs.dns_query]] 281 | # ## servers to query 282 | # servers = ["8.8.8.8"] # required 283 | # 284 | # ## Domains or subdomains to query. "."(root) is default 285 | # domains = ["."] # optional 286 | # 287 | # ## Query record type. Default is "A" 288 | # ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. 289 | # record_type = "A" # optional 290 | # 291 | # ## Dns server port. 53 is default 292 | # port = 53 # optional 293 | # 294 | # ## Query timeout in seconds. 
Default is 2 seconds 295 | # timeout = 2 # optional 296 | 297 | 298 | # # Read metrics about docker containers 299 | # [[inputs.docker]] 300 | # ## Docker Endpoint 301 | # ## To use TCP, set endpoint = "tcp://[ip]:[port]" 302 | # ## To use environment variables (ie, docker-machine), set endpoint = "ENV" 303 | # endpoint = "unix:///var/run/docker.sock" 304 | # ## Only collect metrics for these containers, collect all if empty 305 | # container_names = [] 306 | 307 | 308 | # # Read statistics from one or many dovecot servers 309 | # [[inputs.dovecot]] 310 | # ## specify dovecot servers via an address:port list 311 | # ## e.g. 312 | # ## localhost:24242 313 | # ## 314 | # ## If no servers are specified, then localhost is used as the host. 315 | # servers = ["localhost:24242"] 316 | # ## Type is one of "user", "domain", "ip", or "global" 317 | # type = "global" 318 | # ## Wildcard matches like "*.com". An empty string "" is same as "*" 319 | # ## If type = "ip" filters should be 320 | # filters = [""] 321 | 322 | 323 | # # Read stats from one or more Elasticsearch servers or clusters 324 | # [[inputs.elasticsearch]] 325 | # ## specify a list of one or more Elasticsearch servers 326 | # servers = ["http://localhost:9200"] 327 | # 328 | # ## set local to false when you want to read the indices stats from all nodes 329 | # ## within the cluster 330 | # local = true 331 | # 332 | # ## set cluster_health to true when you want to also obtain cluster level stats 333 | # cluster_health = false 334 | 335 | 336 | # # Read metrics from one or more commands that can output to stdout 337 | # [[inputs.exec]] 338 | # ## Commands array 339 | # commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] 340 | # 341 | # ## measurement name suffix (for separating different commands) 342 | # name_suffix = "_mycollector" 343 | # 344 | # ## Data format to consume. 345 | # ## Each data format has it's own unique set of configuration options, read 346 | # ## more about them here: 347 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 348 | # data_format = "influx" 349 | 350 | 351 | # # Read metrics of haproxy, via socket or csv stats page 352 | # [[inputs.haproxy]] 353 | # ## An array of address to gather stats about. Specify an ip on hostname 354 | # ## with optional port. ie localhost, 10.10.3.33:1936, etc. 355 | # 356 | # ## If no servers are specified, then default to 127.0.0.1:1936 357 | # servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] 358 | # ## Or you can also use local socket(not work yet) 359 | # ## servers = ["socket://run/haproxy/admin.sock"] 360 | 361 | 362 | # # HTTP/HTTPS request given an address a method and a timeout 363 | # [[inputs.http_response]] 364 | # ## Server address (default http://localhost) 365 | # address = "http://github.com" 366 | # ## Set response_timeout (default 5 seconds) 367 | # response_timeout = 5 368 | # ## HTTP Request Method 369 | # method = "GET" 370 | # ## Whether to follow redirects from the server (defaults to false) 371 | # follow_redirects = true 372 | # ## HTTP Request Headers (all values must be strings) 373 | # # [inputs.http_response.headers] 374 | # # Host = "github.com" 375 | # ## Optional HTTP Request Body 376 | # # body = ''' 377 | # # {'fake':'data'} 378 | # # ''' 379 | 380 | 381 | # # Read flattened metrics from one or more JSON HTTP endpoints 382 | # [[inputs.httpjson]] 383 | # ## NOTE This plugin only reads numerical measurements, strings and booleans 384 | # ## will be ignored. 
385 | # 386 | # ## a name for the service being polled 387 | # name = "webserver_stats" 388 | # 389 | # ## URL of each server in the service's cluster 390 | # servers = [ 391 | # "http://localhost:9999/stats/", 392 | # "http://localhost:9998/stats/", 393 | # ] 394 | # 395 | # ## HTTP method to use: GET or POST (case-sensitive) 396 | # method = "GET" 397 | # 398 | # ## List of tag names to extract from top-level of JSON server response 399 | # # tag_keys = [ 400 | # # "my_tag_1", 401 | # # "my_tag_2" 402 | # # ] 403 | # 404 | # ## HTTP parameters (all values must be strings) 405 | # [inputs.httpjson.parameters] 406 | # event_type = "cpu_spike" 407 | # threshold = "0.75" 408 | # 409 | # ## HTTP Header parameters (all values must be strings) 410 | # # [inputs.httpjson.headers] 411 | # # X-Auth-Token = "my-xauth-token" 412 | # # apiVersion = "v1" 413 | # 414 | # ## Optional SSL Config 415 | # # ssl_ca = "/etc/telegraf/ca.pem" 416 | # # ssl_cert = "/etc/telegraf/cert.pem" 417 | # # ssl_key = "/etc/telegraf/key.pem" 418 | # ## Use SSL but skip chain & host verification 419 | # # insecure_skip_verify = false 420 | 421 | 422 | # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints 423 | # [[inputs.influxdb]] 424 | # ## Works with InfluxDB debug endpoints out of the box, 425 | # ## but other services can use this format too. 426 | # ## See the influxdb plugin's README for more details. 427 | # 428 | # ## Multiple URLs from which to read InfluxDB-formatted JSON 429 | # urls = [ 430 | # "http://localhost:8086/debug/vars" 431 | # ] 432 | 433 | 434 | # # Read metrics from one or many bare metal servers 435 | # [[inputs.ipmi_sensor]] 436 | # ## specify servers via a url matching: 437 | # ## [username[:password]@][protocol[(address)]] 438 | # ## e.g. 439 | # ## root:passwd@lan(127.0.0.1) 440 | # ## 441 | # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] 442 | 443 | 444 | # # Read JMX metrics through Jolokia 445 | # [[inputs.jolokia]] 446 | # ## This is the context root used to compose the jolokia url 447 | # context = "/jolokia/read" 448 | # 449 | # ## List of servers exposing jolokia read service 450 | # [[inputs.jolokia.servers]] 451 | # name = "stable" 452 | # host = "192.168.103.2" 453 | # port = "8180" 454 | # # username = "myuser" 455 | # # password = "mypassword" 456 | # 457 | # ## List of metrics collected on above servers 458 | # ## Each metric consists in a name, a jmx path and either 459 | # ## a pass or drop slice attribute. 460 | # ## This collect all heap memory usage metrics. 461 | # [[inputs.jolokia.metrics]] 462 | # name = "heap_memory_usage" 463 | # jmx = "/java.lang:type=Memory/HeapMemoryUsage" 464 | # 465 | # ## This collect thread counts metrics. 466 | # [[inputs.jolokia.metrics]] 467 | # name = "thread_count" 468 | # jmx = "/java.lang:type=Threading/TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" 469 | # 470 | # ## This collect number of class loaded/unloaded counts metrics. 471 | # [[inputs.jolokia.metrics]] 472 | # name = "class_count" 473 | # jmx = "/java.lang:type=ClassLoading/LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" 474 | 475 | 476 | # # Read metrics from a LeoFS Server via SNMP 477 | # [[inputs.leofs]] 478 | # ## An array of URI to gather stats about LeoFS. 479 | # ## Specify an ip or hostname with port. 
ie 127.0.0.1:4020 480 | # servers = ["127.0.0.1:4021"] 481 | 482 | 483 | # # Read metrics from local Lustre service on OST, MDS 484 | # [[inputs.lustre2]] 485 | # ## An array of /proc globs to search for Lustre stats 486 | # ## If not specified, the default will work on Lustre 2.5.x 487 | # ## 488 | # # ost_procfiles = [ 489 | # # "/proc/fs/lustre/obdfilter/*/stats", 490 | # # "/proc/fs/lustre/osd-ldiskfs/*/stats" 491 | # # ] 492 | # # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] 493 | 494 | 495 | # # Gathers metrics from the /3.0/reports MailChimp API 496 | # [[inputs.mailchimp]] 497 | # ## MailChimp API key 498 | # ## get from https://admin.mailchimp.com/account/api/ 499 | # api_key = "" # required 500 | # ## Reports for campaigns sent more than days_old ago will not be collected. 501 | # ## 0 means collect all. 502 | # days_old = 0 503 | # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old 504 | # # campaign_id = "" 505 | 506 | 507 | # # Read metrics from one or many memcached servers 508 | # [[inputs.memcached]] 509 | # ## An array of address to gather stats about. Specify an ip on hostname 510 | # ## with optional port. ie localhost, 10.0.0.1:11211, etc. 511 | # servers = ["localhost:11211"] 512 | # # unix_sockets = ["/var/run/memcached.sock"] 513 | 514 | 515 | # # Telegraf plugin for gathering metrics from N Mesos masters 516 | # [[inputs.mesos]] 517 | # # Timeout, in ms. 518 | # timeout = 100 519 | # # A list of Mesos masters, default value is localhost:5050. 520 | # masters = ["localhost:5050"] 521 | # # Metrics groups to be collected, by default, all enabled. 522 | # master_collections = [ 523 | # "resources", 524 | # "master", 525 | # "system", 526 | # "slaves", 527 | # "frameworks", 528 | # "messages", 529 | # "evqueue", 530 | # "registrar", 531 | # ] 532 | 533 | 534 | # # Read metrics from one or many MongoDB servers 535 | # [[inputs.mongodb]] 536 | # ## An array of URI to gather stats about. Specify an ip or hostname 537 | # ## with optional port add password. ie, 538 | # ## mongodb://user:auth_key@10.10.3.30:27017, 539 | # ## mongodb://10.10.3.33:18832, 540 | # ## 10.0.0.1:10000, etc. 541 | # servers = ["127.0.0.1:27017"] 542 | 543 | 544 | # # Read metrics from one or many mysql servers 545 | # [[inputs.mysql]] 546 | # ## specify servers via a url matching: 547 | # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] 548 | # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name 549 | # ## e.g. 550 | # ## root:passwd@tcp(127.0.0.1:3306)/?tls=false 551 | # ## root@tcp(127.0.0.1:3306)/?tls=false 552 | # ## 553 | # ## If no servers are specified, then localhost is used as the host. 554 | # servers = ["tcp(127.0.0.1:3306)/"] 555 | 556 | 557 | # # Read metrics about network interface usage 558 | # [[inputs.net]] 559 | # ## By default, telegraf gathers stats from any up interface (excluding loopback) 560 | # ## Setting interfaces will tell it to gather these explicit interfaces, 561 | # ## regardless of status. 
562 | # ## 563 | # # interfaces = ["eth0"] 564 | 565 | 566 | # # TCP or UDP 'ping' given url and collect response time in seconds 567 | # [[inputs.net_response]] 568 | # ## Protocol, must be "tcp" or "udp" 569 | # protocol = "tcp" 570 | # ## Server address (default localhost) 571 | # address = "github.com:80" 572 | # ## Set timeout (default 1.0 seconds) 573 | # timeout = 1.0 574 | # ## Set read timeout (default 1.0 seconds) 575 | # read_timeout = 1.0 576 | # ## Optional string sent to the server 577 | # # send = "ssh" 578 | # ## Optional expected string in answer 579 | # # expect = "ssh" 580 | 581 | 582 | # # Read TCP metrics such as established, time wait and sockets counts. 583 | # [[inputs.netstat]] 584 | # # no configuration 585 | 586 | 587 | # # Read Nginx's basic status information (ngx_http_stub_status_module) 588 | # [[inputs.nginx]] 589 | # ## An array of Nginx stub_status URI to gather stats. 590 | # urls = ["http://localhost/status"] 591 | 592 | 593 | # # Read NSQ topic and channel statistics. 594 | # [[inputs.nsq]] 595 | # ## An array of NSQD HTTP API endpoints 596 | # endpoints = ["http://localhost:4151"] 597 | 598 | 599 | # # Get standard NTP query metrics, requires ntpq executable. 600 | # [[inputs.ntpq]] 601 | # ## If false, set the -n ntpq flag. Can reduce metric gather time. 602 | # dns_lookup = true 603 | 604 | 605 | # # Read metrics of passenger using passenger-status 606 | # [[inputs.passenger]] 607 | # ## Path of passenger-status. 608 | # ## 609 | # ## Plugin gather metric via parsing XML output of passenger-status 610 | # ## More information about the tool: 611 | # ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html 612 | # ## 613 | # ## If no path is specified, then the plugin simply execute passenger-status 614 | # ## hopefully it can be found in your PATH 615 | # command = "passenger-status -v --show=xml" 616 | 617 | 618 | # # Read metrics of phpfpm, via HTTP status page or socket 619 | # [[inputs.phpfpm]] 620 | # ## An array of addresses to gather stats about. Specify an ip or hostname 621 | # ## with optional port and path 622 | # ## 623 | # ## Plugin can be configured in three modes (either can be used): 624 | # ## - http: the URL must start with http:// or https://, ie: 625 | # ## "http://localhost/status" 626 | # ## "http://192.168.130.1/status?full" 627 | # ## 628 | # ## - unixsocket: path to fpm socket, ie: 629 | # ## "/var/run/php5-fpm.sock" 630 | # ## or using a custom fpm status path: 631 | # ## "/var/run/php5-fpm.sock:fpm-custom-status-path" 632 | # ## 633 | # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: 634 | # ## "fcgi://10.0.0.12:9000/status" 635 | # ## "cgi://10.0.10.12:9001/status" 636 | # ## 637 | # ## Example of multiple gathering from local socket and remove host 638 | # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] 639 | # urls = ["http://localhost/status"] 640 | 641 | 642 | # # Ping given url(s) and return statistics 643 | # [[inputs.ping]] 644 | # ## NOTE: this plugin forks the ping command. You may need to set capabilities 645 | # ## via setcap cap_net_raw+p /bin/ping 646 | # 647 | # ## urls to ping 648 | # urls = ["www.google.com"] # required 649 | # ## number of pings to send (ping -c ) 650 | # count = 1 # required 651 | # ## interval, in s, at which to ping. 0 == default (ping -i ) 652 | # ping_interval = 0.0 653 | # ## ping timeout, in s. 
0 == no timeout (ping -t ) 654 | # timeout = 0.0 655 | # ## interface to send ping from (ping -I ) 656 | # interface = "" 657 | 658 | 659 | # # Read metrics from one or many postgresql servers 660 | # [[inputs.postgresql]] 661 | # ## specify address via a url matching: 662 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ 663 | # ## ?sslmode=[disable|verify-ca|verify-full] 664 | # ## or a simple string: 665 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production 666 | # ## 667 | # ## All connection parameters are optional. 668 | # ## 669 | # ## Without the dbname parameter, the driver will default to a database 670 | # ## with the same name as the user. This dbname is just for instantiating a 671 | # ## connection with the server and doesn't restrict the databases we are trying 672 | # ## to grab metrics for. 673 | # ## 674 | # address = "host=localhost user=postgres sslmode=disable" 675 | # 676 | # ## A list of databases to pull metrics about. If not specified, metrics for all 677 | # ## databases are gathered. 678 | # # databases = ["app_production", "testing"] 679 | 680 | 681 | # # Read metrics from one or many postgresql servers 682 | # [[inputs.postgresql_extensible]] 683 | # ## specify address via a url matching: 684 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ 685 | # ## ?sslmode=[disable|verify-ca|verify-full] 686 | # ## or a simple string: 687 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production 688 | # # 689 | # ## All connection parameters are optional. # 690 | # ## Without the dbname parameter, the driver will default to a database 691 | # ## with the same name as the user. This dbname is just for instantiating a 692 | # ## connection with the server and doesn't restrict the databases we are trying 693 | # ## to grab metrics for. 694 | # # 695 | # address = "host=localhost user=postgres sslmode=disable" 696 | # ## A list of databases to pull metrics about. If not specified, metrics for all 697 | # ## databases are gathered. 698 | # ## databases = ["app_production", "testing"] 699 | # # 700 | # ## Define the toml config where the sql queries are stored 701 | # ## New queries can be added, if the withdbname is set to true and there is no 702 | # ## databases defined in the 'databases field', the sql query is ended by a 703 | # ## 'is not null' in order to make the query succeed. 704 | # ## Example : 705 | # ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become 706 | # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" 707 | # ## because the databases variable was set to ['postgres', 'pgbench' ] and the 708 | # ## withdbname was true. 
Be careful that if the withdbname is set to false you 709 | # ## don't have to define the where clause (aka with the dbname) the tagvalue 710 | # ## field is used to define custom tags (separated by comas) 711 | # # 712 | # ## Structure : 713 | # ## [[inputs.postgresql_extensible.query]] 714 | # ## sqlquery string 715 | # ## version string 716 | # ## withdbname boolean 717 | # ## tagvalue string (coma separated) 718 | # [[inputs.postgresql_extensible.query]] 719 | # sqlquery="SELECT * FROM pg_stat_database" 720 | # version=901 721 | # withdbname=false 722 | # tagvalue="" 723 | # [[inputs.postgresql_extensible.query]] 724 | # sqlquery="SELECT * FROM pg_stat_bgwriter" 725 | # version=901 726 | # withdbname=false 727 | # tagvalue="" 728 | 729 | 730 | # # Read metrics from one or many PowerDNS servers 731 | # [[inputs.powerdns]] 732 | # ## An array of sockets to gather stats about. 733 | # ## Specify a path to unix socket. 734 | # unix_sockets = ["/var/run/pdns.controlsocket"] 735 | 736 | 737 | # # Monitor process cpu and memory usage 738 | # [[inputs.procstat]] 739 | # ## Must specify one of: pid_file, exe, or pattern 740 | # ## PID file to monitor process 741 | # pid_file = "/var/run/nginx.pid" 742 | # ## executable name (ie, pgrep ) 743 | # # exe = "nginx" 744 | # ## pattern as argument for pgrep (ie, pgrep -f ) 745 | # # pattern = "nginx" 746 | # ## user as argument for pgrep (ie, pgrep -u ) 747 | # # user = "nginx" 748 | # 749 | # ## Field name prefix 750 | # prefix = "" 751 | # ## comment this out if you want raw cpu_time stats 752 | # fielddrop = ["cpu_time_*"] 753 | 754 | 755 | # # Read metrics from one or many prometheus clients 756 | # [[inputs.prometheus]] 757 | # ## An array of urls to scrape metrics from. 758 | # urls = ["http://localhost:9100/metrics"] 759 | # 760 | # ## Use SSL but skip chain & host verification 761 | # # insecure_skip_verify = false 762 | # ## Use bearer token for authorization 763 | # # bearer_token = /path/to/bearer/token 764 | 765 | 766 | # # Reads last_run_summary.yaml file and converts to measurments 767 | # [[inputs.puppetagent]] 768 | # ## Location of puppet last run summary file 769 | # location = "/var/lib/puppet/state/last_run_summary.yaml" 770 | 771 | 772 | # # Read metrics from one or many RabbitMQ servers via the management API 773 | # [[inputs.rabbitmq]] 774 | # url = "http://localhost:15672" # required 775 | # # name = "rmq-server-1" # optional tag 776 | # # username = "guest" 777 | # # password = "guest" 778 | # 779 | # ## A list of nodes to pull metrics about. If not specified, metrics for 780 | # ## all nodes are gathered. 781 | # # nodes = ["rabbit@node1", "rabbit@node2"] 782 | 783 | 784 | # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) 785 | # [[inputs.raindrops]] 786 | # ## An array of raindrops middleware URI to gather stats. 787 | # urls = ["http://localhost:8080/_raindrops"] 788 | 789 | 790 | # # Read metrics from one or many redis servers 791 | # [[inputs.redis]] 792 | # ## specify servers via a url matching: 793 | # ## [protocol://][:password]@address[:port] 794 | # ## e.g. 795 | # ## tcp://localhost:6379 796 | # ## tcp://:password@192.168.99.100 797 | # ## 798 | # ## If no servers are specified, then localhost is used as the host. 799 | # ## If no port is specified, 6379 is used 800 | # servers = ["tcp://localhost:6379"] 801 | 802 | 803 | # # Read metrics from one or many RethinkDB servers 804 | # [[inputs.rethinkdb]] 805 | # ## An array of URI to gather stats about. 
Specify an ip or hostname 806 | # ## with optional port add password. ie, 807 | # ## rethinkdb://user:auth_key@10.10.3.30:28105, 808 | # ## rethinkdb://10.10.3.33:18832, 809 | # ## 10.0.0.1:10000, etc. 810 | # servers = ["127.0.0.1:28015"] 811 | 812 | 813 | # # Read metrics one or many Riak servers 814 | # [[inputs.riak]] 815 | # # Specify a list of one or more riak http servers 816 | # servers = ["http://localhost:8098"] 817 | 818 | 819 | # # Reads oids value from one or many snmp agents 820 | # [[inputs.snmp]] 821 | # ## Use 'oids.txt' file to translate oids to names 822 | # ## To generate 'oids.txt' you need to run: 823 | # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt 824 | # ## Or if you have an other MIB folder with custom MIBs 825 | # ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt 826 | # snmptranslate_file = "/tmp/oids.txt" 827 | # [[inputs.snmp.host]] 828 | # address = "192.168.2.2:161" 829 | # # SNMP community 830 | # community = "public" # default public 831 | # # SNMP version (1, 2 or 3) 832 | # # Version 3 not supported yet 833 | # version = 2 # default 2 834 | # # SNMP response timeout 835 | # timeout = 2.0 # default 2.0 836 | # # SNMP request retries 837 | # retries = 2 # default 2 838 | # # Which get/bulk do you want to collect for this host 839 | # collect = ["mybulk", "sysservices", "sysdescr"] 840 | # # Simple list of OIDs to get, in addition to "collect" 841 | # get_oids = [] 842 | # 843 | # [[inputs.snmp.host]] 844 | # address = "192.168.2.3:161" 845 | # community = "public" 846 | # version = 2 847 | # timeout = 2.0 848 | # retries = 2 849 | # collect = ["mybulk"] 850 | # get_oids = [ 851 | # "ifNumber", 852 | # ".1.3.6.1.2.1.1.3.0", 853 | # ] 854 | # 855 | # [[inputs.snmp.get]] 856 | # name = "ifnumber" 857 | # oid = "ifNumber" 858 | # 859 | # [[inputs.snmp.get]] 860 | # name = "interface_speed" 861 | # oid = "ifSpeed" 862 | # instance = "0" 863 | # 864 | # [[inputs.snmp.get]] 865 | # name = "sysuptime" 866 | # oid = ".1.3.6.1.2.1.1.3.0" 867 | # unit = "second" 868 | # 869 | # [[inputs.snmp.bulk]] 870 | # name = "mybulk" 871 | # max_repetition = 127 872 | # oid = ".1.3.6.1.2.1.1" 873 | # 874 | # [[inputs.snmp.bulk]] 875 | # name = "ifoutoctets" 876 | # max_repetition = 127 877 | # oid = "ifOutOctets" 878 | # 879 | # [[inputs.snmp.host]] 880 | # address = "192.168.2.13:161" 881 | # #address = "127.0.0.1:161" 882 | # community = "public" 883 | # version = 2 884 | # timeout = 2.0 885 | # retries = 2 886 | # #collect = ["mybulk", "sysservices", "sysdescr", "systype"] 887 | # collect = ["sysuptime" ] 888 | # [[inputs.snmp.host.table]] 889 | # name = "iftable3" 890 | # include_instances = ["enp5s0", "eth1"] 891 | # 892 | # # SNMP TABLEs 893 | # # table without mapping neither subtables 894 | # [[inputs.snmp.table]] 895 | # name = "iftable1" 896 | # oid = ".1.3.6.1.2.1.31.1.1.1" 897 | # 898 | # # table without mapping but with subtables 899 | # [[inputs.snmp.table]] 900 | # name = "iftable2" 901 | # oid = ".1.3.6.1.2.1.31.1.1.1" 902 | # sub_tables = [".1.3.6.1.2.1.2.2.1.13"] 903 | # 904 | # # table with mapping but without subtables 905 | # [[inputs.snmp.table]] 906 | # name = "iftable3" 907 | # oid = ".1.3.6.1.2.1.31.1.1.1" 908 | # # if empty. 
get all instances 909 | # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" 910 | # # if empty, get all subtables 911 | # 912 | # # table with both mapping and subtables 913 | # [[inputs.snmp.table]] 914 | # name = "iftable4" 915 | # oid = ".1.3.6.1.2.1.31.1.1.1" 916 | # # if empty get all instances 917 | # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" 918 | # # if empty get all subtables 919 | # # sub_tables could be not "real subtables" 920 | # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] 921 | 922 | 923 | # # Read metrics from Microsoft SQL Server 924 | # [[inputs.sqlserver]] 925 | # ## Specify instances to monitor with a list of connection strings. 926 | # ## All connection parameters are optional. 927 | # ## By default, the host is localhost, listening on default port, TCP 1433. 928 | # ## for Windows, the user is the currently running AD user (SSO). 929 | # ## See https://github.com/denisenkom/go-mssqldb for detailed connection 930 | # ## parameters. 931 | # # servers = [ 932 | # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", 933 | # # ] 934 | 935 | 936 | # # Inserts sine and cosine waves for demonstration purposes 937 | # [[inputs.trig]] 938 | # ## Set the amplitude 939 | # amplitude = 10.0 940 | 941 | 942 | # # Read Twemproxy stats data 943 | # [[inputs.twemproxy]] 944 | # ## Twemproxy stats address and port (no scheme) 945 | # addr = "localhost:22222" 946 | # ## Monitor pool name 947 | # pools = ["redis_pool", "mc_pool"] 948 | 949 | 950 | # # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats 951 | # [[inputs.zfs]] 952 | # ## ZFS kstat path 953 | # ## If not specified, then default is: 954 | # kstatPath = "/proc/spl/kstat/zfs" 955 | # 956 | # ## By default, telegraf gather all zfs stats 957 | # ## If not specified, then default is: 958 | # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] 959 | # 960 | # ## By default, don't gather zpool stats 961 | # poolMetrics = false 962 | 963 | 964 | # # Reads 'mntr' stats from one or many zookeeper servers 965 | # [[inputs.zookeeper]] 966 | # ## An array of address to gather stats about. Specify an ip or hostname 967 | # ## with port. ie localhost:2181, 10.0.0.1:2181, etc. 968 | # 969 | # ## If no servers are specified, then localhost is used as the host. 970 | # ## If no port is specified, 2181 is used 971 | # servers = [":2181"] 972 | 973 | 974 | 975 | ############################################################################### 976 | # SERVICE INPUT PLUGINS # 977 | ############################################################################### 978 | 979 | # # A Github Webhook Event collector 980 | # [[inputs.github_webhooks]] 981 | # ## Address and port to host Webhook listener on 982 | # service_address = ":1618" 983 | 984 | 985 | # # Read metrics from Kafka topic(s) 986 | # [[inputs.kafka_consumer]] 987 | # ## topic(s) to consume 988 | # topics = ["telegraf"] 989 | # ## an array of Zookeeper connection strings 990 | # zookeeper_peers = ["localhost:2181"] 991 | # ## Zookeeper Chroot 992 | # zookeeper_chroot = "/" 993 | # ## the name of the consumer group 994 | # consumer_group = "telegraf_metrics_consumers" 995 | # ## Offset (must be either "oldest" or "newest") 996 | # offset = "oldest" 997 | # 998 | # ## Data format to consume. 
999 | # ## Each data format has it's own unique set of configuration options, read 1000 | # ## more about them here: 1001 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 1002 | # data_format = "influx" 1003 | 1004 | 1005 | # # Read metrics from MQTT topic(s) 1006 | # [[inputs.mqtt_consumer]] 1007 | # servers = ["localhost:1883"] 1008 | # ## MQTT QoS, must be 0, 1, or 2 1009 | # qos = 0 1010 | # 1011 | # ## Topics to subscribe to 1012 | # topics = [ 1013 | # "telegraf/host01/cpu", 1014 | # "telegraf/+/mem", 1015 | # "sensors/#", 1016 | # ] 1017 | # 1018 | # # if true, messages that can't be delivered while the subscriber is offline 1019 | # # will be delivered when it comes back (such as on service restart). 1020 | # # NOTE: if true, client_id MUST be set 1021 | # persistent_session = false 1022 | # # If empty, a random client ID will be generated. 1023 | # client_id = "" 1024 | # 1025 | # ## username and password to connect MQTT server. 1026 | # # username = "telegraf" 1027 | # # password = "metricsmetricsmetricsmetrics" 1028 | # 1029 | # ## Optional SSL Config 1030 | # # ssl_ca = "/etc/telegraf/ca.pem" 1031 | # # ssl_cert = "/etc/telegraf/cert.pem" 1032 | # # ssl_key = "/etc/telegraf/key.pem" 1033 | # ## Use SSL but skip chain & host verification 1034 | # # insecure_skip_verify = false 1035 | # 1036 | # ## Data format to consume. 1037 | # ## Each data format has it's own unique set of configuration options, read 1038 | # ## more about them here: 1039 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 1040 | # data_format = "influx" 1041 | 1042 | 1043 | # # Read metrics from NATS subject(s) 1044 | # [[inputs.nats_consumer]] 1045 | # ## urls of NATS servers 1046 | # servers = ["nats://localhost:4222"] 1047 | # ## Use Transport Layer Security 1048 | # secure = false 1049 | # ## subject(s) to consume 1050 | # subjects = ["telegraf"] 1051 | # ## name a queue group 1052 | # queue_group = "telegraf_consumers" 1053 | # 1054 | # ## Data format to consume. 
1055 | # ## Each data format has it's own unique set of configuration options, read 1056 | # ## more about them here: 1057 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 1058 | # data_format = "influx" 1059 | 1060 | 1061 | # # Statsd Server 1062 | # [[inputs.statsd]] 1063 | # ## Address and port to host UDP listener on 1064 | # service_address = ":8125" 1065 | # ## Delete gauges every interval (default=false) 1066 | # delete_gauges = false 1067 | # ## Delete counters every interval (default=false) 1068 | # delete_counters = false 1069 | # ## Delete sets every interval (default=false) 1070 | # delete_sets = false 1071 | # ## Delete timings & histograms every interval (default=true) 1072 | # delete_timings = true 1073 | # ## Percentiles to calculate for timing & histogram stats 1074 | # percentiles = [90] 1075 | # 1076 | # ## separator to use between elements of a statsd metric 1077 | # metric_separator = "_" 1078 | # 1079 | # ## Parses tags in the datadog statsd format 1080 | # ## http://docs.datadoghq.com/guides/dogstatsd/ 1081 | # parse_data_dog_tags = false 1082 | # 1083 | # ## Statsd data translation templates, more info can be read here: 1084 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite 1085 | # # templates = [ 1086 | # # "cpu.* measurement*" 1087 | # # ] 1088 | # 1089 | # ## Number of UDP messages allowed to queue up, once filled, 1090 | # ## the statsd server will start dropping packets 1091 | # allowed_pending_messages = 10000 1092 | # 1093 | # ## Number of timing/histogram values to track per-measurement in the 1094 | # ## calculation of percentiles. Raising this limit increases the accuracy 1095 | # ## of percentiles but also increases the memory usage and cpu time. 1096 | # percentile_limit = 1000 1097 | 1098 | 1099 | # # Generic TCP listener 1100 | # [[inputs.tcp_listener]] 1101 | # ## Address and port to host TCP listener on 1102 | # service_address = ":8094" 1103 | # 1104 | # ## Number of TCP messages allowed to queue up. Once filled, the 1105 | # ## TCP listener will start dropping packets. 1106 | # allowed_pending_messages = 10000 1107 | # 1108 | # ## Maximum number of concurrent TCP connections to allow 1109 | # max_tcp_connections = 250 1110 | # 1111 | # ## Data format to consume. 1112 | # ## Each data format has it's own unique set of configuration options, read 1113 | # ## more about them here: 1114 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 1115 | # data_format = "influx" 1116 | 1117 | 1118 | # # Generic UDP listener 1119 | # [[inputs.udp_listener]] 1120 | # ## Address and port to host UDP listener on 1121 | # service_address = ":8092" 1122 | # 1123 | # ## Number of UDP messages allowed to queue up. Once filled, the 1124 | # ## UDP listener will start dropping packets. 1125 | # allowed_pending_messages = 10000 1126 | # 1127 | # ## Data format to consume. 
1128 | # ## Each data format has it's own unique set of configuration options, read
1129 | # ## more about them here:
1130 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1131 | # data_format = "influx"
1132 |
1133 |
--------------------------------------------------------------------------------
/fifo.mk:
--------------------------------------------------------------------------------
1 | REBAR = $(shell pwd)/rebar3
2 | REBAR_VSN = $(shell erl -noshell -eval '{ok, F} = file:consult("rebar.config"), [{release, {_, Vsn}, _}] = [O || {relx, [O | _]} <- F], io:format("~s", [Vsn]), init:stop().')
3 | VARS_VSN = $(shell grep 'bugsnag_app_version' rel/vars.config | sed -e 's/.*,//' -e 's/[^0-9.p]//g' -e 's/[.]$$//')
4 | APP_VSN = $(shell grep vsn apps/$(APP)/src/$(APP).app.src | sed 's/[^0-9.p]//g')
5 |
6 | include config.mk
7 |
8 | compile: $(REBAR) .git/hooks/pre-commit
9 | 	$(REBAR) compile
10 |
11 | .git/hooks/pre-commit: hooks/pre-commit
12 | 	[ -d .git/hooks ] && cp hooks/pre-commit .git/hooks || true
13 |
14 | pre-commit: test-scripts test-vsn lint xref dialyzer test
15 |
16 | dialyzer: $(REBAR)
17 | 	$(REBAR) dialyzer
18 |
19 | xref: $(REBAR)
20 | 	$(REBAR) xref
21 |
22 | test-scripts:
23 | 	for i in rel/files/*; do (head -1 $$i | grep -v sh > /dev/null) || bash -n $$i || exit 1; done;
24 |
25 | test: $(REBAR)
26 | 	$(REBAR) eunit
27 |
28 | lint: $(REBAR)
29 | 	$(REBAR) as lint lint
30 |
31 | $(REBAR):
32 | 	cp `which rebar3` $(REBAR)
33 |
34 | upgrade: $(REBAR)
35 | 	$(REBAR) upgrade
36 | 	$(MAKE) tree
37 |
38 | update: $(REBAR)
39 | 	$(REBAR) update
40 |
41 | rebar.lock: rebar.config $(REBAR)
42 | 	$(REBAR) compile
43 |
44 | tree: $(REBAR) rebar.lock
45 | 	$(REBAR) tree | grep -v '=' | sed 's/ (.*//' > tree
46 |
47 | tree-diff: tree
48 | 	git diff test -- tree
49 |
50 | update-fifo.mk:
51 | 	cp _build/default/lib/fifo_utils/priv/fifo.mk .
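# Typical invocations of the targets above (illustrative only; assumes a
# rebar3 binary is available so the $(REBAR) target can copy it):
#   make compile      # rebar3 compile, and (re)install the git pre-commit hook
#   make pre-commit   # test-scripts, test-vsn, lint, xref, dialyzer and eunit
#   make tree         # regenerate the dependency `tree` file via `rebar3 tree`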
52 |
53 | ###
54 | ### Packaging
55 | ###
56 |
57 | uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
58 | uname_V6 := $(shell sh -c 'uname -v 2>/dev/null | cut -c-6 || echo not')
59 |
60 | ifeq ($(uname_S),Darwin)
61 | PLATFORM = darwin
62 | REBARPROFILE = darwin
63 | export REBARPROFILE
64 | endif
65 | ifeq ($(uname_S),FreeBSD)
66 | PLATFORM = freebsd
67 | REBARPROFILE = freebsd
68 | export REBARPROFILE
69 | endif
70 | ifeq ($(uname_V6),joyent)
71 | PLATFORM = smartos
72 | REBARPROFILE = smartos
73 | export REBARPROFILE
74 | endif
75 |
76 | dist: ${PLATFORM} ;
77 |
78 | generic/rel: version_header
79 | 	$(REBAR) as ${REBARPROFILE} compile
80 | 	$(REBAR) as ${REBARPROFILE} release
81 |
82 | freebsd: ${PLATFORM}/rel
83 | 	$(MAKE) -C rel/pkgng package
84 |
85 | smartos: ${PLATFORM}/rel
86 | 	$(MAKE) -C rel/pkg package
87 |
88 | darwin: ${PLATFORM}/rel
89 |
90 | freebsd/rel: generic/rel
91 |
92 | smartos/rel: generic/rel
93 |
94 | darwin/rel: generic/rel
95 |
96 | dist-help:
97 | 	@echo "FiFo dist tool"
98 | 	@echo "You are running this on: ${PLATFORM}"
99 | 	@echo
100 | 	@echo "Currently supported platforms are: FreeBSD, SmartOS, Darwin/OSX"
101 | 	@echo
102 | 	@echo "SmartOS:"
103 | 	@echo " rebar profile: smartos $(shell if grep profiles -A12 rebar.config | grep smartos > /dev/null; then echo OK; else echo MISSING; fi)"
104 | 	@echo " packaging makefile: rel/pkg/Makefile $(shell if [ -f rel/pkg/Makefile ]; then echo OK; else echo MISSING; fi)"
105 | 	@echo "FreeBSD:"
106 | 	@echo " rebar profile: freebsd $(shell if grep profiles -A12 rebar.config | grep freebsd > /dev/null; then echo OK; else echo MISSING; fi)"
107 | 	@echo " packaging makefile: rel/pkgng/Makefile $(shell if [ -f rel/pkgng/Makefile ]; then echo OK; else echo MISSING; fi)"
108 | 	@echo "Darwin:"
109 | 	@echo " rebar profile: darwin $(shell if grep profiles -A12 rebar.config | grep darwin > /dev/null; then echo OK; else echo MISSING; fi)"
110 | 	@echo " packaging makefile: - no packaging -"
111 |
112 | ###
113 | ### Docs
114 | ###
115 | docs:
116 | 	$(REBAR) edoc
117 |
118 | ###
119 | ### Version
120 | ###
121 |
122 | build-vsn:
123 | 	@echo "$(REBAR_VSN)"
124 | vsn:
125 | 	@echo "## Config:"
126 | 	@echo "$(VERSION)"
127 | 	@echo "## apps/$(APP)/src/$(APP).app.src"
128 | 	@echo "$(APP_VSN)"
129 | 	@echo "## rebar.config"
130 | 	@echo "$(REBAR_VSN)"
131 | 	@echo "## rel/vars.config"
132 | 	@echo "$(VARS_VSN)"
133 |
134 | test-vsn:
135 | 	@echo "Testing against package version: $(VERSION)"
136 | 	@[ "$(VERSION)" = "$(APP_VSN)" ] && echo " - App version ok: $(APP_VSN)" || (echo "App version out of date" && false)
137 | 	@[ "$(VERSION)" = "$(REBAR_VSN)" ] && echo " - Rebar version ok: $(REBAR_VSN)" || (echo "Rebar version out of date" && false)
138 | 	@[ "$(VERSION)" = "$(VARS_VSN)" ] && echo " - Vars version ok: $(VARS_VSN)" || (echo "Vars version out of date" && false)
139 |
--------------------------------------------------------------------------------
/hooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # An example hook script to verify what is about to be committed.
4 | # Called by "git commit" with no arguments. The hook should
5 | # exit with non-zero status after issuing an appropriate message if
6 | # it wants to stop the commit.
7 | #
8 | # To enable this hook, rename this file to "pre-commit".
9 |
10 | if [ "x${SKIP}x" = "x1x" ]
11 | then
12 |     exit 0
13 | fi
14 |
15 | make pre-commit
16 | RETVAL=$?
17 | if [ $RETVAL -ne 0 ]
18 | then
19 |     echo "checks failed."
20 | exit 1 21 | fi 22 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | 3 | {erl_opts, [ 4 | {i, "./_build/default/plugins/gpb/include"}, 5 | debug_info, warnings_as_errors, {parse_transform, lager_transform}]}. 6 | 7 | %%------------------------------------------------------------------- 8 | %% Dependencies 9 | %%------------------------------------------------------------------- 10 | 11 | {deps, 12 | [ 13 | recon, 14 | trie, 15 | fifo_utils, 16 | {snappiest, "~>1.2.0"}, 17 | {fifo_lager, "~>0.1.4"}, 18 | {ranch, "~>1.3.0"}, 19 | {cowboy, "~>1.1.0"}, 20 | {jsone, "~>1.2.3"}, 21 | {hackney, "~>1.6.0"}, 22 | {mmath, "~>0.2.9"}, 23 | {dqe_idx, "~>0.4.0"}, 24 | {dp_decoder, "~>0.2.6"}, 25 | {dqe_idx_pg, "~>0.5.0"}, 26 | {ddb_connection, "~>0.4.2"} 27 | ]}. 28 | 29 | %%------------------------------------------------------------------- 30 | %% Plugins 31 | %%------------------------------------------------------------------- 32 | 33 | {project_plugins, [{rebar3_cuttlefish, "~>0.16.0"}]}. 34 | {plugins, [rebar3_gpb_plugin]}. 35 | 36 | %%------------------------------------------------------------------- 37 | %% Cuttlefish 38 | %%------------------------------------------------------------------- 39 | 40 | {cuttlefish, 41 | [{file_name, "dpx.conf"}, 42 | {schema_discovery, false}]}. 43 | 44 | %%------------------------------------------------------------------- 45 | %% Profiles 46 | %%------------------------------------------------------------------- 47 | 48 | {profiles, 49 | [ 50 | {darwin, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 51 | {smartos, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 52 | {freebsd, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 53 | {lint, [{plugins, [rebar3_lint]}]}, 54 | {deb, [{relx, [{dev_mode, false}, 55 | {overlay_vars, "rel/vars/deb.config"}, 56 | {include_erts, true}]}]} 57 | ]}. 58 | 59 | %%------------------------------------------------------------------- 60 | %% RELx 61 | %%------------------------------------------------------------------- 62 | 63 | {relx, 64 | [{release, {dpx, "0.3.3"}, 65 | [ddb_proxy, 66 | {recon, load}, 67 | sasl]}, 68 | 69 | {overlay_vars, "rel/vars.config"}, 70 | {dev_mode, true}, 71 | {include_erts, true}, 72 | {generate_start_script, false}, 73 | 74 | {overlay, 75 | [{mkdir, "etc"}, 76 | {mkdir, "share"}, 77 | {copy, "rel/files/erl", "erts-\{\{erts_vsn\}\}/bin/erl"}, 78 | {copy, "rel/files/nodetool", "erts-\{\{erts_vsn\}\}/bin/nodetool"}, 79 | {template, "share/dpx.xml", "share/dpx.xml"}, 80 | {template, "schema/dpx.schema", 81 | "share/schema/00-dpx.schema"}, 82 | {template, "schema/erlang_vm.schema", 83 | "share/schema/01-vm.schema"}, 84 | {template, "\{\{build_dir\}\}/lib/dqe_idx_pg/priv/dqe_idx_pg.schema", 85 | "share/schema/02-dqe_idx_pg.schema"}, 86 | {template, "\{\{build_dir\}\}/lib/dqe_idx/priv/dqe_idx.schema", 87 | "share/schema/03-dqe_idx.schema"}, 88 | {template, "\{\{build_dir\}\}/lib/ddb_connection/priv/ddb_connection.schema", 89 | "share/schema/04-ddb_connection.schema"}, 90 | {template, "schema/lager.schema", 91 | "share/schema/06-lager.schema"} 92 | ]} 93 | ]}. 94 | 95 | {xref_checks, [undefined_function_calls, 96 | undefined_functions, 97 | deprecated_function_calls, 98 | deprecated_functions]}. 99 | 100 | {dialyzer, [{warnings, [underspecs]}]}. 
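%% Illustrative profile invocations (a sketch based on the profiles above and
%% the targets in fifo.mk; assumes the bundled ./rebar3 is run from the repo root):
%%   ./rebar3 as deb release      - dev_mode off, rel/vars/deb.config overlay vars, ERTS bundled
%%   ./rebar3 as freebsd release  - selected via REBARPROFILE in fifo.mk's generic/rel target
%%   ./rebar3 as lint lint        - pulls in rebar3_lint for the lint profile only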
101 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.1.0", 2 | [{<<"certifi">>,{pkg,<<"certifi">>,<<"1.0.0">>},1}, 3 | {<<"cowboy">>,{pkg,<<"cowboy">>,<<"1.1.2">>},0}, 4 | {<<"cowlib">>,{pkg,<<"cowlib">>,<<"1.0.2">>},1}, 5 | {<<"ddb_client">>,{pkg,<<"ddb_client">>,<<"0.5.7">>},1}, 6 | {<<"ddb_connection">>,{pkg,<<"ddb_connection">>,<<"0.4.3">>},0}, 7 | {<<"dp_decoder">>,{pkg,<<"dp_decoder">>,<<"0.2.16">>},0}, 8 | {<<"dproto">>,{pkg,<<"dproto">>,<<"0.5.3">>},2}, 9 | {<<"dqe_idx">>,{pkg,<<"dqe_idx">>,<<"0.4.3">>},0}, 10 | {<<"dqe_idx_pg">>,{pkg,<<"dqe_idx_pg">>,<<"0.5.7">>},0}, 11 | {<<"dynamic_compile">>,{pkg,<<"dynamic_compile">>,<<"1.0.0">>},2}, 12 | {<<"epgsql">>,{pkg,<<"epgsql">>,<<"3.3.0">>},2}, 13 | {<<"fifo_lager">>,{pkg,<<"fifo_lager">>,<<"0.1.6">>},0}, 14 | {<<"fifo_utils">>,{pkg,<<"fifo_utils">>,<<"0.1.49">>},0}, 15 | {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},2}, 16 | {<<"hackney">>,{pkg,<<"hackney">>,<<"1.6.6">>},0}, 17 | {<<"ibrowse">>,{pkg,<<"ibrowse">>,<<"4.4.0">>},2}, 18 | {<<"idna">>,{pkg,<<"idna">>,<<"4.0.0">>},1}, 19 | {<<"jsone">>,{pkg,<<"jsone">>,<<"1.2.6">>},0}, 20 | {<<"jsx">>,{pkg,<<"jsx">>,<<"2.8.2">>},2}, 21 | {<<"jsxd">>,{pkg,<<"jsxd">>,<<"0.2.4">>},3}, 22 | {<<"lager">>,{pkg,<<"lager">>,<<"3.2.4">>},1}, 23 | {<<"lager_graylog">>,{pkg,<<"lager_graylog">>,<<"0.1.3">>},1}, 24 | {<<"lager_logstash_backend">>, 25 | {pkg,<<"lager_logstash_backend">>,<<"0.1.3">>}, 26 | 1}, 27 | {<<"metrics">>,{pkg,<<"metrics">>,<<"1.0.1">>},1}, 28 | {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.0.2">>},1}, 29 | {<<"mmath">>,{pkg,<<"mmath">>,<<"0.2.19">>},0}, 30 | {<<"otters">>,{pkg,<<"otters">>,<<"0.2.10">>},1}, 31 | {<<"pgapp">>,{pkg,<<"pgapp">>,<<"0.0.2">>},1}, 32 | {<<"poolboy">>,{pkg,<<"poolboy">>,<<"1.5.1">>},1}, 33 | {<<"quickrand">>,{pkg,<<"quickrand">>,<<"1.7.2">>},1}, 34 | {<<"ranch">>,{pkg,<<"ranch">>,<<"1.3.2">>},0}, 35 | {<<"recon">>,{pkg,<<"recon">>,<<"2.3.2">>},0}, 36 | {<<"snappiest">>,{pkg,<<"snappiest">>,<<"1.2.0">>},0}, 37 | {<<"sqlmig">>,{pkg,<<"sqlmig">>,<<"0.1.5">>},1}, 38 | {<<"ssl_verify_fun">>,{pkg,<<"ssl_verify_fun">>,<<"1.1.1">>},1}, 39 | {<<"trie">>,{pkg,<<"trie">>,<<"1.7.2">>},0}, 40 | {<<"uuid">>,{pkg,<<"uuid_erl">>,<<"1.7.2">>},1}]}. 
41 | [ 42 | {pkg_hash,[ 43 | {<<"certifi">>, <<"1C787A85B1855BA354F0B8920392C19AA1D06B0EE1362F9141279620A5BE2039">>}, 44 | {<<"cowboy">>, <<"61AC29EA970389A88ECA5A65601460162D370A70018AFE6F949A29DCA91F3BB0">>}, 45 | {<<"cowlib">>, <<"9D769A1D062C9C3AC753096F868CA121E2730B9A377DE23DEC0F7E08B1DF84EE">>}, 46 | {<<"ddb_client">>, <<"1BC5836D3EB6786778F6DD202B64D4649DAA44C8882FA2001BB22C1CA17C72E1">>}, 47 | {<<"ddb_connection">>, <<"B74F010840917CFAF6088391DCC9B91A1036213E4DDF2B58B4F7FBC00AEA01C2">>}, 48 | {<<"dp_decoder">>, <<"F08B30D5E53A8F47DD57AB1B48D81519685C9269B8FD59755EBA7E94EBA4F297">>}, 49 | {<<"dproto">>, <<"8B911526C22090681EE82C3D647A4A0BADAB0F18A3593E8C7C35609703AE54F9">>}, 50 | {<<"dqe_idx">>, <<"275F45869B1791BE6679D6B4A250C72CF43636CB4B3F2213E5D4AA6F8E0F1CF2">>}, 51 | {<<"dqe_idx_pg">>, <<"E6B24FEF5DD8F323FA14F67DB76DE15C0636B579E0CE6C5A3968CAD5EC3C0712">>}, 52 | {<<"dynamic_compile">>, <<"8171B2CB4953EA3ED2EF63F5B26ABF677ACD0CA32210C2A08A7A8406A743F76B">>}, 53 | {<<"epgsql">>, <<"974A578340E52012CBAB820CE756E7ED1DF1BAF0110C59A6753D8337A2CF9454">>}, 54 | {<<"fifo_lager">>, <<"5D350A2C85F27F776B28E022A731B1B2F4A8C0D1703416C2553C4046AA09E837">>}, 55 | {<<"fifo_utils">>, <<"09EC2D0370B133E49084CF14A99E443D0DDB3F05BA8F9E95AC3A780D60CF2BD1">>}, 56 | {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>}, 57 | {<<"hackney">>, <<"5564B4695D48FD87859E9DF77A7FA4B4D284D24519F0CD7CC898F09E8FBDC8A3">>}, 58 | {<<"ibrowse">>, <<"2D923325EFE0D2CB09B9C6A047B2835A5EDA69D8A47ED6FF8BC03628B764E991">>}, 59 | {<<"idna">>, <<"10AAA9F79D0B12CF0DEF53038547855B91144F1BFCC0EC73494F38BB7B9C4961">>}, 60 | {<<"jsone">>, <<"3EED1BC3F34D5727A011AB84A20230F4BDD8BB2B9C07C3AD0DCF412410231A74">>}, 61 | {<<"jsx">>, <<"7ACC7D785B5ABE8A6E9ADBDE926A24E481F29956DD8B4DF49E3E4E7BCC92A018">>}, 62 | {<<"jsxd">>, <<"C14114AFCA463F2D03D3FB6CC81FD51CDA8CA86A47E5AC3ABDF0CA572A73A413">>}, 63 | {<<"lager">>, <<"A6DEB74DAE7927F46BD13255268308EF03EB206EC784A94EAF7C1C0F3B811615">>}, 64 | {<<"lager_graylog">>, <<"B81F6CC71198F0CF113E74395A12F9C87E22F7B57F5E484A4802413E2A5B8F2C">>}, 65 | {<<"lager_logstash_backend">>, <<"86E7FBE08B34DAF9341E9FC397EADFCD7C1ABA0A0EA901FA7A4C454A3C4C335C">>}, 66 | {<<"metrics">>, <<"25F094DEA2CDA98213CECC3AEFF09E940299D950904393B2A29D191C346A8486">>}, 67 | {<<"mimerl">>, <<"993F9B0E084083405ED8252B99460C4F0563E41729AB42D9074FD5E52439BE88">>}, 68 | {<<"mmath">>, <<"8549F4A2A9C5239323A252974DAE8B40B61BE88826A0EF7E8077900DE943F9E6">>}, 69 | {<<"otters">>, <<"31A7B47D50E14B15CAE380D060F666A54A2D79688BCC29D4CCB70E2FE05B0EA8">>}, 70 | {<<"pgapp">>, <<"3E104BB777C8455D8B26D1538B67ABE0188EE97B1DF973FD936C2204CB316196">>}, 71 | {<<"poolboy">>, <<"6B46163901CFD0A1B43D692657ED9D7E599853B3B21B95AE5AE0A777CF9B6CA8">>}, 72 | {<<"quickrand">>, <<"E856F3C69FEC00D1ACCA8E56CB452B650E838D3A9720811410F439121EFAFE59">>}, 73 | {<<"ranch">>, <<"E4965A144DC9FBE70E5C077C65E73C57165416A901BD02EA899CFD95AA890986">>}, 74 | {<<"recon">>, <<"4444C879BE323B1B133EEC5241CB84BD3821EA194C740D75617E106BE4744318">>}, 75 | {<<"snappiest">>, <<"25706FEBB5ECAEA900D879A89C6D967C8D1BF700F8546BEBD0DEA514A8CCBFB7">>}, 76 | {<<"sqlmig">>, <<"8208D222A9335C1B1171F4FD1CE4150CF28B1FDF37CA9A66715AC434ED9B9AF4">>}, 77 | {<<"ssl_verify_fun">>, <<"28A4D65B7F59893BC2C7DE786DEC1E1555BD742D336043FE644AE956C3497FBE">>}, 78 | {<<"trie">>, <<"CF3779ACF42DE76F8A9B74517E46449D3D31A827ED7A7C287B3A534945966237">>}, 79 | {<<"uuid">>, <<"D596C8DD01A4AE48B9D8D51832CCC8F8302BF67ACD01336AEC3FCFAE6B9D2BC2">>}]} 80 | ]. 
81 |
--------------------------------------------------------------------------------
/rebar3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/rebar3
--------------------------------------------------------------------------------
/rel/deb/.gitignore:
--------------------------------------------------------------------------------
1 | stage
2 |
--------------------------------------------------------------------------------
/rel/deb/Makefile:
--------------------------------------------------------------------------------
1 | VERSION = $(BUILD_NUMBER) # This does not seem to work at the moment
2 | VERSION ?= 0
3 | COMPONENT = dpx
4 |
5 | .PHONY: prepare package
6 |
7 | prepare:
8 | 	mkdir -p stage/data/dalmatinerpx/example
9 | 	mkdir -p stage/etc/systemd/system
10 | 	mkdir -p stage/usr/bin
11 | 	mkdir -p stage/usr/lib/dalmatinerpx/lib
12 | 	mkdir -p stage/usr/share/dalmatinerpx
13 | 	mkdir -p stage/var/lib/dalmatinerpx
14 | 	mkdir -p stage/var/log/dalmatinerpx
15 | 	mkdir -p stage/DEBIAN
16 | 	cp -r ../../_build/deb/rel/dalmatinerpx/bin stage/usr/lib/dalmatinerpx/
17 | 	cp -r ../../_build/deb/rel/dalmatinerpx/etc stage/data/dalmatinerpx/etc/
18 | 	cp -r ../../_build/deb/rel/dalmatinerpx/erts-* stage/usr/lib/dalmatinerpx/
19 | 	tar -cC ../../_build/deb/rel/dalmatinerpx/lib --exclude c_src --exclude src . | tar -xC stage/usr/lib/dalmatinerpx/lib
20 | 	cp -r ../../_build/deb/rel/dalmatinerpx/releases stage/usr/lib/dalmatinerpx/
21 | 	cp -r ../../_build/deb/rel/dalmatinerpx/share stage/usr/lib/dalmatinerpx/
22 | 	cp -r ../../example/*conf stage/data/dalmatinerpx/example/
23 | 	cp -r systemd/dalmatinerpx.service stage/etc/systemd/system/dalmatinerpx.service
24 | 	cp control stage/DEBIAN/
25 | 	cp preinst stage/DEBIAN/
26 | 	cp postinst stage/DEBIAN/
27 | 	chmod 555 stage/DEBIAN/preinst
28 | 	chmod 555 stage/DEBIAN/postinst
29 |
30 | package:
31 | 	dpkg-deb --build stage
32 |
33 | clean:
34 | 	rm -rf ./stage
35 | 	rm -f *.deb
36 |
--------------------------------------------------------------------------------
/rel/deb/control:
--------------------------------------------------------------------------------
1 | Package: dalmatinerpx
2 | Version: 0.3.2
3 | License: MIT
4 | Section: base
5 | Vendor: Project-Fifo
6 | Maintainer: Project-Fifo
7 | Priority: optional
8 | Architecture: amd64
9 | Homepage: http://www.dalmatiner.io
10 | Description: Dalmatiner PX
11 |  Distributed TSDB. Woof.
12 |
--------------------------------------------------------------------------------
/rel/deb/copyright:
--------------------------------------------------------------------------------
1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
2 | Upstream-Name: ddb-proxy
3 |
4 | Files: *
5 | Copyright: 2014 Heinz N. Gies
6 |
7 | License: MIT
8 |
9 | Files: debian/*
10 | Copyright: 2016 Heinz N. Gies
11 | License: MIT
12 |
13 | License: MIT
14 | Permission is hereby granted, free of charge, to any person obtaining a
15 | copy of this software and associated documentation files (the "Software"),
16 | to deal in the Software without restriction, including without limitation
17 | the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 | and/or sell copies of the Software, and to permit persons to whom the
19 | Software is furnished to do so, subject to the following conditions:
20 | .
21 | The above copyright notice and this permission notice shall be included
22 | in all copies or substantial portions of the Software.
23 | .
24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
25 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
27 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
28 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
29 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
30 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 | 
32 | # Please also look if there are files or directories which have a
33 | # different copyright/license attached and list them here.
34 | # Please avoid to pick license terms that are more restrictive than the
35 | # packaged work, as it may make Debian's contributions unacceptable upstream.
36 | 
--------------------------------------------------------------------------------
/rel/deb/postinst:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ ! -d "/data/dalmatinerpx" ]; then
4 | mkdir -p /data/dalmatinerpx
5 | fi
6 | 
7 | if [ ! -d "/data/dalmatinerpx/log" ]; then
8 | mkdir -p /data/dalmatinerpx/log
9 | fi
10 | 
11 | # Sort out file permissions
12 | if [[ $(id dalmatinerpx 2>/dev/null) ]] ;then
13 | chown -R dalmatinerpx:dalmatinerpx /data/dalmatinerpx
14 | fi
15 | 
16 | CONFFILE=/data/dalmatinerpx/etc/dpx.conf
17 | 
18 | if [ ! -f "${CONFFILE}" ]
19 | then
20 | echo "Creating new configuration from example file."
21 | cp ${CONFFILE}.example ${CONFFILE}
22 | else
23 | echo "Please update your configuration according to the update manual!"
24 | fi
25 | 
--------------------------------------------------------------------------------
/rel/deb/preinst:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # create a system user for the process, modern useradd will add the default group too
4 | # Assume DDB home dir as default location
5 | if [[ ! $(id dalmatinerpx 2>/dev/null) ]] ;then
6 | # create a dalmatiner system account
7 | /usr/sbin/useradd --system --create-home --home-dir /data/dalmatinerpx -s /bin/bash dalmatinerpx
8 | fi
9 | 
--------------------------------------------------------------------------------
/rel/deb/systemd/dalmatinerpx.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=DalmatinerDB Proxy
3 | After=network.target
4 | 
5 | [Service]
6 | User=dalmatinerpx
7 | Group=dalmatinerpx
8 | ExecStart=/usr/lib/dalmatinerpx/bin/dpx console -noinput +Bd
9 | 
10 | [Install]
11 | WantedBy=multi-user.target
12 | 
13 | 
--------------------------------------------------------------------------------
/rel/deb/upstart/ddb_proxy:
--------------------------------------------------------------------------------
1 | description "Dalmatiner DB Proxy"
2 | start on filesystem
3 | stop on runlevel [06]
4 | respawn
5 | exec /usr/lib/ddb_proxy/bin/ddb_proxy console -noinput +Bd
6 | limit core unlimited unlimited
7 | limit nofile 262144 262144
8 | 
--------------------------------------------------------------------------------
/rel/files/erl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | # /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/ksh is.
4 | if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then 5 | POSIX_SHELL="true" 6 | export POSIX_SHELL 7 | exec /usr/bin/ksh $0 "$@" 8 | fi 9 | unset POSIX_SHELL # clear it so if we invoke other scripts, they run as ksh as well 10 | 11 | 12 | ## This script replaces the default "erl" in erts-VSN/bin. This is necessary 13 | ## as escript depends on erl and in turn, erl depends on having access to a 14 | ## bootscript (start.boot). Note that this script is ONLY invoked as a side-effect 15 | ## of running escript -- the embedded node bypasses erl and uses erlexec directly 16 | ## (as it should). 17 | ## 18 | ## Note that this script makes the assumption that there is a start_clean.boot 19 | ## file available in $ROOTDIR/release/VSN. 20 | 21 | ## installed by node_package (github.com/basho/node_package) 22 | 23 | # Determine the abspath of where this script is executing from. 24 | ERTS_BIN_DIR=$(cd ${0%/*} && pwd) 25 | 26 | # Now determine the root directory -- this script runs from erts-VSN/bin, 27 | # so we simply need to strip off two dirs from the end of the ERTS_BIN_DIR 28 | # path. 29 | ROOTDIR=${ERTS_BIN_DIR%/*/*} 30 | 31 | # Parse out release and erts info 32 | START_ERL=`cat $ROOTDIR/releases/start_erl.data` 33 | ERTS_VSN=${START_ERL% *} 34 | APP_VSN=${START_ERL#* } 35 | 36 | BINDIR=$ROOTDIR/erts-$ERTS_VSN/bin 37 | EMU=beam 38 | PROGNAME=`echo $0 | sed 's/.*\///'` 39 | CMD="$BINDIR/erlexec" 40 | export EMU 41 | export ROOTDIR 42 | export BINDIR 43 | export PROGNAME 44 | 45 | exec $CMD -boot $ROOTDIR/releases/$APP_VSN/start_clean ${1+"$@"} 46 | -------------------------------------------------------------------------------- /rel/files/nodetool: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %%! +fnu 3 | %% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*- 4 | %% ex: ft=erlang ts=4 sw=4 et 5 | %% ------------------------------------------------------------------- 6 | %% 7 | %% nodetool: Helper Script for interacting with live nodes 8 | %% 9 | %% ------------------------------------------------------------------- 10 | 11 | %% installed by node_package (github.com/basho/node_package) 12 | 13 | main(Args) -> 14 | io:setopts([{encoding, utf8}]), 15 | ok = start_epmd(), 16 | %% Extract the args 17 | {RestArgs, TargetNode} = process_args(Args, [], undefined), 18 | 19 | %% Extract the RPC timeout from process dictionary, if it's defined 20 | RpcTimeout = case erlang:get(rpctimeout) of 21 | undefined -> 22 | 60000; 23 | Value -> 24 | Value 25 | end, 26 | 27 | %% process_args() has side-effects (e.g. when processing "-name"), 28 | %% so take care of app-starting business first. 
29 | [application:start(App) || App <- [crypto, public_key, ssl]], 30 | 31 | %% any commands that don't need a running node 32 | case RestArgs of 33 | ["chkconfig", File] -> 34 | chkconfig(File); 35 | ["chkconfig", "-config", File|_] -> 36 | chkconfig(File); 37 | _ -> 38 | ok 39 | end, 40 | 41 | %% See if the node is currently running -- if it's not, we'll bail 42 | case {net_kernel:hidden_connect_node(TargetNode), net_adm:ping(TargetNode)} of 43 | {true, pong} -> 44 | ok; 45 | {false, pong} -> 46 | io:format(standard_error, "failed to connect to node ~p .\n", [TargetNode]), 47 | halt(1); 48 | {_, pang} -> 49 | io:format(standard_error, "Node ~p not responding to pings.\n", [TargetNode]), 50 | halt(1) 51 | end, 52 | 53 | case RestArgs of 54 | ["getpid"] -> 55 | io:format("~p\n", [list_to_integer(rpc:call(TargetNode, os, getpid, []))]); 56 | ["ping"] -> 57 | %% If we got this far, the node already responsed to a ping, so just dump 58 | %% a "pong" 59 | io:format("pong\n"); 60 | ["stop"] -> 61 | io:format("~p\n", [rpc:call(TargetNode, erlang, apply, 62 | [fun() -> 63 | catch error_logger:info_msg("Administrative stop\n"), 64 | init:stop() 65 | end, []], RpcTimeout)]); 66 | ["restart"] -> 67 | io:format("~p\n", [rpc:call(TargetNode, erlang, apply, 68 | [fun() -> 69 | catch error_logger:info_msg("Administrative restart\n"), 70 | init:restart() 71 | end, []], RpcTimeout)]); 72 | ["reboot"] -> 73 | io:format("~p\n", [rpc:call(TargetNode, erlang, apply, 74 | [fun() -> 75 | catch error_logger:info_msg("Administrative reboot\n"), 76 | init:reboot() 77 | end, []], RpcTimeout)]); 78 | ["rpc", Module, Function | RpcArgs] -> 79 | case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), 80 | [RpcArgs], RpcTimeout) of 81 | ok -> 82 | ok; 83 | {badrpc, Reason} -> 84 | io:format(standard_error, "RPC to ~p failed: ~p\n", [TargetNode, Reason]), 85 | halt(1); 86 | {error, Code} when is_integer(Code) -> 87 | halt(Code); 88 | {error, Code, Reason} when is_integer(Code) -> 89 | io:format(standard_error, "ERROR: ~p\n", [Reason]), 90 | halt(Code); 91 | _ -> 92 | halt(1) 93 | end; 94 | ["rpc_infinity", Module, Function | RpcArgs] -> 95 | case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), [RpcArgs], infinity) of 96 | ok -> 97 | ok; 98 | {badrpc, Reason} -> 99 | io:format(standard_error, "RPC to ~p failed: ~p\n", [TargetNode, Reason]), 100 | halt(1); 101 | {error, Code} when is_integer(Code) -> 102 | halt(Code); 103 | {error, Code, Reason} when is_integer(Code) -> 104 | io:format(standard_error, "ERROR: ~p\n", [Reason]), 105 | halt(Code); 106 | _ -> 107 | halt(1) 108 | end; 109 | ["rpcterms", Module, Function, ArgsAsString] -> 110 | case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), 111 | consult(ArgsAsString), RpcTimeout) of 112 | {badrpc, Reason} -> 113 | io:format(standard_error, "RPC to ~p failed: ~p\n", [TargetNode, Reason]), 114 | halt(1); 115 | {error, Code} when is_integer(Code) -> 116 | halt(Code); 117 | {error, Code, Reason} when is_integer(Code) -> 118 | io:format(standard_error, "ERROR: ~p\n", [Reason]), 119 | halt(Code); 120 | Other -> 121 | io:format("~p\n", [Other]) 122 | end; 123 | Other -> 124 | io:format("Other: ~p\n", [Other]), 125 | io:format("Usage: nodetool {ping|stop|restart|reboot|chkconfig}\n") 126 | end, 127 | net_kernel:stop(). 
128 | 129 | process_args([], Acc, TargetNode) -> 130 | {lists:reverse(Acc), TargetNode}; 131 | process_args(["-kernel", "net_ticktime", Value | Rest], Acc, TargetNode) -> 132 | application:set_env(kernel, net_ticktime, list_to_integer(Value)), 133 | process_args(Rest, Acc, TargetNode); 134 | process_args(["-setcookie", Cookie | Rest], Acc, TargetNode) -> 135 | erlang:set_cookie(node(), list_to_atom(Cookie)), 136 | process_args(Rest, Acc, TargetNode); 137 | process_args(["-name", TargetName | Rest], Acc, _) -> 138 | ThisNode = append_node_suffix(TargetName, "_maint_"), 139 | {ok, _} = net_kernel:start([ThisNode, longnames]), 140 | process_args(Rest, Acc, nodename(TargetName)); 141 | process_args(["-sname", TargetName | Rest], Acc, _) -> 142 | ThisNode = append_node_suffix(TargetName, "_maint_"), 143 | {ok, _} = net_kernel:start([ThisNode, shortnames]), 144 | process_args(Rest, Acc, nodename(TargetName)); 145 | process_args(["-rpctimeout", TimeoutStr | Rest], Acc, TargetNode) -> 146 | Timeout = case TimeoutStr of 147 | "infinity" -> infinity; 148 | _ -> list_to_integer(TimeoutStr) 149 | end, 150 | erlang:put(rpctimeout, Timeout), 151 | process_args(Rest, Acc, TargetNode); 152 | process_args([Arg | Rest], Acc, Opts) -> 153 | process_args(Rest, [Arg | Acc], Opts). 154 | 155 | 156 | start_epmd() -> 157 | [] = os:cmd(epmd_path() ++ " -daemon"), 158 | ok. 159 | 160 | epmd_path() -> 161 | ErtsBinDir = filename:dirname(escript:script_name()), 162 | Name = "epmd", 163 | case os:find_executable(Name, ErtsBinDir) of 164 | false -> 165 | case os:find_executable(Name) of 166 | false -> 167 | io:format("Could not find epmd.~n"), 168 | halt(1); 169 | GlobalEpmd -> 170 | GlobalEpmd 171 | end; 172 | Epmd -> 173 | Epmd 174 | end. 175 | 176 | 177 | nodename(Name) -> 178 | case string:tokens(Name, "@") of 179 | [_Node, _Host] -> 180 | list_to_atom(Name); 181 | [Node] -> 182 | [_, Host] = string:tokens(atom_to_list(node()), "@"), 183 | list_to_atom(lists:concat([Node, "@", Host])) 184 | end. 185 | 186 | append_node_suffix(Name, Suffix) -> 187 | case string:tokens(Name, "@") of 188 | [Node, Host] -> 189 | list_to_atom(lists:concat([Node, Suffix, os:getpid(), "@", Host])); 190 | [Node] -> 191 | list_to_atom(lists:concat([Node, Suffix, os:getpid()])) 192 | end. 193 | 194 | chkconfig(File) -> 195 | case file:consult(File) of 196 | {ok, _} -> 197 | io:format("ok\n"), 198 | halt(0); 199 | {error, {Line, Mod, Term}} -> 200 | io:format(standard_error, 201 | ["Error on line ", 202 | file:format_error({Line, Mod, Term}), "\n"], []), 203 | halt(1); 204 | {error, R} -> 205 | io:format(standard_error, 206 | ["Error reading config file: ", 207 | file:format_error(R), "\n"], []), 208 | halt(1) 209 | end. 210 | 211 | 212 | %% 213 | %% Given a string or binary, parse it into a list of terms, ala file:consult/0 214 | %% 215 | consult(Str) when is_list(Str) -> 216 | consult([], Str, []); 217 | consult(Bin) when is_binary(Bin)-> 218 | consult([], binary_to_list(Bin), []). 219 | 220 | consult(Cont, Str, Acc) -> 221 | case erl_scan:tokens(Cont, Str, 0) of 222 | {done, Result, Remaining} -> 223 | case Result of 224 | {ok, Tokens, _} -> 225 | {ok, Term} = erl_parse:parse_term(Tokens), 226 | consult([], Remaining, [Term | Acc]); 227 | {eof, _Other} -> 228 | lists:reverse(Acc); 229 | {error, Info, _} -> 230 | {error, Info} 231 | end; 232 | {more, Cont1} -> 233 | consult(Cont1, eof, Acc) 234 | end. 
235 | 
--------------------------------------------------------------------------------
/rel/freebsd/dalmatinerpx:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # $FreeBSD$
4 | #
5 | 
6 | # PROVIDE: dalmatinerpx
7 | # REQUIRE: LOGIN
8 | # KEYWORD: shutdown
9 | 
10 | 
11 | . /etc/rc.subr
12 | 
13 | name="dalmatinerpx"
14 | rcvar=dalmatinerpx_enable
15 | 
16 | load_rc_config $name
17 | : ${dalmatinerpx_enable="NO"}
18 | 
19 | 
20 | start_cmd="${name}_start"
21 | stop_cmd="${name}_stop"
22 | 
23 | 
24 | 
25 | dalmatinerpx_start()
26 | {
27 | if checkyesno ${rcvar}; then
28 | echo "* starting dalmatinerpx... "
29 | /usr/local/bin/sudo -u dalmatinerpx /usr/local/lib/dpx/bin/dpx start
30 | fi
31 | }
32 | 
33 | dalmatinerpx_stop()
34 | {
35 | if checkyesno ${rcvar}; then
36 | echo "* stopping dalmatinerpx... "
37 | /usr/local/bin/sudo -u dalmatinerpx /usr/local/lib/dpx/bin/dpx stop
38 | fi
39 | }
40 | 
41 | run_rc_command "$1"
42 | 
--------------------------------------------------------------------------------
/rel/freebsd/vars.config:
--------------------------------------------------------------------------------
1 | %%
2 | %% etc/vm.args
3 | %%
4 | {run_user, "dalmatinerdb"}.
--------------------------------------------------------------------------------
/rel/pkg/.gitignore:
--------------------------------------------------------------------------------
1 | build-info
2 | packlist
3 | 
--------------------------------------------------------------------------------
/rel/pkg/Makefile:
--------------------------------------------------------------------------------
1 | VERSION=0.3.3
2 | COMPONENT_INTERNAL=dpx
3 | COMPONENT=dalmatinerpx
4 | DEPS="erlang" "coreutils" "sudo"
5 | 
6 | include ../../_build/default/lib/fifo_utils/priv/pkg.mk
7 | 
8 | .PHONY: prepare
9 | 
10 | prepare:
11 | -rm -r $(STAGE_DIR)/$(COMPONENT)
12 | cp -r ../../_build/${REBARPROFILE}/rel/$(COMPONENT_INTERNAL) $(STAGE_DIR)/$(COMPONENT)
13 | 
14 | package: prepare $(FILE).tgz
15 | 
16 | clean: clean-pkg
17 | -rm *.tgz
18 | 
19 | 
--------------------------------------------------------------------------------
/rel/pkg/comment:
--------------------------------------------------------------------------------
1 | Dalmatiner Protocol Translator.
2 | 
--------------------------------------------------------------------------------
/rel/pkg/deinstall.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | 
3 | case $2 in
4 | DEINSTALL)
5 | ;;
6 | POST-DEINSTALL)
7 | echo "Please be aware that database and logfiles have not been"
8 | echo "deleted! Neither have the dalmatiner user or group."
9 | echo "If you don't need them any more remove the directories:" 10 | echo " /data/dalmatinerpx/log" 11 | echo " /data/dalmatinerpx/db" 12 | ;; 13 | esac 14 | -------------------------------------------------------------------------------- /rel/pkg/deploy/.gitignore: -------------------------------------------------------------------------------- 1 | dalmatiner-proxy 2 | -------------------------------------------------------------------------------- /rel/pkg/deploy/sbin/dpx: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | /opt/local/dalmatinerpx/bin/dalmatinerpx $@ 4 | -------------------------------------------------------------------------------- /rel/pkg/description: -------------------------------------------------------------------------------- 1 | DalmatinerPX is the protocol translator for DalmatinerDB allowing for multiple metric protocols to be sent to DalmatinerDB. Protocols include Graphite, Metrics2.0, Influx, Prometheus and OpenTSDB. 2 | 3 | https://dalmatiner.io 4 | -------------------------------------------------------------------------------- /rel/pkg/displayfile: -------------------------------------------------------------------------------- 1 | Congratulations the Dalmatiner PX server was sucessfully installed 2 | .------------------------------------------------------------------------------. 3 | | By default the service is disabled since it's common that you will need to | 4 | | do some additional configurations. | 5 | | | 6 | | Please have a look at the configuration files: | 7 | | | 8 | | * /data/dalmatinerpx/etc/dpx.conf | 9 | | | 10 | | Note: To enable Dalmatiner PX you first need to enable the epmd service. | 11 | | | 12 | `------------------------------------------------------------------------------' 13 | 14 | -------------------------------------------------------------------------------- /rel/pkg/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | AWK=/usr/bin/awk 4 | SED=/usr/bin/sed 5 | 6 | USER=dalmatiner 7 | GROUP=$USER 8 | 9 | case $2 in 10 | PRE-INSTALL) 11 | if grep "^$GROUP:" /etc/group > /dev/null 2>&1 12 | then 13 | echo "Group already exists, skipping creation." 14 | else 15 | echo Creating dalmatinerfe group ... 16 | groupadd $GROUP 17 | fi 18 | if id $USER > /dev/null 2>&1 19 | then 20 | echo "User already exists, skipping creation." 21 | else 22 | echo Creating dalmatinerfe user ... 23 | useradd -g $GROUP -d /data/dalmatinerpx -s /bin/false $USER 24 | /usr/sbin/usermod -K defaultpriv=basic,net_privaddr $USER 25 | fi 26 | echo Creating directories ... 27 | mkdir -p /data/dalmatinerpx/etc 28 | mkdir -p /data/dalmatinerpx/db 29 | mkdir -p /data/dalmatinerpx/log/sasl 30 | chown -R $USER:$GROUP /data/dalmatinerpx 31 | if [ -d /tmp/dalmatinerpx ] 32 | then 33 | chown -R $USER:$GROUP /tmp/dalmatinerpx 34 | fi 35 | ;; 36 | POST-INSTALL) 37 | echo Importing service ... 38 | svccfg import /opt/local/dalmatinerpx/share/dpx.xml 39 | echo Trying to guess configuration ... 40 | IP=`ifconfig net0 | grep inet | $AWK '{print $2}'` 41 | 42 | CONFFILE=/data/dalmatinerpx/etc/dpx.conf 43 | cp /opt/local/dalmatinerpx/etc/dpx.conf ${CONFFILE}.example 44 | 45 | if [ ! -f "${CONFFILE}" ] 46 | then 47 | echo "Creating new configuration from example file." 
48 | cp ${CONFFILE}.example ${CONFFILE} 49 | $SED -i bak -e "s/127.0.0.1/${IP}/g" ${CONFFILE} 50 | else 51 | echo "Please make sure you update your config according to the update manual!" 52 | fi 53 | ;; 54 | esac 55 | -------------------------------------------------------------------------------- /rel/pkg/post_pkg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/rel/pkg/post_pkg -------------------------------------------------------------------------------- /rel/pkg/pre_pkg: -------------------------------------------------------------------------------- 1 | @pkgdep erlang>=15.1.1 2 | -------------------------------------------------------------------------------- /rel/pkgng/+MANIFEST.in: -------------------------------------------------------------------------------- 1 | name: dalmatinerpx 2 | version: "__VNS__" 3 | origin: projectfifo/dalmatinerpx 4 | comment: "Dalmatiner TS Proxy" 5 | desc: "Dalmatiner TS Proxy" 6 | maintainer: info@project-fifo.net 7 | www: https://project-fifo.net 8 | prefix: / 9 | -------------------------------------------------------------------------------- /rel/pkgng/+POST_DEINSTALL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/rel/pkgng/+POST_DEINSTALL -------------------------------------------------------------------------------- /rel/pkgng/+POST_INSTALL: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | USER=dalmatinerpx 3 | GROUP=$USER 4 | 5 | echo Creating group ... 6 | getent group $GROUP >/dev/null 2>&1 || pw groupadd $GROUP 7 | 8 | echo Creating user ... 9 | id $USER >/dev/null 2>&1 || pw useradd $USER -g $GROUP -d /data/$USER -s /usr/sbin/nologin 10 | 11 | echo Creating directories ... 12 | mkdir -p /data/$USER/db/ring 13 | mkdir -p /data/$USER/etc 14 | mkdir -p /data/$USER/log/sasl 15 | chown -R $USER:$GROUP /data/$USER 16 | 17 | mkdir -p /tmp/$USER 18 | chown -R $USER:$GROUP /tmp/$USER/ 19 | 20 | 21 | echo Trying to guess configuration ... 22 | IP=$(ifconfig $(netstat -r | grep default | awk '{ print $4 }') | grep 'inet' | awk -F ' ' '{ print $2 }') 23 | 24 | DISTCONFFILE=/usr/local/lib/dpx/etc/dpx.conf 25 | CONFFILE=/data/dalmatinerpx/etc/dpx.conf 26 | 27 | if [ ! -f "${CONFFILE}" ] 28 | then 29 | echo "Creating new configuration from example file." 30 | cp ${DISTCONFFILE} ${CONFFILE}.example 31 | cp ${CONFFILE}.example ${CONFFILE} 32 | /usr/bin/sed -i bak -e "s/127.0.0.1/${IP}/g" ${CONFFILE} 33 | else 34 | echo "Please make sure you update your config according to the update manual!" 35 | fi 36 | 37 | OT=/data/dalmatinerpx/etc/rules.ot 38 | if [ ! -f "${OT}" ] 39 | then 40 | echo "none() -> drop." 
> ${OT} 41 | fi 42 | -------------------------------------------------------------------------------- /rel/pkgng/+PRE_DEINSTALL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/rel/pkgng/+PRE_DEINSTALL -------------------------------------------------------------------------------- /rel/pkgng/+PRE_INSTALL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/rel/pkgng/+PRE_INSTALL -------------------------------------------------------------------------------- /rel/pkgng/.gitignore: -------------------------------------------------------------------------------- 1 | +MANIFEST 2 | deploy 3 | *.txz 4 | -------------------------------------------------------------------------------- /rel/pkgng/Makefile: -------------------------------------------------------------------------------- 1 | INSTALL_DIR=usr/local/lib 2 | DEPS="coreutils" "sudo" 3 | REBARPROFILE ?= default 4 | 5 | include ../../config.mk 6 | include ../../_build/${REBARPROFILE}/lib/fifo_utils/priv/pkgng.mk 7 | 8 | .PHONY: package prepare clean 9 | 10 | prepare: 11 | -rm -r $(STAGE_DIR) 12 | mkdir -p $(STAGE_DIR)/$(INSTALL_DIR) 13 | cp -r ../../_build/${REBARPROFILE}/rel/$(COMPONENT_INTERNAL) $(STAGE_DIR)/$(INSTALL_DIR) 14 | rm -rf $(STAGE_DIR)/$(INSTALL_DIR)/lib/*/c_src 15 | mkdir -p $(STAGE_DIR)/usr/local/etc/rc.d/ 16 | cp ../freebsd/dalmatinerpx $(STAGE_DIR)/usr/local/etc/rc.d/ 17 | -------------------------------------------------------------------------------- /rel/vars.config: -------------------------------------------------------------------------------- 1 | %% 2 | %% rebar.config 3 | %% 4 | {build_dir, "{{output_dir}}/../.."}. 5 | 6 | %% 7 | %% app.config 8 | %% 9 | {run_user_home, "/data/dalmatinerpx"}. 10 | {prometheus_default_bucket, "prom"}. 11 | {prometheus_default_url, "http://localhost:9100/metrics"}. 12 | {prometheus_default_frequency, 10000}. 13 | {listeners_default_decoder, "dp_influx"}. 14 | {listeners_default_bucket, "InfluxDB"}. 15 | {listeners_default_port, 8086}. 16 | {listeners_default_proto, http}. 17 | {log_path, "/data/dalmatinerpx/log"}. 18 | 19 | %% 20 | %% etc/vm.args 21 | %% 22 | {node, "dalmatinerpx@127.0.0.1"}. 23 | {crash_dump, "/data/dalmatinerpx/log"}. 24 | {distributed_cookie, "dalmatinerpx_cookie"}. 25 | {run_user, "dalmatiner"}. 26 | 27 | {platform_etc_dir, "/data/dalmatinerpx/etc"}. 28 | {platform_bin_dir, "./bin"}. 29 | {platform_lib_dir, "./lib"}. 30 | {platform_log_dir, "/data/dalmatinerpx/log"}. 31 | {platform_db_dir, "/data/dalmatinerpx/db"}. 32 | {platform_gen_dir, "/data/dalmatinerpx/db"}. 33 | {platform_pipe_dir, "/tmp/dalmatinerpx_pipes/"}. 34 | 35 | {bugsnag_app_version, "0.3.3"}. 36 | {service, "ddb_proxy"}. 37 | -------------------------------------------------------------------------------- /rel/vars/deb.config: -------------------------------------------------------------------------------- 1 | %% 2 | %% etc/vm.args 3 | %% 4 | {run_user, "dalmatinerpx"}. -------------------------------------------------------------------------------- /schema/dpx.schema: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | 3 | %% @doc The path data gets stored into. 4 | {mapping, "run_user_home", "setup.home", 5 | [{default, "{{run_user_home}}"}, 6 | {datatype, string}]}. 
7 | 
8 | %% @doc The bucket where scraped Prometheus data will be stored. This property
9 | %% is preceded by the name of the scraper e.g.
10 | %% prometheus_scrapers.my_scraper_name.bucket = PromLocal
11 | {mapping, "prometheus_scrapers.$name.bucket", "ddb_proxy.prom_scrapers",
12 | [{datatype, string},
13 | {default, "{{prometheus_default_bucket}}"},
14 | {commented, "{{prometheus_default_bucket}}"},
15 | {include_default, "node_exporter"}]}.
16 | 
17 | %% @doc The metrics HTTP endpoint of the target system for scraping.
18 | %% This property is preceded by the name of the scraper e.g.
19 | %% prometheus_scrapers.my_scraper_name.url = "http://localhost:9090/metrics"
20 | {mapping, "prometheus_scrapers.$name.url", "ddb_proxy.prom_scrapers",
21 | [{datatype, string},
22 | {default, "{{prometheus_default_url}}"},
23 | {commented, "{{prometheus_default_url}}"},
24 | {include_default, "node_exporter"}]}.
25 | 
26 | %% @doc Interval of time between scraping requests to the target endpoint.
27 | %% This property is prefixed by the name of the scraper e.g.
28 | %% prometheus_scrapers.my_scraper_name.frequency = 10s
29 | {mapping, "prometheus_scrapers.$name.frequency", "ddb_proxy.prom_scrapers",
30 | [{datatype, {duration, ms}},
31 | {default, {{prometheus_default_frequency}}},
32 | {commented, {{prometheus_default_frequency}}},
33 | {include_default, "node_exporter"}]}.
34 | 
35 | {translation, "ddb_proxy.prom_scrapers",
36 | fun(Conf) ->
37 | ScraperList = cuttlefish_variable:filter_by_prefix("prometheus_scrapers", Conf),
38 | Names = [ Name || {[_, Name, _], _} <- ScraperList],
39 | Scrapers = [begin
40 | Bucket =
41 | cuttlefish:conf_get(["prometheus_scrapers", Name, "bucket"], Conf),
42 | Url =
43 | cuttlefish:conf_get(["prometheus_scrapers", Name, "url"], Conf),
44 | Frequency =
45 | cuttlefish:conf_get(["prometheus_scrapers", Name, "frequency"], Conf),
46 | {list_to_atom(Name), list_to_binary(Bucket), Url, Frequency}
47 | end || Name <- Names],
48 | lists:usort(Scrapers)
49 | end}.
50 | 
51 | %% @doc The bucket where incoming data will be stored by the listener.
52 | %% This property is prefixed by the name of the line protocol used, which can
53 | %% be one of 'dp_influx', 'dp_graphite', 'dp_otsdb', 'dp_bsdsyslog',
54 | %% 'dp_metrics2' or 'dp_prom_writer' e.g.
55 | %% listeners.dp_influx.bucket = InfluxData
56 | {mapping, "listeners.$decoder.bucket", "ddb_proxy.listeners",
57 | [{datatype, string},
58 | {default, "{{listeners_default_bucket}}"},
59 | {include_default, "{{listeners_default_decoder}}"}]}.
60 | 
61 | %% @doc The port used for listening.
62 | %% This property is prefixed by the name of the line protocol used, which can
63 | %% be one of 'dp_influx', 'dp_graphite', 'dp_otsdb', 'dp_bsdsyslog',
64 | %% 'dp_metrics2' or 'dp_prom_writer' e.g.
65 | %% listeners.dp_influx.port = 8086
66 | {mapping, "listeners.$decoder.port", "ddb_proxy.listeners",
67 | [{datatype, integer},
68 | {default, "{{listeners_default_port}}"},
69 | {include_default, "{{listeners_default_decoder}}"}]}.
70 | 
71 | %% @doc The transport used for listening, which may either be 'http', 'tcp' or
72 | %% 'udp'.
73 | %% This property is prefixed by the name of the line protocol used, which can 74 | %% be one of 'dp_influx', 'dp_graphite', 'dp_otsdb', 'dp_bsdsyslog', 75 | %% 'dp_metrics2' or 'dp_prom_writer' 76 | %% listeners.dp_influx.protocol = http 77 | {mapping, "listeners.$decoder.protocol", "ddb_proxy.listeners", 78 | [{datatype, string}, 79 | {default, "{{listeners_default_proto}}"}, 80 | {include_default, "{{listeners_default_decoder}}"}]}. 81 | 82 | {translation, "ddb_proxy.listeners", 83 | fun(Conf) -> 84 | ListenerList = cuttlefish_variable:filter_by_prefix("listeners", Conf), 85 | Names = [ Name || {[_, Name, _], _} <- ListenerList], 86 | Listeners = [begin 87 | Bucket = 88 | cuttlefish:conf_get(["listeners", Name, "bucket"], Conf), 89 | Port = 90 | cuttlefish:conf_get(["listeners", Name, "port"], Conf), 91 | Protocol = 92 | cuttlefish:conf_get(["listeners", Name, "protocol"], Conf), 93 | {list_to_atom(Name), list_to_binary(Bucket), 94 | Port, list_to_atom(Protocol)} 95 | end || Name <- Names], 96 | lists:usort(Listeners) 97 | end}. 98 | 99 | %% @doc DQE Indexing backend like dqe_idx_pg or dqe_idx_ddb 100 | {mapping, "idx.backend", "dqe_idx.lookup_module", 101 | [{default, "dqe_idx_pg"}, 102 | {datatype, atom}]}. 103 | -------------------------------------------------------------------------------- /schema/erlang_vm.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | %% @doc Enables or disables the kernel poll functionality if the 4 | %% emulator supports it. If the emulator does not support kernel poll, 5 | %% and the K flag is passed to the emulator, a warning is issued at 6 | %% startup. 7 | %% 8 | %% Similar information at: http://erlang.org/doc/man/erl.html 9 | {mapping, "erlang.K", "vm_args.+K", [ 10 | {default, on}, 11 | {datatype, flag}, 12 | hidden 13 | ]}. 14 | 15 | %%%% Tunables 16 | %% @doc Name of the Erlang node 17 | {mapping, "nodename", "vm_args.-name", [ 18 | {default, "{{node}}"} 19 | ]}. 20 | 21 | %% @doc Cookie for distributed node communication. All nodes in the 22 | %% same cluster should use the same cookie or they will not be able to 23 | %% communicate. 24 | {mapping, "distributed_cookie", "vm_args.-setcookie", [ 25 | {default, "ddb_proxy_cookie"} 26 | ]}. 27 | 28 | %% @doc Sets the number of threads in async thread pool, valid range 29 | %% is 0-1024. If thread support is available, the default is 64. 30 | %% 31 | %% More information at: http://erlang.org/doc/man/erl.html 32 | {mapping, "erlang.async_threads", "vm_args.+A", [ 33 | {default, 30}, 34 | {datatype, integer}, 35 | {validators, ["range:0-1024"]} 36 | ]}. 37 | 38 | {validator, "range:0-1024", "must be 0 to 1024", 39 | fun(X) -> X >= 0 andalso X =< 1024 end}. 40 | 41 | %% @doc Set the location of crash dumps 42 | {mapping, "erlang.crash_dump", "vm_args.-env ERL_CRASH_DUMP", [ 43 | {default, "{{crash_dump}}"}, 44 | {datatype, file}, 45 | hidden 46 | ]}. 47 | -------------------------------------------------------------------------------- /schema/lager.schema: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | %% complex lager example 3 | %% @doc where do you want the console.log output: 4 | %% off : nowhere 5 | %% file: the file specified by log.console.file 6 | %% console : standard out 7 | %% both : log.console.file and standard out. 8 | {mapping, "log.console", "lager.handlers", [ 9 | {default, file}, 10 | {datatype, {enum, [off, file, console, both]}} 11 | ]}. 
12 | 13 | %% @doc the log level of the console log 14 | {mapping, "log.console.level", "lager.handlers", [ 15 | {default, info}, 16 | {datatype, {enum, [debug, info, warning, error]}} 17 | ]}. 18 | 19 | %% @doc location of the console log 20 | {mapping, "log.console.file", "lager.handlers", [ 21 | {default, "{{log_path}}/console.log"} 22 | ]}. 23 | 24 | %% *gasp* notice the same @mapping! 25 | %% @doc location of the error log 26 | {mapping, "log.error.file", "lager.handlers", [ 27 | {default, "{{log_path}}/error.log"} 28 | ]}. 29 | 30 | %% *gasp* notice the same @mapping! 31 | %% @doc location of the debug log 32 | {mapping, "log.debug.file", "lager.handlers", [ 33 | {default, "{{log_path}}/debug.log"} 34 | ]}. 35 | 36 | %% *gasp* notice the same @mapping! 37 | %% @doc turn on syslog 38 | {mapping, "log.syslog", "lager.handlers", [ 39 | {default, off}, 40 | {datatype, {enum, [on, off]}} 41 | ]}. 42 | 43 | { translation, 44 | "lager.handlers", 45 | fun(Conf) -> 46 | SyslogHandler = case cuttlefish:conf_get("log.syslog", Conf) of 47 | on -> [{lager_syslog_backend, ["riak", daemon, info]}]; 48 | _ -> [] 49 | end, 50 | ErrorHandler = case cuttlefish:conf_get("log.error.file", Conf) of 51 | undefined -> []; 52 | ErrorFilename -> [{lager_file_backend, [{file, ErrorFilename}, 53 | {level, error}, 54 | {size, 10485760}, 55 | {date, "$D0"}, 56 | {count, 5}]}] 57 | end, 58 | 59 | ConsoleLogLevel = cuttlefish:conf_get("log.console.level", Conf), 60 | ConsoleLogFile = cuttlefish:conf_get("log.console.file", Conf), 61 | 62 | ConsoleHandler = {lager_console_handler, ConsoleLogLevel}, 63 | ConsoleFileHandler = {lager_file_backend, [{file, ConsoleLogFile}, 64 | {level, ConsoleLogLevel}, 65 | {size, 10485760}, 66 | {date, "$D0"}, 67 | {count, 5}]}, 68 | 69 | ConsoleHandlers = case cuttlefish:conf_get("log.console", Conf) of 70 | off -> []; 71 | file -> [ConsoleFileHandler]; 72 | console -> [ConsoleHandler]; 73 | both -> [ConsoleHandler, ConsoleFileHandler]; 74 | _ -> [] 75 | end, 76 | DebugHandler = case cuttlefish:conf_get("log.debug.file", Conf) of 77 | undefined -> []; 78 | DebugFilename -> [{lager_file_backend, [{file, DebugFilename}, 79 | {level, debug}, 80 | {size, 10485760}, 81 | {date, "$D0"}, 82 | {count, 5}]}] 83 | end, 84 | 85 | SyslogHandler ++ ConsoleHandlers ++ ErrorHandler ++ DebugHandler 86 | end 87 | }. 88 | 89 | %% Lager Config 90 | 91 | %% @doc Whether to write a crash log, and where. 92 | %% Commented/omitted/undefined means no crash logger. 93 | {mapping, "log.crash.file", "lager.crash_log", [ 94 | {default, "{{log_path}}/crash.log"} 95 | ]}. 96 | 97 | %% @doc Maximum size in bytes of events in the crash log - defaults to 65536 98 | %% @datatype integer 99 | %% @mapping 100 | {mapping, "log.crash.msg_size", "lager.crash_log_msg_size", [ 101 | {default, "64KB"}, 102 | {datatype, bytesize} 103 | ]}. 104 | 105 | %% @doc Maximum size of the crash log in bytes, before its rotated, set 106 | %% to 0 to disable rotation - default is 0 107 | {mapping, "log.crash.size", "lager.crash_log_size", [ 108 | {default, "10MB"}, 109 | {datatype, bytesize} 110 | ]}. 111 | 112 | %% @doc What time to rotate the crash log - default is no time 113 | %% rotation. See the lager README for a description of this format: 114 | %% https://github.com/basho/lager/blob/master/README.org 115 | {mapping, "log.crash.date", "lager.crash_log_date", [ 116 | {default, "$D0"} 117 | ]}. 
118 | 
119 | %% @doc Number of rotated crash logs to keep, 0 means keep only the
120 | %% current one - default is 0
121 | {mapping, "log.crash.count", "lager.crash_log_count", [
122 | {default, 5},
123 | {datatype, integer}
124 | ]}.
125 | 
126 | %% @doc Whether to redirect error_logger messages into lager - defaults to true
127 | {mapping, "log.error.redirect", "lager.error_logger_redirect", [
128 | {default, on},
129 | {datatype, {enum, [on, off]}}
130 | ]}.
131 | 
132 | { translation,
133 | "lager.error_logger_redirect", fun(Conf) ->
134 | Setting = cuttlefish:conf_get("log.error.redirect", Conf),
135 | case Setting of
136 | on -> true;
137 | off -> false;
138 | _Default -> true
139 | end
140 | end}.
141 | 
142 | %% @doc maximum number of error_logger messages to handle in a second
143 | %% lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup
144 | {mapping, "log.error.messages_per_second", "lager.error_logger_hwm", [
145 | {default, 100},
146 | {datatype, integer}
147 | ]}.
148 | 
149 | %% SASL
150 | %% We should never care about this
151 | {mapping, "sasl", "sasl.sasl_error_logger", [
152 | {default, off},
153 | {datatype, {enum, [on, off]}},
154 | {level, advanced}
155 | ]}.
156 | 
157 | { translation,
158 | "sasl.sasl_error_logger",
159 | fun(Conf) ->
160 | case cuttlefish:conf_get("sasl", Conf) of %%how to pull default?
161 | on -> true;
162 | _ -> false
163 | end
164 | end
165 | }.
166 | 
--------------------------------------------------------------------------------
/share/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalmatinerdb/ddb_proxy/1d07fecadaae201ba0fee0be5e9ed5898b215980/share/.gitignore
--------------------------------------------------------------------------------
/share/dpx.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this dump: this is the SMF service manifest for the dpx service, imported by rel/pkg/install.sh via svccfg; see the repository for the original file.]
--------------------------------------------------------------------------------
/tree:
--------------------------------------------------------------------------------
1 | ├─ cowboy─1.1.2
2 | │ └─ cowlib─1.0.2
3 | ├─ ddb_connection─0.4.3
4 | │ ├─ ddb_client─0.5.7
5 | │ │ └─ dproto─0.5.3
6 | │ │ └─ jsxd─0.2.4
7 | │ ├─ otters─0.2.10
8 | │ │ ├─ dynamic_compile─1.0.0
9 | │ │ └─ ibrowse─4.4.0
10 | │ └─ poolboy─1.5.1
11 | ├─ ddb_proxy─0.3.3
12 | ├─ dp_decoder─0.2.16
13 | ├─ dqe_idx─0.4.3
14 | ├─ dqe_idx_pg─0.5.7
15 | │ ├─ lager─3.2.4
16 | │ │ └─ goldrush─0.1.9
17 | │ ├─ pgapp─0.0.2
18 | │ │ └─ epgsql─3.3.0
19 | │ └─ sqlmig─0.1.5
20 | ├─ fifo_lager─0.1.6
21 | │ ├─ lager_graylog─0.1.3
22 | │ └─ lager_logstash_backend─0.1.3
23 | │ └─ jsx─2.8.2
24 | ├─ fifo_utils─0.1.49
25 | │ ├─ quickrand─1.7.2
26 | │ └─ uuid─1.7.2
27 | ├─ hackney─1.6.6
28 | │ ├─ certifi─1.0.0
29 | │ ├─ idna─4.0.0
30 | │ ├─ metrics─1.0.1
31 | │ ├─ mimerl─1.0.2
32 | │ └─ ssl_verify_fun─1.1.1
33 | ├─ jsone─1.2.6
34 | ├─ mmath─0.2.19
35 | ├─ ranch─1.3.2
36 | ├─ recon─2.3.2
37 | ├─ snappiest─1.2.0
38 | └─ trie─1.7.2
39 | 
--------------------------------------------------------------------------------
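For reference, a minimal dpx.conf sketch assembled from the defaults defined above: the indexer backend comes from schema/dpx.schema, and the listener and scraper values are the shipped defaults from rel/vars.config (dp_influx on port 8086 over HTTP, a node_exporter scrape every 10 seconds). It is illustrative only; adjust the bucket names, port, and scrape URL to your environment.

```
# DQE indexer backend (Postgres-backed indexing)
idx.backend = dqe_idx_pg

# InfluxDB line-protocol listener
listeners.dp_influx.bucket = InfluxDB
listeners.dp_influx.port = 8086
listeners.dp_influx.protocol = http

# Prometheus scraper polling a local node_exporter
prometheus_scrapers.node_exporter.bucket = prom
prometheus_scrapers.node_exporter.url = http://localhost:9100/metrics
prometheus_scrapers.node_exporter.frequency = 10s
```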