├── .github
│   └── workflows
│       └── erlang.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── Makefile
├── README.md
├── development.config
├── elvis.config
├── include
│   └── kinetic.hrl
├── kinesis.dict
├── nextroll.dict
├── rebar.config
├── rebar.lock
├── src
│   ├── kinetic.app.src
│   ├── kinetic.erl
│   ├── kinetic_config.erl
│   ├── kinetic_stream.erl
│   ├── kinetic_stream_sup.erl
│   ├── kinetic_sup.erl
│   └── kinetic_utils.erl
└── test
    ├── kinetic_config_tests.erl
    ├── kinetic_stream_tests.erl
    ├── kinetic_sup_tests.erl
    ├── kinetic_tests.erl
    └── kinetic_utils_tests.erl

--------------------------------------------------------------------------------
/.github/workflows/erlang.yml:
--------------------------------------------------------------------------------
name: Erlang CI

on: [push, pull_request]

env:
  ERL_FLAGS: "-enable-feature all"

jobs:
  build:
    runs-on: ubuntu-24.04

    strategy:
      matrix:
        include:
          - otp: '25.2.1'
            rebar: '3.20.0'
          - otp: '27.3.4'
            rebar: '3.24.0'

    steps:
      - uses: actions/checkout@v4
      - uses: erlef/setup-beam@v1
        id: setup-beam
        with:
          otp-version: ${{matrix.otp}}
          rebar3-version: ${{matrix.rebar}}
      - name: Restore _build
        uses: actions/cache@v4
        with:
          path: _build
          key: _build-cache-for-os-${{runner.os}}-otp-${{steps.setup-beam.outputs.otp-version}}-rebar3-${{steps.setup-beam.outputs.rebar3-version}}-hash-${{hashFiles('rebar.lock')}}
      - name: Restore rebar3's cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/rebar3
          key: rebar3-cache-for-os-${{runner.os}}-otp-${{steps.setup-beam.outputs.otp-version}}-rebar3-${{steps.setup-beam.outputs.rebar3-version}}-hash-${{hashFiles('rebar.lock')}}
      - name: Compile
        run: rebar3 compile
      - name: Format check
        run: rebar3 format --verify
      - name: Run tests and verifications
        run: rebar3 test
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
doc/
_build/
*.o
*.beam
*.plt
erl_crash.dump
ebin
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
See the [Releases](../../releases) page.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2014, AdRoll
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of AdRoll nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------

PREFIX:=../

REBAR=rebar3

.PHONY: all edoc clean test dialyzer xref

all:
	@$(REBAR) compile

edoc:
	@$(REBAR) edoc

test:
	@$(REBAR) test

clean:
	@$(REBAR) clean

dialyzer:
	@$(REBAR) dialyzer || $(REBAR) dialyzer

xref:
	@$(REBAR) xref
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Kinetic
=======

Kinetic is an Erlang Kinesis client, built as an OTP application that is
easy to integrate and work with.

If you run Kinetic on an EC2 instance with an IAM role, it needs
essentially no configuration: it picks up the region and credentials
from the instance context. Any of these values can still be overridden
through configuration.

You can start an Erlang shell with:

    $ erl -pa _build/default/lib/*/ebin -s inets -s crypto -s ssl -s lhttpc -config development -s kinetic

    1> kinetic:list_streams([]).
    {ok, [{<<"HasMoreStreams">>,false},{<<"StreamNames">>,[]}]}

`development.config` lets the developer override configuration values
for different setups.
--------------------------------------------------------------------------------
/development.config:
--------------------------------------------------------------------------------
%% -*- erlang -*-
[{kinetic, [{args, []}]}].
--------------------------------------------------------------------------------
/elvis.config:
--------------------------------------------------------------------------------
[{elvis,
  [{config,
    [#{dirs => ["src"],
       filter => "*.erl",
       ruleset => erl_files,
       rules =>
           [{elvis_style, nesting_level, #{level => 4}},
            {elvis_style, state_record_and_type, disable}]},
     #{dirs => ["."],
       filter => "rebar.config",
       ruleset => rebar_config},
     #{dirs => ["."],
       filter => "elvis.config",
       ruleset => elvis_config}]}]}].
--------------------------------------------------------------------------------
/include/kinetic.hrl:
--------------------------------------------------------------------------------
-define(KINETIC_DATA, kinetic_data).
-define(KINETIC_STREAM, kinetic_stream).
-define(KINESIS_MAX_PUT_SIZE, 51200).
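
%% Note: 51200 bytes = 50 KB. kinetic_stream uses this both as the per-record
%% size limit (larger payloads are rejected with {error, max_size_exceeded}
%% before they reach Kinesis) and as the threshold at which its buffer is
%% flushed.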

-record(kinetic_arguments,
        {region :: undefined | string(),
         date :: undefined | string(),
         host :: undefined | string(),
         url :: undefined | string(),
         lhttpc_opts = [] :: [any()],
         timeout :: undefined | pos_integer(),
         aws_credentials}).
--------------------------------------------------------------------------------
/kinesis.dict:
--------------------------------------------------------------------------------
__type
createstream
data
deletestream
describestream
errorcode
errormessage
getrecords
getsharditerator
kinesis_20131202
liststreams
mergeshards
partitionkey
provisionedthroughputexceededexception
putrecord
putrecords
sequencenumber
splitshard
streamname
--------------------------------------------------------------------------------
/nextroll.dict:
--------------------------------------------------------------------------------
+
-
api
args
bsd-3-clause
config
erlang
erliam
gen_server
new_args
todo
--------------------------------------------------------------------------------
/rebar.config:
--------------------------------------------------------------------------------
{erl_opts,
 [inline_list_funcs,
  warn_unused_import,
  warn_export_vars,
  warnings_as_errors,
  verbose,
  report,
  debug_info]}.

{minimum_otp_vsn, "25"}.

{cover_enabled, true}.

{cover_opts, [verbose]}.

{cover_print_enabled, true}.

{deps,
 [{lhttpc, "1.4.0", {pkg, nextroll_lhttpc}},
  {jiffy, "1.1.1"},
  {erliam, "1.0.1"},
  {b64fast, "0.2.3"}]}.

{xref_checks,
 [undefined_function_calls,
  locals_not_used,
  deprecated_function_calls,
  deprecated_functions]}.

{dialyzer,
 [{warnings, [unknown, no_return, error_handling, missing_return, extra_return]},
  {plt_apps, top_level_deps},
  {plt_extra_apps, [b64fast]},
  {plt_location, local},
  {base_plt_apps, [erts, stdlib, kernel]},
  {base_plt_location, global}]}.

{profiles, [{test, [{deps, [{meck, "0.9.2"}]}]}]}.

{spellcheck,
 [{ignore_regex,
   "(eunit|~>|<-|//|=|[|]|[.]hrl|\\d[.]\\d|<<[\"]|[a-z][a-z][-][a-z]|[?][A-Z])"},
  {files, ["src/*"]},
  {additional_dictionaries, ["nextroll.dict", "kinesis.dict"]}]}.

{alias, [{test, [format, spellcheck, lint, hank, xref, dialyzer, eunit, cover, edoc]}]}.

{project_plugins,
 [{rebar3_hex, "~> 7.0.7"},
  {rebar3_format, "~> 1.3.0"},
  {rebar3_lint, "~> 3.2.3"},
  {rebar3_hank, "~> 1.4.0"},
  {rebar3_sheldon, "~> 0.4.3"}]}.
--------------------------------------------------------------------------------
/rebar.lock:
--------------------------------------------------------------------------------
{"1.2.0",
 [{<<"b64fast">>,{pkg,<<"b64fast">>,<<"0.2.3">>},0},
  {<<"erliam">>,{pkg,<<"erliam">>,<<"1.0.1">>},0},
  {<<"jiffy">>,{pkg,<<"jiffy">>,<<"1.1.1">>},0},
  {<<"lhttpc">>,{pkg,<<"nextroll_lhttpc">>,<<"1.4.0">>},0}]}.
[
{pkg_hash,[
 {<<"b64fast">>, <<"07649CF971A0ED088DEFC4F75767A52E08C468CC1D448693F4FB3051092AB987">>},
 {<<"erliam">>, <<"20E1ECB876AFDEEC2DE07483E2D174B1E3DB38848ED981145DAB9A889E7B55F9">>},
 {<<"jiffy">>, <<"ACA10F47AA91697BF24AB9582C74E00E8E95474C7EF9F76D4F1A338D0F5DE21B">>},
 {<<"lhttpc">>, <<"45282FF22BC55E6AE751CF87AC42C261DC4FAAFADD9C034E127ECED74E672FAB">>}]},
{pkg_hash_ext,[
 {<<"b64fast">>, <<"DE874B5302D607840B787E93785628035FF55334EBCFAFB6413B735983913D23">>},
 {<<"erliam">>, <<"2EE375544AC36711BEEB5EC56DB060488447CECC308763BC8B4A4FEE894AAF76">>},
 {<<"jiffy">>, <<"62E1F0581C3C19C33A725C781DFA88410D8BFF1BBAFC3885A2552286B4785C4C">>},
 {<<"lhttpc">>, <<"57BA3D5720FBD17C75D8563169394B5F6CD160161D64A8A9F96F7E829221C648">>}]}
].
--------------------------------------------------------------------------------
/src/kinetic.app.src:
--------------------------------------------------------------------------------
{application,
 kinetic,
 [{description, "Erlang Kinesis Client"},
  {vsn, git},
  {modules, []},
  {registered, [kinetic_config]},
  {applications, [kernel, stdlib, inets, crypto, ssl, jiffy, lhttpc, erliam]},
  {env, [{args, []}]},
  {mod, {kinetic, []}},
  {licenses, ["BSD-3-Clause"]}]}.
--------------------------------------------------------------------------------
/src/kinetic.erl:
--------------------------------------------------------------------------------
-module(kinetic).

%% @todo Remove once https://github.com/erlang/otp/issues/6779 is fixed
-dialyzer([{no_missing_return, [start/2]}]).

-behaviour(application).

-export([start/0, stop/0]).
-export([start/2, stop/1]).
-export([start/1]).
-export([create_stream/1, create_stream/2]).
-export([list_streams/1, list_streams/2]).
-export([delete_stream/1, delete_stream/2]).
-export([describe_stream/1, describe_stream/2]).
-export([get_records/1, get_records/2]).
-export([get_shard_iterator/1, get_shard_iterator/2]).
-export([merge_shards/1, merge_shards/2]).
-export([put_record/1, put_record/2]).
-export([put_records/1, put_records/2]).
-export([split_shard/1, split_shard/2]).

-include("kinetic.hrl").

% application behaviour

-spec start() -> ok | {error, any()}.
start() ->
    application:start(kinetic).

-spec stop() -> ok | {error, any()}.
stop() ->
    application:stop(kinetic).

start(Opts) when is_list(Opts) ->
    kinetic_sup:start_link(Opts).

-spec start(normal | {takeover, node()} | {failover, node()}, any()) -> {ok, pid()}.
start(_, Opts) ->
    kinetic_sup:start_link(Opts).

-spec stop(any()) -> ok.
stop(_) ->
    ok.

% Public API

%%
%% Payload = [{<<"ShardCount">>, integer()},  <- required
%%            {<<"StreamName">>, binary()}]   <- required
%%
%% Response = {ok, []}
create_stream(Payload) ->
    create_stream(Payload, []).

create_stream(Payload, Opts) when is_list(Opts) ->
    execute("CreateStream", Payload, Opts);
create_stream(Payload, Timeout) ->
    create_stream(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"StreamName">>, binary()}]  <- required
%%
%% Response = {ok, []}
delete_stream(Payload) ->
    delete_stream(Payload, []).
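
%% A minimal usage sketch (hypothetical shell session; the stream name and
%% shard count are illustrative, and the application is assumed configured):
%%   1> kinetic:create_stream([{<<"ShardCount">>, 1}, {<<"StreamName">>, <<"myStream">>}]).
%%   {ok, []}
%%   2> kinetic:delete_stream([{<<"StreamName">>, <<"myStream">>}]).
%%   {ok, []}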

delete_stream(Payload, Opts) when is_list(Opts) ->
    execute("DeleteStream", Payload, Opts);
delete_stream(Payload, Timeout) ->
    delete_stream(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"StreamName">>, binary()},             <- required
%%            {<<"Limit">>, integer()},                 <- optional
%%            {<<"ExclusiveStartShardId">>, binary()}]  <- optional
%%
%% Response = {ok, [{<<"StreamDescription">>,
%%                   {[{<<"HasMoreShards">>,false},
%%                     {<<"StreamStatus">>,<<"ACTIVE">>},
%%                     {<<"StreamName">>,<<"exampleStreamName">>},
%%                     {<<"StreamARN">>,
%%                      <<"arn:aws:kinesis:us-east-1:052958737983:exampleStreamName">>},
%%                     {<<"Shards">>,
%%                      [{[{<<"HashKeyRange">>,
%%                          {[{<<"EndingHashKey">>,
%%                             <<"113427455640312821154458202477256070484">>},
%%                            {<<"StartingHashKey">>,<<"0">>}]}},
%%                         {<<"ShardId">>,<<"shardId-000000000000">>},
%%                         {<<"SequenceNumberRange">>,
%%                          {[{<<"EndingSequenceNumber">>,
%%                             <<"21269319989741826081360214168359141376">>},
%%                            {<<"StartingSequenceNumber">>,
%%                             <<"21267647932558653966460912964485513216">>}]}}]},
%%                       {[{<<"HashKeyRange">>,
%%                          {[{<<"EndingHashKey">>,
%%                             <<"226854911280625642308916404954512140969">>},
%%                            {<<"StartingHashKey">>,
%%                             <<"113427455640312821154458202477256070485">>}]}},
%%                         {<<"ShardId">>,<<"shardId-000000000001">>},
%%                         {<<"SequenceNumberRange">>,
%%                          {[{<<"StartingSequenceNumber">>,
%%                             <<"21267647932558653966460912964485513217">>}]}}]},
%%                       {[{<<"HashKeyRange">>,
%%                          {[{<<"EndingHashKey">>,
%%                             <<"340282366920938463463374607431768211455">>},
%%                            {<<"StartingHashKey">>,
%%                             <<"226854911280625642308916404954512140970">>}]}},
%%                         {<<"ShardId">>,<<"shardId-000000000002">>},
%%                         {<<"SequenceNumberRange">>,
%%                          {[{<<"StartingSequenceNumber">>,
%%                             <<"21267647932558653966460912964485513218">>}]}}]}]}]}}]
describe_stream(Payload) ->
    describe_stream(Payload, []).

describe_stream(Payload, Opts) when is_list(Opts) ->
    execute("DescribeStream", Payload, Opts);
describe_stream(Payload, Timeout) ->
    describe_stream(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"Limit">>, integer()},         <- optional
%%            {<<"ShardIterator">>, binary()}]  <- required
%%
%% Response = {ok, [
%%     {<<"NextShardIterator">>, <<"...a long base64 binary...">>},
%%     {<<"Records">>, [{<<"Data">>, <<"XzxkYXRhPl8w">>},
%%                      {<<"PartitionKey">>, <<"partitionKey">>},
%%                      {<<"SequenceNumber">>, <<"21269319989652663814458848515492872193">>}]}]}
get_records(Payload) ->
    get_records(Payload, []).

get_records(Payload, Opts) when is_list(Opts) ->
    execute("GetRecords", Payload, Opts);
get_records(Payload, Timeout) ->
    get_records(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"StreamName">>, binary()},               <- required
%%            {<<"ShardId">>, binary()},                  <- required
%%            {<<"ShardIteratorType">>, <<"AT_SEQUENCE_NUMBER |
%%                                         AFTER_SEQUENCE_NUMBER |
%%                                         TRIM_HORIZON |
%%                                         LATEST">>},    <- required
%%            {<<"StartingSequenceNumber">>, binary()}]   <- optional
%%
%% Response = {ok, [{<<"ShardIterator">>, <<"...a long base64 binary...">>}]}
get_shard_iterator(Payload) ->
    get_shard_iterator(Payload, []).
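
%% A minimal read-path sketch (hypothetical values): obtain an iterator for a
%% shard, then page through it with get_records/1.
%%   {ok, [{<<"ShardIterator">>, Iterator}]} =
%%       kinetic:get_shard_iterator([{<<"StreamName">>, <<"myStream">>},
%%                                   {<<"ShardId">>, <<"shardId-000000000000">>},
%%                                   {<<"ShardIteratorType">>, <<"TRIM_HORIZON">>}]),
%%   {ok, Result} = kinetic:get_records([{<<"ShardIterator">>, Iterator}]).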

get_shard_iterator(Payload, Opts) when is_list(Opts) ->
    execute("GetShardIterator", Payload, Opts);
get_shard_iterator(Payload, Timeout) ->
    get_shard_iterator(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"ExclusiveStartStreamName">>, binary()},  <- optional
%%            {<<"Limit">>, integer()}]                    <- optional
%%
%% Response = {ok, [{<<"HasMoreStreams">>, false},
%%                  {<<"StreamNames">>, [<<"exampleStreamName">>]}]}
list_streams(Payload) ->
    list_streams(Payload, []).

list_streams(Payload, Opts) when is_list(Opts) ->
    execute("ListStreams", Payload, Opts);
list_streams(Payload, Timeout) ->
    list_streams(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"StreamName">>, binary()},            <- required
%%            {<<"ShardToMerge">>, binary()},          <- required
%%            {<<"AdjacentShardToMerge">>, binary()}]  <- required
%%
%% Response = {ok, []}
merge_shards(Payload) ->
    merge_shards(Payload, []).

merge_shards(Payload, Opts) when is_list(Opts) ->
    execute("MergeShards", Payload, Opts);
merge_shards(Payload, Timeout) ->
    merge_shards(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"Data">>, base64_binary()},                <- required
%%            {<<"ExplicitHashKey">>, binary()},            <- optional
%%            {<<"PartitionKey">>, binary()},               <- required
%%            {<<"SequenceNumberForOrdering">>, binary()},  <- optional
%%            {<<"StreamName">>, binary()}]                 <- required
%%
%% Response = {ok, [{<<"SequenceNumber">>, <<"21269319989653637946712965403778482177">>},
%%                  {<<"ShardId">>, <<"shardId-000000000001">>}]}
put_record(Payload) ->
    put_record(Payload, []).

put_record(Payload, Opts) when is_list(Opts) ->
    execute("PutRecord", Payload, Opts);
put_record(Payload, Timeout) ->
    put_record(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"Records">>, [
%%                {<<"Data">>, base64_binary()},  <- required
%%                {<<"PartitionKey">>, binary()}  <- required
%%            ]},
%%            {<<"StreamName">>, binary()}]       <- required
%%
%% Response = {ok, [{<<"FailedRecordCount">>, integer()},
%%                  {<<"Records">>, [
%%                      {[
%%                        {<<"ErrorCode">>, binary()},
%%                        {<<"ErrorMessage">>, binary()},
%%                        {<<"SequenceNumber">>, binary()},
%%                        {<<"ShardId">>, binary()}
%%                      ]}]}]}
%%
%% Returns {error, Reason} | {ok, [ok | {error, Reason}]}.
%% In the latter case, the list contains a result for each input record, in order.
put_records(Payload) ->
    put_records(Payload, []).

put_records(Payload, Opts) when is_list(Opts) ->
    case execute("PutRecords", Payload, Opts) of
        {error, E} ->
            {error, E};
        {ok, Response} ->
            {<<"Records">>, Records} = lists:keyfind(<<"Records">>, 1, Response),
            {ok, [record_status(R) || {R} <- Records]}
    end;
put_records(Payload, Timeout) ->
    put_records(Payload, [{timeout, Timeout}]).

%%
%% Payload = [{<<"StreamName">>, binary()},          <- required
%%            {<<"ShardToSplit">>, binary()},        <- required
%%            {<<"NewStartingHashKey">>, binary()}]  <- required
%%
%% Response = {ok, []}
split_shard(Payload) ->
    split_shard(Payload, []).
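
%% Every API function also accepts per-call options; region and timeout are
%% the overrides kinetic_config:merge_args/2 currently understands. A sketch
%% (values illustrative):
%%   kinetic:list_streams([], [{region, "us-west-2"}, {timeout, 10000}]).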

split_shard(Payload, Opts) when is_list(Opts) ->
    execute("SplitShard", Payload, Opts);
split_shard(Payload, Timeout) ->
    split_shard(Payload, [{timeout, Timeout}]).

%% Internal
execute(Operation, Payload, Opts) ->
    case kinetic_config:get_args() of
        {error, E} ->
            {error, E};
        {ok, Args} ->
            #kinetic_arguments{aws_credentials = AwsCreds,
                               region = Region,
                               date = Date,
                               url = Url,
                               host = Host,
                               lhttpc_opts = LHttpcOpts,
                               timeout = Timeout} =
                kinetic_config:merge_args(Args, Opts),
            case kinetic_utils:encode({Payload}) of
                {error, E} ->
                    {error, E};
                Body ->
                    Target = ["Kinesis_20131202.", Operation],

                    SignedHeaders =
                        #{"content-type" => "application/x-amz-json-1.1",
                          "connection" => "keep-alive"},
                    Headers =
                        awsv4:headers(AwsCreds,
                                      #{service => "kinesis",
                                        target_api => Target,
                                        method => "POST",
                                        region => Region,
                                        host => Host,
                                        signed_headers => SignedHeaders,
                                        aws_date => Date},
                                      iolist_to_binary(Body)),

                    case lhttpc:request(Url, post, Headers, Body, Timeout, LHttpcOpts) of
                        {ok, {{200, _}, _ResponseHeaders, ResponseBody}} ->
                            {ok, kinetic_utils:decode(ResponseBody)};
                        {ok, {{Code, _}, ResponseHeaders, ResponseBody}} ->
                            {error, {Code, ResponseHeaders, ResponseBody}};
                        {error, E} ->
                            {error, E}
                    end
            end
    end.

record_status(Record) ->
    case lists:keymember(<<"SequenceNumber">>, 1, Record) of
        true ->
            ok;
        false ->
            {error, {get_value(<<"ErrorCode">>, Record), get_value(<<"ErrorMessage">>, Record)}}
    end.

get_value(Key, TupleList) ->
    {Key, Value} = lists:keyfind(Key, 1, TupleList),
    Value.
--------------------------------------------------------------------------------
/src/kinetic_config.erl:
--------------------------------------------------------------------------------
-module(kinetic_config).

-behaviour(gen_server).

-export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
         handle_info/2]).
-export([start_link/1, update_data/1, stop/0, g/1, get_args/0, merge_args/2]).

-include("kinetic.hrl").

-record(kinetic_config, {tref}).

start_link(Opts) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [Opts], []).

stop() ->
    gen_server:call(?MODULE, stop).

g(Name) ->
    case application:get_env(kinetic, Name) of
        {ok, Value} ->
            Value;
        _ ->
            undefined
    end.

get_args() ->
    try ets:lookup_element(?KINETIC_DATA, args, 2) of
        V ->
            {ok, V}
    catch
        error:badarg ->
            {error, missing_args}
    end.

update_data(Opts) ->
    Arguments =
        case get_args() of
            {error, missing_args} ->
                new_args(Opts);
            {ok, Result} ->
                Result#kinetic_arguments{aws_credentials = erliam:credentials(),
                                         date = awsv4:isonow()}
        end,
    ets:insert(?KINETIC_DATA, {args, Arguments}),
    {ok, Arguments}.
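
%% Note: update_data/1 is also invoked once per second by the
%% timer:apply_interval/4 started in init/1 below, so the cached AWS
%% credentials and signing date in ETS stay fresh without caller involvement.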

% gen_server behavior

init([Opts]) ->
    process_flag(trap_exit, true),
    EtsOpts = [named_table, set, public, {read_concurrency, true}],
    ets:new(?KINETIC_DATA, EtsOpts),
    ets:new(?KINETIC_STREAM, EtsOpts),
    {ok, _ClientArgs} = update_data(Opts),
    case timer:apply_interval(1000, ?MODULE, update_data, [Opts]) of
        {ok, TRef} ->
            {ok, #kinetic_config{tref = TRef}};
        Error ->
            {stop, Error}
    end.

handle_call(stop, _From, State) ->
    {stop, normal, ok, State}.

handle_cast(_Arg, State) ->
    {noreply, State}.

terminate(_Reason, #kinetic_config{tref = TRef}) ->
    {ok, cancel} = timer:cancel(TRef),
    true = ets:delete(?KINETIC_DATA),
    true = ets:delete(?KINETIC_STREAM),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

handle_info({'EXIT', _From, normal}, State) ->
    {noreply, State};
handle_info({'EXIT', From, Reason}, State) ->
    error_logger:info_msg("~p: ~p exited due to: ~p~n", [?MODULE, From, Reason]),
    {noreply, State};
handle_info(_Info, State) ->
    {noreply, State}.

% Internal implementation

region("us-east-1" ++ _R) ->
    "us-east-1";
region("us-west-1" ++ _R) ->
    "us-west-1";
region("us-west-2" ++ _R) ->
    "us-west-2";
region("ap-northeast-1" ++ _R) ->
    "ap-northeast-1";
region("ap-southeast-1" ++ _R) ->
    "ap-southeast-1";
region("eu-west-1" ++ _R) ->
    "eu-west-1".

new_args(Opts) ->
    Region =
        case proplists:get_value(region, Opts, undefined) of
            undefined ->
                {ok, Zone} = imds:zone(),
                region(Zone);
            R ->
                R
        end,

    LHttpcOpts = proplists:get_value(lhttpc_opts, Opts, []),
    DefaultTimeout = proplists:get_value(timeout, Opts, 5000),
    Host = kinetic_utils:endpoint("kinesis", Region),
    Url = "https://" ++ Host,

    %% erliam should support named profiles for using specific roles or preconfigured
    %% long-term credentials to mint session tokens, but for now set keys in erliam app
    %% env if set in kinetic app env; these will be used to create session tokens:
    case {proplists:get_value(aws_access_key_id, Opts),
          proplists:get_value(aws_secret_access_key, Opts)}
    of
        {undefined, _} ->
            ok;
        {_, undefined} ->
            ok;
        {ConfiguredAccessKeyId, ConfiguredSecretAccessKey} ->
            ok = application:set_env(erliam, aws_access_key, ConfiguredAccessKeyId),
            ok = application:set_env(erliam, aws_secret_key, ConfiguredSecretAccessKey),
            ok = erliam:invalidate()
    end,

    #kinetic_arguments{region = Region,
                       date = awsv4:isonow(),
                       host = Host,
                       url = Url,
                       lhttpc_opts = LHttpcOpts,
                       timeout = DefaultTimeout,
                       aws_credentials = erliam:credentials()}.

%% @todo:
%% - rewrite new_args to use this
%% - handle additional args
merge_args(Args, []) ->
    Args;
merge_args(Args, [{region, Region} | Rest]) ->
    Host = kinetic_utils:endpoint("kinesis", Region),
    Url = "https://" ++ Host,
    merge_args(Args#kinetic_arguments{region = Region,
                                      host = Host,
                                      url = Url},
               Rest);
merge_args(Args, [{timeout, Timeout} | Rest]) ->
    merge_args(Args#kinetic_arguments{timeout = Timeout}, Rest).
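
%% Example (values illustrative): overriding region recomputes host and url,
%% while overriding timeout touches only the timeout field.
%%   NewArgs = kinetic_config:merge_args(Args, [{region, "us-west-2"}, {timeout, 10000}]).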
--------------------------------------------------------------------------------
/src/kinetic_stream.erl:
--------------------------------------------------------------------------------
-module(kinetic_stream).

-behaviour(gen_server).

-export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
         handle_info/2]).
-export([stop/2, start_link/2, put_record/3]).
-export([flush/2]).

% @todo Stop exporting these functions just for tests
-ifdef(TEST).

-export([get_stream/2, send_to_kinesis/5]).

-endif.

-include("kinetic.hrl").

-record(kinetic_stream,
        {stream_name :: binary(),
         base_partition_name :: binary(),
         partitions_number = 1000 :: pos_integer(),
         timeout = 5000 :: pos_integer(),
         buffer = <<"">> :: binary(),
         buffer_size = 0 :: non_neg_integer(),
         current_partition_num = 0 :: non_neg_integer(),
         flush_interval = 1000 :: pos_integer(),
         flush_tref :: undefined | term(),
         retries = 3 :: pos_integer()}).

start_link(StreamName, Config) ->
    gen_server:start_link(?MODULE, [StreamName, Config], []).

stop(StreamName, Config) ->
    Stream = get_stream(StreamName, Config),
    gen_server:call(Stream, stop).

put_record(StreamName, Config, Data) ->
    DataSize = erlang:size(Data),
    case DataSize > ?KINESIS_MAX_PUT_SIZE of
        true ->
            {error, max_size_exceeded};
        false ->
            Stream = get_stream(StreamName, Config),
            gen_server:call(Stream, {put_record, Data, DataSize}, infinity)
    end.

flush(StreamName, Config) ->
    Stream = get_stream(StreamName, Config),
    Stream ! flush.

% gen_server behavior
init([StreamName, {BasePartitionName}]) ->
    init([StreamName, {BasePartitionName, 1000}]);
init([StreamName, {BasePartitionName, PartitionsNumber}]) ->
    init([StreamName, {BasePartitionName, PartitionsNumber, 3}]);
init([StreamName, {BasePartitionName, PartitionsNumber, Retries}]) ->
    init([StreamName, {BasePartitionName, PartitionsNumber, Retries, 5000}]);
init([StreamName, {BasePartitionName, PartitionsNumber, Retries, Timeout}]) ->
    init([StreamName, {BasePartitionName, PartitionsNumber, Retries, Timeout, 1000}]);
init([StreamName,
      {BasePartitionName, PartitionsNumber, Retries, Timeout, FlushInterval}]) ->
    process_flag(trap_exit, true),
    case ets:insert_new(?KINETIC_STREAM, {StreamName, self()}) of
        true ->
            {ok, TRef} = timer:send_after(FlushInterval, self(), flush),
            {ok,
             #kinetic_stream{stream_name = StreamName,
                             base_partition_name = BasePartitionName,
                             partitions_number = PartitionsNumber,
                             timeout = Timeout,
                             buffer = <<"">>,
                             buffer_size = 0,
                             current_partition_num = 0,
                             flush_interval = FlushInterval,
                             flush_tref = TRef,
                             retries = Retries}};
        false ->
            ignore
    end.
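
%% The Config tuple is positional; shorter tuples fall through the clauses
%% above and pick up defaults. A sketch (partition name illustrative):
%%   {<<"part">>}                    %% 1000 partitions, 3 retries, 5000 ms timeout, 1000 ms flush interval
%%   {<<"part">>, 16, 5, 2000, 500}  %% everything explicit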

% buffer + Data is bigger than ?KINESIS_MAX_PUT_SIZE: flush the buffer first
handle_call({put_record, Data, DataSize},
            _From,
            #kinetic_stream{buffer_size = BSize} = State)
    when BSize + DataSize > ?KINESIS_MAX_PUT_SIZE ->
    NewState = internal_flush(State),
    {reply, ok, reset_timer(NewState#kinetic_stream{buffer_size = DataSize, buffer = Data})};
% buffer + Data is not bigger than ?KINESIS_MAX_PUT_SIZE: keep buffering
handle_call({put_record, Data, DataSize},
            _From,
            #kinetic_stream{buffer = Buffer, buffer_size = BSize} = State) ->
    {reply,
     ok,
     reset_timer(State#kinetic_stream{buffer = <<Buffer/binary, Data/binary>>,
                                      buffer_size = BSize + DataSize})};
handle_call(stop, _From, State) ->
    {stop, normal, ok, State}.

handle_cast(_Arg, State) ->
    {noreply, State}.

terminate(_Reason, #kinetic_stream{stream_name = StreamName, flush_tref = TRef}) ->
    ets:delete(?KINETIC_STREAM, StreamName),
    timer:cancel(TRef),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

handle_info(flush, State) ->
    NewState = internal_flush(State),
    {noreply, reset_timer(NewState)};
handle_info({'EXIT', _From, normal}, State) ->
    {noreply, State};
handle_info({'EXIT', From, Reason}, State) ->
    error_logger:info_msg("~p: ~p exited due to: ~p~n", [?MODULE, From, Reason]),
    {noreply, State};
handle_info(_Info, State) ->
    {noreply, State}.

% Internal implementation
get_stream(StreamName, Config) ->
    case ets:lookup(?KINETIC_STREAM, StreamName) of
        [] ->
            case supervisor:start_child(kinetic_stream_sup, [StreamName, Config]) of
                {ok, undefined} ->
                    get_stream(StreamName, Config);
                {ok, Pid} ->
                    Pid
            end;
        [{_Name, Pid}] ->
            case is_process_alive(Pid) of
                true ->
                    Pid;
                false ->
                    ets:delete(?KINETIC_STREAM, StreamName),
                    get_stream(StreamName, Config)
            end
    end.

internal_flush(#kinetic_stream{buffer = <<"">>} = State) ->
    State;
internal_flush(#kinetic_stream{stream_name = StreamName,
                               buffer = Buffer,
                               timeout = Timeout,
                               retries = Retries} =
                   State) ->
    PartitionKey = partition_key(State),
    spawn_link(fun() ->
                  send_to_kinesis(StreamName, Buffer, PartitionKey, Timeout, Retries + 1)
               end),
    increment_partition_num(State#kinetic_stream{buffer = <<"">>, buffer_size = 0}).

increment_partition_num(#kinetic_stream{current_partition_num = Number,
                                        partitions_number = Number} =
                            State) ->
    State#kinetic_stream{current_partition_num = 0};
increment_partition_num(#kinetic_stream{current_partition_num = Number} = State) ->
    State#kinetic_stream{current_partition_num = Number + 1}.

partition_key(#kinetic_stream{current_partition_num = Number,
                              base_partition_name = BasePartitionName}) ->
    BinNumber = integer_to_binary(Number),
    <<BasePartitionName/binary, "-", BinNumber/binary>>.

reset_timer(#kinetic_stream{flush_interval = FlushInterval, flush_tref = TRef} = State) ->
    timer:cancel(TRef),
    {ok, NewTRef} = timer:send_after(FlushInterval, self(), flush),
    State#kinetic_stream{flush_tref = NewTRef}.
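
%% Note on delivery: each flush is handed to a short-lived linked process, so a
%% slow or retrying PutRecord never blocks the gen_server; send_to_kinesis/5
%% below retries throttled calls and crashes that helper (not the stream,
%% which traps exits and just logs them) once the retry budget runs out.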

send_to_kinesis(StreamName, Buffer, PartitionKey, Timeout, 0) ->
    erlang:error(max_retries_reached, [StreamName, PartitionKey, Timeout, Buffer]);
send_to_kinesis(StreamName, Buffer, PartitionKey, Timeout, Retries) ->
    case kinetic:put_record([{<<"Data">>, b64fast:encode64(Buffer)},
                             {<<"PartitionKey">>, PartitionKey},
                             {<<"StreamName">>, StreamName}],
                            Timeout)
    of
        {ok, _} ->
            {ok, done};
        {error, {Code, Headers, RawBody}} ->
            Body = kinetic_utils:decode(RawBody),
            case proplists:get_value(<<"__type">>, Body) of
                <<"ProvisionedThroughputExceededException">> ->
                    timer:sleep(1000), % not really exponential
                    send_to_kinesis(StreamName, Buffer, PartitionKey, Timeout, Retries - 1);
                _ ->
                    error_logger:info_msg("Request failed: Code: ~p~n~n~p~n~p~n",
                                          [Code, Headers, RawBody]),
                    {error, {Code, Headers, Body}}
            end
    end.
--------------------------------------------------------------------------------
/src/kinetic_stream_sup.erl:
--------------------------------------------------------------------------------
-module(kinetic_stream_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, [[]]).

init(_) ->
    KineticStream =
        {kinetic_stream,
         {kinetic_stream, start_link, []},
         transient,
         10000,
         worker,
         [kinetic_stream]},

    {ok, {{simple_one_for_one, 10, 1}, [KineticStream]}}.
--------------------------------------------------------------------------------
/src/kinetic_sup.erl:
--------------------------------------------------------------------------------
-module(kinetic_sup).

-behaviour(supervisor).

-export([start_link/0, start_link/1]).
-export([init/1, stop/1]).

start_link() ->
    start_link([]).

% Need the slightly stupid double code here to avoid an infinite loop
% in case args is actually [] in the configuration.
start_link([]) ->
    Args = kinetic_config:g(args),
    supervisor:start_link({local, ?MODULE}, ?MODULE, Args);
start_link(Args) ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, Args).

init(Opts) ->
    KineticConfig =
        {kinetic_config,
         {kinetic_config, start_link, [Opts]},
         permanent,
         10000,
         worker,
         [kinetic_config]},

    KineticStreamSup =
        {kinetic_stream_sup,
         {kinetic_stream_sup, start_link, []},
         permanent,
         10000,
         supervisor,
         dynamic},

    {ok, {{one_for_one, 10, 1}, [KineticConfig, KineticStreamSup]}}.

-spec stop(pid()) -> ok.
stop(Pid) ->
    MRef = erlang:monitor(process, Pid),
    exit(Pid, shutdown),
    receive
        {'DOWN', MRef, process, _, _} ->
            ok
    end.
--------------------------------------------------------------------------------
/src/kinetic_utils.erl:
--------------------------------------------------------------------------------
-module(kinetic_utils).

-export([endpoint/2, decode/1, encode/1]).
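
%% endpoint/2 maps a service and region to the corresponding AWS hostname,
%% e.g. endpoint("kinesis", "us-west-2") -> "kinesis.us-west-2.amazonaws.com";
%% only the regions enumerated below are supported.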

endpoint("kinesis", "us-east-1") ->
    "kinesis.us-east-1.amazonaws.com";
endpoint("kinesis", "us-west-1") ->
    "kinesis.us-west-1.amazonaws.com";
endpoint("kinesis", "us-west-2") ->
    "kinesis.us-west-2.amazonaws.com";
endpoint("kinesis", "eu-west-1") ->
    "kinesis.eu-west-1.amazonaws.com";
endpoint("kinesis", "ap-northeast-1") ->
    "kinesis.ap-northeast-1.amazonaws.com";
endpoint("kinesis", "ap-southeast-1") ->
    "kinesis.ap-southeast-1.amazonaws.com".

decode(<<"">>) ->
    [];
decode(Body) ->
    try jiffy:decode(Body) of
        {Decoded} -> % enforces the dictionary
            Decoded;
        _ ->
            {error, not_a_dict}
    catch
        _:E ->
            {error, E}
    end.

encode(Body) ->
    try
        jiffy:encode(Body)
    catch
        _:E ->
            {error, E}
    end.
--------------------------------------------------------------------------------
/test/kinetic_config_tests.erl:
--------------------------------------------------------------------------------
-module(kinetic_config_tests).

-include("kinetic.hrl").

-include_lib("eunit/include/eunit.hrl").

-hank([{unnecessary_function_arguments, [{test_teardown, 1, 1}]}]).

test_setup() ->
    meck:new(erliam, [passthrough]),
    meck:expect(erliam, invalidate, 0, ok),
    meck:expect(erliam, credentials, 0, fake_creds),
    meck:new(imds, [passthrough]),
    meck:expect(imds, zone, 0, {ok, "us-east-1b"}),
    meck:new(timer, [unstick, passthrough]),
    meck:expect(timer,
                apply_interval,
                fun(Interval, M, F, [Opts]) ->
                   case proplists:get_value(should_err, Opts) of
                       true ->
                           {error, broken};
                       _ ->
                           meck:passthrough([Interval, M, F, [Opts]])
                   end
                end).

test_teardown(_) ->
    meck:unload(timer),
    meck:unload(imds),
    meck:unload(erliam).

kinetic_config_test_() ->
    {inorder,
     {foreach,
      fun test_setup/0,
      fun test_teardown/1,
      [?_test(test_passed_metadata()), ?_test(test_config_env())]}}.

merge_args_test_() ->
    [{"overriding the region should affect the region, host, and url",
      ?_test(begin
                 Args1 =
                     #kinetic_arguments{region = "region1",
                                        host = Host1 = "host1",
                                        url = Url1 = "url1"},
                 #kinetic_arguments{region = Region2,
                                    host = Host2,
                                    url = Url2} =
                     kinetic_config:merge_args(Args1, [{region, "us-east-1"}]),
                 ?assertEqual("us-east-1", Region2),
                 ?assertNotEqual(Host1, Host2),
                 ?assertNotEqual(Url1, Url2),
                 ok
             end)},
     {"it should be possible to override the timeout",
      ?_test(begin
                 Args = kinetic_config:merge_args(#kinetic_arguments{timeout = 1}, [{timeout, 2}]),
                 ?assertEqual(2, Args#kinetic_arguments.timeout),
                 ok
             end)}].

test_config_env() ->
    application:set_env(kinetic, whatever, value),
    value = kinetic_config:g(whatever),
    undefined = kinetic_config:g(something).

test_passed_metadata() ->
    {ok, _Pid} =
        kinetic_config:start_link([{aws_access_key_id, "whatever"},
                                   {aws_secret_access_key, "secret"}]),
    ?assert(ets:info(?KINETIC_STREAM) =/= undefined),
    {ok,
     #kinetic_arguments{aws_credentials = fake_creds,
                        region = "us-east-1",
                        lhttpc_opts = []}} =
        kinetic_config:get_args(),
    kinetic_config:update_data([{aws_access_key_id, "whatever"},
                                {aws_secret_access_key, "secret"}]),
    {ok,
     #kinetic_arguments{aws_credentials = fake_creds,
                        region = "us-east-1",
                        lhttpc_opts = []}} =
        kinetic_config:get_args(),
    kinetic_config:stop(),
    {error, _} = kinetic_config:get_args(),
    undefined = ets:info(?KINETIC_STREAM).
--------------------------------------------------------------------------------
/test/kinetic_stream_tests.erl:
--------------------------------------------------------------------------------
-module(kinetic_stream_tests).

-include("kinetic.hrl").

-include_lib("eunit/include/eunit.hrl").

-hank([{unnecessary_function_arguments, [{test_teardown, 1, 1}]}]).

test_setup() ->
    ets:new(?KINETIC_STREAM, [named_table, set, public, {read_concurrency, true}]),
    meck:new(supervisor, [unstick, passthrough]),
    meck:sequence(supervisor, start_child, 2, [{ok, undefined}, {ok, pid}]),
    meck:new(kinetic, [passthrough]),
    meck:expect(kinetic,
                put_record,
                fun(Payload, Pid) ->
                   case Pid of
                       Pid when is_pid(Pid) ->
                           Pid ! done;
                       _ ->
                           ok
                   end,
                   case proplists:get_value(<<"PartitionKey">>, Payload) of
                       <<"otherstuff">> ->
                           {error, {400, headers, <<"{\"__type\": \"OtherStuff\"}">>}};
                       <<"throughput">> ->
                           {error,
                            {400,
                             headers,
                             <<"{\"__type\": \"ProvisionedThroughputExceededException\"}">>}};
                       _ ->
                           {ok, done}
                   end
                end),
    meck:new(timer, [unstick, passthrough]),
    meck:expect(timer, send_after, fun(1000, _pid, flush) -> {ok, tref} end),
    meck:expect(timer, sleep, fun(1000) -> ok end),
    meck:expect(timer, cancel, fun(tref) -> ok end).

test_teardown(_) ->
    ets:delete(?KINETIC_STREAM),
    meck:unload(timer),
    meck:unload(kinetic),
    meck:unload(supervisor).

kinetic_stream_test_() ->
    {inorder,
     {foreach,
      fun test_setup/0,
      fun test_teardown/1,
      [?_test(test_get_stream()),
       ?_test(test_start_and_stop()),
       ?_test(test_functionality()),
       ?_test(test_retries())]}}.

%%
%% Tests
%%
test_get_stream() ->
    Pid = self(),
    pid = kinetic_stream:get_stream(<<"mystream">>, {<<"whatever">>}),
    ets:insert_new(?KINETIC_STREAM, {<<"mystream">>, self()}),
    Pid = kinetic_stream:get_stream(<<"mystream">>, {<<"whatever">>}),
    ets:delete(?KINETIC_STREAM, <<"mystream">>),

    ChildPid = spawn(fun() -> Pid ! done end),
    ok =
        receive
            done ->
                ok;
            _ ->
                bad
        after 1000 ->
            bad
        end,
    ets:insert_new(?KINETIC_STREAM, {<<"mystream">>, ChildPid}),
    pid = kinetic_stream:get_stream(<<"mystream">>, {<<"whatever">>}).
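
%% The first assertion above relies on get_stream/2 retrying when
%% supervisor:start_child/2 returns {ok, undefined} (kinetic_stream:init/1
%% returned ignore because another process already owned the ETS entry);
%% hence the meck:sequence of [{ok, undefined}, {ok, pid}] in test_setup/0.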

test_start_and_stop() ->
    {ok, Pid} = kinetic_stream:start_link(<<"mystream">>, {<<"whatever">>}),
    Pid = kinetic_stream:get_stream(<<"mystream">>, {<<"whatever">>}),
    true = meck:called(timer, send_after, [1000, Pid, flush]),
    kinetic_stream:flush(<<"mystream">>, {<<"whatever">>}),
    kinetic_stream:stop(<<"mystream">>, {<<"whatever">>}),
    2 = meck:num_calls(timer, cancel, [tref]),
    false = meck:called(kinetic, put_record, ['_', '_']),
    ok.

test_functionality() ->
    Pid = self(),
    BigData = list_to_binary(string:chars($a, ?KINESIS_MAX_PUT_SIZE + 1)),
    SmallData = <<"data">>,
    RegularData = list_to_binary(string:chars($a, ?KINESIS_MAX_PUT_SIZE - 1)),
    S = <<"mystream">>,
    P = <<"whatever">>,
    % This is a total hack to use the Pid as the Timeout and have it passed around
    {ok, _Pid} = kinetic_stream:start_link(S, {P, 2, 3, Pid}),
    {error, max_size_exceeded} = kinetic_stream:put_record(S, {P}, BigData),
    ok = kinetic_stream:put_record(S, {P}, SmallData),
    kinetic_stream:flush(S, {P}),
    Payload0 =
        [{<<"Data">>, b64fast:encode64(SmallData)},
         {<<"PartitionKey">>, <<"whatever-0">>},
         {<<"StreamName">>, S}],
    wait_for_flush(),
    true = meck:called(kinetic, put_record, [Payload0, Pid]),
    Payload1 =
        [{<<"Data">>, b64fast:encode64(<<SmallData/binary, SmallData/binary>>)},
         {<<"PartitionKey">>, <<"whatever-1">>},
         {<<"StreamName">>, S}],
    ok = kinetic_stream:put_record(S, {P}, SmallData),
    ok = kinetic_stream:put_record(S, {P}, SmallData),
    kinetic_stream:flush(S, {P}),
    wait_for_flush(),
    true = meck:called(kinetic, put_record, [Payload1, Pid]),
    ok = kinetic_stream:put_record(S, {P}, RegularData),
    ok = kinetic_stream:put_record(S, {P}, SmallData),
    Payload2 =
        [{<<"Data">>, b64fast:encode64(RegularData)},
         {<<"PartitionKey">>, <<"whatever-2">>},
         {<<"StreamName">>, S}],
    receive after 100 ->
        ok
    end,
    true = meck:called(kinetic, put_record, [Payload2, Pid]),
    kinetic_stream:flush(S, {P}),
    wait_for_flush(),
    Payload3 =
        [{<<"Data">>, b64fast:encode64(SmallData)},
         {<<"PartitionKey">>, <<"whatever-0">>},
         {<<"StreamName">>, S}],
    true = meck:called(kinetic, put_record, [Payload3, Pid]),
    ok.

test_retries() ->
    SmallData = <<"data">>,
    S = <<"mystream">>,
    P = <<"otherstuff">>,
    Payload0 =
        [{<<"Data">>, b64fast:encode64(SmallData)},
         {<<"PartitionKey">>, P},
         {<<"StreamName">>, S}],
    {error, {_, _, _}} = kinetic_stream:send_to_kinesis(S, SmallData, P, 5000, 3),
    1 = meck:num_calls(kinetic, put_record, [Payload0, 5000]),
    ok =
        try kinetic_stream:send_to_kinesis(S, SmallData, <<"throughput">>, 5000, 3) of
            _ ->
                bad
        catch
            error:max_retries_reached ->
                ok
        end,
    true = meck:called(timer, sleep, [1000]),
    ok.

wait_for_flush() ->
    ok =
        receive
            done ->
                ok;
            _ ->
                bad
        after 1000 ->
            bad
        end.
--------------------------------------------------------------------------------
/test/kinetic_sup_tests.erl:
--------------------------------------------------------------------------------
-module(kinetic_sup_tests).

-include("kinetic.hrl").

-include_lib("eunit/include/eunit.hrl").

-hank([{unnecessary_function_arguments, [{test_teardown, 1, 1}]}]).

test_setup() ->
    meck:new(erliam, [passthrough]),
    meck:expect(erliam, invalidate, 0, ok),
    meck:expect(erliam, credentials, 0, fake_creds),
    meck:new(imds, [passthrough]),
    meck:expect(imds, zone, 0, {ok, "us-east-1b"}).

test_teardown(_) ->
    meck:unload(imds),
    meck:unload(erliam).

kinetic_sup_test_() ->
    {inorder, {foreach, fun test_setup/0, fun test_teardown/1, [?_test(test_supervisor())]}}.

test_supervisor() ->
    process_flag(trap_exit, true),
    {ok, Pid} =
        kinetic_sup:start_link([{aws_access_key_id, "whatever"},
                                {aws_secret_access_key, "secret"}]),
    {ok,
     #kinetic_arguments{aws_credentials = fake_creds,
                        region = "us-east-1",
                        lhttpc_opts = []}} =
        kinetic_config:get_args(),

    kinetic_sup:stop(Pid),
    {error, _} = kinetic_config:get_args(),
    process_flag(trap_exit, false).
--------------------------------------------------------------------------------
/test/kinetic_tests.erl:
--------------------------------------------------------------------------------
-module(kinetic_tests).

-include("kinetic.hrl").

-include_lib("eunit/include/eunit.hrl").

-hank([{unnecessary_function_arguments, [{test_teardown, 1, 1}]}]).

test_arg_setup(Opts) ->
    meck:new(erliam, [passthrough]),
    meck:expect(erliam, invalidate, 0, ok),
    meck:expect(erliam,
                credentials,
                0,
                awsv4:credentials_from_plist([{access_key_id,
                                               proplists:get_value(aws_access_key_id, Opts)},
                                              {secret_access_key,
                                               proplists:get_value(aws_secret_access_key, Opts)}])),
    application:ensure_all_started(ssl),

    ets:new(?KINETIC_DATA, [named_table, set, public, {read_concurrency, true}]),
    meck:new(imds, [passthrough]),
    meck:expect(imds, zone, 0, {ok, "us-east-1b"}),

    {ok, _args} = kinetic_config:update_data(Opts),

    meck:new(lhttpc),
    meck:expect(lhttpc,
                request,
                fun (_Url, post, _Headers, _Body, _Timeout, error) ->
                        {ok, {{400, bla}, headers, body}};
                    (_Url, post, _Headers, _Body, _Timeout, _Opts) ->
                        {ok, {{200, bla}, headers, <<"{\"hello\": \"world\"}">>}}
                end).

test_setup() ->
    Opts = [{aws_access_key_id, "whatever"}, {aws_secret_access_key, "secret"}],
    test_arg_setup(Opts).

test_error_setup() ->
    Opts =
        [{aws_access_key_id, "whatever"},
         {aws_secret_access_key, "secret"},
         {lhttpc_opts, error}],
    test_arg_setup(Opts).

test_teardown(_) ->
    ets:delete(?KINETIC_DATA),
    meck:unload(imds),
    meck:unload(lhttpc),
    meck:unload(erliam),
    application:stop(ssl).

kinetic_test_() ->
    {inorder,
     {foreach, fun test_setup/0, fun test_teardown/1, [?_test(test_normal_functions())]}}.

kinetic_error_test_() ->
    {inorder,
     {foreach, fun test_error_setup/0, fun test_teardown/1, [?_test(test_error_functions())]}}.

sample_arglists(Payload) ->
    [[Payload],
     [Payload, []],
     [Payload, 12345],
     [Payload, [{timeout, 12345}]],
     [Payload, [{region, "us-east-1"}]],
     [Payload, [{region, "us-east-1"}, {timeout, 12345}]]].

test_normal_functions() ->
    lists:foreach(fun(F) ->
                     [{ok, [{<<"hello">>, <<"world">>}]} = erlang:apply(kinetic, F, Args)
                      || Args <- sample_arglists([])]
                  end,
                  [create_stream,
                   delete_stream,
                   describe_stream,
                   get_records,
                   get_shard_iterator,
                   list_streams,
                   merge_shards,
                   put_record,
                   split_shard]),

    lists:foreach(fun(F) -> {error, _} = erlang:apply(kinetic, F, [{whatever}]) end,
                  [create_stream,
                   delete_stream,
                   describe_stream,
                   get_records,
                   get_shard_iterator,
                   list_streams,
                   merge_shards,
                   put_record,
                   split_shard]).

test_error_functions() ->
    {ok, _args} =
        kinetic_config:update_data([{aws_access_key_id, "whatever"},
                                    {aws_secret_access_key, "secret"},
                                    {lhttpc_opts, error}]),
    lists:foreach(fun(F) ->
                     [{error, {400, headers, body}} = erlang:apply(kinetic, F, Args)
                      || Args <- sample_arglists([])]
                  end,
                  [create_stream,
                   delete_stream,
                   describe_stream,
                   get_records,
                   get_shard_iterator,
                   list_streams,
                   merge_shards,
                   put_record,
                   split_shard]),
    ets:delete_all_objects(?KINETIC_DATA),
    lists:foreach(fun(F) ->
                     [{error, missing_args} = erlang:apply(kinetic, F, Args)
                      || Args <- sample_arglists([])]
                  end,
                  [create_stream,
                   delete_stream,
                   describe_stream,
                   get_records,
                   get_shard_iterator,
                   list_streams,
                   merge_shards,
                   put_record,
                   split_shard]).

put_records_test_() ->
    {setup, fun test_setup/0, fun test_teardown/1, fun test_put_records/0}.

test_put_records() ->
    meck:expect(lhttpc,
                request,
                fun (_Url, post, _Headers, _Body, _Timeout, error) ->
                        {ok, {{400, bla}, headers, body}};
                    (_Url, post, _Headers, _Body, _Timeout, _Opts) ->
                        {ok,
                         {{200, bla},
                          headers,
                          <<"{\"FailedRecordCount\": 1,\n \"Records\":\n    "
                            "[{\"SequenceNumber\": \"10\", \"ShardId\": "
                            "\"5\" },\n     {\"ErrorCode\": \"404\", "
                            "\"ErrorMessage\": \"Not found\"}]}">>}}
                end),

    {ok, [Result1, Result2]} = erlang:apply(kinetic, put_records, [[]]),
    ?assertEqual(ok, Result1),
    ?assertEqual({error, {<<"404">>, <<"Not found">>}}, Result2).
--------------------------------------------------------------------------------
/test/kinetic_utils_tests.erl:
--------------------------------------------------------------------------------
-module(kinetic_utils_tests).

-include_lib("eunit/include/eunit.hrl").

kinetic_utils_test_() ->
    [?_test(test_endpoint()), ?_test(test_json_encoding_decoding())].

%%
%% Tests
%%

test_json_encoding_decoding() ->
    {error, _} = kinetic_utils:encode({whatever}),
    {error, _} = kinetic_utils:decode(<<"{\"whatever\"}">>),
    {error, _} = kinetic_utils:decode(<<"hello">>),
    [] = kinetic_utils:decode(<<"">>),
    {error, not_a_dict} = kinetic_utils:decode(<<"\"hello\"">>),
    [{<<"hello">>, <<"world">>}] = kinetic_utils:decode(<<"{\"hello\": \"world\"}">>).

test_endpoint() ->
    Service = "kinesis",
    Regions =
        ["us-east-1", "us-west-1", "us-west-2", "eu-west-1", "ap-northeast-1", "ap-southeast-1"],
    lists:foreach(fun(Region) ->
                     Url = Service ++ "." ++ Region ++ ".amazonaws.com",
                     Url = kinetic_utils:endpoint(Service, Region)
                  end,
                  Regions).
--------------------------------------------------------------------------------