├── config
├── dev.exs
├── prod.exs
├── runtime.exs
├── config.exs
└── test.exs
├── test
├── test_helper.exs
├── support
│ ├── accumulators
│ │ └── failing_accumulator.ex
│ ├── events.ex
│ ├── temp_storage
│ │ └── test_storage.ex
│ ├── client_helper.ex
│ ├── behaviours
│ │ ├── kafka_ex.ex
│ │ └── brod.ex
│ ├── metrics.ex
│ ├── producers
│ │ └── test_producer.ex
│ ├── events
│ │ └── kafka.exs
│ ├── mocks.ex
│ ├── mox_helper.ex
│ └── collectors
│ │ └── collector_handlers.ex
└── kafka_batcher
│ ├── collector
│ └── utils_test.exs
│ ├── prom_ex
│ └── plugins
│ │ └── kafka_test.exs
│ ├── accumulator_test.exs
│ ├── connection_manager_test.exs
│ ├── temp_storage_test.exs
│ ├── producers
│ ├── kafka_ex_test.exs
│ └── kaffe_test.exs
│ ├── collector_test.exs
│ └── config_test.exs
├── .formatter.exs
├── .dialyzer_ignore.exs
├── lib
├── kafka_batcher
│ ├── behaviours
│ │ ├── batch_flusher.ex
│ │ ├── error_notifier.ex
│ │ ├── temp_storage.ex
│ │ ├── collector.ex
│ │ └── producer.ex
│ ├── accumulator
│ │ ├── default_batch_flusher.ex
│ │ └── state.ex
│ ├── temp_storage
│ │ ├── batch.ex
│ │ └── default.ex
│ ├── supervisor.ex
│ ├── default_error_notifier.ex
│ ├── collector
│ │ ├── utils.ex
│ │ ├── implementation.ex
│ │ └── state.ex
│ ├── accumulators_pool_supervisor.ex
│ ├── temp_storage.ex
│ ├── producers
│ │ ├── kaffe.ex
│ │ ├── kafka_ex.ex
│ │ └── base.ex
│ ├── connection_manager.ex
│ ├── prom_ex
│ │ └── plugins
│ │ │ └── kafka.ex
│ ├── accumulator.ex
│ ├── collector.ex
│ └── config.ex
└── kafka_batcher.ex
├── .credo.exs
├── Dockerfile.test
├── coveralls.json
├── CHANGELOG.md
├── .gitignore
├── mix.exs
├── README.md
├── LICENSE
└── mix.lock
/config/dev.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
--------------------------------------------------------------------------------
/config/prod.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.start()
2 | KafkaBatcher.ProducerHelper.init()
3 |
--------------------------------------------------------------------------------
/config/runtime.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
3 | config :kafka_batcher, :kafka, endpoints: "localhost:9092"
4 |
--------------------------------------------------------------------------------
/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"],
4 | line_length: 128
5 | ]
6 |
--------------------------------------------------------------------------------
/.dialyzer_ignore.exs:
--------------------------------------------------------------------------------
1 | [
2 | # If we compile with another @storage_impl lib/kafka_batcher/temp_storage.ex:33 become reachable
3 | {"lib/kafka_batcher/temp_storage.ex", :guard_fail, 30}
4 | ]
5 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/behaviours/batch_flusher.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Behaviours.BatchFlusher do
2 | @moduledoc "Determines whether we have to produce the batch immediately"
3 |
4 | @callback flush?(binary(), map()) :: boolean()
5 | end
6 |
--------------------------------------------------------------------------------
/.credo.exs:
--------------------------------------------------------------------------------
1 | %{
2 | configs: [
3 | %{
4 | name: "default",
5 | strict: true,
6 | checks: %{
7 | extra: [
8 | {Credo.Check.Refactor.LongQuoteBlocks, max_line_count: 200},
9 | ]
10 | }
11 | }
12 | ]
13 | }
14 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/behaviours/error_notifier.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Behaviours.ErrorNotifier do
2 | @moduledoc """
3 | ErrorNotifier behaviour is used to report errors to external error-monitoring systems.
4 |
5 | In the most base implementation it could just log errors.
6 | """
7 |
8 | @callback report(exception :: Exception.t() | [type: String.t(), message: String.t()], options :: Keyword.t()) :: :ok
9 | end
10 |
--------------------------------------------------------------------------------
/test/support/accumulators/failing_accumulator.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Accumulators.FailingAccumulator do
2 | @moduledoc false
3 | use GenServer
4 |
5 | def start_link(_) do
6 | GenServer.start_link(__MODULE__, nil)
7 | end
8 |
9 | @impl true
10 | def init(nil) do
11 | {:ok, nil}
12 | end
13 |
14 | @impl true
15 | def handle_call(_request, _from, _state) do
16 | throw(:timeout)
17 | end
18 | end
19 |
--------------------------------------------------------------------------------
/test/support/events.ex:
--------------------------------------------------------------------------------
1 | defmodule PromEx.Test.Support.Events do
2 | @moduledoc false
3 |
4 | @doc false
5 | @spec execute_all(atom | String.t()) :: :ok
6 | def execute_all(plugin) do
7 | [File.cwd!(), "test", "support", "events", "#{plugin}.exs"]
8 | |> Path.join()
9 | |> Code.eval_file()
10 | |> elem(0)
11 | |> Enum.each(&:telemetry.execute(&1.event, &1.measurements, &1.metadata))
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/accumulator/default_batch_flusher.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Accumulator.DefaultBatchFlusher do
2 | @moduledoc """
3 | This module implements logic for force pushing current batch to Kafka,
4 | without waiting for other conditions (on size and/or interval).
5 | """
6 | @behaviour KafkaBatcher.Behaviours.BatchFlusher
7 |
8 | @impl KafkaBatcher.Behaviours.BatchFlusher
9 | def flush?(_key, _event) do
10 | false
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/Dockerfile.test:
--------------------------------------------------------------------------------
1 | FROM docker.samokat.io/platform/elixir/base-image:1.14.3.1-release
2 |
3 | ARG MIX_ENV=test
4 | ARG project_dir
5 |
6 | WORKDIR ${project_dir}
7 | COPY . .
8 |
9 | # Install mix packages
10 | RUN mix local.hex --force && \
11 | mix local.rebar --force
12 |
13 | RUN HEX_HTTP_CONCURRENCY=3 HEX_HTTP_TIMEOUT=240 mix deps.get && mix deps.compile && mix compile
14 | RUN mix format --check-formatted
15 | RUN mix dialyzer --ignore-exit-status
16 | RUN mix test.coverage
17 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/temp_storage/batch.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.TempStorage.Batch do
2 | @moduledoc """
3 | The struct used for KafkaBatcher.Behaviours.TempStorage behavior
4 | """
5 |
6 | defstruct [:messages, :topic, :partition, :producer_config]
7 |
8 | @type message() :: KafkaBatcher.MessageObject.t()
9 |
10 | @type t() :: %__MODULE__{
11 | messages: [message()],
12 | topic: String.t(),
13 | partition: String.t() | nil,
14 | producer_config: Keyword.t()
15 | }
16 | end
17 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/behaviours/temp_storage.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Behaviours.TempStorage do
2 | @moduledoc """
3 | KafkaBatcher.Behaviours.TempStorage behaviour is used to implement events saving logic in case when Kafka is not available.
4 | """
5 |
6 | alias KafkaBatcher.TempStorage.Batch
7 |
8 | @type topic_name() :: String.t()
9 |
10 | @doc "Save batch to retry"
11 | @callback save_batch(Batch.t()) :: :ok
12 |
13 | @doc "Check if the storage is empty"
14 | @callback empty?(topic_name()) :: boolean()
15 | end
16 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/supervisor.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Supervisor do
2 | @moduledoc """
3 | The root of KafkaBatcher supervision tree
4 | Starts Collector & AccumulatorsPoolSupervisor for each configured collector
5 | """
6 |
7 | use Supervisor
8 |
9 | def start_link(args) do
10 | Supervisor.start_link(__MODULE__, args, name: __MODULE__)
11 | end
12 |
13 | def init(_args) do
14 | children = KafkaBatcher.Config.collectors_spec()
15 |
16 | opts = [strategy: :one_for_one]
17 | Supervisor.init(children, opts)
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/test/support/temp_storage/test_storage.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.TempStorage.TestStorage do
2 | @moduledoc false
3 |
4 | @behaviour KafkaBatcher.Behaviours.TempStorage
5 | use KafkaBatcher.ClientHelper, reg_name: __MODULE__
6 | use KafkaBatcher.MoxHelper, client: __MODULE__
7 |
8 | @impl true
9 | def empty?(topic_name) do
10 | process_callback(%{action: :empty?, parameters: topic_name}, true)
11 | end
12 |
13 | @impl true
14 | def save_batch(batch) do
15 | process_callback(%{action: :save_batch, parameters: batch}, :ok)
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/default_error_notifier.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.DefaultErrorNotifier do
2 | @moduledoc """
3 | Default implementation of the `KafkaBatcher.Behaviours.ErrorNotifier` behaviour.
4 |
5 | It just logs the errors. You can implement your own implementation to send errors to some error monitoring system.
6 | """
7 | require Logger
8 | @behaviour KafkaBatcher.Behaviours.ErrorNotifier
9 |
10 | @impl KafkaBatcher.Behaviours.ErrorNotifier
11 | def report(exception, options \\ []) do
12 | Logger.error([inspect(exception), "\n", inspect(options)])
13 | end
14 | end
15 |
--------------------------------------------------------------------------------
/coveralls.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_stop_words": [
3 | "defmodule",
4 | "defrecord",
5 | "defimpl",
6 | "defexception",
7 | "defprotocol",
8 | "defstruct",
9 | "def.+(.+\\\\.+).+do",
10 | "^\\s+use\\s+"
11 | ],
12 |
13 | "custom_stop_words": [
14 | ],
15 |
16 | "coverage_options": {
17 | "treat_no_relevant_lines_as_covered": false,
18 | "output_dir": "cover/",
19 | "minimum_coverage": 0
20 | },
21 |
22 | "terminal_options": {
23 | "file_column_width": 40
24 | },
25 |
26 | "skip_files": [
27 | "lib/mix/tasks/gen_pg_storage.ex",
28 | "lib/kafka_batcher/behaviours/",
29 | "test/support/"
30 | ]
31 | }
32 |
33 |
--------------------------------------------------------------------------------
/config/config.exs:
--------------------------------------------------------------------------------
1 | # This file is responsible for configuring your application
2 | # and its dependencies with the aid of the Config module.
3 | #
4 | # This configuration file is loaded before any dependency and
5 | # is restricted to this project.
6 |
7 | # General application configuration
8 | import Config
9 |
10 | config :kafka_batcher,
11 | producer_module: KafkaBatcher.Producers.Kaffe,
12 | storage_impl: KafkaBatcher.TempStorage.Default
13 |
14 | config :kafka_ex, :disable_default_worker, true
15 |
16 | # Import environment specific config. This must remain at the bottom
17 | # of this file so it overrides the configuration defined above.
18 | #
19 | import_config "#{config_env()}.exs"
20 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this image will be documented in this file.
4 | Please refer to https://keepachangelog.com/en/1.0.0/ for format.
5 |
6 | ## [Unreleased]
7 |
8 | ## [1.1.0] - 2025-08-12
9 | ### Changes
10 | - Improve KafkaBatcher.Collector reliability
11 | - Fix credo complaints
12 | - Fix dialyzer complaints
13 | - Fix flapping fail in Producers.KaffeTest
14 |
15 |
16 | ## [1.0.2] - 2024-05-08
17 | ### Bugfixes
18 | - Cache partitions count in the collector state
19 |
20 | ## [1.0.1] - 2024-03-11
21 | ### Bugfixes
22 | - Update last_check_timestamp only in case we really check the temporary storage
23 |
24 | ## [1.0.0] - 2024-02-26
25 | - Initial release to OpenSource
26 |
--------------------------------------------------------------------------------
/test/support/client_helper.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.ClientHelper do
2 | @moduledoc false
3 | use Agent
4 |
5 | defmacro __using__(opts) do
6 | quote location: :keep, bind_quoted: [opts: opts] do
7 | defp reg_name do
8 | Keyword.fetch!(unquote(Macro.escape(opts)), :reg_name)
9 | end
10 |
11 | def init do
12 | start_link()
13 | end
14 |
15 | def start_link do
16 | Agent.start_link(fn -> %KafkaBatcher.MoxHelper.State{} end, name: reg_name())
17 | end
18 |
19 | def set_state(response) do
20 | Agent.update(reg_name(), fn _ -> response end)
21 | end
22 |
23 | def get_state do
24 | Agent.get(reg_name(), & &1)
25 | end
26 | end
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/behaviours/collector.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Behaviours.Collector do
2 | @moduledoc """
3 | Collector must implement add_events/1 callback to receive events.
4 |
5 | Event could be a MessageObject or a tuple with headers, key and value, but headers and key could be omitted
6 | """
7 |
8 | @type header_key() :: binary()
9 | @type header_value() :: binary()
10 | @type headers() :: [{header_key(), header_value()}]
11 | @type key :: binary() | nil
12 | @type value :: map() | binary()
13 | @type message_object :: KafkaBatcher.MessageObject.t()
14 | @type event :: {headers(), key(), value()} | {key(), value()} | value() | message_object()
15 | @type events :: list(event())
16 |
17 | @callback add_events(events :: events()) :: :ok | {:error, term()}
18 | end
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # The directory Mix will write compiled artifacts to.
2 | /_build/
3 |
4 | # If you run "mix test --cover", coverage assets end up here.
5 | /cover/
6 |
7 | # The directory Mix downloads your dependencies sources to.
8 | /deps/
9 |
10 | # Where third-party dependencies like ExDoc output generated docs.
11 | /doc/
12 |
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 |
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 |
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 |
22 | # Ignore package tarball (built via "mix hex.build").
23 | kafka*.tar
24 |
25 | # Temporary files, for example, from tests.
26 | /tmp/
27 |
28 | /priv/repo
29 | /lib/kafka_batcher/codegen
30 | .idea/
31 | .DS_Store
32 | *.iml
33 | .elixir_ls
--------------------------------------------------------------------------------
/test/support/behaviours/kafka_ex.ex:
--------------------------------------------------------------------------------
1 | defmodule Test.Support.Behaviours.KafkaEx do
2 | @moduledoc false
3 | # FOR TEST ONLY
4 | @type client_id() :: atom()
5 | @type config() :: Keyword.t()
6 | @type metadata_response() :: map()
7 | @type produce_request() :: map()
8 |
9 | @callback create_worker(client_id(), config()) ::
10 | Supervisor.on_start_child()
11 | @callback metadata(Keyword.t()) :: metadata_response()
12 | @callback get_partitions_count(client_id(), binary()) ::
13 | {:ok, pos_integer()} | {:error, binary() | atom()}
14 | @callback produce(produce_request(), Keyword.t()) :: nil | :ok | {:ok, integer} | {:error, any}
15 | end
16 |
17 | defmodule Test.Support.Behaviours.KafkaEx.Metadata do
18 | @moduledoc false
19 |
20 | @callback partitions_for_topic(Test.Support.Behaviours.KafkaEx.metadata_response(), binary()) :: list()
21 | end
22 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/behaviours/producer.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Behaviours.Producer do
2 | @moduledoc """
3 | KafkaBatcher.Behaviours.Producer adds an abstraction level over producer implementations in various Kafka libs.
4 | Defines the callbacks that a Kafka producer should implement
5 | """
6 | @type event :: KafkaBatcher.MessageObject.t()
7 | @type events :: list(event())
8 |
9 | @callback do_produce(
10 | events :: events(),
11 | topic :: binary(),
12 | partition :: non_neg_integer() | nil,
13 | config :: Keyword.t()
14 | ) :: :ok | {:error, binary() | atom()}
15 |
16 | @callback get_partitions_count(binary()) :: {:ok, integer()} | {:error, binary() | atom()}
17 |
18 | @callback start_client() :: {:ok, pid()} | {:error, any()}
19 |
20 | @callback start_producer(binary(), Keyword.t()) :: :ok | {:error, any()}
21 | end
22 |
--------------------------------------------------------------------------------
/test/support/behaviours/brod.ex:
--------------------------------------------------------------------------------
1 | defmodule Test.Support.Behaviours.Brod do
2 | @moduledoc false
3 | # FOR TEST ONLY
4 | @type portnum :: pos_integer()
5 | @type hostname :: binary() | :inet.hostname() | :inet.ip_address()
6 | @type endpoint :: {hostname(), portnum()}
7 | @type endpoints :: list(endpoint())
8 | @type client_id() :: atom()
9 | @type config() :: :proplists.proplist()
10 |
11 | @callback start_link_client(endpoints(), client_id(), config()) ::
12 | :ok | {:error, any()}
13 | @callback start_producer(client_id(), binary(), config()) ::
14 | :ok | {:error, any()}
15 | @callback get_partitions_count(client_id(), binary()) ::
16 | {:ok, pos_integer()} | {:error, binary() | atom()}
17 | @callback produce_sync(client_id(), binary(), non_neg_integer(), binary(), list({binary(), binary()})) ::
18 | :ok | {:error, any()}
19 | end
20 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/temp_storage/default.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.TempStorage.Default do
2 | @moduledoc """
3 | Default implementation of KafkaBatcher.Behaviours.TempStorage
4 |
5 | It just logs the messages. To have more fault tolerant implementation
6 | you should implement your own logic to save these messages into some persistent storage.
7 | """
8 |
9 | @behaviour KafkaBatcher.Behaviours.TempStorage
10 |
11 | require Logger
12 |
13 | @impl KafkaBatcher.Behaviours.TempStorage
14 | def save_batch(%KafkaBatcher.TempStorage.Batch{topic: topic, partition: partition, messages: messages}) do
15 | Logger.error("""
16 | KafkaBatcher: Failed to send #{inspect(Enum.count(messages))} messages to the kafka topic #{topic}##{partition}
17 | """)
18 |
19 | Enum.each(messages, fn message -> Logger.info(inspect(message)) end)
20 |
21 | :ok
22 | end
23 |
24 | @impl KafkaBatcher.Behaviours.TempStorage
25 | def empty?(_topic) do
26 | true
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/collector/utils.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Collector.Utils do
2 | @moduledoc """
3 | Provides functions for transforming events in different input formats into MessageObject struct
4 | """
5 | alias KafkaBatcher.MessageObject
6 |
7 | @type event ::
8 | binary()
9 | | {binary(), binary()}
10 | | {list(), binary(), binary()}
11 | | map()
12 | | MessageObject.t()
13 |
14 | @spec prepare_events([event()]) :: [MessageObject.t()]
15 | def prepare_events(events) do
16 | List.wrap(events)
17 | |> Enum.map(&transform_event/1)
18 | end
19 |
20 | @spec transform_event(event()) :: MessageObject.t()
21 | def transform_event(%MessageObject{} = event) do
22 | event
23 | end
24 |
25 | def transform_event(value) when is_map(value) or is_binary(value) do
26 | %MessageObject{value: value}
27 | end
28 |
29 | def transform_event({key, value}) do
30 | %MessageObject{key: key, value: value}
31 | end
32 |
33 | def transform_event({headers, key, value}) do
34 | %MessageObject{headers: headers, key: key, value: value}
35 | end
36 | end
37 |
--------------------------------------------------------------------------------
/test/support/metrics.ex:
--------------------------------------------------------------------------------
1 | defmodule PromEx.Test.Support.Metrics do
2 | @moduledoc false
3 |
4 | import ExUnit.Assertions
5 |
6 | @doc false
7 | @spec read_expected(atom | String.t()) :: [String.t()]
8 | def read_expected(plugin) do
9 | [File.cwd!(), "test", "support", "metrics", "#{plugin}.txt"]
10 | |> Path.join()
11 | |> File.read!()
12 | |> sort()
13 | end
14 |
15 | @doc false
16 | @spec sort(String.t()) :: [String.t()]
17 | def sort(metrics_string) do
18 | metrics_string
19 | |> String.split("\n", trim: true)
20 | |> Enum.sort()
21 | end
22 |
23 | @doc false
24 | @spec assert_prom_ex_metics(module(), atom()) :: :ok
25 | def assert_prom_ex_metics(prom_ex_module, expected_metrics_lookup) do
26 | collected_metrics =
27 | prom_ex_module
28 | |> PromEx.get_metrics()
29 | |> sort()
30 |
31 | expected_metrics = read_expected(expected_metrics_lookup)
32 |
33 | assert length(collected_metrics) == length(expected_metrics)
34 |
35 | collected_metrics
36 | |> Enum.zip(expected_metrics)
37 | |> Enum.each(fn {collected_line, expected_line} ->
38 | assert collected_line == expected_line
39 | end)
40 | end
41 | end
42 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/accumulators_pool_supervisor.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.AccumulatorsPoolSupervisor do
2 | @moduledoc """
3 | Manage dynamic pool of accumulators for the KafkaBatcher.Collector instance
4 | """
5 |
6 | use DynamicSupervisor
7 |
8 | alias KafkaBatcher.Accumulator
9 |
10 | @dialyzer {:no_return, {:init, 1}}
11 |
12 | def start_link(config) do
13 | DynamicSupervisor.start_link(__MODULE__, config, name: reg_name(config))
14 | end
15 |
16 | @doc "Returns a specification to start this module under a supervisor"
17 | def child_spec(config) do
18 | %{
19 | id: reg_name(config),
20 | start: {__MODULE__, :start_link, [config]},
21 | type: :supervisor
22 | }
23 | end
24 |
25 | def init(config) do
26 | # max_restarts value depends on partitions count in case when partitioned accumulation is used.
27 | # For example: 100 max_restarts -> 10 process restarts per second for 1 topic with 10 partitions
28 | DynamicSupervisor.init(
29 | strategy: :one_for_one,
30 | restart: :permanent,
31 | max_restarts: Keyword.get(config, :max_restart, 100),
32 | max_seconds: 1,
33 | extra_arguments: []
34 | )
35 | end
36 |
37 | def start_accumulator(args) do
38 | DynamicSupervisor.start_child(reg_name(args), Accumulator.child_spec(args))
39 | end
40 |
41 | def reg_name(args) do
42 | :"#{__MODULE__}.#{Keyword.fetch!(args, :topic_name)}"
43 | end
44 | end
45 |
--------------------------------------------------------------------------------
/test/kafka_batcher/collector/utils_test.exs:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Collector.UtilsTest do
2 | use ExUnit.Case
3 | alias KafkaBatcher.Collector.Utils
4 | alias KafkaBatcher.MessageObject
5 |
6 | test "transform events" do
7 | assert [%MessageObject{key: "key", value: "value"}] == Utils.prepare_events({"key", "value"})
8 |
9 | assert [
10 | %MessageObject{key: "key1", value: "value1"},
11 | %MessageObject{key: "key2", value: "value2"}
12 | ] == Utils.prepare_events([{"key1", "value1"}, {"key2", "value2"}])
13 |
14 | assert [
15 | %MessageObject{headers: "header1", key: "key1", value: "value1"},
16 | %MessageObject{key: "key2", value: "value2"}
17 | ] == Utils.prepare_events([{"header1", "key1", "value1"}, {"key2", "value2"}])
18 |
19 | assert [
20 | %MessageObject{headers: "header1", key: "key1", value: "value1"},
21 | %MessageObject{value: "value2"}
22 | ] == Utils.prepare_events([{"header1", "key1", "value1"}, "value2"])
23 |
24 | assert [
25 | %MessageObject{headers: "header1", key: "key1", value: "value1"},
26 | %MessageObject{value: %{some_value: "value2"}}
27 | ] == Utils.prepare_events([{"header1", "key1", "value1"}, %{some_value: "value2"}])
28 |
29 | assert_raise FunctionClauseError, fn -> Utils.prepare_events([{"header1", "key1", "value1"}, []]) end
30 | end
31 | end
32 |
--------------------------------------------------------------------------------
/test/kafka_batcher/prom_ex/plugins/kafka_test.exs:
--------------------------------------------------------------------------------
1 | defmodule PromEx.Plugins.KafkaTest do
2 | use ExUnit.Case, async: false
3 |
4 | alias KafkaBatcher.PromEx.Plugins.Kafka
5 | alias PromEx.Test.Support.{Events, Metrics}
6 |
7 | defmodule WebApp.PromEx do
8 | use PromEx, otp_app: :elixir
9 |
10 | @impl true
11 | def plugins, do: [{Kafka, metric_prefix: [:prom_ex, :kafka]}]
12 | end
13 |
14 | test "The telemetry with Kafka plugin works for start/stop" do
15 | Application.put_env(:kafka_batcher, :kafka_topic_aliases, %{
16 | "my.incoming-events.topic-long-name" => "incoming-events"
17 | })
18 |
19 | start_supervised!(WebApp.PromEx)
20 | Events.execute_all(:kafka)
21 |
22 | metrics =
23 | WebApp.PromEx
24 | |> PromEx.get_metrics()
25 | |> Metrics.sort()
26 |
27 | assert Metrics.read_expected(:kafka) == metrics
28 | end
29 |
30 | describe "The event_metrics/1" do
31 | test "should return the correct number of metrics" do
32 | assert length(Kafka.event_metrics(otp_app: :prom_ex)) == 2
33 | end
34 | end
35 |
36 | describe "The polling_metrics/1" do
37 | test "should return the correct number of metrics" do
38 | assert Kafka.polling_metrics([]) == []
39 | end
40 | end
41 |
42 | describe "The manual_metrics/1" do
43 | test "should return the correct number of metrics" do
44 | assert Kafka.manual_metrics(otp_app: :prom_ex) == []
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/test/support/producers/test_producer.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Producers.TestProducer do
2 | @moduledoc false
3 |
4 | @behaviour KafkaBatcher.Behaviours.Producer
5 | use KafkaBatcher.Producers.Base
6 | use KafkaBatcher.ClientHelper, reg_name: __MODULE__
7 | use KafkaBatcher.MoxHelper, client: __MODULE__
8 |
9 | @topic1 "topic1"
10 | @topic2 "topic2"
11 | @topic3 "topic3"
12 | @topic4 "topic4"
13 | @topic5 "topic5"
14 | @topic6 "topic6"
15 | @topic7 "topic7"
16 | @topic8 "topic8"
17 |
18 | @partition_counts %{
19 | @topic1 => 10,
20 | @topic2 => 20,
21 | @topic3 => 10,
22 | @topic4 => 4,
23 | @topic5 => 5,
24 | @topic6 => 6,
25 | @topic7 => 7,
26 | @topic8 => 8
27 | }
28 |
29 | @impl true
30 | def start_client do
31 | process_callback(%{action: :start_client}, {:ok, self()})
32 | end
33 |
34 | @impl true
35 | def start_producer(topic_name, config) do
36 | process_callback(%{action: :start_producer, parameters: {topic_name, config}}, :ok)
37 | end
38 |
39 | @impl true
40 | def get_partitions_count(topic_name) do
41 | response = {:ok, @partition_counts[topic_name]}
42 | process_callback(%{action: :get_partitions_count, parameters: topic_name}, response)
43 | end
44 |
45 | @impl true
46 | def do_produce(messages, topic, partition, config) do
47 | process_callback(%{action: :do_produce, parameters: {messages, topic, partition, config}}, :ok)
48 | end
49 |
50 | def topic_name(idx) when idx >= 1 and idx <= 8, do: "topic#{idx}"
51 | end
52 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/temp_storage.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.TempStorage do
2 | @moduledoc """
3 | Implements wrap-functions that are called to save batches when Kafka is unavailable.
4 | """
5 |
6 | alias KafkaBatcher.Collector.State, as: CollectorState
7 |
8 | @storage_impl Application.compile_env(:kafka_batcher, :storage_impl, KafkaBatcher.TempStorage.Default)
9 | @recheck_kafka_availability_interval Application.compile_env(
10 | :kafka_batcher,
11 | :recheck_kafka_availability_interval,
12 | 5_000
13 | )
14 |
15 | @spec save_batch(KafkaBatcher.TempStorage.Batch.t()) :: :ok
16 | def save_batch(batch), do: @storage_impl.save_batch(batch)
17 |
18 | @spec check_storage(CollectorState.t()) :: CollectorState.t()
19 | def check_storage(%CollectorState{last_check_timestamp: last_check_timestamp} = state) do
20 | now = System.os_time(:millisecond)
21 |
22 | if should_recheck?(last_check_timestamp, now) do
23 | recheck_and_update(state, now)
24 | else
25 | state
26 | end
27 | end
28 |
29 | defp recheck_and_update(%CollectorState{topic_name: topic, locked?: true} = state, now) do
30 | if @storage_impl.empty?(topic) do
31 | %CollectorState{state | locked?: false, last_check_timestamp: nil}
32 | else
33 | %CollectorState{state | last_check_timestamp: now}
34 | end
35 | end
36 |
37 | defp should_recheck?(last_check_timestamp, now) do
38 | is_nil(last_check_timestamp) || last_check_timestamp + @recheck_kafka_availability_interval < now
39 | end
40 | end
41 |
--------------------------------------------------------------------------------
/config/test.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
3 | config :kafka_batcher, producer_module: KafkaBatcher.Producers.TestProducer
4 |
5 | config :kafka_batcher, :brod_client, KafkaBatcher.BrodMock
6 |
7 | config :kafka_batcher, :kafka_ex_client, KafkaBatcher.KafkaExMock
8 |
9 | config :kafka_batcher, :kafka_ex_metadata, KafkaBatcher.KafkaEx.MetadataMock
10 |
11 | config :kafka_batcher,
12 | recheck_kafka_availability_interval: 50,
13 | storage_impl: KafkaBatcher.TempStorage.TestStorage,
14 | reconnect_timeout: 100
15 |
16 | config :kafka_batcher, KafkaBatcher.Test.CalculatePartitionByValueCollector, topic_name: "topic1"
17 | config :kafka_batcher, KafkaBatcher.Test.SimpleCollector, topic_name: "topic2"
18 | config :kafka_batcher, KafkaBatcher.Test.BatchFlushCollector, topic_name: "topic3"
19 | config :kafka_batcher, KafkaBatcher.Test.CalculatePartitionByKeyCollector, topic_name: "topic4"
20 | config :kafka_batcher, KafkaBatcher.Test.SimpleCollectorWithDelay, topic_name: "topic6"
21 | config :kafka_batcher, KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl, topic_name: "topic7"
22 | config :kafka_batcher, KafkaBatcher.Test.SimpleCollectorMaxWaitTime, topic_name: "topic8"
23 |
24 | config :kafka_batcher,
25 | collectors: [
26 | KafkaBatcher.Test.CalculatePartitionByValueCollector,
27 | KafkaBatcher.Test.SimpleCollector,
28 | KafkaBatcher.Test.BatchFlushCollector,
29 | KafkaBatcher.Test.CalculatePartitionByKeyCollector,
30 | KafkaBatcher.Test.SimpleCollectorWithDelay,
31 | KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl,
32 | KafkaBatcher.Test.SimpleCollectorMaxWaitTime
33 | ]
34 |
35 | config :kafka_batcher, :kafka,
36 | telemetry: true,
37 | allow_topic_auto_creation: false,
38 | partition_strategy: :random,
39 | required_acks: 1
40 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/producers/kaffe.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Producers.Kaffe do
2 | @moduledoc """
3 | An implementation of the KafkaBatcher.Behaviours.Producer for Kaffe
4 | """
5 |
6 | @brod_client Application.compile_env(:kafka_batcher, :brod_client, :brod)
7 | @client_name :kafka_producer_client
8 |
9 | @behaviour KafkaBatcher.Behaviours.Producer
10 | use KafkaBatcher.Producers.Base
11 |
12 | ## -------------------------------------------------------------------------
13 | ## public api
14 | ## -------------------------------------------------------------------------
15 |
16 | @impl true
17 | def start_client do
18 | config = KafkaBatcher.Config.general_producer_config()
19 | endpoints = Keyword.fetch!(config, :endpoints)
20 |
21 | @brod_client.start_link_client(endpoints, @client_name, config)
22 | end
23 |
24 | @impl true
25 | def start_producer(topic_name, config) do
26 | @brod_client.start_producer(@client_name, topic_name, config)
27 | end
28 |
29 | @impl true
30 | def get_partitions_count(topic) do
31 | @brod_client.get_partitions_count(@client_name, topic)
32 | end
33 |
34 | @impl true
35 | def do_produce(messages, topic, partition, _config) do
36 | @brod_client.produce_sync(@client_name, topic, partition, "ignored", transform_messages(messages))
37 | end
38 |
39 | ## -------------------------------------------------------------------------
40 | ## internal functions
41 | ## -------------------------------------------------------------------------
42 |
43 | defp transform_messages(messages) do
44 | Enum.map(
45 | messages,
46 | fn %KafkaBatcher.MessageObject{headers: headers, key: key, value: value} ->
47 | %{
48 | headers: headers,
49 | ts: System.os_time(:millisecond),
50 | key: key,
51 | value: value
52 | }
53 | end
54 | )
55 | end
56 | end
57 |
--------------------------------------------------------------------------------
/test/kafka_batcher/accumulator_test.exs:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.AccumulatorTest do
2 | use ExUnit.Case, async: false
3 | use KafkaBatcher.Mocks
4 |
5 | alias KafkaBatcher.{Accumulator, MessageObject}
6 | alias KafkaBatcher.Producers.TestProducer
7 | alias KafkaBatcher.TempStorage.TestStorage
8 |
9 | setup_all do
10 | prepare_producers()
11 | end
12 |
13 | setup do
14 | prepare_mocks()
15 | end
16 |
17 | def prepare_producers do
18 | KafkaBatcher.ProducerHelper.connection_manager_up()
19 | :ok
20 | end
21 |
22 | def prepare_mocks do
23 | TestProducer.set_owner()
24 | TestProducer.set_notification_mode(:do_produce, :on)
25 | TestStorage.set_owner()
26 | TestStorage.set_notification_mode(:save_batch, :on)
27 | TestStorage.set_notification_mode(:empty?, :on)
28 |
29 | on_exit(fn ->
30 | TestProducer.set_notification_mode(:start_client, :off)
31 | TestProducer.set_notification_mode(:start_producer, :off)
32 | TestProducer.set_notification_mode(:do_produce, :off)
33 | TestStorage.set_notification_mode(:save_batch, :off)
34 | TestStorage.set_notification_mode(:empty?, :off)
35 | end)
36 | end
37 |
38 | test "accumulator cleanup with not empty batch" do
39 | topic_name = "topicForTerminate"
40 | partition_num = 1
41 |
42 | opts = [
43 | topic_name: topic_name,
44 | partition: partition_num,
45 | config: [
46 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
47 | batch_size: 10,
48 | max_wait_time: 10_000,
49 | min_delay: 10,
50 | max_batch_bytesize: 150
51 | ],
52 | collector: SomeCollector
53 | ]
54 |
55 | {:ok, pid} = Accumulator.start_link(opts)
56 | :erlang.unlink(pid)
57 |
58 | event = %MessageObject{value: "some_value", key: "some_key"}
59 | Accumulator.add_event(event, topic_name, partition_num)
60 |
61 | Process.exit(pid, :some_reason)
62 | assert_receive(%{action: :do_produce, parameters: parameters})
63 | {[^event], ^topic_name, ^partition_num, _} = parameters
64 | end
65 | end
66 |
--------------------------------------------------------------------------------
/test/support/events/kafka.exs:
--------------------------------------------------------------------------------
1 | [
2 | %{
3 | event: [:prom_ex, :plugin, :kafka, :producer],
4 | measurements: %{
5 | system_time: 1_654_972_577_479_492_414,
6 | duration: 100_115_245,
7 | batch_size: 200,
8 | batch_byte_size: 3000
9 | },
10 | metadata: %{
11 | topic: "my.incoming-events.topic-long-name",
12 | partition: 0
13 | }
14 | },
15 | %{
16 | event: [:prom_ex, :plugin, :kafka, :producer],
17 | measurements: %{
18 | system_time: 1_654_972_577_479_692_414,
19 | duration: 50_115_245,
20 | batch_size: 100,
21 | batch_byte_size: 1500
22 | },
23 | metadata: %{
24 | topic: "my.incoming-events.topic-long-name",
25 | partition: 1
26 | }
27 | },
28 | %{
29 | event: [:prom_ex, :plugin, :kafka, :producer],
30 | measurements: %{
31 | system_time: 1_654_972_577_479_992_414,
32 | duration: 500_115_100,
33 | batch_size: 1000,
34 | batch_byte_size: 5000
35 | },
36 | metadata: %{
37 | topic: "my.incoming-events.topic-long-name",
38 | partition: 2
39 | }
40 | },
41 | %{
42 | event: [:prom_ex, :plugin, :kafka, :consumer],
43 | measurements: %{
44 | system_time: 1_654_972_577_480_992_414,
45 | duration: 100_115_245,
46 | batch_size: 200,
47 | batch_byte_size: 3000
48 | },
49 | metadata: %{
50 | topic: "my.incoming-events.topic-long-name",
51 | partition: 0
52 | }
53 | },
54 | %{
55 | event: [:prom_ex, :plugin, :kafka, :consumer],
56 | measurements: %{
57 | system_time: 1_654_972_577_481_992_414,
58 | duration: 50_115_245,
59 | batch_size: 100,
60 | batch_byte_size: 1500
61 | },
62 | metadata: %{
63 | topic: "my.incoming-events.topic-long-name",
64 | partition: 1
65 | }
66 | },
67 | %{
68 | event: [:prom_ex, :plugin, :kafka, :consumer],
69 | measurements: %{
70 | system_time: 1_654_972_577_482_992_414,
71 | duration: 500_115_100,
72 | batch_size: 1000,
73 | batch_byte_size: 5000
74 | },
75 | metadata: %{
76 | topic: "my.incoming-events.topic-long-name",
77 | partition: 2
78 | }
79 | }
80 | ]
81 |
--------------------------------------------------------------------------------
/test/kafka_batcher/connection_manager_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ConnectionManagerTest do
2 | use ExUnit.Case, async: false
3 | use KafkaBatcher.Mocks
4 |
5 | alias KafkaBatcher.ConnectionManager
6 | alias KafkaBatcher.Producers.TestProducer
7 |
8 | @retry_timeout 100
9 |
10 | setup_all do
11 | prepare_producers()
12 | end
13 |
14 | setup do
15 | TestProducer.set_owner()
16 |
17 | on_exit(fn ->
18 | TestProducer.set_notification_mode(:start_client, :off)
19 | TestProducer.set_notification_mode(:start_producer, :off)
20 | TestProducer.set_response(:start_client, {:ok, :erlang.whereis(:user)})
21 | TestProducer.set_response(:start_producer, :ok)
22 | end)
23 | end
24 |
25 | def prepare_producers do
26 | KafkaBatcher.ProducerHelper.connection_manager_up()
27 | :ok
28 | end
29 |
30 | test "start client retry" do
31 | assert true == ConnectionManager.client_started?()
32 |
33 | TestProducer.set_response(:start_client, {:error, "failed connection"})
34 | TestProducer.set_notification_mode(:start_client, :on)
35 | TestProducer.set_notification_mode(:start_producer, :on)
36 |
37 | :ok = GenServer.stop(ConnectionManager)
38 | assert KafkaBatcher.ProducerHelper.ready_connection_manager?()
39 |
40 | assert_receive(%{action: :start_client}, 2 * @retry_timeout)
41 | TestProducer.set_response(:start_client, {:ok, Process.whereis(:user)})
42 | assert_receive(%{action: :start_client}, 2 * @retry_timeout)
43 | assert_receive(%{action: :start_producer}, 2 * @retry_timeout)
44 | assert true == ConnectionManager.client_started?()
45 | end
46 |
47 | test "start producer retry" do
48 | TestProducer.set_response(:start_client, {:ok, Process.whereis(:user)})
49 | TestProducer.set_notification_mode(:start_producer, :on)
50 | TestProducer.set_response(:start_producer, {:error, "failed connection"})
51 |
52 | :ok = GenServer.stop(ConnectionManager)
53 | assert KafkaBatcher.ProducerHelper.ready_connection_manager?()
54 |
55 | assert_receive(%{action: :start_producer}, 2 * @retry_timeout)
56 | TestProducer.set_response(:start_producer, :ok)
57 | assert_receive(%{action: :start_producer}, 2 * @retry_timeout)
58 | assert KafkaBatcher.ProducerHelper.ready_pool?()
59 | end
60 | end
61 |
--------------------------------------------------------------------------------
/test/kafka_batcher/temp_storage_test.exs:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.TempStorageTest do
2 | use ExUnit.Case, async: false
3 |
4 | alias KafkaBatcher.Collector.State, as: CollectorState
5 | alias KafkaBatcher.TempStorage
6 | alias KafkaBatcher.TempStorage.TestStorage
7 | alias Uniq.UUID
8 |
9 | @recheck_kafka_availability_interval Application.compile_env(
10 | :kafka_batcher,
11 | :recheck_kafka_availability_interval,
12 | 5_000
13 | )
14 |
15 | test "Check that TempStorage doesn't git the underlying storage more often than once in @recheck_kafka_availability_interval" do
16 | TestStorage.set_owner()
17 | TestStorage.set_notification_mode(:empty?, :on)
18 | TestStorage.set_response(:empty?, false)
19 |
20 | now = System.os_time(:millisecond)
21 | topic = UUID.uuid4()
22 |
23 | locked_state = TempStorage.check_storage(%CollectorState{topic_name: topic, locked?: true})
24 | assert Map.get(locked_state, :locked?) === true
25 | assert Map.get(locked_state, :last_check_timestamp) >= now
26 | assert_received %{action: :empty?, parameters: ^topic}
27 |
28 | # wait a bit, but less than @recheck_kafka_availability_interval
29 | Process.sleep(2)
30 | assert TempStorage.check_storage(locked_state) === locked_state
31 |
32 | refute_received %{action: :empty?, parameters: ^topic},
33 | "Should be called only once during @recheck_kafka_availability_interval"
34 |
35 | # let's pretend that we checked stogare more than @recheck_kafka_availability_interval ms ago
36 | old_state = Map.put(locked_state, :last_check_timestamp, now - @recheck_kafka_availability_interval)
37 | new_state = TempStorage.check_storage(old_state)
38 |
39 | assert new_state !== old_state, "last_check_timestamp should be updated"
40 | assert Map.delete(new_state, :last_check_timestamp) === Map.delete(old_state, :last_check_timestamp)
41 |
42 | assert_received %{action: :empty?, parameters: ^topic},
43 | "Should be called because passed more time than recheck_kafka_availability_interval"
44 |
45 | TestStorage.set_response(:empty?, true)
46 | TestStorage.set_notification_mode(:empty?, :off)
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/test/support/mocks.ex:
--------------------------------------------------------------------------------
1 | Mox.defmock(KafkaBatcher.BrodMock, for: Test.Support.Behaviours.Brod)
2 | Mox.defmock(KafkaBatcher.KafkaExMock, for: Test.Support.Behaviours.KafkaEx)
3 | Mox.defmock(KafkaBatcher.KafkaEx.MetadataMock, for: Test.Support.Behaviours.KafkaEx.Metadata)
4 |
5 | defmodule KafkaBatcher.Mocks do
6 | @moduledoc false
7 | @spec __using__(any()) :: {:__block__, [], [{:import, [...], [...]} | {:setup, [...], [...]}, ...]}
8 | defmacro __using__(_opts) do
9 | quote do
10 | import Mox
11 | setup :verify_on_exit!
12 | setup :set_mox_from_context
13 | end
14 | end
15 | end
16 |
17 | defmodule KafkaBatcher.ProducerHelper do
18 | alias KafkaBatcher.Producers.TestProducer
19 | alias KafkaBatcher.TempStorage.TestStorage
20 |
21 | @moduledoc false
22 | use ExUnit.Case
23 |
24 | def init do
25 | ## The call start_client explicitly, to avoid a race when restarting the ConnectionManager
26 | TestProducer.init()
27 | TestStorage.init()
28 | end
29 |
30 | def connection_manager_up do
31 | case Process.whereis(KafkaBatcher.Supervisor) do
32 | nil ->
33 | {:ok, sup_pid} = KafkaBatcher.Supervisor.start_link([])
34 | :erlang.unlink(sup_pid)
35 |
36 | pid when is_pid(pid) ->
37 | :ok
38 | end
39 |
40 | ## ready starting pool producers
41 | assert ready_connection_manager?()
42 | assert ready_pool?()
43 | :ok
44 | end
45 |
46 | def ready_connection_manager? do
47 | ready_connection_manager?(10)
48 | end
49 |
50 | defp ready_connection_manager?(0) do
51 | false
52 | end
53 |
54 | defp ready_connection_manager?(cnt) do
55 | case Process.whereis(KafkaBatcher.ConnectionManager) do
56 | nil ->
57 | Process.sleep(100)
58 | ready_connection_manager?(cnt - 1)
59 |
60 | _pid ->
61 | true
62 | end
63 | end
64 |
65 | def ready_pool? do
66 | ready_pool?(10)
67 | end
68 |
69 | defp ready_pool?(0) do
70 | false
71 | end
72 |
73 | defp ready_pool?(cnt) do
74 | case KafkaBatcher.ConnectionManager.client_started?() do
75 | true ->
76 | true
77 |
78 | false ->
79 | Process.sleep(100)
80 | ready_pool?(cnt - 1)
81 | end
82 | catch
83 | _, _reason ->
84 | Process.sleep(100)
85 | ready_pool?(cnt - 1)
86 | end
87 | end
88 |
--------------------------------------------------------------------------------
/lib/kafka_batcher.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher do
2 | @moduledoc """
3 |
4 | ```mermaid
5 | flowchart TD
6 | S -->|one for all collectors| CM(ConnectionManager)
7 | S -->|reg_name by topic| APS[AccumulatorsPoolSupervisor]
8 | S[Supervisor] -->|reg_name by topic| C(Collector)
9 | APS --> Accumulator0
10 | APS -->|starts Accumulator for each partition| Accumulator1
11 | APS --> Accumulator2
12 | ```
13 |
14 | or only one Accumulator in case when `collect_by_partition: false`
15 | ```mermaid
16 | flowchart TD
17 | S -->|one for all collectors| CM(ConnectionManager)
18 | S -->|reg_name by topic| APS[AccumulatorsPoolSupervisor]
19 | S[Supervisor] -->|reg_name by topic| C(Collector)
20 | APS --> Accumulator
21 | ```
22 |
23 | Sequence in case when Kafka is avaliable:
24 | ```mermaid
25 | sequenceDiagram
26 | actor U as LibraryUser
27 | participant C as Collector
28 | participant A as Accumulator
29 | participant P as Producer
30 | U ->> C: add_events/1
31 | C->>A: dispatch by partitions & add_event/3
32 | A->>A: accumulate events
33 | A->>P: when conditions met call producer
34 | P->>Kafka: produce_sync
35 | Kafka->>P: :ok
36 | P->>A: :ok
37 | A->>A: reset_state_after_produce/2
38 | ```
39 |
40 | Sequence in case when Kafka is unavaliable:
41 | ```mermaid
42 | sequenceDiagram
43 | actor U as LibraryUser
44 | participant C as Collector
45 | participant A as Accumulator
46 | participant P as Producer
47 | U ->> C: add_events/1
48 | C->>A: dispatch by partitions & add_event/3
49 | A->>A: accumulate events
50 | A->>P: when conditions met call producer
51 | P->>Kafka: produce_sync
52 | Kafka->>P: unavailable
53 | P->>A: {:error, reason}
54 | A->>TempStorage: save_batch/1
55 | ```
56 | """
57 |
58 | defmodule MessageObject do
59 | @moduledoc """
60 | Contains Kafka message fields
61 |
62 | ## Fields
63 |
64 | * `:key` - Kafka use it for partitioning
65 | * `:value` - the main payload of the message
66 | * `:headers` - a keyword list with auxiliary key-value pairs
67 | """
68 | defstruct key: "", value: "", headers: []
69 |
70 | @type t :: %MessageObject{key: binary(), value: map() | binary(), headers: list()}
71 | end
72 | end
73 |
--------------------------------------------------------------------------------
/test/support/mox_helper.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.MoxHelper do
2 | @moduledoc false
3 |
4 | defmodule State do
5 | @moduledoc false
6 |
7 | defstruct responses: %{},
8 | owner: nil,
9 | notifications: %{}
10 |
11 | def fetch(data, key), do: Map.fetch(data, key)
12 |
13 | def get_and_update(data, key, func) do
14 | Map.get_and_update(data, key, func)
15 | end
16 |
17 | def pop(data, key), do: Map.pop(data, key)
18 | end
19 |
20 | defmacro __using__(opts) do
21 | quote location: :keep, bind_quoted: [opts: opts] do
22 | defp client do
23 | Keyword.fetch!(unquote(Macro.escape(opts)), :client)
24 | end
25 |
26 | def set_response(action, response) do
27 | client().get_state()
28 | |> put_in([:responses, action], response)
29 | |> client().set_state()
30 | end
31 |
32 | def set_owner do
33 | client().get_state()
34 | |> Map.put(:owner, self())
35 | |> client().set_state()
36 | end
37 |
38 | def set_notification_mode(action, mode) when mode in [:on, :off] do
39 | client().get_state()
40 | |> put_in([:notifications, action], mode)
41 | |> client().set_state()
42 |
43 | if mode == :off do
44 | flush(action)
45 |
46 | client().get_state()
47 | |> put_in([:responses, action], nil)
48 | |> client().set_state()
49 | end
50 | end
51 |
52 | defp flush(action) do
53 | receive do
54 | %{action: ^action} ->
55 | flush(action)
56 | after
57 | 0 -> :ok
58 | end
59 | end
60 |
61 | defp process_callback(event, response \\ nil) do
62 | send_notification(event)
63 | handle_response(event, response)
64 | end
65 |
66 | defp send_notification(%{action: action} = event) do
67 | case client().get_state() do
68 | %{owner: owner, notifications: notifications} ->
69 | if Map.get(notifications, action) == :on do
70 | send(owner, event)
71 | end
72 |
73 | _ ->
74 | :ok
75 | end
76 | end
77 |
78 | defp handle_response(%{action: action} = event, response) do
79 | case client().get_state() do
80 | %{responses: responses} ->
81 | case Map.get(responses, action) do
82 | nil -> response
83 | new_response -> new_response
84 | end
85 |
86 | _ ->
87 | response
88 | end
89 | end
90 | end
91 | end
92 | end
93 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/producers/kafka_ex.ex:
--------------------------------------------------------------------------------
1 | if Code.ensure_loaded?(KafkaEx) do
2 | defmodule KafkaBatcher.Producers.KafkaEx do
3 | @moduledoc """
4 | An implementation of the KafkaBatcher.Behaviours.Producer for KafkaEx
5 | """
6 |
7 | @kafka_ex_client Application.compile_env(:kafka_batcher, :kafka_ex_client, KafkaEx)
8 | @metadata_response Application.compile_env(:kafka_batcher, :kafka_ex_metadata, KafkaEx.Protocol.Metadata.Response)
9 | @client_name :kafka_producer_client
10 |
11 | @behaviour KafkaBatcher.Behaviours.Producer
12 | use KafkaBatcher.Producers.Base
13 |
14 | ## -------------------------------------------------------------------------
15 | ## public api
16 | ## -------------------------------------------------------------------------
17 |
18 | ## KafkaEx start worker
19 | @impl true
20 | def start_client do
21 | uris = KafkaBatcher.Config.get_endpoints()
22 |
23 | @kafka_ex_client.create_worker(@client_name, uris: uris)
24 | end
25 |
26 | @impl true
27 | def start_producer(_topic_name, _config) do
28 | :ok
29 | end
30 |
31 | @impl true
32 | def get_partitions_count(topic) do
33 | count =
34 | @kafka_ex_client.metadata(topic: topic, worker_name: @client_name)
35 | |> @metadata_response.partitions_for_topic(topic)
36 | |> length()
37 |
38 | {:ok, count}
39 | end
40 |
41 | @impl true
42 | def do_produce(messages, topic, partition, config) do
43 | case @kafka_ex_client.produce(
44 | %KafkaEx.Protocol.Produce.Request{
45 | topic: topic,
46 | partition: partition,
47 | required_acks: Keyword.get(config, :required_acks),
48 | messages: transform_messages(messages)
49 | },
50 | worker_name: @client_name
51 | ) do
52 | {:ok, _offset} ->
53 | :ok
54 |
55 | :ok ->
56 | :ok
57 |
58 | nil ->
59 | {:error, "Producing was failed"}
60 |
61 | {:error, reason} ->
62 | {:error, reason}
63 | end
64 | end
65 |
66 | ## -------------------------------------------------------------------------
67 | ## internal functions
68 | ## -------------------------------------------------------------------------
69 |
70 | defp transform_messages(messages) do
71 | Enum.map(
72 | messages,
73 | fn
74 | %KafkaBatcher.MessageObject{key: key, value: value, headers: headers} ->
75 | %KafkaEx.Protocol.Produce.Message{headers: headers, key: key, value: value}
76 | end
77 | )
78 | end
79 | end
80 | end
81 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/collector/implementation.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Collector.Implementation do
2 | @moduledoc """
3 | Part of the KafkaBatcher.Collector implementation not related to GenServer behavior.
4 | """
5 |
6 | require Logger
7 | alias KafkaBatcher.{AccumulatorsPoolSupervisor, Collector.State, MessageObject}
8 | @producer Application.compile_env(:kafka_batcher, :producer_module, KafkaBatcher.Producers.Kaffe)
9 |
10 | def choose_partition(_message, _topic_name, _config, nil), do: {:error, :kafka_unavailable}
11 |
12 | def choose_partition(%MessageObject{key: key, value: value}, topic_name, config, partitions_count) do
13 | calc_partition_fn = Keyword.fetch!(config, :partition_fn)
14 |
15 | partition = calc_partition_fn.(topic_name, partitions_count, key, value)
16 | {:ok, partition}
17 | end
18 |
19 | def start_accumulators(%State{collect_by_partition: true, partitions_count: nil}) do
20 | {:error, :kafka_unavailable}
21 | end
22 |
23 | def start_accumulators(%State{collect_by_partition: true, partitions_count: count} = state) do
24 | start_accumulators_by_partitions(count, state)
25 | end
26 |
27 | def start_accumulators(%State{topic_name: topic_name, config: config, collect_by_partition: false} = state) do
28 | start_accumulator(topic_name: topic_name, config: config, collector: state.collector)
29 | end
30 |
31 | defp start_accumulators_by_partitions(count, %State{} = state) do
32 | opts = [
33 | topic_name: state.topic_name,
34 | config: state.config,
35 | collector: state.collector
36 | ]
37 |
38 | Enum.reduce_while(
39 | 0..(count - 1),
40 | :ok,
41 | fn partition, _ ->
42 | case start_accumulator(Keyword.put(opts, :partition, partition)) do
43 | :ok ->
44 | {:cont, :ok}
45 |
46 | {:error, reason} ->
47 | {:halt, {:error, reason}}
48 | end
49 | end
50 | )
51 | end
52 |
53 | defp start_accumulator(args) do
54 | case AccumulatorsPoolSupervisor.start_accumulator(args) do
55 | {:ok, _} ->
56 | :ok
57 |
58 | {:error, {:already_started, _pid}} ->
59 | :ok
60 |
61 | {:error, reason} ->
62 | Logger.warning("""
63 | KafkaBatcher: Accumulator has failed to start with args: #{inspect(args)}.
64 | Reason: #{inspect(reason)}}
65 | """)
66 |
67 | {:error, reason}
68 | end
69 | end
70 |
71 | @spec store_partition_count(State.t()) :: State.t()
72 | def store_partitions_count(%State{partitions_count: nil} = state) do
73 | case @producer.get_partitions_count(state.topic_name) do
74 | {:ok, partitions_count} ->
75 | %State{state | partitions_count: partitions_count}
76 |
77 | {:error, _reason} ->
78 | state
79 | end
80 | end
81 |
82 | def store_partition_count(%State{partitions_count: count} = state) when is_integer(count), do: state
83 | end
84 |
--------------------------------------------------------------------------------
/test/support/collectors/collector_handlers.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Test.CalculatePartitionByValueCollector do
2 | @moduledoc false
3 | use KafkaBatcher.Collector,
4 | collect_by_partition: true,
5 | partition_fn: &__MODULE__.calculate_partition/4,
6 | required_acks: -1,
7 | batch_size: 30
8 |
9 | def calculate_partition(_topic, partitions_count, _key, value) do
10 | val = value["client_id"] || value["device_id"]
11 | :erlang.phash2(val, partitions_count)
12 | end
13 | end
14 |
15 | defmodule KafkaBatcher.Test.SimpleCollector do
16 | @moduledoc false
17 | use KafkaBatcher.Collector,
18 | collect_by_partition: false,
19 | partition_strategy: :md5,
20 | batch_size: 10
21 | end
22 |
23 | defmodule KafkaBatcher.Test.BatchFlushCollector do
24 | @moduledoc false
25 | use KafkaBatcher.Collector,
26 | collect_by_partition: false,
27 | partition_strategy: :random,
28 | batch_size: 10,
29 | batch_flusher: Producers.CollectorTest.BatchFlusher
30 | end
31 |
32 | defmodule KafkaBatcher.Test.CalculatePartitionByKeyCollector do
33 | @moduledoc false
34 | use KafkaBatcher.Collector,
35 | collect_by_partition: true,
36 | partition_fn: &__MODULE__.calculate_partition/4,
37 | required_acks: 1,
38 | batch_size: 10
39 |
40 | def calculate_partition(_topic, partitions_count, key, _value) do
41 | :erlang.phash2(key, partitions_count)
42 | end
43 | end
44 |
45 | defmodule KafkaBatcher.Test.SimpleCollectorWithDelay do
46 | @moduledoc false
47 | use KafkaBatcher.Collector,
48 | collect_by_partition: false,
49 | partition_strategy: :md5,
50 | required_acks: 0,
51 | batch_size: 10,
52 | min_delay: 50
53 | end
54 |
55 | defmodule KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl do
56 | @moduledoc false
57 | use KafkaBatcher.Collector,
58 | collect_by_partition: false,
59 | partition_strategy: :md5,
60 | required_acks: 0,
61 | batch_size: 10,
62 | min_delay: 50,
63 | max_batch_bytesize: 400
64 | end
65 |
66 | defmodule KafkaBatcher.Test.SimpleCollectorMaxWaitTime do
67 | @moduledoc false
68 | use KafkaBatcher.Collector,
69 | collect_by_partition: false,
70 | partition_strategy: :md5,
71 | required_acks: 0,
72 | batch_size: 10,
73 | max_wait_time: 50,
74 | min_delay: 20
75 | end
76 |
77 | defmodule KafkaBatcher.Test.CollectorWithWrongConfig do
78 | @moduledoc false
79 | use KafkaBatcher.Collector,
80 | collect_by_partition: true
81 | end
82 |
83 | defmodule KafkaBatcher.Test.StartAccumulatorFail do
84 | @moduledoc false
85 | use KafkaBatcher.Collector,
86 | collect_by_partition: true
87 | end
88 |
89 | defmodule KafkaBatcher.Test.FailingCollector do
90 | @moduledoc false
91 | use GenServer
92 |
93 | def start_link(_) do
94 | GenServer.start_link(__MODULE__, nil)
95 | end
96 |
97 | @impl true
98 | def init(nil) do
99 | {:ok, nil}
100 | end
101 |
102 | @impl true
103 | def handle_call(_request, _from, _state) do
104 | throw(:timeout)
105 | end
106 | end
107 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/collector/state.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Collector.State do
2 | @moduledoc """
3 | Describes the state of KafkaBatcher.Collector and functions working with it
4 | """
5 |
6 | alias KafkaBatcher.{Accumulator, Collector, MessageObject, TempStorage}
7 | alias KafkaBatcher.Collector.{State, Utils}
8 |
9 | require Logger
10 |
11 | @type t :: %State{
12 | topic_name: String.t() | nil,
13 | config: Keyword.t(),
14 | collect_by_partition: boolean(),
15 | collector: atom() | nil,
16 | locked?: boolean(),
17 | last_check_timestamp: non_neg_integer() | nil,
18 | ready?: boolean(),
19 | timer_ref: :timer.tref() | nil,
20 | partitions_count: pos_integer() | nil
21 | }
22 |
23 | defstruct topic_name: nil,
24 | config: [],
25 | collect_by_partition: true,
26 | collector: nil,
27 | # these fields are used to handle case when Kafka went down suddenly
28 | locked?: false,
29 | last_check_timestamp: nil,
30 | # these fields are used to handle case when Kafka is not available at the start
31 | ready?: false,
32 | timer_ref: nil,
33 | partitions_count: nil
34 |
35 | @spec add_events(t(), [Utils.event()]) :: {:ok, t()} | {:error, term(), t()}
36 | def add_events(%State{} = state, events) do
37 | case events |> try_to_add_events(state) |> save_failed_events(state) do
38 | :ok ->
39 | {:ok, state}
40 |
41 | {:error, reason, _failed_event_batches} ->
42 | {:error, reason, %State{state | locked?: true}}
43 | end
44 | end
45 |
46 | defp try_to_add_events(events, %State{} = state) do
47 | events
48 | |> Enum.map(&Utils.transform_event/1)
49 | |> Enum.reduce(:ok, fn %MessageObject{} = event, result ->
50 | case choose_partition(state, event) do
51 | {:ok, partition} when result == :ok ->
52 | try_to_add_event(event, state.topic_name, partition)
53 |
54 | {:ok, partition} ->
55 | keep_failed_event(result, event, elem(result, 1), partition)
56 |
57 | {:error, reason} ->
58 | keep_failed_event(result, event, reason, nil)
59 | end
60 | end)
61 | end
62 |
63 | defp try_to_add_event(event, topic_name, partition) do
64 | case Accumulator.add_event(event, topic_name, partition) do
65 | :ok -> :ok
66 | {:error, reason} -> keep_failed_event(:ok, event, reason, partition)
67 | end
68 | end
69 |
70 | defp keep_failed_event(:ok, event, reason, partition) do
71 | {:error, reason, %{partition => [event]}}
72 | end
73 |
74 | defp keep_failed_event(
75 | {:error, reason, failed_event_batches},
76 | event,
77 | _reason,
78 | partition
79 | ) do
80 | {
81 | :error,
82 | reason,
83 | Map.update(failed_event_batches, partition, [event], &[event | &1])
84 | }
85 | end
86 |
87 | defp choose_partition(%State{collect_by_partition: true} = state, event) do
88 | Collector.Implementation.choose_partition(
89 | event,
90 | state.topic_name,
91 | state.config,
92 | state.partitions_count
93 | )
94 | end
95 |
96 | defp choose_partition(%State{collect_by_partition: false}, _event) do
97 | {:ok, nil}
98 | end
99 |
100 | defp save_failed_events(:ok, _state), do: :ok
101 |
102 | defp save_failed_events(
103 | {:error, _reason, failed_event_batches} = result,
104 | %State{} = state
105 | ) do
106 | for {partition, failed_events} <- failed_event_batches do
107 | TempStorage.save_batch(%TempStorage.Batch{
108 | messages: Enum.reverse(failed_events),
109 | topic: state.topic_name,
110 | partition: partition,
111 | producer_config: state.config
112 | })
113 | end
114 |
115 | result
116 | end
117 | end
118 |
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.MixProject do
2 | use Mix.Project
3 |
4 | def project do
5 | [
6 | app: :kafka_batcher,
7 | version: "1.1.0",
8 | elixir: "~> 1.16",
9 | elixirc_paths: elixirc_paths(Mix.env()),
10 | compilers: Mix.compilers(),
11 | aliases: aliases(),
12 | deps: deps(),
13 | test_coverage: [
14 | tool: ExCoveralls
15 | ],
16 | preferred_cli_env: [
17 | coveralls: :test,
18 | "coveralls.detail": :test,
19 | "coveralls.post": :test,
20 | "coveralls.html": :test,
21 | "coveralls.cobertura": :test
22 | ],
23 | dialyzer: [
24 | plt_add_deps: :app_tree,
25 | plt_add_apps: [:mix, :eex],
26 | flags: ~w[
27 | error_handling extra_return missing_return underspecs unmatched_returns
28 | ]a,
29 | list_unused_filters: true
30 | ],
31 | package: [
32 | maintainers: ["Roman Smirnov", "Dmitry Begunkov"],
33 | description:
34 | "Library to increase the throughput of producing messages (coming one at a time) to Kafka by accumulating these messages into batches",
35 | links: %{"Source" => "https://github.com/samokat-oss/kafka-batcher"},
36 | licenses: ["Apache-2.0"]
37 | ],
38 | docs: [
39 | before_closing_head_tag: &add_js_to_docs/1
40 | ]
41 | ]
42 | end
43 |
44 | # Run "mix help compile.app" to learn about applications.
45 | def application do
46 | [
47 | extra_applications: [:logger]
48 | ]
49 | end
50 |
51 | # Specifies which paths to compile per environment.
52 | defp elixirc_paths(:test), do: ["lib", "test/support"]
53 | defp elixirc_paths(_), do: ["lib"]
54 |
55 | # Run "mix help deps" to learn about dependencies.
56 | defp deps do
57 | [
58 | {:kaffe, "~> 1.22"},
59 | {:kafka_ex, "~> 0.12", optional: true},
60 | {:jason, ">= 0.0.0"},
61 | {:prom_ex, ">= 0.0.0", optional: true},
62 | {:plug, "~> 1.0", optional: true},
63 | {:telemetry, "~> 1.0"},
64 | {:brod, "~> 3.16", override: true, only: [:dev, :test]},
65 | # For tests and code quality
66 | {:credo, "~> 1.7", only: [:dev, :test], runtime: false},
67 | {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false},
68 | {:uniq, "~> 0.1"},
69 | {:mox, "~> 1.0", only: :test},
70 | {:excoveralls, "~> 0.16", only: :test},
71 | {:ex_doc, "~> 0.30", only: :dev, runtime: false},
72 | {:makeup_html, ">= 0.0.0", only: :dev, runtime: false}
73 | ]
74 | end
75 |
76 | defp aliases do
77 | [
78 | "test.coverage": ["coveralls.cobertura"],
79 | check: ["compile --warnings-as-errors", "format --check-formatted", "credo", "dialyzer"]
80 | ]
81 | end
82 |
83 | defp add_js_to_docs(:epub), do: ""
84 |
85 | defp add_js_to_docs(:html) do
86 | """
87 |
88 |
109 | """
110 | end
111 | end
112 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/producers/base.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Producers.Base do
2 | @moduledoc """
3 | General part of the Kafka producer implementation
4 | """
5 |
6 | defmacro __using__(opts) do
7 | quote location: :keep, bind_quoted: [opts: opts] do
8 | @error_notifier Application.compile_env(:kafka_batcher, :error_notifier, KafkaBatcher.DefaultErrorNotifier)
9 |
10 | require Logger
11 |
12 | def produce_list(messages, topic, nil, config) when is_list(messages) and is_binary(topic) and is_list(config) do
13 | with {:ok, partitions_count} <- get_partitions_count(topic),
14 | grouped_messages <- group_messages(messages, topic, partitions_count, partition_strategy_from(config)),
15 | :ok <- produce_list_to_topic(grouped_messages, topic, config) do
16 | :ok
17 | else
18 | error ->
19 | @error_notifier.report(
20 | type: "KafkaBatcherProducerError",
21 | message: "event#produce topic=#{topic} error=#{inspect(error)}"
22 | )
23 |
24 | error
25 | end
26 | rescue
27 | err ->
28 | @error_notifier.report(err, stacktrace: __STACKTRACE__)
29 | {:error, :failed_push_to_kafka}
30 | end
31 |
32 | def produce_list(messages, topic, partition, config)
33 | when is_list(messages) and is_binary(topic) and is_list(config) and is_integer(partition) do
34 | produce_list_to_topic(%{partition => messages}, topic, config)
35 | rescue
36 | err ->
37 | @error_notifier.report(err, stacktrace: __STACKTRACE__)
38 | {:error, :failed_push_to_kafka}
39 | end
40 |
41 | def produce_list(messages, topic, partition, config) do
42 | @error_notifier.report(
43 | type: "KafkaBatcherProducerError",
44 | message: """
45 | Invalid params for produce_list/4:
46 | (topic #{inspect(topic)}, opts #{inspect(config)}, partition #{inspect(partition)} messages #{inspect(messages)})
47 | """
48 | )
49 |
50 | {:error, :internal_error}
51 | end
52 |
53 | defp produce_list_to_topic(message_list, topic, config) do
54 | message_list
55 | |> Enum.reduce_while(:ok, fn {partition, messages}, :ok ->
56 | Logger.debug("KafkaBatcher: event#produce_list_to_topic topic=#{topic} partition=#{partition}")
57 | start_time = System.monotonic_time()
58 |
59 | case __MODULE__.do_produce(messages, topic, partition, config) do
60 | :ok ->
61 | push_metrics(start_time, topic, partition, messages, telemetry_on?(config))
62 | {:cont, :ok}
63 |
64 | {:error, _reason} = error ->
65 | {:halt, error}
66 | end
67 | end)
68 | end
69 |
70 | defp group_messages(messages, _topic, partitions_count, :random) do
71 | partition = :rand.uniform(partitions_count) - 1
72 | %{partition => messages}
73 | end
74 |
75 | defp group_messages(messages, _topic, partitions_count, :md5) do
76 | Enum.group_by(messages, fn %KafkaBatcher.MessageObject{key: key} -> :erlang.phash2(key, partitions_count) end)
77 | end
78 |
79 | defp group_messages(messages, topic, partitions_count, partition_strategy_fn) when is_function(partition_strategy_fn) do
80 | Enum.group_by(messages, fn %KafkaBatcher.MessageObject{key: key, value: value} ->
81 | partition_strategy_fn.(topic, partitions_count, key, value)
82 | end)
83 | end
84 |
85 | defp telemetry_on?(opts) do
86 | Keyword.get(opts, :telemetry, true)
87 | end
88 |
89 | defp push_metrics(_start_time, _topic, _partition, _messages, false) do
90 | :ok
91 | end
92 |
93 | defp push_metrics(start_time, topic, partition, messages, true) do
94 | duration = System.monotonic_time() - start_time
95 |
96 | :telemetry.execute(
97 | [:prom_ex, :plugin, :kafka, :producer],
98 | %{
99 | duration: duration,
100 | batch_size: Enum.count(messages),
101 | batch_byte_size: :erlang.external_size(messages)
102 | },
103 | %{
104 | topic: topic,
105 | partition: partition
106 | }
107 | )
108 | end
109 |
110 | defp partition_strategy_from(opts) do
111 | case Keyword.fetch(opts, :partition_strategy) do
112 | {:ok, partition_strategy} ->
113 | partition_strategy
114 |
115 | :error ->
116 | KafkaBatcher.Config.general_producer_config()
117 | |> Keyword.get(:partition_strategy, :random)
118 | end
119 | end
120 | end
121 | end
122 | end
123 |
--------------------------------------------------------------------------------
/test/kafka_batcher/producers/kafka_ex_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Producers.KafkaExTest do
2 | alias KafkaBatcher.{MessageObject, Producers}
3 |
4 | use ExUnit.Case
5 | use KafkaBatcher.Mocks
6 |
7 | @client_name :kafka_producer_client
8 | @topic1 "topic1"
9 | @topic2 "topic2"
10 | @topic3 "topic3"
11 | @topic4 "topic4"
12 | @topic5 "topic5"
13 |
14 | @partition_topics %{
15 | @topic1 => 10,
16 | @topic2 => 20,
17 | @topic3 => 10,
18 | @topic4 => 4,
19 | @topic5 => 5
20 | }
21 |
22 | @messages [%MessageObject{key: "key1", value: "value1"}, %MessageObject{key: "key2", value: "value2"}]
23 | @expected_messages Enum.map(@messages, fn %MessageObject{key: key, value: value, headers: headers} ->
24 | %KafkaEx.Protocol.Produce.Message{key: key, value: value, headers: headers}
25 | end)
26 |
27 | setup :prepare_config
28 |
29 | def prepare_config(_context) do
30 | :ok
31 | end
32 |
33 | test "start client" do
34 | expect(KafkaBatcher.KafkaExMock, :create_worker, fn client_id, config ->
35 | assert client_id == @client_name
36 | assert config == [uris: [{"localhost", 9092}]]
37 | {:ok, self()}
38 | end)
39 |
40 | {:ok, _} = Producers.KafkaEx.start_client()
41 | end
42 |
43 | test "get partitions count" do
44 | partitions_count = Map.get(@partition_topics, @topic1)
45 |
46 | expect(KafkaBatcher.KafkaExMock, :metadata, fn opts ->
47 | assert opts == [topic: @topic1, worker_name: @client_name]
48 | %{@topic1 => %{:topic_metadatas => gen_list(partitions_count)}}
49 | end)
50 |
51 | expect(KafkaBatcher.KafkaEx.MetadataMock, :partitions_for_topic, fn metadata, topic ->
52 | %{@topic1 => %{:topic_metadatas => topic_metadatas}} = metadata
53 | assert partitions_count == length(topic_metadatas)
54 | assert topic == @topic1
55 | topic_metadatas
56 | end)
57 |
58 | {:ok, cnt1} = Producers.KafkaEx.get_partitions_count(@topic1)
59 | assert cnt1 == partitions_count
60 | end
61 |
62 | test "produce by partitions" do
63 | topic1_config = KafkaBatcher.Config.get_collector_config(@topic1)
64 |
65 | expect(KafkaBatcher.KafkaExMock, :produce, fn parameters, opts ->
66 | %{topic: topic, partition: partition, required_acks: require_acks, messages: kafka_messages} = parameters
67 | [worker_name: client_id] = opts
68 | assert client_id == @client_name
69 | assert topic == @topic1
70 | assert partition == 5
71 | assert require_acks == Keyword.get(topic1_config, :required_acks)
72 | assert kafka_messages == @expected_messages
73 | {:ok, :rand.uniform(1_000)}
74 | end)
75 |
76 | Producers.KafkaEx.produce_list(@messages, @topic1, 5, topic1_config)
77 | end
78 |
79 | test "produce without partitions" do
80 | partitions_count = Map.get(@partition_topics, @topic2)
81 |
82 | grouped_messages =
83 | Enum.group_by(@messages, fn %MessageObject{key: key} ->
84 | :erlang.phash2(key, partitions_count)
85 | end)
86 |
87 | expected_grouped_messages =
88 | Enum.group_by(
89 | @expected_messages,
90 | fn %KafkaEx.Protocol.Produce.Message{key: key} ->
91 | :erlang.phash2(key, partitions_count)
92 | end
93 | )
94 |
95 | topic2_config = KafkaBatcher.Config.get_collector_config(@topic2)
96 |
97 | Enum.each(grouped_messages, fn {partition, _messages} ->
98 | expect(KafkaBatcher.KafkaExMock, :produce, fn parameters, opts ->
99 | %{topic: topic, partition: call_partition, required_acks: require_acks, messages: kafka_messages} = parameters
100 | [worker_name: client_id] = opts
101 |
102 | assert client_id == @client_name
103 | assert topic == @topic2
104 | assert call_partition == partition
105 | assert require_acks == Keyword.get(topic2_config, :required_acks)
106 | assert kafka_messages == Map.get(expected_grouped_messages, partition)
107 | {:ok, :rand.uniform(1_000)}
108 | end)
109 | end)
110 |
111 | expect(KafkaBatcher.KafkaExMock, :metadata, fn opts ->
112 | assert opts == [topic: @topic2, worker_name: @client_name]
113 | %{@topic2 => %{:topic_metadatas => gen_list(partitions_count)}}
114 | end)
115 |
116 | expect(KafkaBatcher.KafkaEx.MetadataMock, :partitions_for_topic, fn metadata, topic ->
117 | %{@topic2 => %{:topic_metadatas => topic_metadatas}} = metadata
118 | assert partitions_count == length(topic_metadatas)
119 | assert topic == @topic2
120 | topic_metadatas
121 | end)
122 |
123 | Producers.KafkaEx.produce_list(@messages, @topic2, nil, topic2_config)
124 | end
125 |
126 | defp gen_list(len) do
127 | Enum.map(1..len, fn i -> i end)
128 | end
129 | end
130 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/connection_manager.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.ConnectionManager do
2 | @moduledoc """
3 | Abstraction layer over Kafka library.
4 |
5 | Manages a connection to Kafka, producers processes and does reconnect if something goes wrong.
6 | """
7 |
8 | require Logger
9 |
10 | defmodule State do
11 | @moduledoc "State of ConnectionManager process"
12 | defstruct client_started: false, client_pid: nil
13 |
14 | @type t :: %State{client_started: boolean(), client_pid: nil | pid()}
15 | end
16 |
17 | @producer Application.compile_env(:kafka_batcher, :producer_module, KafkaBatcher.Producers.Kaffe)
18 | @error_notifier Application.compile_env(:kafka_batcher, :error_notifier, KafkaBatcher.DefaultErrorNotifier)
19 | @reconnect_timeout Application.compile_env(:kafka_batcher, :reconnect_timeout, 5_000)
20 |
21 | use GenServer
22 |
23 | # Public API
24 | @spec start_link() :: :ignore | {:error, any()} | {:ok, pid()}
25 | def start_link do
26 | GenServer.start_link(__MODULE__, [], name: __MODULE__)
27 | end
28 |
29 | @doc "Returns a specification to start this module under a supervisor"
30 | @spec child_spec(nil) :: map()
31 | def child_spec(_ \\ nil) do
32 | %{
33 | id: __MODULE__,
34 | start: {__MODULE__, :start_link, []},
35 | type: :worker
36 | }
37 | end
38 |
39 | @doc "Checks that Kafka client is already started"
40 | @spec client_started?() :: boolean()
41 | def client_started? do
42 | GenServer.call(__MODULE__, :client_started?)
43 | end
44 |
45 | ##
46 | ## Callbacks
47 | ##
48 |
49 | @impl GenServer
50 | def init(_opts) do
51 | Process.flag(:trap_exit, true)
52 | {:ok, %State{}, {:continue, :start_client}}
53 | end
54 |
55 | @impl GenServer
56 | def handle_continue(:start_client, state) do
57 | {:noreply, connect(state)}
58 | end
59 |
60 | @impl GenServer
61 | def handle_call(:client_started?, _from, %State{client_started: started?} = state) do
62 | {:reply, started?, state}
63 | end
64 |
65 | def handle_call(msg, _from, state) do
66 | Logger.warning("KafkaBatcher: Unexpected call #{inspect(msg)}")
67 | {:reply, :ok, state}
68 | end
69 |
70 | @impl GenServer
71 | def handle_cast(_msg, state) do
72 | {:noreply, state}
73 | end
74 |
75 | @impl GenServer
76 | def handle_info({:EXIT, pid, reason}, %State{client_pid: nil} = state) do
77 | Logger.info("""
78 | KafkaBatcher: Client was crashed #{inspect(pid)}, but reconnect is already in progress.
79 | Reason: #{inspect(reason)}
80 | """)
81 |
82 | {:noreply, state}
83 | end
84 |
85 | @impl GenServer
86 | def handle_info({:EXIT, pid, reason}, %State{client_pid: pid} = state) do
87 | Logger.info("KafkaBatcher: Client was crashed #{inspect(pid)}. Reason #{inspect(reason)}. Trying to reconnect")
88 | state = connect(%State{state | client_pid: nil, client_started: false})
89 | {:noreply, state}
90 | end
91 |
92 | def handle_info(:reconnect, state) do
93 | Logger.info("KafkaBatcher: Retry connection")
94 | {:noreply, connect(state)}
95 | end
96 |
97 | def handle_info(msg, state) do
98 | Logger.warning("KafkaBatcher: Unexpected info #{inspect(msg)}")
99 | {:noreply, state}
100 | end
101 |
102 | @impl GenServer
103 | def terminate(reason, state) do
104 | Logger.info("KafkaBatcher: Terminating #{__MODULE__}. Reason #{inspect(reason)}")
105 | {:noreply, state}
106 | end
107 |
108 | ##
109 | ## INTERNAL FUNCTIONS
110 | ##
111 |
112 | defp connect(state) do
113 | case prepare_connection() do
114 | {:ok, pid} ->
115 | %State{state | client_started: true, client_pid: pid}
116 |
117 | :retry ->
118 | Process.send_after(self(), :reconnect, @reconnect_timeout)
119 | %State{state | client_started: false, client_pid: nil}
120 | end
121 | end
122 |
123 | defp start_producers do
124 | KafkaBatcher.Config.get_configs_by_topic_name()
125 | |> Enum.reduce_while(:ok, fn {topic_name, config}, _ ->
126 | case @producer.start_producer(topic_name, config) do
127 | :ok ->
128 | {:cont, :ok}
129 |
130 | {:error, reason} ->
131 | @error_notifier.report(
132 | type: "KafkaBatcherProducerStartFailed",
133 | message: "Topic: #{topic_name}. Reason #{inspect(reason)}"
134 | )
135 |
136 | {:halt, :error}
137 | end
138 | end)
139 | end
140 |
141 | defp prepare_connection do
142 | case start_client() do
143 | {:ok, pid} ->
144 | case start_producers() do
145 | :ok -> {:ok, pid}
146 | :error -> :retry
147 | end
148 |
149 | {:error, reason} ->
150 | @error_notifier.report(
151 | type: "KafkaBatcherClientStartFailed",
152 | message: "Kafka client start failed: #{inspect(reason)}"
153 | )
154 |
155 | :retry
156 | end
157 | end
158 |
159 | defp start_client do
160 | case @producer.start_client() do
161 | {:ok, pid} ->
162 | {:ok, pid}
163 |
164 | {:error, {:already_started, pid}} ->
165 | Logger.debug("KafkaBatcher: Kafka client already started: #{inspect(pid)}")
166 | {:ok, pid}
167 |
168 | error ->
169 | error
170 | end
171 | end
172 | end
173 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/accumulator/state.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Accumulator.State do
2 | @moduledoc """
3 | Encapsulates all logic to detect when batch will be ready to producing
4 |
5 | A batch is marked as ready for producing when one of the following conditions is met:
6 |
7 | * reached the max byte size of the batch
8 | * reached the batch size (messages count) limit
9 | * reached the waiting time limit (max delay before producing)
10 | * one of special events arrived (which triggers Flusher to produce immediately)
11 | * timer expired (in case when a few events arrived timer helps to control that the max waiting time is not exceeded)
12 | """
13 |
14 | alias KafkaBatcher.{Accumulator.State, MessageObject}
15 | @error_notifier Application.compile_env(:kafka_batcher, :error_notifier, KafkaBatcher.DefaultErrorNotifier)
16 |
17 | @type t :: %State{
18 | topic_name: binary(),
19 | partition: non_neg_integer() | nil,
20 | config: Keyword.t(),
21 | pending_messages: list(),
22 | last_produced_at: non_neg_integer(),
23 | batch_flusher: atom(),
24 | batch_size: non_neg_integer(),
25 | max_wait_time: non_neg_integer(),
26 | min_delay: non_neg_integer(),
27 | max_batch_bytesize: non_neg_integer(),
28 | batch_bytesize: non_neg_integer(),
29 | pending_messages_count: non_neg_integer(),
30 | producer_config: Keyword.t(),
31 | messages_to_produce: list(),
32 | cleanup_timer_ref: reference() | nil,
33 | status: atom(),
34 | collector: atom() | nil
35 | }
36 |
37 | defstruct topic_name: nil,
38 | partition: nil,
39 | config: [],
40 | pending_messages: [],
41 | last_produced_at: 0,
42 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
43 | batch_size: 0,
44 | max_wait_time: 0,
45 | min_delay: 0,
46 | max_batch_bytesize: 0,
47 | batch_bytesize: 0,
48 | pending_messages_count: 0,
49 | producer_config: [],
50 | messages_to_produce: [],
51 | cleanup_timer_ref: nil,
52 | status: :continue,
53 | collector: nil
54 |
55 | @spec add_new_message(State.t(), MessageObject.t(), non_neg_integer()) :: State.t()
56 | def add_new_message(%State{} = state, %MessageObject{key: key, value: value} = event, now) do
57 | new_message = %MessageObject{event | value: maybe_encode(value)}
58 |
59 | state
60 | |> consider_max_bytesize(new_message)
61 | |> consider_max_size_and_wait_time(now)
62 | |> consider_istant_flush(key, value)
63 | end
64 |
65 | @spec reset_state_after_failure(State.t()) :: State.t()
66 | def reset_state_after_failure(%State{} = state) do
67 | state = stop_timer(state)
68 | %State{state | status: :continue, messages_to_produce: []}
69 | end
70 |
71 | @spec reset_state_after_produce(State.t()) :: State.t()
72 | def reset_state_after_produce(%State{} = state) do
73 | now = System.os_time(:millisecond)
74 | state = stop_timer(state)
75 | %State{state | last_produced_at: now, messages_to_produce: [], status: :continue}
76 | end
77 |
78 | @spec mark_as_ready(State.t()) :: State.t()
79 | def mark_as_ready(%State{pending_messages: pending_messages, status: :continue} = state) do
80 | %State{
81 | state
82 | | status: :ready,
83 | messages_to_produce: pending_messages,
84 | pending_messages: [],
85 | pending_messages_count: 0,
86 | batch_bytesize: 0
87 | }
88 | end
89 |
90 | defp consider_max_bytesize(%State{status: :continue, batch_bytesize: batch_bytesize} = state, new_message) do
91 | message_size = :erlang.external_size(new_message)
92 |
93 | case batch_bytesize + message_size >= state.max_batch_bytesize do
94 | true when message_size >= state.max_batch_bytesize ->
95 | @error_notifier.report(
96 | type: "KafkaBatcherProducerError",
97 | message: """
98 | event#produce topic=#{state.topic_name} partition=#{state.partition}.
99 | Message size #{inspect(message_size)} exceeds limit #{inspect(state.max_batch_bytesize)}
100 | """
101 | )
102 |
103 | state
104 |
105 | true ->
106 | state |> mark_as_ready() |> put_to_pending(new_message)
107 |
108 | false ->
109 | put_to_pending(state, new_message)
110 | end
111 | end
112 |
113 | defp consider_max_size_and_wait_time(%State{status: :continue} = state, now) do
114 | if state.pending_messages_count >= state.batch_size and now - state.last_produced_at >= state.min_delay do
115 | mark_as_ready(state)
116 | else
117 | state
118 | end
119 | end
120 |
121 | defp consider_max_size_and_wait_time(%State{status: :ready} = state, _), do: state
122 |
123 | defp consider_istant_flush(%State{status: :continue} = state, key, value) do
124 | if state.batch_flusher.flush?(key, value) do
125 | mark_as_ready(state)
126 | else
127 | state
128 | end
129 | end
130 |
131 | defp consider_istant_flush(%State{status: :ready} = state, _, _), do: state
132 |
133 | defp put_to_pending(%State{} = state, new_message) do
134 | %State{
135 | state
136 | | pending_messages: [new_message | state.pending_messages],
137 | pending_messages_count: state.pending_messages_count + 1,
138 | batch_bytesize: state.batch_bytesize + :erlang.external_size(new_message)
139 | }
140 | end
141 |
142 | defp stop_timer(%__MODULE__{cleanup_timer_ref: cleanup_timer_ref} = state) when is_reference(cleanup_timer_ref) do
143 | _ = :erlang.cancel_timer(cleanup_timer_ref)
144 | ## If the timer has expired before its cancellation, we must empty the
145 | ## mail-box of the 'timeout'-message.
146 | receive do
147 | {:timeout, ^cleanup_timer_ref, :cleanup} -> :ok
148 | after
149 | 0 -> :ok
150 | end
151 |
152 | %__MODULE__{state | cleanup_timer_ref: nil}
153 | end
154 |
155 | defp stop_timer(state) do
156 | %__MODULE__{state | cleanup_timer_ref: nil}
157 | end
158 |
159 | defp maybe_encode(value) when is_binary(value) do
160 | value
161 | end
162 |
163 | defp maybe_encode(value) do
164 | Jason.encode!(value)
165 | end
166 | end
167 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/prom_ex/plugins/kafka.ex:
--------------------------------------------------------------------------------
1 | if Code.ensure_loaded?(PromEx) do
2 | defmodule KafkaBatcher.PromEx.Plugins.Kafka do
3 | @moduledoc """
4 | PromEx plugin to collect Prometheus metrics of interactions with Kafka.
5 | The following metrics are collected here:
6 |
7 | * prom_ex_kafka_producer_batch_total_size_byte_bucket
8 | * prom_ex_kafka_producer_batch_total_size_byte_sum
9 | * prom_ex_kafka_producer_batch_total_size_byte_count
10 | * prom_ex_kafka_producer_batch_messages_count_bucket
11 | * prom_ex_kafka_producer_batch_messages_count_sum
12 | * prom_ex_kafka_producer_batch_messages_count_count
13 | * prom_ex_kafka_producer_duration_seconds_bucket
14 | * prom_ex_kafka_producer_duration_seconds_sum
15 | * prom_ex_kafka_producer_duration_seconds_count
16 |
17 | Each metric has the following labels:
18 | * topic (topic name)
19 | * partition (partition number)
20 | * topic_alias (short name of topic to improve readability of Grafana dashboards in case when topic
21 |
22 | Configuration options that allow you to set metrics display preferences:
23 | :kafka_topic_aliases - allows you to set an alias for display in metrics
24 | :producer_buckets - allows to set bucket parameters for grouping metrics
25 | For example:
26 |
27 | config :kafka_batcher,
28 | kafka_topic_aliases: %{
29 | my_topic1 => "topic1",
30 | my_topic2 => "topic2"
31 | }
32 |
33 | config :kafka_batcher,
34 | :kafka_metric_opts,
35 | producer_buckets:
36 | [
37 | duration: [1, 2, 3, 4, 5, 10, 15, 20, 50, 100],
38 | byte_size: [1_000, 2_000, 5_000, 10_000, 20_000, 50_000, 100_000],
39 | messages_count: [1, 5, 10, 15, 20, 30, 40, 50, 100]
40 | ]
41 | """
42 |
43 | use PromEx.Plugin
44 | require Logger
45 |
46 | @producer_event_metrics [:prom_ex, :plugin, :kafka, :producer]
47 | @consumer_event_metrics [:prom_ex, :plugin, :kafka, :consumer]
48 |
49 | @default_producer_buckets [
50 | duration: [1, 2, 3, 4, 5, 10, 15, 20, 50, 100],
51 | byte_size: [1_000, 2_000, 5_000, 10_000, 20_000, 50_000, 100_000],
52 | messages_count: [1, 5, 10, 15, 20, 30, 40, 50, 100]
53 | ]
54 |
55 | @default_consumer_buckets [
56 | duration: [10, 20, 50, 100, 150, 200, 500, 1000, 2000],
57 | byte_size: [1_000, 2_000, 5_000, 10_000, 20_000, 50_000, 100_000],
58 | messages_count: [1, 5, 10, 15, 20, 30, 40, 50, 100]
59 | ]
60 |
61 | @impl true
62 | def event_metrics(_opts) do
63 | metric_prefix = [:prom_ex, :kafka]
64 |
65 | labels = %{}
66 | buckets = Application.get_env(:kafka_batcher, :kafka_metric_opts, [])
67 |
68 | [
69 | producer_event_metrics(metric_prefix, labels, buckets),
70 | consumer_event_metrics(metric_prefix, labels, buckets)
71 | ]
72 | end
73 |
74 | def producer_event_metrics(metric_prefix, labels, buckets) do
75 | producer_metrics_tags = Map.keys(labels) ++ [:topic, :partition, :topic_alias]
76 | buckets = Keyword.get(buckets, :producer_buckets, @default_producer_buckets)
77 |
78 | Event.build(
79 | :producer_event_metrics,
80 | build_kafka_metrics(
81 | metric_prefix,
82 | producer_metrics_tags,
83 | labels,
84 | :producer,
85 | @producer_event_metrics,
86 | buckets
87 | )
88 | )
89 | end
90 |
91 | def consumer_event_metrics(metric_prefix, labels, buckets) do
92 | consumer_metrics_tags = Map.keys(labels) ++ [:topic, :partition, :topic_alias]
93 | buckets = Keyword.get(buckets, :consumer_buckets, @default_consumer_buckets)
94 |
95 | Event.build(
96 | :consumer_event_metrics,
97 | build_kafka_metrics(
98 | metric_prefix,
99 | consumer_metrics_tags,
100 | labels,
101 | :consumer,
102 | @consumer_event_metrics,
103 | buckets
104 | )
105 | )
106 | end
107 |
108 | defp build_kafka_metrics(metric_prefix, metrics_tags, labels, name, event_name, buckets) do
109 | aliases = Application.get_env(:kafka_batcher, :kafka_topic_aliases, %{})
110 |
111 | [
112 | distribution(
113 | metric_prefix ++ [name, :duration, :seconds],
114 | event_name: event_name,
115 | description: "The time to produce one batch to Kafka.",
116 | reporter_options: [
117 | buckets: Keyword.fetch!(buckets, :duration)
118 | ],
119 | measurement: fn measurements, _metadata ->
120 | measurements.duration
121 | end,
122 | tag_values: fn metadata ->
123 | set_tags_value(metadata, aliases, labels)
124 | end,
125 | tags: metrics_tags,
126 | unit: {:native, :second}
127 | ),
128 | distribution(
129 | metric_prefix ++ [name, :batch, :messages, :count],
130 | event_name: event_name,
131 | description: "The count of messages in one batch #{name}",
132 | reporter_options: [
133 | buckets: Keyword.fetch!(buckets, :messages_count)
134 | ],
135 | measurement: fn measurements, _metadata ->
136 | measurements.batch_size
137 | end,
138 | tag_values: fn metadata ->
139 | set_tags_value(metadata, aliases, labels)
140 | end,
141 | tags: metrics_tags
142 | ),
143 | distribution(
144 | metric_prefix ++ [name, :batch, :total, :size, :byte],
145 | event_name: event_name,
146 | description: "The size of a batch #{name} of messages",
147 | reporter_options: [
148 | buckets: Keyword.fetch!(buckets, :byte_size)
149 | ],
150 | measurement: fn measurements, _metadata ->
151 | measurements.batch_byte_size
152 | end,
153 | tag_values: fn metadata ->
154 | set_tags_value(metadata, aliases, labels)
155 | end,
156 | tags: metrics_tags
157 | )
158 | ]
159 | end
160 |
161 | defp set_tags_value(%{topic: topic} = metadata, aliases, labels) do
162 | Map.take(metadata, [:topic, :partition])
163 | |> Map.put(:topic_alias, Map.get(aliases, topic, topic))
164 | |> Map.merge(labels)
165 | end
166 | end
167 | end
168 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/accumulator.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Accumulator do
2 | @moduledoc """
3 | Accumulator process is used to accumulate messages until Accumulator.State will go into "ready to producing" status.
4 | There are many conditions to detect this status, which can be configured through kafka_batcher settings.
5 | See details how it works in KafkaBatcher.Accumulator.State module
6 | """
7 |
8 | alias KafkaBatcher.{Accumulator.State, MessageObject, TempStorage}
9 | alias KafkaBatcher.Behaviours.Collector, as: CollectorBehaviour
10 |
11 | @error_notifier Application.compile_env(:kafka_batcher, :error_notifier, KafkaBatcher.DefaultErrorNotifier)
12 | @producer Application.compile_env(:kafka_batcher, :producer_module, KafkaBatcher.Producers.Kaffe)
13 |
14 | use GenServer
15 | require Logger
16 |
17 | def start_link(args) do
18 | GenServer.start_link(__MODULE__, args, name: reg_name(args))
19 | end
20 |
21 | @doc "Returns a specification to start this module under a supervisor"
22 | def child_spec(args) do
23 | {accumulator_mod, args} = Keyword.pop(args, :accumulator_mod, __MODULE__)
24 |
25 | %{
26 | id: reg_name(args),
27 | start: {accumulator_mod, :start_link, [args]}
28 | }
29 | end
30 |
31 | @doc """
32 | Finds appropriate Accumulator process by topic & partition and dispatches `event` to it
33 | """
34 | def add_event(%MessageObject{} = event, topic_name, partition \\ nil) do
35 | GenServer.call(reg_name(topic_name: topic_name, partition: partition), {:add_event, event})
36 | catch
37 | _, _reason ->
38 | Logger.warning("KafkaBatcher: Couldn't get through to accumulator")
39 | {:error, :accumulator_unavailable}
40 | end
41 |
42 | ##
43 | ## Callbacks
44 | ##
45 | @impl GenServer
46 | def init(args) do
47 | Process.flag(:trap_exit, true)
48 | state = build_state(args)
49 |
50 | Logger.debug("""
51 | KafkaBatcher: Accumulator process started: topic #{state.topic_name} partition #{state.partition} pid #{inspect(self())}
52 | """)
53 |
54 | {:ok, state}
55 | end
56 |
57 | @impl GenServer
58 | def handle_call({:add_event, event}, _from, state) do
59 | now = System.os_time(:millisecond)
60 |
61 | state
62 | |> State.add_new_message(event, now)
63 | |> set_cleanup_timer_if_not_exists()
64 | |> produce_messages_if_ready()
65 | |> case do
66 | {:ok, new_state} -> {:reply, :ok, new_state}
67 | {:error, reason, new_state} -> {:reply, {:error, reason}, new_state}
68 | end
69 | end
70 |
71 | @impl GenServer
72 | def handle_info({:timeout, cleanup_timer_ref, :cleanup}, %State{cleanup_timer_ref: cleanup_timer_ref} = state) do
73 | state
74 | |> State.mark_as_ready()
75 | |> produce_messages_if_ready()
76 | |> case do
77 | {:ok, new_state} ->
78 | {:noreply, new_state}
79 |
80 | {:error, _reason, new_state} ->
81 | state.collector.set_lock()
82 | {:noreply, new_state}
83 | end
84 | end
85 |
86 | # handle the trapped exit call
87 | def handle_info({:EXIT, _from, reason}, state) do
88 | cleanup(state)
89 | {:stop, reason, state}
90 | end
91 |
92 | def handle_info(term, state) do
93 | Logger.warning("""
94 | KafkaBatcher: Unknown message #{inspect(term)} to #{__MODULE__}.handle_info/2.
95 | Current state: #{inspect(drop_sensitive(state))}
96 | """)
97 |
98 | {:noreply, state}
99 | end
100 |
101 | @impl GenServer
102 | def terminate(_reason, state) do
103 | cleanup(state)
104 | end
105 |
106 | @impl GenServer
107 | def format_status(_reason, [pdict, state]) do
108 | [pdict, drop_sensitive(state)]
109 | end
110 |
111 | defp drop_sensitive(%State{config: config} = state) do
112 | %State{state | config: Keyword.drop(config, [:sasl])}
113 | end
114 |
115 | defp cleanup(%{pending_messages: [], messages_to_produce: []}) do
116 | Logger.info("KafkaBatcher: Terminating #{__MODULE__}: there are no pending messages.")
117 | end
118 |
119 | defp cleanup(%{pending_messages: pending_messages, messages_to_produce: messages_to_produce} = state) do
120 | _ = handle_produce(Enum.reverse(pending_messages ++ messages_to_produce), state)
121 | Logger.info("KafkaBatcher: Terminating #{__MODULE__}")
122 | end
123 |
124 | defp set_cleanup_timer_if_not_exists(%State{cleanup_timer_ref: nil} = state) do
125 | ref = :erlang.start_timer(state.max_wait_time, self(), :cleanup)
126 | %State{state | cleanup_timer_ref: ref}
127 | end
128 |
129 | defp set_cleanup_timer_if_not_exists(%State{} = state), do: state
130 |
131 | defp produce_messages_if_ready(%State{messages_to_produce: []} = state), do: {:ok, state}
132 |
133 | defp produce_messages_if_ready(%State{messages_to_produce: messages_to_produce} = state) do
134 | handle_produce(Enum.reverse(messages_to_produce), state)
135 | end
136 |
137 | defp save_messages_to_temp_storage(messages, state) do
138 | TempStorage.save_batch(%TempStorage.Batch{
139 | messages: messages,
140 | topic: state.topic_name,
141 | partition: state.partition,
142 | producer_config: state.config
143 | })
144 | end
145 |
146 | defp handle_produce(pending_messages, state) do
147 | case produce_list(pending_messages, state) do
148 | :ok ->
149 | {:ok, State.reset_state_after_produce(state)}
150 |
151 | {:error, reason} ->
152 | @error_notifier.report(
153 | type: "KafkaBatcherProducerError",
154 | message: "event#produce topic=#{state.topic_name} partition=#{state.partition} error=#{inspect(reason)}"
155 | )
156 |
157 | save_messages_to_temp_storage(pending_messages, state)
158 | {:error, reason, State.reset_state_after_failure(state)}
159 | end
160 | end
161 |
162 | @spec produce_list(messages :: [CollectorBehaviour.event()], state :: State.t()) :: :ok | {:error, any()}
163 | defp produce_list(messages, state) when is_list(messages) do
164 | @producer.produce_list(messages, state.topic_name, state.partition, state.config)
165 | catch
166 | _, reason ->
167 | {:error, reason}
168 | end
169 |
170 | defp build_state(args) do
171 | config = Keyword.fetch!(args, :config)
172 |
173 | %State{
174 | topic_name: Keyword.fetch!(args, :topic_name),
175 | partition: Keyword.get(args, :partition),
176 | config: config,
177 | batch_flusher: Keyword.fetch!(config, :batch_flusher),
178 | batch_size: Keyword.fetch!(config, :batch_size),
179 | max_wait_time: Keyword.fetch!(config, :max_wait_time),
180 | min_delay: Keyword.fetch!(config, :min_delay),
181 | max_batch_bytesize: Keyword.fetch!(config, :max_batch_bytesize),
182 | collector: Keyword.fetch!(args, :collector)
183 | }
184 | end
185 |
186 | defp reg_name(args) do
187 | topic_name = Keyword.fetch!(args, :topic_name)
188 |
189 | case Keyword.get(args, :partition) do
190 | nil ->
191 | :"#{__MODULE__}.#{topic_name}"
192 |
193 | partition ->
194 | :"#{__MODULE__}.#{topic_name}.#{partition}"
195 | end
196 | end
197 | end
198 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/collector.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Collector do
2 | @moduledoc """
3 | Implementation of collector for incoming events.
4 | The collector accumulates events in accordance with a given strategy using accumulators supervised by AccumulatorsPoolSupervisor.
5 | The strategy is specified by the following parameters:
6 | * `:partition_strategy`, allows values: :random, :md5 or function (e.g. `fn _topic, _partitions_count, key, _value -> key end`)
7 | * `:partition_fn`, a function that takes 4 arguments and returns a number of partition (see example below)
8 | * `:collect_by_partition`, if set to `true`, producer accumulates messages separately for each partition of topic
9 | * `:batch_size`, count of messages to be accumulated before producing
10 | * `:max_wait_time`, max interval between producings in milliseconds. The batch will be produced to Kafka either by `batch_size` or by `max_wait_time` parameter.
11 | * `:batch_flusher`, a module implementing a `flush?/2` function. If the function returns true, the current batch will be sent to Kafka immediately.
12 | * `:min_delay` - optional parameter. Set minimal delay before send events. This parameter allowed to increase max throughput
13 | * `:max_batch_bytesize` - optional parameter. Allows to set a limit on the maximum batch size in bytes.
14 |
15 | A collector can be described as follows (for example):
16 |
17 | defmodule KafkaBatcher.Test.Handler1 do
18 | use KafkaBatcher.Collector,
19 | collect_by_partition: true,
20 | topic_key: :topic1,
21 | partition_fn: &KafkaBatcher.Test.Handler1.calculate_partition/4,
22 | required_acks: -1,
23 | batch_size: 30,
24 | max_wait_time: 20_000,
25 | min_delay: 0
26 |
27 | def calculate_partition(_topic, partitions_count, _key, value) do
28 | val = value["client_id"] || value["device_id"]
29 | :erlang.phash2(val, partitions_count)
30 | end
31 | end
32 |
33 | A collector can save events that cannot be sent to Kafka to external storage, such as a database.
34 | A storage is specified in the config.exs like this:
35 |
36 | config :kafka_batcher,
37 | storage_impl: KafkaBatcher.Storage.YourTempStorage
38 |
39 | """
40 | require Logger
41 |
42 | defmacro __using__(opts) do
43 | quote location: :keep, bind_quoted: [opts: opts] do
44 | use GenServer
45 | require Logger
46 | alias KafkaBatcher.{AccumulatorsPoolSupervisor, Collector.State, TempStorage}
47 |
48 | @behaviour KafkaBatcher.Behaviours.Collector
49 | import KafkaBatcher.Collector.Implementation
50 |
51 | @error_notifier Application.compile_env(:kafka_batcher, :error_notifier, KafkaBatcher.DefaultErrorNotifier)
52 | @compile_config KafkaBatcher.Config.build_topic_config(opts)
53 |
54 | # Public API
55 | def start_link(args) do
56 | GenServer.start_link(__MODULE__, args, name: __MODULE__)
57 | end
58 |
59 | @doc "Returns a specification to start this module under a supervisor"
60 | def child_spec(config) do
61 | %{
62 | id: __MODULE__,
63 | start: {__MODULE__, :start_link, [config]},
64 | type: :worker
65 | }
66 | end
67 |
68 | def add_event(event), do: add_events([event])
69 |
70 | @impl KafkaBatcher.Behaviours.Collector
71 | def add_events(events) do
72 | GenServer.call(__MODULE__, {:add_events, events})
73 | catch
74 | _, _reason ->
75 | Logger.warning("KafkaBatcher: Couldn't get through to collector #{__MODULE__}")
76 | {:error, :kafka_unavailable}
77 | end
78 |
79 | @doc """
80 | Set the lock mode after a produce error in the topic
81 | """
82 | def set_lock do
83 | send(__MODULE__, :set_lock)
84 | end
85 |
86 | @doc """
87 | Retrieves the collector config
88 | """
89 | def get_config do
90 | GenServer.call(__MODULE__, :get_config)
91 | end
92 |
93 | def get_compile_config do
94 | @compile_config
95 | end
96 |
97 | # Callbacks
98 | @impl GenServer
99 | def init(config) do
100 | Process.flag(:trap_exit, true)
101 |
102 | state = build_state(config)
103 |
104 | Logger.debug("KafkaBatcher: Batch collector started: topic #{state.topic_name} pid #{inspect(self())}")
105 | send(self(), :init_accumulators)
106 | {:ok, state}
107 | end
108 |
109 | @impl GenServer
110 | def handle_call({:add_events, events}, _from, %State{ready?: false} = state) when is_list(events) do
111 | {:reply, {:error, :kafka_unavailable}, state}
112 | end
113 |
114 | @impl GenServer
115 | def handle_call({:add_events, events}, from, %State{locked?: true} = state) when is_list(events) do
116 | # If the temporal storage is empty - then Kafka is available, so we can unlock state and process handling.
117 | # In another case we don't want accumulate more messages in the memory, so we return an error to the caller.
118 | case TempStorage.check_storage(state) do
119 | %State{locked?: false} = new_state ->
120 | handle_call({:add_events, events}, from, new_state)
121 |
122 | new_state ->
123 | {:reply, {:error, :kafka_unavailable}, new_state}
124 | end
125 | end
126 |
127 | def handle_call({:add_events, events}, _from, %State{} = state) do
128 | case State.add_events(state, events) do
129 | {:ok, state} -> {:reply, :ok, state}
130 | {:error, reason, state} -> {:reply, {:error, reason}, state}
131 | end
132 | end
133 |
134 | def handle_call(:get_config, _from, state) do
135 | {:reply, state.config, state}
136 | end
137 |
138 | def handle_call(unknown, _from, state) do
139 | @error_notifier.report(
140 | type: "KafkaBatcherUnknownMessageCall",
141 | message: "#{__MODULE__} doesn't have a handle_call handler for #{inspect(unknown)}"
142 | )
143 | end
144 |
145 | @impl GenServer
146 | def handle_info(:init_accumulators, state) do
147 | new_state = store_partitions_count(state)
148 |
149 | case start_accumulators(new_state) do
150 | :ok ->
151 | Logger.debug("KafkaBatcher: Started accumulators for topic #{__MODULE__}")
152 | {:noreply, %State{new_state | ready?: true}}
153 |
154 | {:error, reason} ->
155 | Logger.info("KafkaBatcher: Failed to start accumulators. Topic #{__MODULE__}. Reason #{inspect(reason)}")
156 | ref = restart_timer(new_state)
157 | {:noreply, %State{new_state | timer_ref: ref, ready?: false}}
158 | end
159 | end
160 |
161 | def handle_info(:set_lock, state) do
162 | {:noreply, %State{state | locked?: true}}
163 | end
164 |
165 | @impl GenServer
166 | def handle_info(msg, state) do
167 | Logger.error("KafkaBatcher: Unexpected info #{inspect(msg)}")
168 | {:noreply, state}
169 | end
170 |
171 | @impl GenServer
172 | def terminate(reason, state) do
173 | Logger.info("KafkaBatcher: Terminating #{__MODULE__}. Reason #{inspect(reason)}")
174 | {:noreply, state}
175 | end
176 |
177 | @impl GenServer
178 | def format_status(_reason, [pdict, state]) do
179 | [pdict, drop_sensitive(state)]
180 | end
181 |
182 | defp drop_sensitive(%State{config: config} = state) do
183 | %State{state | config: Keyword.drop(config, [:sasl])}
184 | end
185 |
186 | # Private functions
187 |
188 | defp build_state(config) do
189 | %State{
190 | topic_name: Keyword.fetch!(config, :topic_name),
191 | config: config,
192 | collect_by_partition: Keyword.fetch!(config, :collect_by_partition),
193 | collector: __MODULE__
194 | }
195 | end
196 |
197 | defp restart_timer(%State{timer_ref: ref}) when :erlang.is_reference(ref) do
198 | _ = :erlang.cancel_timer(ref)
199 | do_restart()
200 | end
201 |
202 | defp restart_timer(_state) do
203 | do_restart()
204 | end
205 |
206 | defp do_restart do
207 | timeout = Application.get_env(:kafka_batcher, :reconnect_timeout, 5_000)
208 | :erlang.send_after(timeout, self(), :init_accumulators)
209 | end
210 | end
211 | end
212 | end
213 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # KafkaBatcher
2 |
3 | A library to increase the throughput of producing messages (coming one at a time) to Kafka by accumulating these messages into batches.
4 |
5 | ## Installation
6 |
7 | 1. Add `KafkaBatcher` to your list of dependencies in `mix.exs`:
8 |
9 | ```elixir
10 | def deps do
11 | [
12 | {:kafka_batcher, "~> 1.0.0"},
13 | # and one of kafka libraries
14 | # {:kaffe, "~> 1.24"}
15 | # or
16 | # {:kafka_ex, "~> 0.12"}
17 | ]
18 | end
19 | ```
20 |
21 | 2. Add KafkaBatcher supervisor to your supervisor tree
22 | ```elixir
23 | def start(_type, _args) do
24 | children = [
25 | # Describe the child spec
26 | KafkaBatcher.Supervisor
27 | ]
28 |
29 | opts = [strategy: :one_for_one, name: MyApp.Supervisor, max_restarts: 3, max_seconds: 5]
30 | Supervisor.start_link(children, opts)
31 | end
32 | ```
33 |
34 | 3. Configure a KafkaBatcher Producer
35 |
36 | Config example:
37 |
38 | ```elixir
39 | config :kafka_batcher, KafkaBatcher.Collector1, topic_name: "topic1"
40 | config :kafka_batcher, KafkaBatcher.Collector2, topic_name: "topic2"
41 | config :kafka_batcher, KafkaBatcher.Collector3, topic_name: "topic3"
42 | config :kafka_batcher, KafkaBatcher.Collector4, topic_name: "topic4"
43 | config :kafka_batcher, KafkaBatcher.Collector5, topic_name: "topic5"
44 |
45 | config :kafka_batcher, collectors:
46 | [
47 | KafkaBatcher.Collector1,
48 | KafkaBatcher.Collector2,
49 | KafkaBatcher.Collector3,
50 | KafkaBatcher.Collector4,
51 | KafkaBatcher.Collector5
52 | ]
53 |
54 | config :kafka_batcher, :kafka,
55 | endpoints: "localhost:9092",
56 | # in case you use SASL
57 | # sasl: %{mechanism: :scram_sha_512, login: "login", password: "password"},
58 | # ssl: true,
59 | telemetry: true,
60 | allow_topic_auto_creation: false,
61 | kafka_topic_aliases: %{
62 | "real_topic_name1" => "incoming-events",
63 | "real_topic_name2" => "special-topic"
64 | }
65 |
66 | # In case you use KafkaEx, you need to disable default worker to avoid crashes
67 | config :kafka_ex, :disable_default_worker, true
68 | ```
69 |
70 | Available parameters:
71 |
72 | * `:required_acks` How many acknowledgements the kafka broker should receive from the clustered replicas before acking producer.
73 | * `:endpoints` Kafka cluster endpoints, can be any of the brokers in the cluster, which does not necessarily have to be the leader of any partition, e.g. a load-balanced entrypoint to the remote Kafka cluster.
74 | More information in the [Brod producer](https://github.com/kafka4beam/brod/blob/master/src/brod_producer.erl) docs.
75 | * `:telemetry`, if set to `true`, metrics will be collected and exposed with PromEx.
76 | * `:allow_topic_auto_creation`, if set true, topics automatically are created with default parameters.
77 | * `:partition_strategy`, allows values: :random, :md5 or function (e.g. `fn _topic, _partitions_count, key, _value -> key end`)
78 | * `:partition_fn`, a function that takes four arguments and returns a number of the partition (see below)
79 | * `:collect_by_partition`, if set to `true`, producer accumulates messages separately for each partition of the topic
80 | * `:batch_size`, count of messages to be accumulated by collector before producing
81 | * `:max_wait_time`, max interval between batches in milliseconds. The batch will be produced to Kafka either by `batch_size` or by `max_wait_time` parameter.
82 | * `:batch_flusher`, a module implementing a function `flush?(binary(), map()) :: boolean()`.
83 | If the function returns true, the current batch will be sent to Kafka immediately.
84 | * `:kafka_topic_aliases` - you could define custom aliases for Kafka topics, it doesn't affect anything except metric labels.
85 | * `:sasl` - optional parameter. The parameter includes three parameters:
86 | %{mechanism: mechanism, login: "login", password: "password"}. Mechanism is
87 | plain | scram_sha_256 | scram_sha_512. Login and password should be type binary().
88 | * `:ssl` - optional parameter. Ssl should be type boolean(). By default `:ssl` is `false`.
89 | * `:min_delay` - optional parameter. Set minimal delay before send events. This parameter allows to increase max throughput in case when you get more messages (in term of count per second) than you expected when set `batch_size` parameter.
90 | * `:max_batch_bytesize` - optional parameter. Allows to set a limit on the maximum batch size. By default it is 1_000_000 bytes.
91 |
92 | **Important:** The size of one message should not exceed `max_batch_bytesize` setting. If you need to work with large messages you must increase `max_batch_bytesize` value and value of Kafka topic setting `max.message.bytes` as well.
93 |
94 | **Note:** you can still produce messages to any Kafka topic (even if it is not described in the kafka_batcher config) by using direct calls of Kaffe or KafkaEx.
95 |
96 |
97 | ## Usage
98 |
99 | ### Collector examples
100 |
101 | ```elixir
102 | defmodule MyApp.Collector1 do
103 | use KafkaBatcher.Collector,
104 | collect_by_partition: true,
105 | partition_fn: &MyApp.Collector1.calculate_partition/4,
106 | required_acks: -1,
107 | batch_size: 30,
108 | max_wait_time: 20_000
109 |
110 | def calculate_partition(_topic, partitions_count, _key, value) do
111 | val = value["client_id"] || value["device_id"]
112 | :erlang.phash2(val, partitions_count)
113 | end
114 | end
115 |
116 | defmodule MyApp.Collector2 do
117 | use KafkaBatcher.Collector,
118 | collect_by_partition: false,
119 | required_acks: 0,
120 | batch_size: 10,
121 | max_wait_time: 20_000
122 | end
123 |
124 | defmodule MyApp.Collector3 do
125 | use KafkaBatcher.Collector,
126 | collect_by_partition: false,
127 | required_acks: 0,
128 | batch_size: 10,
129 | max_wait_time: 20_000,
130 | partition_strategy: :random
131 | end
132 |
133 | defmodule MyApp.Collector4 do
134 | use KafkaBatcher.Collector,
135 | collect_by_partition: true,
136 | required_acks: 0,
137 | partition_fn: &MyApp.Collector4.calculate_partition/4,
138 | batch_size: 50,
139 | batch_flusher: MyApp.Collector4.BatchFlusher
140 |
141 | def calculate_partition(_topic, partitions_count, _key, value) do
142 | rem(key, partitions_count)
143 | end
144 |
145 | defmodule BatchFlusher do
146 | def flush?(_key, %{"type" => "SpecialType"}) do
147 | true
148 | end
149 |
150 | def flush?(_key, _value) do
151 | false
152 | end
153 | end
154 | end
155 |
156 | defmodule MyApp.Collector5 do
157 | use KafkaBatcher.Collector,
158 | collect_by_partition: false,
159 | partition_strategy: :md5,
160 | batch_size: 100,
161 | max_wait_time: 20_000,
162 | min_delay: 100,
163 | max_batch_bytesize: 1_000_000
164 | end
165 | ```
166 |
167 | ### Collector usage
168 |
169 | ```elixir
170 | defmodule MyApp.MyModule do
171 | ...
172 | def produce_to_kafka_topic1(event) do
173 | MyApp.Collector1.add_events(event)
174 | end
175 |
176 | def produce_to_kafka_topic2(event) do
177 | MyApp.Collector2.add_events(event)
178 | end
179 | ...
180 | end
181 |
182 | ```
183 |
184 | ### Getting current config of topic
185 |
186 | ```elixir
187 | KafkaBatcher.Config.get_collector_config("topic1")
188 | ```
189 |
190 | ### Getting all topics with config
191 |
192 | ```elixir
193 | KafkaBatcher.Config.get_configs_by_topic()
194 | ```
195 |
196 | ## Testing
197 |
198 | ```bash
199 | mix test
200 | ```
201 |
202 | or
203 |
204 | ```bash
205 | mix test --cover
206 | ```
207 | see https://github.com/parroty/excoveralls for details
208 |
209 |
210 | ## Prometheus metrics
211 |
212 | The library exposes the following metrics using the PromEx exporter plugin:
213 |
214 | - `prom_ex_kafka_consumer_batch_messages_count` (histogram) - The count of messages in one batch consumer.
215 | - `prom_ex_kafka_consumer_batch_total_size_byte` (histogram) - The size of a batch consumer of messages.
216 | - `prom_ex_kafka_consumer_duration_seconds` (histogram) - The time to produce one batch to Kafka.
217 | - `prom_ex_kafka_producer_batch_messages_count` (histogram) - The count of messages in one batch producer.
218 | - `prom_ex_kafka_producer_batch_total_size_byte` (histogram) - The size of a batch producer of messages.
219 | - `prom_ex_kafka_producer_duration_seconds` (histogram) - The time to produce one batch to Kafka.
220 |
--------------------------------------------------------------------------------
/test/kafka_batcher/producers/kaffe_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Producers.KaffeTest do
2 | alias KafkaBatcher.{
3 | MessageObject,
4 | Producers,
5 | Test.CalculatePartitionByValueCollector
6 | }
7 |
8 | use ExUnit.Case
9 | use KafkaBatcher.Mocks
10 |
11 | @client_name :kafka_producer_client
12 | @topic1 "topic1"
13 | @topic2 "topic2"
14 | @topic3 "topic3"
15 | @topic4 "topic4"
16 | @topic5 "topic5"
17 |
18 | @partition_topics %{
19 | @topic1 => 10,
20 | @topic2 => 20,
21 | @topic3 => 10,
22 | @topic4 => 4,
23 | @topic5 => 5
24 | }
25 |
26 | @messages [%MessageObject{key: "key1", value: "value1"}, %MessageObject{key: "key2", value: "value2"}]
27 | @expected_messages Enum.map(
28 | @messages,
29 | fn %MessageObject{headers: headers, key: key, value: value} ->
30 | %{
31 | headers: headers,
32 | key: key,
33 | value: value
34 | }
35 | end
36 | )
37 |
38 | setup :backup_kafka_batcher_env
39 | setup :prepare_config
40 |
41 | def backup_kafka_batcher_env(_) do
42 | config = Application.get_env(:kafka_batcher, :kafka)
43 |
44 | on_exit(fn ->
45 | Application.put_env(:kafka_batcher, :kafka, config)
46 | end)
47 | end
48 |
49 | def prepare_config(%{test: :"test start client with SASL_SSL"} = _context) do
50 | config =
51 | Application.get_env(:kafka_batcher, :kafka)
52 | |> Keyword.put(:sasl, %{mechanism: :scram_sha_512, login: "login", password: "password"})
53 | |> Keyword.put(:ssl, true)
54 |
55 | Application.put_env(:kafka_batcher, :kafka, config)
56 | end
57 |
58 | def prepare_config(%{test: :"test start client with empty parameter SASL_SSL"} = _context) do
59 | config =
60 | Application.get_env(:kafka_batcher, :kafka)
61 | |> Keyword.put(:sasl, %{})
62 | |> Keyword.put(:ssl, false)
63 |
64 | Application.put_env(:kafka_batcher, :kafka, config)
65 | end
66 |
67 | def prepare_config(_context) do
68 | config =
69 | Application.get_env(:kafka_batcher, :kafka)
70 | |> Keyword.delete(:sasl)
71 | |> Keyword.delete(:ssl)
72 |
73 | Application.put_env(:kafka_batcher, :kafka, config)
74 | end
75 |
76 | test "start client" do
77 | expect(KafkaBatcher.BrodMock, :start_link_client, fn endpoints, client_id, config ->
78 | assert client_id == @client_name
79 | assert endpoints == [{"localhost", 9092}]
80 |
81 | assert config == [
82 | collect_by_partition: false,
83 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
84 | max_wait_time: 1000,
85 | batch_size: 10,
86 | min_delay: 0,
87 | max_batch_bytesize: 1_000_000,
88 | ssl: false,
89 | sasl: :undefined,
90 | endpoints: [{"localhost", 9092}],
91 | telemetry: true,
92 | allow_topic_auto_creation: false,
93 | partition_strategy: :random,
94 | required_acks: 1
95 | ]
96 |
97 | {:ok, self()}
98 | end)
99 |
100 | {:ok, _} = Producers.Kaffe.start_client()
101 | end
102 |
103 | test "start client with SASL_SSL" do
104 | expect(KafkaBatcher.BrodMock, :start_link_client, fn endpoints, client_id, config ->
105 | assert client_id == @client_name
106 | assert endpoints == [{"localhost", 9092}]
107 |
108 | assert config == [
109 | collect_by_partition: false,
110 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
111 | max_wait_time: 1000,
112 | batch_size: 10,
113 | min_delay: 0,
114 | max_batch_bytesize: 1_000_000,
115 | ssl: true,
116 | sasl: {:scram_sha_512, "login", "password"},
117 | endpoints: [{"localhost", 9092}],
118 | telemetry: true,
119 | allow_topic_auto_creation: false,
120 | partition_strategy: :random,
121 | required_acks: 1
122 | ]
123 |
124 | {:ok, self()}
125 | end)
126 |
127 | {:ok, _} = Producers.Kaffe.start_client()
128 | end
129 |
130 | test "start client with empty parameter SASL_SSL" do
131 | expect(KafkaBatcher.BrodMock, :start_link_client, fn endpoints, client_id, config ->
132 | assert client_id == @client_name
133 | assert endpoints == [{"localhost", 9092}]
134 |
135 | assert config == [
136 | collect_by_partition: false,
137 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
138 | max_wait_time: 1000,
139 | batch_size: 10,
140 | min_delay: 0,
141 | max_batch_bytesize: 1_000_000,
142 | ssl: false,
143 | sasl: :undefined,
144 | endpoints: [{"localhost", 9092}],
145 | telemetry: true,
146 | allow_topic_auto_creation: false,
147 | partition_strategy: :random,
148 | required_acks: 1
149 | ]
150 |
151 | {:ok, self()}
152 | end)
153 |
154 | {:ok, _} = Producers.Kaffe.start_client()
155 | end
156 |
157 | test "start client with bad SASL parameter" do
158 | old_config = Application.get_env(:kafka_batcher, :kafka)
159 |
160 | config =
161 | old_config
162 | |> Keyword.put(:sasl, %{mechanism: :bad_mechanism, login: "login", password: "password"})
163 | |> Keyword.put(:ssl, false)
164 |
165 | assert_raise KafkaBatcher.Config.SASLConfigError, fn ->
166 | Application.put_env(:kafka_batcher, :kafka, config)
167 | KafkaBatcher.Config.general_producer_config()
168 | end
169 |
170 | Application.put_env(:kafka_batcher, :kafka, old_config)
171 | end
172 |
173 | test "start producer" do
174 | expect(KafkaBatcher.BrodMock, :start_producer, fn client_id, topic_name, config ->
175 | assert client_id == @client_name
176 | assert topic_name == @topic1
177 |
178 | assert [
179 | {:ssl, false},
180 | {:sasl, :undefined},
181 | {:endpoints, [{"localhost", 9092}]},
182 | {:partition_fn, &CalculatePartitionByValueCollector.calculate_partition/4},
183 | {:allow_topic_auto_creation, false},
184 | {:partition_strategy, :random},
185 | {:required_acks, -1},
186 | {:collect_by_partition, true},
187 | {:telemetry, true},
188 | {:batch_flusher, KafkaBatcher.Accumulator.DefaultBatchFlusher},
189 | {:max_wait_time, 1000},
190 | {:batch_size, 30},
191 | {:min_delay, 0},
192 | {:max_batch_bytesize, 1_000_000},
193 | {:topic_name, "topic1"}
194 | ] == config
195 |
196 | :ok
197 | end)
198 |
199 | topic1_config = KafkaBatcher.Config.get_collector_config(@topic1)
200 | :ok = Producers.Kaffe.start_producer(@topic1, topic1_config)
201 | end
202 |
203 | test "get partitions count" do
204 | expect(KafkaBatcher.BrodMock, :get_partitions_count, fn client_id, topic ->
205 | assert client_id == @client_name
206 | assert topic == @topic1
207 | {:ok, Map.get(@partition_topics, @topic1)}
208 | end)
209 |
210 | cnt = Map.get(@partition_topics, @topic1)
211 | {:ok, cnt1} = Producers.Kaffe.get_partitions_count(@topic1)
212 | assert cnt == cnt1
213 | end
214 |
215 | test "produce sync by partitions" do
216 | expect(KafkaBatcher.BrodMock, :produce_sync, fn client_id, topic, partition, _key, messages ->
217 | assert client_id == @client_name
218 | assert topic == @topic1
219 | assert partition == 5
220 | assert Enum.map(messages, fn message -> Map.drop(message, [:ts]) end) == @expected_messages
221 | :ok
222 | end)
223 |
224 | topic1_config = KafkaBatcher.Config.get_collector_config(@topic1)
225 | Producers.Kaffe.produce_list(@messages, @topic1, 5, topic1_config)
226 | end
227 |
228 | test "produce sync without partitions" do
229 | partitions_count = Map.get(@partition_topics, @topic2)
230 |
231 | grouped_messages =
232 | Enum.group_by(
233 | @expected_messages,
234 | fn %{key: key, value: _value} ->
235 | :erlang.phash2(key, partitions_count)
236 | end
237 | )
238 | |> Enum.into(%{})
239 |
240 | Enum.each(grouped_messages, fn {partition, messages} ->
241 | expect(KafkaBatcher.BrodMock, :produce_sync, fn client_id, topic, expect_partition, _key, _expect_messages ->
242 | assert client_id == @client_name
243 | assert topic == @topic2
244 | assert expect_partition == partition
245 | assert Enum.map(messages, fn message -> Map.drop(message, [:ts]) end) == messages
246 | :ok
247 | end)
248 | end)
249 |
250 | expect(KafkaBatcher.BrodMock, :get_partitions_count, fn client_id, topic ->
251 | assert client_id == @client_name
252 | assert topic == @topic2
253 | {:ok, partitions_count}
254 | end)
255 |
256 | topic2_config = KafkaBatcher.Config.get_collector_config(@topic2)
257 | Producers.Kaffe.produce_list(@messages, @topic2, nil, topic2_config)
258 | end
259 | end
260 |
--------------------------------------------------------------------------------
/lib/kafka_batcher/config.ex:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.Config do
2 | @moduledoc """
3 | KafkaBatcher configuration processing.
4 | All config parameters are described in details in README.md
5 | Examples of configs can be found in the files config/test.exs and test/support/collectors/collector_handlers.ex
6 | """
7 |
8 | defmodule SASLConfigError do
9 | defexception [:message]
10 |
11 | def exception(_term) do
12 | %SASLConfigError{message: "Sasl config error"}
13 | end
14 | end
15 |
16 | defmodule CollectorMissingError do
17 | defexception [:message]
18 |
19 | def exception(term) do
20 | %CollectorMissingError{message: "Collector #{inspect(term)} missing"}
21 | end
22 | end
23 |
24 | defmodule BadConfigError do
25 | defexception [:message]
26 |
27 | def exception(term) do
28 | %BadConfigError{message: "Topic config error #{inspect(term)}"}
29 | end
30 | end
31 |
32 | @type sasl_mechanism() :: :plain | :scram_sha_256 | :scram_sha_512
33 | @type sasl_type() :: {sasl_mechanism(), binary(), binary()} | :undefined
34 |
35 | @spec collectors_spec() :: [:supervisor.child_spec()]
36 | def collectors_spec do
37 | collector_configs = get_configs_by_collector!()
38 |
39 | children_specs =
40 | Enum.reduce(
41 | collector_configs,
42 | [],
43 | fn {collector, config}, all_children ->
44 | collector_spec = collector.child_spec(config)
45 | accum_sup_spec = KafkaBatcher.AccumulatorsPoolSupervisor.child_spec(config)
46 |
47 | [collector_spec, accum_sup_spec | all_children]
48 | end
49 | )
50 |
51 | conn_manager_spec = KafkaBatcher.ConnectionManager.child_spec()
52 | Enum.reverse([conn_manager_spec | children_specs])
53 | end
54 |
55 | @spec general_producer_config() :: Keyword.t()
56 | def general_producer_config do
57 | Application.get_env(:kafka_batcher, :kafka, [])
58 | |> Keyword.take(allowed_producer_keys())
59 | |> set_endpoints()
60 | |> set_sasl()
61 | |> set_ssl()
62 | |> then(fn config -> Keyword.merge(default_producer_config(), config) end)
63 | end
64 |
65 | @doc """
66 | Return all configured topics with its config.
67 | """
68 | @spec get_configs_by_topic_name() :: list({binary(), Keyword.t()})
69 | def get_configs_by_topic_name do
70 | get_configs_by_collector!()
71 | |> Enum.map(fn {_, config} ->
72 | {Keyword.fetch!(config, :topic_name), config}
73 | end)
74 | |> Enum.into(%{})
75 | end
76 |
77 | @spec get_configs_by_collector!() :: list({atom(), Keyword.t()})
78 | def get_configs_by_collector! do
79 | Enum.map(fetch_runtime_configs(), fn {collector, runtime_config} ->
80 | config =
81 | general_producer_config()
82 | |> Keyword.merge(get_compile_config!(collector))
83 | |> Keyword.merge(runtime_config)
84 |
85 | case validate_config!({collector, config}) do
86 | :ok ->
87 | {collector, config}
88 |
89 | {:error, reasons} ->
90 | raise(KafkaBatcher.Config.BadConfigError, "Collector config failed: #{inspect(reasons)}")
91 | end
92 | end)
93 | end
94 |
95 | @spec get_collector_config(topic_name :: binary()) :: Keyword.t()
96 | def get_collector_config(topic_name) do
97 | case get_configs_by_topic_name()[topic_name] do
98 | nil -> general_producer_config()
99 | config -> config
100 | end
101 | end
102 |
103 | @spec build_topic_config(opts :: Keyword.t()) :: Keyword.t()
104 | def build_topic_config(opts) do
105 | default_config = default_producer_config()
106 |
107 | allowed_producer_keys()
108 | |> Enum.map(fn key -> {key, Keyword.get(opts, key) || Keyword.get(default_config, key)} end)
109 | |> Enum.filter(fn {_, value} -> value != nil end)
110 | end
111 |
112 | @spec get_endpoints :: list({binary(), non_neg_integer()})
113 | def get_endpoints do
114 | Application.get_env(:kafka_batcher, :kafka, [])
115 | |> get_endpoints()
116 | end
117 |
118 | @spec get_endpoints(config :: Keyword.t()) :: list({binary(), non_neg_integer()})
119 | def get_endpoints(config) do
120 | Keyword.fetch!(config, :endpoints)
121 | |> parse_endpoints()
122 | end
123 |
124 | defp parse_endpoints(endpoints) do
125 | endpoints |> String.split(",") |> Enum.map(&parse_endpoint/1)
126 | end
127 |
128 | defp parse_endpoint(url) do
129 | [host, port] = String.split(url, ":")
130 | {host, :erlang.binary_to_integer(port)}
131 | end
132 |
133 | defp validate_config!({collector, config}) do
134 | required_keys()
135 | |> Enum.reduce(
136 | [],
137 | fn
138 | key, acc when is_atom(key) ->
139 | case Keyword.has_key?(config, key) do
140 | true ->
141 | acc
142 |
143 | false ->
144 | ["collector #{inspect(collector)}. Not found required key #{inspect(key)}" | acc]
145 | end
146 |
147 | %{cond: condition, keys: keys}, acc ->
148 | case check_conditions?(condition, config) do
149 | true ->
150 | check_keys(keys, config, collector, acc)
151 |
152 | false ->
153 | acc
154 | end
155 | end
156 | )
157 | |> case do
158 | [] -> :ok
159 | reasons -> {:error, reasons}
160 | end
161 | end
162 |
163 | defp check_keys(keys, config, collector, acc) do
164 | case keys -- Keyword.keys(config) do
165 | [] ->
166 | acc
167 |
168 | fields ->
169 | ["collector #{inspect(collector)}. Not found required keys #{inspect(fields)}" | acc]
170 | end
171 | end
172 |
173 | defp check_conditions?(cond, config) do
174 | Enum.all?(
175 | cond,
176 | fn {key, value} ->
177 | Keyword.get(config, key) == value
178 | end
179 | )
180 | end
181 |
182 | defp required_keys do
183 | [
184 | :topic_name,
185 | %{cond: [collect_by_partition: true], keys: [:partition_fn]},
186 | %{cond: [collect_by_partition: false], keys: [:partition_strategy]}
187 | ]
188 | end
189 |
190 | defp set_endpoints(config) do
191 | Keyword.put(config, :endpoints, get_endpoints(config))
192 | end
193 |
194 | defp set_sasl(config) do
195 | new_sasl =
196 | case validate_sasl_config!(Keyword.get(config, :sasl)) do
197 | {:ok, sasl_config_tuple} -> sasl_config_tuple
198 | _ -> :undefined
199 | end
200 |
201 | Keyword.put(config, :sasl, new_sasl)
202 | end
203 |
204 | defp allowed_producer_keys do
205 | keys =
206 | default_producer_config()
207 | |> Keyword.keys()
208 |
209 | [[:endpoints, :partition_fn, :topic_name, :ssl, :sasl] | keys]
210 | |> List.flatten()
211 | end
212 |
213 | @spec validate_sasl_config!(map() | nil) :: {:ok, sasl_type()} | {:error, :is_not_set} | no_return()
214 | defp validate_sasl_config!(%{mechanism: mechanism, login: login, password: password} = config) do
215 | valid_mechanism = mechanism in [:plain, :scram_sha_256, :scram_sha_512]
216 |
217 | if valid_mechanism && password != nil && login != nil do
218 | {:ok, {mechanism, login, password}}
219 | else
220 | raise(KafkaBatcher.Config.SASLConfigError, "SASL config failed: #{inspect(config)}")
221 | end
222 | end
223 |
224 | defp validate_sasl_config!(sasl_config) when sasl_config == nil or sasl_config == %{} do
225 | {:error, :is_not_set}
226 | end
227 |
228 | defp validate_sasl_config!(bad_sasl_config) do
229 | raise(KafkaBatcher.Config.SASLConfigError, "SASL config failed: #{inspect(bad_sasl_config)}")
230 | end
231 |
232 | defp set_ssl(config) do
233 | Keyword.put(config, :ssl, get_ssl(config))
234 | end
235 |
236 | defp get_ssl(config) do
237 | Keyword.get(config, :ssl, false)
238 | end
239 |
240 | defp get_compile_config!(module) do
241 | with {:module, ^module} <- Code.ensure_compiled(module),
242 | {:ok, [_ | _] = config} <- {:ok, module.get_compile_config()} do
243 | config
244 | else
245 | _ ->
246 | raise(KafkaBatcher.Config.CollectorMissingError, "Collector: #{inspect(module)} missing")
247 | end
248 | end
249 |
250 | defp fetch_runtime_configs do
251 | Application.get_env(:kafka_batcher, :collectors)
252 | |> Enum.map(&fetch_runtime_config/1)
253 | end
254 |
255 | defp fetch_runtime_config(collector_name) do
256 | case Application.fetch_env(:kafka_batcher, collector_name) do
257 | {:ok, config} ->
258 | {collector_name, config}
259 |
260 | _ ->
261 | {collector_name, []}
262 | end
263 | end
264 |
265 | defp default_producer_config do
266 | [
267 | ## brod producer parameters
268 | ## https://github.com/kafka4beam/brod/blob/master/src/brod_producer.erl
269 | allow_topic_auto_creation: false,
270 | partition_strategy: :random,
271 | required_acks: -1,
272 | ## KafkaBatcher parameters
273 | ## specified start pool processes for collection events by partitions
274 | collect_by_partition: false,
275 | ## send metric values to prom_ex application
276 | telemetry: true,
277 | # This module implements logic for force pushing current batch to Kafka,
278 | # without waiting for other conditions (on size and/or interval).
279 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
280 | # These parameters are required for the collector
281 | max_wait_time: 1_000,
282 | batch_size: 10,
283 | min_delay: 0,
284 | max_batch_bytesize: 1_000_000
285 | ]
286 | end
287 | end
288 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
--------------------------------------------------------------------------------
/mix.lock:
--------------------------------------------------------------------------------
1 | %{
2 | "brod": {:hex, :brod, "3.19.1", "6e42e0b495108f8c691717654c6efef7a02f69d1eaaf885bb8d0f7aa8c04b9c7", [:rebar3], [{:kafka_protocol, "4.1.5", [hex: :kafka_protocol, repo: "hexpm", optional: false]}, {:snappyer, "1.2.9", [hex: :snappyer, repo: "hexpm", optional: false]}], "hexpm", "241899cff62e175cd60de4acd4b72f40edb3529b18853f8b22a8a35e4c76d71d"},
3 | "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
4 | "castore": {:hex, :castore, "1.0.15", "8aa930c890fe18b6fe0a0cff27b27d0d4d231867897bd23ea772dee561f032a3", [:mix], [], "hexpm", "96ce4c69d7d5d7a0761420ef743e2f4096253931a3ba69e5ff8ef1844fe446d3"},
5 | "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"},
6 | "crc32cer": {:hex, :crc32cer, "0.1.8", "c6c2275c5fb60a95f4935d414f30b50ee9cfed494081c9b36ebb02edfc2f48db", [:rebar3], [], "hexpm", "251499085482920deb6c9b7aadabf9fb4c432f96add97ab42aee4501e5b6f591"},
7 | "credo": {:hex, :credo, "1.7.12", "9e3c20463de4b5f3f23721527fcaf16722ec815e70ff6c60b86412c695d426c1", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8493d45c656c5427d9c729235b99d498bd133421f3e0a683e5c1b561471291e5"},
8 | "dialyxir": {:hex, :dialyxir, "1.4.6", "7cca478334bf8307e968664343cbdb432ee95b4b68a9cba95bdabb0ad5bdfd9a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "8cf5615c5cd4c2da6c501faae642839c8405b49f8aa057ad4ae401cb808ef64d"},
9 | "earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"},
10 | "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"},
11 | "ex_doc": {:hex, :ex_doc, "0.38.2", "504d25eef296b4dec3b8e33e810bc8b5344d565998cd83914ffe1b8503737c02", [:mix], [{:earmark_parser, "~> 1.4.44", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "732f2d972e42c116a70802f9898c51b54916e542cc50968ac6980512ec90f42b"},
12 | "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"},
13 | "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"},
14 | "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"},
15 | "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
16 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
17 | "kaffe": {:hex, :kaffe, "1.27.2", "68944e6ac4a57543331c55a6d74265c087cd34c0e506f05929363d953fd5a5c2", [:mix], [{:brod, "~> 3.0", [hex: :brod, repo: "hexpm", optional: false]}, {:retry, ">= 0.15.0 and < 0.19.0", [hex: :retry, repo: "hexpm", optional: false]}], "hexpm", "35400e1a04e69c34898a07ace092c3bc94a356947ac2449f6dbb21e25daa1232"},
18 | "kafka_ex": {:hex, :kafka_ex, "0.14.0", "1baefdc5e6c9f67613846fb2e5ee76f98b2d79aed67ae8884c58996d0f69683b", [:mix], [{:kayrock, "~> 0.2.0", [hex: :kayrock, repo: "hexpm", optional: false]}], "hexpm", "4ccab4c458cc027d24e58bff03594d37c5f1ead898bb7c596a66ae8169eb1dcf"},
19 | "kafka_protocol": {:hex, :kafka_protocol, "4.1.5", "d15e64994a8ca99716ab47db4132614359ac1bfa56d6c5b4341fdc1aa4041518", [:rebar3], [{:crc32cer, "0.1.8", [hex: :crc32cer, repo: "hexpm", optional: false]}], "hexpm", "c956c9357fef493b7072a35d0c3e2be02aa5186c804a412d29e62423bb15e5d9"},
20 | "kayrock": {:hex, :kayrock, "0.2.0", "b2e55b555dcb333e6e099d66f2db20f898ca9461d5bf1c4c65acebc0bf69a814", [:mix], [{:connection, "~> 1.1", [hex: :connection, repo: "hexpm", optional: false]}, {:crc32cer, "~> 0.1", [hex: :crc32cer, repo: "hexpm", optional: false]}, {:varint, "~> 1.2", [hex: :varint, repo: "hexpm", optional: false]}], "hexpm", "b2dcf44024fef1bcf02419b55bb78ac18a9bffca2bdd25d2b1941709b567155a"},
21 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"},
22 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"},
23 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"},
24 | "makeup_html": {:hex, :makeup_html, "0.2.0", "9f810da8d43d625ccd3f7ea25997e588fa541d80e0a8c6b895157ad5c7e9ca13", [:mix], [{:makeup, "~> 1.2", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "0856f7beb9a6a642ab1307e06d990fe39f0ba58690d0b8e662aa2e027ba331b2"},
25 | "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
26 | "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
27 | "mox": {:hex, :mox, "1.2.0", "a2cd96b4b80a3883e3100a221e8adc1b98e4c3a332a8fc434c39526babafd5b3", [:mix], [{:nimble_ownership, "~> 1.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}], "hexpm", "c7b92b3cc69ee24a7eeeaf944cd7be22013c52fcb580c1f33f50845ec821089a"},
28 | "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
29 | "nimble_ownership": {:hex, :nimble_ownership, "1.0.1", "f69fae0cdd451b1614364013544e66e4f5d25f36a2056a9698b793305c5aa3a6", [:mix], [], "hexpm", "3825e461025464f519f3f3e4a1f9b68c47dc151369611629ad08b636b73bb22d"},
30 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"},
31 | "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
32 | "octo_fetch": {:hex, :octo_fetch, "0.4.0", "074b5ecbc08be10b05b27e9db08bc20a3060142769436242702931c418695b19", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "cf8be6f40cd519d7000bb4e84adcf661c32e59369ca2827c4e20042eda7a7fc6"},
33 | "peep": {:hex, :peep, "3.5.0", "9f6ead7b0f2c684494200c8fc02e7e62e8c459afe861b29bd859e4c96f402ed8", [:mix], [{:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:plug, "~> 1.16", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5a73a99c6e60062415efeb7e536a663387146463a3d3df1417da31fd665ac210"},
34 | "plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"},
35 | "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
36 | "prom_ex": {:hex, :prom_ex, "1.11.0", "1f6d67f2dead92224cb4f59beb3e4d319257c5728d9638b4a5e8ceb51a4f9c7e", [:mix], [{:absinthe, ">= 1.7.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.1.0", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.11.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.18", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.10.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.4", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:peep, "~> 3.0", [hex: :peep, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.7.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.20.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.16.0", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 2.6.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.2", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.1", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "76b074bc3730f0802978a7eb5c7091a65473eaaf07e99ec9e933138dcc327805"},
37 | "retry": {:hex, :retry, "0.18.0", "dc58ebe22c95aa00bc2459f9e0c5400e6005541cf8539925af0aa027dc860543", [:mix], [], "hexpm", "9483959cc7bf69c9e576d9dfb2b678b71c045d3e6f39ab7c9aa1489df4492d73"},
38 | "snappyer": {:hex, :snappyer, "1.2.9", "9cc58470798648ce34c662ca0aa6daae31367667714c9a543384430a3586e5d3", [:rebar3], [], "hexpm", "18d00ca218ae613416e6eecafe1078db86342a66f86277bd45c95f05bf1c8b29"},
39 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
40 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
41 | "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"},
42 | "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"},
43 | "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"},
44 | "uniq": {:hex, :uniq, "0.6.1", "369660ecbc19051be526df3aa85dc393af5f61f45209bce2fa6d7adb051ae03c", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm", "6426c34d677054b3056947125b22e0daafd10367b85f349e24ac60f44effb916"},
45 | "varint": {:hex, :varint, "1.5.1", "17160c70d0428c3f8a7585e182468cac10bbf165c2360cf2328aaa39d3fb1795", [:mix], [], "hexpm", "24f3deb61e91cb988056de79d06f01161dd01be5e0acae61d8d936a552f1be73"},
46 | }
47 |
--------------------------------------------------------------------------------
/test/kafka_batcher/collector_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Producers.CollectorTest do
2 | use ExUnit.Case, async: false
3 | use KafkaBatcher.Mocks
4 |
5 | alias KafkaBatcher.MessageObject
6 | alias KafkaBatcher.Producers.TestProducer
7 | alias KafkaBatcher.TempStorage.Batch
8 | alias KafkaBatcher.TempStorage.TestStorage
9 | alias KafkaBatcher.Test
10 |
11 | @template_events [
12 | %{
13 | "id" => "event1",
14 | "client_id" => "12345"
15 | },
16 | %{
17 | "id" => "event2",
18 | "client_id" => "123",
19 | "type" => "push_delivery_success"
20 | },
21 | %{
22 | "id" => "event3",
23 | "client_id" => "123",
24 | "type" => "push_open_success"
25 | },
26 | %{
27 | "id" => "event4",
28 | "client_id" => "456",
29 | "type" => "push_open_success"
30 | },
31 | %{
32 | "id" => "event5",
33 | "client_id" => "456",
34 | "type" => "push_delivery_success"
35 | },
36 | %{
37 | "id" => "event6",
38 | "device_id" => "9999",
39 | "type" => "Catalog - Category - Filter",
40 | "source" => "https://samokat.ru/samokat-app"
41 | },
42 | %{
43 | "id" => "event7",
44 | "device_id" => "5324",
45 | "type" => "Catalog - Main - View",
46 | "source" => "https://samokat.ru/samokat-app"
47 | },
48 | %{
49 | "id" => "event8",
50 | "client_id" => "9999",
51 | "type" => "Catalog - Category - Filter",
52 | "source" => "https://samokat.ru/samokat-app"
53 | },
54 | %{
55 | "id" => "event9",
56 | "client_id" => "5324",
57 | "type" => "Catalog - Main - View",
58 | "source" => "https://samokat.ru/samokat-app"
59 | },
60 | %{
61 | "id" => "event10",
62 | "client_id" => "9999",
63 | "type" => "Global - App - To foreground",
64 | "source" => "vma.samokat.ru"
65 | },
66 | %{
67 | "id" => "event10",
68 | "client_id" => "9999",
69 | "type" => "Global - App - To foreground",
70 | "source" => "unknown_source"
71 | }
72 | ]
73 |
74 | setup_all do
75 | prepare_producers()
76 | end
77 |
78 | setup do
79 | prepare_mocks()
80 | end
81 |
82 | def prepare_mocks do
83 | TestProducer.set_owner()
84 | TestProducer.set_notification_mode(:do_produce, :on)
85 | TestProducer.set_notification_mode(:get_partitions_count, :on)
86 | TestStorage.set_owner()
87 | TestStorage.set_notification_mode(:save_batch, :on)
88 | TestStorage.set_notification_mode(:empty?, :on)
89 |
90 | on_exit(fn ->
91 | TestProducer.set_notification_mode(:start_client, :off)
92 | TestProducer.set_notification_mode(:start_producer, :off)
93 | TestProducer.set_notification_mode(:do_produce, :off)
94 | TestProducer.set_notification_mode(:get_partitions_count, :off)
95 | TestStorage.set_notification_mode(:save_batch, :off)
96 | TestStorage.set_notification_mode(:empty?, :off)
97 | end)
98 | end
99 |
100 | def prepare_producers do
101 | KafkaBatcher.ProducerHelper.connection_manager_up()
102 | :ok
103 | end
104 |
105 | test "produce by_partitions, calculate partition by value" do
106 | topic1_config = Test.CalculatePartitionByValueCollector.get_config()
107 | topic1 = TestProducer.topic_name(1)
108 |
109 | sup_name = :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.#{topic1}"
110 |
111 | [{pool_sup, _, _, _}] =
112 | Supervisor.which_children(KafkaBatcher.Supervisor)
113 | |> Enum.filter(fn
114 | {^sup_name, _, _, _} -> true
115 | _ -> false
116 | end)
117 |
118 | Supervisor.which_children(pool_sup)
119 | |> Enum.all?(fn {_, _, _, [module]} -> module == Test.CalculatePartitionByValueCollector end)
120 |
121 | grouped_events =
122 | Enum.group_by(
123 | @template_events,
124 | fn event ->
125 | {:ok, partitions_count} = TestProducer.get_partitions_count(topic1)
126 | calc_partition(event, "", topic1, partitions_count, topic1_config)
127 | end,
128 | fn event -> {get_key_id(event), event} end
129 | )
130 | |> generate_events_by_group(Keyword.fetch!(topic1_config, :batch_size))
131 |
132 | grouped_messages =
133 | Enum.map(grouped_events, transform_messages())
134 | |> Enum.into(%{})
135 |
136 | Enum.map(
137 | grouped_events,
138 | fn {_part, events} ->
139 | events
140 | end
141 | )
142 | |> List.flatten()
143 | |> Test.CalculatePartitionByValueCollector.add_events()
144 |
145 | Enum.each(
146 | grouped_messages,
147 | fn {call_partition, messages} ->
148 | parameters = {messages, topic1, call_partition, topic1_config}
149 | assert_receive(%{action: :do_produce, parameters: ^parameters})
150 | end
151 | )
152 | end
153 |
154 | test "produce by_partitions, calculate partition by key" do
155 | topic4_config = Test.CalculatePartitionByKeyCollector.get_config()
156 | topic4 = TestProducer.topic_name(4)
157 |
158 | grouped_events =
159 | Enum.group_by(
160 | @template_events,
161 | fn event ->
162 | {:ok, partitions_count} = TestProducer.get_partitions_count(topic4)
163 | calc_partition(event, get_key_id(event), topic4, partitions_count, topic4_config)
164 | end,
165 | fn event -> {get_key_id(event), event} end
166 | )
167 | |> generate_events_by_group(Keyword.fetch!(topic4_config, :batch_size))
168 |
169 | grouped_messages =
170 | Enum.map(grouped_events, transform_messages())
171 | |> Enum.into(%{})
172 |
173 | Enum.map(
174 | grouped_events,
175 | fn {_part, events} ->
176 | events
177 | end
178 | )
179 | |> List.flatten()
180 | |> Test.CalculatePartitionByKeyCollector.add_events()
181 |
182 | Enum.each(
183 | grouped_messages,
184 | fn {call_partition, messages} ->
185 | parameters = {messages, topic4, call_partition, topic4_config}
186 | assert_receive(%{action: :do_produce, parameters: ^parameters})
187 | end
188 | )
189 | end
190 |
191 | test "produce simple collector" do
192 | topic2_config = Test.SimpleCollector.get_config()
193 | topic2 = TestProducer.topic_name(2)
194 | batch_size = Keyword.fetch!(topic2_config, :batch_size)
195 |
196 | events = generate_events(@template_events, batch_size)
197 | messages = Enum.map(events, fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
198 |
199 | Enum.map(events, fn event -> {"", event} end)
200 | |> Test.SimpleCollector.add_events()
201 |
202 | assert_receive(%{action: :do_produce, parameters: parameters})
203 | {^messages, ^topic2, _call_partition, ^topic2_config} = parameters
204 | end
205 |
206 | test "produce simple collector with error" do
207 | topic2_config = Test.SimpleCollector.get_config()
208 | topic2 = TestProducer.topic_name(2)
209 | batch_size = Keyword.fetch!(topic2_config, :batch_size)
210 |
211 | events = generate_events(@template_events, batch_size)
212 | source_messages = Enum.map(events, fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
213 |
214 | TestProducer.set_response(:do_produce, {:error, :usual_error})
215 |
216 | Enum.map(events, fn event -> {"", event} end)
217 | |> Test.SimpleCollector.add_events()
218 |
219 | assert_receive(%{action: :do_produce, parameters: parameters})
220 | {^source_messages, ^topic2, _call_partition, ^topic2_config} = parameters
221 |
222 | assert_receive(%{action: :save_batch, parameters: retry_data})
223 |
224 | %Batch{
225 | messages: retry_messages,
226 | topic: retry_topic,
227 | partition: retry_partition,
228 | producer_config: retry_config
229 | } = retry_data
230 |
231 | assert retry_topic === topic2
232 | assert retry_config === topic2_config
233 | assert retry_partition === nil
234 | assert retry_messages === source_messages
235 |
236 | interval = Application.fetch_env!(:kafka_batcher, :recheck_kafka_availability_interval)
237 |
238 | Process.sleep(interval + 1)
239 |
240 | Enum.map(events, fn event -> {"", event} end)
241 | |> Test.SimpleCollector.add_events()
242 |
243 | assert_receive(%{action: :do_produce, parameters: _parameters})
244 | assert_received(%{action: :save_batch})
245 | assert_received(%{action: :empty?, parameters: ^topic2})
246 | TestProducer.set_response(:do_produce, :ok)
247 | end
248 |
249 | test "produce simple collector by max wait" do
250 | topic8_config = Test.SimpleCollectorMaxWaitTime.get_config()
251 | topic8 = TestProducer.topic_name(8)
252 | max_wait_time = Keyword.fetch!(topic8_config, :max_wait_time)
253 |
254 | events = generate_events(@template_events, 1)
255 | messages = Enum.map(events, fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
256 |
257 | Enum.map(events, fn event -> {"", event} end)
258 | |> Test.SimpleCollectorMaxWaitTime.add_events()
259 |
260 | assert_receive(%{action: :do_produce, parameters: parameters}, max_wait_time + 100)
261 | {^messages, ^topic8, _call_partition, ^topic8_config} = parameters
262 | end
263 |
264 | test "produce simple collector by max wait with producing failed" do
265 | topic8_config = Test.SimpleCollectorMaxWaitTime.get_config()
266 | topic8 = TestProducer.topic_name(8)
267 | max_wait_time = Keyword.fetch!(topic8_config, :max_wait_time)
268 |
269 | events = generate_events(@template_events, 1)
270 | messages = Enum.map(events, fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
271 | TestProducer.set_response(:do_produce, {:error, :kafka_unavailable})
272 | TestProducer.set_response(:empty?, false)
273 |
274 | Enum.map(events, fn event -> {"", event} end)
275 | |> Test.SimpleCollectorMaxWaitTime.add_events()
276 |
277 | assert_receive(%{action: :do_produce, parameters: _parameters})
278 |
279 | Process.sleep(2 * max_wait_time)
280 |
281 | Enum.map(events, fn event -> {"", event} end)
282 | |> Test.SimpleCollectorMaxWaitTime.add_events()
283 |
284 | TestProducer.set_response(:do_produce, :ok)
285 |
286 | assert_received(%{action: :get_partitions_count, parameters: ^topic8})
287 | assert_received(%{action: :empty?, parameters: ^topic8})
288 |
289 | assert_receive(%{action: :do_produce, parameters: parameters})
290 | {^messages, ^topic8, _call_partition, ^topic8_config} = parameters
291 | end
292 |
293 | test "produce simple collector with delay" do
294 | topic6_config = Test.SimpleCollectorWithDelay.get_config()
295 | topic6 = TestProducer.topic_name(6)
296 | batch_size = Keyword.fetch!(topic6_config, :batch_size)
297 |
298 | events = generate_events(@template_events, batch_size)
299 | messages = Enum.map(events, fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
300 |
301 | delay = Keyword.get(topic6_config, :min_delay)
302 |
303 | Enum.map(events, fn event -> {"", event} end)
304 | |> Test.SimpleCollectorWithDelay.add_events()
305 |
306 | assert_receive(%{action: :do_produce, parameters: parameters})
307 | {^messages, ^topic6, _call_partition, ^topic6_config} = parameters
308 |
309 | Enum.map(events, fn event -> {"", event} end)
310 | |> Test.SimpleCollectorWithDelay.add_events()
311 |
312 | refute_receive(%{action: :do_produce}, delay, "second call should be delayed")
313 |
314 | Test.SimpleCollectorWithDelay.add_events([hd(messages)])
315 | assert_receive(%{action: :do_produce})
316 | end
317 |
318 | test "produce simple collector with max byte size control" do
319 | topic7_config = Test.SimpleCollectorMaxByteSizeControl.get_config()
320 | topic7 = TestProducer.topic_name(7)
321 | batch_size = Keyword.fetch!(topic7_config, :batch_size)
322 |
323 | events =
324 | generate_events(@template_events, batch_size)
325 | |> Enum.map(fn event -> %MessageObject{key: "", value: Jason.encode!(event)} end)
326 |
327 | max_batch_bytesize = Keyword.fetch!(topic7_config, :max_batch_bytesize)
328 |
329 | {cnt_msg, _} =
330 | Enum.reduce(events, {1, 0}, fn event, {cnt, size} ->
331 | case size + :erlang.external_size(event) do
332 | new_size when new_size >= max_batch_bytesize ->
333 | {cnt + 1, 0}
334 |
335 | new_size ->
336 | {cnt, new_size}
337 | end
338 | end)
339 |
340 | Test.SimpleCollectorMaxByteSizeControl.add_events(events)
341 |
342 | Enum.each(
343 | 1..cnt_msg,
344 | fn _ ->
345 | assert_receive(%{action: :do_produce, parameters: parameters})
346 | {call_messages, ^topic7, _call_partition, topic7_config} = parameters
347 | sum = call_messages |> Enum.map(&:erlang.external_size/1) |> Enum.sum()
348 | assert sum <= Keyword.fetch!(topic7_config, :max_batch_bytesize)
349 | end
350 | )
351 | end
352 |
353 | test "produce with batch flusher" do
354 | event1 = %{
355 | "id" => "event2",
356 | "client_id" => "123",
357 | "type" => "Some type"
358 | }
359 |
360 | event2 = %{
361 | "id" => "event2",
362 | "client_id" => "123",
363 | "type" => "Flush Type"
364 | }
365 |
366 | topic3_config = Test.BatchFlushCollector.get_config()
367 | topic3 = TestProducer.topic_name(3)
368 |
369 | expect_messages = [
370 | %MessageObject{headers: [], key: "", value: Jason.encode!(event1)},
371 | %MessageObject{headers: [], key: "", value: Jason.encode!(event2)}
372 | ]
373 |
374 | events = [event1, event2]
375 |
376 | Enum.each(events, fn event -> Test.BatchFlushCollector.add_events([{"", event}]) end)
377 |
378 | assert_receive(%{action: :do_produce, parameters: parameters})
379 | {^expect_messages, ^topic3, _call_partition, ^topic3_config} = parameters
380 | end
381 |
382 | test "start accumulators fail" do
383 | topic_name = "topic_accumulators_fail"
384 |
385 | opts = [
386 | topic_name: topic_name,
387 | collect_by_partition: true,
388 | batch_flusher: KafkaBatcher.Accumulator.DefaultBatchFlusher,
389 | batch_size: 1,
390 | max_wait_time: 100,
391 | min_delay: 10,
392 | max_batch_bytesize: 200,
393 | partition_fn: fn _topic, partitions_count, key, _value -> :erlang.phash2(key, partitions_count) end
394 | ]
395 |
396 | {:ok, _sup_pid} = KafkaBatcher.AccumulatorsPoolSupervisor.start_link(opts)
397 |
398 | TestProducer.set_response(:get_partitions_count, {:error, "bad topic name"})
399 | TestProducer.set_notification_mode(:get_partitions_count, :on)
400 | TestProducer.set_notification_mode(:do_produce, :on)
401 |
402 | {:ok, _pid} = Test.StartAccumulatorFail.start_link(opts)
403 |
404 | assert_receive(%{action: :get_partitions_count, parameters: ^topic_name}, 200)
405 | TestProducer.set_response(:get_partitions_count, {:ok, 1})
406 | assert_receive(%{action: :get_partitions_count, parameters: ^topic_name}, 200)
407 |
408 | event1 = %{
409 | "id" => "event2",
410 | "client_id" => "999",
411 | "type" => "Type"
412 | }
413 |
414 | message1 = %MessageObject{headers: [], key: "", value: Jason.encode!(event1)}
415 | ## :erlang.external_size(new_message) = 142 byte
416 | expect_messages = [message1]
417 |
418 | Test.StartAccumulatorFail.add_events([message1, message1])
419 | assert_receive(%{action: :do_produce, parameters: parameters}, 200)
420 | {^expect_messages, ^topic_name, _call_partition, _config} = parameters
421 |
422 | ## After timeout
423 | assert_receive(%{action: :do_produce, parameters: parameters}, 200)
424 | {^expect_messages, ^topic_name, _call_partition, _config} = parameters
425 | end
426 |
427 | describe "with a failing accumulator" do
428 | setup do
429 | topic2_config = Test.SimpleCollector.get_config()
430 | topic2 = TestProducer.topic_name(2)
431 | pool_sup = KafkaBatcher.AccumulatorsPoolSupervisor.reg_name(topic_name: topic2)
432 |
433 | Supervisor.terminate_child(KafkaBatcher.Supervisor, pool_sup)
434 | Supervisor.restart_child(KafkaBatcher.Supervisor, pool_sup)
435 |
436 | opts = [
437 | topic_name: topic2,
438 | config: topic2_config,
439 | collector: Test.SimpleCollector
440 | ]
441 |
442 | opts
443 | |> Keyword.put(:accumulator_mod, KafkaBatcher.Accumulators.FailingAccumulator)
444 | |> KafkaBatcher.AccumulatorsPoolSupervisor.start_accumulator()
445 |
446 | on_exit(fn ->
447 | Supervisor.terminate_child(KafkaBatcher.Supervisor, pool_sup)
448 | Supervisor.restart_child(KafkaBatcher.Supervisor, pool_sup)
449 | KafkaBatcher.AccumulatorsPoolSupervisor.start_accumulator(opts)
450 | end)
451 |
452 | {:ok, %{topic: topic2, topic_config: topic2_config}}
453 | end
454 |
455 | test "fails to produce with SimpleCollector", %{
456 | topic: topic,
457 | topic_config: topic_config
458 | } do
459 | batch_size = Keyword.fetch!(topic_config, :batch_size)
460 |
461 | events = generate_events(@template_events, batch_size)
462 |
463 | assert {:error, :accumulator_unavailable} =
464 | events
465 | |> Enum.map(fn event -> {"", event} end)
466 | |> KafkaBatcher.Test.SimpleCollector.add_events()
467 |
468 | assert_receive(%{action: :save_batch, parameters: retry_data})
469 |
470 | %Batch{
471 | messages: retry_messages,
472 | topic: retry_topic,
473 | partition: retry_partition,
474 | producer_config: retry_config
475 | } = retry_data
476 |
477 | assert retry_topic === topic
478 | assert retry_config === topic_config
479 | assert retry_partition === nil
480 |
481 | assert retry_messages ===
482 | Enum.map(
483 | events,
484 | fn event -> %MessageObject{key: "", value: event} end
485 | )
486 | end
487 | end
488 |
489 | describe "with a failing collector" do
490 | setup do
491 | topic2_config = Test.SimpleCollector.get_config()
492 |
493 | Supervisor.terminate_child(KafkaBatcher.Supervisor, Test.SimpleCollector)
494 | Supervisor.delete_child(KafkaBatcher.Supervisor, Test.SimpleCollector)
495 |
496 | Supervisor.start_child(
497 | KafkaBatcher.Supervisor,
498 | Supervisor.child_spec(Test.FailingCollector, id: Test.SimpleCollector)
499 | )
500 |
501 | on_exit(fn ->
502 | Supervisor.terminate_child(KafkaBatcher.Supervisor, Test.SimpleCollector)
503 | Supervisor.delete_child(KafkaBatcher.Supervisor, Test.SimpleCollector)
504 | Supervisor.start_child(KafkaBatcher.Supervisor, Test.SimpleCollector.child_spec(topic2_config))
505 | end)
506 |
507 | {:ok, %{topic_config: topic2_config}}
508 | end
509 |
510 | test "fails to produce with SimpleCollector", %{topic_config: topic_config} do
511 | batch_size = Keyword.fetch!(topic_config, :batch_size)
512 |
513 | events = generate_events(@template_events, batch_size)
514 |
515 | assert {:error, :kafka_unavailable} =
516 | events
517 | |> Enum.map(fn event -> {"", event} end)
518 | |> KafkaBatcher.Test.SimpleCollector.add_events()
519 | end
520 | end
521 |
522 | ## INTERNAL FUNCTIONS
523 | defp calc_partition(event, key, topic_name, partitions_count, topic1_config) do
524 | calc_fn = Keyword.fetch!(topic1_config, :partition_fn)
525 | calc_fn.(topic_name, partitions_count, key, event)
526 | end
527 |
528 | defp transform_messages do
529 | fn {partition, events} ->
530 | {partition,
531 | Enum.map(events, fn {key, event} ->
532 | %MessageObject{headers: [], key: key, value: Jason.encode!(event)}
533 | end)}
534 | end
535 | end
536 |
537 | defp get_key_id(event) do
538 | event["client_id"] || event["device_id"]
539 | end
540 |
541 | ## grouping messages by partitions
542 | defp generate_events_by_group(grouped_messages, batch_size) do
543 | Enum.reduce(grouped_messages, %{}, fn {partition, messages}, acc ->
544 | new_messages = generate_events(messages, batch_size)
545 | Map.put(acc, partition, new_messages)
546 | end)
547 | end
548 |
549 | ## generating messages in an amount equal to the batch_size
550 | defp generate_events([message | _] = messages, batch_size) do
551 | case Enum.count(messages) do
552 | len when len < batch_size ->
553 | messages ++ List.duplicate(message, batch_size - len)
554 |
555 | len when len > batch_size ->
556 | Enum.slice(messages, 0..(batch_size - 1))
557 |
558 | _ ->
559 | messages
560 | end
561 | end
562 |
563 | defmodule BatchFlusher do
564 | @behaviour KafkaBatcher.Behaviours.BatchFlusher
565 |
566 | @impl KafkaBatcher.Behaviours.BatchFlusher
567 | def flush?(_key, %{"type" => "Flush Type"}) do
568 | true
569 | end
570 |
571 | def flush?(_key, _event) do
572 | false
573 | end
574 | end
575 | end
576 |
--------------------------------------------------------------------------------
/test/kafka_batcher/config_test.exs:
--------------------------------------------------------------------------------
1 | defmodule KafkaBatcher.ConfigTest do
2 | alias KafkaBatcher.Accumulator.DefaultBatchFlusher
3 | alias KafkaBatcher.Config
4 |
5 | alias KafkaBatcher.Test.{
6 | CalculatePartitionByKeyCollector,
7 | CalculatePartitionByValueCollector
8 | }
9 |
10 | use ExUnit.Case
11 |
12 | doctest Config
13 |
14 | test "supervisor child spec for collectors" do
15 | assert [
16 | %{
17 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic1",
18 | start:
19 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
20 | [
21 | [
22 | ssl: false,
23 | sasl: :undefined,
24 | endpoints: [{"localhost", 9092}],
25 | partition_fn: &CalculatePartitionByValueCollector.calculate_partition/4,
26 | allow_topic_auto_creation: false,
27 | partition_strategy: :random,
28 | required_acks: -1,
29 | collect_by_partition: true,
30 | telemetry: true,
31 | batch_flusher: DefaultBatchFlusher,
32 | max_wait_time: 1000,
33 | batch_size: 30,
34 | min_delay: 0,
35 | max_batch_bytesize: 1_000_000,
36 | topic_name: "topic1"
37 | ]
38 | ]},
39 | type: :supervisor
40 | },
41 | %{
42 | id: CalculatePartitionByValueCollector,
43 | start:
44 | {CalculatePartitionByValueCollector, :start_link,
45 | [
46 | [
47 | ssl: false,
48 | sasl: :undefined,
49 | endpoints: [{"localhost", 9092}],
50 | partition_fn: &CalculatePartitionByValueCollector.calculate_partition/4,
51 | allow_topic_auto_creation: false,
52 | partition_strategy: :random,
53 | required_acks: -1,
54 | collect_by_partition: true,
55 | telemetry: true,
56 | batch_flusher: DefaultBatchFlusher,
57 | max_wait_time: 1000,
58 | batch_size: 30,
59 | min_delay: 0,
60 | max_batch_bytesize: 1_000_000,
61 | topic_name: "topic1"
62 | ]
63 | ]},
64 | type: :worker
65 | },
66 | %{
67 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic2",
68 | start:
69 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
70 | [
71 | [
72 | ssl: false,
73 | sasl: :undefined,
74 | endpoints: [{"localhost", 9092}],
75 | allow_topic_auto_creation: false,
76 | partition_strategy: :md5,
77 | required_acks: -1,
78 | collect_by_partition: false,
79 | telemetry: true,
80 | batch_flusher: DefaultBatchFlusher,
81 | max_wait_time: 1000,
82 | batch_size: 10,
83 | min_delay: 0,
84 | max_batch_bytesize: 1_000_000,
85 | topic_name: "topic2"
86 | ]
87 | ]},
88 | type: :supervisor
89 | },
90 | %{
91 | id: KafkaBatcher.Test.SimpleCollector,
92 | start:
93 | {KafkaBatcher.Test.SimpleCollector, :start_link,
94 | [
95 | [
96 | ssl: false,
97 | sasl: :undefined,
98 | endpoints: [{"localhost", 9092}],
99 | allow_topic_auto_creation: false,
100 | partition_strategy: :md5,
101 | required_acks: -1,
102 | collect_by_partition: false,
103 | telemetry: true,
104 | batch_flusher: DefaultBatchFlusher,
105 | max_wait_time: 1000,
106 | batch_size: 10,
107 | min_delay: 0,
108 | max_batch_bytesize: 1_000_000,
109 | topic_name: "topic2"
110 | ]
111 | ]},
112 | type: :worker
113 | },
114 | %{
115 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic3",
116 | start:
117 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
118 | [
119 | [
120 | ssl: false,
121 | sasl: :undefined,
122 | endpoints: [{"localhost", 9092}],
123 | allow_topic_auto_creation: false,
124 | partition_strategy: :random,
125 | required_acks: -1,
126 | collect_by_partition: false,
127 | telemetry: true,
128 | batch_flusher: Producers.CollectorTest.BatchFlusher,
129 | max_wait_time: 1000,
130 | batch_size: 10,
131 | min_delay: 0,
132 | max_batch_bytesize: 1_000_000,
133 | topic_name: "topic3"
134 | ]
135 | ]},
136 | type: :supervisor
137 | },
138 | %{
139 | id: KafkaBatcher.Test.BatchFlushCollector,
140 | start:
141 | {KafkaBatcher.Test.BatchFlushCollector, :start_link,
142 | [
143 | [
144 | ssl: false,
145 | sasl: :undefined,
146 | endpoints: [{"localhost", 9092}],
147 | allow_topic_auto_creation: false,
148 | partition_strategy: :random,
149 | required_acks: -1,
150 | collect_by_partition: false,
151 | telemetry: true,
152 | batch_flusher: Producers.CollectorTest.BatchFlusher,
153 | max_wait_time: 1000,
154 | batch_size: 10,
155 | min_delay: 0,
156 | max_batch_bytesize: 1_000_000,
157 | topic_name: "topic3"
158 | ]
159 | ]},
160 | type: :worker
161 | },
162 | %{
163 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic4",
164 | start:
165 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
166 | [
167 | [
168 | ssl: false,
169 | sasl: :undefined,
170 | endpoints: [{"localhost", 9092}],
171 | partition_fn: &CalculatePartitionByKeyCollector.calculate_partition/4,
172 | allow_topic_auto_creation: false,
173 | partition_strategy: :random,
174 | required_acks: 1,
175 | collect_by_partition: true,
176 | telemetry: true,
177 | batch_flusher: DefaultBatchFlusher,
178 | max_wait_time: 1000,
179 | batch_size: 10,
180 | min_delay: 0,
181 | max_batch_bytesize: 1_000_000,
182 | topic_name: "topic4"
183 | ]
184 | ]},
185 | type: :supervisor
186 | },
187 | %{
188 | id: CalculatePartitionByKeyCollector,
189 | start:
190 | {CalculatePartitionByKeyCollector, :start_link,
191 | [
192 | [
193 | ssl: false,
194 | sasl: :undefined,
195 | endpoints: [{"localhost", 9092}],
196 | partition_fn: &CalculatePartitionByKeyCollector.calculate_partition/4,
197 | allow_topic_auto_creation: false,
198 | partition_strategy: :random,
199 | required_acks: 1,
200 | collect_by_partition: true,
201 | telemetry: true,
202 | batch_flusher: DefaultBatchFlusher,
203 | max_wait_time: 1000,
204 | batch_size: 10,
205 | min_delay: 0,
206 | max_batch_bytesize: 1_000_000,
207 | topic_name: "topic4"
208 | ]
209 | ]},
210 | type: :worker
211 | },
212 | %{
213 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic6",
214 | start:
215 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
216 | [
217 | [
218 | ssl: false,
219 | sasl: :undefined,
220 | endpoints: [{"localhost", 9092}],
221 | allow_topic_auto_creation: false,
222 | partition_strategy: :md5,
223 | required_acks: 0,
224 | collect_by_partition: false,
225 | telemetry: true,
226 | batch_flusher: DefaultBatchFlusher,
227 | max_wait_time: 1000,
228 | batch_size: 10,
229 | min_delay: 50,
230 | max_batch_bytesize: 1_000_000,
231 | topic_name: "topic6"
232 | ]
233 | ]},
234 | type: :supervisor
235 | },
236 | %{
237 | id: KafkaBatcher.Test.SimpleCollectorWithDelay,
238 | start:
239 | {KafkaBatcher.Test.SimpleCollectorWithDelay, :start_link,
240 | [
241 | [
242 | ssl: false,
243 | sasl: :undefined,
244 | endpoints: [{"localhost", 9092}],
245 | allow_topic_auto_creation: false,
246 | partition_strategy: :md5,
247 | required_acks: 0,
248 | collect_by_partition: false,
249 | telemetry: true,
250 | batch_flusher: DefaultBatchFlusher,
251 | max_wait_time: 1000,
252 | batch_size: 10,
253 | min_delay: 50,
254 | max_batch_bytesize: 1_000_000,
255 | topic_name: "topic6"
256 | ]
257 | ]},
258 | type: :worker
259 | },
260 | %{
261 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic7",
262 | start:
263 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
264 | [
265 | [
266 | ssl: false,
267 | sasl: :undefined,
268 | endpoints: [{"localhost", 9092}],
269 | allow_topic_auto_creation: false,
270 | partition_strategy: :md5,
271 | required_acks: 0,
272 | collect_by_partition: false,
273 | telemetry: true,
274 | batch_flusher: DefaultBatchFlusher,
275 | max_wait_time: 1000,
276 | batch_size: 10,
277 | min_delay: 50,
278 | max_batch_bytesize: 400,
279 | topic_name: "topic7"
280 | ]
281 | ]},
282 | type: :supervisor
283 | },
284 | %{
285 | id: KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl,
286 | start:
287 | {KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl, :start_link,
288 | [
289 | [
290 | ssl: false,
291 | sasl: :undefined,
292 | endpoints: [{"localhost", 9092}],
293 | allow_topic_auto_creation: false,
294 | partition_strategy: :md5,
295 | required_acks: 0,
296 | collect_by_partition: false,
297 | telemetry: true,
298 | batch_flusher: DefaultBatchFlusher,
299 | max_wait_time: 1000,
300 | batch_size: 10,
301 | min_delay: 50,
302 | max_batch_bytesize: 400,
303 | topic_name: "topic7"
304 | ]
305 | ]},
306 | type: :worker
307 | },
308 | %{
309 | id: :"Elixir.KafkaBatcher.AccumulatorsPoolSupervisor.topic8",
310 | start:
311 | {KafkaBatcher.AccumulatorsPoolSupervisor, :start_link,
312 | [
313 | [
314 | ssl: false,
315 | sasl: :undefined,
316 | endpoints: [{"localhost", 9092}],
317 | allow_topic_auto_creation: false,
318 | partition_strategy: :md5,
319 | required_acks: 0,
320 | collect_by_partition: false,
321 | telemetry: true,
322 | batch_flusher: DefaultBatchFlusher,
323 | max_wait_time: 50,
324 | batch_size: 10,
325 | min_delay: 20,
326 | max_batch_bytesize: 1_000_000,
327 | topic_name: "topic8"
328 | ]
329 | ]},
330 | type: :supervisor
331 | },
332 | %{
333 | id: KafkaBatcher.Test.SimpleCollectorMaxWaitTime,
334 | start:
335 | {KafkaBatcher.Test.SimpleCollectorMaxWaitTime, :start_link,
336 | [
337 | [
338 | ssl: false,
339 | sasl: :undefined,
340 | endpoints: [{"localhost", 9092}],
341 | allow_topic_auto_creation: false,
342 | partition_strategy: :md5,
343 | required_acks: 0,
344 | collect_by_partition: false,
345 | telemetry: true,
346 | batch_flusher: DefaultBatchFlusher,
347 | max_wait_time: 50,
348 | batch_size: 10,
349 | min_delay: 20,
350 | max_batch_bytesize: 1_000_000,
351 | topic_name: "topic8"
352 | ]
353 | ]},
354 | type: :worker
355 | },
356 | %{
357 | id: KafkaBatcher.ConnectionManager,
358 | start: {KafkaBatcher.ConnectionManager, :start_link, []},
359 | type: :worker
360 | }
361 | ] ==
362 | Config.collectors_spec()
363 | end
364 |
365 | test "general collectors config" do
366 | assert [
367 | {:collect_by_partition, false},
368 | {:batch_flusher, DefaultBatchFlusher},
369 | {:max_wait_time, 1000},
370 | {:batch_size, 10},
371 | {:min_delay, 0},
372 | {:max_batch_bytesize, 1_000_000},
373 | {:ssl, false},
374 | {:sasl, :undefined},
375 | {:endpoints, [{"localhost", 9092}]},
376 | {:telemetry, true},
377 | {:allow_topic_auto_creation, false},
378 | {:partition_strategy, :random},
379 | {:required_acks, 1}
380 | ] == Config.general_producer_config()
381 | end
382 |
383 | test "get configs by topic_name" do
384 | assert %{
385 | "topic1" => [
386 | ssl: false,
387 | sasl: :undefined,
388 | endpoints: [{"localhost", 9092}],
389 | partition_fn: &CalculatePartitionByValueCollector.calculate_partition/4,
390 | allow_topic_auto_creation: false,
391 | partition_strategy: :random,
392 | required_acks: -1,
393 | collect_by_partition: true,
394 | telemetry: true,
395 | batch_flusher: DefaultBatchFlusher,
396 | max_wait_time: 1000,
397 | batch_size: 30,
398 | min_delay: 0,
399 | max_batch_bytesize: 1_000_000,
400 | topic_name: "topic1"
401 | ],
402 | "topic2" => [
403 | ssl: false,
404 | sasl: :undefined,
405 | endpoints: [{"localhost", 9092}],
406 | allow_topic_auto_creation: false,
407 | partition_strategy: :md5,
408 | required_acks: -1,
409 | collect_by_partition: false,
410 | telemetry: true,
411 | batch_flusher: DefaultBatchFlusher,
412 | max_wait_time: 1000,
413 | batch_size: 10,
414 | min_delay: 0,
415 | max_batch_bytesize: 1_000_000,
416 | topic_name: "topic2"
417 | ],
418 | "topic3" => [
419 | ssl: false,
420 | sasl: :undefined,
421 | endpoints: [{"localhost", 9092}],
422 | allow_topic_auto_creation: false,
423 | partition_strategy: :random,
424 | required_acks: -1,
425 | collect_by_partition: false,
426 | telemetry: true,
427 | batch_flusher: Producers.CollectorTest.BatchFlusher,
428 | max_wait_time: 1000,
429 | batch_size: 10,
430 | min_delay: 0,
431 | max_batch_bytesize: 1_000_000,
432 | topic_name: "topic3"
433 | ],
434 | "topic4" => [
435 | ssl: false,
436 | sasl: :undefined,
437 | endpoints: [{"localhost", 9092}],
438 | partition_fn: &CalculatePartitionByKeyCollector.calculate_partition/4,
439 | allow_topic_auto_creation: false,
440 | partition_strategy: :random,
441 | required_acks: 1,
442 | collect_by_partition: true,
443 | telemetry: true,
444 | batch_flusher: DefaultBatchFlusher,
445 | max_wait_time: 1000,
446 | batch_size: 10,
447 | min_delay: 0,
448 | max_batch_bytesize: 1_000_000,
449 | topic_name: "topic4"
450 | ],
451 | "topic6" => [
452 | ssl: false,
453 | sasl: :undefined,
454 | endpoints: [{"localhost", 9092}],
455 | allow_topic_auto_creation: false,
456 | partition_strategy: :md5,
457 | required_acks: 0,
458 | collect_by_partition: false,
459 | telemetry: true,
460 | batch_flusher: DefaultBatchFlusher,
461 | max_wait_time: 1000,
462 | batch_size: 10,
463 | min_delay: 50,
464 | max_batch_bytesize: 1_000_000,
465 | topic_name: "topic6"
466 | ],
467 | "topic7" => [
468 | ssl: false,
469 | sasl: :undefined,
470 | endpoints: [{"localhost", 9092}],
471 | allow_topic_auto_creation: false,
472 | partition_strategy: :md5,
473 | required_acks: 0,
474 | collect_by_partition: false,
475 | telemetry: true,
476 | batch_flusher: DefaultBatchFlusher,
477 | max_wait_time: 1000,
478 | batch_size: 10,
479 | min_delay: 50,
480 | max_batch_bytesize: 400,
481 | topic_name: "topic7"
482 | ],
483 | "topic8" => [
484 | ssl: false,
485 | sasl: :undefined,
486 | endpoints: [{"localhost", 9092}],
487 | allow_topic_auto_creation: false,
488 | partition_strategy: :md5,
489 | required_acks: 0,
490 | collect_by_partition: false,
491 | telemetry: true,
492 | batch_flusher: DefaultBatchFlusher,
493 | max_wait_time: 50,
494 | batch_size: 10,
495 | min_delay: 20,
496 | max_batch_bytesize: 1_000_000,
497 | topic_name: "topic8"
498 | ]
499 | } == Config.get_configs_by_topic_name()
500 | end
501 |
502 | test "get configs by collectors" do
503 | assert [
504 | "Elixir.KafkaBatcher.Test.CalculatePartitionByValueCollector": [
505 | ssl: false,
506 | sasl: :undefined,
507 | endpoints: [{"localhost", 9092}],
508 | partition_fn: &CalculatePartitionByValueCollector.calculate_partition/4,
509 | allow_topic_auto_creation: false,
510 | partition_strategy: :random,
511 | required_acks: -1,
512 | collect_by_partition: true,
513 | telemetry: true,
514 | batch_flusher: DefaultBatchFlusher,
515 | max_wait_time: 1000,
516 | batch_size: 30,
517 | min_delay: 0,
518 | max_batch_bytesize: 1_000_000,
519 | topic_name: "topic1"
520 | ],
521 | "Elixir.KafkaBatcher.Test.SimpleCollector": [
522 | ssl: false,
523 | sasl: :undefined,
524 | endpoints: [{"localhost", 9092}],
525 | allow_topic_auto_creation: false,
526 | partition_strategy: :md5,
527 | required_acks: -1,
528 | collect_by_partition: false,
529 | telemetry: true,
530 | batch_flusher: DefaultBatchFlusher,
531 | max_wait_time: 1000,
532 | batch_size: 10,
533 | min_delay: 0,
534 | max_batch_bytesize: 1_000_000,
535 | topic_name: "topic2"
536 | ],
537 | "Elixir.KafkaBatcher.Test.BatchFlushCollector": [
538 | ssl: false,
539 | sasl: :undefined,
540 | endpoints: [{"localhost", 9092}],
541 | allow_topic_auto_creation: false,
542 | partition_strategy: :random,
543 | required_acks: -1,
544 | collect_by_partition: false,
545 | telemetry: true,
546 | batch_flusher: Producers.CollectorTest.BatchFlusher,
547 | max_wait_time: 1000,
548 | batch_size: 10,
549 | min_delay: 0,
550 | max_batch_bytesize: 1_000_000,
551 | topic_name: "topic3"
552 | ],
553 | "Elixir.KafkaBatcher.Test.CalculatePartitionByKeyCollector": [
554 | ssl: false,
555 | sasl: :undefined,
556 | endpoints: [{"localhost", 9092}],
557 | partition_fn: &CalculatePartitionByKeyCollector.calculate_partition/4,
558 | allow_topic_auto_creation: false,
559 | partition_strategy: :random,
560 | required_acks: 1,
561 | collect_by_partition: true,
562 | telemetry: true,
563 | batch_flusher: DefaultBatchFlusher,
564 | max_wait_time: 1000,
565 | batch_size: 10,
566 | min_delay: 0,
567 | max_batch_bytesize: 1_000_000,
568 | topic_name: "topic4"
569 | ],
570 | "Elixir.KafkaBatcher.Test.SimpleCollectorWithDelay": [
571 | ssl: false,
572 | sasl: :undefined,
573 | endpoints: [{"localhost", 9092}],
574 | allow_topic_auto_creation: false,
575 | partition_strategy: :md5,
576 | required_acks: 0,
577 | collect_by_partition: false,
578 | telemetry: true,
579 | batch_flusher: DefaultBatchFlusher,
580 | max_wait_time: 1000,
581 | batch_size: 10,
582 | min_delay: 50,
583 | max_batch_bytesize: 1_000_000,
584 | topic_name: "topic6"
585 | ],
586 | "Elixir.KafkaBatcher.Test.SimpleCollectorMaxByteSizeControl": [
587 | ssl: false,
588 | sasl: :undefined,
589 | endpoints: [{"localhost", 9092}],
590 | allow_topic_auto_creation: false,
591 | partition_strategy: :md5,
592 | required_acks: 0,
593 | collect_by_partition: false,
594 | telemetry: true,
595 | batch_flusher: DefaultBatchFlusher,
596 | max_wait_time: 1000,
597 | batch_size: 10,
598 | min_delay: 50,
599 | max_batch_bytesize: 400,
600 | topic_name: "topic7"
601 | ],
602 | "Elixir.KafkaBatcher.Test.SimpleCollectorMaxWaitTime": [
603 | ssl: false,
604 | sasl: :undefined,
605 | endpoints: [{"localhost", 9092}],
606 | allow_topic_auto_creation: false,
607 | partition_strategy: :md5,
608 | required_acks: 0,
609 | collect_by_partition: false,
610 | telemetry: true,
611 | batch_flusher: DefaultBatchFlusher,
612 | max_wait_time: 50,
613 | batch_size: 10,
614 | min_delay: 20,
615 | max_batch_bytesize: 1_000_000,
616 | topic_name: "topic8"
617 | ]
618 | ] == Config.get_configs_by_collector!()
619 |
620 | old_collectors_config = Application.get_env(:kafka_batcher, :collectors)
621 |
622 | assert_raise KafkaBatcher.Config.BadConfigError, fn ->
623 | new_collectors_config = [KafkaBatcher.Test.CollectorWithWrongConfig | old_collectors_config]
624 | Application.put_env(:kafka_batcher, :collectors, new_collectors_config)
625 | Config.get_configs_by_collector!()
626 | end
627 |
628 | Application.put_env(:kafka_batcher, :collectors, old_collectors_config)
629 | end
630 |
631 | test "get config by collectors not exist collector" do
632 | old_collectors_config = Application.get_env(:kafka_batcher, :collectors)
633 |
634 | assert_raise KafkaBatcher.Config.CollectorMissingError, fn ->
635 | new_collectors_config = [KafkaBatcher.Test.NotExistsCollector | old_collectors_config]
636 | Application.put_env(:kafka_batcher, :collectors, new_collectors_config)
637 | Config.get_configs_by_collector!()
638 | end
639 |
640 | Application.put_env(:kafka_batcher, :collectors, old_collectors_config)
641 | end
642 |
643 | test "get collector config by topic name" do
644 | assert [
645 | {:ssl, false},
646 | {:sasl, :undefined},
647 | {:endpoints, [{"localhost", 9092}]},
648 | {:partition_fn, &CalculatePartitionByValueCollector.calculate_partition/4},
649 | {:allow_topic_auto_creation, false},
650 | {:partition_strategy, :random},
651 | {:required_acks, -1},
652 | {:collect_by_partition, true},
653 | {:telemetry, true},
654 | {:batch_flusher, DefaultBatchFlusher},
655 | {:max_wait_time, 1000},
656 | {:batch_size, 30},
657 | {:min_delay, 0},
658 | {:max_batch_bytesize, 1_000_000},
659 | {:topic_name, "topic1"}
660 | ] == Config.get_collector_config("topic1")
661 | end
662 |
663 | test "build collector config" do
664 | right_config1 = [
665 | collect_by_partition: false,
666 | partition_strategy: :md5,
667 | batch_size: 10
668 | ]
669 |
670 | assert [
671 | {:allow_topic_auto_creation, false},
672 | {:partition_strategy, :md5},
673 | {:required_acks, -1},
674 | {:collect_by_partition, false},
675 | {:telemetry, true},
676 | {:batch_flusher, DefaultBatchFlusher},
677 | {:max_wait_time, 1000},
678 | {:batch_size, 10},
679 | {:min_delay, 0},
680 | {:max_batch_bytesize, 1_000_000}
681 | ] == Config.build_topic_config(right_config1)
682 |
683 | right_config2 = [
684 | collect_by_partition: false,
685 | partition_strategy: :random,
686 | required_acks: 1,
687 | batch_size: 10,
688 | min_delay: 50,
689 | max_batch_bytesize: 10
690 | ]
691 |
692 | assert [
693 | {:allow_topic_auto_creation, false},
694 | {:partition_strategy, :random},
695 | {:required_acks, 1},
696 | {:collect_by_partition, false},
697 | {:telemetry, true},
698 | {:batch_flusher, DefaultBatchFlusher},
699 | {:max_wait_time, 1000},
700 | {:batch_size, 10},
701 | {:min_delay, 50},
702 | {:max_batch_bytesize, 10}
703 | ] == Config.build_topic_config(right_config2)
704 |
705 | extra_params_config = [
706 | some_params1: false,
707 | some_params1: 100,
708 | required_acks: 1,
709 | batch_size: 10
710 | ]
711 |
712 | assert [
713 | {:allow_topic_auto_creation, false},
714 | {:partition_strategy, :random},
715 | {:required_acks, 1},
716 | {:collect_by_partition, false},
717 | {:telemetry, true},
718 | {:batch_flusher, DefaultBatchFlusher},
719 | {:max_wait_time, 1000},
720 | {:batch_size, 10},
721 | {:min_delay, 0},
722 | {:max_batch_bytesize, 1_000_000}
723 | ] == Config.build_topic_config(extra_params_config)
724 | end
725 |
726 | test "get endpoints" do
727 | assert [{"localhost", 9092}] == Config.get_endpoints()
728 | end
729 | end
730 |
--------------------------------------------------------------------------------