├── .gitattributes ├── .github └── workflows │ └── build.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── contrib └── examples │ └── elixir │ ├── README.md │ ├── config │ ├── config.exs │ └── dev.exs │ ├── docker-compose.yaml │ ├── docs │ ├── kafdrop.png │ ├── partition1.png │ └── streamingevents.png │ ├── lib │ ├── brod_sample.ex │ └── brod_sample │ │ ├── application.ex │ │ ├── create_topic.ex │ │ ├── group_subscriber.ex │ │ ├── group_subscriber_v2.ex │ │ └── publisher.ex │ ├── mix.exs │ ├── mix.lock │ └── test │ ├── brod_sample_test.exs │ └── test_helper.exs ├── elvis.config ├── guides └── examples │ ├── Authentication.md │ └── elixir │ ├── Consumer.md │ └── Publisher.md ├── include ├── brod.hrl └── brod_int.hrl ├── rebar.config ├── rebar.config.script ├── scripts ├── .env ├── .gitignore ├── cover-print-not-covered-lines.escript ├── docker-compose-kraft.yml ├── docker-compose.yml └── setup-test-env.sh ├── src ├── brod.app.src ├── brod.erl ├── brod_cg_commits.erl ├── brod_client.erl ├── brod_consumer.erl ├── brod_consumers_sup.erl ├── brod_group_coordinator.erl ├── brod_group_member.erl ├── brod_group_subscriber.erl ├── brod_group_subscriber_v2.erl ├── brod_group_subscriber_worker.erl ├── brod_kafka_apis.erl ├── brod_kafka_request.erl ├── brod_producer.erl ├── brod_producer_buffer.erl ├── brod_producers_sup.erl ├── brod_sup.erl ├── brod_supervisor3.erl ├── brod_topic_subscriber.erl ├── brod_topic_subscriber_cb_fun.erl ├── brod_transaction.erl ├── brod_transaction_processor.erl └── brod_utils.erl ├── sys.config.example └── test ├── brod_SUITE.erl ├── brod_cg_commits_SUITE.erl ├── brod_client_SUITE.erl ├── brod_compression_SUITE.erl ├── brod_consumer_SUITE.erl ├── brod_demo_cg_collector.erl ├── brod_demo_group_subscriber_koc.erl ├── brod_demo_group_subscriber_loc.erl ├── brod_demo_topic_subscriber.erl ├── brod_group_coordinator_SUITE.erl ├── brod_group_subscriber_SUITE.erl ├── brod_group_subscriber_test.hrl ├── 
brod_kafka_apis_tests.erl ├── brod_offset_txn_SUITE.erl ├── brod_producer_SUITE.erl ├── brod_producer_buffer_SUITE.erl ├── brod_producer_stub_SUITE.erl ├── brod_test_group_subscriber.erl ├── brod_test_macros.hrl ├── brod_test_setup.hrl ├── brod_topic_subscriber_SUITE.erl ├── brod_txn_SUITE.erl ├── brod_txn_processor_SUITE.erl ├── brod_utils_tests.erl ├── data └── ssl │ └── README.md └── kafka_test_helper.erl /.gitattributes: -------------------------------------------------------------------------------- 1 | erlang.mk -diff 2 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: brod 2 | on: 3 | push: 4 | branches: 5 | - "*" 6 | pull_request: 7 | branches: 8 | - master 9 | env: 10 | OTP_VERSION: "27" 11 | REBAR_VERSION: "3.24.0" 12 | 13 | jobs: 14 | lint: 15 | runs-on: ubuntu-22.04 16 | name: Lint 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v2 20 | - name: OTP 21 | uses: erlef/setup-beam@v1 22 | with: 23 | version-type: strict 24 | otp-version: ${{ env.OTP_VERSION }} 25 | rebar3-version: ${{ env.REBAR_VERSION }} 26 | - name: Cache Build 27 | id: cache-lint-hex 28 | uses: actions/cache@v3 29 | with: 30 | path: _build 31 | key: ${{ runner.os }}-${{ env.OTP_VERSION }}-rebar-${{ hashFiles('**/rebar.lock') }} 32 | restore-keys: ${{ runner.os }}-rebar- 33 | - name: Cache Dialyzer PLTs 34 | uses: actions/cache@v3 35 | with: 36 | path: ~/.cache/rebar3/rebar3_*_plt 37 | key: ${{ runner.os }}-dialyzer-${{ hashFiles('**/rebar.lock') }} 38 | restore-keys: ${{ runner.os }}-dialyzer- 39 | - name: Lint, Dialyzer & Xref 40 | run: rebar3 do compile,lint,dialyzer,xref 41 | build: 42 | runs-on: ubuntu-22.04 43 | name: OTP ${{matrix.vsn[0]}} / Kafka ${{matrix.vsn[1]}} 44 | strategy: 45 | fail-fast: false 46 | matrix: 47 | vsn: [["26.2", "0.9"], ["27.2", "0.10"], ["26.2", "0.11"], ["27.2", "1.1"], ["26.2", "2.8"], 
["27.2", "3.9"], ["27.2", "4.0"]] 48 | steps: 49 | - name: Checkout 50 | uses: actions/checkout@v2 51 | - name: Cache Build 52 | uses: actions/cache@v3 53 | with: 54 | path: _build 55 | key: ${{ runner.os }}-${{ matrix.vsn[0] }}-rebar-${{ hashFiles('**/rebar.lock') }} 56 | restore-keys: ${{ runner.os }}-${{ matrix.vsn[0] }}-rebar- 57 | - name: Install Erlang 58 | uses: erlef/setup-beam@v1 59 | with: 60 | version-type: strict 61 | otp-version: ${{matrix.vsn[0]}} 62 | rebar3-version: ${{ env.REBAR_VERSION }} 63 | - name: Compile 64 | run: rebar3 do compile 65 | - name: Run tests 66 | run: | 67 | export KAFKA_VERSION=${{ matrix.vsn[1] }} 68 | . scripts/.env 69 | export ZOOKEEPER_IP 70 | echo "Running Kafka ${KAFKA_VERSION}, Zookeeper: ${ZOOKEEPER_IP}" 71 | make test-env 72 | make t 73 | - name: Store test logs 74 | uses: actions/upload-artifact@v4 75 | if: always() 76 | with: 77 | name: ct-logs-otp-${{matrix.vsn[0]}}-kafka-${{matrix.vsn[1]}} 78 | path: _build/test/logs 79 | - name: Create Cover Reports 80 | env: 81 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 82 | run: rebar3 do cover 83 | docs: 84 | needs: build 85 | runs-on: ubuntu-22.04 86 | name: Publish Documentation 87 | steps: 88 | - name: Checkout 89 | uses: actions/checkout@v2 90 | - name: OTP 91 | uses: erlef/setup-beam@v1 92 | with: 93 | version-type: strict 94 | otp-version: ${{ env.OTP_VERSION }} 95 | rebar3-version: ${{ env.REBAR_VERSION }} 96 | - name: Build Documentation 97 | run: rebar3 do hex build 98 | - name: Publish documentation 99 | uses: actions/upload-artifact@v4 100 | with: 101 | name: edoc 102 | path: doc 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | doc/ 2 | erl_crash.dump 3 | *.crashdump 4 | /deps/ 5 | /logs/ 6 | ebin/ 7 | cover/ 8 | test/*.beam 9 | test/ct.cover.spec 10 | brod.d 11 | .brod.plt 12 | .erlang.mk 13 | *.coverdata 14 | ct.coverdata 15 | 
sys.config 16 | xrefr 17 | brod.plt 18 | _build/ 19 | _rel/ 20 | .eunit/ 21 | .rebar/ 22 | *.testdata 23 | *.log 24 | relx 25 | docker/ 26 | TAGS 27 | .vscode/ 28 | test/data/ssl/*.pem 29 | /rebar.lock 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | KAFKA_VERSION ?= 4.0 2 | export KAFKA_VERSION 3 | 4 | .PHONY: all 5 | all: compile 6 | 7 | .PHONY: compile 8 | compile: 9 | @rebar3 compile 10 | 11 | .PHONY: lint 12 | lint: 13 | @rebar3 lint 14 | 15 | .PHONY: test-env 16 | test-env: 17 | @./scripts/setup-test-env.sh 18 | @mkdir -p ./test/data/ssl 19 | @docker cp kafka-1:/localhost-ca-crt.pem ./test/data/ssl/ca.pem 20 | @docker cp kafka-1:/localhost-client-key.pem ./test/data/ssl/client-key.pem 21 | @docker cp kafka-1:/localhost-client-crt.pem ./test/data/ssl/client-crt.pem 22 | 23 | .PHONY: ut 24 | ut: 25 | @rebar3 eunit -v --cover_export_name ut-$(KAFKA_VERSION) 26 | 27 | .PHONY: ct 28 | ct: 29 | @rebar3 ct -v --cover_export_name ct-$(KAFKA_VERSION) 30 | 31 | # version check, eunit and all common tests 32 | .PHONY: t 33 | t: ut ct 34 | 35 | .PHONY: clean 36 | clean: 37 | @rebar3 clean 38 | @rm -rf _build 39 | @rm -rf ebin deps doc 40 | @rm -f pipe.testdata 41 | 42 | .PHONY: hex-publish 43 | hex-publish: clean 44 | @rebar3 hex publish --repo=hexpm 45 | @rebar3 hex build 46 | 47 | ## tests that require kafka running at localhost 48 | INTEGRATION_CTS = brod_cg_commits brod_client brod_compression brod_consumer brod_producer brod_group_subscriber brod_topic_subscriber brod 49 | 50 | .PHONY: cover 51 | cover: 52 | @rebar3 cover -v 53 | 54 | .PHONY: dialyzer 55 | dialyzer: 56 | @rebar3 dialyzer 57 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Kafka client library in Erlang 2 | Copyright 2014-2021 Klarna Bank AB (publ) 3 | 4 | This product includes software developed by 5 | Klarna Bank AB (publ) (https://www.klarna.com) 6 | 
-------------------------------------------------------------------------------- /contrib/examples/elixir/README.md: -------------------------------------------------------------------------------- 1 | # BrodSample 2 | 3 | This is an example of how to use `brod` with Elixir. 4 | 5 | ## Kafka 6 | 7 | For this, we assume you have Kafka up and running at `localhost:9092` 8 | 9 | You can use this docker-compose: `https://github.com/obsidiandynamics/kafdrop/blob/master/docker-compose/kafka-kafdrop/docker-compose.yaml` to have Kafdrop running and be able to create topics through a UI on `localhost:9000` 10 | 11 | To follow this you have to create a topic called `streaming.events` with more than 1 partition. 12 | 13 | ## Dependency 14 | 15 | First thing you'll need is to add brod to your dependencies 16 | To find the latest version published on hex, run: `mix hex.search brod` 17 | 18 | As of writing this, the output was: 19 | 20 | ```sh 21 | ➜ brod_sample git:(master) ✗ mix hex.search brod 22 | Package Description Version URL 23 | brod Apache Kafka Erlang client library 3.10.0 https://hex.pm/packages/brod 24 | ``` 25 | 26 | Now just add it to your deps on `mix.exs` 27 | 28 | ```elixir 29 | defp deps do 30 | [ 31 | {:brod, "~> 3.10.0"} 32 | ] 33 | end 34 | ``` 35 | 36 | ## Client configuration 37 | 38 | We need to setup a client to be used by our consumer and producer, so the first thing we are going to do is to set this up by using configuration 39 | Let's use our `dev.exs` to set this up by adding the following: 40 | 41 | ```elixir 42 | import Config 43 | 44 | config :brod, 45 | clients: [ 46 | kafka_client: [ 47 | endpoints: [localhost: 9092] 48 | ] 49 | ] 50 | ``` 51 | 52 | Here we are setting a client named `kafka_client`, you can choose whatever name you like for this, we are also configuring the endpoints to our kafka cluster, in production you are ideally setting up multiple endpoints of your broker, something like: 53 | 54 | ```elixir 55 | import Config 56 | 57 | 
config :brod, 58 | clients: [ 59 | kafka_client: [ 60 | endpoints: [{"192.168.0.2", 9092}, {"192.168.0.3", 9092}, {"192.168.0.4", 9092}] 61 | ] 62 | ] 63 | ``` 64 | 65 | Now with the `kafka_client` in place we can look at how to publish and consume those messages 66 | 67 | ## Publisher 68 | 69 | To send a message with brod we can use the `produce_sync` function, you can take a better look at the docs and see this and other possibilities at: 70 | 71 | Now, let's make a module to allow us publishing to Kafka 72 | 73 | ```elixir 74 | defmodule BrodSample.Publisher do 75 | def publish(topic, partition, partition_key, message) do 76 | :brod.produce_sync( 77 | :kafka_client, 78 | topic, 79 | partition, 80 | partition_key, 81 | message 82 | ) 83 | end 84 | end 85 | ``` 86 | 87 | Now, if we try to publish a message now it will return us, `{:error, {:producer_not_found, "sample"}}`, the error is clearly showing that it didn't find a producer, so we need to configure brod to start our producer 88 | 89 | Luckily we can also use configuration to tell brod to automatically start our producers by changing our config to: 90 | 91 | ```elixir 92 | # config/dev.exs 93 | 94 | import Config 95 | 96 | config :brod, 97 | clients: [ 98 | kafka_client: [ 99 | endpoints: [localhost: 9092], 100 | auto_start_producers: true # This will auto-start the producers with default configs 101 | ] 102 | ] 103 | 104 | ``` 105 | 106 | Now let's run and give it a try 107 | 108 | ```sh 109 | ➜ brod_sample git:(master) ✗ iex -S mix 110 | Erlang/OTP 22 [erts-10.7.1] [source] [64-bit] [smp:12:12] [ds:12:12:10] [async-threads:1] [hipe] 111 | 112 | 113 | 10:58:41.442 [info] [supervisor: {:local, :brod_sup}, started: [pid: #PID<0.210.0>, id: :kafka_client, mfargs: {:brod_client, :start_link, [[localhost: 9092], :kafka_client, [endpoints: [localhost: 9092], auto_start_producers: true]]}, restart_type: {:permanent, 10}, shutdown: 5000, child_type: :worker]] 114 | Interactive Elixir (1.10.2) - press Ctrl+C to exit (type h()
ENTER for help) 115 | iex(1)> 116 | ``` 117 | 118 | Now, we can use our module, doing 119 | `iex(1)> BrodSample.Publisher.publish("streaming.events", 0, "", "Hello brod!")` 120 | If everything worked, brod will return a `:ok` 121 | 122 | So this sent the message `"Hello brod!"` to the topic named `"streaming.events"` on the partition number 0 and an empty partition key 123 | 124 | Let's take a look at kafdrop 125 | 126 | ![kafdrop](./docs/kafdrop.png) 127 | 128 | We can see that there is something on partition 0 129 | 130 | Opening it up we see 131 | ![topic](./docs/streamingevents.png) 132 | 133 | ### Using partition key 134 | 135 | The most common way to send messages to kafka is by using a partition key and based on that deciding to what partition the message should go, let's see how we can achieve that 136 | 137 | First, we need to know how many partitions our topic have, so we don't try sending the message to a non-existing partition, for that we can also use brod 138 | `{:ok, count} = :brod.get_partitions_count(client, topic_name)` 139 | 140 | Now with this information, we need to make sure that the same partition key always go to the same topic, we can achieve this by using phash2 included on erlang 141 | `:erlang.phash2(key, count)` 142 | 143 | This will return a number based on the key argument and not being bigger than the `count` we pass to it 144 | 145 | Taking all of that into our module we have the following 146 | 147 | ```elixir 148 | defmodule BrodSample.Publisher do 149 | def publish(topic, partition_key, message) do 150 | :brod.produce_sync( 151 | :kafka_client, 152 | topic, 153 | :hash, 154 | partition_key, 155 | message 156 | ) 157 | end 158 | end 159 | ``` 160 | 161 | Let's take it for a spin 162 | 163 | ```elixir 164 | iex(2)> recompile 165 | Compiling 1 file (.ex) 166 | :ok 167 | iex(3)> BrodSample.Publisher.publish("streaming.events", "my_key", "Hello brod!") 168 | :ok 169 | ``` 170 | 171 | Now we can see on kafdrop that this message was 
sent to partition 1 due to its key 172 | ![partition1](./docs/partition1.png) 173 | 174 | ## Consumers 175 | 176 | Now we need to get those messages and do something with them 177 | 178 | First what we need is to define a group of subscribers to our topic, brod provides us an implementation called `group_subscriber_v2` which will create a worker for each partition of our topic, this not only allows us to have a better throughput, but in case one of these partitions end up having problems only that worker will be affected. 179 | 180 | Let's take a look at the docs of the `group_subscriber_v2` at 181 | The first thing we can see is that it has some required functions and some optional. 182 | 183 | Required callback functions: `init/2`, `handle_message/2`. 184 | Optional callback functions: `assign_partitions/3`, `get_committed_offset/3`, `terminate/2`. 185 | 186 | So let's start creating a module with the `group_subscriber_v2` behaviour 187 | 188 | ```elixir 189 | defmodule BrodSample.GroupSubscriberV2 do 190 | @behaviour :brod_group_subscriber_v2 191 | end 192 | ``` 193 | 194 | Now we need to implement at least the `init/2` and `handle_message/2`, let's start off simple 195 | 196 | ```elixir 197 | defmodule BrodSample.GroupSubscriberV2 do 198 | @behaviour :brod_group_subscriber_v2 199 | def init(_arg, _arg2) do 200 | {:ok, []} 201 | end 202 | 203 | def handle_message(message, state) do 204 | IO.inspect(message, label: "message") 205 | {:ok, :commit, []} 206 | end 207 | end 208 | ``` 209 | 210 | We created a basic init, and our `handle_message/2` will simply inspect the message and then ack that message on Kafka 211 | 212 | Now you might be wondering, where are the configurations? Consumer group, topics, begin offset? 213 | 214 | Well, we now need to define all of those and tell brod to use this module as the callback module, what we want to do is call `:brod.start_link_group_subscriber_v2(config)` with the correct configurations.
215 | 216 | This can be done the following way. 217 | 218 | ```elixir 219 | group_config = [ 220 | offset_commit_policy: :commit_to_kafka_v2, 221 | offset_commit_interval_seconds: 5, 222 | rejoin_delay_seconds: 2, 223 | reconnect_cool_down_seconds: 10 224 | ] 225 | 226 | config = %{ 227 | client: :kafka_client, 228 | group_id: "consumer_group_name", 229 | topics: ["streaming.events"], 230 | cb_module: BrodSample.GroupSubscriberV2, 231 | group_config: group_config, 232 | consumer_config: [begin_offset: :earliest] 233 | } 234 | 235 | {:ok, pid} = :brod.start_link_group_subscriber_v2(config) 236 | ``` 237 | 238 | There's a lot of information in here, so let's take a look on the most important ones. 239 | 240 | ### client 241 | 242 | Expects the identifier of your kafka client, remember that we configure `:kafka_client` on the `dev.exs`, here we are just referencing that client that we already configured. 243 | 244 | ### group_id 245 | 246 | This is the name of the consumer_group that will be used 247 | 248 | ### cb_module 249 | 250 | The module where you defined the `init/2` and `handle_message/2` 251 | 252 | ### group_config 253 | 254 | The configurations to use for the group coordinator 255 | 256 | ### consumer_config 257 | 258 | Configurations for the partition consumer, here we only defined the `begin_offset` to `:earliest`, this means that our consumer will start from the earliest message available on the topic, you can also use `:latest` to start with the latests message available (This basically means that your consumer group will only get messages after it comes online) 259 | 260 | After all of that we call `{:ok, pid} = :brod.start_link_group_subscriber_v2(config)` and that's it, brod will now start a worker for each partition our topic has and start consuming messages. 
261 | 262 | You should now see on your console all the messages you've sent earlier 263 | 264 | ## Warning 265 | 266 | If you are not running your application in cluster mode you may go into some issues as the `group_subscriber` on multiple nodes may force each other re-join the group, if you wish to simulate this you can start 267 | -------------------------------------------------------------------------------- /contrib/examples/elixir/config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | import_config "#{Mix.env()}.exs" 4 | -------------------------------------------------------------------------------- /contrib/examples/elixir/config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :brod, 4 | clients: [ 5 | # You can choose the name of the client 6 | kafka_client: [ 7 | endpoints: ["kafka-default.dev.podium-dev.com": 19092], 8 | # This will auto-start the producers with default configs 9 | auto_start_producers: true 10 | ] 11 | ] 12 | -------------------------------------------------------------------------------- /contrib/examples/elixir/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | kafdrop: 4 | image: obsidiandynamics/kafdrop 5 | restart: "no" 6 | ports: 7 | - "9000:9000" 8 | environment: 9 | KAFKA_BROKERCONNECT: "kafka:29092" 10 | JVM_OPTS: "-Xms16M -Xmx48M -Xss180K -XX:-TieredCompilation -XX:+UseStringDeduplication -noverify" 11 | depends_on: 12 | - "kafka" 13 | kafka: 14 | image: obsidiandynamics/kafka 15 | restart: "no" 16 | ports: 17 | - "2181:2181" 18 | - "9092:9092" 19 | environment: 20 | KAFKA_LISTENERS: "INTERNAL://:29092,EXTERNAL://:9092" 21 | KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://localhost:9092" 22 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT" 23 | 
KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL" 24 | KAFKA_ZOOKEEPER_SESSION_TIMEOUT: "6000" 25 | KAFKA_RESTART_ATTEMPTS: "10" 26 | KAFKA_RESTART_DELAY: "5" 27 | ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: "0" 28 | -------------------------------------------------------------------------------- /contrib/examples/elixir/docs/kafdrop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kafka4beam/brod/ee6e50c9fbefb4f093833c24c14aaf2ee07f74b6/contrib/examples/elixir/docs/kafdrop.png -------------------------------------------------------------------------------- /contrib/examples/elixir/docs/partition1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kafka4beam/brod/ee6e50c9fbefb4f093833c24c14aaf2ee07f74b6/contrib/examples/elixir/docs/partition1.png -------------------------------------------------------------------------------- /contrib/examples/elixir/docs/streamingevents.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kafka4beam/brod/ee6e50c9fbefb4f093833c24c14aaf2ee07f74b6/contrib/examples/elixir/docs/streamingevents.png -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample do 2 | @moduledoc false 3 | end 4 | -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample/application.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.Application do 2 | # See https://hexdocs.pm/elixir/Application.html 3 | # for more information on OTP Applications 4 | @moduledoc false 5 | 6 | use Application 7 | 8 | def start(_type, _args) do 9 | children = [ 10 | BrodSample.GroupSubscriber 11 | ] 
12 | opts = [strategy: :one_for_one, name: BrodSample.Supervisor] 13 | Supervisor.start_link(children, opts) 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample/create_topic.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.TopicManagment do 2 | def create(name) do 3 | topic_config = [ 4 | %{ 5 | config_entries: [], 6 | num_partitions: 6, 7 | replica_assignment: [], 8 | replication_factor: 1, 9 | topic: name 10 | } 11 | ] 12 | 13 | :brod.create_topics( 14 | ["localhost": 9092], 15 | topic_config, 16 | %{timeout: 1_000} 17 | ) 18 | end 19 | 20 | def delete(name) do 21 | :brod.delete_topics( 22 | [localhost: 9092], 23 | [name], 24 | 10_000 25 | ) 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample/group_subscriber.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.GroupSubscriber do 2 | require Logger 3 | require Record 4 | import Record, only: [defrecord: 2, extract: 2] 5 | defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl") 6 | 7 | def child_spec(arg) do 8 | %{ 9 | id: BrodSample.GroupSubscriber, 10 | start: {BrodSample.GroupSubscriber, :start, []} 11 | } 12 | end 13 | 14 | def start() do 15 | group_config = [ 16 | offset_commit_policy: :commit_to_kafka_v2, 17 | offset_commit_interval_seconds: 5, 18 | rejoin_delay_seconds: 2, 19 | reconnect_cool_down_seconds: 10 20 | ] 21 | 22 | {:ok, _subscriber} = 23 | :brod.start_link_group_subscriber( 24 | :kafka_client, 25 | "consumer-group-name", 26 | ["sample"], 27 | group_config, 28 | _consumer_config = [begin_offset: :earliest], 29 | _callback_module = __MODULE__, 30 | _callback_init_args = [] 31 | ) 32 | end 33 | 34 | def init(_group_id, _callback_init_args) do 35 | {:ok, []} 36 | end 37 | 
38 | def handle_message( 39 | _topic, 40 | _partition, 41 | {:kafka_message, _offset, _key, body, _op, _timestamp, []} = message, 42 | state 43 | ) do 44 | Logger.info("Message #{body}") 45 | Logger.info("Message #{inspect(state)}") 46 | 47 | case body do 48 | "error_bodyy" -> :error 49 | _ -> {:ok, :ack, state} 50 | end 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample/group_subscriber_v2.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.GroupSubscriberV2 do 2 | @behaviour :brod_group_subscriber_v2 3 | require Logger 4 | 5 | def start() do 6 | group_config = [ 7 | offset_commit_policy: :commit_to_kafka_v2, 8 | offset_commit_interval_seconds: 5, 9 | rejoin_delay_seconds: 2, 10 | reconnect_cool_down_seconds: 10 11 | ] 12 | 13 | config = %{ 14 | client: :kafka_client, 15 | group_id: "from_zero", 16 | topics: ["sample"], 17 | cb_module: __MODULE__, 18 | group_config: group_config, 19 | consumer_config: [begin_offset: :earliest] 20 | } 21 | 22 | :brod.start_link_group_subscriber_v2(config) 23 | end 24 | 25 | def init(_arg, _arg2) do 26 | {:ok, []} 27 | end 28 | 29 | def handle_message(message, state) do 30 | {_kafka_message_set, _content, partition, _unkow, _set} = message 31 | 32 | case partition do 33 | # 1 -> {:error} 34 | _ -> {:ok, :commit, []} 35 | end 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /contrib/examples/elixir/lib/brod_sample/publisher.ex: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.Publisher do 2 | def publish(topic, partition_key, message) do 3 | {:ok, count} = :brod.get_partitions_count(:kafka_client, topic) 4 | 5 | :brod.produce_sync( 6 | :kafka_client, 7 | topic, 8 | :erlang.phash2(partition_key, count), 9 | partition_key, 10 | message 11 | ) 12 | end 13 | 14 | def 
publish_sample_messages(topic, n) do 15 | 0..n 16 | |> Task.async_stream(fn n -> 17 | :brod.produce( 18 | :kafka_client, 19 | topic, 20 | n, 21 | "oi", 22 | "Message number #{n}" 23 | ) 24 | end) 25 | |> Enum.to_list() 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /contrib/examples/elixir/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule BrodSample.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :brod_sample, 7 | version: "0.1.0", 8 | elixir: "~> 1.10", 9 | start_permanent: Mix.env() == :prod, 10 | deps: deps() 11 | ] 12 | end 13 | 14 | def application do 15 | [ 16 | extra_applications: [:logger], 17 | mod: {BrodSample.Application, []} 18 | ] 19 | end 20 | 21 | defp deps do 22 | [ 23 | {:brod, "~> 3.10.0"}, 24 | {:jason, "~> 1.2.1 "} 25 | ] 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /contrib/examples/elixir/mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "brod": {:hex, :brod, "3.10.0", "13e39f0789c40d38e354d7b9a47a7168a258f1796165b79f0a5d16150f612601", [:rebar3], [{:kafka_protocol, "2.3.6", [hex: :kafka_protocol, repo: "hexpm", optional: false]}, {:supervisor3, "1.1.8", [hex: :supervisor3, repo: "hexpm", optional: false]}], "hexpm", "e328a6e84b3a252bb00bd6291df2e41f35e279ee01456b7e77d763a63e388534"}, 3 | "crc32cer": {:hex, :crc32cer, "0.1.4", "a656dff19474d1a1fc5bb0081610ab6b0695b23affc47fa90abeb079a8ef9752", [:rebar3], [], "hexpm", "964735a5422cf65bbc5354860a560fff546f0026f83f8860525bd58ab5bade5d"}, 4 | "jason": {:hex, :jason, "1.2.1", "12b22825e22f468c02eb3e4b9985f3d0cb8dc40b9bd704730efa11abd2708c44", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "b659b8571deedf60f79c5a608e15414085fa141344e2716fbd6988a084b5f993"}, 5 | "kafka_protocol": {:hex, :kafka_protocol, "2.3.6", 
"df076a8ef49fffae3535c805cb00f3a057ce1895e63398bf8a10569eeeac02f8", [:rebar, :rebar3], [{:crc32cer, "0.1.4", [hex: :crc32cer, repo: "hexpm", optional: false]}, {:snappyer, "1.2.5", [hex: :snappyer, repo: "hexpm", optional: false]}], "hexpm", "7cb061fe46babc7fd269d2c0e5b4dba5d1efc4f7dacce85b17a9cca973106b23"}, 6 | "snappyer": {:hex, :snappyer, "1.2.5", "9154b9ac84031f0a799f72a4aa87df23ab2193b5631475fa2cdc304382d2df77", [:rebar3], [], "hexpm", "d2adc26a81efd5f138397a38a0bb545188d302972721f8be0de37fa452c8aed7"}, 7 | "supervisor3": {:hex, :supervisor3, "1.1.8", "5cf95c95342b589ec8d74689eea0646c0a3eb92820241e0c2d0ca4c104df92bc", [:make, :rebar, :rebar3], [], "hexpm", "4814b4d4343e777cc724312a588061828703f05149129cda2cb30d14105b1128"}, 8 | } 9 | -------------------------------------------------------------------------------- /contrib/examples/elixir/test/brod_sample_test.exs: -------------------------------------------------------------------------------- 1 | defmodule BrodSampleTest do 2 | use ExUnit.Case 3 | doctest BrodSample 4 | 5 | test "greets the world" do 6 | assert BrodSample.hello() == :world 7 | end 8 | end 9 | -------------------------------------------------------------------------------- /contrib/examples/elixir/test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start() 2 | -------------------------------------------------------------------------------- /elvis.config: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | [ {elvis, 3 | [ {config, 4 | [ #{dirs => [ "src" 5 | ], 6 | filter => "*.erl", 7 | rules => [ {elvis_text_style, line_length, 8 | #{ limit => 100, 9 | skip_comments => false 10 | }} 11 | , {elvis_text_style, no_tabs} 12 | , {elvis_text_style, no_trailing_whitespace} 13 | %% Ignoring macro_names since brod_client and brod_producer 14 | %% define macro's with arguments in lower_case 15 | %% , {elvis_style, macro_names} 16 | , 
{elvis_style, macro_module_names} 17 | , {elvis_style, operator_spaces, 18 | #{ rules => [ {right,","} 19 | , {right,"+"} 20 | , {left,"+"} 21 | , {right,"*"} 22 | , {left,"*"} 23 | , {right,"--"} 24 | , {left,"--"} 25 | , {right,"++"} 26 | , {left,"++"} 27 | ] 28 | }} 29 | , {elvis_style, nesting_level, 30 | #{ level => 3, 31 | ignore => [ brod_group_coordinator 32 | , brod_utils 33 | ] 34 | }} 35 | , {elvis_style, god_modules, 36 | #{ limit => 25, 37 | ignore => [brod, brod_client, brod_utils] 38 | }} 39 | , {elvis_style, no_nested_try_catch} 40 | , {elvis_style, invalid_dynamic_call, 41 | #{ignore => [ brod_group_coordinator 42 | , brod_group_subscriber_worker 43 | ] 44 | }} 45 | , {elvis_style, used_ignored_variable} 46 | , {elvis_style, no_behavior_info} 47 | , {elvis_style, module_naming_convention, 48 | #{ ignore => [], 49 | regex => "^([a-z][a-z0-9]*_?)([a-z0-9]*_?)*$" 50 | }} 51 | , {elvis_style, function_naming_convention, 52 | #{ regex => "^([a-z][a-z0-9]*_?)([a-z0-9]*_?)*$" 53 | }} 54 | , {elvis_style, variable_naming_convention, 55 | #{ regex => "^_?([A-Z][0-9a-zA-Z_]*)$" 56 | }} 57 | , {elvis_style, state_record_and_type} 58 | , {elvis_style, no_spec_with_records} 59 | , {elvis_style, dont_repeat_yourself, 60 | #{ min_complexity => 15 61 | }} 62 | , {elvis_style, no_debug_call} 63 | ] 64 | }, 65 | #{ dirs => ["test"] 66 | , filter => "*.erl" 67 | , rules => [ {elvis_text_style, line_length, 68 | #{ limit => 100 69 | , skip_comments => false 70 | }} 71 | ] 72 | } 73 | ] 74 | } 75 | , {verbose, true} 76 | ] 77 | } 78 | ]. 
79 | 80 | %%%_* Emacs ==================================================================== 81 | %%% Local Variables: 82 | %%% allout-layout: t 83 | %%% erlang-indent-level: 2 84 | %%% End: 85 | -------------------------------------------------------------------------------- /guides/examples/Authentication.md: -------------------------------------------------------------------------------- 1 | # Authentication 2 | 3 | ## SASL/PLAIN 4 | 5 | ### Erlang 6 | 7 | ```erlang 8 | [{brod, 9 | [{clients 10 | , [{kafka_client 11 | , [ { endpoints, [{"localhost", 9092}] } 12 | , { ssl, true} 13 | , { sasl, {plain, "GFRW5BSQHKEH0TSG", "GrL3CNTkLhsvtBr8srGn0VilMpgDb4lPD"}} 14 | ] 15 | } 16 | ] 17 | } 18 | ] 19 | }] 20 | ``` 21 | 22 | ### Elixir 23 | 24 | ```elixir 25 | import Config 26 | 27 | config :brod, 28 | clients: [ 29 | kafka_client: [ 30 | endpoints: [ 31 | localhost: 9092 32 | ], 33 | ssl: true, 34 | sasl: { 35 | :plain, 36 | System.get_env("KAFKA_USERNAME"), 37 | System.get_env("KAFKA_PASSWORD") 38 | } 39 | ] 40 | ] 41 | ``` 42 | 43 | ## SSL Certificate Validation 44 | 45 | Erlang's default configuration for SSL is [verify_none](https://github.com/erlang/otp/blob/OTP-24.3.4/lib/ssl/src/ssl_internal.hrl#L120-L218) 46 | which means that certificates are accepted but not validated. brod passes SSL options to the [kafka_protocol](https://hex.pm/packages/kafka_protocol) library 47 | where they are used to create the [SSL connection](https://github.com/kafka4beam/kafka_protocol/blob/4.0.3/src/kpro_connection.erl#L305). 48 | 49 | For more info see the Erlang Ecosystem Foundation's [server certificate verification](https://erlef.github.io/security-wg/secure_coding_and_deployment_hardening/ssl.html#server-certificate-verification) recommendations. 
50 | 51 | ## Erlang 52 | 53 | ```erlang 54 | [{brod, 55 | [{clients 56 | , [{kafka_client 57 | , [ { endpoints, [{"localhost", 9092}] } 58 | , { ssl, [ { verify, verify_peer } 59 | , { cacertfile, "/etc/ssl/certs/ca-certificates.crt" } 60 | , { depth, 3 } 61 | , { customize_hostname_check, 62 | [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} 63 | , {version, ['tlsv1.3', 'tlsv1.2']} 64 | ]} 65 | , { sasl, {plain, "GFRW5BSQHKEH0TSG", "GrL3CNTkLhsvtBr8srGn0VilMpgDb4lPD"}} 66 | ] 67 | } 68 | ] 69 | } 70 | ] 71 | }] 72 | ``` 73 | 74 | ## Elixir 75 | 76 | ```elixir 77 | import Config 78 | 79 | config :brod, 80 | clients: [ 81 | kafka_client: [ 82 | endpoints: [ 83 | localhost: 9092 84 | ], 85 | ssl: [ 86 | verify: :verify_peer, 87 | cacertfile: "/etc/ssl/certs/ca-certificates.crt", 88 | depth: 3, 89 | customize_hostname_check: [ 90 | match_fun: :public_key.pkix_verify_hostname_match_fun(:https) 91 | ], 92 | ], 93 | sasl: { 94 | :plain, 95 | System.get_env("KAFKA_USERNAME"), 96 | System.get_env("KAFKA_PASSWORD") 97 | } 98 | ] 99 | ] 100 | ``` 101 | 102 | The examples above are using `/etc/ssl/certs/ca-certificates.crt` which is the certificate authority that comes 103 | with [alpine](https://hub.docker.com/_/alpine) linux. You will need to provide a path to a valid certificate authority 104 | certificate or use [certifi](https://hex.pm/packages/certifi) 105 | -------------------------------------------------------------------------------- /guides/examples/elixir/Consumer.md: -------------------------------------------------------------------------------- 1 | # Consumer Example 2 | 3 | > #### Info {: .info} 4 | > 5 | > There is also a more complete example [here](https://github.com/kafka4beam/brod/tree/master/contrib/examples/elixir). 
6 | 7 | Ensure `:brod` is added to your deps on `mix.exs` 8 | 9 | ```elixir 10 | defp deps do 11 | [ 12 | {:brod, "~> 3.10.0"} 13 | ] 14 | end 15 | ``` 16 | 17 | Both examples require a brod client with name `:kafka_client` to be already started. 18 | You can do that either statically by specifying it in the configuration (see an 19 | [example](https://github.com/kafka4beam/brod/blob/master/contrib/examples/elixir/config/dev.exs)) 20 | or dynamically 21 | (e.g. by calling `:brod.start_client([{"localhost", 9092}], :kafka_client)`). 22 | 23 | ## Group Subscriber 24 | 25 | Either the `brod_group_subscriber_v2` or `brod_group_subscriber` behaviours can be used 26 | to consume messages. The key difference is that the v2 subscriber runs a worker for each 27 | partition in a separate Erlang process, allowing parallel message processing. 28 | 29 | Here is an example of callback module that implements the `brod_group_subscriber_v2` behaviour to consume messages. 30 | 31 | ```elixir 32 | defmodule BrodSample.GroupSubscriberV2 do 33 | @behaviour :brod_group_subscriber_v2 34 | 35 | def child_spec(_arg) do 36 | config = %{ 37 | client: :kafka_client, 38 | group_id: "consumer_group_name", 39 | topics: ["streaming.events"], 40 | cb_module: __MODULE__, 41 | consumer_config: [{:begin_offset, :earliest}], 42 | init_data: [], 43 | message_type: :message_set, 44 | group_config: [ 45 | offset_commit_policy: :commit_to_kafka_v2, 46 | offset_commit_interval_seconds: 5, 47 | rejoin_delay_seconds: 60, 48 | reconnect_cool_down_seconds: 60 49 | ] 50 | } 51 | 52 | %{ 53 | id: __MODULE__, 54 | start: {:brod_group_subscriber_v2, :start_link, [config]}, 55 | type: :worker, 56 | restart: :temporary, 57 | shutdown: 5000 58 | } 59 | end 60 | 61 | @impl :brod_group_subscriber_v2 62 | def init(_group_id, _init_data), do: {:ok, []} 63 | 64 | @impl :brod_group_subscriber_v2 65 | def handle_message(message, _state) do 66 | IO.inspect(message, label: "message") 67 | {:ok, :commit, []} 68 | end 69 | end 
70 | ``` 71 | 72 | The example module implements `child_spec/1` so that our consumer can be started by a Supervisor. The restart policy is set to `:temporary` 73 | because, in this case, if a message can not be processed, then there is no point in restarting. This might not always 74 | be the case. 75 | 76 | See `:brod_group_subscriber_v2.start_link/1` for details on the configuration options. 77 | 78 | See docs for more details about the required or optional callbacks. 79 | 80 | ## Partition Subscriber 81 | 82 | A more low-level approach can be used when you want a more fine-grained control or when you have only a single partition. 83 | 84 | ```elixir 85 | defmodule BrodSample.PartitionSubscriber do 86 | use GenServer 87 | 88 | import Record, only: [defrecord: 2, extract: 2] 89 | 90 | defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl") 91 | defrecord :kafka_message_set, extract(:kafka_message_set, from_lib: "brod/include/brod.hrl") 92 | defrecord :kafka_fetch_error, extract(:kafka_fetch_error, from_lib: "brod/include/brod.hrl") 93 | 94 | defmodule State do 95 | @enforce_keys [:consumer_pid] 96 | defstruct consumer_pid: nil 97 | end 98 | 99 | defmodule KafkaMessage do 100 | @enforce_keys [:offset, :key, :value, :ts] 101 | defstruct offset: nil, key: nil, value: nil, ts: nil 102 | end 103 | 104 | def start_link(topic, partition) do 105 | GenServer.start_link(__MODULE__, {topic, partition}) 106 | end 107 | 108 | @impl true 109 | def init({topic, partition}) do 110 | # start the consumer(s) 111 | # if you have more than one partition, do it somewhere else once for all partitions 112 | # (e.g. 
in the parent process) 113 | :ok = :brod.start_consumer(:kafka_client, topic, begin_offset: :latest) 114 | 115 | {:ok, consumer_pid} = :brod.subscribe(:kafka_client, self(), topic, partition, []) 116 | # you may also want to handle error when subscribing 117 | # and to monitor the consumer pid (and resubscribe when the consumer crashes) 118 | 119 | {:ok, %State{consumer_pid: consumer_pid}} 120 | end 121 | 122 | @impl true 123 | def handle_info( 124 | {consumer_pid, kafka_message_set(messages: msgs)}, 125 | %State{consumer_pid: consumer_pid} = state 126 | ) do 127 | for msg <- msgs do 128 | msg = kafka_message_to_struct(msg) 129 | 130 | # process the message... 131 | IO.inspect(msg) 132 | 133 | # and then acknowledge it 134 | :brod.consume_ack(consumer_pid, msg.offset) 135 | end 136 | 137 | {:noreply, state} 138 | end 139 | 140 | def handle_info({pid, kafka_fetch_error()} = error, %State{consumer_pid: pid} = state) do 141 | # you may want to handle the error differently 142 | {:stop, error, state} 143 | end 144 | 145 | defp kafka_message_to_struct(kafka_message(offset: offset, key: key, value: value, ts: ts)) do 146 | %KafkaMessage{ 147 | offset: offset, 148 | key: key, 149 | value: value, 150 | ts: DateTime.from_unix!(ts, :millisecond) 151 | } 152 | end 153 | end 154 | ``` -------------------------------------------------------------------------------- /guides/examples/elixir/Publisher.md: -------------------------------------------------------------------------------- 1 | # Publisher Example 2 | 3 | > #### Info {: .info} 4 | > 5 | > There is also a more complete example [here](https://github.com/kafka4beam/brod/tree/master/contrib/examples/elixir). 6 | 7 | Ensure `:brod` is added to your deps on `mix.exs` 8 | 9 | ```elixir 10 | defp deps do 11 | [ 12 | {:brod, "~> 3.10.0"} 13 | ] 14 | end 15 | ``` 16 | 17 | ## Client Configuration 18 | 19 | To use producers, you have to start a client first. 20 | 21 | You can do that by adding the following configuration (e.g. 
into `config/dev.exs`): 22 | 23 | ```elixir 24 | import Config 25 | 26 | config :brod, 27 | clients: [ 28 | kafka_client: [ 29 | endpoints: [localhost: 9092], 30 | auto_start_producers: true, 31 | # The following :ssl and :sasl configs are not 32 | # required when running kafka locally unauthenticated 33 | ssl: true, 34 | sasl: { 35 | :plain, 36 | System.get_env("KAFKA_CLUSTER_API_KEY"), 37 | System.get_env("KAFKA_CLUSTER_API_SECRET") 38 | } 39 | ] 40 | ] 41 | ``` 42 | 43 | or by starting it dynamically with this snippet (you can also add SSL/SASL configuration if you want to): 44 | 45 | ```elixir 46 | :brod.start_client([localhost: 9092], :kafka_client, auto_start_producers: true) 47 | ``` 48 | 49 | _Note:_ `kafka_client` can be any valid atom. And `:endpoints` accepts multiple host port tuples (e.g. `endpoints: [{"192.168.0.2", 9092}, {"192.168.0.3", 9092}, ...]`). 50 | 51 | If you don't pass the `auto_start_producers: true` option, you also have to manually start producers before calling `:brod.produce_sync/5` (and other produce functions). 52 | For example like this: `:brod.start_producer(:kafka_client, "my_topic", [])`. 53 | 54 | See `:brod.start_client/3` for a list of all available options. 55 | 56 | ## Publisher 57 | 58 | To send a message with brod we can use the `:brod.produce_sync/5` function 59 | 60 | ```elixir 61 | defmodule BrodExample.Publisher do 62 | def publish(topic, partition, key, message) do 63 | :brod.produce_sync(:kafka_client, topic, :hash, key, message) 64 | end 65 | end 66 | ``` 67 | 68 | There are also other ways (functions) how to produce messages, you can find them in the [overview](https://hexdocs.pm/brod/readme.html#producers) and in the `brod` 69 | module documentation. 
70 | 71 | ### Using partition key 72 | 73 | When providing `:hash` as the _partition_ when calling `:brod.produce_sync/5` is equivalent to the following: 74 | 75 | ```elixir 76 | {:ok, count} = :brod.get_partitions_count(:kafka_client, topic) 77 | partition = rem(:erlang.phash2(key), count) 78 | :brod.produce_sync(:kafka_client, topic, partition, key, message) 79 | ``` 80 | 81 | Internally brod will get the partition count, generate a hash for the key within the range of partitions, 82 | and publish the message to the calculated hash. This is the same sticky routing that Kafka's [ProducerRecord](https://kafka.apache.org/23/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html) implements: 83 | 84 | > If no partition is specified but a key is present a partition will be chosen using a hash of the key. If neither key nor partition is present a partition will be assigned in a round-robin fashion. 85 | -------------------------------------------------------------------------------- /include/brod.hrl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2014-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | -ifndef(__BROD_HRL). 18 | -define(__BROD_HRL, true). 19 | 20 | %% -record(kafka_message, {...}). 21 | -include_lib("kafka_protocol/include/kpro_public.hrl"). 
22 | 23 | -define(BROD_DEFAULT_CLIENT_ID, brod_default_client). 24 | -define(BROD_CONSUMER_GROUP_PROTOCOL_VERSION, 0). 25 | 26 | -record(kafka_message_set, 27 | { topic :: brod:topic() 28 | , partition :: brod:partition() 29 | , high_wm_offset :: integer() %% max offset of the partition 30 | , messages :: [brod:message()] %% exposed to brod user 31 | | kpro:incomplete_batch() %% this union member 32 | %% is internal only 33 | }). 34 | 35 | -record(kafka_fetch_error, 36 | { topic :: brod:topic() 37 | , partition :: brod:partition() 38 | , error_code :: brod:error_code() 39 | , error_desc = "" 40 | }). 41 | 42 | -record(brod_call_ref, { caller :: undefined | pid() 43 | , callee :: undefined | pid() 44 | , ref :: undefined | reference() 45 | }). 46 | 47 | -define(BROD_PRODUCE_UNKNOWN_OFFSET, -1). 48 | 49 | -record(brod_produce_reply, { call_ref :: brod:call_ref() 50 | , base_offset :: undefined | brod:offset() 51 | , result :: brod:produce_result() 52 | }). 53 | 54 | -record(kafka_group_member_metadata, 55 | { version :: non_neg_integer() 56 | , topics :: [brod:topic()] 57 | , user_data :: binary() 58 | }). 59 | 60 | -record(brod_received_assignment, 61 | { topic :: brod:topic() 62 | , partition :: brod:partition() 63 | , begin_offset :: undefined | brod:offset() | {begin_offset, brod:offset_time()} 64 | }). 65 | 66 | -type brod_received_assignments() :: [#brod_received_assignment{}]. 67 | 68 | -type brod_partition_fun() :: fun(( Topic :: brod:topic() 69 | , PartitionsCnt :: integer() 70 | , Key :: brod:key() 71 | , Value :: brod:value()) -> 72 | {ok, Partition :: brod:partition()}). 73 | 74 | 75 | -record(brod_cg, { id :: brod:group_id() 76 | , protocol_type :: brod:cg_protocol_type() 77 | }). 78 | 79 | -define(BROD_FOLD_RET(Acc, NextOffset, Reason), {Acc, NextOffset, Reason}). 80 | 81 | -define(BROD_DEFAULT_TIMEOUT, timer:seconds(5)). 82 | 83 | -endif. 
% include brod.hrl 84 | 85 | %%% Local Variables: 86 | %%% erlang-indent-level: 2 87 | %%% End: 88 | -------------------------------------------------------------------------------- /include/brod_int.hrl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2014-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | -ifndef(__BROD_INT_HRL). 18 | -define(__BROD_INT_HRL, true). 19 | 20 | -include("brod.hrl"). 21 | -include_lib("kafka_protocol/include/kpro.hrl"). 22 | -include_lib("kernel/include/logger.hrl"). 23 | 24 | -define(undef, undefined). 25 | 26 | -define(OFFSET_EARLIEST, earliest). 27 | -define(OFFSET_LATEST, latest). 28 | -define(IS_SPECIAL_OFFSET(O), (O =:= ?OFFSET_EARLIEST orelse 29 | O =:= ?OFFSET_LATEST orelse 30 | O =:= -2 orelse 31 | O =:= -1)). 32 | 33 | -record(socket, { pid :: pid() 34 | , host :: string() 35 | , port :: integer() 36 | , node_id :: integer() 37 | }). 38 | 39 | -record(cbm_init_data, 40 | { committed_offsets :: brod_topic_subscriber:committed_offsets() 41 | , cb_fun :: brod_topic_subscriber:cb_fun() 42 | , cb_data :: term() 43 | }). 44 | 45 | -type sasl_opt() :: {plain, User :: string() | binary(), 46 | Pass :: string() | binary() | 47 | fun(() -> string() | binary())} 48 | | {plain, File :: file:name_all()} 49 | | {callback, module(), term()} 50 | | ?undef. 
51 | 52 | %% Is kafka error code 53 | -define(IS_ERROR(EC), ((EC) =/= ?no_error)). 54 | 55 | -define(KV(Key, Value), {Key, Value}). 56 | -define(TKV(Ts, Key, Value), {Ts, Key, Value}). 57 | 58 | -define(acked, brod_produce_req_acked). 59 | -define(buffered, brod_produce_req_buffered). 60 | 61 | -define(KAFKA_0_9, {0, 9}). 62 | -define(KAFKA_0_10, {0, 10}). 63 | 64 | -ifdef(OTP_RELEASE). 65 | -define(BIND_STACKTRACE(Var), :Var). 66 | -define(GET_STACKTRACE(Var), ok). 67 | -else. 68 | -define(BIND_STACKTRACE(Var), ). 69 | -define(GET_STACKTRACE(Var), Var = erlang:get_stacktrace()). 70 | -endif. 71 | 72 | %% Brod logging wrappers around Logger API calls. Insert 'brod' domain 73 | %% to allow applications to filter Brod logs as they wish. 74 | -define(BROD_LOG_WARNING(Fmt, Args), ?LOG_WARNING(Fmt, Args, #{domain => [brod]})). 75 | -define(BROD_LOG_ERROR(Fmt, Args), ?LOG_ERROR( Fmt, Args, #{domain => [brod]})). 76 | -define(BROD_LOG_INFO(Fmt, Args), ?LOG_INFO( Fmt, Args, #{domain => [brod]})). 77 | -define(BROD_LOG(Level, Fmt, Args), ?LOG(Level, Fmt, Args, #{domain => [brod]})). 78 | 79 | -endif. % include brod_int.hrl 80 | 81 | %%%_* Emacs ==================================================================== 82 | %%% Local Variables: 83 | %%% allout-layout: t 84 | %%% erlang-indent-level: 2 85 | %%% End: 86 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {deps, [{kafka_protocol, "4.2.6"}]}. 2 | {project_plugins, [{rebar3_lint, "~> 3.2.5"}]}. 3 | {edoc_opts, [{preprocess, true}]}. 4 | {erl_opts, [warnings_as_errors, warn_unused_vars,warn_shadow_vars,warn_obsolete_guard,debug_info]}. 5 | {xref_checks, [undefined_function_calls, undefined_functions, 6 | locals_not_used, deprecated_function_calls, 7 | deprecated_functions]}. 
8 | {profiles, [ 9 | {test, [ 10 | {deps, [ {hut, "1.3.0"} 11 | , {jsone, "1.7.0"} 12 | , {meck, "0.9.2"} 13 | , {proper, "1.5.0"} 14 | , {snappyer, "1.2.9"} 15 | , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {branch, "1.0.10"}}} 16 | , {lz4b, "0.0.13"} 17 | , {ezstd, "1.1.0"} 18 | ]}, 19 | {erl_opts, [warnings_as_errors, {d, build_brod_cli}]} 20 | ]} 21 | ]}. 22 | {ex_doc, 23 | [ {extras, 24 | [ {"CHANGELOG.md", #{title => "Changelog"}} 25 | , {"README.md", #{title => "Overview"}} 26 | , {"LICENSE", #{title => "License"}} 27 | , "guides/examples/elixir/Publisher.md" 28 | , "guides/examples/elixir/Consumer.md" 29 | , "guides/examples/Authentication.md" 30 | ]} 31 | , {groups_for_extras, [{"Elixir", [ <<"guides/examples/elixir/Publisher.md">> 32 | , <<"guides/examples/elixir/Consumer.md">> 33 | ]} 34 | ,{"Usage", [<<"guides/examples/Authentication.md">>]}]} 35 | , {main, "README.md"} 36 | , {homepage_url, "https://hexdocs.pm/brod"} 37 | , {source_url, "https://github.com/kafka4beam/brod"} 38 | , {source_ref, "master"} 39 | , {prefix_ref_vsn_with_v, false} 40 | , {api_reference, false} 41 | ]}. 42 | {hex, [{doc, ex_doc}]}. 43 | {ct_opts, [{enable_builtin_hooks, false}]}. 44 | {dialyzer, [{warnings, [unknown]}]}. 45 | {cover_enabled, true}. 46 | {cover_opts, [verbose]}. 47 | {cover_export_enabled, true}. 48 | {plugins, []}. 49 | -------------------------------------------------------------------------------- /rebar.config.script: -------------------------------------------------------------------------------- 1 | Plugins = 2 | case erlang:list_to_integer(erlang:system_info(otp_release)) of 3 | Version when Version > 23 -> 4 | ExtraPlugins = [{rebar3_ex_doc, "~> 0.2.9"},{rebar3_hex, "~> 7.0.1"}], 5 | {project_plugins, lists:merge(ExtraPlugins, proplists:get_value(project_plugins, CONFIG))}; 6 | _ -> {project_plugins, proplists:get_value(project_plugins, CONFIG)} 7 | end, 8 | [Plugins | CONFIG]. 
9 | -------------------------------------------------------------------------------- /scripts/.env: -------------------------------------------------------------------------------- 1 | KAFKA_NET=172.28.0.0/16 2 | ZOOKEEPER_IP=172.28.0.10 3 | KAFKA_1_IP=172.28.0.11 4 | KAFKA_2_IP=172.28.0.12 5 | -------------------------------------------------------------------------------- /scripts/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *.key 4 | *.srl 5 | *.p12 6 | *.jks 7 | -------------------------------------------------------------------------------- /scripts/cover-print-not-covered-lines.escript: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %% -*- erlang -*- 3 | %%! -pa _build/test/lib/brod/ebin 4 | 5 | %%% 6 | %%% Copyright (c) 2015-2021, Klarna Bank AB (publ) 7 | %%% 8 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 9 | %%% you may not use this file except in compliance with the License. 10 | %%% You may obtain a copy of the License at 11 | %%% 12 | %%% http://www.apache.org/licenses/LICENSE-2.0 13 | %%% 14 | %%% Unless required by applicable law or agreed to in writing, software 15 | %%% distributed under the License is distributed on an "AS IS" BASIS, 16 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | %%% See the License for the specific language governing permissions and 18 | %%% limitations under the License. 19 | %%% 20 | 21 | -mode(compile). 22 | 23 | main([]) -> 24 | Files = filelib:wildcard("_build/test/cover/*.coverdata"), 25 | ok = import_coverdata(Files), 26 | Modules = get_imported_modules(), 27 | Result = [{Mod, analyse_module(Mod)} || Mod <- Modules], 28 | lists:foreach(fun({Module, NotCoveredLines}) -> 29 | print_mod_summary(Module, lists:sort(NotCoveredLines)) 30 | end, Result). 
31 | 32 | import_coverdata([]) -> ok; 33 | import_coverdata([Filename | Rest]) -> 34 | io:format(user, "Importing coverdata file: ~s\n", [Filename]), 35 | Parent = self(), 36 | Ref = make_ref(), 37 | erlang:spawn_link( 38 | fun() -> 39 | %% shutup the chatty prints from cover:xxx calls 40 | {ok, F} = file:open("/dev/null", [write]), 41 | group_leader(F, self()), 42 | ok = cover:import(Filename), 43 | Parent ! {ok, Ref}, 44 | %% keep it alive 45 | receive stop -> 46 | exit(normal) 47 | end 48 | end), 49 | receive 50 | {ok, Ref} -> 51 | import_coverdata(Rest) 52 | end. 53 | 54 | get_imported_modules() -> 55 | All = cover:imported_modules(), 56 | Filtered = 57 | lists:filter( 58 | fun(Mod) -> 59 | case lists:reverse(atom_to_list(Mod)) of 60 | "ETIUS_" ++ _ -> false; %% ignore coverage for xxx_SUITE 61 | _ -> true 62 | end 63 | end, All), 64 | lists:sort(Filtered). 65 | 66 | analyse_module(Module) -> 67 | {ok, Lines} = cover:analyse(Module, coverage, line), 68 | lists:foldr( 69 | fun({{_Mod, 0}, _}, Acc) -> Acc; 70 | ({{_Mod, _Line}, {1, 0}}, Acc) -> Acc; 71 | ({{_Mod, Line}, {0, 1}}, Acc) -> [Line | Acc] 72 | end, [], Lines). 73 | 74 | print_mod_summary(_Module, []) -> ok; 75 | print_mod_summary(Module, NotCoveredLines) -> 76 | io:format(user, "================ ~p ================\n", [Module]), 77 | case whicherl(Module) of 78 | Filename when is_list(Filename) -> 79 | print_lines(Filename, NotCoveredLines); 80 | _ -> 81 | erlang:error({erl_file_not_found, Module}) 82 | end. 83 | 84 | print_lines(_Filename, []) -> 85 | ok; 86 | print_lines(Filename, Lines) -> 87 | {ok, Fd} = file:open(Filename, [read]), 88 | try 89 | print_lines(Fd, 1, Lines) 90 | after 91 | file:close(Fd) 92 | end. 
93 | 94 | print_lines(_Fd, _N, []) -> 95 | ok; 96 | print_lines(Fd, N, [M | Rest] = Lines) -> 97 | Continue = 98 | case io:get_line(Fd, "") of 99 | eof -> 100 | erlang:error({eof, N, Lines}); 101 | Line when N =:= M -> 102 | io:format(user, "~5p: ~s", [N, Line]), 103 | Rest; 104 | _ -> 105 | Lines 106 | end, 107 | print_lines(Fd, N+1, Continue). 108 | 109 | whicherl(Module) when is_atom(Module) -> 110 | {ok, {Module, [{compile_info, Props}]}} = 111 | beam_lib:chunks(code:which(Module), [compile_info]), 112 | proplists:get_value(source, Props). 113 | 114 | %%%_* Emacs ==================================================================== 115 | %%% Local Variables: 116 | %%% allout-layout: t 117 | %%% erlang-indent-level: 2 118 | %%% End: 119 | -------------------------------------------------------------------------------- /scripts/docker-compose-kraft.yml: -------------------------------------------------------------------------------- 1 | networks: 2 | kafka-net: 3 | driver: bridge 4 | ipam: 5 | config: 6 | - subnet: ${KAFKA_NET} 7 | 8 | services: 9 | kafka_1: 10 | image: "zmstone/kafka:1.1-${KAFKA_VERSION}" 11 | container_name: 'kafka-1' 12 | networks: 13 | kafka-net: 14 | ipv4_address: ${KAFKA_1_IP} 15 | ports: 16 | - "9092:9092" 17 | - "9093:9093" 18 | - "9094:9094" 19 | - "9095:9095" 20 | environment: 21 | BROKER_ID: 1 22 | CONTROLLER_PORT: 9090 23 | INNER_PORT: 9091 24 | PLAINTEXT_PORT: 9092 25 | SSL_PORT: 9093 26 | SASL_SSL_PORT: 9094 27 | SASL_PLAINTEXT_PORT: 9095 28 | ADVERTISED_HOSTNAME: ${KAFKA_1_IP} 29 | INNER_HOSTNAME: ${KAFKA_1_IP} 30 | VOTERS: 1@${KAFKA_1_IP}:9090,2@${KAFKA_2_IP}:9090 31 | kafka_2: 32 | image: "zmstone/kafka:1.1-${KAFKA_VERSION}" 33 | container_name: 'kafka-2' 34 | networks: 35 | kafka-net: 36 | ipv4_address: ${KAFKA_2_IP} 37 | ports: 38 | - "9192:9092" 39 | - "9193:9093" 40 | - "9194:9094" 41 | - "9195:9095" 42 | environment: 43 | BROKER_ID: 2 44 | CONTROLLER_PORT: 9090 45 | INNER_PORT: 9091 46 | PLAINTEXT_PORT: 9092 47 | SSL_PORT: 
9093 48 | SASL_SSL_PORT: 9094 49 | SASL_PLAINTEXT_PORT: 9095 50 | ADVERTISED_HOSTNAME: ${KAFKA_2_IP} 51 | INNER_HOSTNAME: ${KAFKA_2_IP} 52 | VOTERS: 1@${KAFKA_1_IP}:9090,2@${KAFKA_2_IP}:9090 53 | -------------------------------------------------------------------------------- /scripts/docker-compose.yml: -------------------------------------------------------------------------------- 1 | networks: 2 | kafka-net: 3 | driver: bridge 4 | ipam: 5 | config: 6 | - subnet: ${KAFKA_NET} 7 | 8 | services: 9 | zookeeper: 10 | image: "zmstone/kafka:${KAFKA_IMAGE_VERSION}" 11 | container_name: zookeeper 12 | networks: 13 | kafka-net: 14 | ipv4_address: ${ZOOKEEPER_IP} 15 | command: run zookeeper 16 | kafka_1: 17 | depends_on: 18 | - zookeeper 19 | image: "zmstone/kafka:${KAFKA_IMAGE_VERSION}" 20 | container_name: "kafka-1" 21 | networks: 22 | kafka-net: 23 | ipv4_address: ${KAFKA_1_IP} 24 | ports: 25 | - "9092:9092" 26 | - "9093:9093" 27 | - "9094:9094" 28 | - "9095:9095" 29 | environment: 30 | BROKER_ID: 1 31 | PLAINTEXT_PORT: 9092 32 | SSL_PORT: 9093 33 | SASL_SSL_PORT: 9094 34 | SASL_PLAINTEXT_PORT: 9095 35 | ADVERTISED_HOSTNAME: ${KAFKA_1_IP} 36 | ZOOKEEPER_CONNECT: ${ZOOKEEPER_IP}:2181 37 | kafka_2: 38 | depends_on: 39 | - zookeeper 40 | image: "zmstone/kafka:${KAFKA_IMAGE_VERSION}" 41 | container_name: "kafka-2" 42 | networks: 43 | kafka-net: 44 | ipv4_address: ${KAFKA_2_IP} 45 | ports: 46 | - "9192:9092" 47 | - "9193:9093" 48 | - "9194:9094" 49 | - "9195:9095" 50 | environment: 51 | BROKER_ID: 2 52 | PLAINTEXT_PORT: 9092 53 | SSL_PORT: 9093 54 | SASL_SSL_PORT: 9094 55 | SASL_PLAINTEXT_PORT: 9095 56 | ADVERTISED_HOSTNAME: ${KAFKA_2_IP} 57 | ZOOKEEPER_CONNECT: ${ZOOKEEPER_IP}:2181 58 | -------------------------------------------------------------------------------- /scripts/setup-test-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | if [ -n "${DEBUG:-}" ]; then 4 | set -x 5 | fi 6 | 7 | TD="$(cd 
"$(dirname "$0")" && pwd)" 8 | 9 | . "$TD/.env" 10 | 11 | docker ps > /dev/null || { 12 | echo "You must be a member of docker group to run this script" 13 | exit 1 14 | } 15 | 16 | function docker_compose() { 17 | if command -v docker-compose ; then 18 | docker-compose "$@" 19 | else 20 | docker compose version &> /dev/null 21 | if [ $? -eq 0 ]; then 22 | docker compose "$@" 23 | else 24 | echo "couldn't find docker compose, needed for testing" >&2; exit 1 25 | fi 26 | fi 27 | } 28 | 29 | KAFKA_VERSION="${KAFKA_VERSION:-${1:-4.0.0}}" 30 | 31 | case $KAFKA_VERSION in 32 | 0.9*) 33 | KAFKA_VERSION="0.9" 34 | ;; 35 | 0.10*) 36 | KAFKA_VERSION="0.10" 37 | ;; 38 | 0.11*) 39 | KAFKA_VERSION="0.11" 40 | ;; 41 | 1.*) 42 | KAFKA_VERSION="1.1" 43 | ;; 44 | 2.*) 45 | KAFKA_VERSION="2.8" 46 | ;; 47 | 3.*) 48 | KAFKA_VERSION="3.9" 49 | ;; 50 | 4.*) 51 | KAFKA_VERSION="4.0" 52 | ;; 53 | *) 54 | echo "Unsupported version $KAFKA_VERSION" 55 | exit 1 56 | ;; 57 | esac 58 | 59 | export KAFKA_VERSION KAFKA_IMAGE_VERSION="1.1.1-${KAFKA_VERSION}" 60 | echo "env KAFKA_IMAGE_VERSION=$KAFKA_IMAGE_VERSION" 61 | 62 | KAFKA_MAJOR=$(echo "$KAFKA_VERSION" | cut -d. 
-f1) 63 | if [ "$KAFKA_MAJOR" -lt 3 ]; then 64 | NEED_ZOOKEEPER=true 65 | else 66 | NEED_ZOOKEEPER=false 67 | fi 68 | 69 | function bootstrap_opts() { 70 | if [[ "$NEED_ZOOKEEPER" = true ]]; then 71 | echo "--zookeeper ${ZOOKEEPER_IP}:2181" 72 | else 73 | echo "--bootstrap-server ${KAFKA_1_IP}:9092" 74 | fi 75 | } 76 | 77 | docker_compose -f $TD/docker-compose.yml down || true 78 | docker_compose -f $TD/docker-compose-kraft.yml down || true 79 | 80 | if [[ "$NEED_ZOOKEEPER" = true ]]; then 81 | docker_compose -f $TD/docker-compose.yml up -d 82 | else 83 | docker_compose -f $TD/docker-compose-kraft.yml up -d 84 | fi 85 | 86 | # give kafka some time 87 | sleep 5 88 | 89 | MAX_WAIT_SEC=10 90 | 91 | function wait_for_kafka() { 92 | local which_kafka="$1" 93 | local n=0 94 | local topic_list listener 95 | while true; do 96 | cmd="opt/kafka/bin/kafka-topics.sh $(bootstrap_opts) --list" 97 | topic_list="$(docker exec $which_kafka $cmd 2>&1)" 98 | if [ "${topic_list-}" = '' ]; then 99 | break 100 | fi 101 | if [ $n -gt $MAX_WAIT_SEC ]; then 102 | echo "timeout waiting for $which_kafka" 103 | echo "last print: ${topic_list:-}" 104 | exit 1 105 | fi 106 | n=$(( n + 1 )) 107 | sleep 1 108 | done 109 | } 110 | 111 | wait_for_kafka kafka-1 112 | wait_for_kafka kafka-2 113 | 114 | function create_topic() { 115 | TOPIC_NAME="$1" 116 | PARTITIONS="${2:-1}" 117 | REPLICAS="${3:-1}" 118 | CMD="/opt/kafka/bin/kafka-topics.sh $(bootstrap_opts) --create --partitions $PARTITIONS --replication-factor $REPLICAS --topic $TOPIC_NAME --config min.insync.replicas=1" 119 | docker exec kafka-1 bash -c "$CMD" 120 | } 121 | 122 | create_topic "dummy" || true 123 | create_topic "brod_SUITE" 124 | create_topic "brod-client-SUITE-topic" 125 | create_topic "brod_consumer_SUITE" 126 | create_topic "brod_producer_SUITE" 2 127 | create_topic "brod-group-coordinator" 3 2 128 | create_topic "brod-group-coordinator-1" 3 2 129 | create_topic "brod-demo-topic-subscriber" 3 2 130 | create_topic 
"brod-demo-group-subscriber-koc" 3 2 131 | create_topic "brod-demo-group-subscriber-loc" 3 2 132 | create_topic "brod_txn_SUITE_1" 3 2 133 | create_topic "brod_txn_SUITE_2" 3 2 134 | create_topic "brod_txn_subscriber_input" 3 2 135 | create_topic "brod_txn_subscriber_output_1" 3 2 136 | create_topic "brod_txn_subscriber_output_2" 3 2 137 | create_topic "brod_compression_SUITE" 138 | create_topic "lz4-test" 139 | create_topic "test-topic" 140 | 141 | if [ "$KAFKA_MAJOR" -ge 2 ]; then 142 | MAYBE_NEW_CONSUMER="" 143 | else 144 | MAYBE_NEW_CONSUMER="--new-consumer" 145 | fi 146 | # this is to warm-up kafka group coordinator for tests 147 | docker exec kafka-1 /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server ${KAFKA_1_IP}:9092 $MAYBE_NEW_CONSUMER --group test-group --describe > /dev/null 2>&1 || true 148 | 149 | # for kafka 0.11 or later, add sasl-scram test credentials 150 | if [[ "$KAFKA_VERSION" != 0.9* ]] && [[ "$KAFKA_VERSION" != 0.10* ]]; then 151 | docker exec kafka-1 /opt/kafka/bin/kafka-configs.sh \ 152 | $(bootstrap_opts) \ 153 | --alter \ 154 | --add-config 'SCRAM-SHA-256=[iterations=8192,password=ecila]' \ 155 | --entity-type users \ 156 | --entity-name alice 157 | 158 | docker exec kafka-1 /opt/kafka/bin/kafka-configs.sh \ 159 | $(bootstrap_opts) \ 160 | --alter \ 161 | --add-config 'SCRAM-SHA-512=[password=ecila]' \ 162 | --entity-type users \ 163 | --entity-name alice 164 | fi 165 | -------------------------------------------------------------------------------- /src/brod.app.src: -------------------------------------------------------------------------------- 1 | %% -*- mode:erlang -*- 2 | {application,brod, 3 | [{description,"Apache Kafka Erlang client library"}, 4 | {vsn,"git"}, 5 | {registered,[]}, 6 | {applications,[kernel,stdlib,kafka_protocol]}, 7 | {env,[]}, 8 | {mod, {brod, []}}, 9 | {modules,[]}, 10 | {licenses, ["Apache License 2.0"]}, 11 | {links, [{"Github", "https://github.com/kafka4beam/brod"}]}, 12 | {build_tools, ["rebar3"]}, 
13 | {files, ["src", "include", "rebar.config", "rebar.config.script", 14 | "README.md","LICENSE", "NOTICE", "Makefile"]}]}. 15 | -------------------------------------------------------------------------------- /src/brod_consumers_sup.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2015-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @private brod consumers supervisor 19 | %%% @end 20 | %%%============================================================================= 21 | 22 | -module(brod_consumers_sup). 23 | -behaviour(brod_supervisor3). 24 | 25 | -export([ init/1 26 | , post_init/1 27 | , start_link/0 28 | , find_consumer/3 29 | , start_consumer/4 30 | , stop_consumer/2 31 | ]). 32 | 33 | -include("brod_int.hrl"). 34 | 35 | -define(TOPICS_SUP, brod_consumers_sup). 36 | -define(PARTITIONS_SUP, brod_consumers_sup2). 37 | 38 | %% By default, restart ?PARTITIONS_SUP after a 10-seconds delay 39 | -define(DEFAULT_PARTITIONS_SUP_RESTART_DELAY, 10). 40 | 41 | %% By default, restart partition consumer worker process after a 2-seconds delay 42 | -define(DEFAULT_CONSUMER_RESTART_DELAY, 2). 
43 | 44 | %%%_* APIs ===================================================================== 45 | 46 | %% @doc Start a root consumers supervisor. 47 | -spec start_link() -> {ok, pid()}. 48 | start_link() -> 49 | brod_supervisor3:start_link(?MODULE, ?TOPICS_SUP). 50 | 51 | %% @doc Dynamically start a per-topic supervisor. 52 | -spec start_consumer(pid(), pid(), brod:topic(), brod:consumer_config()) -> 53 | {ok, pid()} | {error, any()}. 54 | start_consumer(SupPid, ClientPid, TopicName, Config) -> 55 | Spec = consumers_sup_spec(ClientPid, TopicName, Config), 56 | brod_supervisor3:start_child(SupPid, Spec). 57 | 58 | 59 | %% @doc Dynamically stop a per-topic supervisor. 60 | -spec stop_consumer(pid(), brod:topic()) -> ok | {error, any()}. 61 | stop_consumer(SupPid, TopicName) -> 62 | brod_supervisor3:terminate_child(SupPid, TopicName), 63 | brod_supervisor3:delete_child(SupPid, TopicName). 64 | 65 | %% @doc Find a brod_consumer process pid running under ?PARTITIONS_SUP 66 | -spec find_consumer(pid(), brod:topic(), brod:partition()) -> 67 | {ok, pid()} | {error, Reason} when 68 | Reason :: {consumer_not_found, brod:topic()} 69 | | {consumer_not_found, brod:topic(), brod:partition()} 70 | | {consumer_down, any()}. 71 | find_consumer(SupPid, Topic, Partition) -> 72 | case brod_supervisor3:find_child(SupPid, Topic) of 73 | [] -> 74 | %% no such topic worker started, 75 | %% check sys.config or brod:start_link_client args 76 | {error, {consumer_not_found, Topic}}; 77 | [PartitionsSupPid] -> 78 | try 79 | case brod_supervisor3:find_child(PartitionsSupPid, Partition) of 80 | [] -> 81 | %% no such partition? 82 | {error, {consumer_not_found, Topic, Partition}}; 83 | [Pid] -> 84 | {ok, Pid} 85 | end 86 | catch exit : {Reason, _} -> 87 | {error, {consumer_down, Reason}} 88 | end 89 | end. 90 | 91 | %% @doc brod_supervisor3 callback. 92 | init(?TOPICS_SUP) -> 93 | {ok, {{one_for_one, 0, 1}, []}}; 94 | init({?PARTITIONS_SUP, _ClientPid, _Topic, _Config}) -> 95 | post_init. 
96 | 97 | post_init({?PARTITIONS_SUP, ClientPid, Topic, Config}) -> 98 | %% spawn consumer process for every partition 99 | %% in a topic if partitions are not set explicitly 100 | %% in the config 101 | %% TODO: make it dynamic when consumer groups API is ready 102 | case get_partitions(ClientPid, Topic, Config) of 103 | {ok, Partitions} -> 104 | Children = [ consumer_spec(ClientPid, Topic, Partition, Config) 105 | || Partition <- Partitions ], 106 | {ok, {{one_for_one, 0, 1}, Children}}; 107 | Error -> 108 | Error 109 | end; 110 | post_init(_) -> 111 | ignore. 112 | 113 | get_partitions(ClientPid, Topic, Config) -> 114 | case proplists:get_value(partitions, Config, []) of 115 | [] -> 116 | get_all_partitions(ClientPid, Topic); 117 | [_|_] = List -> 118 | {ok, List} 119 | end. 120 | 121 | get_all_partitions(ClientPid, Topic) -> 122 | case brod_client:get_partitions_count(ClientPid, Topic) of 123 | {ok, PartitionsCnt} -> 124 | {ok, lists:seq(0, PartitionsCnt - 1)}; 125 | {error, _} = Error -> 126 | Error 127 | end. 128 | 129 | consumers_sup_spec(ClientPid, TopicName, Config0) -> 130 | DelaySecs = proplists:get_value(topic_restart_delay_seconds, Config0, 131 | ?DEFAULT_PARTITIONS_SUP_RESTART_DELAY), 132 | Config = proplists:delete(topic_restart_delay_seconds, Config0), 133 | Args = [?MODULE, {?PARTITIONS_SUP, ClientPid, TopicName, Config}], 134 | { _Id = TopicName 135 | , _Start = {brod_supervisor3, start_link, Args} 136 | , _Restart = {permanent, DelaySecs} 137 | , _Shutdown = infinity 138 | , _Type = supervisor 139 | , _Module = [?MODULE] 140 | }. 
141 | 142 | consumer_spec(ClientPid, Topic, Partition, Config0) -> 143 | DelaySecs = proplists:get_value(partition_restart_delay_seconds, Config0, 144 | ?DEFAULT_CONSUMER_RESTART_DELAY), 145 | Config = proplists:delete(partition_restart_delay_seconds, Config0), 146 | Args = [ClientPid, Topic, Partition, Config], 147 | { _Id = Partition 148 | , _Start = {brod_consumer, start_link, Args} 149 | , _Restart = {transient, DelaySecs} %% restart only when not normal exit 150 | , _Shutdown = 5000 151 | , _Type = worker 152 | , _Module = [brod_consumer] 153 | }. 154 | 155 | %%%_* Internal Functions ======================================================= 156 | 157 | %%%_* Emacs ==================================================================== 158 | %%% Local Variables: 159 | %%% allout-layout: t 160 | %%% erlang-indent-level: 2 161 | %%% End: 162 | -------------------------------------------------------------------------------- /src/brod_group_member.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2016-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 
15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @doc 19 | %%% Implement `brod_group_member' behaviour callbacks to allow a 20 | %%% process to act as a group member without having to deal with Kafka 21 | %%% group protocol details. A typical workflow: 22 | %%% 23 | %%% 1. Spawn a group coordinator by calling 24 | %%% {@link brod_group_coordinator:start_link/6}. 25 | %%% 26 | %%% 2. Subscribe to partitions received in the assignments from 27 | %%% `assignments_received/4' callback. 28 | %%% 29 | %%% 3. Receive messages from the assigned partitions (delivered by 30 | %%% the partition workers (the pollers) implemented in `brod_consumer'). 31 | %%% 32 | %%% 4. Unsubscribe from all previously subscribed partitions when 33 | %%% `assignments_revoked/1' is called. 34 | %%% 35 | %%% For group members that commit offsets to Kafka, do: 36 | %%% 37 | %%% 1. Call {@link brod_group_coordinator:ack/5}. to acknowledge successful 38 | %%% consumption of the messages. Group coordinator will commit the 39 | %%% acknowledged offsets at configured interval. 40 | %%% 41 | %%% 2. Call {@link brod_group_coordinator:commit_offsets/2} 42 | %%% to force an immediate offset commit if necessary. 43 | %%% 44 | %%% For group members that manage offsets locally, do: 45 | %%% 46 | %%% 1. Implement the `get_committed_offsets/2' callback. 47 | %%% This callback is evaluated every time when new assignments are received. 48 | %%% @end 49 | %%%============================================================================= 50 | 51 | -module(brod_group_member). 52 | 53 | -include("brod_int.hrl"). 54 | 55 | -optional_callbacks([assign_partitions/3, 56 | user_data/1 57 | ]). 58 | 59 | %% Call the callback module to initialize assignments. 60 | %% NOTE: This function is called only when `offset_commit_policy' is 61 | %% `consumer_managed' in group config. 62 | %% see brod_group_coordinator:start_link/6. 
for more group config details 63 | %% NOTE: The committed offsets should be the offsets for successfully processed 64 | %% (acknowledged) messages, not the begin-offset to start fetching from. 65 | -callback get_committed_offsets(pid(), [{brod:topic(), brod:partition()}]) -> 66 | {ok, [{{brod:topic(), brod:partition()}, brod:offset()}]}. 67 | 68 | %% Called when the member is elected as the consumer group leader. 69 | %% The first element in the group member list is ensured to be the leader. 70 | %% NOTE: this function is called only when 'partition_assignment_strategy' is 71 | %% 'callback_implemented' in group config. 72 | %% see brod_group_coordinator:start_link/6. for more group config details. 73 | -callback assign_partitions(pid(), [brod:group_member()], 74 | [{brod:topic(), brod:partition()}]) -> 75 | [{brod:group_member_id(), 76 | [brod:partition_assignment()]}]. 77 | 78 | %% Called when assignments are received from group leader. 79 | %% the member process should now call brod:subscribe/5 80 | %% to start receiving message from kafka. 81 | -callback assignments_received(pid(), brod:group_member_id(), 82 | brod:group_generation_id(), 83 | brod:received_assignments()) -> ok. 84 | 85 | %% Called before group re-balancing, the member should call 86 | %% brod:unsubscribe/3 to unsubscribe from all currently subscribed partitions. 87 | -callback assignments_revoked(pid()) -> ok. 88 | 89 | %% Called when making join request. This metadata is to let group leader know 90 | %% more details about the member. e.g. its location and or capacity etc. 91 | %% so that leader can make smarter decisions when assigning partitions to it. 92 | -callback user_data(pid()) -> binary(). 
93 | 94 | %%%_* Emacs ==================================================================== 95 | %%% Local Variables: 96 | %%% allout-layout: t 97 | %%% erlang-indent-level: 2 98 | %%% End: 99 | -------------------------------------------------------------------------------- /src/brod_group_subscriber_worker.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2019-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%% @private 18 | -module(brod_group_subscriber_worker). 19 | 20 | -behaviour(brod_topic_subscriber). 21 | 22 | -include("brod_int.hrl"). 23 | 24 | %% brod_topic_subscriber callbacks 25 | -export([init/2, handle_message/3, handle_info/2, terminate/2]). 26 | 27 | -type start_options() :: 28 | #{ group_id := brod:group_id() 29 | , topic := brod:topic() 30 | , partition := brod:partition() 31 | , begin_offset := brod:offset() | ?undef 32 | , cb_module := module() 33 | , cb_config := term() 34 | , commit_fun := brod_group_subscriber_v2:commit_fun() 35 | , ack_fun := brod_group_subscriber_v2:ack_fun() 36 | }. 37 | 38 | -record(state, 39 | { start_options :: start_options() 40 | , cb_module :: module() 41 | , cb_state :: term() 42 | , commit_fun :: brod_group_subscriber_v2:commit_fun() 43 | }). 44 | 45 | -export_type([start_options/0]). 
46 | 47 | %%%=================================================================== 48 | %%% brod_topic_subscriber callbacks 49 | %%%=================================================================== 50 | 51 | init(Topic, StartOpts) -> 52 | #{ cb_module := CbModule 53 | , cb_config := CbConfig 54 | , partition := Partition 55 | , begin_offset := BeginOffset 56 | , commit_fun := CommitFun 57 | } = StartOpts, 58 | InitInfo = maps:with( [topic, partition, group_id, commit_fun, ack_fun] 59 | , StartOpts 60 | ), 61 | ?BROD_LOG_INFO("Starting group_subscriber_worker: ~p~n" 62 | "Offset: ~p~nPid: ~p~n" 63 | , [InitInfo, BeginOffset, self()] 64 | ), 65 | {ok, CbState} = CbModule:init(InitInfo, CbConfig), 66 | State = #state{ start_options = StartOpts 67 | , cb_module = CbModule 68 | , cb_state = CbState 69 | , commit_fun = CommitFun 70 | }, 71 | CommittedOffsets = resolve_committed_offsets(Topic, Partition, BeginOffset), 72 | {ok, CommittedOffsets, State}. 73 | 74 | 75 | handle_message(_Partition, Msg, State) -> 76 | #state{ cb_module = CbModule 77 | , cb_state = CbState 78 | , commit_fun = Commit 79 | } = State, 80 | case CbModule:handle_message(Msg, CbState) of 81 | {ok, commit, NewCbState} -> 82 | NewState = State#state{cb_state = NewCbState}, 83 | Commit(get_last_offset(Msg)), 84 | {ok, ack, NewState}; 85 | {ok, ack, NewCbState} -> 86 | %% Unlike the old group_subscriber here `ack' means just `ack' 87 | %% without commit 88 | NewState = State#state{cb_state = NewCbState}, 89 | {ok, ack, NewState}; 90 | {ok, NewCbState} -> 91 | NewState = State#state{cb_state = NewCbState}, 92 | {ok, NewState} 93 | end. 94 | 95 | handle_info(Info, #state{cb_module = CbModule , cb_state = CbState} = State) -> 96 | %% Any unhandled messages are forwarded to the callback module to 97 | %% support arbitrary message-passing. 98 | %% Only the {noreply, State} return value is supported. 
99 | case brod_utils:optional_callback(CbModule, handle_info, [Info, CbState], {noreply, CbState}) of 100 | {noreply, NewCbState} -> 101 | {noreply, State#state{cb_state = NewCbState}} 102 | end. 103 | 104 | terminate(Reason, #state{cb_module = CbModule, cb_state = State}) -> 105 | brod_utils:optional_callback(CbModule, terminate, [Reason, State], ok). 106 | 107 | %%%=================================================================== 108 | %%% Internal functions 109 | %%%=================================================================== 110 | 111 | -spec get_last_offset(brod:message() | brod:message_set()) -> 112 | brod:offset(). 113 | get_last_offset(#kafka_message{offset = Offset}) -> 114 | Offset; 115 | get_last_offset(#kafka_message_set{messages = Messages}) -> 116 | #kafka_message{offset = Offset} = lists:last(Messages), 117 | Offset. 118 | 119 | 120 | resolve_committed_offsets(_T, _P, ?undef) -> 121 | %% the default begin offset in consumer config will be used 122 | []; 123 | resolve_committed_offsets(_T, Partition, Offset) when ?IS_SPECIAL_OFFSET(Offset) -> 124 | [{Partition, Offset}]; 125 | resolve_committed_offsets(_T, Partition, Offset) when is_integer(Offset) andalso Offset >= 0 -> 126 | %% Note: brod_topic_subscriber expects 127 | %% _acked_ offset rather than _begin_ offset 128 | %% in `init' callback return. In order to get 129 | %% begin offset it increments the value, 130 | %% which we don't want, hence decrement. 131 | [{Partition, Offset - 1}]; 132 | resolve_committed_offsets(Topic, Partition, Offset) -> 133 | ?BROD_LOG_WARNING("Discarded invalid committed offset ~p for: ~s:~p~n", 134 | [Offset, Topic, Partition]), 135 | []. 
136 | -------------------------------------------------------------------------------- /src/brod_kafka_apis.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2017-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %% @private 18 | %% Version ranges are cached per host and per connection pid in ets 19 | 20 | -module(brod_kafka_apis). 21 | 22 | -export([ default_version/1 23 | , pick_version/2 24 | , supported_versions/0 25 | , start_link/0 26 | , stop/0 27 | ]). 28 | 29 | -export([ code_change/3 30 | , handle_call/3 31 | , handle_cast/2 32 | , handle_info/2 33 | , init/1 34 | , terminate/2 35 | ]). 36 | 37 | -include("brod_int.hrl"). 38 | 39 | -export_type([ api/0 40 | , vsn/0 41 | ]). 42 | 43 | -define(SERVER, ?MODULE). 44 | -define(ETS, ?MODULE). 45 | 46 | -record(state, {}). 47 | 48 | -type vsn() :: kpro:vsn(). 49 | -type range() :: {vsn(), vsn()}. 50 | -type api() :: kpro:api(). 51 | -type conn() :: kpro:connection(). 52 | 53 | %% @doc Start process. 54 | -spec start_link() -> {ok, pid()}. 55 | start_link() -> 56 | gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). 57 | 58 | -spec stop() -> ok. 59 | stop() -> 60 | gen_server:call(?SERVER, stop, infinity). 61 | 62 | %% @doc Get default supported version for the given API. 63 | -spec default_version(api()) -> vsn(). 
64 | default_version(API) -> 65 | {Min, _Max} = supported_versions(API), 66 | Min. 67 | 68 | %% @doc Pick API version for the given API. 69 | -spec pick_version(conn(), api()) -> vsn(). 70 | pick_version(Conn, API) -> 71 | do_pick_version(Conn, API, supported_versions(API)). 72 | 73 | %%%_* gen_server callbacks ===================================================== 74 | 75 | init([]) -> 76 | ?ETS = ets:new(?ETS, [named_table, public]), 77 | {ok, #state{}}. 78 | 79 | handle_info({'DOWN', _Mref, process, Conn, _Reason}, State) -> 80 | _ = ets:delete(?ETS, Conn), 81 | {noreply, State}; 82 | handle_info(Info, State) -> 83 | ?BROD_LOG_ERROR("unknown info ~p", [Info]), 84 | {noreply, State}. 85 | 86 | handle_cast({monitor_connection, Conn}, State) -> 87 | erlang:monitor(process, Conn), 88 | {noreply, State}; 89 | handle_cast(Cast, State) -> 90 | ?BROD_LOG_ERROR("unknown cast ~p", [Cast]), 91 | {noreply, State}. 92 | 93 | handle_call(stop, From, State) -> 94 | gen_server:reply(From, ok), 95 | {stop, normal, State}; 96 | handle_call(Call, _From, State) -> 97 | {reply, {error, {unknown_call, Call}}, State}. 98 | 99 | code_change(_OldVsn, State, _Extra) -> 100 | {ok, State}. 101 | 102 | terminate(_Reason, _State) -> 103 | ok. 104 | 105 | %%%_* Internals ================================================================ 106 | 107 | -spec do_pick_version(conn(), api(), range()) -> vsn(). 108 | do_pick_version(_Conn, _API, {V, V}) -> V; 109 | do_pick_version(Conn, API, {Min, Max} = MyRange) -> 110 | case lookup_vsn_range(Conn, API) of 111 | none -> 112 | Min; %% no version received from kafka, use min 113 | {KproMin, KproMax} = Range when KproMin > Max orelse KproMax < Min -> 114 | erlang:error({unsupported_vsn_range, API, MyRange, Range}); 115 | {_, KproMax} -> 116 | min(KproMax, Max) %% try to use highest version 117 | end. 118 | 119 | %% Lookup API from cache, return 'none' if not found. 120 | -dialyzer([{nowarn_function, [lookup_vsn_range/2]}]). 
121 | -spec lookup_vsn_range(conn(), api()) -> {vsn(), vsn()} | none. 122 | lookup_vsn_range(Conn, API) -> 123 | case ets:lookup(?ETS, Conn) of 124 | [] -> 125 | case kpro:get_api_versions(Conn) of 126 | {ok, Versions} when is_map(Versions) -> 127 | %% public ets, insert it by caller 128 | ets:insert(?ETS, {Conn, Versions}), 129 | %% tell ?SERVER to monitor the connection 130 | %% so to delete it from cache when 'DOWN' is received 131 | ok = monitor_connection(Conn), 132 | maps:get(API, Versions, none); 133 | {error, _Reason} -> 134 | none %% connection died, ignore 135 | end; 136 | [{Conn, Vsns}] -> 137 | maps:get(API, Vsns, none) 138 | end. 139 | 140 | supported_versions() -> 141 | #{ produce => {0, 7} 142 | , fetch => {0, 10} 143 | , list_offsets => {0, 2} 144 | , metadata => {0, 2} 145 | , offset_commit => {2, 2} 146 | , offset_fetch => {1, 2} 147 | , find_coordinator => {0, 0} 148 | , join_group => {0, 6} 149 | , heartbeat => {0, 4} 150 | , leave_group => {0, 4} 151 | , sync_group => {0, 0} 152 | , describe_groups => {0, 5} 153 | , list_groups => {0, 3} 154 | , create_topics => {0, 4} 155 | , delete_topics => {0, 4} 156 | }. 157 | 158 | %% Do not change range without verification. 159 | supported_versions(API) -> 160 | try 161 | maps:get(API, supported_versions()) 162 | catch 163 | _ : _ -> 164 | erlang:error({unsupported_api, API}) 165 | end. 166 | 167 | monitor_connection(Conn) -> 168 | gen_server:cast(?SERVER, {monitor_connection, Conn}). 
169 | 170 | %%%_* Emacs ==================================================================== 171 | %%% Local Variables: 172 | %%% allout-layout: t 173 | %%% erlang-indent-level: 2 174 | %%% End: 175 | -------------------------------------------------------------------------------- /src/brod_kafka_request.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2017-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %% @doc Helper functions for building request messages. 18 | -module(brod_kafka_request). 19 | 20 | -export([ create_topics/3 21 | , delete_topics/3 22 | , fetch/8 23 | , list_groups/1 24 | , list_offsets/4 25 | , join_group/2 26 | , metadata/2 27 | , offset_commit/2 28 | , offset_fetch/3 29 | , produce/7 30 | , sync_group/2 31 | ]). 32 | 33 | -include("brod_int.hrl"). 34 | 35 | -type api() :: brod_kafka_apis:api(). 36 | -type vsn() :: brod_kafka_apis:vsn(). 37 | -type topic() :: brod:topic(). 38 | -type topic_config() :: kpro:struct(). 39 | -type partition() :: brod:partition(). 40 | -type offset() :: brod:offset(). 41 | -type conn() :: kpro:connection(). 42 | 43 | 44 | %% @doc Make a produce request, If the first arg is a connection pid, call 45 | %% `brod_kafka_apis:pick_version/2' to resolve version. 
46 | -spec produce(conn() | vsn(), topic(), partition(), 47 | kpro:batch_input(), integer(), integer(), 48 | brod:compression()) -> kpro:req(). 49 | produce(MaybePid, Topic, Partition, BatchInput, 50 | RequiredAcks, AckTimeout, Compression) -> 51 | Vsn = pick_version(produce, MaybePid), 52 | kpro_req_lib:produce(Vsn, Topic, Partition, BatchInput, 53 | #{ required_acks => RequiredAcks 54 | , ack_timeout => AckTimeout 55 | , compression => Compression 56 | }). 57 | 58 | %% @doc Make a create_topics request. 59 | -spec create_topics(vsn() | conn(), [topic_config()], #{timeout => kpro:int32(), 60 | validate_only => boolean()}) -> kpro:req(). 61 | create_topics(Connection, TopicConfigs, RequestConfigs) 62 | when is_pid(Connection) -> 63 | Vsn = brod_kafka_apis:pick_version(Connection, create_topics), 64 | create_topics(Vsn, TopicConfigs, RequestConfigs); 65 | create_topics(Vsn, TopicConfigs, RequestConfigs) -> 66 | kpro_req_lib:create_topics(Vsn, TopicConfigs, RequestConfigs). 67 | 68 | %% @doc Make a delete_topics request. 69 | -spec delete_topics(vsn() | conn(), [topic()], pos_integer()) -> kpro:req(). 70 | delete_topics(Connection, Topics, Timeout) when is_pid(Connection) -> 71 | Vsn = brod_kafka_apis:pick_version(Connection, delete_topics), 72 | delete_topics(Vsn, Topics, Timeout); 73 | delete_topics(Vsn, Topics, Timeout) -> 74 | kpro_req_lib:delete_topics(Vsn, Topics, #{timeout => Timeout}). 75 | 76 | %% @doc Make a fetch request, If the first arg is a connection pid, call 77 | %% `brod_kafka_apis:pick_version/2' to resolve version. 78 | -spec fetch(conn(), topic(), partition(), offset(), 79 | kpro:wait(), kpro:count(), kpro:count(), 80 | kpro:isolation_level()) -> kpro:req(). 
81 | fetch(Pid, Topic, Partition, Offset, 82 | WaitTime, MinBytes, MaxBytes, IsolationLevel) -> 83 | Vsn = pick_version(fetch, Pid), 84 | kpro_req_lib:fetch(Vsn, Topic, Partition, Offset, 85 | #{ max_wait_time => WaitTime 86 | , min_bytes => MinBytes 87 | , max_bytes => MaxBytes 88 | , isolation_level => IsolationLevel 89 | }). 90 | 91 | %% @doc Make a `list_offsets' request message for offset resolution. 92 | %% In kafka protocol, -2 and -1 are semantic 'time' to request for 93 | %% 'earliest' and 'latest' offsets. 94 | %% In brod implementation, -2, -1, 'earliest' and 'latest' 95 | %% are semantic 'offset', this is why often a variable named 96 | %% Offset is used as the Time argument. 97 | -spec list_offsets(conn(), topic(), partition(), brod:offset_time()) -> 98 | kpro:req(). 99 | list_offsets(Connection, Topic, Partition, TimeOrSemanticOffset) -> 100 | Time = ensure_integer_offset_time(TimeOrSemanticOffset), 101 | Vsn = pick_version(list_offsets, Connection), 102 | kpro_req_lib:list_offsets(Vsn, Topic, Partition, Time). 103 | 104 | %% @doc Make a metadata request. 105 | -spec metadata(vsn() | conn(), all | [topic()]) -> kpro:req(). 106 | metadata(Connection, Topics) when is_pid(Connection) -> 107 | Vsn = brod_kafka_apis:pick_version(Connection, metadata), 108 | metadata(Vsn, Topics); 109 | metadata(Vsn, Topics) -> 110 | kpro_req_lib:metadata(Vsn, Topics). 111 | 112 | %% @doc Make a offset fetch request. 113 | %% NOTE: empty topics list only works for kafka 0.10.2.0 or later 114 | -spec offset_fetch(conn(), brod:group_id(), Topics) -> kpro:req() 115 | when Topics :: [{topic(), [partition()]}]. 
116 | offset_fetch(Connection, GroupId, Topics0) -> 117 | Topics = 118 | lists:map( 119 | fun({Topic, Partitions}) -> 120 | [ {name, Topic} 121 | , {partition_indexes, Partitions} 122 | ] 123 | end, Topics0), 124 | Body = [ {group_id, GroupId} 125 | , {topics, case Topics of 126 | [] -> ?kpro_null; 127 | _ -> Topics 128 | end} 129 | ], 130 | Vsn = pick_version(offset_fetch, Connection), 131 | kpro:make_request(offset_fetch, Vsn, Body). 132 | 133 | %% @doc Make a `list_groups' request. 134 | -spec list_groups(conn()) -> kpro:req(). 135 | list_groups(Connection) -> 136 | Vsn = pick_version(list_groups, Connection), 137 | kpro:make_request(list_groups, Vsn, []). 138 | 139 | %% @doc Make a `join_group' request. 140 | -spec join_group(conn(), kpro:struct()) -> kpro:req(). 141 | join_group(Conn, Fields) -> 142 | make_req(join_group, Conn, Fields). 143 | 144 | %% @doc Make a `sync_group' request. 145 | -spec sync_group(conn(), kpro:struct()) -> kpro:req(). 146 | sync_group(Conn, Fields) -> 147 | make_req(sync_group, Conn, Fields). 148 | 149 | %% @doc Make a `offset_commit' request. 150 | -spec offset_commit(conn(), kpro:struct()) -> kpro:req(). 151 | offset_commit(Conn, Fields) -> 152 | make_req(offset_commit, Conn, Fields). 153 | 154 | %%%_* Internal Functions ======================================================= 155 | 156 | make_req(API, Conn, Fields) when is_pid(Conn) -> 157 | Vsn = pick_version(API, Conn), 158 | make_req(API, Vsn, Fields); 159 | make_req(API, Vsn, Fields) -> 160 | kpro:make_request(API, Vsn, Fields). 161 | 162 | -spec pick_version(api(), pid()) -> vsn(). 163 | pick_version(_API, Vsn) when is_integer(Vsn) -> Vsn; 164 | pick_version(API, Connection) when is_pid(Connection) -> 165 | brod_kafka_apis:pick_version(Connection, API); 166 | pick_version(API, _) -> 167 | brod_kafka_apis:default_version(API). 168 | 169 | -spec ensure_integer_offset_time(brod:offset_time()) -> integer(). 
170 | ensure_integer_offset_time(?OFFSET_EARLIEST) -> -2; 171 | ensure_integer_offset_time(?OFFSET_LATEST) -> -1; 172 | ensure_integer_offset_time(T) when is_integer(T) -> T. 173 | 174 | %%%_* Emacs ==================================================================== 175 | %%% Local Variables: 176 | %%% allout-layout: t 177 | %%% erlang-indent-level: 2 178 | %%% End: 179 | -------------------------------------------------------------------------------- /src/brod_producers_sup.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2015-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @private brod producers supervisor 19 | %%% @end 20 | %%%============================================================================= 21 | 22 | -module(brod_producers_sup). 23 | -behaviour(brod_supervisor3). 24 | 25 | -export([ init/1 26 | , post_init/1 27 | , start_link/0 28 | , find_producer/3 29 | , start_producer/4 30 | , stop_producer/2 31 | ]). 32 | 33 | -include("brod_int.hrl"). 34 | 35 | -define(TOPICS_SUP, brod_producers_sup). 36 | -define(PARTITIONS_SUP, brod_producers_sup2). 37 | 38 | %% Minimum delay seconds to work with brod_supervisor3 39 | -define(MIN_SUPERVISOR3_DELAY_SECS, 1). 

%% By default, restart ?PARTITIONS_SUP after a 10-seconds delay
-define(DEFAULT_PARTITIONS_SUP_RESTART_DELAY, 10).

%% By default, restart partition producer worker process after a 5-seconds delay
-define(DEFAULT_PRODUCER_RESTART_DELAY, 5).

%%%_* APIs =====================================================================

%% @doc Start a root producers supervisor.
%% For more details: @see brod_producer:start_link/4
%% @end
-spec start_link() -> {ok, pid()}.
start_link() ->
  brod_supervisor3:start_link(?MODULE, ?TOPICS_SUP).

%% @doc Dynamically start a per-topic supervisor
-spec start_producer(pid(), pid(), brod:topic(), brod:producer_config()) ->
        {ok, pid()} | {error, any()}.
start_producer(SupPid, ClientPid, TopicName, Config) ->
  Spec = producers_sup_spec(ClientPid, TopicName, Config),
  brod_supervisor3:start_child(SupPid, Spec).

%% @doc Dynamically stop a per-topic supervisor.
%% The result of terminating the child is ignored; the result of
%% deleting its child spec is returned.
-spec stop_producer(pid(), brod:topic()) -> ok | {error, any()}.
stop_producer(SupPid, TopicName) ->
  _ = brod_supervisor3:terminate_child(SupPid, TopicName),
  brod_supervisor3:delete_child(SupPid, TopicName).

%% @doc Find a brod_producer process pid running under ?PARTITIONS_SUP.
-spec find_producer(pid(), brod:topic(), brod:partition()) ->
        {ok, pid()} | {error, Reason} when
    Reason :: {producer_not_found, brod:topic()}
            | {producer_not_found, brod:topic(), brod:partition()}
            | {producer_down, any()}.
find_producer(SupPid, Topic, Partition) ->
  case brod_supervisor3:find_child(SupPid, Topic) of
    [] ->
      %% no such topic worker started,
      %% check sys.config or brod:start_link_client args
      {error, {producer_not_found, Topic}};
    [PartitionsSupPid] ->
      try
        case brod_supervisor3:find_child(PartitionsSupPid, Partition) of
          [] ->
            %% no such partition?
            {error, {producer_not_found, Topic, Partition}};
          [Pid] ->
            {ok, Pid}
        end
      catch exit : {Reason, _} ->
        %% the partitions-level supervisor exited while being queried
        {error, {producer_down, Reason}}
      end
  end.

%% @doc brod_supervisor3 callback.
%% The topics-level supervisor starts with no children; per-topic
%% supervisors are added dynamically via start_producer/4.
%% The partitions-level supervisor defers child creation to
%% post_init/1 because the partition count must be fetched first.
init(?TOPICS_SUP) ->
  {ok, {{one_for_one, 0, 1}, []}};
init({?PARTITIONS_SUP, _ClientPid, _Topic, _Config}) ->
  post_init.

%% @doc brod_supervisor3 callback.
%% Starts one brod_producer worker per partition of the topic.
post_init({?PARTITIONS_SUP, ClientPid, Topic, Config}) ->
  case brod_client:get_partitions_count(ClientPid, Topic) of
    {ok, PartitionsCnt} ->
      Children = [ producer_spec(ClientPid, Topic, Partition, Config)
                 || Partition <- lists:seq(0, PartitionsCnt - 1) ],
      %% Producer may crash in case of exception in case of network failure,
      %% or error code received in produce response (e.g. leader transition)
      %% In any case, restart right away will very likely fail again.
      %% Hence set MaxR=0 here to cool-down for a configurable N-seconds
      %% before supervisor tries to restart it.
      {ok, {{one_for_one, 0, 1}, Children}};
    {error, Reason} ->
      {error, Reason}
  end.

%% Child spec for a per-topic (partitions-level) supervisor.
%% The topic_restart_delay_seconds option is consumed here and not
%% passed further down.
producers_sup_spec(ClientPid, TopicName, Config0) ->
  {Config, DelaySecs} =
    take_delay_secs(Config0, topic_restart_delay_seconds,
                    ?DEFAULT_PARTITIONS_SUP_RESTART_DELAY),
  Args = [?MODULE, {?PARTITIONS_SUP, ClientPid, TopicName, Config}],
  { _Id = TopicName
  , _Start = {brod_supervisor3, start_link, Args}
  , _Restart = {permanent, DelaySecs}
  , _Shutdown = infinity
  , _Type = supervisor
  , _Module = [?MODULE]
  }.
128 | 129 | producer_spec(ClientPid, Topic, Partition, Config0) -> 130 | {Config, DelaySecs} = 131 | take_delay_secs(Config0, partition_restart_delay_seconds, 132 | ?DEFAULT_PRODUCER_RESTART_DELAY), 133 | Args = [ClientPid, Topic, Partition, Config], 134 | { _Id = Partition 135 | , _Start = {brod_producer, start_link, Args} 136 | , _Restart = {permanent, DelaySecs} 137 | , _Shutdown = 5000 138 | , _Type = worker 139 | , _Module = [brod_producer] 140 | }. 141 | 142 | %%%_* Internal Functions ======================================================= 143 | 144 | -spec take_delay_secs(brod:producer_config(), atom(), integer()) -> 145 | {brod:producer_config(), integer()}. 146 | take_delay_secs(Config, Name, DefaultValue) -> 147 | Secs = 148 | case proplists:get_value(Name, Config) of 149 | N when is_integer(N) andalso N >= ?MIN_SUPERVISOR3_DELAY_SECS -> 150 | N; 151 | _ -> 152 | DefaultValue 153 | end, 154 | {proplists:delete(Name, Config), Secs}. 155 | 156 | %%%_* Emacs ==================================================================== 157 | %%% Local Variables: 158 | %%% allout-layout: t 159 | %%% erlang-indent-level: 2 160 | %%% End: 161 | -------------------------------------------------------------------------------- /src/brod_sup.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2015-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @private brod supervisor 19 | %%% 20 | %%% Hierarchy: 21 | %%% brod_sup (one_for_one) 22 | %%% | 23 | %%% +--client_1 24 | %%% | | 25 | %%% | +-- producers_sup level 1 26 | %%% | | | 27 | %%% | | +-- producers_sup level 2 for topic 1 28 | %%% | | | | 29 | %%% | | | +-- partition_0_worker 30 | %%% | | | | 31 | %%% | | | +-- partition_1_worker 32 | %%% | | | |... 33 | %%% | | | 34 | %%% | | +-- producers_sup level 2 for topic 2 35 | %%% | | | |... 36 | %%% | | |... 37 | %%% | | 38 | %%% | +-- consumers_sup level 1 39 | %%% | | 40 | %%% | +-- consumer_sup level 2 for topic 1 41 | %%% | | | 42 | %%% | | +-- partition_0_worker 43 | %%% | | | 44 | %%% | | +-- partition_1_worker 45 | %%% | | |... 46 | %%% | | 47 | %%% | +-- consumer_sup level 2 for topic 2 48 | %%% | | |... 49 | %%% | |... 50 | %%% | 51 | %%% +-- client_2 52 | %%% | |... 53 | %%% |... 54 | %%% 55 | %%% @end 56 | %%%============================================================================= 57 | 58 | -module(brod_sup). 59 | -behaviour(brod_supervisor3). 60 | 61 | -export([ init/1 62 | , post_init/1 63 | , start_link/0 64 | , start_client/3 65 | , stop_client/1 66 | , find_client/1 67 | ]). 68 | 69 | -include("brod_int.hrl"). 70 | 71 | -define(SUP, ?MODULE). 72 | 73 | %% By default, restart client process after a 10-seconds delay 74 | -define(DEFAULT_CLIENT_RESTART_DELAY, 10). 75 | 76 | %%%_* APIs ===================================================================== 77 | 78 | %% @doc Start root supervisor. 79 | %% 80 | %% To start permanent clients add 'clients' section in sys.config. 81 | %% So far only 'endpoints' config is mandatory, other options are optional. 
82 | %% 83 | %% ``` 84 | %% [ 85 | %% %% Permanent clients 86 | %% { clients 87 | %% , [ {client_1 %% unique client ID 88 | %% , [ {endpoints, [{"localhost", 9092}]} 89 | %% , {restart_delay_seconds, 10} 90 | %% , {get_metadata_timeout_seconds, 5} 91 | %% , {reconnect_cool_down_seconds, 1} 92 | %% , {allow_topic_auto_creation, true} 93 | %% , {auto_start_producers, false} 94 | %% , {default_producer_config, []} 95 | %% ] 96 | %% } 97 | %% ] 98 | %% } 99 | %% ]. 100 | %% ''' 101 | %% @end 102 | -spec start_link() -> {ok, pid()}. 103 | start_link() -> 104 | brod_supervisor3:start_link({local, ?SUP}, ?MODULE, clients_sup). 105 | 106 | -spec start_client([brod:endpoint()], 107 | brod:client_id(), 108 | brod:client_config()) -> ok | {error, any()}. 109 | start_client(Endpoints, ClientId, Config) -> 110 | ClientSpec = client_spec(Endpoints, ClientId, Config), 111 | case brod_supervisor3:start_child(?SUP, ClientSpec) of 112 | {ok, _Pid} -> ok; 113 | Error -> Error 114 | end. 115 | 116 | -spec stop_client(brod:client_id()) -> ok | {error, any()}. 117 | stop_client(ClientId) -> 118 | _ = brod_supervisor3:terminate_child(?SUP, ClientId), 119 | brod_supervisor3:delete_child(?SUP, ClientId). 120 | 121 | -spec find_client(brod:client_id()) -> [pid()]. 122 | find_client(Client) -> 123 | brod_supervisor3:find_child(?SUP, Client). 124 | 125 | %% @doc brod_supervisor3 callback 126 | init(clients_sup) -> 127 | %% start and link it to root supervisor 128 | {ok, _} = brod_kafka_apis:start_link(), 129 | Clients = application:get_env(brod, clients, []), 130 | ClientSpecs = 131 | lists:map(fun({ClientId, Args}) -> 132 | is_atom(ClientId) orelse exit({bad_client_id, ClientId}), 133 | client_spec(ClientId, Args) 134 | end, Clients), 135 | %% A client may crash and restart due to network failure 136 | %% e.g. when none of the kafka endpoints are reachable. 137 | %% In this case, restart right away will very likely fail again. 
138 | %% Hence set MaxR=0 here to cool-down for a configurable N-seconds 139 | %% before supervisor tries to restart it. 140 | {ok, {{one_for_one, 0, 1}, ClientSpecs}}. 141 | 142 | %% @doc brod_supervisor3 callback. 143 | post_init(_) -> 144 | ignore. 145 | 146 | %%%_* Internal functions ======================================================= 147 | client_spec(ClientId, Config) -> 148 | Endpoints = proplists:get_value(endpoints, Config, []), 149 | client_spec(Endpoints, ClientId, Config). 150 | 151 | client_spec([], ClientId, _Config) -> 152 | Error = lists:flatten( 153 | io_lib:format("No endpoints found in brod client '~p' config", 154 | [ClientId])), 155 | exit(Error); 156 | client_spec(Endpoints, ClientId, Config0) -> 157 | DelaySecs = proplists:get_value(restart_delay_seconds, Config0, 158 | ?DEFAULT_CLIENT_RESTART_DELAY), 159 | Config1 = proplists:delete(restart_delay_seconds, Config0), 160 | Config = brod_utils:init_sasl_opt(Config1), 161 | StartArgs = [Endpoints, ClientId, Config], 162 | { _Id = ClientId 163 | , _Start = {brod_client, start_link, StartArgs} 164 | , _Restart = {permanent, DelaySecs} 165 | , _Shutdown = 5000 166 | , _Type = worker 167 | , _Module = [brod_client] 168 | }. 169 | 170 | %%%_* Emacs ==================================================================== 171 | %%% Local Variables: 172 | %%% allout-layout: t 173 | %%% erlang-indent-level: 2 174 | %%% End: 175 | -------------------------------------------------------------------------------- /src/brod_topic_subscriber_cb_fun.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2020-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 
6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @private 19 | %%% A wrapper module that enables backward compatible use of 20 | %%% `brod_topic_subscriber' with a fun instead of a callback module. 21 | %%% @end 22 | %%%============================================================================= 23 | -module(brod_topic_subscriber_cb_fun). 24 | 25 | -behavior(brod_topic_subscriber). 26 | 27 | -export([init/2, handle_message/3]). 28 | 29 | -include("brod_int.hrl"). 30 | 31 | %% @private This is needed to implement backward-consistent `cb_fun' 32 | %% interface. 33 | init(_Topic, #cbm_init_data{ committed_offsets = CommittedOffsets 34 | , cb_fun = CbFun 35 | , cb_data = CbState 36 | }) -> 37 | {ok, CommittedOffsets, {CbFun, CbState}}. 38 | 39 | handle_message(Partition, Msg, {CbFun, CbState0}) -> 40 | case CbFun(Partition, Msg, CbState0) of 41 | {ok, ack, CbState} -> 42 | {ok, ack, {CbFun, CbState}}; 43 | {ok, CbState} -> 44 | {ok, {CbFun, CbState}}; 45 | Err -> 46 | Err 47 | end. 48 | -------------------------------------------------------------------------------- /src/brod_transaction_processor.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2023 @axs-mvd and contributors 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 
%%% You may obtain a copy of the License at
%%%
%%%   http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%

%% @doc `brod_transaction_processor' allows the execution of a function in the context
%% of a transaction. It abstracts the usage of a group subscriber reading and writing
%% using a transaction in each fetch cycle.
%% For example, the following snippets are equivalent
%%
%%-------------------------------------------------
%%
%%function_that_does_something(Messages, ...) ->
%%  write_some_messages_into_some_topic(Messages, ...),
%%  write_some_other_messages_into_yet_another_topic(Messages, ...).
%%
%%handle_message(Topic, Partition, Messages, State) ->
%%  {ok, Tx} = brod:transaction(...)            % opens a transaction
%%  function_that_does_something(Messages, ...) % adds the writes to the transaction
%%  ok = brod:txn_add_offsets(...)              % add offsets to the transaction
%%  ok = brod:commit(Tx)                        % commit
%%  {ok, ack_no_commit, State}
%%
%%-------------------------------------------------
%%
%%brod_transaction_processor:do(
%%  fun(Transaction, Messages) ->
%%    write_some_messages_into_some_topic(Messages, ...),
%%    write_some_other_messages_into_yet_another_topic(Messages, ...)
%%  end,
%%  ...)
%%
%%-------------------------------------------------
%%
-module(brod_transaction_processor).

-include("brod.hrl").

%% Public API
-export([do/3]).

%% Group subscriber callbacks
-export([ init/2
        , handle_message/4
        , get_committed_offsets/3]).

%% Type exports
-export_type([ do_options/0
             , process_function/0]).

%%==============================================================================
%% Type declarations
%%==============================================================================

-type client() :: client_id() | pid().
-type client_id() :: atom().
-type do_options() :: #{ group_config => proplists:proplist()
                       , consumer_config => proplists:proplist()
                       , transaction_config => proplists:proplist()
                       , group_id => binary()
                       , topics => [binary()]}.
-type message_set() :: #kafka_message_set{}.
-type transaction() :: brod_transaction:transaction().

%% User callback executed for each fetched message set, inside an open
%% transaction.
-type process_function() :: fun((transaction(), message_set()) -> ok
                                | {error, any()}).

%% @doc executes the ProcessFunction within the context of a transaction.
%% Options is a map that can include
%% `group_config' as the configuration for the group subscriber.
%% `consumer_config' as the configuration for the consumer subscriber.
%% `transaction_config' transaction config.
%% `group_id' as the subscriber group id.
%% `topics' topics to fetch from.
%%
%% FizzBuzz sample:
%%
%%  fizz_buzz(N) when (N rem 15) == 0 -> "FizzBuzz"
%%  fizz_buzz(N) when (N rem 3) == 0 -> "Fizz"
%%  fizz_buzz(N) when (N rem 5) == 0 -> "Buzz";
%%  fizz_buzz(N) -> N end.
%%
%%  brod_transaction_processor:do(
%%    fun(Transaction, #kafka_message_set{ topic = _Topic
%%                                       , partition = Partition
%%                                       , messages = Messages} = _MessageSet) ->
%%      FizzBuzzed =
%%        lists:map(fun(#kafka_message{ key = Key
%%                                    , value = Value}) ->
%%                      #{ key => Key
%%                       , value => fizz_buzz(Value)}
%%                  end, Messages),
%%
%%      brod:txn_produce(Transaction,
%%                       ?OUTPUT_TOPIC,
%%                       Partition,
%%                       FizzBuzzed),
%%
%%      ok
%%    end, Client, #{ topics => [?INPUT_TOPIC]
%%                  , group_id => ?PROCESSOR_GROUP_ID}).
%%
-spec do(process_function(), client(), do_options()) -> {ok, pid()}
                                                      | {error, any()}.
do(ProcessFun, Client, Opts) ->

  Defaults = #{ group_config => [{offset_commit_policy, consumer_managed}]
                %% note that if you change the group_config you must include
                %% the above option, as it enables our fetcher to manage
                %% the offsets itself
              , consumer_config => []},

  #{ group_id := GroupId
   , topics := Topics
   , group_config := GroupConfig
   , consumer_config := ConsumerConfig} = maps:merge(Defaults, Opts),

  InitState = #{client => Client,
                process_function => ProcessFun},

  brod:start_link_group_subscriber(Client,
                                   GroupId,
                                   Topics,
                                   GroupConfig,
                                   ConsumerConfig,
                                   message_set,
                                   ?MODULE,
                                   InitState).

%%==============================================================================
%% group subscriber callbacks
%%==============================================================================

%% Build the subscriber state, generating a transactional id and
%% defaulting the transaction config unless provided in Opts.
init(GroupId, #{ client := Client
               , process_function := ProcessFun
               } = Opts) ->
  #{ tx_id := TxId
   , transaction_config := Config} =
    maps:merge(#{ tx_id => make_transactional_id()
                , transaction_config => []
                }, Opts),

  {ok, #{ client => Client
        , transaction_config => Config
        , tx_id => TxId
        , process_function => ProcessFun
        , group_id => GroupId
        }}.

%% Open a transaction, run the user callback on the message set, add
%% the message-set offsets to the transaction and commit it.
%% Returns ack_no_commit: the offsets were committed as part of the
%% transaction, so the group subscriber must not commit them again.
handle_message(Topic,
               Partition,
               #kafka_message_set{ topic = Topic
                                 , partition = Partition
                                 , messages = _Messages
                                 } = MessageSet,
               #{ process_function := ProcessFun
                , client := Client
                , tx_id := TxId
                , transaction_config := TransactionConfig
                , group_id := GroupId
                } = State) ->

  {ok, Tx} = brod:transaction(Client, TxId, TransactionConfig),
  ok = ProcessFun(Tx, MessageSet),
  ok = brod:txn_add_offsets(Tx, GroupId, offsets_to_commit(MessageSet)),
  ok = brod:commit(Tx),
  {ok, ack_no_commit, State}.

%% Fetch offsets committed to kafka for this group, filtered down to
%% the requested topic-partitions (needed because the group uses
%% consumer_managed offset commit policy).
get_committed_offsets(GroupId, TPs, #{client := Client} = State) ->
  {ok, Offsets} = brod:fetch_committed_offsets(Client, GroupId),
  TPOs =
    lists:flatmap( fun(#{name := Topic, partitions := Partitions}) ->
                       [{{Topic, Partition}, COffset} ||
                         #{ partition_index := Partition
                          , committed_offset := COffset
                          } <- Partitions,
                         lists:member({Topic, Partition}, TPs)]
                   end
                 , Offsets
                 ),
  {ok, TPOs, State}.
193 | 194 | %%============================================================================== 195 | %% Internal functions 196 | %%============================================================================== 197 | 198 | make_transactional_id() -> 199 | iolist_to_binary([atom_to_list(?MODULE), "-txn-", 200 | base64:encode(crypto:strong_rand_bytes(8))]). 201 | 202 | 203 | offsets_to_commit(#kafka_message_set{ topic = Topic 204 | , partition = Partition 205 | , messages = Messages 206 | }) -> 207 | #kafka_message{offset = Offset} = lists:last(Messages), 208 | #{{Topic, Partition} => Offset}. 209 | 210 | %%%_* Emacs ==================================================================== 211 | %%% Local Variables: 212 | %%% allout-layout: t 213 | %%% erlang-indent-level: 2 214 | %%% End: 215 | -------------------------------------------------------------------------------- /sys.config.example: -------------------------------------------------------------------------------- 1 | %% -*- mode: Erlang; fill-column: 80; -*- 2 | [{brod, 3 | [ { clients 4 | %% A client manages a set of tcp connections for all 5 | %% producers and consumers under its hierarchy. 6 | %% see brod_sup.erl for more details about supervision tree 7 | , [ { c1 8 | , [ {endpoints, [{"localhost", 9092}]} 9 | , {restart_delay_seconds, 10} 10 | , {auto_start_producers, true} 11 | , {default_producer_config, 12 | [ {topic_restart_delay_seconds, 10} 13 | , {partition_restart_delay_seconds, 2} 14 | , {required_acks, leader_only} 15 | ] 16 | } 17 | ] 18 | } 19 | , { c2 20 | , [ {endpoints, [{"localhost", 9093}]} 21 | , {restart_delay_seconds, 10} 22 | , {auto_start_producers, true} 23 | , {default_producer_config, 24 | [ {topic_restart_delay_seconds, 10} 25 | , {partition_restart_delay_seconds, 2} 26 | , {required_acks, all_isr} 27 | ] 28 | } 29 | , {ssl, 30 | [ {certfile, "client.crt"} 31 | , {keyfile, "client.key"} 32 | , {cacertfile, "ca.crt"} 33 | ]} 34 | ] 35 | } 36 | ] 37 | } 38 | ] 39 | }]. 
40 | -------------------------------------------------------------------------------- /test/brod_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2019-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %% @private 18 | -module(brod_SUITE). 19 | 20 | %% Test framework 21 | -export([ init_per_suite/1 22 | , end_per_suite/1 23 | , all/0 24 | , suite/0 25 | ]). 26 | 27 | %% Test cases 28 | -export([ t_create_delete_topics/1 29 | , t_delete_topics_not_found/1 30 | ]). 31 | 32 | -include_lib("common_test/include/ct.hrl"). 33 | -include_lib("stdlib/include/assert.hrl"). 34 | 35 | -define(HOSTS, [{"localhost", 9092}]). 36 | -define(TOPIC, list_to_binary(atom_to_list(?MODULE))). 37 | -define(TIMEOUT, 280000). 38 | 39 | %%%_* ct callbacks ============================================================= 40 | 41 | suite() -> [{timetrap, {minutes, 5}}]. 42 | 43 | init_per_suite(Config) -> 44 | case kafka_test_helper:kafka_version() of 45 | {0, 9} -> 46 | {skip, "no_topic_manaegment_apis"}; 47 | _ -> 48 | {ok, _} = application:ensure_all_started(brod), 49 | Config 50 | end. 51 | 52 | end_per_suite(_Config) -> 53 | ok. 54 | 55 | all() -> [F || {F, _A} <- module_info(exports), 56 | case atom_to_list(F) of 57 | "t_" ++ _ -> true; 58 | _ -> false 59 | end]. 
60 | 61 | %%%_* Test functions =========================================================== 62 | 63 | t_create_delete_topics(Config) when is_list(Config) -> 64 | Topic = iolist_to_binary(["test-topic-", integer_to_list(erlang:system_time())]), 65 | TopicConfig = [ 66 | #{ 67 | configs => [], 68 | num_partitions => 1, 69 | assignments => [], 70 | replication_factor => 1, 71 | name => Topic 72 | } 73 | ], 74 | try 75 | ?assertEqual(ok, 76 | brod:create_topics(?HOSTS, TopicConfig, #{timeout => ?TIMEOUT}, 77 | #{connect_timeout => ?TIMEOUT})) 78 | after 79 | ?assertEqual(ok, brod:delete_topics(?HOSTS, [Topic], ?TIMEOUT, 80 | #{connect_timeout => ?TIMEOUT})) 81 | end. 82 | 83 | t_delete_topics_not_found(Config) when is_list(Config) -> 84 | ?assertEqual({error, unknown_topic_or_partition}, 85 | brod:delete_topics(?HOSTS, [<<"no-such-topic">>], ?TIMEOUT, 86 | #{connect_timeout => ?TIMEOUT})). 87 | 88 | %%%_* Emacs ==================================================================== 89 | %%% Local Variables: 90 | %%% allout-layout: t 91 | %%% erlang-indent-level: 2 92 | %%% End: 93 | -------------------------------------------------------------------------------- /test/brod_cg_commits_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2017-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 
15 | %%% 16 | 17 | -module(brod_cg_commits_SUITE). 18 | 19 | %% Test framework 20 | -export([ init_per_suite/1 21 | , end_per_suite/1 22 | , common_init_per_testcase/2 23 | , common_end_per_testcase/2 24 | , suite/0 25 | ]). 26 | 27 | %% Test cases 28 | -export([ t_set_then_reset/1 29 | ]). 30 | 31 | -include("brod_test_setup.hrl"). 32 | -include_lib("snabbkaffe/include/ct_boilerplate.hrl"). 33 | -include("brod.hrl"). 34 | 35 | -define(CLIENT_ID, ?MODULE). 36 | 37 | -define(GROUP_ID, list_to_binary(atom_to_list(?MODULE))). 38 | 39 | 40 | %%%_* ct callbacks ============================================================= 41 | 42 | suite() -> [{timetrap, {seconds, 60}}]. 43 | 44 | init_per_suite(Config) -> 45 | kafka_test_helper:init_per_suite(Config). 46 | 47 | end_per_suite(_Config) -> ok. 48 | 49 | common_init_per_testcase(Case, Config0) -> 50 | Config = kafka_test_helper:common_init_per_testcase(?MODULE, Case, Config0), 51 | ok = brod:start_client(bootstrap_hosts(), ?CLIENT_ID, client_config()), 52 | Config. 53 | 54 | common_end_per_testcase(Case, Config) -> 55 | brod:stop_client(?CLIENT_ID), 56 | kafka_test_helper:common_end_per_testcase(Case, Config). 
57 | 58 | %%%_* Test cases =============================================================== 59 | 60 | t_set_then_reset(topics) -> 61 | %% Create 1 topic with 3 partitions: 62 | [{?topic(1), 3}]; 63 | t_set_then_reset(Config) when is_list(Config) -> 64 | Topic = ?topic(1), 65 | Partitions = [0, 1, 2], 66 | Offsets0 = [{0, 0}, {1, 0}, {2, 0}], 67 | ok = do_commit(Topic, Offsets0), 68 | {ok, Rsp0} = 69 | brod_utils:fetch_committed_offsets( bootstrap_hosts() 70 | , client_config() 71 | , ?GROUP_ID 72 | , [{Topic, Partitions}] 73 | ), 74 | ok = assert_offsets([{Topic, Offsets0}], Rsp0), 75 | Offsets1 = [{0, 1}, {1, 1}, {2, 1}], 76 | ok = do_commit(Topic, Offsets1), 77 | {ok, Rsp1} = 78 | brod_utils:fetch_committed_offsets( bootstrap_hosts() 79 | , client_config() 80 | , ?GROUP_ID 81 | , [{Topic, Partitions}] 82 | ), 83 | ok = assert_offsets([{Topic, Offsets1}], Rsp1), 84 | ok. 85 | 86 | %% assuming ExpectedOffsets are sorted 87 | assert_offsets(ExpectedOffsets, Rsp) -> 88 | RetrievedOffsets = transform_rsp(Rsp, []), 89 | ?assertEqual(ExpectedOffsets, RetrievedOffsets). 90 | 91 | transform_rsp([], Acc) -> 92 | lists:keysort(1, Acc); 93 | transform_rsp([Struct | Rest], Acc) -> 94 | Topic = kpro:find(name, Struct), 95 | PartitionRsp = kpro:find(partitions, Struct), 96 | Partitions = transform_rsp_partitions(PartitionRsp, []), 97 | transform_rsp(Rest, [{Topic, Partitions} | Acc]). 98 | 99 | transform_rsp_partitions([], Acc) -> 100 | lists:keysort(1, Acc); 101 | transform_rsp_partitions([Struct | Rest], Acc) -> 102 | Partition = kpro:find(partition_index, Struct), 103 | Offset = kpro:find(committed_offset, Struct), 104 | transform_rsp_partitions(Rest, [{Partition, Offset} | Acc]). 105 | 106 | do_commit(Topic, Offsets) -> 107 | Input = [{id, ?GROUP_ID}, 108 | {topic, Topic}, 109 | {offsets, Offsets}], 110 | {ok, Pid} = brod_cg_commits:start_link(?CLIENT_ID, Input), 111 | ok = brod_cg_commits:sync(Pid), 112 | ok = brod_cg_commits:stop(Pid), 113 | ok. 
114 | 115 | %%%_* Emacs ==================================================================== 116 | %%% Local Variables: 117 | %%% allout-layout: t 118 | %%% erlang-indent-level: 2 119 | %%% End: 120 | -------------------------------------------------------------------------------- /test/brod_compression_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2016-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %% @private 18 | -module(brod_compression_SUITE). 19 | 20 | %% Test framework 21 | -export([ init_per_suite/1 22 | , end_per_suite/1 23 | , init_per_testcase/2 24 | , end_per_testcase/2 25 | , all/0 26 | , suite/0 27 | ]). 28 | 29 | %% Test cases 30 | -export([ t_produce_gzip/1 31 | , t_produce_snappy/1 32 | , t_produce_lz4/1 33 | , t_produce_zstd/1 34 | , t_produce_compressed_batch_consume_from_middle_gzip/1 35 | , t_produce_compressed_batch_consume_from_middle_snappy/1 36 | , t_produce_compressed_batch_consume_from_middle_lz4/1 37 | , t_produce_compressed_batch_consume_from_middle_zstd/1 38 | ]). 39 | 40 | -include_lib("common_test/include/ct.hrl"). 41 | -include_lib("eunit/include/eunit.hrl"). 42 | -include("brod_int.hrl"). 43 | 44 | -define(HOSTS, [{"localhost", 9092}]). 45 | -define(TOPIC, list_to_binary(atom_to_list(?MODULE))). 
46 | 47 | -define(config(Name), proplists:get_value(Name, Config)). 48 | 49 | %%%_* ct callbacks ============================================================= 50 | 51 | suite() -> [{timetrap, {seconds, 30}}]. 52 | 53 | init_per_suite(Config) -> 54 | {ok, _} = application:ensure_all_started(brod), 55 | Config. 56 | 57 | end_per_suite(_Config) -> ok. 58 | 59 | init_per_testcase(Case, Config) -> 60 | Client = Case, 61 | brod:stop_client(Client), 62 | ok = start_client(?HOSTS, Client), 63 | NewConfig = [{client, Client} | Config], 64 | try 65 | ?MODULE:Case({init, NewConfig}) 66 | catch 67 | error : function_clause -> 68 | NewConfig 69 | end. 70 | 71 | end_per_testcase(Case, Config) -> 72 | try 73 | ?MODULE:Case({'end', Config}) 74 | catch 75 | error : function_clause -> 76 | ok 77 | end, 78 | brod:stop_client(?config(client)), 79 | Config. 80 | 81 | all() -> [F || {F, _A} <- module_info(exports), 82 | is_test_case(F) andalso kafka_supports_compression_in_test(F)]. 83 | 84 | is_test_case(F) -> 85 | case atom_to_list(F) of 86 | "t_" ++ _ -> true; 87 | _ -> false 88 | end. 89 | 90 | kafka_supports_compression_in_test(F) -> 91 | CompressionMinVsns = #{ 92 | "gzip" => {0, 0}, 93 | "snappy" => {0, 8}, 94 | "lz4" => {0, 10}, 95 | "zstd" => {2, 1} 96 | }, 97 | KafkaVsn = kafka_version(), 98 | lists:all( 99 | fun({Compression, MinVsn}) -> 100 | IsTestContainCompression = string:str(atom_to_list(F), Compression) > 0, 101 | not IsTestContainCompression orelse KafkaVsn >= MinVsn 102 | end, 103 | maps:to_list(CompressionMinVsns) 104 | ). 105 | 106 | %%%_* Test functions =========================================================== 107 | 108 | t_produce_gzip(Config) -> 109 | run(fun produce/1, gzip, Config). 110 | 111 | t_produce_snappy(Config) -> 112 | run(fun produce/1, snappy, Config). 113 | 114 | t_produce_lz4(Config) -> 115 | run(fun produce/1, lz4, Config). 116 | 117 | t_produce_zstd(Config) -> 118 | run(fun produce/1, zstd, Config). 
119 | 120 | t_produce_compressed_batch_consume_from_middle_gzip(Config) -> 121 | run(fun produce_compressed_batch_consume_from_middle/1, gzip, Config). 122 | 123 | t_produce_compressed_batch_consume_from_middle_snappy(Config) -> 124 | run(fun produce_compressed_batch_consume_from_middle/1, snappy, Config). 125 | 126 | t_produce_compressed_batch_consume_from_middle_lz4(Config) -> 127 | run(fun produce_compressed_batch_consume_from_middle/1, lz4, Config). 128 | 129 | t_produce_compressed_batch_consume_from_middle_zstd(Config) -> 130 | run(fun produce_compressed_batch_consume_from_middle/1, zstd, Config). 131 | 132 | %%%_* Help functions =========================================================== 133 | 134 | run(Case, Compression, Config) -> 135 | NewConfig = add_config({compression, Compression}, Config), 136 | Case(NewConfig). 137 | 138 | add_config(Config, {Op, Configs}) -> {Op, add_config(Config, Configs)}; 139 | add_config(Config, Configs) when is_list(Configs) -> [Config | Configs]. 140 | 141 | produce(Config) when is_list(Config) -> 142 | Topic = ?TOPIC, 143 | Client = ?config(client), 144 | ProducerConfig = [ {min_compression_batch_size, 0} 145 | , {compression, ?config(compression)} 146 | ], 147 | ok = brod:start_producer(Client, Topic, ProducerConfig), 148 | {K, V} = make_unique_kv(), 149 | {ok, Offset} = brod:produce_sync_offset(Client, Topic, 0, K, V), 150 | ok = brod:start_consumer(Client, Topic, [{begin_offset, Offset}]), 151 | {ok, ConsumerPid} = brod:subscribe(Client, self(), ?TOPIC, 0, []), 152 | receive 153 | {ConsumerPid, MsgSet} -> 154 | #kafka_message_set{ messages = [Message] 155 | , partition = 0 156 | } = MsgSet, 157 | ?assertEqual(K, Message#kafka_message.key), 158 | ?assertEqual(V, Message#kafka_message.value); 159 | Other -> 160 | erlang:error({unexpected, Other}) 161 | after 10000 -> 162 | ct:fail(timeout) 163 | end, 164 | ok. 
165 | 166 | produce_compressed_batch_consume_from_middle(Config) when is_list(Config) -> 167 | Topic = ?TOPIC, 168 | Client = ?config(client), 169 | BatchCount = 100, 170 | %% get the latest offset before producing the batch 171 | {ok, Offset0} = brod:resolve_offset(?HOSTS, Topic, 0, latest, 172 | client_config()), 173 | ct:pal("offset before batch: ~p", [Offset0]), 174 | ProducerConfig = [ {min_compression_batch_size, 0} 175 | , {compression, ?config(compression)} 176 | ], 177 | ok = brod:start_producer(Client, Topic, ProducerConfig), 178 | KvList = [make_unique_kv() || _ <- lists:seq(1, BatchCount)], 179 | ok = brod:produce_sync(Client, Topic, 0, <<>>, KvList), 180 | %% Get the latest offset after the batch is produced 181 | {ok, Offset1} = brod:resolve_offset(?HOSTS, Topic, 0, latest, 182 | client_config()), 183 | ct:pal("offset after batch: ~p", [Offset1]), 184 | ?assertEqual(Offset1, Offset0 + BatchCount), 185 | HalfBatch = BatchCount div 2, 186 | BatchMiddle = Offset0 + HalfBatch, 187 | %% kafka should decompress the compressed message set, 188 | %% and assign offsets to each and every messages in the batch, 189 | %% compress it back, then write to disk. fetching from an offset in 190 | %% the middle of a compressed batch will result in a full 191 | %% delivery of the compressed batch, but brod_consumer should 192 | %% filter out the ones before the requested offset. 193 | ok = brod:start_consumer(Client, Topic, [{begin_offset, BatchMiddle}]), 194 | {ok, _ConsumerPid} = brod:subscribe(Client, self(), ?TOPIC, 0, []), 195 | Messages = receive_messages(BatchCount - HalfBatch, []), 196 | Expected0 = lists:zip(lists:seq(Offset0, Offset1-1), KvList), 197 | Expected = lists:sublist(Expected0, HalfBatch+1, BatchCount-HalfBatch), 198 | lists:foreach( 199 | fun({{Offset, {K, V}}, Message}) -> 200 | ?assertMatch(#kafka_message{ offset = Offset 201 | , key = K 202 | , value = V 203 | }, Message) 204 | end, lists:zip(Expected, Messages)). 
205 | 206 | receive_messages(0, Acc) -> Acc; 207 | receive_messages(Count, Acc) when Count > 0 -> 208 | receive 209 | {_ConsumerPid, MsgSet} -> 210 | #kafka_message_set{messages = Messages} = MsgSet, 211 | receive_messages(Count - length(Messages), Acc ++ Messages); 212 | Other -> 213 | erlang:error({unexpected, Other}) 214 | after 10000 -> 215 | erlang:error(timeout) 216 | end. 217 | 218 | %% os:timestamp should be unique enough for testing 219 | make_unique_kv() -> 220 | { iolist_to_binary(["key-", make_ts_str()]) 221 | , iolist_to_binary(["val-", make_ts_str()]) 222 | }. 223 | 224 | make_ts_str() -> brod_utils:os_time_utc_str(). 225 | 226 | start_client(Hosts, ClientId) -> 227 | Config = client_config(), 228 | brod:start_client(Hosts, ClientId, Config). 229 | 230 | client_config() -> 231 | kafka_test_helper:client_config(). 232 | 233 | kafka_version() -> kafka_test_helper:kafka_version(). 234 | 235 | 236 | %%%_* Emacs ==================================================================== 237 | %%% Local Variables: 238 | %%% allout-layout: t 239 | %%% erlang-indent-level: 2 240 | %%% End: 241 | -------------------------------------------------------------------------------- /test/brod_demo_cg_collector.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2016-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 
15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @doc 19 | %%% This is a topic subscriber example 20 | %%% The subscriber subscribes to all partitions of the consumer offset topic 21 | %%% (by default __consumer_offsets), decode the messages and put the values 22 | %%% to an ETS table. 23 | %%% see kpro_consumer_group.erl for details about data schema 24 | %%% 25 | %%% This can be useful to build your own consumer lagging monitoring or 26 | %%% dashboarding tools. 27 | %%% @end 28 | %%%============================================================================= 29 | 30 | -module(brod_demo_cg_collector). 31 | 32 | -behaviour(brod_topic_subscriber). 33 | 34 | -include("brod.hrl"). 35 | 36 | -define(CLIENT, ?MODULE). 37 | -define(ETS, consumer_offsets). 38 | 39 | -export([ start/0 40 | , start/1 41 | , start/2 42 | , start/3 43 | , start/4 44 | ]). 45 | 46 | %% brod_topic_subscriber callback 47 | -export([ init/2 48 | , handle_message/3 49 | ]). 50 | 51 | -record(state, {ets}). 52 | 53 | start() -> 54 | start([{"localhost", 9092}]). 55 | start(BootstrapHosts) -> 56 | start(BootstrapHosts, ?CLIENT). 57 | start(BootstrapHosts, ClientId) -> 58 | start(BootstrapHosts, ClientId, <<"__consumer_offsets">>). 59 | start(BootstrapHosts, ClientId, CgTopic) -> 60 | start(BootstrapHosts, ClientId, CgTopic, ?ETS). 61 | 62 | start(BootstrapHosts, ClientId, CgTopic, EtsName) -> 63 | ClientConfig = [], 64 | {ok, _} = application:ensure_all_started(brod), 65 | ok = brod:start_client(BootstrapHosts, ClientId, ClientConfig), 66 | brod_topic_subscriber:start_link(ClientId, CgTopic, _Partitions = all, 67 | [{begin_offset, earliest}], 68 | ?MODULE, EtsName). 69 | 70 | init(_Topic, EtsName) -> 71 | EtsName = ets:new(EtsName, [named_table, ordered_set, public]), 72 | {ok, [], #state{ets = EtsName}}. 
73 | 74 | handle_message(_Partition, #kafka_message{key = KeyBin, value = ValueBin}, 75 | #state{ets = Ets} = State) -> 76 | {Tag, Key, Value} = kpro_consumer_group:decode(KeyBin, ValueBin), 77 | Kf = fun(K) -> {K, V} = lists:keyfind(K, 1, Key), V end, 78 | case Tag of 79 | offset -> update_ets(Ets, {Kf(group_id), Kf(name), Kf(partition_index)}, Value); 80 | group -> update_ets(Ets, Kf(group_id), Value) 81 | end, 82 | {ok, ack, State}. 83 | 84 | %%%_* Internal Functions ======================================================= 85 | 86 | update_ets(Ets, Key, _Value = []) -> ets:delete(Ets, Key); 87 | update_ets(Ets, Key, Value) -> ets:insert(Ets, {Key, Value}). 88 | 89 | %%%_* Emacs ==================================================================== 90 | %%% Local Variables: 91 | %%% allout-layout: t 92 | %%% erlang-indent-level: 2 93 | %%% End: 94 | -------------------------------------------------------------------------------- /test/brod_demo_group_subscriber_koc.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2016-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 
15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @doc 19 | %%% This is a consumer group subscriber example 20 | %%% This is called 'koc' as in kafka-offset-commit, 21 | %%% it demos a all-configs-by-default minimal implementation of a 22 | %%% consumer group subscriber which commits offsets to kafka. 23 | %%% See bootstrap/0 for more details about all prerequisite. 24 | %%% @end 25 | %%%============================================================================= 26 | 27 | -module(brod_demo_group_subscriber_koc). 28 | 29 | -behaviour(brod_group_subscriber). 30 | 31 | -export([ bootstrap/0 32 | , bootstrap/1 33 | , bootstrap/2 34 | ]). 35 | 36 | %% behaviour callbacks 37 | -export([ init/2 38 | , handle_message/4 39 | ]). 40 | 41 | -export([ message_handler_loop/3 ]). 42 | 43 | -include("brod.hrl"). 44 | -include("brod_int.hrl"). 45 | 46 | -define(PRODUCE_DELAY_SECONDS, 5). 47 | 48 | -record(callback_state, 49 | { handlers = [] :: [{{brod:topic(), brod:partition()}, pid()}] 50 | , message_type = message :: message | message_set 51 | }). 52 | 53 | %% @doc This function bootstraps everything to demo group subscribers. 54 | %% Prerequisites: 55 | %% - bootstrap docker host at {"localhost", 9092} 56 | %% - kafka topic named <<"brod-demo-group-subscriber-koc">> 57 | %% Processes to spawn: 58 | %% - A brod client 59 | %% - A producer which produces sequence numbers to each partition 60 | %% - X group subscribers, X is the number of partitions 61 | %% 62 | %% * consumed sequence numbers are printed to console 63 | %% * consumed offsets are committed to kafka (using v2 commit requests) 64 | %% 65 | %% NOTE: Here we spawn two clients in one Erlang node just to demo how 66 | %% group subscribers work. 67 | %% Please keep in mind that a group subscriber requires one dedicated 68 | %% TCP connection for group leader election, heartbeats and state syncing etc. 
69 | %% It is a good practice to limit the number of group members per Erlang 70 | %% node for each group. One group member per Erlang node should be enough 71 | %% for most of the use cases. 72 | %% A group subscriber may receive messages from all topic-partitions assigned, 73 | %% in its handle_message callback function, it may process the message 74 | %% synchronously, or dispatch it to any number of worker processes for 75 | %% concurrent processing, acks can be sent from the worker processes 76 | %% by calling brod_group_subscriber:ack/4 77 | -spec bootstrap() -> ok. 78 | bootstrap() -> 79 | bootstrap(?PRODUCE_DELAY_SECONDS). 80 | 81 | bootstrap(DelaySeconds) -> 82 | bootstrap(DelaySeconds, message). 83 | 84 | bootstrap(DelaySeconds, MessageType) -> 85 | BootstrapHosts = [{"localhost", 9092}], 86 | Topic = <<"brod-demo-group-subscriber-koc">>, 87 | {ok, _} = application:ensure_all_started(brod), 88 | 89 | %% A group ID is to be shared between the members (which often run in 90 | %% different Erlang nodes or even hosts). 91 | GroupId = <<"brod-demo-group-subscriber-koc-consumer-group">>, 92 | %% Different members may subscribe to identical or different set of topics. 93 | %% In the assignments, a member receives only the partitions from the 94 | %% subscribed topic set. 95 | TopicSet = [Topic], 96 | %% In this demo, we spawn two members in the same Erlang node. 97 | MemberClients = [ 'brod-demo-group-subscriber-koc-client-1' 98 | , 'brod-demo-group-subscriber-koc-client-2' 99 | ], 100 | ok = bootstrap_subscribers(MemberClients, BootstrapHosts, GroupId, TopicSet, 101 | MessageType), 102 | 103 | %% start one producer process for each partition to feed sequence numbers 104 | %% to kafka, then consumed by the group subscribers. 
105 | ProducerClientId = ?MODULE, 106 | ok = brod:start_client(BootstrapHosts, ProducerClientId, client_config()), 107 | ok = brod:start_producer(ProducerClientId, Topic, _ProducerConfig = []), 108 | {ok, PartitionCount} = brod:get_partitions_count(ProducerClientId, Topic), 109 | ok = spawn_producers(ProducerClientId, Topic, DelaySeconds, PartitionCount), 110 | ok. 111 | 112 | %% @doc Initialize nothing in our case. 113 | init(_GroupId, _CallbackInitArg = {ClientId, Topics, MessageType}) -> 114 | %% For demo, spawn one message handler per topic-partition. 115 | %% Depending on the use case: 116 | %% It might be enough to handle the message locally in the subscriber 117 | %% pid without dispatching to handlers. (e.g. brod_demo_group_subscriber_loc) 118 | %% Or there could be a pool of handlers if the messages can be processed 119 | %% in arbitrary order. 120 | Handlers = spawn_message_handlers(ClientId, Topics), 121 | {ok, #callback_state{handlers = Handlers, message_type = MessageType}}. 122 | 123 | %% @doc Handle one message or a message-set. 124 | handle_message(Topic, Partition, 125 | #kafka_message{} = Message, 126 | #callback_state{ handlers = Handlers 127 | , message_type = message 128 | } = State) -> 129 | process_message(Topic, Partition, Handlers, Message), 130 | %% or return {ok, ack, State} in case the message can be handled 131 | %% synchronously here without dispatching to a worker 132 | {ok, State}; 133 | handle_message(Topic, Partition, 134 | #kafka_message_set{messages = Messages} = _MessageSet, 135 | #callback_state{ handlers = Handlers 136 | , message_type = message_set 137 | } = State) -> 138 | [process_message(Topic, Partition, Handlers, Message) || Message <- Messages], 139 | {ok, State}. 
140 | 141 | %%%_* Internal Functions ======================================================= 142 | 143 | process_message(Topic, Partition, Handlers, Message) -> 144 | %% send to a worker process 145 | {_, Pid} = lists:keyfind({Topic, Partition}, 1, Handlers), 146 | Pid ! Message. 147 | 148 | bootstrap_subscribers([], _BootstrapHosts, _GroupId, _Topics, _MsgType) -> ok; 149 | bootstrap_subscribers([ClientId | Rest], BootstrapHosts, GroupId, 150 | Topics, MessageType) -> 151 | ok = brod:start_client(BootstrapHosts, ClientId, client_config()), 152 | %% commit offsets to kafka every 5 seconds 153 | GroupConfig = [{offset_commit_policy, commit_to_kafka_v2} 154 | ,{offset_commit_interval_seconds, 1} 155 | ], 156 | {ok, _Subscriber} = 157 | brod:start_link_group_subscriber( 158 | ClientId, GroupId, Topics, GroupConfig, 159 | _ConsumerConfig = [{begin_offset, earliest}], MessageType, 160 | _CallbackModule = ?MODULE, 161 | _CallbackInitArg = {ClientId, Topics, MessageType}), 162 | bootstrap_subscribers(Rest, BootstrapHosts, GroupId, Topics, MessageType). 163 | 164 | spawn_producers(ClientId, Topic, DelaySeconds, P) when is_integer(P) -> 165 | Partitions = lists:seq(0, P-1), 166 | spawn_producers(ClientId, Topic, DelaySeconds, Partitions); 167 | spawn_producers(ClientId, Topic, DelaySeconds, [Partition | Partitions]) -> 168 | erlang:spawn_link( 169 | fun() -> 170 | producer_loop(ClientId, Topic, Partition, DelaySeconds, 0) 171 | end), 172 | spawn_producers(ClientId, Topic, DelaySeconds, Partitions); 173 | spawn_producers(_ClientId, _Topic, _DelaySeconds, []) -> ok. 174 | 175 | producer_loop(ClientId, Topic, Partition, DelaySeconds, Seqno) -> 176 | KafkaValue = iolist_to_binary(integer_to_list(Seqno)), 177 | ok = brod:produce_sync(ClientId, Topic, Partition, _Key = <<>>, KafkaValue), 178 | timer:sleep(timer:seconds(DelaySeconds)), 179 | producer_loop(ClientId, Topic, Partition, DelaySeconds, Seqno+1). 180 | 181 | %% Spawn one message handler per partition. 
Some of them may sit 182 | %% idle if the partition is assigned to another group member. 183 | %% Perhaps hibernate if idle for certain minutes. 184 | %% Or even spawn dynamically in `handle_message` callback and 185 | %% `exit(normal)` when idle for long. 186 | -spec spawn_message_handlers(brod:client_id(), [brod:topic()]) -> 187 | [{{brod:topic(), brod:partition()}, pid()}]. 188 | spawn_message_handlers(_ClientId, []) -> []; 189 | spawn_message_handlers(ClientId, [Topic | Rest]) -> 190 | {ok, PartitionCount} = brod:get_partitions_count(ClientId, Topic), 191 | [{{Topic, Partition}, 192 | spawn_link(?MODULE, message_handler_loop, [Topic, Partition, self()])} 193 | || Partition <- lists:seq(0, PartitionCount-1)] ++ 194 | spawn_message_handlers(ClientId, Rest). 195 | 196 | message_handler_loop(Topic, Partition, SubscriberPid) -> 197 | receive 198 | #kafka_message{ offset = Offset 199 | , value = Value 200 | } -> 201 | Seqno = list_to_integer(binary_to_list(Value)), 202 | Now = os_time_utc_str(), 203 | ?BROD_LOG_INFO("~p ~s-~p ~s: offset:~w seqno:~w\n", 204 | [self(), Topic, Partition, Now, Offset, Seqno]), 205 | brod_group_subscriber:ack(SubscriberPid, Topic, Partition, Offset), 206 | ?MODULE:message_handler_loop(Topic, Partition, SubscriberPid) 207 | after 1000 -> 208 | ?MODULE:message_handler_loop(Topic, Partition, SubscriberPid) 209 | end. 210 | 211 | -spec os_time_utc_str() -> string(). 212 | os_time_utc_str() -> 213 | Ts = os:timestamp(), 214 | {{Y,M,D}, {H,Min,Sec}} = calendar:now_to_universal_time(Ts), 215 | {_, _, Micro} = Ts, 216 | S = io_lib:format("~4.4.0w-~2.2.0w-~2.2.0w:~2.2.0w:~2.2.0w:~2.2.0w.~6.6.0w", 217 | [Y, M, D, H, Min, Sec, Micro]), 218 | lists:flatten(S). 219 | 220 | client_config() -> 221 | kafka_test_helper:client_config(). 
222 | 223 | %%%_* Emacs ==================================================================== 224 | %%% Local Variables: 225 | %%% allout-layout: t 226 | %%% erlang-indent-level: 2 227 | %%% End: 228 | -------------------------------------------------------------------------------- /test/brod_demo_topic_subscriber.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2016-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %%%============================================================================= 18 | %%% @doc 19 | %%% This is a topic subscriber example 20 | %%% @end 21 | %%%============================================================================= 22 | 23 | -module(brod_demo_topic_subscriber). 24 | -behaviour(brod_topic_subscriber). 25 | 26 | %% behaviour callbacks 27 | -export([ init/2 28 | , handle_message/3 29 | ]). 30 | 31 | -export([ bootstrap/0 32 | , bootstrap/2 33 | ]). 34 | 35 | -export([ delete_commit_history/1 36 | ]). 37 | 38 | -include("brod.hrl"). 39 | -include("brod_int.hrl"). 40 | 41 | -define(PRODUCE_DELAY_SECONDS, 5). 42 | -define(TOPIC, <<"brod-demo-topic-subscriber">>). 43 | 44 | -record(state, { offset_dir :: string() 45 | , message_type :: message | message_type 46 | }). 47 | 48 | %% @doc This function bootstraps everything to demo of topic subscriber. 
49 | %% Prerequisites: 50 | %% - bootstrap docker host at {"localhost", 9092} 51 | %% - kafka topic named <<"brod-demo-topic-subscriber">> 52 | %% Processes to spawn: 53 | %% - A brod client 54 | %% - A producer which produces sequence numbers to each partition 55 | %% - A subscriber which subscribes to all partitions. 56 | %% 57 | %% * consumed sequence numbers are printed to console 58 | %% * consumed offsets are written to file /tmp/T/P.offset 59 | %% where T is the topic name and X is the partition number 60 | -spec bootstrap() -> ok. 61 | bootstrap() -> 62 | bootstrap(?PRODUCE_DELAY_SECONDS, message). 63 | 64 | bootstrap(DelaySeconds, MessageType) -> 65 | ClientId = ?MODULE, 66 | BootstrapHosts = [{"localhost", 9092}], 67 | ClientConfig = client_config(), 68 | Topic = ?TOPIC, 69 | {ok, _} = application:ensure_all_started(brod), 70 | ok = brod:start_client(BootstrapHosts, ClientId, ClientConfig), 71 | ok = brod:start_producer(ClientId, Topic, _ProducerConfig = []), 72 | {ok, _Pid} = spawn_consumer(ClientId, Topic, MessageType), 73 | {ok, PartitionCount} = brod:get_partitions_count(ClientId, Topic), 74 | Partitions = lists:seq(0, PartitionCount - 1), 75 | ok = spawn_producers(ClientId, Topic, DelaySeconds, Partitions), 76 | ok. 77 | 78 | %% @doc Get committed offsets from file `/tmp/' 79 | init(Topic, MessageType) -> 80 | OffsetDir = commit_dir(Topic), 81 | Offsets = read_offsets(OffsetDir), 82 | State = #state{ offset_dir = OffsetDir 83 | , message_type = MessageType 84 | }, 85 | {ok, Offsets, State}. 86 | 87 | %% @doc Handle one message (not message-set). 
88 | handle_message(Partition, Message, 89 | #state{ offset_dir = Dir 90 | , message_type = message 91 | } = State) -> 92 | process_message(Dir, Partition, Message), 93 | {ok, ack, State}; 94 | handle_message(Partition, MessageSet, 95 | #state{ offset_dir = Dir 96 | , message_type = message_set 97 | } = State) -> 98 | #kafka_message_set{ partition = Partition 99 | , messages = Messages 100 | } = MessageSet, 101 | [process_message(Dir, Partition, Message) || Message <- Messages], 102 | {ok, ack, State}. 103 | 104 | delete_commit_history(Topic) -> 105 | Files = list_offset_files(commit_dir(Topic)), 106 | lists:foreach(fun(F) -> file:delete(F) end, Files). 107 | 108 | %%%_* Internal Functions ======================================================= 109 | 110 | -spec process_message(file:fd(), brod:partition(), brod:message()) -> ok. 111 | process_message(Dir, Partition, Message) -> 112 | #kafka_message{ offset = Offset 113 | , value = Value 114 | } = Message, 115 | Seqno = binary_to_integer(Value), 116 | Now = os_time_utc_str(), 117 | ?BROD_LOG_INFO("~p ~p ~s: offset:~w seqno:~w\n", 118 | [self(), Partition, Now, Offset, Seqno]), 119 | ok = commit_offset(Dir, Partition, Offset). 120 | 121 | -spec read_offsets(string()) -> [{brod:partition(), brod:offset()}]. 122 | read_offsets(Dir) -> 123 | Files = list_offset_files(Dir), 124 | lists:map(fun(Filename) -> read_offset(Dir, Filename) end, Files). 125 | 126 | list_offset_files(Dir) when is_binary(Dir) -> 127 | list_offset_files(binary_to_list(Dir)); 128 | list_offset_files(Dir) -> 129 | filelib:wildcard("*.offset", Dir). 130 | 131 | -spec read_offset(string(), string()) -> {brod:partition(), brod:offset()}. 
132 | read_offset(Dir, Filename) -> 133 | PartitionStr = filename:basename(Filename, ".offset"), 134 | Partition = list_to_integer(PartitionStr), 135 | {ok, OffsetBin} = file:read_file(filename:join(Dir, Filename)), 136 | OffsetStr = string:strip(binary_to_list(OffsetBin), both, $\n), 137 | Offset = list_to_integer(OffsetStr), 138 | {Partition, Offset}. 139 | 140 | filename(Dir, Partition) -> 141 | filename:join([Dir, integer_to_list(Partition) ++ ".offset"]). 142 | 143 | commit_offset(Dir, Partition, Offset) -> 144 | Filename = filename(Dir, Partition), 145 | ok = filelib:ensure_dir(Filename), 146 | ok = file:write_file(Filename, [integer_to_list(Offset), $\n]). 147 | 148 | spawn_consumer(ClientId, Topic, MessageType) -> 149 | CallbackInitArg = MessageType, 150 | Config = [{offset_reset_policy, reset_to_earliest}], 151 | brod_topic_subscriber:start_link(ClientId, Topic, all, 152 | Config, MessageType, 153 | _CallbackModule = ?MODULE, 154 | CallbackInitArg). 155 | 156 | spawn_producers(_ClientId, _Topic, _DelaySeconds, []) -> ok; 157 | spawn_producers(ClientId, Topic, DelaySeconds, [Partition | Partitions]) -> 158 | erlang:spawn_link( 159 | fun() -> 160 | producer_loop(ClientId, Topic, Partition, DelaySeconds, 0) 161 | end), 162 | spawn_producers(ClientId, Topic, DelaySeconds, Partitions). 163 | 164 | producer_loop(ClientId, Topic, Partition, DelaySeconds, Seqno) -> 165 | KafkaValue = iolist_to_binary(integer_to_list(Seqno)), 166 | ok = brod:produce_sync(ClientId, Topic, Partition, _Key = <<>>, KafkaValue), 167 | timer:sleep(timer:seconds(DelaySeconds)), 168 | producer_loop(ClientId, Topic, Partition, DelaySeconds, Seqno+1). 169 | 170 | -spec os_time_utc_str() -> string(). 171 | os_time_utc_str() -> 172 | Ts = os:timestamp(), 173 | {{Y,M,D}, {H,Min,Sec}} = calendar:now_to_universal_time(Ts), 174 | {_, _, Micro} = Ts, 175 | S = io_lib:format("~4.4.0w-~2.2.0w-~2.2.0w:~2.2.0w:~2.2.0w:~2.2.0w.~6.6.0w", 176 | [Y, M, D, H, Min, Sec, Micro]), 177 | lists:flatten(S). 
178 | 179 | client_config() -> 180 | kafka_test_helper:client_config(). 181 | 182 | commit_dir(Topic) -> 183 | filename:join(["/tmp", Topic]). 184 | 185 | %%%_* Emacs ==================================================================== 186 | %%% Local Variables: 187 | %%% allout-layout: t 188 | %%% erlang-indent-level: 2 189 | %%% End: 190 | -------------------------------------------------------------------------------- /test/brod_group_coordinator_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2015-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | %% @private 18 | -module(brod_group_coordinator_SUITE). 19 | -define(CLIENT_ID, ?MODULE). 20 | -define(OTHER_CLIENT_ID, other_coordinator_id). 21 | -define(TOPIC, <<"brod-group-coordinator">>). 22 | -define(TOPIC1, <<"brod-group-coordinator-1">>). 23 | -define(GROUP, <<"brod-group-coordinator">>). 24 | -define(PARTITION, 0). 25 | 26 | %% Test framework 27 | -export([ init_per_suite/1 28 | , end_per_suite/1 29 | , common_init_per_testcase/2 30 | , common_end_per_testcase/2 31 | , suite/0 32 | ]). 33 | 34 | %% brod coordinator callbacks 35 | -export([ assignments_revoked/1 36 | , assignments_received/4 37 | ]). 38 | 39 | %% Test cases 40 | -export([ t_acks_during_revoke/1 41 | , t_update_topics_triggers_rebalance/1 42 | ]). 
43 | 44 | -define(assert_receive(Pattern, Return), 45 | receive 46 | Pattern -> Return 47 | after 48 | 30000 -> ct:fail(erlang:process_info(self(), messages)) 49 | end). 50 | 51 | -include_lib("snabbkaffe/include/ct_boilerplate.hrl"). 52 | -include("brod.hrl"). 53 | 54 | %%%_* ct callbacks ============================================================= 55 | 56 | suite() -> [{timetrap, {seconds, 60}}]. 57 | 58 | init_per_suite(Config) -> Config. 59 | end_per_suite(_Config) -> ok. 60 | 61 | common_init_per_testcase(_Case, Config) -> 62 | {ok, _} = application:ensure_all_started(brod), 63 | BootstrapHosts = kafka_test_helper:bootstrap_hosts(), 64 | ClientConfig = client_config(), 65 | ok = brod:start_client(BootstrapHosts, ?CLIENT_ID, ClientConfig), 66 | ok = brod:start_client(BootstrapHosts, ?OTHER_CLIENT_ID, ClientConfig), 67 | ok = brod:start_producer(?CLIENT_ID, ?TOPIC, _ProducerConfig = []), 68 | Config. 69 | 70 | common_end_per_testcase(_Case, Config) when is_list(Config) -> 71 | ok = brod:stop_client(?CLIENT_ID), 72 | ok = brod:stop_client(?OTHER_CLIENT_ID), 73 | ok = application:stop(brod). 74 | 75 | client_config() -> 76 | kafka_test_helper:client_config(). 77 | 78 | %%%_* Group coordinator callbacks ============================================== 79 | 80 | assignments_revoked({Pid, Count}) -> 81 | Pid ! {assignments_revoked, Count}, 82 | receive continue -> ok end, 83 | ok. 84 | 85 | assignments_received({Pid, Count}, _MemberId, GenerationId, TopicAssignments) -> 86 | Pid ! {assignments_received, Count, GenerationId, TopicAssignments}, 87 | ok. 88 | 89 | %%%_* Test functions =========================================================== 90 | 91 | t_acks_during_revoke(Config) when is_list(Config) -> 92 | {ok, GroupCoordinator1Pid} = 93 | brod_group_coordinator:start_link(?CLIENT_ID, ?GROUP, [?TOPIC], 94 | _Config = [], ?MODULE, {self(), 1}), 95 | 96 | ?assert_receive({assignments_revoked, 1}, ok), 97 | GroupCoordinator1Pid ! 
continue, 98 | GenerationId = ?assert_receive({assignments_received, 1, GId, _}, GId), 99 | 100 | {ok, Offset} = 101 | brod:produce_sync_offset(?CLIENT_ID, ?TOPIC, ?PARTITION, <<>>, <<1, 2, 3>>), 102 | 103 | {ok, {_, [_]}} = brod:fetch(?CLIENT_ID, ?TOPIC, ?PARTITION, Offset), 104 | 105 | {ok, GroupCoordinator2Pid} = 106 | brod_group_coordinator:start_link(?OTHER_CLIENT_ID, ?GROUP, [?TOPIC], 107 | _Config = [], ?MODULE, {self(), 2}), 108 | 109 | %% Allow new partition to be started 110 | ?assert_receive({assignments_revoked, 2}, ok), 111 | GroupCoordinator2Pid ! continue, 112 | 113 | %% We only ack when we are inside assignments_revoked 114 | ?assert_receive({assignments_revoked, 1}, ok), 115 | brod_group_coordinator:ack(GroupCoordinator1Pid, GenerationId, 116 | ?TOPIC, ?PARTITION, Offset), 117 | GroupCoordinator1Pid ! continue, 118 | 119 | TopicAssignments1 = ?assert_receive({assignments_received, 1, _, TA1}, TA1), 120 | TopicAssignments2 = ?assert_receive({assignments_received, 2, _, TA2}, TA2), 121 | Assignments = TopicAssignments1 ++ TopicAssignments2, 122 | 123 | %% The assignment needs to start at the chosen offset. 124 | ?assertMatch( [ok] 125 | , [ok || #brod_received_assignment{ 126 | partition=?PARTITION, 127 | begin_offset=BeginOffset 128 | } <- Assignments, 129 | BeginOffset == Offset + 1] 130 | ), 131 | 132 | ok. 133 | 134 | t_update_topics_triggers_rebalance(Config) when is_list(Config) -> 135 | {ok, GroupCoordinatorPid} = 136 | brod_group_coordinator:start_link(?CLIENT_ID, ?GROUP, [?TOPIC], 137 | _Config = [], ?MODULE, {self(), 1}), 138 | ?assert_receive({assignments_revoked, 1}, ok), 139 | GroupCoordinatorPid ! continue, 140 | GenerationId1 = ?assert_receive({assignments_received, 1, GId1, _}, GId1), 141 | brod_group_coordinator:update_topics(GroupCoordinatorPid, [?TOPIC1]), 142 | ?assert_receive({assignments_revoked, 1}, ok), 143 | GroupCoordinatorPid ! 
continue, 144 | {GenerationId2, TopicAssignments} = 145 | ?assert_receive({assignments_received, 1, GId2, TA}, {GId2, TA}), 146 | ?assert(GenerationId2 > GenerationId1), 147 | ?assert(lists:all( 148 | fun(#brod_received_assignment{topic=Topic}) -> 149 | Topic == ?TOPIC1 150 | end, TopicAssignments)). 151 | 152 | %%%_* Emacs ==================================================================== 153 | %%% Local Variables: 154 | %%% allout-layout: t 155 | %%% erlang-indent-level: 2 156 | %%% End: 157 | -------------------------------------------------------------------------------- /test/brod_group_subscriber_test.hrl: -------------------------------------------------------------------------------- 1 | -include_lib("snabbkaffe/include/snabbkaffe.hrl"). 2 | 3 | -record(state, { is_async_ack 4 | , is_async_commit 5 | , is_assign_partitions 6 | , topic 7 | , partition 8 | }). 9 | 10 | -define(MSG(Ref, Pid, Topic, Partition, Offset, Value), 11 | {Ref, Pid, Topic, Partition, Offset, Value}). 12 | 13 | -define(TOPIC1, <<"brod-group-subscriber-1">>). 14 | -define(TOPIC2, <<"brod-group-subscriber-2">>). 15 | -define(TOPIC3, <<"brod-group-subscriber-3">>). 16 | -define(TOPIC4, <<"brod-group-subscriber-4">>). 17 | -define(GROUP_ID, list_to_binary(atom_to_list(?MODULE))). 18 | -define(config(Name), proplists:get_value(Name, Config)). 19 | -------------------------------------------------------------------------------- /test/brod_kafka_apis_tests.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2017-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 
6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | -module(brod_kafka_apis_tests). 18 | 19 | -include_lib("eunit/include/eunit.hrl"). 20 | 21 | -define(WITH_MECK(Versions, EXPR), 22 | fun() -> 23 | try 24 | ok = setup(Versions), 25 | EXPR 26 | after 27 | ok = clear() 28 | end 29 | end()). 30 | 31 | start_stop_test() -> 32 | _ = application:stop(brod), %% other tests might have it started 33 | {ok, _Pid} = brod_kafka_apis:start_link(), 34 | ?assert(lists:member(brod_kafka_apis, ets:all())), 35 | ok = brod_kafka_apis:stop(). 36 | 37 | pick_brod_max_version_test() -> 38 | %% brod supports max = 7, kafka supports max = 100 39 | ?WITH_MECK(#{produce => {0, 100}}, 40 | ?assertEqual(7, brod_kafka_apis:pick_version(self(), produce))). 41 | 42 | pick_kafka_max_version_test() -> 43 | %% brod supports max = 2, kafka supports max = 1 44 | ?WITH_MECK(#{produce => {0, 1}}, 45 | ?assertEqual(1, brod_kafka_apis:pick_version(self(), produce))). 46 | 47 | pick_min_brod_version_test() -> 48 | %% no versions received from kafka 49 | ?WITH_MECK(#{}, 50 | ?assertEqual(0, brod_kafka_apis:pick_version(self(), produce))). 51 | 52 | pick_min_brod_version_2_test() -> 53 | %% received 'fetch' API version, lookup 'produce' 54 | ?WITH_MECK(#{fetch => {0, 0}}, 55 | ?assertEqual(0, brod_kafka_apis:pick_version(self(), produce))). 
56 | 57 | no_version_range_intersection_test() -> 58 | %% brod supports 0 - 11, kafka supports 80 - 90 59 | ?WITH_MECK(#{produce => {80, 90}}, 60 | ?assertError({unsupported_vsn_range, _, _, _}, 61 | brod_kafka_apis:pick_version(self(), produce))). 62 | 63 | setup(Versions) -> 64 | _ = application:stop(brod), %% other tests might have it started 65 | _ = brod_kafka_apis:start_link(), 66 | meck:new(kpro, [passthrough, no_passthrough_cover, no_history]), 67 | meck:expect(kpro, get_api_versions, fun(_) -> {ok, Versions} end), 68 | ok. 69 | 70 | clear() -> 71 | brod_kafka_apis:stop(), 72 | meck:unload(kpro), 73 | ok. 74 | 75 | %%%_* Emacs ==================================================================== 76 | %%% Local Variables: 77 | %%% allout-layout: t 78 | %%% erlang-indent-level: 2 79 | %%% End: 80 | -------------------------------------------------------------------------------- /test/brod_offset_txn_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(brod_offset_txn_SUITE). 2 | 3 | -export([ init_per_suite/1 4 | , end_per_suite/1 5 | , init_per_testcase/2 6 | , end_per_testcase/2 7 | , all/0 8 | , suite/0 9 | ]). 10 | 11 | -export([ t_simple_test/1 12 | , t_no_commit_test/1 13 | ]). 14 | 15 | -export([ init/2 16 | , handle_message/4 17 | , get_committed_offsets/3 18 | ]). 19 | 20 | -include_lib("common_test/include/ct.hrl"). 21 | -include_lib("stdlib/include/assert.hrl"). 22 | 23 | -include("include/brod.hrl"). 24 | 25 | -define(HOSTS, [{"localhost", 9092}]). 26 | -define(TOPIC_OUTPUT_1, <<"brod_txn_subscriber_output_1">>). 27 | -define(TOPIC_OUTPUT_2, <<"brod_txn_subscriber_output_2">>). 28 | -define(TOPIC_INPUT, <<"brod_txn_subscriber_input">>). 29 | -define(CLIENT_ID, client_consumer_group). 30 | -define(GROUP_ID, <<"group_id_for_testing">>). 31 | -define(TIMEOUT, 4000). 32 | -define(config(Name), proplists:get_value(Name, Config)). 
33 | 34 | %%%_* ct callbacks ============================================================= 35 | 36 | suite() -> [{timetrap, {seconds, 30}}]. 37 | 38 | init_per_suite(Config) -> 39 | case kafka_test_helper:kafka_version() of 40 | {0, Minor} when Minor < 11 -> 41 | {skip, "no_transaction"}; 42 | _ -> 43 | {ok, _} = application:ensure_all_started(brod), 44 | Config 45 | end. 46 | 47 | end_per_suite(_Config) -> ok. 48 | 49 | init_per_testcase(Case, Config) -> 50 | try ?MODULE:Case({'init', Config}) 51 | catch error : function_clause -> 52 | init_client(Case, Config) 53 | end. 54 | 55 | init_client(Case, Config) -> 56 | Client = Case, 57 | brod:stop_client(Client), 58 | ClientConfig = client_config(), 59 | ok = brod:start_client(?HOSTS, Client, ClientConfig), 60 | 61 | [ {client, Client} 62 | , {client_config, ClientConfig} | Config]. 63 | 64 | end_per_testcase(_Case, Config) -> 65 | Subscriber = ?config(subscriber), 66 | is_pid(Subscriber) andalso unlink(Subscriber), 67 | is_pid(Subscriber) andalso exit(Subscriber, kill), 68 | Pid = whereis(?config(client)), 69 | try 70 | Ref = erlang:monitor(process, Pid), 71 | brod:stop_client(?config(client)), 72 | receive 73 | {'DOWN', Ref, process, Pid, _} -> ok 74 | end 75 | catch _ : _ -> 76 | ok 77 | end, 78 | Config. 79 | 80 | all() -> 81 | [F || {F, _A} <- module_info(exports), 82 | case atom_to_list(F) of 83 | "t_" ++ _ -> true; 84 | _ -> false 85 | end]. 86 | 87 | client_config() -> kafka_test_helper:client_config(). 88 | 89 | init(GroupId, 90 | #{ client := Client 91 | , observer := OPid}) -> 92 | {ok, #{ client => Client 93 | , observer => OPid 94 | , group_id => GroupId}}. 
95 | 96 | handle_message(Topic, 97 | Partition, 98 | #kafka_message{ offset = Offset 99 | , key = Key 100 | , value = Value}, 101 | #{ client := Client 102 | , group_id := GroupId 103 | , observer := ObserverPid} = State) -> 104 | 105 | {ok, Tx} = brod:transaction(Client, <<"some_transaction">>, []), 106 | {ok, _} = brod:txn_produce(Tx, ?TOPIC_OUTPUT_1, Partition, Key, Value), 107 | {ok, _} = brod:txn_produce(Tx, ?TOPIC_OUTPUT_2, Partition, Key, Value), 108 | ok = brod:txn_add_offsets(Tx, GroupId, #{{Topic, Partition} => Offset}), 109 | 110 | case Value of 111 | <<"no_commit">> -> 112 | ok = brod:abort(Tx); 113 | _ -> 114 | ok = brod:commit(Tx) 115 | end, 116 | 117 | ObserverPid ! {offset, Offset}, 118 | 119 | {ok, ack_no_commit, State}. 120 | 121 | get_committed_offsets(GroupId, TPs, #{client := Client} = State) -> 122 | {ok, Offsets} = brod:fetch_committed_offsets(Client, GroupId), 123 | TPOs = 124 | lists:filter(fun({TP, _Offset}) -> 125 | lists:member(TP, TPs) 126 | end, 127 | lists:foldl(fun(#{ name := Topic 128 | , partitions := Partitions}, TPOs) -> 129 | lists:append(TPOs, 130 | lists:map(fun(#{ committed_offset := COffset 131 | , partition_index := Partition}) -> 132 | {{Topic, Partition}, COffset} 133 | end, Partitions)) 134 | end, [], Offsets)), 135 | {ok, 136 | TPOs, 137 | State}. 138 | 139 | get_offset() -> 140 | timer:sleep(100), 141 | case get_committed_offsets(?GROUP_ID, 142 | [{?TOPIC_INPUT, 0}], 143 | #{client => ?CLIENT_ID}) of 144 | 145 | {ok, [{{?TOPIC_INPUT, 0}, Offset}], _} -> Offset; 146 | {ok, [], _} -> 0 147 | end. 148 | 149 | send_no_commit_message() -> 150 | send_message(rand(), <<"no_commit">>). 151 | 152 | send_simple_message() -> 153 | send_message(rand(), <<"simple">>). 154 | 155 | send_message(Key, Value) -> 156 | brod:start_producer(?CLIENT_ID, ?TOPIC_INPUT, []), 157 | {ok, Offset} = brod:produce_sync_offset(?CLIENT_ID, ?TOPIC_INPUT, 0, Key, Value), 158 | Offset. 
159 | 160 | start_subscriber() -> 161 | GroupConfig = [{offset_commit_policy, consumer_managed}], 162 | 163 | ConsumerConfig = [ {prefetch_count, 3} 164 | , {sleep_timeout, 0} 165 | , {max_wait_time, 100} 166 | , {partition_restart_delay_seconds, 1} 167 | , {begin_offset, 0} 168 | ], 169 | 170 | brod:start_link_group_subscriber(?CLIENT_ID, 171 | ?GROUP_ID, 172 | [?TOPIC_INPUT], 173 | GroupConfig, 174 | ConsumerConfig, 175 | message, 176 | ?MODULE, 177 | #{ client => ?CLIENT_ID 178 | , observer => self()}). 179 | 180 | wait_to_last() -> 181 | receive 182 | _ -> wait_to_last() 183 | after ?TIMEOUT -> done 184 | end. 185 | 186 | wait_for_offset(ExpectedOffset) -> 187 | receive 188 | {offset, Offset} when Offset == ExpectedOffset -> 189 | done; 190 | {offset, _UnexpectedOffset} -> 191 | wait_for_offset(ExpectedOffset) 192 | after ?TIMEOUT -> timeout 193 | end. 194 | 195 | rand() -> base64:encode(crypto:strong_rand_bytes(8)). 196 | 197 | t_simple_test(Config) when is_list(Config) -> 198 | ok = brod:start_client(?HOSTS, ?CLIENT_ID, []), 199 | {ok, SubscriberPid} = start_subscriber(), 200 | done = wait_to_last(), 201 | InitialOffset = get_offset(), 202 | {ok, OffsetOutput1} = brod:resolve_offset(?HOSTS, ?TOPIC_OUTPUT_1, 0), 203 | {ok, OffsetOutput2} = brod:resolve_offset(?HOSTS, ?TOPIC_OUTPUT_2, 0), 204 | MessageOffset = send_simple_message(), 205 | done = wait_for_offset(MessageOffset), 206 | CurrentOffset = get_offset(), 207 | ?assertMatch(MessageOffset, CurrentOffset), 208 | 209 | ?assertMatch(true, InitialOffset =< CurrentOffset), 210 | ok = brod_group_subscriber:stop(SubscriberPid), 211 | ?assertMatch(false, is_process_alive(SubscriberPid)), 212 | 213 | {ok, {_, MessagesO1}} = brod:fetch(?CLIENT_ID, 214 | ?TOPIC_OUTPUT_1, 215 | 0, OffsetOutput1, 216 | #{isolation_level => read_committed}), 217 | ?assertMatch(1, length(MessagesO1)), 218 | 219 | {ok, {_, MessagesO2}} = brod:fetch(?CLIENT_ID, 220 | ?TOPIC_OUTPUT_2, 221 | 0, OffsetOutput2, 222 | #{isolation_level => 
read_committed}), 223 | ?assertMatch(1, length(MessagesO2)), 224 | ok. 225 | 226 | t_no_commit_test(Config) when is_list(Config) -> 227 | ok = brod:start_client(?HOSTS, ?CLIENT_ID, []), 228 | {ok, SubscriberPid} = start_subscriber(), 229 | wait_to_last(), 230 | {ok, OutputOffset1} = brod:resolve_offset(?HOSTS, ?TOPIC_OUTPUT_1, 0), 231 | {ok, OutputOffset2} = brod:resolve_offset(?HOSTS, ?TOPIC_OUTPUT_2, 0), 232 | InitialOffset = send_no_commit_message(), 233 | done = wait_for_offset(InitialOffset), 234 | CurrentOffset = get_offset(), 235 | 236 | ?assertMatch(true, InitialOffset >= CurrentOffset), 237 | ok = brod_group_subscriber:stop(SubscriberPid), 238 | false = is_process_alive(SubscriberPid), 239 | 240 | {ok, {_, MessagesO1}} = brod:fetch(?CLIENT_ID, 241 | ?TOPIC_OUTPUT_1, 242 | 0, OutputOffset1, 243 | #{isolation_level => read_committed}), 244 | ?assertMatch(0, length(MessagesO1)), 245 | 246 | {ok, {_, MessagesO2}} = brod:fetch(?CLIENT_ID, 247 | ?TOPIC_OUTPUT_2, 248 | 0, OutputOffset2, 249 | #{isolation_level => read_committed}), 250 | ?assertMatch(0, length(MessagesO2)), 251 | ok. 252 | -------------------------------------------------------------------------------- /test/brod_test_group_subscriber.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2015-2021 Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 
15 | %%% 16 | 17 | %% @private 18 | -module(brod_test_group_subscriber). 19 | 20 | -behavior(brod_group_subscriber_v2). 21 | 22 | -include("brod.hrl"). 23 | -include("brod_int.hrl"). 24 | -include("brod_group_subscriber_test.hrl"). 25 | 26 | %% brod subscriber callbacks 27 | -export([ init/2 28 | , get_committed_offset/3 29 | , handle_message/2 30 | , assign_partitions/3 31 | , terminate/2 32 | ]). 33 | 34 | init(InitInfo, Config) -> 35 | #{topic := Topic, partition := Partition} = InitInfo, 36 | IsAsyncAck = maps:get(async_ack, Config, false), 37 | IsAsyncCommit = maps:get(async_commit, Config, false), 38 | IsAssignPartitions = maps:get(assign_partitions, Config, false), 39 | ?BROD_LOG_INFO("Started a test group subscriber.~n" 40 | "Config: ~p~nInitInfo: ~p~n" 41 | , [Config, InitInfo]), 42 | {ok, #state{ is_async_ack = IsAsyncAck 43 | , is_async_commit = IsAsyncCommit 44 | , is_assign_partitions = IsAssignPartitions 45 | , topic = Topic 46 | , partition = Partition 47 | }}. 48 | 49 | handle_message(Message, 50 | #state{ is_async_ack = IsAsyncAck 51 | , is_async_commit = IsAsyncCommit 52 | , topic = Topic 53 | , partition = Partition 54 | } = State) -> 55 | #kafka_message{ offset = Offset 56 | , value = Value 57 | } = Message, 58 | ?tp(group_subscriber_handle_message, 59 | #{ topic => Topic 60 | , partition => Partition 61 | , offset => Offset 62 | , value => Value 63 | , worker => self() 64 | }), 65 | case {IsAsyncAck, IsAsyncCommit} of 66 | {true, _} -> {ok, State}; 67 | {false, false} -> {ok, commit, State}; 68 | {false, true} -> {ok, ack, State} 69 | end. 70 | 71 | get_committed_offset(_CbConfig, _Topic, _Partition) -> 72 | %% always return undefined: always fetch from latest available offset 73 | ?undef. 74 | 75 | assign_partitions(_CbConfig, Members, TopicPartitions) -> 76 | PartitionsAssignments = [{Topic, [PartitionsN]} 77 | || {Topic, PartitionsN} <- TopicPartitions], 78 | [{element(1, hd(Members)), PartitionsAssignments}]. 
79 | 
80 | %% @doc Emit a trace event when the subscriber worker terminates,
80 | %% so tests can assert on orderly shutdown.
80 | terminate(Reason, #state{ topic = Topic
81 | , partition = Partition
82 | }) ->
83 | ?tp(brod_test_group_subscriber_terminate,
84 | #{ topic => Topic
85 | , partition => Partition
86 | , reason => Reason %% fixed: key was misspelled as 'readon'
87 | }).
88 | -------------------------------------------------------------------------------- /test/brod_test_macros.hrl: -------------------------------------------------------------------------------- 1 | -ifndef(BROD_TEST_MACROS_HRL).
2 | -define(BROD_TEST_MACROS_HRL, true).
3 | 
4 | -include_lib("kafka_protocol/include/kpro.hrl").
5 | -include_lib("hut/include/hut.hrl").
6 | -include_lib("snabbkaffe/include/snabbkaffe.hrl").
7 | -include_lib("stdlib/include/assert.hrl").
8 | 
9 | %%====================================================================
10 | %% Macros
11 | %%====================================================================
12 | 
13 | -define(TEST_CLIENT_ID, brod_test_client).
14 | 
15 | -define(KAFKA_HOST, "localhost").
16 | -define(KAFKA_PORT, 9092).
17 | 
18 | -define(topic(TestCase, N),
19 | list_to_binary(atom_to_list(TestCase) ++ integer_to_list(N))).
20 | -define(topic(N), ?topic(?FUNCTION_NAME, N)).
21 | -define(topic, ?topic(1)).
22 | 
23 | -define(group_id(TestCase, N),
24 | list_to_binary(atom_to_list(TestCase) ++ "_grp" ++ integer_to_list(N))).
25 | -define(group_id(N), ?group_id(?FUNCTION_NAME, N)).
26 | -define(group_id, ?group_id(1)).
27 | 
28 | -endif.
29 | -------------------------------------------------------------------------------- /test/brod_test_setup.hrl: -------------------------------------------------------------------------------- 1 | -ifndef(KAFKA_CT_SETUP_HRL).
2 | -define(KAFKA_CT_SETUP_HRL, true).
3 | 4 | -import(kafka_test_helper, [ produce/2 5 | , produce/3 6 | , produce/4 7 | , payloads/1 8 | , produce_payloads/3 9 | , create_topic/2 10 | , get_acked_offsets/2 11 | , check_committed_offsets/2 12 | , wait_n_messages/2 13 | , wait_n_messages/3 14 | , consumer_config/0 15 | , client_config/0 16 | , bootstrap_hosts/0 17 | ]). 18 | 19 | -include("brod_test_macros.hrl"). 20 | 21 | -endif. 22 | -------------------------------------------------------------------------------- /test/brod_txn_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(brod_txn_SUITE). 2 | %% Test framework 3 | -export([ init_per_suite/1 4 | , end_per_suite/1 5 | , init_per_testcase/2 6 | , end_per_testcase/2 7 | , all/0 8 | , suite/0 9 | ]). 10 | 11 | -export([ t_multiple_writes_transaction/1 12 | , t_simple_transaction/1 13 | , t_abort_transaction/1 14 | , t_batch_transaction/1 15 | ]). 16 | 17 | -include_lib("stdlib/include/assert.hrl"). 18 | 19 | -include("include/brod.hrl"). 20 | 21 | -define(HOSTS, [{"localhost", 9092}]). 22 | -define(TOPIC_1, list_to_binary(atom_to_list(?MODULE)++"_1")). 23 | -define(TOPIC_2, list_to_binary(atom_to_list(?MODULE)++"_2")). 24 | -define(TIMEOUT, 280000). 25 | -define(config(Name), proplists:get_value(Name, Config)). 26 | 27 | %%%_* ct callbacks ============================================================= 28 | 29 | suite() -> [{timetrap, {seconds, 30}}]. 30 | 31 | init_per_suite(Config) -> 32 | case kafka_test_helper:kafka_version() of 33 | {0, Minor} when Minor < 11 -> 34 | {skip, "no_transaction"}; 35 | _ -> 36 | {ok, _} = application:ensure_all_started(brod), 37 | Config 38 | end. 39 | 40 | end_per_suite(_Config) -> ok. 41 | 42 | init_per_testcase(Case, Config) -> 43 | try ?MODULE:Case({'init', Config}) 44 | catch error : function_clause -> 45 | init_client(Case, Config) 46 | end. 
47 | 48 | init_client(Case, Config) -> 49 | Client = Case, 50 | brod:stop_client(Client), 51 | ClientConfig = client_config(), 52 | ok = brod:start_client(?HOSTS, Client, ClientConfig), 53 | TesterPid = self(), 54 | Subscriber = spawn_link(fun() -> subscriber_loop(TesterPid) end), 55 | Topics = [?TOPIC_1, ?TOPIC_2], 56 | lists:foreach(fun(Topic) -> 57 | ok = brod:start_consumer(Client, Topic, []), 58 | brod:subscribe(Client, Subscriber, Topic, 0, []) 59 | end, Topics), 60 | 61 | [{client, Client}, 62 | {client_config, ClientConfig}, 63 | {topics, Topics} | Config]. 64 | 65 | end_per_testcase(_Case, Config) -> 66 | Subscriber = ?config(subscriber), 67 | is_pid(Subscriber) andalso unlink(Subscriber), 68 | is_pid(Subscriber) andalso exit(Subscriber, kill), 69 | Pid = whereis(?config(client)), 70 | try 71 | Ref = erlang:monitor(process, Pid), 72 | brod:stop_client(?config(client)), 73 | receive 74 | {'DOWN', Ref, process, Pid, _} -> ok 75 | end 76 | catch _ : _ -> 77 | ok 78 | end, 79 | Config. 80 | 81 | all() -> [F || {F, _A} <- module_info(exports), 82 | case atom_to_list(F) of 83 | "t_" ++ _ -> true; 84 | _ -> false 85 | end]. 86 | 87 | client_config() -> 88 | []. 89 | 90 | subscriber_loop(TesterPid) -> 91 | receive 92 | {ConsumerPid, KMS} -> 93 | #kafka_message_set{ messages = Messages 94 | , partition = Partition} = KMS, 95 | lists:foreach(fun(#kafka_message{offset = Offset, key = K, value = V}) -> 96 | TesterPid ! {Partition, K, V}, 97 | ok = brod:consume_ack(ConsumerPid, Offset) 98 | end, Messages), 99 | subscriber_loop(TesterPid); 100 | Msg -> 101 | ct:fail("unexpected message received by test subscriber.\n~p", [Msg]) 102 | end. 
103 | 104 | receive_messages(none) -> 105 | receive 106 | {_Partition, _K, _V} = M -> {unexpected_message, M} 107 | after 1000 -> ok 108 | end; 109 | 110 | receive_messages(ExpectedMessages) -> 111 | case sets:is_empty(ExpectedMessages) of 112 | true -> ok; 113 | _ -> 114 | receive 115 | {_Partition, _K, _V} = M -> 116 | case sets:is_element(M, ExpectedMessages) of 117 | false -> {unexpected_message, M}; 118 | true -> 119 | receive_messages(sets:del_element(M, ExpectedMessages)) 120 | end 121 | after ?TIMEOUT -> 122 | {still_waiting_for, ExpectedMessages} 123 | end 124 | end. 125 | 126 | rand() -> base64:encode(crypto:strong_rand_bytes(8)). 127 | 128 | t_simple_transaction(Config) when is_list(Config) -> 129 | 130 | {ok, Tx} = brod:transaction(?config(client), <<"transaction-id">>, []), 131 | ?assertMatch(true, is_process_alive(Tx)), 132 | 133 | Results = lists:map(fun(Topic) -> 134 | Partition = 0, 135 | Key = rand(), 136 | Value = rand(), 137 | {ok, _Offset} = brod:txn_produce(Tx, Topic, Partition, Key, Value), 138 | {Partition, Key, Value} 139 | end, ?config(topics)), 140 | 141 | ?assertMatch(ok, receive_messages(none)), 142 | ?assertMatch(ok, brod:commit(Tx)), 143 | ?assertMatch(false, is_process_alive(Tx)), 144 | ?assertMatch(ok, receive_messages(sets:from_list(Results))), 145 | ?assertMatch(ok, receive_messages(none)), 146 | ok. 
147 | 148 | t_batch_transaction(Config) when is_list(Config) -> 149 | 150 | {ok, Tx} = brod:transaction(?config(client), <<"transaction-id">>, []), 151 | ?assertMatch(true, is_process_alive(Tx)), 152 | 153 | Results = 154 | lists:flatten(lists:map(fun(Topic) -> 155 | Batch = lists:map( 156 | fun(_) -> 157 | #{key => rand() 158 | , value => rand() 159 | , ts => kpro_lib:now_ts()} 160 | end, lists:seq(1, 10)), 161 | 162 | Partition = 0, 163 | {ok, _Offset} = brod:txn_produce(Tx, Topic, Partition, Batch), 164 | 165 | lists:map(fun(#{key := Key 166 | , value := Value}) -> 167 | {Partition, Key, Value} 168 | end, Batch) 169 | 170 | end, ?config(topics))), 171 | 172 | ?assertMatch(ok, receive_messages(none)), 173 | ?assertMatch(ok, brod:commit(Tx)), 174 | ?assertMatch(false, is_process_alive(Tx)), 175 | ?assertMatch(ok, receive_messages(sets:from_list(Results))), 176 | ?assertMatch(ok, receive_messages(none)), 177 | ok. 178 | 179 | t_abort_transaction(Config) when is_list(Config) -> 180 | 181 | {ok, Tx} = brod:transaction(?config(client), <<"transaction-id">>, []), 182 | ?assertMatch(true, is_process_alive(Tx)), 183 | 184 | _ = lists:map(fun(Topic) -> 185 | Partition = 0, 186 | Key = rand(), 187 | Value = rand(), 188 | {ok, _Offset} = brod:txn_produce(Tx, Topic, Partition, Key, Value), 189 | {Partition, Key, Value} 190 | end, ?config(topics)), 191 | 192 | ?assertMatch(ok, receive_messages(none)), 193 | ?assertMatch(ok, brod:abort(Tx)), 194 | ?assertMatch(false, is_process_alive(Tx)), 195 | ?assertMatch(ok, receive_messages(none)), 196 | ok. 
197 | 198 | t_multiple_writes_transaction(Config) when is_list(Config) -> 199 | 200 | {ok, Tx} = brod:transaction(?config(client), <<"transaction-id">>, []), 201 | ?assertMatch(true, is_process_alive(Tx)), 202 | 203 | FirstWave = lists:map(fun(Topic) -> 204 | Partition = 0, 205 | Key = rand(), 206 | Value = rand(), 207 | {ok, _Offset} = brod:txn_produce(Tx, Topic, Partition, Key, Value), 208 | {Partition, Key, Value} 209 | end, ?config(topics)), 210 | 211 | SecondWave = lists:map(fun(Topic) -> 212 | Partition = 0, 213 | Key = rand(), 214 | Value = rand(), 215 | {ok, _Offset} = brod:txn_produce(Tx, Topic, Partition, Key, Value), 216 | {Partition, Key, Value} 217 | end, ?config(topics)), 218 | 219 | Results = lists:append(FirstWave, SecondWave), 220 | 221 | ?assertMatch(ok, receive_messages(none)), 222 | ?assertMatch(ok, brod:commit(Tx)), 223 | ?assertMatch(false, is_process_alive(Tx)), 224 | ?assertMatch(ok, receive_messages(sets:from_list(Results))), 225 | ?assertMatch(ok, receive_messages(none)), 226 | ok. 227 | 228 | -------------------------------------------------------------------------------- /test/brod_txn_processor_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(brod_txn_processor_SUITE). 2 | 3 | -export([ init_per_suite/1 4 | , end_per_suite/1 5 | , init_per_testcase/2 6 | , end_per_testcase/2 7 | , all/0 8 | , suite/0 9 | ]). 10 | 11 | -export([ t_simple_test/1 12 | , t_broken_test/1 13 | ]). 14 | 15 | -export([ init/2 16 | , handle_message/4 17 | ]). 18 | 19 | -include_lib("stdlib/include/assert.hrl"). 20 | 21 | -include("include/brod.hrl"). 22 | 23 | -define(HOSTS, [{"localhost", 9092}]). 24 | 25 | -define(INPUT_TOPIC, <<"brod_txn_subscriber_input">>). 26 | -define(OUTPUT_TOPIC_1, <<"brod_txn_subscriber_output_1">>). 27 | -define(OUTPUT_TOPIC_2, <<"brod_txn_subscriber_output_2">>). 28 | -define(GROUP_ID, <<"group_id_for_testing">>). 
29 | -define(PROCESSOR_GROUP_ID, <<"processor_group_id_for_testing">>). 30 | -define(TIMEOUT, 10000). 31 | -define(config(Name), proplists:get_value(Name, Config)). 32 | 33 | %%%_* ct callbacks ============================================================= 34 | 35 | suite() -> [{timetrap, {seconds, 30}}]. 36 | 37 | init_per_suite(Config) -> 38 | case kafka_test_helper:kafka_version() of 39 | {0, Minor} when Minor < 11 -> 40 | {skip, "no_transaction"}; 41 | _ -> 42 | {ok, _} = application:ensure_all_started(brod), 43 | Config 44 | end. 45 | 46 | end_per_suite(_Config) -> ok. 47 | 48 | init_per_testcase(Case, Config) -> 49 | try ?MODULE:Case({'init', Config}) 50 | catch error : function_clause -> 51 | init_client(Case, Config) 52 | end. 53 | 54 | init_client(Case, Config) -> 55 | Client = Case, 56 | brod:stop_client(Client), 57 | ClientConfig = client_config(), 58 | ok = brod:start_client(?HOSTS, Client, ClientConfig), 59 | 60 | [ {client, Client} 61 | , {client_config, ClientConfig} | Config]. 62 | 63 | end_per_testcase(_Case, Config) -> 64 | brod:stop_client(?config(client)), 65 | Config. 66 | 67 | all() -> [F || {F, _A} <- module_info(exports), 68 | case atom_to_list(F) of 69 | "t_" ++ _ -> true; 70 | _ -> false 71 | end]. 72 | 73 | client_config() -> kafka_test_helper:client_config(). 74 | 75 | rand() -> 76 | iolist_to_binary([base64:encode(crypto:strong_rand_bytes(8))]). 77 | 78 | produce_messages(Client) -> 79 | ok = brod:start_producer(Client, ?INPUT_TOPIC, []), 80 | 81 | lists:map(fun(_) -> 82 | Key = rand(), 83 | Value = rand(), 84 | {ok, _} = brod:produce_sync_offset(Client, ?INPUT_TOPIC, 0, Key, Value), 85 | 86 | Key 87 | end, lists:seq(1, 10)). 
88 | 89 | receive_messages(ExpectedMessages) -> 90 | case sets:is_empty(ExpectedMessages) of 91 | true -> ok; 92 | false -> 93 | receive 94 | {'EXIT', _, _} -> 95 | receive_messages(ExpectedMessages); 96 | {_Topic, _Key} = M -> 97 | receive_messages(sets:del_element(M, ExpectedMessages)) 98 | after 99 | ?TIMEOUT -> 100 | {error, timeout} 101 | end 102 | end. 103 | 104 | t_simple_test(Config) -> 105 | Client = ?config(client), 106 | {ok, FetcherPid} = start_fetchers(self(), Client), 107 | {ok, ProcessorPid} = start_processor(Client), 108 | 109 | ?assertMatch(true, is_process_alive(FetcherPid)), 110 | ?assertMatch(true, is_process_alive(ProcessorPid)), 111 | 112 | Keys = produce_messages(Client), 113 | 114 | ExpectedMessages = sets:from_list( 115 | lists:flatten( 116 | lists:map(fun(Key) -> 117 | [{?OUTPUT_TOPIC_1, Key}, 118 | {?OUTPUT_TOPIC_2, Key}] 119 | end, Keys))), 120 | 121 | ?assertMatch(ok, receive_messages(ExpectedMessages)), 122 | 123 | ?assertMatch(ok, gen_server:stop(FetcherPid)), 124 | ?assertMatch(ok, gen_server:stop(ProcessorPid)), 125 | 126 | ok. 127 | 128 | t_broken_test(Config) -> 129 | 130 | Client = ?config(client), 131 | {ok, FetcherPid} = start_fetchers(self(), Client), 132 | process_flag(trap_exit, true), 133 | {ok, ProcessorPid} = start_broken_processor(Client), 134 | 135 | ?assertMatch(true, is_process_alive(FetcherPid)), 136 | 137 | Keys = produce_messages(Client), 138 | 139 | ExpectedMessages = sets:from_list( 140 | lists:flatten( 141 | lists:map(fun(Key) -> 142 | [{?OUTPUT_TOPIC_1, Key}, 143 | {?OUTPUT_TOPIC_2, Key}] 144 | end, Keys))), 145 | process_flag(trap_exit, false), 146 | 147 | ?assertMatch({error, timeout}, receive_messages(ExpectedMessages)), 148 | 149 | ?assertMatch(ok, gen_server:stop(FetcherPid)), 150 | ?assertMatch(ok, gen_server:stop(ProcessorPid)), 151 | 152 | ok. 
153 | 154 | start_broken_processor(Client) -> 155 | brod:txn_do( 156 | fun(Transaction, #kafka_message_set{ topic = _Topic 157 | , partition = Partition 158 | , messages = Messages 159 | } = _MessageSet) -> 160 | lists:foreach(fun(#kafka_message{ key = Key 161 | , value = Value 162 | }) -> 163 | brod:txn_produce(Transaction, 164 | ?OUTPUT_TOPIC_1, 165 | Partition, 166 | [#{ key => Key 167 | , value => Value 168 | }]), 169 | 170 | brod:txn_produce(Transaction, 171 | ?OUTPUT_TOPIC_2, 172 | Partition, 173 | [#{ key => Key 174 | , value => Value 175 | }]), 176 | %% this should break a few things .) 177 | false = is_process_alive(self()) 178 | end, Messages), 179 | ok 180 | end, Client, #{ topics => [?INPUT_TOPIC] 181 | , group_id => ?PROCESSOR_GROUP_ID}). 182 | 183 | start_processor(Client) -> 184 | brod:txn_do( 185 | fun(Transaction, #kafka_message_set{ topic = _Topic 186 | , partition = Partition 187 | , messages = Messages 188 | } = _MessageSet) -> 189 | 190 | lists:foreach(fun(#kafka_message{ key = Key 191 | , value = Value}) -> 192 | brod:txn_produce(Transaction, 193 | ?OUTPUT_TOPIC_1, 194 | Partition, 195 | [#{ key => Key 196 | , value => Value 197 | }]), 198 | 199 | brod:txn_produce(Transaction, 200 | ?OUTPUT_TOPIC_2, 201 | Partition, 202 | [#{ key => Key 203 | , value => Value 204 | }]) 205 | end, Messages), 206 | ok 207 | end, Client, #{ topics => [?INPUT_TOPIC] 208 | , group_id => ?GROUP_ID}). 209 | 210 | 211 | start_fetchers(ObserverPid, Client) -> 212 | brod:start_link_group_subscriber(Client, 213 | ?GROUP_ID, 214 | [?OUTPUT_TOPIC_1, ?OUTPUT_TOPIC_2], 215 | [], 216 | [{isolation_level, read_committed}], 217 | message_set, 218 | ?MODULE, 219 | #{ client => Client 220 | , observer_pid => ObserverPid}). 221 | 222 | 223 | %========== group subscriber callbacks 224 | init(GroupId, #{ client := Client 225 | , observer_pid := ObserverPid}) -> 226 | {ok, #{ client => Client 227 | , group_id => GroupId 228 | , observer_pid => ObserverPid}}. 
229 | 230 | handle_message(Topic, 231 | Partition, 232 | #kafka_message_set{ topic = Topic 233 | , partition = Partition 234 | , messages = Messages 235 | }, 236 | #{observer_pid := ObserverPid} = State) -> 237 | 238 | lists:foreach(fun(#kafka_message{key = Key}) -> 239 | ObserverPid ! {Topic, Key} 240 | end, Messages), 241 | 242 | {ok, ack, State}. 243 | -------------------------------------------------------------------------------- /test/brod_utils_tests.erl: -------------------------------------------------------------------------------- 1 | %%% 2 | %%% Copyright (c) 2018-2021, Klarna Bank AB (publ) 3 | %%% 4 | %%% Licensed under the Apache License, Version 2.0 (the "License"); 5 | %%% you may not use this file except in compliance with the License. 6 | %%% You may obtain a copy of the License at 7 | %%% 8 | %%% http://www.apache.org/licenses/LICENSE-2.0 9 | %%% 10 | %%% Unless required by applicable law or agreed to in writing, software 11 | %%% distributed under the License is distributed on an "AS IS" BASIS, 12 | %%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | %%% See the License for the specific language governing permissions and 14 | %%% limitations under the License. 15 | %%% 16 | 17 | -module(brod_utils_tests). 18 | 19 | -include_lib("eunit/include/eunit.hrl"). 20 | -include("brod_int.hrl"). 
21 | 22 | make_batch_input_test_() -> 23 | [fun() -> ?assertMatch([#{key := <<>>, value := <<>>}], 24 | mk(?undef, ?undef)) end, 25 | fun() -> ?assertMatch([#{key := <<>>, value := <<>>}], mk([], [])) end, 26 | fun() -> ?assertMatch([#{key := <<"foo">>}], mk("foo", [])) end, 27 | fun() -> ?assertMatch([#{value:= <<"foo">>}], mk("bar", "foo")) end, 28 | fun() -> ?assertMatch([#{ts := 1, key := <<>>, value:= <<"foo">>}], 29 | mk("", {1, "foo"})) end, 30 | fun() -> ?assertMatch([#{key := <<"foo">>, value := <<"bar">>}], 31 | mk("ignore", [{"foo", "bar"}])) end, 32 | fun() -> ?assertMatch([#{ts := 1, key := <<"foo">>, value := <<"bar">>}], 33 | mk("ignore", [{1, "foo", "bar"}])) end, 34 | fun() -> ?assertMatch([#{ts := 1, key := <<"k1">>, value := <<"v1">>}, 35 | #{ts := 2, key := <<"k2">>, value := <<"v2">>}, 36 | #{ts := 3, key := <<"k3">>, value := <<"v3">>}], 37 | mk("ignore", [{1, "k1", "v1"}, 38 | {<<>>, [{2, "k2", "v2"}, 39 | {4, <<>>, [{3, "k3", "v3"}]} 40 | ]} 41 | ])) end, 42 | fun() -> ?assertMatch([#{ts := _, key := <<"foo">>, value := <<"bar">>}], 43 | mk("foo", #{value => "bar"})) end, 44 | fun() -> ?assertMatch([#{ts := _, key := <<"key">>, value := <<"bar">>}], 45 | mk("foo", #{key => "key", value => "bar"})) end, 46 | fun() -> ?assertMatch([#{ts := 1, key := <<"key">>, value := <<"v">>}], 47 | mk("foo", #{ts => 1, key => "key", value => "v"})) end, 48 | fun() -> ?assertEqual([#{ts => 1, key => <<"k">>, value => <<"v">>, 49 | headers => [{<<"hk">>, <<"hv">>}]}], 50 | mk("foo", #{ts => 1, key => "k", value => "v", 51 | headers => [{"hk", "hv"}]})) end 52 | ]. 53 | 54 | mk(K, V) -> brod_utils:make_batch_input(K, V). 
55 | 56 | %%%_* Emacs ==================================================================== 57 | %%% Local Variables: 58 | %%% allout-layout: t 59 | %%% erlang-indent-level: 2 60 | %%% End: 61 | -------------------------------------------------------------------------------- /test/data/ssl/README.md: -------------------------------------------------------------------------------- 1 | This dir holds files for TLS/SSL tests. 2 | The files are copied from Kafka docker image in the `make test-env` step. 3 | See how the docker image is built here: https://github.com/zmstone/docker-kafka 4 | -------------------------------------------------------------------------------- /test/kafka_test_helper.erl: -------------------------------------------------------------------------------- 1 | -module(kafka_test_helper). 2 | 3 | -export([ init_per_suite/1 4 | , common_init_per_testcase/3 5 | , common_end_per_testcase/2 6 | , produce/2 7 | , produce/3 8 | , produce/4 9 | , payloads/1 10 | , produce_payloads/3 11 | , create_topic/3 12 | , get_acked_offsets/1 13 | , check_committed_offsets/2 14 | , wait_n_messages/3 15 | , wait_n_messages/2 16 | , consumer_config/0 17 | , client_config/0 18 | , bootstrap_hosts/0 19 | , kill_process/2 20 | , kafka_version/0 21 | ]). 22 | 23 | -include("brod_test_macros.hrl"). 24 | -include_lib("snabbkaffe/include/snabbkaffe.hrl"). 25 | 26 | init_per_suite(Config) -> 27 | [ {proper_timeout, 10000} 28 | | Config]. 29 | 30 | common_init_per_testcase(Module, Case, Config) -> 31 | %% Create a client and a producer for putting test data to Kafka. 32 | %% By default name of the test topic is equal to the name of 33 | %% testcase. 34 | {ok, _} = application:ensure_all_started(brod), 35 | ok = brod:start_client(bootstrap_hosts(), ?TEST_CLIENT_ID, client_config()), 36 | Topics = try Module:Case(topics) of 37 | L -> L 38 | catch 39 | _:_ -> [{?topic(Case, 1), 1}] 40 | end, 41 | [prepare_topic(I) || I <- Topics], 42 | Config. 
43 | 44 | common_end_per_testcase(_Case, _Config) -> 45 | catch brod:stop_client(?TEST_CLIENT_ID), 46 | ok. 47 | 48 | produce(TopicPartition, Value) -> 49 | produce(TopicPartition, <<>>, Value). 50 | 51 | produce(TopicPartition, Key, Value) -> 52 | produce(TopicPartition, Key, Value, []). 53 | 54 | produce({Topic, Partition}, Key, Value, Headers) -> 55 | ?tp(test_topic_produce, #{ topic => Topic 56 | , partition => Partition 57 | , key => Key 58 | , value => Value 59 | , headers => Headers 60 | }), 61 | {ok, Offset} = brod:produce_sync_offset( ?TEST_CLIENT_ID 62 | , Topic 63 | , Partition 64 | , <<>> 65 | , [#{ value => Value 66 | , key => Key 67 | , headers => Headers 68 | }] 69 | ), 70 | ?log(notice, "Produced at ~p ~p, offset: ~p", [Topic, Partition, Offset]), 71 | Offset. 72 | 73 | payloads(Config) -> 74 | MaxSeqNo = proplists:get_value(max_seqno, Config, 10), 75 | [<<I/integer>> || I <- lists:seq(1, MaxSeqNo)]. 76 | 77 | %% Produce binaries to the topic and return offset of the last message: 78 | produce_payloads(Topic, Partition, Config) -> 79 | Payloads = payloads(Config), 80 | ?log(notice, "Producing payloads to ~p", [{Topic, Partition}]), 81 | L = [produce({Topic, Partition}, I) + 1 || I <- Payloads], 82 | LastOffset = lists:last(L), 83 | {LastOffset, Payloads}. 84 | 85 | client_config() -> 86 | case kafka_version() of 87 | {0, 9} -> [{query_api_versions, false}]; 88 | _ -> [] 89 | end. 90 | 91 | maybe_zookeeper() -> 92 | {Major, _} = kafka_version(), 93 | case Major >= 3 of 94 | true -> 95 | %% Kafka 2.2 started supporting --bootstrap-server, but 2.x still supports --zookeeper 96 | %% Starting from 3.0, --zookeeper is no longer supported, must use --bootstrap-server 97 | "--bootstrap-server localhost:9092"; 98 | false -> 99 | "--zookeeper " ++ env("ZOOKEEPER_IP") ++ ":2181" 100 | end. 
101 | 102 | kafka_version() -> 103 | VsnStr = env("KAFKA_VERSION"), 104 | [Major, Minor | _] = string:tokens(VsnStr, "."), 105 | {list_to_integer(Major), list_to_integer(Minor)}. 106 | 107 | prepare_topic(Topic) when is_binary(Topic) -> 108 | prepare_topic({Topic, 1}); 109 | prepare_topic({Topic, NumPartitions}) -> 110 | prepare_topic({Topic, NumPartitions, 2}); 111 | prepare_topic({Topic, NumPartitions, NumReplicas}) -> 112 | delete_topic(Topic), 113 | 0 = create_topic(Topic, NumPartitions, NumReplicas), 114 | ok = brod:start_producer(?TEST_CLIENT_ID, Topic, _ProducerConfig = []). 115 | 116 | delete_topic(Name) -> 117 | Delete = "/opt/kafka/bin/kafka-topics.sh " ++ maybe_zookeeper() ++ 118 | " --delete --topic ~s", 119 | exec_in_kafka_container(Delete, [Name]). 120 | 121 | create_topic(Name, NumPartitions, NumReplicas) -> 122 | Create = "/opt/kafka/bin/kafka-topics.sh " ++ maybe_zookeeper() ++ 123 | " --create --partitions ~p --replication-factor ~p" 124 | " --topic ~s --config min.insync.replicas=1", 125 | 0 = exec_in_kafka_container(Create, [NumPartitions, NumReplicas, Name]), 126 | wait_for_topic_by_describe(Name). 127 | 128 | exec_in_kafka_container(FMT, Args) -> 129 | CMD0 = lists:flatten(io_lib:format(FMT, Args)), 130 | CMD = "docker exec kafka-1 bash -c '" ++ CMD0 ++ "'", 131 | Port = open_port({spawn, CMD}, [exit_status, stderr_to_stdout]), 132 | ?log(notice, "Running ~s~nin kafka container", [CMD0]), 133 | collect_port_output(Port, CMD). 134 | 135 | %% Kafka 3.9 in KRaft mode may not see the topic immediately after creation. 136 | wait_for_topic_by_describe(Name) -> 137 | wait_for_topic_by_describe(Name, 0, undefined). 
138 | 139 | wait_for_topic_by_describe(_Name, Attempt, Reason) when Attempt >= 10 -> 140 | error({failed_to_create_topic, Reason}); 141 | wait_for_topic_by_describe(Name, Attempt, _Reason) -> 142 | Describe = "/opt/kafka/bin/kafka-topics.sh " ++ maybe_zookeeper() ++ " --describe --topic ~s", 143 | try 144 | 0 = exec_in_kafka_container(Describe, [Name]) 145 | catch 146 | C:E -> 147 | timer:sleep(100), 148 | wait_for_topic_by_describe(Name, Attempt + 1, {C, E}) 149 | end. 150 | 151 | collect_port_output(Port, CMD) -> 152 | receive 153 | {Port, {data, Str}} -> 154 | ?log(notice, "~s", [Str]), 155 | collect_port_output(Port, CMD); 156 | {Port, {exit_status, ExitStatus}} -> 157 | ExitStatus 158 | after 20000 -> 159 | error({port_timeout, CMD}) 160 | end. 161 | 162 | -spec get_acked_offsets(brod:group_id()) -> 163 | #{brod:partition() => brod:offset()}. 164 | get_acked_offsets(GroupId) -> 165 | {ok, [#{partition_responses := Resp}]} = 166 | brod:fetch_committed_offsets(?TEST_CLIENT_ID, GroupId), 167 | Fun = fun(#{partition := P, offset := O}, Acc) -> 168 | Acc #{P => O} 169 | end, 170 | lists:foldl(Fun, #{}, Resp). 171 | 172 | %% Validate offsets committed to Kafka: 173 | check_committed_offsets(GroupId, Offsets) -> 174 | CommittedOffsets = get_acked_offsets(GroupId), 175 | lists:foreach( fun({TopicPartition, Offset}) -> 176 | %% Explanation for + 1: brod's `roundrobin_v2' 177 | %% protocol keeps _first unprocessed_ offset 178 | %% rather than _last processed_. And this is 179 | %% confusing. 180 | ?assertEqual( Offset + 1 181 | , maps:get( TopicPartition, CommittedOffsets 182 | , undefined) 183 | ) 184 | end 185 | , Offsets 186 | ). 
187 | 188 | %% Wait until total number of messages processed by a consumer group 189 | %% becomes equal to the expected value 190 | wait_n_messages(TestGroupId, Expected, NRetries) -> 191 | ?retry(1000, NRetries, 192 | begin 193 | Offsets = get_acked_offsets(TestGroupId), 194 | NMessages = lists:sum(maps:values(Offsets)), 195 | ?log( notice 196 | , "Number of messages processed by consumer group: ~p; " 197 | "total: ~p/~p" 198 | , [Offsets, NMessages, Expected] 199 | ), 200 | ?assert(NMessages >= Expected) 201 | end). 202 | 203 | wait_n_messages(TestGroupId, NMessages) -> 204 | wait_n_messages(TestGroupId, NMessages, 30). 205 | 206 | consumer_config() -> 207 | %% Makes brod restart faster, this will hopefully shave off some 208 | %% time from failure suites: 209 | [ {max_wait_time, 500} 210 | , {sleep_timeout, 100} 211 | , {begin_offset, 0} 212 | ]. 213 | 214 | bootstrap_hosts() -> 215 | [ {"localhost", 9092} 216 | ]. 217 | 218 | kill_process(Pid, Signal) -> 219 | Mon = monitor(process, Pid), 220 | ?tp(kill_consumer, #{pid => Pid}), 221 | exit(Pid, Signal), 222 | receive 223 | {'DOWN', Mon, process, Pid, _Reason} -> 224 | ok 225 | after 1000 -> 226 | ct:fail("timed out waiting for the process to die") 227 | end. 228 | 229 | env(Var) -> 230 | case os:getenv(Var) of 231 | [_|_] = Val-> Val; 232 | _ -> error({env_var_missing, Var}) 233 | end. 234 | --------------------------------------------------------------------------------