├── .circleci └── config.yml ├── .gitattributes ├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .golangci.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── addoffsetstotxn.go ├── addoffsetstotxn_test.go ├── addpartitionstotxn.go ├── addpartitionstotxn_test.go ├── address.go ├── address_test.go ├── alterclientquotas.go ├── alterclientquotas_test.go ├── alterconfigs.go ├── alterconfigs_test.go ├── alterpartitionreassignments.go ├── alterpartitionreassignments_test.go ├── alteruserscramcredentials.go ├── alteruserscramcredentials_test.go ├── apiversions.go ├── apiversions_test.go ├── balancer.go ├── balancer_test.go ├── batch.go ├── batch_test.go ├── buffer.go ├── builder_test.go ├── client.go ├── client_test.go ├── commit.go ├── commit_test.go ├── compress ├── compress.go ├── compress_test.go ├── gzip │ └── gzip.go ├── lz4 │ └── lz4.go ├── snappy │ ├── go-xerial-snappy │ │ ├── LICENSE │ │ ├── README.md │ │ ├── corpus │ │ │ ├── 1 │ │ │ ├── 020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2 │ │ │ ├── 05979b224be0294bf350310d4ba5257c9bb815db-3 │ │ │ ├── 0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 │ │ │ ├── 361a1c6d2a8f80780826c3d83ad391d0475c922f-4 │ │ │ ├── 4117af68228fa64339d362cf980c68ffadff96c8-12 │ │ │ ├── 4142249be82c8a617cf838eef05394ece39becd3-9 │ │ │ ├── 41ea8c7d904f1cd913b52e9ead4a96c639d76802-10 │ │ │ ├── 44083e1447694980c0ee682576e32358c9ee883f-2 │ │ │ ├── 4d6b359bd538feaa7d36c89235d07d0a443797ac-1 │ │ │ ├── 521e7e67b6063a75e0eeb24b0d1dd20731d34ad8-4 │ │ │ ├── 526e6f85d1b8777f0d9f70634c9f8b77fbdccdff-7 │ │ │ ├── 581b8fe7088f921567811fdf30e1f527c9f48e5e │ │ │ ├── 60cd10738158020f5843b43960158c3d116b3a71-11 │ │ │ ├── 652b031b4b9d601235f86ef62523e63d733b8623-3 │ │ │ ├── 684a011f6fdfc7ae9863e12381165e82d2a2e356-9 │ │ │ ├── 72e42fc8e5eaed6a8a077f420fc3bd1f9a7c0919-1 │ │ │ ├── 80881d1b911b95e0203b3b0e7dc6360c35f7620f-7 │ │ │ ├── 8484b3082d522e0a1f315db1fa1b2a5118be7cc3-8 │ │ │ ├── 9635bb09260f100bc4a2ee4e3b980fecc5b874ce-1 │ │ │ ├── 99d36b0b5b1be7151a508dd440ec725a2576c41c-1 │ │ │ ├── 9d339eddb4e2714ea319c3fb571311cb95fdb067-6 │ │ │ ├── b2419fcb7a9aef359de67cb6bd2b8a8c1f5c100f-4 │ │ │ ├── c1951b29109ec1017f63535ce3699630f46f54e1-5 │ │ │ ├── cb806bc4f67316af02d6ae677332a3b6005a18da-5 │ │ │ ├── cd7dd228703739e9252c7ea76f1c5f82ab44686a-10 │ │ │ ├── ce3671e91907349cea04fc3f2a4b91c65b99461d-3 │ │ │ ├── ce3c6f4c31f74d72fbf74c17d14a8d29aa62059e-6 │ │ │ ├── da39a3ee5e6b4b0d3255bfef95601890afd80709-1 │ │ │ ├── e2230aa0ecaebb9b890440effa13f501a89247b2-1 │ │ │ ├── efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 │ │ │ ├── f0445ac39e03978bbc8011316ac8468015ddb72c-1 │ │ │ └── f241da53c6bc1fe3368c55bf28db86ce15a2c784-2 │ │ ├── fuzz.go │ │ ├── snappy.go │ │ └── snappy_test.go │ ├── snappy.go │ ├── xerial.go │ └── xerial_test.go └── zstd │ └── zstd.go ├── compression.go ├── conn.go ├── conn_test.go ├── consumergroup.go ├── consumergroup_test.go ├── crc32.go ├── crc32_test.go ├── createacls.go ├── createacls_test.go ├── createpartitions.go ├── createpartitions_test.go ├── createtopics.go ├── createtopics_test.go ├── deleteacls.go ├── deleteacls_test.go ├── deletegroups.go ├── deletegroups_test.go ├── deletetopics.go ├── deletetopics_test.go ├── describeacls.go ├── describeacls_test.go ├── describeclientquotas.go ├── describeconfigs.go ├── describeconfigs_test.go ├── describegroups.go ├── describegroups_test.go ├── describeuserscramcredentials.go ├── describeuserscramcredentials_test.go ├── dialer.go ├── dialer_test.go ├── discard.go 
├── discard_test.go ├── docker-compose.yml ├── docker_compose_versions ├── README.md ├── docker-compose-010.yml ├── docker-compose-270.yml ├── docker-compose-370.yml └── docker-compose-400.yml ├── electleaders.go ├── electleaders_test.go ├── endtxn.go ├── error.go ├── error_test.go ├── example_consumergroup_test.go ├── example_groupbalancer_test.go ├── example_writer_test.go ├── examples ├── .gitignore ├── consumer-logger │ ├── Dockerfile │ ├── go.mod │ ├── go.sum │ └── main.go ├── consumer-mongo-db │ ├── Dockerfile │ ├── go.mod │ ├── go.sum │ └── main.go ├── docker-compose.yaml ├── kafka │ └── kafka-variables.env ├── producer-api │ ├── Dockerfile │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── test.http └── producer-random │ ├── Dockerfile │ ├── go.mod │ ├── go.sum │ └── main.go ├── fetch.go ├── fetch_test.go ├── findcoordinator.go ├── findcoordinator_test.go ├── fixtures ├── v1-v1.hex ├── v1-v1.pcapng ├── v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex ├── v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.pcapng ├── v1c-v1-v1c.hex ├── v1c-v1-v1c.pcapng ├── v1c-v1c.hex ├── v1c-v1c.pcapng ├── v2-v2.hex ├── v2-v2.pcapng ├── v2b-v1.hex ├── v2b-v1.pcapng ├── v2bc-v1-v1c.hex ├── v2bc-v1-v1c.pcapng ├── v2bc-v1.hex ├── v2bc-v1.pcapng ├── v2bc-v1c.hex ├── v2bc-v1c.pcapng ├── v2c-v2-v2c.hex ├── v2c-v2-v2c.pcapng ├── v2c-v2c.hex └── v2c-v2c.pcapng ├── go.mod ├── go.sum ├── groupbalancer.go ├── groupbalancer_test.go ├── gzip └── gzip.go ├── heartbeat.go ├── heartbeat_test.go ├── incrementalalterconfigs.go ├── incrementalalterconfigs_test.go ├── initproducerid.go ├── initproducerid_test.go ├── joingroup.go ├── joingroup_test.go ├── kafka.go ├── kafka_test.go ├── leavegroup.go ├── leavegroup_test.go ├── listgroups.go ├── listgroups_test.go ├── listoffset.go ├── listoffset_test.go ├── listpartitionreassignments.go ├── listpartitionreassignments_test.go ├── logger.go ├── lz4 └── lz4.go ├── message.go ├── message_reader.go ├── message_test.go ├── metadata.go ├── metadata_test.go ├── offsetcommit.go ├── offsetcommit_test.go ├── offsetdelete.go ├── offsetdelete_test.go ├── offsetfetch.go ├── offsetfetch_test.go ├── produce.go ├── produce_test.go ├── protocol.go ├── protocol ├── addoffsetstotxn │ ├── addoffsetstotxn.go │ └── addoffsetstotxn_test.go ├── addpartitionstotxn │ ├── addpartitionstotxn.go │ └── addpartitionstotxn_test.go ├── alterclientquotas │ ├── alterclientquotas.go │ └── alterclientquotas_test.go ├── alterconfigs │ ├── alterconfigs.go │ └── alterconfigs_test.go ├── alterpartitionreassignments │ ├── alterpartitionreassignments.go │ └── alterpartitionreassignments_test.go ├── alteruserscramcredentials │ ├── alteruserscramcredentials.go │ └── alteruserscramcredentials_test.go ├── apiversions │ ├── apiversions.go │ └── apiversions_test.go ├── buffer.go ├── buffer_test.go ├── cluster.go ├── conn.go ├── consumer │ ├── consumer.go │ └── consumer_test.go ├── createacls │ ├── createacls.go │ └── createacls_test.go ├── createpartitions │ ├── createpartitions.go │ └── createpartitions_test.go ├── createtopics │ └── createtopics.go ├── decode.go ├── deleteacls │ ├── deleteacls.go │ └── deleteacls_test.go ├── deletegroups │ ├── deletegroups.go │ └── deletegroups_test.go ├── deletetopics │ ├── deletetopics.go │ └── deletetopics_test.go ├── describeacls │ ├── describeacls.go │ └── describeacls_test.go ├── describeclientquotas │ ├── describeclientquotas.go │ └── describeclientquotas_test.go ├── describeconfigs │ ├── describeconfigs.go │ └── describeconfigs_test.go ├── describegroups │ └── describegroups.go ├── 
describeuserscramcredentials │ ├── describeuserscramcredentials.go │ └── describeuserscramcredentials_test.go ├── electleaders │ ├── electleaders.go │ └── electleaders_test.go ├── encode.go ├── endtxn │ ├── endtxn.go │ └── endtxn_test.go ├── error.go ├── fetch │ ├── fetch.go │ └── fetch_test.go ├── findcoordinator │ └── findcoordinator.go ├── heartbeat │ ├── heartbeat.go │ └── heartbeat_test.go ├── incrementalalterconfigs │ ├── incrementalalterconfigs.go │ └── incrementalalterconfigs_test.go ├── initproducerid │ ├── initproducerid.go │ └── initproducerid_test.go ├── joingroup │ ├── joingroup.go │ └── joingroup_test.go ├── leavegroup │ ├── leavegroup.go │ └── leavegroup_test.go ├── listgroups │ └── listgroups.go ├── listoffsets │ ├── listoffsets.go │ └── listoffsets_test.go ├── listpartitionreassignments │ ├── listpartitionreassignments.go │ └── listpartitionreassignments_test.go ├── metadata │ ├── metadata.go │ └── metadata_test.go ├── offsetcommit │ ├── offsetcommit.go │ └── offsetcommit_test.go ├── offsetdelete │ ├── offsetdelete.go │ └── offsetdelete_test.go ├── offsetfetch │ └── offsetfetch.go ├── produce │ ├── produce.go │ └── produce_test.go ├── protocol.go ├── protocol_test.go ├── prototest │ ├── bytes.go │ ├── prototest.go │ ├── reflect.go │ ├── request.go │ └── response.go ├── rawproduce │ ├── rawproduce.go │ └── rawproduce_test.go ├── record.go ├── record_batch.go ├── record_batch_test.go ├── record_v1.go ├── record_v2.go ├── reflect.go ├── reflect_unsafe.go ├── request.go ├── response.go ├── response_test.go ├── roundtrip.go ├── saslauthenticate │ └── saslauthenticate.go ├── saslhandshake │ └── saslhandshake.go ├── size.go ├── syncgroup │ ├── syncgroup.go │ └── syncgroup_test.go └── txnoffsetcommit │ ├── txnoffsetcommit.go │ └── txnoffsetcommit_test.go ├── protocol_test.go ├── rawproduce.go ├── rawproduce_test.go ├── read.go ├── read_test.go ├── reader.go ├── reader_test.go ├── record.go ├── recordbatch.go ├── resolver.go ├── resource.go ├── resource_test.go ├── sasl ├── aws_msk_iam │ ├── go.mod │ ├── go.sum │ ├── msk_iam.go │ └── msk_iam_test.go ├── aws_msk_iam_v2 │ ├── README.md │ ├── example_test.go │ ├── go.mod │ ├── go.sum │ ├── msk_iam.go │ └── msk_iam_test.go ├── plain │ └── plain.go ├── sasl.go ├── sasl_test.go └── scram │ └── scram.go ├── saslauthenticate.go ├── saslauthenticate_test.go ├── saslhandshake.go ├── saslhandshake_test.go ├── scripts └── wait-for-kafka.sh ├── sizeof.go ├── snappy └── snappy.go ├── stats.go ├── syncgroup.go ├── syncgroup_test.go ├── testing ├── conn.go ├── version.go └── version_test.go ├── time.go ├── topics ├── list_topics.go └── list_topics_test.go ├── transport.go ├── transport_test.go ├── txnoffsetcommit.go ├── txnoffsetcommit_test.go ├── write.go ├── write_test.go ├── writer.go ├── writer_test.go └── zstd └── zstd.go /.gitattributes: -------------------------------------------------------------------------------- 1 | fixtures/*.hex binary 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | > A clear and concise description of what the bug is. 13 | 14 | **Kafka Version** 15 | 16 | > * What version(s) of Kafka are you testing against? 17 | > * What version of kafka-go are you using? 
18 | 19 | **To Reproduce** 20 | 21 | > Resources to reproduce the behavior: 22 | 23 | ```yaml 24 | --- 25 | # docker-compose.yaml 26 | # 27 | # Adding a docker-compose file will help the maintainers set up the environment 28 | # to reproduce the issue. 29 | # 30 | # If one of the docker-compose files available in the repository can be used, 31 | # mentioning it is also a useful alternative. 32 | ... 33 | ``` 34 | 35 | ```go 36 | package main 37 | 38 | import ( 39 | "github.com/segmentio/kafka-go" 40 | ) 41 | 42 | func main() { 43 | // Adding a fully reproducible example will help maintainers provide 44 | // assistance to debug the issue. 45 | ... 46 | } 47 | ``` 48 | 49 | **Expected Behavior** 50 | 51 | > A clear and concise description of what you expected to happen. 52 | 53 | **Observed Behavior** 54 | 55 | > A clear and concise description of the behavior you observed. 56 | 57 | ``` 58 | Oftentimes, pasting the logging output from a kafka.Reader or kafka.Writer will 59 | provide useful details to help maintainers investigate the issue and provide a 60 | fix. If possible, stack traces or CPU/memory profiles may also contain 61 | valuable information about the conditions that triggered the issue. 62 | ``` 63 | 64 | **Additional Context** 65 | 66 | > Add any other context about the problem here. 67 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the solution you would like** 11 | 12 | > A clear and concise description of what you want to happen. 13 | 14 | **Supporting documentation** 15 | 16 | > Please provide links to relevant Kafka protocol docs and/or KIPs. 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | /kafkacli 26 | 27 | # Emacs 28 | *~ 29 | 30 | # VIM 31 | *.swp 32 | 33 | # Goland 34 | .idea 35 | 36 | #IntelliJ 37 | *.iml 38 | 39 | # govendor 40 | /vendor/*/ 41 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | enable: 3 | - bodyclose 4 | - errorlint 5 | - goconst 6 | - godot 7 | - gofmt 8 | - goimports 9 | - prealloc 10 | 11 | disable: 12 | # Temporarily disabling so it can be addressed in a dedicated PR.
13 | - errcheck 14 | - goerr113 15 | 16 | linters-settings: 17 | goconst: 18 | ignore-tests: true 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Segment 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | KAFKA_SKIP_NETTEST=1 \ 3 | KAFKA_VERSION=2.3.1 \ 4 | go test -race -cover ./... 5 | 6 | docker: 7 | docker compose up -d 8 | -------------------------------------------------------------------------------- /addoffsetstotxn.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "time" 8 | 9 | "github.com/segmentio/kafka-go/protocol/addoffsetstotxn" 10 | ) 11 | 12 | // AddOffsetsToTxnRequest is the request structure for the AddOffsetsToTxn function. 13 | type AddOffsetsToTxnRequest struct { 14 | // Address of the kafka broker to send the request to. 15 | Addr net.Addr 16 | 17 | // The transactional id key. 18 | TransactionalID string 19 | 20 | // The Producer ID (PID) for the current producer session; 21 | // received from an InitProducerID request. 22 | ProducerID int 23 | 24 | // The epoch associated with the current producer session for the given PID. 25 | ProducerEpoch int 26 | 27 | // The unique group identifier. 28 | GroupID string 29 | } 30 | 31 | // AddOffsetsToTxnResponse is the response structure for the AddOffsetsToTxn function. 32 | type AddOffsetsToTxnResponse struct { 33 | // The amount of time that the broker throttled the request. 34 | Throttle time.Duration 35 | 36 | // An error that may have occurred when attempting to add the offsets 37 | // to a transaction. 38 | // 39 | // The errors contain the kafka error code. Programs may use the standard 40 | // errors.Is function to test the error against kafka error codes. 41 | Error error 42 | } 43 | 44 | // AddOffsetsToTxn sends an add offsets to txn request to a kafka broker and returns the response.
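//
// A minimal usage sketch; the client, context, and producer session values
// (typically obtained from a prior InitProducerID call) are assumed to be
// supplied by the caller:
//
//	resp, err := client.AddOffsetsToTxn(ctx, &AddOffsetsToTxnRequest{
//		Addr:            TCP("localhost:9092"),
//		TransactionalID: "my-transactional-id",
//		ProducerID:      producerID,
//		ProducerEpoch:   producerEpoch,
//		GroupID:         "my-group",
//	})
//	if err != nil {
//		// handle the transport error
//	}
//	if resp.Error != nil {
//		// handle the kafka protocol error
//	}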
45 | func (c *Client) AddOffsetsToTxn( 46 | ctx context.Context, 47 | req *AddOffsetsToTxnRequest, 48 | ) (*AddOffsetsToTxnResponse, error) { 49 | m, err := c.roundTrip(ctx, req.Addr, &addoffsetstotxn.Request{ 50 | TransactionalID: req.TransactionalID, 51 | ProducerID: int64(req.ProducerID), 52 | ProducerEpoch: int16(req.ProducerEpoch), 53 | GroupID: req.GroupID, 54 | }) 55 | if err != nil { 56 | return nil, fmt.Errorf("kafka.(*Client).AddOffsetsToTxn: %w", err) 57 | } 58 | 59 | r := m.(*addoffsetstotxn.Response) 60 | 61 | res := &AddOffsetsToTxnResponse{ 62 | Throttle: makeDuration(r.ThrottleTimeMs), 63 | Error: makeError(r.ErrorCode, ""), 64 | } 65 | 66 | return res, nil 67 | } 68 | -------------------------------------------------------------------------------- /address.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "net" 5 | "strings" 6 | ) 7 | 8 | // TCP constructs an address with the network set to "tcp". 9 | func TCP(address ...string) net.Addr { return makeNetAddr("tcp", address) } 10 | 11 | func makeNetAddr(network string, addresses []string) net.Addr { 12 | switch len(addresses) { 13 | case 0: 14 | return nil // maybe panic instead? 15 | case 1: 16 | return makeAddr(network, addresses[0]) 17 | default: 18 | return makeMultiAddr(network, addresses) 19 | } 20 | } 21 | 22 | func makeAddr(network, address string) net.Addr { 23 | return &networkAddress{ 24 | network: network, 25 | address: canonicalAddress(address), 26 | } 27 | } 28 | 29 | func makeMultiAddr(network string, addresses []string) net.Addr { 30 | multi := make(multiAddr, len(addresses)) 31 | for i, address := range addresses { 32 | multi[i] = makeAddr(network, address) 33 | } 34 | return multi 35 | } 36 | 37 | type networkAddress struct { 38 | network string 39 | address string 40 | } 41 | 42 | func (a *networkAddress) Network() string { return a.network } 43 | 44 | func (a *networkAddress) String() string { return a.address } 45 | 46 | type multiAddr []net.Addr 47 | 48 | func (m multiAddr) Network() string { return m.join(net.Addr.Network) } 49 | 50 | func (m multiAddr) String() string { return m.join(net.Addr.String) } 51 | 52 | func (m multiAddr) join(f func(net.Addr) string) string { 53 | switch len(m) { 54 | case 0: 55 | return "" 56 | case 1: 57 | return f(m[0]) 58 | } 59 | s := make([]string, len(m)) 60 | for i, a := range m { 61 | s[i] = f(a) 62 | } 63 | return strings.Join(s, ",") 64 | } 65 | -------------------------------------------------------------------------------- /address_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | ) 7 | 8 | func TestNetworkAddress(t *testing.T) { 9 | tests := []struct { 10 | addr net.Addr 11 | network string 12 | address string 13 | }{ 14 | { 15 | addr: TCP("127.0.0.1"), 16 | network: "tcp", 17 | address: "127.0.0.1:9092", 18 | }, 19 | 20 | { 21 | addr: TCP("::1"), 22 | network: "tcp", 23 | address: "[::1]:9092", 24 | }, 25 | 26 | { 27 | addr: TCP("localhost"), 28 | network: "tcp", 29 | address: "localhost:9092", 30 | }, 31 | 32 | { 33 | addr: TCP("localhost:9092"), 34 | network: "tcp", 35 | address: "localhost:9092", 36 | }, 37 | 38 | { 39 | addr: TCP("localhost", "localhost:9093", "localhost:9094"), 40 | network: "tcp,tcp,tcp", 41 | address: "localhost:9092,localhost:9093,localhost:9094", 42 | }, 43 | } 44 | 45 | for _, test := range tests { 46 | t.Run(test.network+"+"+test.address, func(t *testing.T) 
{ 47 | if s := test.addr.Network(); s != test.network { 48 | t.Errorf("network mismatch: want %q but got %q", test.network, s) 49 | } 50 | if s := test.addr.String(); s != test.address { 51 | t.Errorf("address mismatch: want %q but got %q", test.address, s) 52 | } 53 | }) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /alterconfigs_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestClientAlterConfigs(t *testing.T) { 12 | if !ktesting.KafkaIsAtLeast("0.11.0") { 13 | return 14 | } 15 | 16 | const ( 17 | MaxMessageBytes = "max.message.bytes" 18 | MaxMessageBytesValue = "200000" 19 | ) 20 | 21 | client, shutdown := newLocalClient() 22 | defer shutdown() 23 | 24 | topic := makeTopic() 25 | createTopic(t, topic, 1) 26 | defer deleteTopic(t, topic) 27 | 28 | _, err := client.AlterConfigs(context.Background(), &AlterConfigsRequest{ 29 | Resources: []AlterConfigRequestResource{{ 30 | ResourceType: ResourceTypeTopic, 31 | ResourceName: topic, 32 | Configs: []AlterConfigRequestConfig{{ 33 | Name: MaxMessageBytes, 34 | Value: MaxMessageBytesValue, 35 | }, 36 | }, 37 | }}, 38 | }) 39 | 40 | if err != nil { 41 | t.Fatal(err) 42 | } 43 | 44 | describeResp, err := client.DescribeConfigs(context.Background(), &DescribeConfigsRequest{ 45 | Resources: []DescribeConfigRequestResource{{ 46 | ResourceType: ResourceTypeTopic, 47 | ResourceName: topic, 48 | ConfigNames: []string{MaxMessageBytes}, 49 | }}, 50 | }) 51 | 52 | if err != nil { 53 | t.Fatal(err) 54 | } 55 | 56 | maxMessageBytesValue := "0" 57 | for _, resource := range describeResp.Resources { 58 | if resource.ResourceType == int8(ResourceTypeTopic) && resource.ResourceName == topic { 59 | for _, entry := range resource.ConfigEntries { 60 | if entry.ConfigName == MaxMessageBytes { 61 | maxMessageBytesValue = entry.ConfigValue 62 | } 63 | } 64 | } 65 | } 66 | assert.Equal(t, MaxMessageBytesValue, maxMessageBytesValue) 67 | } 68 | -------------------------------------------------------------------------------- /alteruserscramcredentials_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | ) 9 | 10 | func TestAlterUserScramCredentials(t *testing.T) { 11 | // https://issues.apache.org/jira/browse/KAFKA-10259 12 | if !ktesting.KafkaIsAtLeast("2.7.0") { 13 | return 14 | } 15 | 16 | client, shutdown := newLocalClient() 17 | defer shutdown() 18 | 19 | name := makeTopic() 20 | 21 | createRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{ 22 | Upsertions: []UserScramCredentialsUpsertion{ 23 | { 24 | Name: name, 25 | Mechanism: ScramMechanismSha512, 26 | Iterations: 15000, 27 | Salt: []byte("my-salt"), 28 | SaltedPassword: []byte("my-salted-password"), 29 | }, 30 | }, 31 | }) 32 | 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | 37 | if len(createRes.Results) != 1 { 38 | t.Fatalf("expected 1 createResult; got %d", len(createRes.Results)) 39 | } 40 | 41 | if createRes.Results[0].User != name { 42 | t.Fatalf("expected createResult with user: %s, got %s", name, createRes.Results[0].User) 43 | } 44 | 45 | if createRes.Results[0].Error != nil { 46 | t.Fatalf("didn't expect an error in
createResult, got %v", createRes.Results[0].Error) 47 | } 48 | 49 | deleteRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{ 50 | Deletions: []UserScramCredentialsDeletion{ 51 | { 52 | Name: name, 53 | Mechanism: ScramMechanismSha512, 54 | }, 55 | }, 56 | }) 57 | 58 | if err != nil { 59 | t.Fatal(err) 60 | } 61 | 62 | if len(deleteRes.Results) != 1 { 63 | t.Fatalf("expected 1 deleteResult; got %d", len(deleteRes.Results)) 64 | } 65 | 66 | if deleteRes.Results[0].User != name { 67 | t.Fatalf("expected deleteResult with user: %s, got %s", name, deleteRes.Results[0].User) 68 | } 69 | 70 | if deleteRes.Results[0].Error != nil { 71 | t.Fatalf("didn't expect an error in deleteResult, got %v", deleteRes.Results[0].Error) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /apiversions.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "net" 6 | 7 | "github.com/segmentio/kafka-go/protocol" 8 | "github.com/segmentio/kafka-go/protocol/apiversions" 9 | ) 10 | 11 | // ApiVersionsRequest is a request to the ApiVersions API. 12 | type ApiVersionsRequest struct { 13 | // Address of the kafka broker to send the request to. 14 | Addr net.Addr 15 | } 16 | 17 | // ApiVersionsResponse is a response from the ApiVersions API. 18 | type ApiVersionsResponse struct { 19 | // Error is set to a non-nil value if an error was encountered. 20 | Error error 21 | 22 | // ApiKeys contains the specific details of each supported API. 23 | ApiKeys []ApiVersionsResponseApiKey 24 | } 25 | 26 | // ApiVersionsResponseApiKey includes the details of which versions are supported for a single API. 27 | type ApiVersionsResponseApiKey struct { 28 | // ApiKey is the ID of the API. 29 | ApiKey int 30 | 31 | // ApiName is a human-friendly description of the API. 32 | ApiName string 33 | 34 | // MinVersion is the minimum API version supported by the broker. 35 | MinVersion int 36 | 37 | // MaxVersion is the maximum API version supported by the broker. 
38 | MaxVersion int 39 | } 40 | 41 | func (c *Client) ApiVersions( 42 | ctx context.Context, 43 | req *ApiVersionsRequest, 44 | ) (*ApiVersionsResponse, error) { 45 | apiReq := &apiversions.Request{} 46 | protoResp, err := c.roundTrip( 47 | ctx, 48 | req.Addr, 49 | apiReq, 50 | ) 51 | if err != nil { 52 | return nil, err 53 | } 54 | apiResp := protoResp.(*apiversions.Response) 55 | 56 | resp := &ApiVersionsResponse{ 57 | Error: makeError(apiResp.ErrorCode, ""), 58 | } 59 | for _, apiKey := range apiResp.ApiKeys { 60 | resp.ApiKeys = append( 61 | resp.ApiKeys, 62 | ApiVersionsResponseApiKey{ 63 | ApiKey: int(apiKey.ApiKey), 64 | ApiName: protocol.ApiKey(apiKey.ApiKey).String(), 65 | MinVersion: int(apiKey.MinVersion), 66 | MaxVersion: int(apiKey.MaxVersion), 67 | }, 68 | ) 69 | } 70 | 71 | return resp, err 72 | } 73 | -------------------------------------------------------------------------------- /apiversions_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | ) 7 | 8 | func TestClientApiVersions(t *testing.T) { 9 | ctx := context.Background() 10 | 11 | client, shutdown := newLocalClient() 12 | defer shutdown() 13 | 14 | resp, err := client.ApiVersions(ctx, &ApiVersionsRequest{}) 15 | if err != nil { 16 | t.Fatal(err) 17 | } 18 | if resp.Error != nil { 19 | t.Error( 20 | "Unexpected error in response", 21 | "expected", nil, 22 | "got", resp.Error, 23 | ) 24 | } 25 | 26 | if len(resp.ApiKeys) == 0 { 27 | t.Error( 28 | "Unexpected apiKeys length", 29 | "expected greater than", 0, 30 | "got", 0, 31 | ) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /batch_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net" 8 | "strconv" 9 | "testing" 10 | ) 11 | 12 | func TestBatchDontExpectEOF(t *testing.T) { 13 | topic := makeTopic() 14 | 15 | broker, err := (&Dialer{ 16 | Resolver: &net.Resolver{}, 17 | }).LookupLeader(context.Background(), "tcp", "localhost:9092", topic, 0) 18 | if err != nil { 19 | t.Fatal("failed to open a new kafka connection:", err) 20 | } 21 | 22 | nc, err := net.Dial("tcp", net.JoinHostPort(broker.Host, strconv.Itoa(broker.Port))) 23 | if err != nil { 24 | t.Fatalf("cannot connect to partition leader at %s:%d: %s", broker.Host, broker.Port, err) 25 | } 26 | 27 | conn := NewConn(nc, topic, 0) 28 | defer conn.Close() 29 | 30 | nc.(*net.TCPConn).CloseRead() 31 | 32 | batch := conn.ReadBatch(1024, 8192) 33 | 34 | if _, err := batch.ReadMessage(); !errors.Is(err, io.ErrUnexpectedEOF) { 35 | t.Error("bad error when reading message:", err) 36 | } 37 | 38 | if err := batch.Close(); !errors.Is(err, io.ErrUnexpectedEOF) { 39 | t.Error("bad error when closing the batch:", err) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /buffer.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bytes" 5 | "sync" 6 | ) 7 | 8 | var bufferPool = sync.Pool{ 9 | New: func() interface{} { return newBuffer() }, 10 | } 11 | 12 | func newBuffer() *bytes.Buffer { 13 | b := new(bytes.Buffer) 14 | b.Grow(65536) 15 | return b 16 | } 17 | 18 | func acquireBuffer() *bytes.Buffer { 19 | return bufferPool.Get().(*bytes.Buffer) 20 | } 21 | 22 | func releaseBuffer(b *bytes.Buffer) { 23 | if b != nil { 24 | b.Reset() 25 | 
bufferPool.Put(b) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /commit.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | // A commit represents an instruction to publish an update of the last 4 | // offset read by a program for a topic and partition. 5 | type commit struct { 6 | topic string 7 | partition int 8 | offset int64 9 | } 10 | 11 | // makeCommit builds a commit value from a message; the resulting commit takes 12 | // its topic, partition, and offset from the message. 13 | func makeCommit(msg Message) commit { 14 | return commit{ 15 | topic: msg.Topic, 16 | partition: msg.Partition, 17 | offset: msg.Offset + 1, 18 | } 19 | } 20 | 21 | // makeCommits generates a slice of commits from a list of messages; it extracts 22 | // the topic, partition, and offset of each message and builds the corresponding 23 | // commit slice. 24 | func makeCommits(msgs ...Message) []commit { 25 | commits := make([]commit, len(msgs)) 26 | 27 | for i, m := range msgs { 28 | commits[i] = makeCommit(m) 29 | } 30 | 31 | return commits 32 | } 33 | 34 | // commitRequest is the data type exchanged between the CommitMessages method 35 | // and internals of the reader's implementation. 36 | type commitRequest struct { 37 | commits []commit 38 | errch chan<- error 39 | } 40 | -------------------------------------------------------------------------------- /commit_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import "testing" 4 | 5 | func TestMakeCommit(t *testing.T) { 6 | msg := Message{ 7 | Topic: "blah", 8 | Partition: 1, 9 | Offset: 2, 10 | } 11 | 12 | commit := makeCommit(msg) 13 | if commit.topic != msg.Topic { 14 | t.Errorf("bad topic: expected %v; got %v", msg.Topic, commit.topic) 15 | } 16 | if commit.partition != msg.Partition { 17 | t.Errorf("bad partition: expected %v; got %v", msg.Partition, commit.partition) 18 | } 19 | if commit.offset != msg.Offset+1 { 20 | t.Errorf("expected committed offset to be 1 greater than msg offset") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /compress/lz4/lz4.go: -------------------------------------------------------------------------------- 1 | package lz4 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | 7 | "github.com/pierrec/lz4/v4" 8 | ) 9 | 10 | var ( 11 | readerPool sync.Pool 12 | writerPool sync.Pool 13 | ) 14 | 15 | // Codec is the implementation of a compress.Codec which supports creating 16 | // readers and writers for kafka messages compressed with lz4. 17 | type Codec struct{} 18 | 19 | // Code implements the compress.Codec interface. 20 | func (c *Codec) Code() int8 { return 3 } 21 | 22 | // Name implements the compress.Codec interface. 23 | func (c *Codec) Name() string { return "lz4" } 24 | 25 | // NewReader implements the compress.Codec interface. 26 | func (c *Codec) NewReader(r io.Reader) io.ReadCloser { 27 | z, _ := readerPool.Get().(*lz4.Reader) 28 | if z != nil { 29 | z.Reset(r) 30 | } else { 31 | z = lz4.NewReader(r) 32 | } 33 | return &reader{Reader: z} 34 | } 35 | 36 | // NewWriter implements the compress.Codec interface.
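//
// Writers are pooled: the returned io.WriteCloser's Close method flushes
// the underlying *lz4.Writer and places it back in a sync.Pool so a later
// NewWriter call can reuse it instead of allocating a new one.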
37 | func (c *Codec) NewWriter(w io.Writer) io.WriteCloser { 38 | z, _ := writerPool.Get().(*lz4.Writer) 39 | if z != nil { 40 | z.Reset(w) 41 | } else { 42 | z = lz4.NewWriter(w) 43 | } 44 | return &writer{Writer: z} 45 | } 46 | 47 | type reader struct{ *lz4.Reader } 48 | 49 | func (r *reader) Close() (err error) { 50 | if z := r.Reader; z != nil { 51 | r.Reader = nil 52 | z.Reset(nil) 53 | readerPool.Put(z) 54 | } 55 | return 56 | } 57 | 58 | type writer struct{ *lz4.Writer } 59 | 60 | func (w *writer) Close() (err error) { 61 | if z := w.Writer; z != nil { 62 | w.Writer = nil 63 | err = z.Close() 64 | z.Reset(nil) 65 | writerPool.Put(z) 66 | } 67 | return 68 | } 69 | -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Evan Huus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/README.md: -------------------------------------------------------------------------------- 1 | # go-xerial-snappy 2 | 3 | [![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) 4 | 5 | Xerial-compatible Snappy framing support for golang. 6 | 7 | Packages using Xerial for snappy encoding use a framing format incompatible with 8 | basically everything else in existence. This package wraps Go's built-in snappy 9 | package to support it. 10 | 11 | Apps that use this format include Apache Kafka (see 12 | https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for 13 | details). 
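As a rough sketch of what the framing looks like (the magic constant below is
the standard Xerial stream header; this package's own `Decode` should be used
for actual decoding), a framed stream can be detected like this:

```go
package main

import (
	"bytes"
	"fmt"
)

// A Xerial-framed stream opens with this 8-byte magic, followed by two
// big-endian uint32s (version and compat version) and then a sequence of
// length-prefixed raw snappy blocks.
var xerialHeader = []byte{130, 'S', 'N', 'A', 'P', 'P', 'Y', 0}

func isXerialFramed(b []byte) bool {
	return len(b) >= 16 && bytes.Equal(b[:8], xerialHeader)
}

func main() {
	fmt.Println(isXerialFramed([]byte("raw snappy data"))) // false
}
```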
14 | -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2: -------------------------------------------------------------------------------- 1 | ��Y -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/361a1c6d2a8f80780826c3d83ad391d0475c922f-4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/361a1c6d2a8f80780826c3d83ad391d0475c922f-4 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/4117af68228fa64339d362cf980c68ffadff96c8-12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/4117af68228fa64339d362cf980c68ffadff96c8-12 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/4142249be82c8a617cf838eef05394ece39becd3-9: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/4142249be82c8a617cf838eef05394ece39becd3-9 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/41ea8c7d904f1cd913b52e9ead4a96c639d76802-10: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/41ea8c7d904f1cd913b52e9ead4a96c639d76802-10 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/44083e1447694980c0ee682576e32358c9ee883f-2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/44083e1447694980c0ee682576e32358c9ee883f-2 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/4d6b359bd538feaa7d36c89235d07d0a443797ac-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/4d6b359bd538feaa7d36c89235d07d0a443797ac-1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/521e7e67b6063a75e0eeb24b0d1dd20731d34ad8-4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/521e7e67b6063a75e0eeb24b0d1dd20731d34ad8-4 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/526e6f85d1b8777f0d9f70634c9f8b77fbdccdff-7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/526e6f85d1b8777f0d9f70634c9f8b77fbdccdff-7 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/581b8fe7088f921567811fdf30e1f527c9f48e5e: -------------------------------------------------------------------------------- 1 | package -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/60cd10738158020f5843b43960158c3d116b3a71-11: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/60cd10738158020f5843b43960158c3d116b3a71-11 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/652b031b4b9d601235f86ef62523e63d733b8623-3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/652b031b4b9d601235f86ef62523e63d733b8623-3 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/684a011f6fdfc7ae9863e12381165e82d2a2e356-9: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/684a011f6fdfc7ae9863e12381165e82d2a2e356-9 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/72e42fc8e5eaed6a8a077f420fc3bd1f9a7c0919-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/72e42fc8e5eaed6a8a077f420fc3bd1f9a7c0919-1 -------------------------------------------------------------------------------- 
/compress/snappy/go-xerial-snappy/corpus/80881d1b911b95e0203b3b0e7dc6360c35f7620f-7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/80881d1b911b95e0203b3b0e7dc6360c35f7620f-7 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/8484b3082d522e0a1f315db1fa1b2a5118be7cc3-8: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/8484b3082d522e0a1f315db1fa1b2a5118be7cc3-8 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/9635bb09260f100bc4a2ee4e3b980fecc5b874ce-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/9635bb09260f100bc4a2ee4e3b980fecc5b874ce-1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/99d36b0b5b1be7151a508dd440ec725a2576c41c-1: -------------------------------------------------------------------------------- 1 | PLAI -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/9d339eddb4e2714ea319c3fb571311cb95fdb067-6: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/9d339eddb4e2714ea319c3fb571311cb95fdb067-6 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/b2419fcb7a9aef359de67cb6bd2b8a8c1f5c100f-4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/b2419fcb7a9aef359de67cb6bd2b8a8c1f5c100f-4 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/c1951b29109ec1017f63535ce3699630f46f54e1-5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/c1951b29109ec1017f63535ce3699630f46f54e1-5 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/cb806bc4f67316af02d6ae677332a3b6005a18da-5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/cb806bc4f67316af02d6ae677332a3b6005a18da-5 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/cd7dd228703739e9252c7ea76f1c5f82ab44686a-10: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/cd7dd228703739e9252c7ea76f1c5f82ab44686a-10 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/ce3671e91907349cea04fc3f2a4b91c65b99461d-3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/ce3671e91907349cea04fc3f2a4b91c65b99461d-3 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/ce3c6f4c31f74d72fbf74c17d14a8d29aa62059e-6: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/ce3c6f4c31f74d72fbf74c17d14a8d29aa62059e-6 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/e2230aa0ecaebb9b890440effa13f501a89247b2-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/e2230aa0ecaebb9b890440effa13f501a89247b2-1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/f0445ac39e03978bbc8011316ac8468015ddb72c-1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/f0445ac39e03978bbc8011316ac8468015ddb72c-1 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/corpus/f241da53c6bc1fe3368c55bf28db86ce15a2c784-2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/compress/snappy/go-xerial-snappy/corpus/f241da53c6bc1fe3368c55bf28db86ce15a2c784-2 -------------------------------------------------------------------------------- /compress/snappy/go-xerial-snappy/fuzz.go: -------------------------------------------------------------------------------- 1 | // +build gofuzz 2 | 3 | package snappy 4 | 5 | func Fuzz(data []byte) int { 6 | decode, err := Decode(data) 7 | if decode == nil && err == nil { 8 | panic("nil error with nil result") 9 | } 10 | 
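// go-fuzz interprets the return value: 0 marks the input as uninteresting
// (a decode error is the common case for mutated corpus data), and 1 asks
// the fuzzer to prioritize inputs that decoded successfully.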
11 | if err != nil { 12 | return 0 13 | } 14 | 15 | return 1 16 | } 17 | -------------------------------------------------------------------------------- /compression.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/segmentio/kafka-go/compress" 7 | ) 8 | 9 | type Compression = compress.Compression 10 | 11 | const ( 12 | Gzip Compression = compress.Gzip 13 | Snappy Compression = compress.Snappy 14 | Lz4 Compression = compress.Lz4 15 | Zstd Compression = compress.Zstd 16 | ) 17 | 18 | type CompressionCodec = compress.Codec 19 | 20 | var ( 21 | errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported") 22 | ) 23 | 24 | // resolveCodec looks up a codec by Code(). 25 | func resolveCodec(code int8) (CompressionCodec, error) { 26 | codec := compress.Compression(code).Codec() 27 | if codec == nil { 28 | return nil, errUnknownCodec 29 | } 30 | return codec, nil 31 | } 32 | -------------------------------------------------------------------------------- /crc32.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "encoding/binary" 5 | "hash/crc32" 6 | ) 7 | 8 | type crc32Writer struct { 9 | table *crc32.Table 10 | buffer [8]byte 11 | crc32 uint32 12 | } 13 | 14 | func (w *crc32Writer) update(b []byte) { 15 | w.crc32 = crc32.Update(w.crc32, w.table, b) 16 | } 17 | 18 | func (w *crc32Writer) writeInt8(i int8) { 19 | w.buffer[0] = byte(i) 20 | w.update(w.buffer[:1]) 21 | } 22 | 23 | func (w *crc32Writer) writeInt16(i int16) { 24 | binary.BigEndian.PutUint16(w.buffer[:2], uint16(i)) 25 | w.update(w.buffer[:2]) 26 | } 27 | 28 | func (w *crc32Writer) writeInt32(i int32) { 29 | binary.BigEndian.PutUint32(w.buffer[:4], uint32(i)) 30 | w.update(w.buffer[:4]) 31 | } 32 | 33 | func (w *crc32Writer) writeInt64(i int64) { 34 | binary.BigEndian.PutUint64(w.buffer[:8], uint64(i)) 35 | w.update(w.buffer[:8]) 36 | } 37 | 38 | func (w *crc32Writer) writeBytes(b []byte) { 39 | n := len(b) 40 | if b == nil { 41 | n = -1 42 | } 43 | w.writeInt32(int32(n)) 44 | w.update(b) 45 | } 46 | 47 | func (w *crc32Writer) Write(b []byte) (int, error) { 48 | w.update(b) 49 | return len(b), nil 50 | } 51 | 52 | func (w *crc32Writer) WriteString(s string) (int, error) { 53 | w.update([]byte(s)) 54 | return len(s), nil 55 | } 56 | -------------------------------------------------------------------------------- /crc32_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bytes" 5 | "hash/crc32" 6 | "testing" 7 | ) 8 | 9 | func TestMessageCRC32(t *testing.T) { 10 | m := message{ 11 | MagicByte: 1, 12 | Timestamp: 42, 13 | Key: nil, 14 | Value: []byte("Hello World!"), 15 | } 16 | 17 | b := &bytes.Buffer{} 18 | w := &writeBuffer{w: b} 19 | w.write(m) 20 | 21 | h := crc32.New(crc32.IEEETable) 22 | h.Write(b.Bytes()[4:]) 23 | 24 | sum1 := h.Sum32() 25 | sum2 := uint32(m.crc32(&crc32Writer{table: crc32.IEEETable})) 26 | 27 | if sum1 != sum2 { 28 | t.Error("bad CRC32:") 29 | t.Logf("expected: %d", sum1) 30 | t.Logf("found: %d", sum2) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /createacls_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | ) 9 | 
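// TestClientCreateACLs checks that a single CreateACLs request can create
// read ACLs for both a topic and a consumer group without per-entry errors.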
10 | func TestClientCreateACLs(t *testing.T) { 11 | if !ktesting.KafkaIsAtLeast("2.0.1") { 12 | return 13 | } 14 | 15 | client, shutdown := newLocalClient() 16 | defer shutdown() 17 | 18 | topic := makeTopic() 19 | group := makeGroupID() 20 | 21 | createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{ 22 | ACLs: []ACLEntry{ 23 | { 24 | Principal: "User:alice", 25 | PermissionType: ACLPermissionTypeAllow, 26 | Operation: ACLOperationTypeRead, 27 | ResourceType: ResourceTypeTopic, 28 | ResourcePatternType: PatternTypeLiteral, 29 | ResourceName: topic, 30 | Host: "*", 31 | }, 32 | { 33 | Principal: "User:bob", 34 | PermissionType: ACLPermissionTypeAllow, 35 | Operation: ACLOperationTypeRead, 36 | ResourceType: ResourceTypeGroup, 37 | ResourcePatternType: PatternTypeLiteral, 38 | ResourceName: group, 39 | Host: "*", 40 | }, 41 | }, 42 | }) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | 47 | for _, err := range createRes.Errors { 48 | if err != nil { 49 | t.Error(err) 50 | } 51 | } 52 | } 53 | 54 | func TestACLPermissionTypeMarshal(t *testing.T) { 55 | for i := ACLPermissionTypeUnknown; i <= ACLPermissionTypeAllow; i++ { 56 | text, err := i.MarshalText() 57 | if err != nil { 58 | t.Errorf("couldn't marshal %d to text: %s", i, err) 59 | } 60 | var got ACLPermissionType 61 | err = got.UnmarshalText(text) 62 | if err != nil { 63 | t.Errorf("couldn't unmarshal %s to ACLPermissionType: %s", text, err) 64 | } 65 | if got != i { 66 | t.Errorf("got %d, want %d", got, i) 67 | } 68 | } 69 | } 70 | 71 | func TestACLOperationTypeMarshal(t *testing.T) { 72 | for i := ACLOperationTypeUnknown; i <= ACLOperationTypeIdempotentWrite; i++ { 73 | text, err := i.MarshalText() 74 | if err != nil { 75 | t.Errorf("couldn't marshal %d to text: %s", i, err) 76 | } 77 | var got ACLOperationType 78 | err = got.UnmarshalText(text) 79 | if err != nil { 80 | t.Errorf("couldn't unmarshal %s to ACLOperationType: %s", text, err) 81 | } 82 | if got != i { 83 | t.Errorf("got %d, want %d", got, i) 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /createpartitions_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | ) 9 | 10 | func TestClientCreatePartitions(t *testing.T) { 11 | if !ktesting.KafkaIsAtLeast("1.0.1") { 12 | return 13 | } 14 | 15 | client, shutdown := newLocalClient() 16 | defer shutdown() 17 | 18 | topic := makeTopic() 19 | createTopic(t, topic, 1) 20 | defer deleteTopic(t, topic) 21 | 22 | res, err := client.CreatePartitions(context.Background(), &CreatePartitionsRequest{ 23 | Topics: []TopicPartitionsConfig{ 24 | { 25 | Name: topic, 26 | Count: 2, 27 | TopicPartitionAssignments: []TopicPartitionAssignment{ 28 | { 29 | BrokerIDs: []int32{1}, 30 | }, 31 | }, 32 | }, 33 | }, 34 | ValidateOnly: false, 35 | }) 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | 40 | if err := res.Errors[topic]; err != nil { 41 | t.Error(err) 42 | } 43 | } 44 | 45 | func TestClientCreatePartitionsNoAssignments(t *testing.T) { 46 | if !ktesting.KafkaIsAtLeast("1.0.1") { 47 | return 48 | } 49 | 50 | client, shutdown := newLocalClient() 51 | defer shutdown() 52 | 53 | topic := makeTopic() 54 | createTopic(t, topic, 1) 55 | defer deleteTopic(t, topic) 56 | 57 | res, err := client.CreatePartitions(context.Background(), &CreatePartitionsRequest{ 58 | Topics: []TopicPartitionsConfig{ 59 | { 60 | Name: 
topic, 61 | Count: 2, 62 | }, 63 | }, 64 | ValidateOnly: false, 65 | }) 66 | if err != nil { 67 | t.Fatal(err) 68 | } 69 | 70 | if err := res.Errors[topic]; err != nil { 71 | t.Error(err) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /deletegroups.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "time" 8 | 9 | "github.com/segmentio/kafka-go/protocol/deletegroups" 10 | ) 11 | 12 | // DeleteGroupsRequest represents a request sent to a kafka broker to delete 13 | // consumer groups. 14 | type DeleteGroupsRequest struct { 15 | // Address of the kafka broker to send the request to. 16 | Addr net.Addr 17 | 18 | // Identifiers of groups to delete. 19 | GroupIDs []string 20 | } 21 | 22 | // DeleteGroupsResponse represents a response from a kafka broker to a consumer group 23 | // deletion request. 24 | type DeleteGroupsResponse struct { 25 | // The amount of time that the broker throttled the request. 26 | Throttle time.Duration 27 | 28 | // Mapping of group ids to errors that occurred while attempting to delete those groups. 29 | // 30 | // The errors contain the kafka error code. Programs may use the standard 31 | // errors.Is function to test the error against kafka error codes. 32 | Errors map[string]error 33 | } 34 | 35 | // DeleteGroups sends a delete groups request and returns the response. The request is sent to the group coordinator of the first group 36 | // of the request. All deleted groups must be managed by the same group coordinator. 37 | func (c *Client) DeleteGroups( 38 | ctx context.Context, 39 | req *DeleteGroupsRequest, 40 | ) (*DeleteGroupsResponse, error) { 41 | m, err := c.roundTrip(ctx, req.Addr, &deletegroups.Request{ 42 | GroupIDs: req.GroupIDs, 43 | }) 44 | if err != nil { 45 | return nil, fmt.Errorf("kafka.(*Client).DeleteGroups: %w", err) 46 | } 47 | 48 | r := m.(*deletegroups.Response) 49 | 50 | ret := &DeleteGroupsResponse{ 51 | Throttle: makeDuration(r.ThrottleTimeMs), 52 | Errors: make(map[string]error, len(r.Responses)), 53 | } 54 | 55 | for _, t := range r.Responses { 56 | ret.Errors[t.GroupID] = makeError(t.ErrorCode, "") 57 | } 58 | 59 | return ret, nil 60 | } 61 | -------------------------------------------------------------------------------- /deletegroups_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | "time" 8 | 9 | ktesting "github.com/segmentio/kafka-go/testing" 10 | ) 11 | 12 | func TestClientDeleteGroups(t *testing.T) { 13 | if !ktesting.KafkaIsAtLeast("1.1.0") { 14 | t.Skip("Skipping test because kafka version is not high enough.") 15 | } 16 | 17 | client, shutdown := newLocalClient() 18 | defer shutdown() 19 | 20 | topic := makeTopic() 21 | createTopic(t, topic, 1) 22 | 23 | groupID := makeGroupID() 24 | 25 | group, err := NewConsumerGroup(ConsumerGroupConfig{ 26 | ID: groupID, 27 | Topics: []string{topic}, 28 | Brokers: []string{"localhost:9092"}, 29 | HeartbeatInterval: 2 * time.Second, 30 | RebalanceTimeout: 2 * time.Second, 31 | RetentionTime: time.Hour, 32 | Logger: &testKafkaLogger{T: t}, 33 | }) 34 | if err != nil { 35 | t.Fatal(err) 36 | } 37 | defer group.Close() 38 | 39 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 40 | defer cancel() 41 | 42 | gen, err := group.Next(ctx) 43 | if gen == nil { 44 | t.Fatalf("expected generation 1 not 
to be nil") 45 | } 46 | if err != nil { 47 | t.Fatalf("expected no error, but got %+v", err) 48 | } 49 | 50 | // delete not empty group 51 | res, err := client.DeleteGroups(ctx, &DeleteGroupsRequest{ 52 | GroupIDs: []string{groupID}, 53 | }) 54 | 55 | if err != nil { 56 | t.Fatal(err) 57 | } 58 | 59 | if !errors.Is(res.Errors[groupID], NonEmptyGroup) { 60 | t.Fatalf("expected NonEmptyGroup error, but got %+v", res.Errors[groupID]) 61 | } 62 | 63 | err = group.Close() 64 | if err != nil { 65 | t.Fatal(err) 66 | } 67 | 68 | // delete empty group 69 | res, err = client.DeleteGroups(ctx, &DeleteGroupsRequest{ 70 | GroupIDs: []string{groupID}, 71 | }) 72 | 73 | if err != nil { 74 | t.Fatal(err) 75 | } 76 | 77 | if err = res.Errors[groupID]; err != nil { 78 | t.Error(err) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /deletetopics_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "reflect" 8 | "testing" 9 | ) 10 | 11 | func TestClientDeleteTopics(t *testing.T) { 12 | client, shutdown := newLocalClient() 13 | defer shutdown() 14 | 15 | topic := makeTopic() 16 | createTopic(t, topic, 1) 17 | 18 | res, err := client.DeleteTopics(context.Background(), &DeleteTopicsRequest{ 19 | Topics: []string{topic}, 20 | }) 21 | 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | 26 | if err := res.Errors[topic]; err != nil { 27 | t.Error(err) 28 | } 29 | } 30 | 31 | func TestDeleteTopicsResponseV1(t *testing.T) { 32 | item := deleteTopicsResponse{ 33 | TopicErrorCodes: []deleteTopicsResponseV0TopicErrorCode{ 34 | { 35 | Topic: "a", 36 | ErrorCode: 7, 37 | }, 38 | }, 39 | } 40 | 41 | b := bytes.NewBuffer(nil) 42 | w := &writeBuffer{w: b} 43 | item.writeTo(w) 44 | 45 | var found deleteTopicsResponse 46 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 47 | if err != nil { 48 | t.Fatal(err) 49 | } 50 | if remain != 0 { 51 | t.Fatalf("expected 0 remain, got %v", remain) 52 | } 53 | if !reflect.DeepEqual(item, found) { 54 | t.Fatal("expected item and found to be the same") 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /describeacls_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestClientDescribeACLs(t *testing.T) { 12 | if !ktesting.KafkaIsAtLeast("2.0.1") { 13 | return 14 | } 15 | 16 | client, shutdown := newLocalClient() 17 | defer shutdown() 18 | 19 | topic := makeTopic() 20 | group := makeGroupID() 21 | 22 | createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{ 23 | ACLs: []ACLEntry{ 24 | { 25 | Principal: "User:alice", 26 | PermissionType: ACLPermissionTypeAllow, 27 | Operation: ACLOperationTypeRead, 28 | ResourceType: ResourceTypeTopic, 29 | ResourcePatternType: PatternTypeLiteral, 30 | ResourceName: topic, 31 | Host: "*", 32 | }, 33 | { 34 | Principal: "User:bob", 35 | PermissionType: ACLPermissionTypeAllow, 36 | Operation: ACLOperationTypeRead, 37 | ResourceType: ResourceTypeGroup, 38 | ResourcePatternType: PatternTypeLiteral, 39 | ResourceName: group, 40 | Host: "*", 41 | }, 42 | }, 43 | }) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | 48 | for _, err := range createRes.Errors { 49 | if err != nil { 50 | t.Error(err) 51 | } 
52 | } 53 | 54 | describeResp, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{ 55 | Filter: ACLFilter{ 56 | ResourceTypeFilter: ResourceTypeTopic, 57 | ResourceNameFilter: topic, 58 | ResourcePatternTypeFilter: PatternTypeLiteral, 59 | Operation: ACLOperationTypeRead, 60 | PermissionType: ACLPermissionTypeAllow, 61 | }, 62 | }) 63 | if err != nil { 64 | t.Fatal(err) 65 | } 66 | 67 | expectedDescribeResp := DescribeACLsResponse{ 68 | Throttle: 0, 69 | Error: makeError(0, ""), 70 | Resources: []ACLResource{ 71 | { 72 | ResourceType: ResourceTypeTopic, 73 | ResourceName: topic, 74 | PatternType: PatternTypeLiteral, 75 | ACLs: []ACLDescription{ 76 | { 77 | Principal: "User:alice", 78 | Host: "*", 79 | Operation: ACLOperationTypeRead, 80 | PermissionType: ACLPermissionTypeAllow, 81 | }, 82 | }, 83 | }, 84 | }, 85 | } 86 | 87 | assert.Equal(t, expectedDescribeResp, *describeResp) 88 | } 89 | -------------------------------------------------------------------------------- /describeconfigs_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestClientDescribeConfigs(t *testing.T) { 12 | if !ktesting.KafkaIsAtLeast("0.11.0") { 13 | return 14 | } 15 | 16 | const ( 17 | MaxMessageBytes = "max.message.bytes" 18 | MaxMessageBytesValue = "200000" 19 | ) 20 | 21 | client, shutdown := newLocalClient() 22 | defer shutdown() 23 | 24 | topic := makeTopic() 25 | createTopic(t, topic, 1) 26 | defer deleteTopic(t, topic) 27 | 28 | _, err := client.AlterConfigs(context.Background(), &AlterConfigsRequest{ 29 | Resources: []AlterConfigRequestResource{{ 30 | ResourceType: ResourceTypeTopic, 31 | ResourceName: topic, 32 | Configs: []AlterConfigRequestConfig{{ 33 | Name: MaxMessageBytes, 34 | Value: MaxMessageBytesValue, 35 | }, 36 | }, 37 | }}, 38 | }) 39 | 40 | if err != nil { 41 | t.Fatal(err) 42 | } 43 | 44 | describeResp, err := client.DescribeConfigs(context.Background(), &DescribeConfigsRequest{ 45 | Resources: []DescribeConfigRequestResource{{ 46 | ResourceType: ResourceTypeTopic, 47 | ResourceName: topic, 48 | ConfigNames: []string{MaxMessageBytes}, 49 | }}, 50 | }) 51 | 52 | if err != nil { 53 | t.Fatal(err) 54 | } 55 | 56 | maxMessageBytesValue := "0" 57 | for _, resource := range describeResp.Resources { 58 | if resource.ResourceType == int8(ResourceTypeTopic) && resource.ResourceName == topic { 59 | for _, entry := range resource.ConfigEntries { 60 | if entry.ConfigName == MaxMessageBytes { 61 | maxMessageBytesValue = entry.ConfigValue 62 | } 63 | } 64 | } 65 | } 66 | assert.Equal(t, maxMessageBytesValue, MaxMessageBytesValue) 67 | } 68 | -------------------------------------------------------------------------------- /discard.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import "bufio" 4 | 5 | func discardN(r *bufio.Reader, sz int, n int) (int, error) { 6 | var err error 7 | if n <= sz { 8 | n, err = r.Discard(n) 9 | } else { 10 | n, err = r.Discard(sz) 11 | if err == nil { 12 | err = errShortRead 13 | } 14 | } 15 | return sz - n, err 16 | } 17 | 18 | func discardInt32(r *bufio.Reader, sz int) (int, error) { 19 | return discardN(r, sz, 4) 20 | } 21 | 22 | func discardString(r *bufio.Reader, sz int) (int, error) { 23 | return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { 24 
| if n < 0 { 25 | return sz, nil 26 | } 27 | return discardN(r, sz, n) 28 | }) 29 | } 30 | 31 | func discardBytes(r *bufio.Reader, sz int) (int, error) { 32 | return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { 33 | if n < 0 { 34 | return sz, nil 35 | } 36 | return discardN(r, sz, n) 37 | }) 38 | } 39 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 2 | version: '3' 3 | services: 4 | zookeeper: 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | image: bitnami/zookeeper:latest 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ALLOW_ANONYMOUS_LOGIN: yes 12 | kafka: 13 | container_name: kafka 14 | image: bitnami/kafka:3.7.0 15 | restart: on-failure:3 16 | links: 17 | - zookeeper 18 | ports: 19 | - 9092:9092 20 | - 9093:9093 21 | environment: 22 | KAFKA_CFG_BROKER_ID: 1 23 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' 24 | KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost' 25 | KAFKA_CFG_ADVERTISED_PORT: '9092' 26 | KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 27 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true' 28 | KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000' 29 | KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' 30 | KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' 31 | KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' 32 | KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.authorizer.AclAuthorizer' 33 | KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' 34 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" 35 | ALLOW_PLAINTEXT_LISTENER: yes 36 | entrypoint: 37 | - "/bin/bash" 38 | - "-c" 39 | - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh 40 | -------------------------------------------------------------------------------- /docker_compose_versions/docker-compose-010.yml: -------------------------------------------------------------------------------- 1 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 
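# A hedged usage sketch (paths taken from this repository layout; KAFKA_VERSION
# is assumed here to be the environment variable the ktesting helpers consult
# when gating version-specific tests):
#   docker-compose -f docker_compose_versions/docker-compose-010.yml up -d
#   KAFKA_VERSION=0.10.2.1 go test ./...
#   docker-compose -f docker_compose_versions/docker-compose-010.yml down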
2 | version: '3' 3 | services: 4 | zookeeper: 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | image: bitnami/zookeeper:latest 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ALLOW_ANONYMOUS_LOGIN: yes 12 | kafka: 13 | container_name: kafka 14 | image: bitnami/kafka:0.10.2.1 15 | restart: on-failure:3 16 | links: 17 | - zookeeper 18 | ports: 19 | - 9092:9092 20 | - 9093:9093 21 | environment: 22 | KAFKA_BROKER_ID: 1 23 | KAFKA_DELETE_TOPIC_ENABLE: 'true' 24 | KAFKA_ADVERTISED_HOST_NAME: 'localhost' 25 | KAFKA_ADVERTISED_PORT: '9092' 26 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 27 | KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' 28 | KAFKA_MESSAGE_MAX_BYTES: '200000000' 29 | KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' 30 | KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' 31 | KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' 32 | KAFKA_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer' 33 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' 34 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_server_jaas.conf" 35 | ALLOW_PLAINTEXT_LISTENER: yes 36 | entrypoint: 37 | - "/bin/bash" 38 | - "-c" 39 | - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/bitnami/kafka/config/kafka_server_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram; exec /app-entrypoint.sh /start-kafka.sh 40 | -------------------------------------------------------------------------------- /docker_compose_versions/docker-compose-270.yml: -------------------------------------------------------------------------------- 1 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 
2 | version: '3' 3 | services: 4 | zookeeper: 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | image: bitnami/zookeeper:latest 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ALLOW_ANONYMOUS_LOGIN: yes 12 | kafka: 13 | container_name: kafka 14 | image: bitnami/kafka:2.7.0 15 | restart: on-failure:3 16 | links: 17 | - zookeeper 18 | ports: 19 | - 9092:9092 20 | - 9093:9093 21 | environment: 22 | KAFKA_CFG_BROKER_ID: 1 23 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' 24 | KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost' 25 | KAFKA_CFG_ADVERTISED_PORT: '9092' 26 | KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 27 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true' 28 | KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000' 29 | KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' 30 | KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' 31 | KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' 32 | KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer' 33 | KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' 34 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" 35 | ALLOW_PLAINTEXT_LISTENER: yes 36 | entrypoint: 37 | - "/bin/bash" 38 | - "-c" 39 | - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh 40 | -------------------------------------------------------------------------------- /docker_compose_versions/docker-compose-370.yml: -------------------------------------------------------------------------------- 1 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 
2 | version: '3' 3 | services: 4 | zookeeper: 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | image: bitnami/zookeeper:latest 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ALLOW_ANONYMOUS_LOGIN: yes 12 | kafka: 13 | container_name: kafka 14 | image: bitnami/kafka:3.7.0 15 | restart: on-failure:3 16 | links: 17 | - zookeeper 18 | ports: 19 | - 9092:9092 20 | - 9093:9093 21 | environment: 22 | KAFKA_CFG_BROKER_ID: 1 23 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' 24 | KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost' 25 | KAFKA_CFG_ADVERTISED_PORT: '9092' 26 | KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 27 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true' 28 | KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000' 29 | KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' 30 | KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' 31 | KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' 32 | KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.authorizer.AclAuthorizer' 33 | KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' 34 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" 35 | ALLOW_PLAINTEXT_LISTENER: yes 36 | entrypoint: 37 | - "/bin/bash" 38 | - "-c" 39 | - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh 40 | -------------------------------------------------------------------------------- /docker_compose_versions/docker-compose-400.yml: -------------------------------------------------------------------------------- 1 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 
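# Note: unlike the compose files above, the 4.0.0 setup runs in KRaft mode, so
# there is no zookeeper service; the single broker also acts as the controller
# (see KAFKA_CFG_PROCESS_ROLES and KAFKA_CFG_CONTROLLER_QUORUM_VOTERS below).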
2 | version: '3' 3 | services: 4 | kafka: 5 | container_name: kafka 6 | image: bitnami/kafka:4.0.0 7 | restart: on-failure:3 8 | ports: 9 | - 9092:9092 10 | - 9093:9093 11 | environment: 12 | KAFKA_CFG_NODE_ID: 1 13 | KAFKA_CFG_BROKER_ID: 1 14 | KAFKA_CFG_PROCESS_ROLES: broker,controller 15 | KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost' 16 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER 17 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAIN:PLAINTEXT,SASL:SASL_PLAINTEXT 18 | KAFKA_CFG_LISTENERS: CONTROLLER://:9094,PLAIN://:9092,SASL://:9093 19 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAIN://localhost:9092,SASL://localhost:9093 20 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAIN 21 | KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' 22 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@localhost:9094 23 | ALLOW_PLAINTEXT_LISTENER: yes 24 | KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' 25 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" 26 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true' 27 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' 28 | KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000' 29 | KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'org.apache.kafka.metadata.authorizer.StandardAuthorizer' 30 | KAFKA_CFG_SUPER_USERS: User:adminscram256;User:adminscram512;User:adminplain 31 | KAFKA_CLIENT_USERS: adminscram256,adminscram512,adminplain 32 | KAFKA_CLIENT_PASSWORDS: admin-secret-256,admin-secret-512,admin-secret 33 | KAFKA_CLIENT_SASL_MECHANISMS: SCRAM-SHA-256,SCRAM-SHA-512,PLAIN 34 | KAFKA_INTER_BROKER_USER: adminscram512 35 | KAFKA_INTER_BROKER_PASSWORD: admin-secret-512 36 | KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-512 37 | # Note you will need to increase this to at least 4GB of memory for the tests to pass 38 | # https://github.com/segmentio/kafka-go/issues/1360#issuecomment-2858935900 39 | KAFKA_HEAP_OPTS: '-Xmx1000m -Xms1000m' 40 | KAFKA_JVM_OPTS: '-XX:+UseG1GC' -------------------------------------------------------------------------------- /electleaders_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | ) 9 | 10 | func TestClientElectLeaders(t *testing.T) { 11 | if !ktesting.KafkaIsAtLeast("2.4.0") { 12 | return 13 | } 14 | 15 | ctx := context.Background() 16 | client, shutdown := newLocalClient() 17 | defer shutdown() 18 | 19 | topic := makeTopic() 20 | createTopic(t, topic, 2) 21 | defer deleteTopic(t, topic) 22 | 23 | // Local kafka only has 1 broker, so leader elections are no-ops. 
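// On a multi-broker cluster this request would trigger leader election for
// the listed partitions; here it only exercises the request/response path.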
24 | resp, err := client.ElectLeaders( 25 | ctx, 26 | &ElectLeadersRequest{ 27 | Topic: topic, 28 | Partitions: []int{0, 1}, 29 | }, 30 | ) 31 | 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | if resp.Error != nil { 36 | t.Error( 37 | "Unexpected error in response", 38 | "expected", nil, 39 | "got", resp.Error, 40 | ) 41 | } 42 | if len(resp.PartitionResults) != 2 { 43 | t.Error( 44 | "Unexpected length of partition results", 45 | "expected", 2, 46 | "got", len(resp.PartitionResults), 47 | ) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /endtxn.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "time" 8 | 9 | "github.com/segmentio/kafka-go/protocol/endtxn" 10 | ) 11 | 12 | // EndTxnRequest represents a request sent to a kafka broker to end a transaction. 13 | type EndTxnRequest struct { 14 | // Address of the kafka broker to send the request to. 15 | Addr net.Addr 16 | 17 | // The transactional id key. 18 | TransactionalID string 19 | 20 | // The Producer ID (PID) for the current producer session. 21 | ProducerID int 22 | 23 | // The epoch associated with the current producer session for the given PID. 24 | ProducerEpoch int 25 | 26 | // Committed should be set to true if the transaction was committed, false otherwise. 27 | Committed bool 28 | } 29 | 30 | // EndTxnResponse represents a response from a kafka broker to an end transaction request. 31 | type EndTxnResponse struct { 32 | // The amount of time that the broker throttled the request. 33 | Throttle time.Duration 34 | 35 | // Error is non-nil if an error occurred and contains the kafka error code. 36 | // Programs may use the standard errors.Is function to test the error 37 | // against kafka error codes. 38 | Error error 39 | } 40 | 41 | // EndTxn sends an EndTxn request to a kafka broker and returns its response.
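//
// A minimal caller-side sketch (hedged: producerID and producerEpoch are
// assumed to come from a prior InitProducerID call for the same
// transactional id):
//
//	res, err := client.EndTxn(ctx, &kafka.EndTxnRequest{
//		TransactionalID: "my-transactional-id",
//		ProducerID:      producerID,
//		ProducerEpoch:   producerEpoch,
//		Committed:       true, // false aborts instead of committing
//	})
//	if err != nil {
//		// transport-level failure
//	} else if res.Error != nil {
//		// kafka error code, testable with errors.Is
//	}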
42 | func (c *Client) EndTxn(ctx context.Context, req *EndTxnRequest) (*EndTxnResponse, error) { 43 | m, err := c.roundTrip(ctx, req.Addr, &endtxn.Request{ 44 | TransactionalID: req.TransactionalID, 45 | ProducerID: int64(req.ProducerID), 46 | ProducerEpoch: int16(req.ProducerEpoch), 47 | Committed: req.Committed, 48 | }) 49 | if err != nil { 50 | return nil, fmt.Errorf("kafka.(*Client).EndTxn: %w", err) 51 | } 52 | 53 | r := m.(*endtxn.Response) 54 | 55 | res := &EndTxnResponse{ 56 | Throttle: makeDuration(r.ThrottleTimeMs), 57 | Error: makeError(r.ErrorCode, ""), 58 | } 59 | 60 | return res, nil 61 | } 62 | -------------------------------------------------------------------------------- /example_writer_test.go: -------------------------------------------------------------------------------- 1 | package kafka_test 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/segmentio/kafka-go" 7 | ) 8 | 9 | func ExampleWriter() { 10 | w := &kafka.Writer{ 11 | Addr: kafka.TCP("localhost:9092"), 12 | Topic: "Topic-1", 13 | } 14 | 15 | w.WriteMessages(context.Background(), 16 | kafka.Message{ 17 | Key: []byte("Key-A"), 18 | Value: []byte("Hello World!"), 19 | }, 20 | ) 21 | 22 | w.Close() 23 | } 24 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | kafka-go -------------------------------------------------------------------------------- /examples/consumer-logger/Dockerfile: -------------------------------------------------------------------------------- 1 | ##################################### 2 | # STEP 1 build executable binary # 3 | ##################################### 4 | FROM golang:alpine AS builder 5 | 6 | # Install git. 7 | # Git is required for fetching the dependencies. 8 | RUN apk update && apk add --no-cache git 9 | 10 | WORKDIR /app 11 | 12 | COPY go.mod . 13 | COPY go.sum . 14 | 15 | RUN go mod download 16 | 17 | COPY . . 18 | 19 | # Build the binary. 20 | RUN CGO_ENABLED=0 GOOS=linux go build -o main 21 | 22 | ##################################### 23 | # STEP 2 build a small image # 24 | ##################################### 25 | FROM scratch 26 | 27 | # Copy our static executable. 28 | COPY --from=builder /app/main /app/main 29 | 30 | # Run the hello binary. 31 | ENTRYPOINT ["/app/main"] -------------------------------------------------------------------------------- /examples/consumer-logger/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/example/consumer-logger 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/klauspost/compress v1.12.2 // indirect 7 | github.com/segmentio/kafka-go v0.4.28 8 | ) 9 | -------------------------------------------------------------------------------- /examples/consumer-logger/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strings" 9 | 10 | kafka "github.com/segmentio/kafka-go" 11 | ) 12 | 13 | func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader { 14 | brokers := strings.Split(kafkaURL, ",") 15 | return kafka.NewReader(kafka.ReaderConfig{ 16 | Brokers: brokers, 17 | GroupID: groupID, 18 | Topic: topic, 19 | MinBytes: 10e3, // 10KB 20 | MaxBytes: 10e6, // 10MB 21 | }) 22 | } 23 | 24 | func main() { 25 | // get kafka reader using environment variables. 
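// (For example kafkaURL=kafka:9092, topic=topic1, groupID=logger-group,
// matching the consumer-logger service in examples/docker-compose.yaml.)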
26 | kafkaURL := os.Getenv("kafkaURL") 27 | topic := os.Getenv("topic") 28 | groupID := os.Getenv("groupID") 29 | 30 | reader := getKafkaReader(kafkaURL, topic, groupID) 31 | 32 | defer reader.Close() 33 | 34 | fmt.Println("start consuming ... !!") 35 | for { 36 | m, err := reader.ReadMessage(context.Background()) 37 | if err != nil { 38 | log.Fatalln(err) 39 | } 40 | fmt.Printf("message at topic:%v partition:%v offset:%v %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /examples/consumer-mongo-db/Dockerfile: -------------------------------------------------------------------------------- 1 | ##################################### 2 | # STEP 1 build executable binary # 3 | ##################################### 4 | FROM golang:alpine AS builder 5 | 6 | # Install git. 7 | # Git is required for fetching the dependencies. 8 | RUN apk update && apk add --no-cache git 9 | 10 | WORKDIR /app 11 | 12 | COPY go.mod . 13 | COPY go.sum . 14 | 15 | RUN go mod download 16 | 17 | COPY . . 18 | 19 | # Build the binary. 20 | RUN CGO_ENABLED=0 GOOS=linux go build -o main 21 | 22 | ##################################### 23 | # STEP 2 build a small image # 24 | ##################################### 25 | FROM scratch 26 | 27 | # Copy our static executable. 28 | COPY --from=builder /app/main /app/main 29 | 30 | # Run the hello binary. 31 | ENTRYPOINT ["/app/main"] -------------------------------------------------------------------------------- /examples/consumer-mongo-db/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/example/consumer-mongo-db 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/go-stack/stack v1.8.0 // indirect 7 | github.com/klauspost/compress v1.12.2 // indirect 8 | github.com/mongodb/mongo-go-driver v0.3.0 9 | github.com/segmentio/kafka-go v0.4.28 10 | github.com/tidwall/pretty v1.1.0 // indirect 11 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect 12 | ) 13 | -------------------------------------------------------------------------------- /examples/consumer-mongo-db/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | 9 | "github.com/mongodb/mongo-go-driver/mongo" 10 | kafka "github.com/segmentio/kafka-go" 11 | ) 12 | 13 | func getMongoCollection(mongoURL, dbName, collectionName string) *mongo.Collection { 14 | client, err := mongo.Connect(context.Background(), mongoURL) 15 | if err != nil { 16 | log.Fatal(err) 17 | } 18 | 19 | // Check the connection 20 | err = client.Ping(context.Background(), nil) 21 | if err != nil { 22 | log.Fatal(err) 23 | } 24 | 25 | fmt.Println("Connected to MongoDB ... !!") 26 | 27 | db := client.Database(dbName) 28 | collection := db.Collection(collectionName) 29 | return collection 30 | } 31 | 32 | func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader { 33 | return kafka.NewReader(kafka.ReaderConfig{ 34 | Brokers: []string{kafkaURL}, 35 | GroupID: groupID, 36 | Topic: topic, 37 | MinBytes: 10e3, // 10KB 38 | MaxBytes: 10e6, // 10MB 39 | }) 40 | } 41 | 42 | func main() { 43 | 44 | // get Mongo db Collection using environment variables. 
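// (For example mongoURL=mongodb://mongo-db:27017, dbName=example_db,
// collectionName=example_coll, matching the consumer-mongo-db service in
// examples/docker-compose.yaml.)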
45 | mongoURL := os.Getenv("mongoURL") 46 | dbName := os.Getenv("dbName") 47 | collectionName := os.Getenv("collectionName") 48 | collection := getMongoCollection(mongoURL, dbName, collectionName) 49 | 50 | // get kafka reader using environment variables. 51 | kafkaURL := os.Getenv("kafkaURL") 52 | topic := os.Getenv("topic") 53 | groupID := os.Getenv("groupID") 54 | reader := getKafkaReader(kafkaURL, topic, groupID) 55 | 56 | defer reader.Close() 57 | 58 | fmt.Println("start consuming ... !!") 59 | 60 | for { 61 | msg, err := reader.ReadMessage(context.Background()) 62 | if err != nil { 63 | log.Fatal(err) 64 | } 65 | insertResult, err := collection.InsertOne(context.Background(), msg) 66 | if err != nil { 67 | log.Fatal(err) 68 | } 69 | 70 | fmt.Println("Inserted a single document: ", insertResult.InsertedID) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /examples/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | zookeeper: 5 | hostname: zookeeper 6 | image: bitnami/zookeeper:latest 7 | restart: always 8 | expose: 9 | - "2181" 10 | ports: 11 | - "2181:2181" 12 | environment: 13 | ALLOW_ANONYMOUS_LOGIN: yes 14 | 15 | kafka: 16 | hostname: kafka 17 | image: bitnami/kafka:2.7.0 18 | restart: always 19 | env_file: 20 | - kafka/kafka-variables.env 21 | depends_on: 22 | - zookeeper 23 | expose: 24 | - "9092" 25 | - "8082" 26 | - "8083" 27 | ports: 28 | - '9092:9092' 29 | - '8082:8082' 30 | - '8083:8083' 31 | 32 | mongo-db: 33 | image: mongo:4.0 34 | restart: always 35 | expose: 36 | - "27017" 37 | ports: 38 | - "27017:27017" 39 | environment: 40 | MONGO_DATA_DIR: /data/db 41 | MONGO_LOG_DIR: /dev/null 42 | 43 | consumer-mongo-db: 44 | build: 45 | context: consumer-mongo-db 46 | environment: 47 | mongoURL: mongodb://mongo-db:27017 48 | dbName: example_db 49 | collectionName: example_coll 50 | kafkaURL: kafka:9092 51 | topic: topic1 52 | groupID: mongo-group 53 | depends_on: 54 | - kafka 55 | - mongo-db 56 | restart: always 57 | 58 | consumer-logger: 59 | build: 60 | context: consumer-logger 61 | environment: 62 | kafkaURL: kafka:9092 63 | topic: topic1 64 | groupID: logger-group 65 | depends_on: 66 | - kafka 67 | restart: always 68 | 69 | producer-random: 70 | build: 71 | context: producer-random 72 | environment: 73 | kafkaURL: kafka:9092 74 | topic: topic1 75 | depends_on: 76 | - kafka 77 | restart: always 78 | 79 | producer-api: 80 | build: 81 | context: producer-api 82 | environment: 83 | kafkaURL: kafka:9092 84 | topic: topic1 85 | expose: 86 | - "8080" 87 | ports: 88 | - "8080:8080" 89 | depends_on: 90 | - kafka 91 | restart: always 92 | -------------------------------------------------------------------------------- /examples/kafka/kafka-variables.env: -------------------------------------------------------------------------------- 1 | 2 | KAFKA_CFG_ADVERTISED_HOST_NAME=kafka 3 | KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 4 | KAFKA_CFG_CONNECT_BOOTSTRAP_SERVERS=localhost:9092 5 | 6 | KAFKA_CFG_CONNECT_REST_PORT=8082 7 | KAFKA_CFG_CONNECT_REST_ADVERTISED_HOST_NAME="localhost" 8 | 9 | KAFKA_CFG_CONNECT_KEY_CONVERTER="org.apache.kafka.connect.json.JsonConverter" 10 | KAFKA_CFG_CONNECT_VALUE_CONVERTER="org.apache.kafka.connect.json.JsonConverter" 11 | KAFKA_CFG_CONNECT_KEY_CONVERTER_SCHEMAS_ENABLE=0 12 | KAFKA_CFG_CONNECT_VALUE_CONVERTER_SCHEMAS_ENABLE=0 13 | 14 | 
KAFKA_CFG_CONNECT_INTERNAL_KEY_CONVERTER="org.apache.kafka.connect.json.JsonConverter" 15 | KAFKA_CFG_CONNECT_INTERNAL_VALUE_CONVERTER="org.apache.kafka.connect.json.JsonConverter" 16 | KAFKA_CFG_CONNECT_INTERNAL_KEY_CONVERTER_SCHEMAS_ENABLE=0 17 | KAFKA_CFG_CONNECT_INTERNAL_VALUE_CONVERTER_SCHEMAS_ENABLE=0 18 | 19 | KAFKA_CFG_CONNECT_OFFSET_STORAGE_FILE_FILENAME="/tmp/connect.offsets" 20 | # Flush much faster than normal, which is useful for testing/debugging 21 | KAFKA_CFG_CONNECT_OFFSET_FLUSH_INTERVAL_MS=10000 22 | 23 | ALLOW_PLAINTEXT_LISTENER: yes 24 | -------------------------------------------------------------------------------- /examples/producer-api/Dockerfile: -------------------------------------------------------------------------------- 1 | ##################################### 2 | # STEP 1 build executable binary # 3 | ##################################### 4 | FROM golang:alpine AS builder 5 | 6 | # Install git. 7 | # Git is required for fetching the dependencies. 8 | RUN apk update && apk add --no-cache git 9 | 10 | WORKDIR /app 11 | 12 | COPY go.mod . 13 | COPY go.sum . 14 | 15 | RUN go mod download 16 | 17 | COPY . . 18 | 19 | # Build the binary. 20 | RUN CGO_ENABLED=0 GOOS=linux go build -o main 21 | 22 | ##################################### 23 | # STEP 2 build a small image # 24 | ##################################### 25 | FROM scratch 26 | 27 | # Copy our static executable. 28 | COPY --from=builder /app/main /app/main 29 | 30 | # Run the hello binary. 31 | ENTRYPOINT ["/app/main"] -------------------------------------------------------------------------------- /examples/producer-api/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/example/producer-api 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/klauspost/compress v1.12.2 // indirect 7 | github.com/segmentio/kafka-go v0.4.28 8 | ) 9 | -------------------------------------------------------------------------------- /examples/producer-api/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "log" 7 | "net/http" 8 | "os" 9 | 10 | kafka "github.com/segmentio/kafka-go" 11 | ) 12 | 13 | func producerHandler(kafkaWriter *kafka.Writer) func(http.ResponseWriter, *http.Request) { 14 | return http.HandlerFunc(func(wrt http.ResponseWriter, req *http.Request) { 15 | body, err := ioutil.ReadAll(req.Body) 16 | if err != nil { 17 | log.Fatalln(err) 18 | } 19 | msg := kafka.Message{ 20 | Key: []byte(fmt.Sprintf("address-%s", req.RemoteAddr)), 21 | Value: body, 22 | } 23 | err = kafkaWriter.WriteMessages(req.Context(), msg) 24 | 25 | if err != nil { 26 | wrt.Write([]byte(err.Error())) 27 | log.Fatalln(err) 28 | } 29 | }) 30 | } 31 | 32 | func getKafkaWriter(kafkaURL, topic string) *kafka.Writer { 33 | return &kafka.Writer{ 34 | Addr: kafka.TCP(kafkaURL), 35 | Topic: topic, 36 | Balancer: &kafka.LeastBytes{}, 37 | } 38 | } 39 | 40 | func main() { 41 | // get kafka writer using environment variables. 42 | kafkaURL := os.Getenv("kafkaURL") 43 | topic := os.Getenv("topic") 44 | kafkaWriter := getKafkaWriter(kafkaURL, topic) 45 | 46 | defer kafkaWriter.Close() 47 | 48 | // Add handle func for producer. 49 | http.HandleFunc("/", producerHandler(kafkaWriter)) 50 | 51 | // Run the web server. 52 | fmt.Println("start producer-api ... 
!!") 53 | log.Fatal(http.ListenAndServe(":8080", nil)) 54 | } 55 | -------------------------------------------------------------------------------- /examples/producer-api/test.http: -------------------------------------------------------------------------------- 1 | ### send data text 2 | POST http://localhost:8080 3 | Content-Type: text/plain 4 | 5 | "Hello-api" 6 | 7 | ### send data json 8 | POST http://localhost:8080 9 | Content-Type: application/json 10 | 11 | { 12 | "data":"Hello-api" 13 | } -------------------------------------------------------------------------------- /examples/producer-random/Dockerfile: -------------------------------------------------------------------------------- 1 | ##################################### 2 | # STEP 1 build executable binary # 3 | ##################################### 4 | FROM golang:alpine AS builder 5 | 6 | # Install git. 7 | # Git is required for fetching the dependencies. 8 | RUN apk update && apk add --no-cache git 9 | 10 | WORKDIR /app 11 | 12 | COPY go.mod . 13 | COPY go.sum . 14 | 15 | RUN go mod download 16 | 17 | COPY . . 18 | 19 | # Build the binary. 20 | RUN CGO_ENABLED=0 GOOS=linux go build -o main 21 | 22 | ##################################### 23 | # STEP 2 build a small image # 24 | ##################################### 25 | FROM scratch 26 | 27 | # Copy our static executable. 28 | COPY --from=builder /app/main /app/main 29 | 30 | # Run the hello binary. 31 | ENTRYPOINT ["/app/main"] -------------------------------------------------------------------------------- /examples/producer-random/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/example/producer-random 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/davecgh/go-spew v1.1.1 // indirect 7 | github.com/google/uuid v1.1.0 8 | github.com/klauspost/compress v1.12.2 // indirect 9 | github.com/segmentio/kafka-go v0.4.28 10 | ) 11 | -------------------------------------------------------------------------------- /examples/producer-random/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/google/uuid" 10 | kafka "github.com/segmentio/kafka-go" 11 | ) 12 | 13 | func newKafkaWriter(kafkaURL, topic string) *kafka.Writer { 14 | return &kafka.Writer{ 15 | Addr: kafka.TCP(kafkaURL), 16 | Topic: topic, 17 | Balancer: &kafka.LeastBytes{}, 18 | } 19 | } 20 | 21 | func main() { 22 | // get kafka writer using environment variables. 23 | kafkaURL := os.Getenv("kafkaURL") 24 | topic := os.Getenv("topic") 25 | writer := newKafkaWriter(kafkaURL, topic) 26 | defer writer.Close() 27 | fmt.Println("start producing ... 
!!") 28 | for i := 0; ; i++ { 29 | key := fmt.Sprintf("Key-%d", i) 30 | msg := kafka.Message{ 31 | Key: []byte(key), 32 | Value: []byte(fmt.Sprint(uuid.New())), 33 | } 34 | err := writer.WriteMessages(context.Background(), msg) 35 | if err != nil { 36 | fmt.Println(err) 37 | } else { 38 | fmt.Println("produced", key) 39 | } 40 | time.Sleep(1 * time.Second) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /fixtures/v1-v1.hex: -------------------------------------------------------------------------------- 1 | 000001660000000a00000000000015c79861000000010009746573742d6564677900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000011f00000000000000000000003ca293717501000000017c4f08dc7f00000005616c706861000000217b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00000000000000010000003b3d4abab001000000017c4f08dc970000000462657461000000217b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d00000000000000020000003cbcad5cde01000000017c4f09b16d0000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003c8585230b01000000017c4f09b6b20000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d -------------------------------------------------------------------------------- /fixtures/v1-v1.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v1-v1.pcapng -------------------------------------------------------------------------------- /fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.pcapng -------------------------------------------------------------------------------- /fixtures/v1c-v1-v1c.hex: -------------------------------------------------------------------------------- 1 | 000002350000000a0000000000003d15acfe00000001000b746573742d627265657a7900000001000000000000000000000000000600000000000000060000000000000000ffffffff000001ec000000000000000100000079779afa8b01010000017c4f11cdc9ffffffff000000631f8b0800000000000000636080039bf9617b7418810cc61a7fc1b32b810cd6c49c828c442043b15a2939bf34af44c9ca4047292d332727b548c94a29110e946aa16680b45b5b967f780937e72490c192945a82db98243850aa05001ea2107b8f00000000000000000000020000003cda0e410e01000000017c4f1212630000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003c0470399301000000017c4f12154e0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d0000000000000004000000613b0e4db601010000017c4f124947ffffffff0000004b1f8b080000000000000063608003bb67b39e743302198c35fe429eee40067b6a4171664e7e1e90a958ad949c5f9a57a26465a0a394969993935aa464a5940a074ab5007d95b7894a00000000000000000000050000005edb50180901010000017c4f124fd0ffffffff000000481f8b080000000000000063608003ebfbf2b32c18810cc61a7f21ff0b40064b556a49229056ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8303a55a005ec594df47000000 -------------------------------------------------------------------------------- /fixtures/v1c-v1-v1c.pcapng: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v1c-v1-v1c.pcapng -------------------------------------------------------------------------------- /fixtures/v1c-v1c.hex: -------------------------------------------------------------------------------- 1 | 000001a20000000a0000000000001abffa5700000001000a746573742d677574737900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000015a0000000000000001000000789125e5e201010000017c4f0ee474ffffffff000000621f8b0800000000000000636080039bfcfd51598c4006638d3fdf131f20833531a7202311c850ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55aa81920edd67a221c2e70734a800c96a4d412dcc624c181522d001d8564f48f00000000000000000000020000005f66e75d9b01010000017c4f0f55f5ffffffff000000491f8b0800000000000000636080039bfd2566fe8c4006638d3f7fe8572083353d31373711c850ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8603a55a008ef7186d4800000000000000000000030000005f3cff26a901010000017c4f0f5d5cffffffff000000491f8b0800000000000000636080031b6f8db3d18c4006638d3f7f6c0c90c19a929a5392086428562b25e797e695285919e828a565e6e4a416295929a5c081522d00dd1f6ff148000000 -------------------------------------------------------------------------------- /fixtures/v1c-v1c.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v1c-v1c.pcapng -------------------------------------------------------------------------------- /fixtures/v2-v2.hex: -------------------------------------------------------------------------------- 1 | 000001760000000a0000000000001163921100000001000a746573742d6c7563696400000001000000000000000000000000000400000000000000040000000000000000ffffffff0000012e00000000000000000000008a00000000023978fc3b0000000000010000017c4f173eb90000017c4f173ed2ffffffffffffffffffffffffffff00000002580000000a616c706861427b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00560032020862657461427b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d0000000000000000020000008c0000000002fa7514ab0000000000010000017c4f175fa00000017c4f17631fffffffffffffffffffffffffffff00000002580000000a67616d6d61427b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d005a00fe0d020a64656c7461427b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d00 -------------------------------------------------------------------------------- /fixtures/v2-v2.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2-v2.pcapng -------------------------------------------------------------------------------- /fixtures/v2b-v1.hex: -------------------------------------------------------------------------------- 1 | 
0000016e0000000a00000000000023f24a1a00000001000b746573742d66656973747900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000012500000000000000000000008a000000000267762fd10000000000010000017c4e71efe10000017c4e71effdffffffffffffffffffffffffffff00000002580000000a616c706861427b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00560038020862657461427b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d0000000000000000020000003c0d5ba69301000000017c4e743d2100000005616c706861000000217b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00000000000000030000003be6e3d42501000000017c4e743d410000000462657461000000217b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d -------------------------------------------------------------------------------- /fixtures/v2b-v1.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2b-v1.pcapng -------------------------------------------------------------------------------- /fixtures/v2bc-v1-v1c.hex: -------------------------------------------------------------------------------- 1 | 000001e60000000a000000000000530076a100000001000a746573742d686172647900000001000000000000000000000000000600000000000000060000000000000000ffffffff0000019e000000000000000000000079000000000214d2dc1d0001000000010000017c4ead43a90000017c4ead43c3ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000003c48deb52601000000017c4eae54050000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003ca2ba5edc01000000017c4eae5cff0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d00000000000000050000007d2e0ea95201010000017c4eb07250ffffffff000000671f8b080000000000000063608003bb5b69b1958c4006638ddf86fcef40067b6a4171664e7e1e90a958ad949c5f9a57a26465a0a394969993935aa464a5940a074ab550534006587fdc55d00633a92800c860a94a2d49c4694c1a1c28d5020087e0fa5d91000000 -------------------------------------------------------------------------------- /fixtures/v2bc-v1-v1c.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2bc-v1-v1c.pcapng -------------------------------------------------------------------------------- /fixtures/v2bc-v1.hex: -------------------------------------------------------------------------------- 1 | 0000015d0000000a0000000000006d36526200000001000a746573742d686172647900000001000000000000000000000000000400000000000000040000000000000000ffffffff00000115000000000000000000000079000000000214d2dc1d0001000000010000017c4ead43a90000017c4ead43c3ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000003c48deb52601000000017c4eae54050000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003ca2ba5edc01000000017c4eae5cff0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d -------------------------------------------------------------------------------- /fixtures/v2bc-v1.pcapng: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2bc-v1.pcapng -------------------------------------------------------------------------------- /fixtures/v2bc-v1c.hex: -------------------------------------------------------------------------------- 1 | 000001520000000a0000000000004aa4215500000001000b746573742d6b61726d696300000001000000000000000000000000000400000000000000040000000000000000ffffffff00000109000000000000000000000079000000000218f2e1220001000000010000017c4e8edde60000017c4e8eddffffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300623268ea4d4121c6a93e000a81600b5931557590000000000000000000003000000785a33562401010000017c4e8f57f1ffffffff000000621f8b0800000000000000636080031b93ef814f19810cc61abffef0ab40066b624e41462290a158ad949c5f9a57a26465a0a394969993935aa464a59408074ab5503340daad5d459b6ec0cdf90864b024a596e03626090e946a016143eac78f000000 -------------------------------------------------------------------------------- /fixtures/v2bc-v1c.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2bc-v1c.pcapng -------------------------------------------------------------------------------- /fixtures/v2c-v2-v2c.hex: -------------------------------------------------------------------------------- 1 | 000001ee0000000a000000000000670352ac00000001000a746573742d6e6174747900000001000000000000000000000000000600000000000000060000000000000000ffffffff000001a600000000000000000000007900000000025da9bf740001000000010000017c4f1eea730000017c4f1eea8dffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000008c0000000002f53e2b600000000000010000017c4f1f1a600000017c4f1f1c65ffffffffffffffffffffffffffff00000002580000000a67616d6d61427b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d005a008a08020a64656c7461427b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d0000000000000000040000007d000000000268a8ca640001000000010000017c4f1f49f90000017c4f1f4db4ffffffffffffffffffffffffffff000000021f8b08000000000000008b616060e04b2d28ceccc9cf73aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8503a55a8608866f7c4c1c55a92589d815a7c101503100ebf4f0655c000000 -------------------------------------------------------------------------------- /fixtures/v2c-v2-v2c.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2c-v2-v2c.pcapng -------------------------------------------------------------------------------- /fixtures/v2c-v2c.hex: -------------------------------------------------------------------------------- 1 | 
000001560000000a0000000000005698dc5100000001000c746573742d6f6e656972696300000001000000000000000000000000000400000000000000040000000000000000ffffffff0000010c00000000000000000000007900000000021ad503db0001000000010000017c4f1a1f540000017c4f1a1f70ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a8630060b268ea4d4121c6a93e000a816009fa88cc75900000000000000000000020000007b0000000002d346070a0001000000010000017c4f1a46110000017c4f1a48d0ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04a4fcccd4d74aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8603a55a8628867f5c4c5c29a939253854a7c0015035000f1406dd5b000000 -------------------------------------------------------------------------------- /fixtures/v2c-v2c.pcapng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/kafka-go/af1725fb4fc0d856653afb2aeaeaa3cbfea18aec/fixtures/v2c-v2c.pcapng -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/klauspost/compress v1.15.9 7 | github.com/pierrec/lz4/v4 v4.1.15 8 | github.com/stretchr/testify v1.8.0 9 | github.com/xdg-go/scram v1.1.2 10 | golang.org/x/net v0.38.0 11 | ) 12 | 13 | require ( 14 | github.com/davecgh/go-spew v1.1.1 // indirect 15 | github.com/pmezard/go-difflib v1.0.0 // indirect 16 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 17 | github.com/xdg-go/stringprep v1.0.4 // indirect 18 | golang.org/x/text v0.23.0 // indirect 19 | gopkg.in/yaml.v3 v3.0.1 // indirect 20 | ) 21 | 22 | retract [v0.4.36, v0.4.37] 23 | -------------------------------------------------------------------------------- /gzip/gzip.go: -------------------------------------------------------------------------------- 1 | // Package gzip does nothing, it's kept for backward compatibility to avoid 2 | // breaking the majority of programs that imported it to install the compression 3 | // codec, which is now always included. 
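//
// A sketch of the explicit alternative (assuming the Compression option on
// kafka.Writer rather than a registered codec):
//
//	w := &kafka.Writer{
//		Addr:        kafka.TCP("localhost:9092"),
//		Topic:       "topic-1",
//		Compression: kafka.Gzip,
//	}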
4 | package gzip 5 | 6 | import ( 7 | gz "github.com/klauspost/compress/gzip" 8 | "github.com/segmentio/kafka-go/compress/gzip" 9 | ) 10 | 11 | const ( 12 | Code = 1 13 | DefaultCompressionLevel = gz.DefaultCompression 14 | ) 15 | 16 | type CompressionCodec = gzip.Codec 17 | 18 | func NewCompressionCodec() *CompressionCodec { 19 | return NewCompressionCodecLevel(DefaultCompressionLevel) 20 | } 21 | 22 | func NewCompressionCodecLevel(level int) *CompressionCodec { 23 | return &CompressionCodec{Level: level} 24 | } 25 | -------------------------------------------------------------------------------- /heartbeat_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "log" 8 | "os" 9 | "reflect" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func TestClientHeartbeat(t *testing.T) { 15 | client, topic, shutdown := newLocalClientAndTopic() 16 | defer shutdown() 17 | 18 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) 19 | defer cancel() 20 | 21 | groupID := makeGroupID() 22 | 23 | group, err := NewConsumerGroup(ConsumerGroupConfig{ 24 | ID: groupID, 25 | Topics: []string{topic}, 26 | Brokers: []string{"localhost:9092"}, 27 | HeartbeatInterval: 2 * time.Second, 28 | RebalanceTimeout: 2 * time.Second, 29 | RetentionTime: time.Hour, 30 | Logger: log.New(os.Stdout, "cg-test: ", 0), 31 | }) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | defer group.Close() 36 | 37 | gen, err := group.Next(ctx) 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | 42 | ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) 43 | defer cancel() 44 | 45 | resp, err := client.Heartbeat(ctx, &HeartbeatRequest{ 46 | GroupID: groupID, 47 | GenerationID: gen.ID, 48 | MemberID: gen.MemberID, 49 | }) 50 | if err != nil { 51 | t.Fatal(err) 52 | } 53 | 54 | if resp.Error != nil { 55 | t.Error(resp.Error) 56 | } 57 | } 58 | 59 | func TestHeartbeatRequestV0(t *testing.T) { 60 | item := heartbeatResponseV0{ 61 | ErrorCode: 2, 62 | } 63 | 64 | b := bytes.NewBuffer(nil) 65 | w := &writeBuffer{w: b} 66 | item.writeTo(w) 67 | 68 | var found heartbeatResponseV0 69 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 70 | if err != nil { 71 | t.Error(err) 72 | t.FailNow() 73 | } 74 | if remain != 0 { 75 | t.Errorf("expected 0 remain, got %v", remain) 76 | t.FailNow() 77 | } 78 | if !reflect.DeepEqual(item, found) { 79 | t.Error("expected item and found to be the same") 80 | t.FailNow() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /incrementalalterconfigs_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | 8 | ktesting "github.com/segmentio/kafka-go/testing" 9 | ) 10 | 11 | func TestClientIncrementalAlterConfigs(t *testing.T) { 12 | if !ktesting.KafkaIsAtLeast("2.4.0") { 13 | return 14 | } 15 | 16 | const ( 17 | configKey = "max.message.bytes" 18 | configValue = "200000" 19 | ) 20 | 21 | ctx := context.Background() 22 | client, shutdown := newLocalClient() 23 | defer shutdown() 24 | 25 | topic := makeTopic() 26 | createTopic(t, topic, 1) 27 | defer deleteTopic(t, topic) 28 | 29 | resp, err := client.IncrementalAlterConfigs( 30 | ctx, 31 | &IncrementalAlterConfigsRequest{ 32 | Resources: []IncrementalAlterConfigsRequestResource{ 33 | { 34 | ResourceName: topic, 35 | ResourceType: ResourceTypeTopic, 36 | 
Configs: []IncrementalAlterConfigsRequestConfig{ 37 | { 38 | Name: configKey, 39 | Value: configValue, 40 | ConfigOperation: ConfigOperationSet, 41 | }, 42 | }, 43 | }, 44 | }, 45 | }, 46 | ) 47 | if err != nil { 48 | t.Fatal(err) 49 | } 50 | 51 | expRes := []IncrementalAlterConfigsResponseResource{ 52 | { 53 | ResourceType: ResourceTypeTopic, 54 | ResourceName: topic, 55 | }, 56 | } 57 | if !reflect.DeepEqual(expRes, resp.Resources) { 58 | t.Error( 59 | "Wrong response resources", 60 | "expected", expRes, 61 | "got", resp.Resources, 62 | ) 63 | } 64 | 65 | dResp, err := client.DescribeConfigs( 66 | ctx, 67 | &DescribeConfigsRequest{ 68 | Resources: []DescribeConfigRequestResource{ 69 | { 70 | ResourceType: ResourceTypeTopic, 71 | ResourceName: topic, 72 | ConfigNames: []string{ 73 | "max.message.bytes", 74 | }, 75 | }, 76 | }, 77 | }, 78 | ) 79 | if err != nil { 80 | t.Fatal(err) 81 | } 82 | if len(dResp.Resources) != 1 || len(dResp.Resources[0].ConfigEntries) != 1 { 83 | t.Fatal("Invalid structure for DescribeResourcesResponse") 84 | } 85 | 86 | v := dResp.Resources[0].ConfigEntries[0].ConfigValue 87 | if v != configValue { 88 | t.Error( 89 | "Wrong altered value for max.message.bytes", 90 | "expected", configValue, 91 | "got", v, 92 | ) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /listgroups_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "fmt" 8 | "reflect" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func TestListGroupsResponseV1(t *testing.T) { 14 | item := listGroupsResponseV1{ 15 | ErrorCode: 2, 16 | Groups: []listGroupsResponseGroupV1{ 17 | { 18 | GroupID: "a", 19 | ProtocolType: "b", 20 | }, 21 | }, 22 | } 23 | 24 | b := bytes.NewBuffer(nil) 25 | w := &writeBuffer{w: b} 26 | item.writeTo(w) 27 | 28 | var found listGroupsResponseV1 29 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 30 | if err != nil { 31 | t.Error(err) 32 | t.FailNow() 33 | } 34 | if remain != 0 { 35 | t.Errorf("expected 0 remain, got %v", remain) 36 | t.FailNow() 37 | } 38 | if !reflect.DeepEqual(item, found) { 39 | t.Error("expected item and found to be the same") 40 | t.FailNow() 41 | } 42 | } 43 | 44 | func TestClientListGroups(t *testing.T) { 45 | client, shutdown := newLocalClient() 46 | defer shutdown() 47 | 48 | topic := makeTopic() 49 | gid := fmt.Sprintf("%s-test-group", topic) 50 | 51 | createTopic(t, topic, 1) 52 | defer deleteTopic(t, topic) 53 | 54 | w := newTestWriter(WriterConfig{ 55 | Topic: topic, 56 | }) 57 | 58 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 59 | defer cancel() 60 | 61 | err := w.WriteMessages( 62 | ctx, 63 | Message{ 64 | Key: []byte("key"), 65 | Value: []byte("value"), 66 | }, 67 | ) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | 72 | r := NewReader(ReaderConfig{ 73 | Brokers: []string{"localhost:9092"}, 74 | Topic: topic, 75 | GroupID: gid, 76 | MinBytes: 10, 77 | MaxBytes: 1000, 78 | }) 79 | _, err = r.ReadMessage(ctx) 80 | if err != nil { 81 | t.Fatal(err) 82 | } 83 | 84 | resp, err := client.ListGroups( 85 | ctx, 86 | &ListGroupsRequest{}, 87 | ) 88 | if err != nil { 89 | t.Fatal(err) 90 | } 91 | if resp.Error != nil { 92 | t.Error( 93 | "Unexpected error in response", 94 | "expected", nil, 95 | "got", resp.Error, 96 | ) 97 | } 98 | hasGroup := false 99 | for _, group := range resp.Groups { 100 | if group.GroupID == gid { 101 | hasGroup = true 102 | break 
103 | } 104 | } 105 | 106 | if !hasGroup { 107 | t.Error("Group not found in list") 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /listoffset_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestClientListOffsets(t *testing.T) { 10 | client, topic, shutdown := newLocalClientAndTopic() 11 | defer shutdown() 12 | 13 | now := time.Now() 14 | 15 | _, err := client.Produce(context.Background(), &ProduceRequest{ 16 | Topic: topic, 17 | Partition: 0, 18 | RequiredAcks: -1, 19 | Records: NewRecordReader( 20 | Record{Time: now, Value: NewBytes([]byte(`hello-1`))}, 21 | Record{Time: now, Value: NewBytes([]byte(`hello-2`))}, 22 | Record{Time: now, Value: NewBytes([]byte(`hello-3`))}, 23 | ), 24 | }) 25 | 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | 30 | res, err := client.ListOffsets(context.Background(), &ListOffsetsRequest{ 31 | Topics: map[string][]OffsetRequest{ 32 | topic: {FirstOffsetOf(0), LastOffsetOf(0)}, 33 | }, 34 | }) 35 | 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | 40 | if len(res.Topics) != 1 { 41 | t.Fatal("invalid number of topics found in list offsets response:", len(res.Topics)) 42 | } 43 | 44 | partitions, ok := res.Topics[topic] 45 | if !ok { 46 | t.Fatal("missing topic in the list offsets response:", topic) 47 | } 48 | if len(partitions) != 1 { 49 | t.Fatal("invalid number of partitions found in list offsets response:", len(partitions)) 50 | } 51 | partition := partitions[0] 52 | 53 | if partition.Partition != 0 { 54 | t.Error("invalid partition id found in list offsets response:", partition.Partition) 55 | } 56 | 57 | if partition.FirstOffset != 0 { 58 | t.Error("invalid first offset found in list offsets response:", partition.FirstOffset) 59 | } 60 | 61 | if partition.LastOffset != 3 { 62 | t.Error("invalid last offset found in list offsets response:", partition.LastOffset) 63 | } 64 | 65 | if firstOffsetTime := partition.Offsets[partition.FirstOffset]; !firstOffsetTime.IsZero() { 66 | t.Error("unexpected first offset time in list offsets response:", partition.Offsets) 67 | } 68 | 69 | if lastOffsetTime := partition.Offsets[partition.LastOffset]; !lastOffsetTime.IsZero() { 70 | t.Error("unexpected last offset time in list offsets response:", partition.Offsets) 71 | } 72 | 73 | if partition.Error != nil { 74 | t.Error("unexpected error in list offsets response:", partition.Error) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /listpartitionreassignments_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | ktesting "github.com/segmentio/kafka-go/testing" 8 | ) 9 | 10 | func TestClientListPartitionReassignments(t *testing.T) { 11 | if !ktesting.KafkaIsAtLeast("2.4.0") { 12 | return 13 | } 14 | 15 | ctx := context.Background() 16 | client, shutdown := newLocalClient() 17 | defer shutdown() 18 | 19 | topic := makeTopic() 20 | createTopic(t, topic, 2) 21 | defer deleteTopic(t, topic) 22 | 23 | // Can't really get an ongoing partition reassignment with local Kafka, so just do a superficial test here. 
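// On a multi-broker cluster, a fuller test could first start a
// reassignment and then observe it in flight. A hypothetical sketch
// (field names follow this package's AlterPartitionReassignments API
// and are not exercised here, since the local single-broker setup has
// nowhere to move replicas):
//
//	_, err := client.AlterPartitionReassignments(ctx, &AlterPartitionReassignmentsRequest{
//		Topic: topic,
//		Assignments: []AlterPartitionReassignmentsRequestAssignment{
//			{PartitionID: 0, BrokerIDs: []int{2, 3}},
//		},
//	})
//
// after which the ListPartitionReassignments response below would
// report a non-empty Topics map until the brokers finish moving
// replicas.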
24 | resp, err := client.ListPartitionReassignments( 25 | ctx, 26 | &ListPartitionReassignmentsRequest{ 27 | Topics: map[string]ListPartitionReassignmentsRequestTopic{ 28 | topic: {PartitionIndexes: []int{0, 1}}, 29 | }, 30 | }, 31 | ) 32 | 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | if resp.Error != nil { 37 | t.Error( 38 | "Unexpected error in response", 39 | "expected", nil, 40 | "got", resp.Error, 41 | ) 42 | } 43 | if len(resp.Topics) != 0 { 44 | t.Error( 45 | "Unexpected length of topic results", 46 | "expected", 0, 47 | "got", len(resp.Topics), 48 | ) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | // Logger interface API for log.Logger. 4 | type Logger interface { 5 | Printf(string, ...interface{}) 6 | } 7 | 8 | // LoggerFunc is a bridge between Logger and any third party logger 9 | // Usage: 10 | // l := NewLogger() // some logger 11 | // r := kafka.NewReader(kafka.ReaderConfig{ 12 | // Logger: kafka.LoggerFunc(l.Infof), 13 | // ErrorLogger: kafka.LoggerFunc(l.Errorf), 14 | // }) 15 | type LoggerFunc func(string, ...interface{}) 16 | 17 | func (f LoggerFunc) Printf(msg string, args ...interface{}) { f(msg, args...) } 18 | -------------------------------------------------------------------------------- /lz4/lz4.go: -------------------------------------------------------------------------------- 1 | // Package lz4 does nothing, it's kept for backward compatibility to avoid 2 | // breaking the majority of programs that imported it to install the compression 3 | // codec, which is now always included. 4 | package lz4 5 | 6 | import "github.com/segmentio/kafka-go/compress/lz4" 7 | 8 | const ( 9 | Code = 3 10 | ) 11 | 12 | type CompressionCodec = lz4.Codec 13 | 14 | func NewCompressionCodec() *CompressionCodec { 15 | return &CompressionCodec{} 16 | } 17 | -------------------------------------------------------------------------------- /metadata_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | ) 7 | 8 | func TestClientMetadata(t *testing.T) { 9 | client, topic, shutdown := newLocalClientAndTopic() 10 | defer shutdown() 11 | 12 | metadata, err := client.Metadata(context.Background(), &MetadataRequest{ 13 | Topics: []string{topic}, 14 | }) 15 | 16 | if err != nil { 17 | t.Fatal(err) 18 | } 19 | 20 | if len(metadata.Brokers) == 0 { 21 | t.Error("no brokers were returned in the metadata response") 22 | } 23 | 24 | for _, b := range metadata.Brokers { 25 | if b == (Broker{}) { 26 | t.Error("unexpected broker with zero-value in metadata response") 27 | } 28 | } 29 | 30 | if len(metadata.Topics) == 0 { 31 | t.Error("no topics were returned in the metadata response") 32 | } else { 33 | topicMetadata := metadata.Topics[0] 34 | 35 | if topicMetadata.Name != topic { 36 | t.Error("invalid topic name:", topicMetadata.Name) 37 | } 38 | 39 | if len(topicMetadata.Partitions) == 0 { 40 | t.Error("no partitions were returned in the topic metadata response") 41 | } else { 42 | partitionMetadata := topicMetadata.Partitions[0] 43 | 44 | if partitionMetadata.Topic != topic { 45 | t.Error("invalid partition topic name:", partitionMetadata.Topic) 46 | } 47 | 48 | if partitionMetadata.ID != 0 { 49 | t.Error("invalid partition index:", partitionMetadata.ID) 50 | } 51 | 52 | if partitionMetadata.Leader == (Broker{}) { 53 | 
t.Error("no partition leader was returned in the partition metadata response") 54 | } 55 | 56 | if partitionMetadata.Error != nil { 57 | t.Error("unexpected error found in the partition metadata response:", partitionMetadata.Error) 58 | } 59 | 60 | // assume newLocalClientAndTopic creates the topic with one 61 | // partition 62 | if len(topicMetadata.Partitions) > 1 { 63 | t.Error("too many partitions were returned in the topic metadata response") 64 | } 65 | } 66 | 67 | if topicMetadata.Error != nil { 68 | t.Error("unexpected error found in the topic metadata response:", topicMetadata.Error) 69 | } 70 | 71 | if len(metadata.Topics) > 1 { 72 | t.Error("too many topics were returned in the metadata response") 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /protocol/addoffsetstotxn/addoffsetstotxn.go: -------------------------------------------------------------------------------- 1 | package addoffsetstotxn 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v3,max=v3,tag"` 13 | 14 | TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` 15 | ProducerID int64 `kafka:"min=v0,max=v3"` 16 | ProducerEpoch int16 `kafka:"min=v0,max=v3"` 17 | GroupID string `kafka:"min=v0,max=v3|min=v3,max=v3,compact"` 18 | } 19 | 20 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } 21 | 22 | func (r *Request) Transaction() string { return r.TransactionalID } 23 | 24 | var _ protocol.TransactionalMessage = (*Request)(nil) 25 | 26 | type Response struct { 27 | // We need at least one tagged field to indicate that this is a "flexible" message 28 | // type. 
29 | _ struct{} `kafka:"min=v3,max=v3,tag"` 30 | 31 | ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` 32 | ErrorCode int16 `kafka:"min=v0,max=v3"` 33 | } 34 | 35 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } 36 | -------------------------------------------------------------------------------- /protocol/addoffsetstotxn/addoffsetstotxn_test.go: -------------------------------------------------------------------------------- 1 | package addoffsetstotxn_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/addoffsetstotxn" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestAddOffsetsToTxnRequest(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2, 3} { 12 | prototest.TestRequest(t, version, &addoffsetstotxn.Request{ 13 | TransactionalID: "transactional-id-0", 14 | ProducerID: 1, 15 | ProducerEpoch: 10, 16 | GroupID: "group-id-0", 17 | }) 18 | } 19 | } 20 | 21 | func TestAddOffsetsToTxnResponse(t *testing.T) { 22 | for _, version := range []int16{0, 1, 2, 3} { 23 | prototest.TestResponse(t, version, &addoffsetstotxn.Response{ 24 | ThrottleTimeMs: 10, 25 | ErrorCode: 1, 26 | }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /protocol/addpartitionstotxn/addpartitionstotxn.go: -------------------------------------------------------------------------------- 1 | package addpartitionstotxn 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v3,max=v3,tag"` 13 | 14 | TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` 15 | ProducerID int64 `kafka:"min=v0,max=v3"` 16 | ProducerEpoch int16 `kafka:"min=v0,max=v3"` 17 | Topics []RequestTopic `kafka:"min=v0,max=v3"` 18 | } 19 | 20 | type RequestTopic struct { 21 | // We need at least one tagged field to indicate that this is a "flexible" message 22 | // type. 23 | _ struct{} `kafka:"min=v3,max=v3,tag"` 24 | 25 | Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` 26 | Partitions []int32 `kafka:"min=v0,max=v3"` 27 | } 28 | 29 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } 30 | 31 | func (r *Request) Transaction() string { return r.TransactionalID } 32 | 33 | var _ protocol.TransactionalMessage = (*Request)(nil) 34 | 35 | type Response struct { 36 | // We need at least one tagged field to indicate that this is a "flexible" message 37 | // type. 38 | _ struct{} `kafka:"min=v3,max=v3,tag"` 39 | 40 | ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` 41 | Results []ResponseResult `kafka:"min=v0,max=v3"` 42 | } 43 | 44 | type ResponseResult struct { 45 | // We need at least one tagged field to indicate that this is a "flexible" message 46 | // type. 47 | _ struct{} `kafka:"min=v3,max=v3,tag"` 48 | 49 | Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` 50 | Results []ResponsePartition `kafka:"min=v0,max=v3"` 51 | } 52 | 53 | type ResponsePartition struct { 54 | // We need at least one tagged field to indicate that this is a "flexible" message 55 | // type. 
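// ErrorCode below uses the standard Kafka error space: 0 means the
// partition was added to the transaction, anything else maps to a
// protocol error such as 19 (NotEnoughReplicas), the value exercised in
// the tests that follow.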
56 | _ struct{} `kafka:"min=v3,max=v3,tag"` 57 | 58 | PartitionIndex int32 `kafka:"min=v0,max=v3"` 59 | ErrorCode int16 `kafka:"min=v0,max=v3"` 60 | } 61 | 62 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } 63 | -------------------------------------------------------------------------------- /protocol/addpartitionstotxn/addpartitionstotxn_test.go: -------------------------------------------------------------------------------- 1 | package addpartitionstotxn_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/addpartitionstotxn" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestAddPartitionsToTxnRequest(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2, 3} { 12 | prototest.TestRequest(t, version, &addpartitionstotxn.Request{ 13 | TransactionalID: "transaction-id-0", 14 | ProducerID: 10, 15 | ProducerEpoch: 100, 16 | Topics: []addpartitionstotxn.RequestTopic{ 17 | { 18 | Name: "topic-1", 19 | Partitions: []int32{0, 1, 2, 3}, 20 | }, 21 | { 22 | Name: "topic-2", 23 | Partitions: []int32{0, 1, 2}, 24 | }, 25 | }, 26 | }) 27 | } 28 | } 29 | 30 | func TestAddPartitionsToTxnResponse(t *testing.T) { 31 | for _, version := range []int16{0, 1, 2, 3} { 32 | prototest.TestResponse(t, version, &addpartitionstotxn.Response{ 33 | ThrottleTimeMs: 20, 34 | Results: []addpartitionstotxn.ResponseResult{ 35 | { 36 | Name: "topic-1", 37 | Results: []addpartitionstotxn.ResponsePartition{ 38 | { 39 | PartitionIndex: 0, 40 | ErrorCode: 19, 41 | }, 42 | { 43 | PartitionIndex: 1, 44 | ErrorCode: 0, 45 | }, 46 | }, 47 | }, 48 | { 49 | Name: "topic-2", 50 | Results: []addpartitionstotxn.ResponsePartition{ 51 | { 52 | PartitionIndex: 0, 53 | ErrorCode: 0, 54 | }, 55 | }, 56 | }, 57 | }, 58 | }) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /protocol/alterclientquotas/alterclientquotas_test.go: -------------------------------------------------------------------------------- 1 | package alterclientquotas_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/alterclientquotas" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | ) 14 | 15 | func TestAlterClientQuotasRequest(t *testing.T) { 16 | prototest.TestRequest(t, v0, &alterclientquotas.Request{ 17 | ValidateOnly: true, 18 | Entries: []alterclientquotas.Entry{ 19 | { 20 | Entities: []alterclientquotas.Entity{ 21 | { 22 | EntityType: "client-id", 23 | EntityName: "my-client-id", 24 | }, 25 | }, 26 | Ops: []alterclientquotas.Ops{ 27 | { 28 | Key: "producer_byte_rate", 29 | Value: 1.0, 30 | Remove: false, 31 | }, 32 | }, 33 | }, 34 | }, 35 | }) 36 | 37 | prototest.TestRequest(t, v1, &alterclientquotas.Request{ 38 | ValidateOnly: true, 39 | Entries: []alterclientquotas.Entry{ 40 | { 41 | Entities: []alterclientquotas.Entity{ 42 | { 43 | EntityType: "client-id", 44 | EntityName: "my-client-id", 45 | }, 46 | }, 47 | Ops: []alterclientquotas.Ops{ 48 | { 49 | Key: "producer_byte_rate", 50 | Value: 1.0, 51 | Remove: false, 52 | }, 53 | }, 54 | }, 55 | }, 56 | }) 57 | } 58 | 59 | func TestAlterClientQuotasResponse(t *testing.T) { 60 | prototest.TestResponse(t, v0, &alterclientquotas.Response{ 61 | ThrottleTimeMs: 500, 62 | Results: []alterclientquotas.ResponseQuotas{ 63 | { 64 | ErrorCode: 1, 65 | ErrorMessage: "foo", 66 | Entities: []alterclientquotas.Entity{ 67 | { 68 | EntityType: "client-id", 69 | EntityName: "my-client-id", 70 | 
}, 71 | }, 72 | }, 73 | }, 74 | }) 75 | 76 | prototest.TestResponse(t, v1, &alterclientquotas.Response{ 77 | ThrottleTimeMs: 500, 78 | Results: []alterclientquotas.ResponseQuotas{ 79 | { 80 | ErrorCode: 1, 81 | ErrorMessage: "foo", 82 | Entities: []alterclientquotas.Entity{ 83 | { 84 | EntityType: "client-id", 85 | EntityName: "my-client-id", 86 | }, 87 | }, 88 | }, 89 | }, 90 | }) 91 | } 92 | -------------------------------------------------------------------------------- /protocol/alterconfigs/alterconfigs.go: -------------------------------------------------------------------------------- 1 | package alterconfigs 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterConfigs 10 | type Request struct { 11 | Resources []RequestResources `kafka:"min=v0,max=v1"` 12 | ValidateOnly bool `kafka:"min=v0,max=v1"` 13 | } 14 | 15 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } 16 | 17 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 18 | return cluster.Brokers[cluster.Controller], nil 19 | } 20 | 21 | type RequestResources struct { 22 | ResourceType int8 `kafka:"min=v0,max=v1"` 23 | ResourceName string `kafka:"min=v0,max=v1"` 24 | Configs []RequestConfig `kafka:"min=v0,max=v1"` 25 | } 26 | 27 | type RequestConfig struct { 28 | Name string `kafka:"min=v0,max=v1"` 29 | Value string `kafka:"min=v0,max=v1,nullable"` 30 | } 31 | 32 | type Response struct { 33 | ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` 34 | Responses []ResponseResponses `kafka:"min=v0,max=v1"` 35 | } 36 | 37 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } 38 | 39 | type ResponseResponses struct { 40 | ErrorCode int16 `kafka:"min=v0,max=v1"` 41 | ErrorMessage string `kafka:"min=v0,max=v1,nullable"` 42 | ResourceType int8 `kafka:"min=v0,max=v1"` 43 | ResourceName string `kafka:"min=v0,max=v1"` 44 | } 45 | 46 | var ( 47 | _ protocol.BrokerMessage = (*Request)(nil) 48 | ) 49 | -------------------------------------------------------------------------------- /protocol/alterconfigs/alterconfigs_test.go: -------------------------------------------------------------------------------- 1 | package alterconfigs_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/alterconfigs" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | ) 14 | 15 | func TestAlterConfigsRequest(t *testing.T) { 16 | prototest.TestRequest(t, v0, &alterconfigs.Request{ 17 | ValidateOnly: true, 18 | Resources: []alterconfigs.RequestResources{ 19 | { 20 | ResourceType: 1, 21 | ResourceName: "foo", 22 | Configs: []alterconfigs.RequestConfig{ 23 | { 24 | Name: "foo", 25 | Value: "foo", 26 | }, 27 | }, 28 | }, 29 | }, 30 | }) 31 | 32 | prototest.TestRequest(t, v1, &alterconfigs.Request{ 33 | ValidateOnly: true, 34 | Resources: []alterconfigs.RequestResources{ 35 | { 36 | ResourceType: 1, 37 | ResourceName: "foo", 38 | Configs: []alterconfigs.RequestConfig{ 39 | { 40 | Name: "foo", 41 | Value: "foo", 42 | }, 43 | }, 44 | }, 45 | }, 46 | }) 47 | } 48 | 49 | func TestAlterConfigsResponse(t *testing.T) { 50 | prototest.TestResponse(t, v0, &alterconfigs.Response{ 51 | ThrottleTimeMs: 500, 52 | Responses: []alterconfigs.ResponseResponses{ 53 | { 54 | ErrorCode: 1, 55 | ErrorMessage: "foo", 56 | ResourceType: 1, 57 | ResourceName: "foo", 58 | }, 59 
| }, 60 | }) 61 | 62 | prototest.TestResponse(t, v1, &alterconfigs.Response{ 63 | ThrottleTimeMs: 500, 64 | Responses: []alterconfigs.ResponseResponses{ 65 | { 66 | ErrorCode: 1, 67 | ErrorMessage: "foo", 68 | ResourceType: 1, 69 | ResourceName: "foo", 70 | }, 71 | }, 72 | }) 73 | } 74 | -------------------------------------------------------------------------------- /protocol/alterpartitionreassignments/alterpartitionreassignments.go: -------------------------------------------------------------------------------- 1 | package alterpartitionreassignments 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterPartitionReassignments 10 | type Request struct { 11 | // We need at least one tagged field to indicate that this is a "flexible" message 12 | // type. 13 | _ struct{} `kafka:"min=v0,max=v0,tag"` 14 | 15 | TimeoutMs int32 `kafka:"min=v0,max=v0"` 16 | Topics []RequestTopic `kafka:"min=v0,max=v0"` 17 | } 18 | 19 | type RequestTopic struct { 20 | Name string `kafka:"min=v0,max=v0"` 21 | Partitions []RequestPartition `kafka:"min=v0,max=v0"` 22 | } 23 | 24 | type RequestPartition struct { 25 | PartitionIndex int32 `kafka:"min=v0,max=v0"` 26 | Replicas []int32 `kafka:"min=v0,max=v0,nullable"` 27 | } 28 | 29 | func (r *Request) ApiKey() protocol.ApiKey { 30 | return protocol.AlterPartitionReassignments 31 | } 32 | 33 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 34 | return cluster.Brokers[cluster.Controller], nil 35 | } 36 | 37 | type Response struct { 38 | // We need at least one tagged field to indicate that this is a "flexible" message 39 | // type. 40 | _ struct{} `kafka:"min=v0,max=v0,tag"` 41 | 42 | ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` 43 | ErrorCode int16 `kafka:"min=v0,max=v0"` 44 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 45 | Results []ResponseResult `kafka:"min=v0,max=v0"` 46 | } 47 | 48 | type ResponseResult struct { 49 | Name string `kafka:"min=v0,max=v0"` 50 | Partitions []ResponsePartition `kafka:"min=v0,max=v0"` 51 | } 52 | 53 | type ResponsePartition struct { 54 | PartitionIndex int32 `kafka:"min=v0,max=v0"` 55 | ErrorCode int16 `kafka:"min=v0,max=v0"` 56 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 57 | } 58 | 59 | func (r *Response) ApiKey() protocol.ApiKey { 60 | return protocol.AlterPartitionReassignments 61 | } 62 | -------------------------------------------------------------------------------- /protocol/alterpartitionreassignments/alterpartitionreassignments_test.go: -------------------------------------------------------------------------------- 1 | package alterpartitionreassignments_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/alterpartitionreassignments" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | ) 13 | 14 | func TestAlterPartitionReassignmentsRequest(t *testing.T) { 15 | prototest.TestRequest(t, v0, &alterpartitionreassignments.Request{ 16 | TimeoutMs: 1, 17 | Topics: []alterpartitionreassignments.RequestTopic{ 18 | { 19 | Name: "topic-1", 20 | Partitions: []alterpartitionreassignments.RequestPartition{ 21 | { 22 | PartitionIndex: 1, 23 | Replicas: []int32{1, 2, 3}, 24 | }, 25 | { 26 | PartitionIndex: 2, 27 | }, 28 | }, 29 | }, 30 | }, 31 | }) 32 | } 33 | 34 | func TestAlterPartitionReassignmentsResponse(t *testing.T) { 35 | prototest.TestResponse(t, 
v0, &alterpartitionreassignments.Response{ 36 | ErrorCode: 1, 37 | ErrorMessage: "error", 38 | ThrottleTimeMs: 1, 39 | Results: []alterpartitionreassignments.ResponseResult{ 40 | { 41 | Name: "topic-1", 42 | Partitions: []alterpartitionreassignments.ResponsePartition{ 43 | { 44 | PartitionIndex: 1, 45 | ErrorMessage: "error", 46 | ErrorCode: 1, 47 | }, 48 | { 49 | PartitionIndex: 2, 50 | }, 51 | }, 52 | }, 53 | }, 54 | }) 55 | } 56 | -------------------------------------------------------------------------------- /protocol/alteruserscramcredentials/alteruserscramcredentials.go: -------------------------------------------------------------------------------- 1 | package alteruserscramcredentials 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that v2+ uses "flexible" 11 | // messages. 12 | _ struct{} `kafka:"min=v0,max=v0,tag"` 13 | 14 | Deletions []RequestUserScramCredentialsDeletion `kafka:"min=v0,max=v0"` 15 | Upsertions []RequestUserScramCredentialsUpsertion `kafka:"min=v0,max=v0"` 16 | } 17 | 18 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials } 19 | 20 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 21 | return cluster.Brokers[cluster.Controller], nil 22 | } 23 | 24 | type RequestUserScramCredentialsDeletion struct { 25 | // We need at least one tagged field to indicate that v2+ uses "flexible" 26 | // messages. 27 | _ struct{} `kafka:"min=v0,max=v0,tag"` 28 | 29 | Name string `kafka:"min=v0,max=v0,compact"` 30 | Mechanism int8 `kafka:"min=v0,max=v0"` 31 | } 32 | 33 | type RequestUserScramCredentialsUpsertion struct { 34 | // We need at least one tagged field to indicate that v2+ uses "flexible" 35 | // messages. 36 | _ struct{} `kafka:"min=v0,max=v0,tag"` 37 | 38 | Name string `kafka:"min=v0,max=v0,compact"` 39 | Mechanism int8 `kafka:"min=v0,max=v0"` 40 | Iterations int32 `kafka:"min=v0,max=v0"` 41 | Salt []byte `kafka:"min=v0,max=v0,compact"` 42 | SaltedPassword []byte `kafka:"min=v0,max=v0,compact"` 43 | } 44 | 45 | type Response struct { 46 | // We need at least one tagged field to indicate that v2+ uses "flexible" 47 | // messages. 48 | _ struct{} `kafka:"min=v0,max=v0,tag"` 49 | 50 | ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` 51 | Results []ResponseUserScramCredentials `kafka:"min=v0,max=v0"` 52 | } 53 | 54 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials } 55 | 56 | type ResponseUserScramCredentials struct { 57 | // We need at least one tagged field to indicate that v2+ uses "flexible" 58 | // messages. 
59 | _ struct{} `kafka:"min=v0,max=v0,tag"` 60 | 61 | User string `kafka:"min=v0,max=v0,compact"` 62 | ErrorCode int16 `kafka:"min=v0,max=v0"` 63 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 64 | } 65 | 66 | var _ protocol.BrokerMessage = (*Request)(nil) 67 | -------------------------------------------------------------------------------- /protocol/alteruserscramcredentials/alteruserscramcredentials_test.go: -------------------------------------------------------------------------------- 1 | package alteruserscramcredentials_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/alteruserscramcredentials" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | ) 13 | 14 | func TestAlterUserScramCredentialsRequest(t *testing.T) { 15 | prototest.TestRequest(t, v0, &alteruserscramcredentials.Request{ 16 | Deletions: []alteruserscramcredentials.RequestUserScramCredentialsDeletion{ 17 | { 18 | Name: "foo-1", 19 | Mechanism: 1, 20 | }, 21 | }, 22 | Upsertions: []alteruserscramcredentials.RequestUserScramCredentialsUpsertion{ 23 | { 24 | Name: "foo-2", 25 | Mechanism: 2, 26 | Iterations: 15000, 27 | Salt: []byte("my-salt"), 28 | SaltedPassword: []byte("my-salted-password"), 29 | }, 30 | }, 31 | }) 32 | } 33 | 34 | func TestAlterUserScramCredentialsResponse(t *testing.T) { 35 | prototest.TestResponse(t, v0, &alteruserscramcredentials.Response{ 36 | ThrottleTimeMs: 500, 37 | Results: []alteruserscramcredentials.ResponseUserScramCredentials{ 38 | { 39 | User: "foo", 40 | ErrorCode: 1, 41 | ErrorMessage: "foo-error", 42 | }, 43 | }, 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /protocol/apiversions/apiversions.go: -------------------------------------------------------------------------------- 1 | package apiversions 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | _ struct{} `kafka:"min=v0,max=v2"` 11 | } 12 | 13 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.ApiVersions } 14 | 15 | type Response struct { 16 | ErrorCode int16 `kafka:"min=v0,max=v2"` 17 | ApiKeys []ApiKeyResponse `kafka:"min=v0,max=v2"` 18 | ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` 19 | } 20 | 21 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.ApiVersions } 22 | 23 | type ApiKeyResponse struct { 24 | ApiKey int16 `kafka:"min=v0,max=v2"` 25 | MinVersion int16 `kafka:"min=v0,max=v2"` 26 | MaxVersion int16 `kafka:"min=v0,max=v2"` 27 | } 28 | -------------------------------------------------------------------------------- /protocol/apiversions/apiversions_test.go: -------------------------------------------------------------------------------- 1 | package apiversions_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/apiversions" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | v2 = 2 14 | ) 15 | 16 | func TestApiversionsRequest(t *testing.T) { 17 | prototest.TestRequest(t, v0, &apiversions.Request{}) 18 | 19 | prototest.TestRequest(t, v1, &apiversions.Request{}) 20 | 21 | prototest.TestRequest(t, v2, &apiversions.Request{}) 22 | } 23 | 24 | func TestApiversionsResponse(t *testing.T) { 25 | prototest.TestResponse(t, v0, &apiversions.Response{ 26 | ErrorCode: 0, 27 | ApiKeys: []apiversions.ApiKeyResponse{ 28 | { 29 | ApiKey: 0, 30 | MinVersion: 0, 31 | MaxVersion: 2, 
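// ApiKey 0 is Produce in the Kafka protocol, so this entry models a
// broker advertising Produce versions 0 through 2.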
32 | }, 33 | }, 34 | }) 35 | 36 | prototest.TestResponse(t, v1, &apiversions.Response{ 37 | ErrorCode: 0, 38 | ApiKeys: []apiversions.ApiKeyResponse{ 39 | { 40 | ApiKey: 0, 41 | MinVersion: 0, 42 | MaxVersion: 2, 43 | }, 44 | }, 45 | ThrottleTimeMs: 10, 46 | }) 47 | 48 | prototest.TestResponse(t, v2, &apiversions.Response{ 49 | ErrorCode: 0, 50 | ApiKeys: []apiversions.ApiKeyResponse{ 51 | { 52 | ApiKey: 0, 53 | MinVersion: 0, 54 | MaxVersion: 2, 55 | }, 56 | }, 57 | ThrottleTimeMs: 50, 58 | }) 59 | } 60 | -------------------------------------------------------------------------------- /protocol/conn.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "net" 7 | "sync/atomic" 8 | "time" 9 | ) 10 | 11 | type Conn struct { 12 | buffer *bufio.Reader 13 | conn net.Conn 14 | clientID string 15 | idgen int32 16 | versions atomic.Value // map[ApiKey]int16 17 | } 18 | 19 | func NewConn(conn net.Conn, clientID string) *Conn { 20 | return &Conn{ 21 | buffer: bufio.NewReader(conn), 22 | conn: conn, 23 | clientID: clientID, 24 | } 25 | } 26 | 27 | func (c *Conn) String() string { 28 | return fmt.Sprintf("kafka://%s@%s->%s", c.clientID, c.LocalAddr(), c.RemoteAddr()) 29 | } 30 | 31 | func (c *Conn) Close() error { 32 | return c.conn.Close() 33 | } 34 | 35 | func (c *Conn) Discard(n int) (int, error) { 36 | return c.buffer.Discard(n) 37 | } 38 | 39 | func (c *Conn) Peek(n int) ([]byte, error) { 40 | return c.buffer.Peek(n) 41 | } 42 | 43 | func (c *Conn) Read(b []byte) (int, error) { 44 | return c.buffer.Read(b) 45 | } 46 | 47 | func (c *Conn) Write(b []byte) (int, error) { 48 | return c.conn.Write(b) 49 | } 50 | 51 | func (c *Conn) LocalAddr() net.Addr { 52 | return c.conn.LocalAddr() 53 | } 54 | 55 | func (c *Conn) RemoteAddr() net.Addr { 56 | return c.conn.RemoteAddr() 57 | } 58 | 59 | func (c *Conn) SetDeadline(t time.Time) error { 60 | return c.conn.SetDeadline(t) 61 | } 62 | 63 | func (c *Conn) SetReadDeadline(t time.Time) error { 64 | return c.conn.SetReadDeadline(t) 65 | } 66 | 67 | func (c *Conn) SetWriteDeadline(t time.Time) error { 68 | return c.conn.SetWriteDeadline(t) 69 | } 70 | 71 | func (c *Conn) SetVersions(versions map[ApiKey]int16) { 72 | connVersions := make(map[ApiKey]int16, len(versions)) 73 | 74 | for k, v := range versions { 75 | connVersions[k] = v 76 | } 77 | 78 | c.versions.Store(connVersions) 79 | } 80 | 81 | func (c *Conn) RoundTrip(msg Message) (Message, error) { 82 | correlationID := atomic.AddInt32(&c.idgen, +1) 83 | versions, _ := c.versions.Load().(map[ApiKey]int16) 84 | apiVersion := versions[msg.ApiKey()] 85 | 86 | if p, _ := msg.(PreparedMessage); p != nil { 87 | p.Prepare(apiVersion) 88 | } 89 | 90 | if raw, ok := msg.(RawExchanger); ok && raw.Required(versions) { 91 | return raw.RawExchange(c) 92 | } 93 | 94 | return RoundTrip(c, apiVersion, correlationID, c.clientID, msg) 95 | } 96 | 97 | var ( 98 | _ net.Conn = (*Conn)(nil) 99 | _ bufferedReader = (*Conn)(nil) 100 | ) 101 | -------------------------------------------------------------------------------- /protocol/consumer/consumer.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | const MaxVersionSupported = 1 4 | 5 | type Subscription struct { 6 | Version int16 `kafka:"min=v0,max=v1"` 7 | Topics []string `kafka:"min=v0,max=v1"` 8 | UserData []byte `kafka:"min=v0,max=v1,nullable"` 9 | OwnedPartitions []TopicPartition `kafka:"min=v1,max=v1"` 10 | } 11 | 12 | 
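// Note that OwnedPartitions above is tagged min=v1: marshaling a
// Subscription at version 0 silently drops the field. A sketch,
// mirroring the package's own round-trip test:
//
//	data, _ := protocol.Marshal(0, Subscription{
//		Topics:          []string{"t"},
//		OwnedPartitions: []TopicPartition{{Topic: "t", Partitions: []int32{0}}},
//	})
//	// decoding data back yields OwnedPartitions == nil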
type Assignment struct { 13 | Version int16 `kafka:"min=v0,max=v1"` 14 | AssignedPartitions []TopicPartition `kafka:"min=v0,max=v1"` 15 | UserData []byte `kafka:"min=v0,max=v1,nullable"` 16 | } 17 | 18 | type TopicPartition struct { 19 | Topic string `kafka:"min=v0,max=v1"` 20 | Partitions []int32 `kafka:"min=v0,max=v1"` 21 | } 22 | -------------------------------------------------------------------------------- /protocol/consumer/consumer_test.go: -------------------------------------------------------------------------------- 1 | package consumer_test 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/segmentio/kafka-go/protocol" 8 | "github.com/segmentio/kafka-go/protocol/consumer" 9 | ) 10 | 11 | func TestSubscription(t *testing.T) { 12 | subscription := consumer.Subscription{ 13 | Topics: []string{"topic-1", "topic-2"}, 14 | UserData: []byte("user-data"), 15 | OwnedPartitions: []consumer.TopicPartition{ 16 | { 17 | Topic: "topic-1", 18 | Partitions: []int32{1, 2, 3}, 19 | }, 20 | }, 21 | } 22 | 23 | for _, version := range []int16{1, 0} { 24 | if version == 0 { 25 | subscription.OwnedPartitions = nil 26 | } 27 | data, err := protocol.Marshal(version, subscription) 28 | if err != nil { 29 | t.Fatal(err) 30 | } 31 | var gotSubscription consumer.Subscription 32 | err = protocol.Unmarshal(data, version, &gotSubscription) 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | if !reflect.DeepEqual(subscription, gotSubscription) { 37 | t.Fatalf("unexpected result after marshal/unmarshal \nexpected\n %#v\ngot\n %#v", subscription, gotSubscription) 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /protocol/createacls/createacls.go: -------------------------------------------------------------------------------- 1 | package createacls 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that v2+ uses "flexible" 11 | // messages. 12 | _ struct{} `kafka:"min=v2,max=v3,tag"` 13 | 14 | Creations []RequestACLs `kafka:"min=v0,max=v3"` 15 | } 16 | 17 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls } 18 | 19 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 20 | return cluster.Brokers[cluster.Controller], nil 21 | } 22 | 23 | type RequestACLs struct { 24 | // We need at least one tagged field to indicate that v2+ uses "flexible" 25 | // messages. 26 | _ struct{} `kafka:"min=v2,max=v3,tag"` 27 | 28 | ResourceType int8 `kafka:"min=v0,max=v3"` 29 | ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` 30 | ResourcePatternType int8 `kafka:"min=v1,max=v3"` 31 | Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` 32 | Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` 33 | Operation int8 `kafka:"min=v0,max=v3"` 34 | PermissionType int8 `kafka:"min=v0,max=v3"` 35 | } 36 | 37 | type Response struct { 38 | // We need at least one tagged field to indicate that v2+ uses "flexible" 39 | // messages. 40 | _ struct{} `kafka:"min=v2,max=v3,tag"` 41 | 42 | ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` 43 | Results []ResponseACLs `kafka:"min=v0,max=v3"` 44 | } 45 | 46 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls } 47 | 48 | type ResponseACLs struct { 49 | // We need at least one tagged field to indicate that v2+ uses "flexible" 50 | // messages. 
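// Results in the Response come back in the same order as the Creations
// in the Request, one entry per ACL, so callers can correlate errors by
// index.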
51 | _ struct{} `kafka:"min=v2,max=v3,tag"` 52 | 53 | ErrorCode int16 `kafka:"min=v0,max=v3"` 54 | ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` 55 | } 56 | 57 | var _ protocol.BrokerMessage = (*Request)(nil) 58 | -------------------------------------------------------------------------------- /protocol/createpartitions/createpartitions.go: -------------------------------------------------------------------------------- 1 | package createpartitions 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_CreatePartitions. 10 | // TODO: Support version 2. 11 | type Request struct { 12 | Topics []RequestTopic `kafka:"min=v0,max=v1"` 13 | TimeoutMs int32 `kafka:"min=v0,max=v1"` 14 | ValidateOnly bool `kafka:"min=v0,max=v1"` 15 | } 16 | 17 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } 18 | 19 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 20 | return cluster.Brokers[cluster.Controller], nil 21 | } 22 | 23 | type RequestTopic struct { 24 | Name string `kafka:"min=v0,max=v1"` 25 | Count int32 `kafka:"min=v0,max=v1"` 26 | Assignments []RequestAssignment `kafka:"min=v0,max=v1,nullable"` 27 | } 28 | 29 | type RequestAssignment struct { 30 | BrokerIDs []int32 `kafka:"min=v0,max=v1"` 31 | } 32 | 33 | type Response struct { 34 | ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` 35 | Results []ResponseResult `kafka:"min=v0,max=v1"` 36 | } 37 | 38 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } 39 | 40 | type ResponseResult struct { 41 | Name string `kafka:"min=v0,max=v1"` 42 | ErrorCode int16 `kafka:"min=v0,max=v1"` 43 | ErrorMessage string `kafka:"min=v0,max=v1,nullable"` 44 | } 45 | 46 | var _ protocol.BrokerMessage = (*Request)(nil) 47 | -------------------------------------------------------------------------------- /protocol/createpartitions/createpartitions_test.go: -------------------------------------------------------------------------------- 1 | package createpartitions_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/createpartitions" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | ) 14 | 15 | func TestCreatePartitionsRequest(t *testing.T) { 16 | prototest.TestRequest(t, v0, &createpartitions.Request{ 17 | Topics: []createpartitions.RequestTopic{ 18 | { 19 | Name: "foo", 20 | Count: 1, 21 | Assignments: []createpartitions.RequestAssignment{ 22 | { 23 | BrokerIDs: []int32{1, 2, 3}, 24 | }, 25 | }, 26 | }, 27 | }, 28 | TimeoutMs: 500, 29 | ValidateOnly: false, 30 | }) 31 | 32 | prototest.TestRequest(t, v1, &createpartitions.Request{ 33 | Topics: []createpartitions.RequestTopic{ 34 | { 35 | Name: "foo", 36 | Count: 1, 37 | Assignments: []createpartitions.RequestAssignment{ 38 | { 39 | BrokerIDs: []int32{1, 2, 3}, 40 | }, 41 | }, 42 | }, 43 | }, 44 | TimeoutMs: 500, 45 | ValidateOnly: false, 46 | }) 47 | } 48 | 49 | func TestCreatePartitionsResponse(t *testing.T) { 50 | prototest.TestResponse(t, v0, &createpartitions.Response{ 51 | ThrottleTimeMs: 500, 52 | Results: []createpartitions.ResponseResult{ 53 | { 54 | Name: "foo", 55 | ErrorCode: 1, 56 | ErrorMessage: "foo", 57 | }, 58 | }, 59 | }) 60 | 61 | prototest.TestResponse(t, v1, &createpartitions.Response{ 62 | ThrottleTimeMs: 500, 63 | Results: 
[]createpartitions.ResponseResult{ 64 | { 65 | Name: "foo", 66 | ErrorCode: 1, 67 | ErrorMessage: "foo", 68 | }, 69 | }, 70 | }) 71 | } 72 | -------------------------------------------------------------------------------- /protocol/deletegroups/deletegroups.go: -------------------------------------------------------------------------------- 1 | package deletegroups 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v2,max=v2,tag"` 13 | 14 | GroupIDs []string `kafka:"min=v0,max=v2"` 15 | } 16 | 17 | func (r *Request) Group() string { 18 | // use first group to determine group coordinator 19 | if len(r.GroupIDs) > 0 { 20 | return r.GroupIDs[0] 21 | } 22 | return "" 23 | } 24 | 25 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } 26 | 27 | var ( 28 | _ protocol.GroupMessage = (*Request)(nil) 29 | ) 30 | 31 | type Response struct { 32 | // We need at least one tagged field to indicate that this is a "flexible" message 33 | // type. 34 | _ struct{} `kafka:"min=v2,max=v2,tag"` 35 | 36 | ThrottleTimeMs int32 `kafka:"min=v0,max=v2"` 37 | Responses []ResponseGroup `kafka:"min=v0,max=v2"` 38 | } 39 | 40 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } 41 | 42 | type ResponseGroup struct { 43 | GroupID string `kafka:"min=v0,max=v2"` 44 | ErrorCode int16 `kafka:"min=v0,max=v2"` 45 | } 46 | -------------------------------------------------------------------------------- /protocol/deletegroups/deletegroups_test.go: -------------------------------------------------------------------------------- 1 | package deletegroups_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/deletegroups" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestDeleteGroupsRequest(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2} { 12 | prototest.TestRequest(t, version, &deletegroups.Request{ 13 | GroupIDs: []string{"group1", "group2"}, 14 | }) 15 | } 16 | } 17 | 18 | func TestDeleteGroupsResponse(t *testing.T) { 19 | for _, version := range []int16{0, 1, 2} { 20 | prototest.TestResponse(t, version, &deletegroups.Response{ 21 | Responses: []deletegroups.ResponseGroup{ 22 | { 23 | GroupID: "group1", 24 | ErrorCode: 0, 25 | }, 26 | { 27 | GroupID: "group2", 28 | ErrorCode: 1, 29 | }, 30 | }, 31 | }) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /protocol/deletetopics/deletetopics.go: -------------------------------------------------------------------------------- 1 | package deletetopics 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | TopicNames []string `kafka:"min=v0,max=v3"` 11 | TimeoutMs int32 `kafka:"min=v0,max=v3"` 12 | } 13 | 14 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } 15 | 16 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 17 | return cluster.Brokers[cluster.Controller], nil 18 | } 19 | 20 | type Response struct { 21 | ThrottleTimeMs int32 `kafka:"min=v1,max=v3"` 22 | Responses []ResponseTopic `kafka:"min=v0,max=v3"` 23 | } 24 | 25 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } 26 | 27 | type 
ResponseTopic struct { 28 | Name string `kafka:"min=v0,max=v3"` 29 | ErrorCode int16 `kafka:"min=v0,max=v3"` 30 | } 31 | 32 | var ( 33 | _ protocol.BrokerMessage = (*Request)(nil) 34 | ) 35 | -------------------------------------------------------------------------------- /protocol/deletetopics/deletetopics_test.go: -------------------------------------------------------------------------------- 1 | package deletetopics_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/deletetopics" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | v3 = 3 14 | ) 15 | 16 | func TestDeleteTopicsRequest(t *testing.T) { 17 | prototest.TestRequest(t, v0, &deletetopics.Request{ 18 | TopicNames: []string{"foo", "bar"}, 19 | TimeoutMs: 500, 20 | }) 21 | 22 | prototest.TestRequest(t, v1, &deletetopics.Request{ 23 | TopicNames: []string{"foo", "bar"}, 24 | TimeoutMs: 500, 25 | }) 26 | 27 | prototest.TestRequest(t, v3, &deletetopics.Request{ 28 | TopicNames: []string{"foo", "bar"}, 29 | TimeoutMs: 500, 30 | }) 31 | } 32 | 33 | func TestDeleteTopicsResponse(t *testing.T) { 34 | prototest.TestResponse(t, v0, &deletetopics.Response{ 35 | Responses: []deletetopics.ResponseTopic{ 36 | { 37 | Name: "foo", 38 | ErrorCode: 1, 39 | }, 40 | { 41 | Name: "bar", 42 | ErrorCode: 1, 43 | }, 44 | }, 45 | }) 46 | 47 | prototest.TestResponse(t, v1, &deletetopics.Response{ 48 | ThrottleTimeMs: 500, 49 | Responses: []deletetopics.ResponseTopic{ 50 | { 51 | Name: "foo", 52 | ErrorCode: 1, 53 | }, 54 | { 55 | Name: "bar", 56 | ErrorCode: 1, 57 | }, 58 | }, 59 | }) 60 | 61 | prototest.TestResponse(t, v3, &deletetopics.Response{ 62 | ThrottleTimeMs: 500, 63 | Responses: []deletetopics.ResponseTopic{ 64 | { 65 | Name: "foo", 66 | ErrorCode: 1, 67 | }, 68 | { 69 | Name: "bar", 70 | ErrorCode: 1, 71 | }, 72 | }, 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /protocol/describeclientquotas/describeclientquotas_test.go: -------------------------------------------------------------------------------- 1 | package describeclientquotas_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/describeclientquotas" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | ) 14 | 15 | func TestDescribeClientQuotasRequest(t *testing.T) { 16 | prototest.TestRequest(t, v0, &describeclientquotas.Request{ 17 | Strict: true, 18 | Components: []describeclientquotas.Component{ 19 | { 20 | EntityType: "client-id", 21 | MatchType: 0, 22 | Match: "my-client-id", 23 | }, 24 | }, 25 | }) 26 | 27 | prototest.TestRequest(t, v1, &describeclientquotas.Request{ 28 | Strict: true, 29 | Components: []describeclientquotas.Component{ 30 | { 31 | EntityType: "client-id", 32 | MatchType: 0, 33 | Match: "my-client-id", 34 | }, 35 | }, 36 | }) 37 | } 38 | 39 | func TestDescribeClientQuotasResponse(t *testing.T) { 40 | prototest.TestResponse(t, v0, &describeclientquotas.Response{ 41 | ThrottleTimeMs: 1, 42 | ErrorCode: 1, 43 | ErrorMessage: "foo", 44 | Entries: []describeclientquotas.ResponseQuotas{ 45 | { 46 | Entities: []describeclientquotas.Entity{ 47 | { 48 | EntityType: "client-id", 49 | EntityName: "my-client-id", 50 | }, 51 | }, 52 | Values: []describeclientquotas.Value{ 53 | { 54 | Key: "foo", 55 | Value: 1.0, 56 | }, 57 | }, 58 | }, 59 | }, 60 | }) 61 | 62 | prototest.TestResponse(t, v1, &describeclientquotas.Response{ 63 | ThrottleTimeMs: 1, 64 | 
ErrorCode: 1, 65 | ErrorMessage: "foo", 66 | Entries: []describeclientquotas.ResponseQuotas{ 67 | { 68 | Entities: []describeclientquotas.Entity{ 69 | { 70 | EntityType: "client-id", 71 | EntityName: "my-client-id", 72 | }, 73 | }, 74 | Values: []describeclientquotas.Value{ 75 | { 76 | Key: "foo", 77 | Value: 1.0, 78 | }, 79 | }, 80 | }, 81 | }, 82 | }) 83 | } 84 | -------------------------------------------------------------------------------- /protocol/describeconfigs/describeconfigs_test.go: -------------------------------------------------------------------------------- 1 | package describeconfigs 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "reflect" 8 | "testing" 9 | 10 | "github.com/segmentio/kafka-go/protocol" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestResponse_Merge(t *testing.T) { 15 | t.Run("happy path", func(t *testing.T) { 16 | r := &Response{} 17 | 18 | r1 := &Response{ 19 | Resources: []ResponseResource{ 20 | {ResourceName: "r1"}, 21 | }, 22 | } 23 | r2 := &Response{ 24 | Resources: []ResponseResource{ 25 | {ResourceName: "r2"}, 26 | }, 27 | } 28 | 29 | got, err := r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, r2}) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | 34 | want := &Response{ 35 | Resources: []ResponseResource{ 36 | {ResourceName: "r1"}, 37 | {ResourceName: "r2"}, 38 | }, 39 | } 40 | 41 | if !reflect.DeepEqual(want, got) { 42 | t.Fatalf("wanted response: \n%+v, got \n%+v", want, got) 43 | } 44 | }) 45 | 46 | t.Run("with errors", func(t *testing.T) { 47 | r := &Response{} 48 | 49 | r1 := &Response{ 50 | Resources: []ResponseResource{ 51 | {ResourceName: "r1"}, 52 | }, 53 | } 54 | 55 | _, err := r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, io.EOF}) 56 | if !errors.Is(err, io.EOF) { 57 | t.Fatalf("wanted err io.EOF, got %v", err) 58 | } 59 | }) 60 | 61 | t.Run("panic with unexpected type", func(t *testing.T) { 62 | defer func() { 63 | msg := recover() 64 | require.Equal(t, "BUG: result must be a message or an error but not string", fmt.Sprintf("%s", msg)) 65 | }() 66 | r := &Response{} 67 | 68 | r1 := &Response{ 69 | Resources: []ResponseResource{ 70 | {ResourceName: "r1"}, 71 | }, 72 | } 73 | 74 | _, _ = r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, "how did a string got here"}) 75 | t.Fatal("did not panic") 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /protocol/describeuserscramcredentials/describeuserscramcredentials.go: -------------------------------------------------------------------------------- 1 | package describeuserscramcredentials 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that v2+ uses "flexible" 11 | // messages. 12 | _ struct{} `kafka:"min=v0,max=v0,tag"` 13 | 14 | Users []RequestUser `kafka:"min=v0,max=v0"` 15 | } 16 | 17 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials } 18 | 19 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 20 | return cluster.Brokers[cluster.Controller], nil 21 | } 22 | 23 | type RequestUser struct { 24 | // We need at least one tagged field to indicate that v2+ uses "flexible" 25 | // messages. 
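// As with the other admin APIs in this package, the Broker() method
// above pins the request to the cluster controller.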
26 | _ struct{} `kafka:"min=v0,max=v0,tag"` 27 | 28 | Name string `kafka:"min=v0,max=v0,compact"` 29 | } 30 | 31 | type Response struct { 32 | // We need at least one tagged field to indicate that v2+ uses "flexible" 33 | // messages. 34 | _ struct{} `kafka:"min=v0,max=v0,tag"` 35 | 36 | ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` 37 | ErrorCode int16 `kafka:"min=v0,max=v0"` 38 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 39 | Results []ResponseResult `kafka:"min=v0,max=v0"` 40 | } 41 | 42 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials } 43 | 44 | type ResponseResult struct { 45 | // We need at least one tagged field to indicate that v2+ uses "flexible" 46 | // messages. 47 | _ struct{} `kafka:"min=v0,max=v0,tag"` 48 | 49 | User string `kafka:"min=v0,max=v0,compact"` 50 | ErrorCode int16 `kafka:"min=v0,max=v0"` 51 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 52 | CredentialInfos []CredentialInfo `kafka:"min=v0,max=v0"` 53 | } 54 | 55 | type CredentialInfo struct { 56 | // We need at least one tagged field to indicate that v2+ uses "flexible" 57 | // messages. 58 | _ struct{} `kafka:"min=v0,max=v0,tag"` 59 | 60 | Mechanism int8 `kafka:"min=v0,max=v0"` 61 | Iterations int32 `kafka:"min=v0,max=v0"` 62 | } 63 | 64 | var _ protocol.BrokerMessage = (*Request)(nil) 65 | -------------------------------------------------------------------------------- /protocol/describeuserscramcredentials/describeuserscramcredentials_test.go: -------------------------------------------------------------------------------- 1 | package describeuserscramcredentials_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/describeuserscramcredentials" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | ) 13 | 14 | func TestDescribeUserScramCredentialsRequest(t *testing.T) { 15 | prototest.TestRequest(t, v0, &describeuserscramcredentials.Request{ 16 | Users: []describeuserscramcredentials.RequestUser{ 17 | { 18 | Name: "foo-1", 19 | }, 20 | }, 21 | }) 22 | } 23 | 24 | func TestDescribeUserScramCredentialsResponse(t *testing.T) { 25 | prototest.TestResponse(t, v0, &describeuserscramcredentials.Response{ 26 | ThrottleTimeMs: 500, 27 | Results: []describeuserscramcredentials.ResponseResult{ 28 | { 29 | User: "foo", 30 | ErrorCode: 1, 31 | ErrorMessage: "foo-error", 32 | CredentialInfos: []describeuserscramcredentials.CredentialInfo{ 33 | { 34 | Mechanism: 2, 35 | Iterations: 15000, 36 | }, 37 | }, 38 | }, 39 | }, 40 | }) 41 | } 42 | -------------------------------------------------------------------------------- /protocol/electleaders/electleaders.go: -------------------------------------------------------------------------------- 1 | package electleaders 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ElectLeaders 10 | type Request struct { 11 | ElectionType int8 `kafka:"min=v1,max=v1"` 12 | TopicPartitions []RequestTopicPartitions `kafka:"min=v0,max=v1"` 13 | TimeoutMs int32 `kafka:"min=v0,max=v1"` 14 | } 15 | 16 | type RequestTopicPartitions struct { 17 | Topic string `kafka:"min=v0,max=v1"` 18 | PartitionIDs []int32 `kafka:"min=v0,max=v1"` 19 | } 20 | 21 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } 22 | 23 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 24 
| return cluster.Brokers[cluster.Controller], nil 25 | } 26 | 27 | type Response struct { 28 | ThrottleTime int32 `kafka:"min=v0,max=v1"` 29 | ErrorCode int16 `kafka:"min=v1,max=v1"` 30 | ReplicaElectionResults []ResponseReplicaElectionResult `kafka:"min=v0,max=v1"` 31 | } 32 | 33 | type ResponseReplicaElectionResult struct { 34 | Topic string `kafka:"min=v0,max=v1"` 35 | PartitionResults []ResponsePartitionResult `kafka:"min=v0,max=v1"` 36 | } 37 | 38 | type ResponsePartitionResult struct { 39 | PartitionID int32 `kafka:"min=v0,max=v1"` 40 | ErrorCode int16 `kafka:"min=v0,max=v1"` 41 | ErrorMessage string `kafka:"min=v0,max=v1,nullable"` 42 | } 43 | 44 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } 45 | -------------------------------------------------------------------------------- /protocol/electleaders/electleaders_test.go: -------------------------------------------------------------------------------- 1 | package electleaders_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/electleaders" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | v1 = 1 13 | ) 14 | 15 | func TestElectLeadersRequest(t *testing.T) { 16 | prototest.TestRequest(t, v0, &electleaders.Request{ 17 | TimeoutMs: 500, 18 | TopicPartitions: []electleaders.RequestTopicPartitions{ 19 | { 20 | Topic: "foo", 21 | PartitionIDs: []int32{100, 101, 102}, 22 | }, 23 | }, 24 | }) 25 | 26 | prototest.TestRequest(t, v1, &electleaders.Request{ 27 | ElectionType: 1, 28 | TimeoutMs: 500, 29 | TopicPartitions: []electleaders.RequestTopicPartitions{ 30 | { 31 | Topic: "foo", 32 | PartitionIDs: []int32{100, 101, 102}, 33 | }, 34 | }, 35 | }) 36 | } 37 | 38 | func TestElectLeadersResponse(t *testing.T) { 39 | prototest.TestResponse(t, v0, &electleaders.Response{ 40 | ThrottleTime: 500, 41 | ReplicaElectionResults: []electleaders.ResponseReplicaElectionResult{ 42 | { 43 | Topic: "foo", 44 | PartitionResults: []electleaders.ResponsePartitionResult{ 45 | {PartitionID: 100, ErrorCode: 0, ErrorMessage: ""}, 46 | {PartitionID: 101, ErrorCode: 0, ErrorMessage: ""}, 47 | {PartitionID: 102, ErrorCode: 0, ErrorMessage: ""}, 48 | }, 49 | }, 50 | }, 51 | }) 52 | 53 | prototest.TestResponse(t, v1, &electleaders.Response{ 54 | ThrottleTime: 500, 55 | ErrorCode: 1, 56 | ReplicaElectionResults: []electleaders.ResponseReplicaElectionResult{ 57 | { 58 | Topic: "foo", 59 | PartitionResults: []electleaders.ResponsePartitionResult{ 60 | {PartitionID: 100, ErrorCode: 0, ErrorMessage: ""}, 61 | {PartitionID: 101, ErrorCode: 0, ErrorMessage: ""}, 62 | {PartitionID: 102, ErrorCode: 0, ErrorMessage: ""}, 63 | }, 64 | }, 65 | }, 66 | }) 67 | } 68 | -------------------------------------------------------------------------------- /protocol/endtxn/endtxn.go: -------------------------------------------------------------------------------- 1 | package endtxn 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 
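// Committed (declared below) selects the marker written to each
// partition in the transaction: true commits, false aborts.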
12 | _ struct{} `kafka:"min=v3,max=v3,tag"` 13 | 14 | TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` 15 | ProducerID int64 `kafka:"min=v0,max=v3"` 16 | ProducerEpoch int16 `kafka:"min=v0,max=v3"` 17 | Committed bool `kafka:"min=v0,max=v3"` 18 | } 19 | 20 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.EndTxn } 21 | 22 | func (r *Request) Transaction() string { return r.TransactionalID } 23 | 24 | var _ protocol.TransactionalMessage = (*Request)(nil) 25 | 26 | type Response struct { 27 | // We need at least one tagged field to indicate that this is a "flexible" message 28 | // type. 29 | _ struct{} `kafka:"min=v3,max=v3,tag"` 30 | 31 | ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` 32 | ErrorCode int16 `kafka:"min=v0,max=v3"` 33 | } 34 | 35 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.EndTxn } 36 | -------------------------------------------------------------------------------- /protocol/endtxn/endtxn_test.go: -------------------------------------------------------------------------------- 1 | package endtxn_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/endtxn" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestEndTxnRequest(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2, 3} { 12 | prototest.TestRequest(t, version, &endtxn.Request{ 13 | TransactionalID: "transactional-id-1", 14 | ProducerID: 1, 15 | ProducerEpoch: 100, 16 | Committed: false, 17 | }) 18 | } 19 | } 20 | 21 | func TestEndTxnResponse(t *testing.T) { 22 | for _, version := range []int16{0, 1, 2, 3} { 23 | prototest.TestResponse(t, version, &endtxn.Response{ 24 | ThrottleTimeMs: 1000, 25 | ErrorCode: 4, 26 | }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /protocol/findcoordinator/findcoordinator.go: -------------------------------------------------------------------------------- 1 | package findcoordinator 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | Key string `kafka:"min=v0,max=v2"` 11 | KeyType int8 `kafka:"min=v1,max=v2"` 12 | } 13 | 14 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.FindCoordinator } 15 | 16 | type Response struct { 17 | ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` 18 | ErrorCode int16 `kafka:"min=v0,max=v2"` 19 | ErrorMessage string `kafka:"min=v1,max=v2,nullable"` 20 | NodeID int32 `kafka:"min=v0,max=v2"` 21 | Host string `kafka:"min=v0,max=v2"` 22 | Port int32 `kafka:"min=v0,max=v2"` 23 | } 24 | 25 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.FindCoordinator } 26 | -------------------------------------------------------------------------------- /protocol/heartbeat/heartbeat.go: -------------------------------------------------------------------------------- 1 | package heartbeat 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_Heartbeat 10 | type Request struct { 11 | // We need at least one tagged field to indicate that this is a "flexible" message 12 | // type. 
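// (v4 is the first "flexible" version of Heartbeat; GroupInstanceID below came with KIP-345 static membership in v3.)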
13 | _ struct{} `kafka:"min=v4,max=v4,tag"` 14 | 15 | GroupID string `kafka:"min=v0,max=v4"` 16 | GenerationID int32 `kafka:"min=v0,max=v4"` 17 | MemberID string `kafka:"min=v0,max=v4"` 18 | GroupInstanceID string `kafka:"min=v3,max=v4,nullable"` 19 | } 20 | 21 | func (r *Request) ApiKey() protocol.ApiKey { 22 | return protocol.Heartbeat 23 | } 24 | 25 | type Response struct { 26 | // We need at least one tagged field to indicate that this is a "flexible" message 27 | // type. 28 | _ struct{} `kafka:"min=v4,max=v4,tag"` 29 | 30 | ErrorCode int16 `kafka:"min=v0,max=v4"` 31 | ThrottleTimeMs int32 `kafka:"min=v1,max=v4"` 32 | } 33 | 34 | func (r *Response) ApiKey() protocol.ApiKey { 35 | return protocol.Heartbeat 36 | } 37 | -------------------------------------------------------------------------------- /protocol/heartbeat/heartbeat_test.go: -------------------------------------------------------------------------------- 1 | package heartbeat_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/heartbeat" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestHeartbeatRequest(t *testing.T) { 11 | // Versions 0-3 have all the same fields. 12 | for _, version := range []int16{0, 1, 2, 3} { 13 | prototest.TestRequest(t, version, &heartbeat.Request{ 14 | GroupID: "group-1", 15 | GenerationID: 1, 16 | MemberID: "member-1", 17 | }) 18 | } 19 | 20 | for _, version := range []int16{4} { 21 | prototest.TestRequest(t, version, &heartbeat.Request{ 22 | GroupID: "group-2", 23 | GenerationID: 10, 24 | MemberID: "member-2", 25 | GroupInstanceID: "instance-1", 26 | }) 27 | } 28 | } 29 | 30 | func TestHeartbeatResponse(t *testing.T) { 31 | for _, version := range []int16{0} { 32 | prototest.TestResponse(t, version, &heartbeat.Response{ 33 | ErrorCode: 4, 34 | }) 35 | } 36 | 37 | // Versions 1-4 have all the same fields. 38 | for _, version := range []int16{1, 2, 3, 4} { 39 | prototest.TestResponse(t, version, &heartbeat.Response{ 40 | ErrorCode: 4, 41 | ThrottleTimeMs: 10, 42 | }) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /protocol/initproducerid/initproducerid.go: -------------------------------------------------------------------------------- 1 | package initproducerid 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v2,max=v4,tag"` 13 | 14 | TransactionalID string `kafka:"min=v0,max=v4,nullable"` 15 | TransactionTimeoutMs int32 `kafka:"min=v0,max=v4"` 16 | ProducerID int64 `kafka:"min=v3,max=v4"` 17 | ProducerEpoch int16 `kafka:"min=v3,max=v4"` 18 | } 19 | 20 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.InitProducerId } 21 | 22 | func (r *Request) Transaction() string { return r.TransactionalID } 23 | 24 | var _ protocol.TransactionalMessage = (*Request)(nil) 25 | 26 | type Response struct { 27 | // We need at least one tagged field to indicate that this is a "flexible" message 28 | // type.
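// (InitProducerId is flexible from v2 onward; the request's ProducerID/ProducerEpoch fields were added in v3 by KIP-360 to allow safe re-initialization of a transactional producer.)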
29 | _ struct{} `kafka:"min=v2,max=v4,tag"` 30 | 31 | ThrottleTimeMs int32 `kafka:"min=v0,max=v4"` 32 | ErrorCode int16 `kafka:"min=v0,max=v4"` 33 | ProducerID int64 `kafka:"min=v0,max=v4"` 34 | ProducerEpoch int16 `kafka:"min=v0,max=v4"` 35 | } 36 | 37 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.InitProducerId } 38 | -------------------------------------------------------------------------------- /protocol/initproducerid/initproducerid_test.go: -------------------------------------------------------------------------------- 1 | package initproducerid_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/initproducerid" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestInitProducerIDRequest(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2} { 12 | prototest.TestRequest(t, version, &initproducerid.Request{ 13 | TransactionalID: "transactional-id-0", 14 | TransactionTimeoutMs: 1000, 15 | }) 16 | } 17 | 18 | // Version 3 added: 19 | // ProducerID 20 | // ProducerEpoch 21 | for _, version := range []int16{3, 4} { 22 | prototest.TestRequest(t, version, &initproducerid.Request{ 23 | TransactionalID: "transactional-id-0", 24 | TransactionTimeoutMs: 1000, 25 | ProducerID: 10, 26 | ProducerEpoch: 5, 27 | }) 28 | } 29 | } 30 | 31 | func TestInitProducerIDResponse(t *testing.T) { 32 | for _, version := range []int16{0, 1, 2, 3, 4} { 33 | prototest.TestResponse(t, version, &initproducerid.Response{ 34 | ThrottleTimeMs: 1000, 35 | ErrorCode: 9, 36 | ProducerID: 10, 37 | ProducerEpoch: 1000, 38 | }) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /protocol/leavegroup/leavegroup.go: -------------------------------------------------------------------------------- 1 | package leavegroup 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v4,max=v4,tag"` 13 | 14 | GroupID string `kafka:"min=v0,max=v2|min=v3,max=v4,compact"` 15 | MemberID string `kafka:"min=v0,max=v2"` 16 | Members []RequestMember `kafka:"min=v3,max=v4"` 17 | } 18 | 19 | func (r *Request) Prepare(apiVersion int16) { 20 | if apiVersion < 3 { 21 | if len(r.Members) > 0 { 22 | r.MemberID = r.Members[0].MemberID 23 | } 24 | } 25 | } 26 | 27 | type RequestMember struct { 28 | // We need at least one tagged field to indicate that this is a "flexible" message 29 | // type. 30 | _ struct{} `kafka:"min=v4,max=v4,tag"` 31 | 32 | MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` 33 | GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` 34 | } 35 | 36 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } 37 | 38 | func (r *Request) Group() string { return r.GroupID } 39 | 40 | var ( 41 | _ protocol.GroupMessage = (*Request)(nil) 42 | _ protocol.PreparedMessage = (*Request)(nil) 43 | ) 44 | 45 | type Response struct { 46 | // We need at least one tagged field to indicate that this is a "flexible" message 47 | // type.
48 | _ struct{} `kafka:"min=v4,max=v4,tag"` 49 | 50 | ErrorCode int16 `kafka:"min=v0,max=v4"` 51 | ThrottleTimeMS int32 `kafka:"min=v1,max=v4"` 52 | Members []ResponseMember `kafka:"min=v3,max=v4"` 53 | } 54 | 55 | type ResponseMember struct { 56 | // We need at least one tagged field to indicate that this is a "flexible" message 57 | // type. 58 | _ struct{} `kafka:"min=v4,max=v4,tag"` 59 | 60 | MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` 61 | GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` 62 | ErrorCode int16 `kafka:"min=v3,max=v4"` 63 | } 64 | 65 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } 66 | -------------------------------------------------------------------------------- /protocol/leavegroup/leavegroup_test.go: -------------------------------------------------------------------------------- 1 | package leavegroup_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/leavegroup" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestLeaveGroupReq(t *testing.T) { 11 | for _, version := range []int16{0, 1, 2} { 12 | prototest.TestRequest(t, version, &leavegroup.Request{ 13 | GroupID: "group-id", 14 | MemberID: "member-id", 15 | }) 16 | } 17 | 18 | // Version 3 added 19 | // Members 20 | // removed 21 | // MemberID 22 | for _, version := range []int16{3, 4} { 23 | prototest.TestRequest(t, version, &leavegroup.Request{ 24 | GroupID: "group-id", 25 | Members: []leavegroup.RequestMember{ 26 | { 27 | MemberID: "member-id-1", 28 | GroupInstanceID: "group-instance-id", 29 | }, 30 | }, 31 | }) 32 | } 33 | } 34 | 35 | func TestLeaveGroupResp(t *testing.T) { 36 | for _, version := range []int16{0} { 37 | prototest.TestResponse(t, version, &leavegroup.Response{ 38 | ErrorCode: 10, 39 | }) 40 | } 41 | 42 | // Version 1 added 43 | // ThrottleTimeMS 44 | for _, version := range []int16{1, 2} { 45 | prototest.TestResponse(t, version, &leavegroup.Response{ 46 | ErrorCode: 10, 47 | ThrottleTimeMS: 100, 48 | }) 49 | } 50 | 51 | // Version 3 added 52 | // Members 53 | for _, version := range []int16{3, 4} { 54 | prototest.TestResponse(t, version, &leavegroup.Response{ 55 | ErrorCode: 10, 56 | ThrottleTimeMS: 100, 57 | Members: []leavegroup.ResponseMember{ 58 | { 59 | MemberID: "member-id-1", 60 | GroupInstanceID: "group-instance-id", 61 | ErrorCode: 10, 62 | }, 63 | }, 64 | }) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /protocol/listgroups/listgroups.go: -------------------------------------------------------------------------------- 1 | package listgroups 2 | 3 | import ( 4 | "github.com/segmentio/kafka-go/protocol" 5 | ) 6 | 7 | func init() { 8 | protocol.Register(&Request{}, &Response{}) 9 | } 10 | 11 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListGroups 12 | type Request struct { 13 | _ struct{} `kafka:"min=v0,max=v2"` 14 | brokerID int32 15 | } 16 | 17 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListGroups } 18 | 19 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 20 | return cluster.Brokers[r.brokerID], nil 21 | } 22 | 23 | func (r *Request) Split(cluster protocol.Cluster) ( 24 | []protocol.Message, 25 | protocol.Merger, 26 | error, 27 | ) { 28 | messages := []protocol.Message{} 29 | 30 | for _, broker := range cluster.Brokers { 31 | messages = append(messages, &Request{brokerID: broker.ID}) 32 | } 33 | 34 | return 
messages, new(Response), nil 35 | } 36 | 37 | type Response struct { 38 | ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` 39 | ErrorCode int16 `kafka:"min=v0,max=v2"` 40 | Groups []ResponseGroup `kafka:"min=v0,max=v2"` 41 | } 42 | 43 | type ResponseGroup struct { 44 | GroupID string `kafka:"min=v0,max=v2"` 45 | ProtocolType string `kafka:"min=v0,max=v2"` 46 | 47 | // Use this to store which broker returned the response. 48 | BrokerID int32 `kafka:"-"` 49 | } 50 | 51 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListGroups } 52 | 53 | func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( 54 | protocol.Message, 55 | error, 56 | ) { 57 | response := &Response{} 58 | 59 | for r, result := range results { 60 | m, err := protocol.Result(result) 61 | if err != nil { 62 | return nil, err 63 | } 64 | brokerResp := m.(*Response) 65 | respGroups := []ResponseGroup{} 66 | 67 | for _, group := range brokerResp.Groups { 68 | respGroups = append( 69 | respGroups, 70 | ResponseGroup{ 71 | GroupID: group.GroupID, 72 | ProtocolType: group.ProtocolType, 73 | BrokerID: requests[r].(*Request).brokerID, 74 | }, 75 | ) 76 | } 77 | 78 | response.Groups = append(response.Groups, respGroups...) 79 | } 80 | 81 | return response, nil 82 | } 83 | -------------------------------------------------------------------------------- /protocol/listpartitionreassignments/listpartitionreassignments.go: -------------------------------------------------------------------------------- 1 | package listpartitionreassignments 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | // Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListPartitionReassignments. 10 | 11 | type Request struct { 12 | // We need at least one tagged field to indicate that this is a "flexible" message 13 | // type. 14 | _ struct{} `kafka:"min=v0,max=v0,tag"` 15 | 16 | TimeoutMs int32 `kafka:"min=v0,max=v0"` 17 | Topics []RequestTopic `kafka:"min=v0,max=v0,nullable"` 18 | } 19 | 20 | type RequestTopic struct { 21 | // We need at least one tagged field to indicate that this is a "flexible" message 22 | // type. 23 | _ struct{} `kafka:"min=v0,max=v0,tag"` 24 | 25 | Name string `kafka:"min=v0,max=v0"` 26 | PartitionIndexes []int32 `kafka:"min=v0,max=v0"` 27 | } 28 | 29 | func (r *Request) ApiKey() protocol.ApiKey { 30 | return protocol.ListPartitionReassignments 31 | } 32 | 33 | func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { 34 | return cluster.Brokers[cluster.Controller], nil 35 | } 36 | 37 | type Response struct { 38 | // We need at least one tagged field to indicate that this is a "flexible" message 39 | // type. 40 | _ struct{} `kafka:"min=v0,max=v0,tag"` 41 | 42 | ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` 43 | ErrorCode int16 `kafka:"min=v0,max=v0"` 44 | ErrorMessage string `kafka:"min=v0,max=v0,nullable"` 45 | Topics []ResponseTopic `kafka:"min=v0,max=v0"` 46 | } 47 | 48 | type ResponseTopic struct { 49 | // We need at least one tagged field to indicate that this is a "flexible" message 50 | // type. 51 | _ struct{} `kafka:"min=v0,max=v0,tag"` 52 | 53 | Name string `kafka:"min=v0,max=v0"` 54 | Partitions []ResponsePartition `kafka:"min=v0,max=v0"` 55 | } 56 | 57 | type ResponsePartition struct { 58 | // We need at least one tagged field to indicate that this is a "flexible" message 59 | // type.
60 | _ struct{} `kafka:"min=v0,max=v0,tag"` 61 | 62 | PartitionIndex int32 `kafka:"min=v0,max=v0"` 63 | Replicas []int32 `kafka:"min=v0,max=v0"` 64 | AddingReplicas []int32 `kafka:"min=v0,max=v0"` 65 | RemovingReplicas []int32 `kafka:"min=v0,max=v0"` 66 | } 67 | 68 | func (r *Response) ApiKey() protocol.ApiKey { 69 | return protocol.ListPartitionReassignments 70 | } 71 | -------------------------------------------------------------------------------- /protocol/listpartitionreassignments/listpartitionreassignments_test.go: -------------------------------------------------------------------------------- 1 | package listpartitionreassignments_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/listpartitionreassignments" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | const ( 11 | v0 = 0 12 | ) 13 | 14 | func TestListPartitionReassignmentsRequest(t *testing.T) { 15 | prototest.TestRequest(t, v0, &listpartitionreassignments.Request{ 16 | Topics: []listpartitionreassignments.RequestTopic{ 17 | { 18 | Name: "topic-1", 19 | PartitionIndexes: []int32{1, 2, 3}, 20 | }, 21 | }, 22 | }) 23 | } 24 | 25 | func TestListPartitionReassignmentsResponse(t *testing.T) { 26 | prototest.TestResponse(t, v0, &listpartitionreassignments.Response{ 27 | Topics: []listpartitionreassignments.ResponseTopic{ 28 | { 29 | Name: "topic-1", 30 | Partitions: []listpartitionreassignments.ResponsePartition{ 31 | { 32 | PartitionIndex: 1, 33 | Replicas: []int32{1, 2, 3}, 34 | AddingReplicas: []int32{4, 5, 6}, 35 | RemovingReplicas: []int32{7, 8, 9}, 36 | }, 37 | }, 38 | }, 39 | }, 40 | }) 41 | } 42 | -------------------------------------------------------------------------------- /protocol/metadata/metadata.go: -------------------------------------------------------------------------------- 1 | package metadata 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | TopicNames []string `kafka:"min=v0,max=v8,nullable"` 11 | AllowAutoTopicCreation bool `kafka:"min=v4,max=v8"` 12 | IncludeClusterAuthorizedOperations bool `kafka:"min=v8,max=v8"` 13 | IncludeTopicAuthorizedOperations bool `kafka:"min=v8,max=v8"` 14 | } 15 | 16 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.Metadata } 17 | 18 | type Response struct { 19 | ThrottleTimeMs int32 `kafka:"min=v3,max=v8"` 20 | Brokers []ResponseBroker `kafka:"min=v0,max=v8"` 21 | ClusterID string `kafka:"min=v2,max=v8,nullable"` 22 | ControllerID int32 `kafka:"min=v1,max=v8"` 23 | Topics []ResponseTopic `kafka:"min=v0,max=v8"` 24 | ClusterAuthorizedOperations int32 `kafka:"min=v8,max=v8"` 25 | } 26 | 27 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.Metadata } 28 | 29 | type ResponseBroker struct { 30 | NodeID int32 `kafka:"min=v0,max=v8"` 31 | Host string `kafka:"min=v0,max=v8"` 32 | Port int32 `kafka:"min=v0,max=v8"` 33 | Rack string `kafka:"min=v1,max=v8,nullable"` 34 | } 35 | 36 | type ResponseTopic struct { 37 | ErrorCode int16 `kafka:"min=v0,max=v8"` 38 | Name string `kafka:"min=v0,max=v8"` 39 | IsInternal bool `kafka:"min=v1,max=v8"` 40 | Partitions []ResponsePartition `kafka:"min=v0,max=v8"` 41 | TopicAuthorizedOperations int32 `kafka:"min=v8,max=v8"` 42 | } 43 | 44 | type ResponsePartition struct { 45 | ErrorCode int16 `kafka:"min=v0,max=v8"` 46 | PartitionIndex int32 `kafka:"min=v0,max=v8"` 47 | LeaderID int32 `kafka:"min=v0,max=v8"` 48 | LeaderEpoch int32 `kafka:"min=v7,max=v8"` 49 | 
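// ReplicaNodes and IsrNodes hold broker IDs; OfflineReplicas (v5+) reports replicas on offline log directories, added by KIP-112.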
ReplicaNodes []int32 `kafka:"min=v0,max=v8"` 50 | IsrNodes []int32 `kafka:"min=v0,max=v8"` 51 | OfflineReplicas []int32 `kafka:"min=v5,max=v8"` 52 | } 53 | -------------------------------------------------------------------------------- /protocol/offsetcommit/offsetcommit.go: -------------------------------------------------------------------------------- 1 | package offsetcommit 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | GroupID string `kafka:"min=v0,max=v7"` 11 | GenerationID int32 `kafka:"min=v1,max=v7"` 12 | MemberID string `kafka:"min=v1,max=v7"` 13 | RetentionTimeMs int64 `kafka:"min=v2,max=v4"` 14 | GroupInstanceID string `kafka:"min=v7,max=v7,nullable"` 15 | Topics []RequestTopic `kafka:"min=v0,max=v7"` 16 | } 17 | 18 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } 19 | 20 | func (r *Request) Group() string { return r.GroupID } 21 | 22 | type RequestTopic struct { 23 | Name string `kafka:"min=v0,max=v7"` 24 | Partitions []RequestPartition `kafka:"min=v0,max=v7"` 25 | } 26 | 27 | type RequestPartition struct { 28 | PartitionIndex int32 `kafka:"min=v0,max=v7"` 29 | CommittedOffset int64 `kafka:"min=v0,max=v7"` 30 | CommitTimestamp int64 `kafka:"min=v1,max=v1"` 31 | CommittedLeaderEpoch int32 `kafka:"min=v6,max=v7"` 32 | CommittedMetadata string `kafka:"min=v0,max=v7,nullable"` 33 | } 34 | 35 | var ( 36 | _ protocol.GroupMessage = (*Request)(nil) 37 | ) 38 | 39 | type Response struct { 40 | ThrottleTimeMs int32 `kafka:"min=v3,max=v7"` 41 | Topics []ResponseTopic `kafka:"min=v0,max=v7"` 42 | } 43 | 44 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } 45 | 46 | type ResponseTopic struct { 47 | Name string `kafka:"min=v0,max=v7"` 48 | Partitions []ResponsePartition `kafka:"min=v0,max=v7"` 49 | } 50 | 51 | type ResponsePartition struct { 52 | PartitionIndex int32 `kafka:"min=v0,max=v7"` 53 | ErrorCode int16 `kafka:"min=v0,max=v7"` 54 | } 55 | -------------------------------------------------------------------------------- /protocol/offsetdelete/offsetdelete.go: -------------------------------------------------------------------------------- 1 | package offsetdelete 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | GroupID string `kafka:"min=v0,max=v0"` 11 | Topics []RequestTopic `kafka:"min=v0,max=v0"` 12 | } 13 | 14 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } 15 | 16 | func (r *Request) Group() string { return r.GroupID } 17 | 18 | type RequestTopic struct { 19 | Name string `kafka:"min=v0,max=v0"` 20 | Partitions []RequestPartition `kafka:"min=v0,max=v0"` 21 | } 22 | 23 | type RequestPartition struct { 24 | PartitionIndex int32 `kafka:"min=v0,max=v0"` 25 | } 26 | 27 | var ( 28 | _ protocol.GroupMessage = (*Request)(nil) 29 | ) 30 | 31 | type Response struct { 32 | ErrorCode int16 `kafka:"min=v0,max=v0"` 33 | ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` 34 | Topics []ResponseTopic `kafka:"min=v0,max=v0"` 35 | } 36 | 37 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } 38 | 39 | type ResponseTopic struct { 40 | Name string `kafka:"min=v0,max=v0"` 41 | Partitions []ResponsePartition `kafka:"min=v0,max=v0"` 42 | } 43 | 44 | type ResponsePartition struct { 45 | PartitionIndex int32 `kafka:"min=v0,max=v0"` 46 | ErrorCode int16 
`kafka:"min=v0,max=v0"` 47 | } 48 | -------------------------------------------------------------------------------- /protocol/offsetdelete/offsetdelete_test.go: -------------------------------------------------------------------------------- 1 | package offsetdelete_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/kafka-go/protocol/offsetdelete" 7 | "github.com/segmentio/kafka-go/protocol/prototest" 8 | ) 9 | 10 | func TestOffsetDeleteRequest(t *testing.T) { 11 | for _, version := range []int16{0} { 12 | prototest.TestRequest(t, version, &offsetdelete.Request{ 13 | GroupID: "group-0", 14 | Topics: []offsetdelete.RequestTopic{ 15 | { 16 | Name: "topic-0", 17 | Partitions: []offsetdelete.RequestPartition{ 18 | { 19 | PartitionIndex: 0, 20 | }, 21 | { 22 | PartitionIndex: 1, 23 | }, 24 | }, 25 | }, 26 | }, 27 | }) 28 | } 29 | } 30 | 31 | func TestOffsetDeleteResponse(t *testing.T) { 32 | for _, version := range []int16{0} { 33 | prototest.TestResponse(t, version, &offsetdelete.Response{ 34 | ErrorCode: 0, 35 | Topics: []offsetdelete.ResponseTopic{ 36 | { 37 | Name: "topic-0", 38 | Partitions: []offsetdelete.ResponsePartition{ 39 | { 40 | PartitionIndex: 0, 41 | ErrorCode: 1, 42 | }, 43 | { 44 | PartitionIndex: 1, 45 | ErrorCode: 1, 46 | }, 47 | }, 48 | }, 49 | }, 50 | }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /protocol/offsetfetch/offsetfetch.go: -------------------------------------------------------------------------------- 1 | package offsetfetch 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | GroupID string `kafka:"min=v0,max=v5"` 11 | Topics []RequestTopic `kafka:"min=v0,max=v5,nullable"` 12 | } 13 | 14 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } 15 | 16 | func (r *Request) Group() string { return r.GroupID } 17 | 18 | type RequestTopic struct { 19 | Name string `kafka:"min=v0,max=v5"` 20 | PartitionIndexes []int32 `kafka:"min=v0,max=v5"` 21 | } 22 | 23 | var ( 24 | _ protocol.GroupMessage = (*Request)(nil) 25 | ) 26 | 27 | type Response struct { 28 | ThrottleTimeMs int32 `kafka:"min=v3,max=v5"` 29 | Topics []ResponseTopic `kafka:"min=v0,max=v5"` 30 | ErrorCode int16 `kafka:"min=v2,max=v5"` 31 | } 32 | 33 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } 34 | 35 | type ResponseTopic struct { 36 | Name string `kafka:"min=v0,max=v5"` 37 | Partitions []ResponsePartition `kafka:"min=v0,max=v5"` 38 | } 39 | 40 | type ResponsePartition struct { 41 | PartitionIndex int32 `kafka:"min=v0,max=v5"` 42 | CommittedOffset int64 `kafka:"min=v0,max=v5"` 43 | ComittedLeaderEpoch int32 `kafka:"min=v5,max=v5"` 44 | Metadata string `kafka:"min=v0,max=v5,nullable"` 45 | ErrorCode int16 `kafka:"min=v0,max=v5"` 46 | } 47 | -------------------------------------------------------------------------------- /protocol/prototest/bytes.go: -------------------------------------------------------------------------------- 1 | package prototest 2 | 3 | import ( 4 | "github.com/segmentio/kafka-go/protocol" 5 | ) 6 | 7 | // Bytes constructs a Bytes which exposes the content of b. 8 | func Bytes(b []byte) protocol.Bytes { 9 | return protocol.NewBytes(b) 10 | } 11 | 12 | // String constructs a Bytes which exposes the content of s. 
13 | func String(s string) protocol.Bytes { 14 | return protocol.NewBytes([]byte(s)) 15 | } 16 | -------------------------------------------------------------------------------- /protocol/prototest/response.go: -------------------------------------------------------------------------------- 1 | package prototest 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/hex" 7 | "fmt" 8 | "io" 9 | "testing" 10 | 11 | "github.com/segmentio/kafka-go/protocol" 12 | ) 13 | 14 | func TestResponse(t *testing.T, version int16, msg protocol.Message) { 15 | reset := load(msg) 16 | 17 | t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) { 18 | b := &bytes.Buffer{} 19 | 20 | if err := protocol.WriteResponse(b, version, 1234, msg); err != nil { 21 | t.Fatal(err) 22 | } 23 | 24 | reset() 25 | 26 | t.Logf("\n%s", hex.Dump(b.Bytes())) 27 | 28 | correlationID, res, err := protocol.ReadResponse(b, msg.ApiKey(), version) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | if correlationID != 1234 { 33 | t.Errorf("correlation id mismatch: %d != %d", correlationID, 1234) 34 | } 35 | if !deepEqual(msg, res) { 36 | t.Errorf("response message mismatch:") 37 | t.Logf("expected: %+v", msg) 38 | t.Logf("found: %+v", res) 39 | } 40 | closeMessage(res) 41 | }) 42 | } 43 | 44 | func BenchmarkResponse(b *testing.B, version int16, msg protocol.Message) { 45 | reset := load(msg) 46 | 47 | b.Run(fmt.Sprintf("v%d", version), func(b *testing.B) { 48 | apiKey := msg.ApiKey() 49 | buffer := &bytes.Buffer{} 50 | buffer.Grow(1024) 51 | 52 | b.Run("read", func(b *testing.B) { 53 | w := io.Writer(buffer) 54 | 55 | if err := protocol.WriteResponse(w, version, 1234, msg); err != nil { 56 | b.Fatal(err) 57 | } 58 | 59 | reset() 60 | 61 | p := buffer.Bytes() 62 | x := bytes.NewReader(p) 63 | r := bufio.NewReader(x) 64 | 65 | for i := 0; i < b.N; i++ { 66 | _, res, err := protocol.ReadResponse(r, apiKey, version) 67 | if err != nil { 68 | b.Fatal(err) 69 | } 70 | closeMessage(res) 71 | x.Reset(p) 72 | r.Reset(x) 73 | } 74 | 75 | b.SetBytes(int64(len(p))) 76 | buffer.Reset() 77 | }) 78 | 79 | b.Run("write", func(b *testing.B) { 80 | w := io.Writer(buffer) 81 | n := int64(0) 82 | 83 | for i := 0; i < b.N; i++ { 84 | if err := protocol.WriteResponse(w, version, 1234, msg); err != nil { 85 | b.Fatal(err) 86 | } 87 | reset() 88 | n = int64(buffer.Len()) 89 | buffer.Reset() 90 | } 91 | 92 | b.SetBytes(n) 93 | }) 94 | }) 95 | } 96 | -------------------------------------------------------------------------------- /protocol/response_test.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func TestReadResponseUnexpectedTLSDetection(t *testing.T) { 12 | var buf bytes.Buffer 13 | 14 | buf.Write([]byte{tlsAlertByte, 0x03, 0x03, 10, 0, 0, 0}) 15 | 16 | correlationID, _, err := ReadResponse(&buf, ApiVersions, 0) 17 | if !errors.Is(err, io.ErrUnexpectedEOF) { 18 | t.Fatalf("expected an io.ErrUnexpectedEOF from ReadResponse got %v", err) 19 | } 20 | 21 | if !strings.Contains(err.Error(), "broker appears to be expecting TLS") { 22 | t.Fatalf("expected error message to contain %s got %s", "broker appears to be expecting TLS", err.Error()) 23 | } 24 | 25 | if correlationID != 0 { 26 | t.Fatalf("expected correlationID of 0 got %d", correlationID) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /protocol/roundtrip.go:
-------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // RoundTrip sends a request to a kafka broker and returns the response. 8 | func RoundTrip(rw io.ReadWriter, apiVersion int16, correlationID int32, clientID string, req Message) (Message, error) { 9 | if err := WriteRequest(rw, apiVersion, correlationID, clientID, req); err != nil { 10 | return nil, err 11 | } 12 | if !hasResponse(req) { 13 | return nil, nil 14 | } 15 | id, res, err := ReadResponse(rw, req.ApiKey(), apiVersion) 16 | if err != nil { 17 | return nil, err 18 | } 19 | if id != correlationID { 20 | return nil, Errorf("correlation id mismatch (expected=%d, found=%d)", correlationID, id) 21 | } 22 | return res, nil 23 | } 24 | 25 | func hasResponse(msg Message) bool { 26 | x, _ := msg.(interface{ HasResponse() bool }) 27 | return x == nil || x.HasResponse() 28 | } 29 | -------------------------------------------------------------------------------- /protocol/saslauthenticate/saslauthenticate.go: -------------------------------------------------------------------------------- 1 | package saslauthenticate 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | 7 | "github.com/segmentio/kafka-go/protocol" 8 | ) 9 | 10 | func init() { 11 | protocol.Register(&Request{}, &Response{}) 12 | } 13 | 14 | type Request struct { 15 | AuthBytes []byte `kafka:"min=v0,max=v1"` 16 | } 17 | 18 | func (r *Request) RawExchange(rw io.ReadWriter) (protocol.Message, error) { 19 | if err := r.writeTo(rw); err != nil { 20 | return nil, err 21 | } 22 | return r.readResp(rw) 23 | } 24 | 25 | func (*Request) Required(versions map[protocol.ApiKey]int16) bool { 26 | const v0 = 0 27 | return versions[protocol.SaslHandshake] == v0 28 | } 29 | 30 | func (r *Request) writeTo(w io.Writer) error { 31 | size := len(r.AuthBytes) + 4 32 | buf := make([]byte, size) 33 | binary.BigEndian.PutUint32(buf[:4], uint32(len(r.AuthBytes))) 34 | copy(buf[4:], r.AuthBytes) 35 | _, err := w.Write(buf) 36 | return err 37 | } 38 | 39 | func (r *Request) readResp(read io.Reader) (protocol.Message, error) { 40 | var lenBuf [4]byte 41 | if _, err := io.ReadFull(read, lenBuf[:]); err != nil { 42 | return nil, err 43 | } 44 | respLen := int32(binary.BigEndian.Uint32(lenBuf[:])) 45 | data := make([]byte, respLen) 46 | 47 | if _, err := io.ReadFull(read, data[:]); err != nil { 48 | return nil, err 49 | } 50 | return &Response{ 51 | AuthBytes: data, 52 | }, nil 53 | } 54 | 55 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } 56 | 57 | type Response struct { 58 | ErrorCode int16 `kafka:"min=v0,max=v1"` 59 | ErrorMessage string `kafka:"min=v0,max=v1,nullable"` 60 | AuthBytes []byte `kafka:"min=v0,max=v1"` 61 | SessionLifetimeMs int64 `kafka:"min=v1,max=v1"` 62 | } 63 | 64 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } 65 | 66 | var _ protocol.RawExchanger = (*Request)(nil) 67 | -------------------------------------------------------------------------------- /protocol/saslhandshake/saslhandshake.go: -------------------------------------------------------------------------------- 1 | package saslhandshake 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | Mechanism string `kafka:"min=v0,max=v1"` 11 | } 12 | 13 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } 14 | 15 | type Response struct { 16 | 
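// ErrorCode is UNSUPPORTED_SASL_MECHANISM (33) when the requested mechanism is not enabled on the broker; Mechanisms lists what the broker accepts.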
ErrorCode int16 `kafka:"min=v0,max=v1"` 17 | Mechanisms []string `kafka:"min=v0,max=v1"` 18 | } 19 | 20 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } 21 | -------------------------------------------------------------------------------- /protocol/size.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "math/bits" 5 | ) 6 | 7 | func sizeOfVarString(s string) int { 8 | return sizeOfVarInt(int64(len(s))) + len(s) 9 | } 10 | 11 | func sizeOfVarNullBytes(b []byte) int { 12 | if b == nil { 13 | return sizeOfVarInt(-1) 14 | } 15 | n := len(b) 16 | return sizeOfVarInt(int64(n)) + n 17 | } 18 | 19 | func sizeOfVarNullBytesIface(b Bytes) int { 20 | if b == nil { 21 | return sizeOfVarInt(-1) 22 | } 23 | n := b.Len() 24 | return sizeOfVarInt(int64(n)) + n 25 | } 26 | 27 | func sizeOfVarInt(i int64) int { 28 | return sizeOfUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) // zig-zag encoding 29 | } 30 | 31 | func sizeOfUnsignedVarInt(i uint64) int { 32 | return (bits.Len64(i|1) + 6) / 7 33 | } 34 | -------------------------------------------------------------------------------- /protocol/syncgroup/syncgroup.go: -------------------------------------------------------------------------------- 1 | package syncgroup 2 | 3 | import "github.com/segmentio/kafka-go/protocol" 4 | 5 | func init() { 6 | protocol.Register(&Request{}, &Response{}) 7 | } 8 | 9 | type Request struct { 10 | // We need at least one tagged field to indicate that this is a "flexible" message 11 | // type. 12 | _ struct{} `kafka:"min=v4,max=v5,tag"` 13 | 14 | GroupID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` 15 | GenerationID int32 `kafka:"min=v0,max=v5"` 16 | MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` 17 | GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v5,nullable,compact"` 18 | ProtocolType string `kafka:"min=v5,max=v5"` 19 | ProtocolName string `kafka:"min=v5,max=v5"` 20 | Assignments []RequestAssignment `kafka:"min=v0,max=v5"` 21 | } 22 | 23 | type RequestAssignment struct { 24 | // We need at least one tagged field to indicate that this is a "flexible" message 25 | // type. 26 | _ struct{} `kafka:"min=v4,max=v5,tag"` 27 | 28 | MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` 29 | Assignment []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` 30 | } 31 | 32 | func (r *Request) ApiKey() protocol.ApiKey { return protocol.SyncGroup } 33 | 34 | func (r *Request) Group() string { return r.GroupID } 35 | 36 | var _ protocol.GroupMessage = (*Request)(nil) 37 | 38 | type Response struct { 39 | // We need at least one tagged field to indicate that this is a "flexible" message 40 | // type. 41 | _ struct{} `kafka:"min=v4,max=v5,tag"` 42 | 43 | ThrottleTimeMS int32 `kafka:"min=v1,max=v5"` 44 | ErrorCode int16 `kafka:"min=v0,max=v5"` 45 | ProtocolType string `kafka:"min=v5,max=v5"` 46 | ProtocolName string `kafka:"min=v5,max=v5"` 47 | Assignments []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` 48 | } 49 | 50 | func (r *Response) ApiKey() protocol.ApiKey { return protocol.SyncGroup } 51 | -------------------------------------------------------------------------------- /record.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "github.com/segmentio/kafka-go/protocol" 5 | ) 6 | 7 | // Header is a key/value pair type representing headers set on records.
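// For example: Header{Key: "trace-id", Value: []byte("abc123")} (illustrative values).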
8 | type Header = protocol.Header 9 | 10 | // Bytes is an interface representing a sequence of bytes. This abstraction 11 | // makes it possible for programs to inject data into produce requests without 12 | // having to load it into an intermediary buffer, or to read record keys and values 13 | // from a fetch response directly from internal buffers. 14 | // 15 | // Bytes are not safe to use concurrently from multiple goroutines. 16 | type Bytes = protocol.Bytes 17 | 18 | // NewBytes constructs a Bytes value from a byte slice. 19 | // 20 | // If b is nil, nil is returned. 21 | func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) } 22 | 23 | // ReadAll reads b into a byte slice. 24 | func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) } 25 | 26 | // Record is an interface representing a single kafka record. 27 | // 28 | // Record values are not safe to use concurrently from multiple goroutines. 29 | type Record = protocol.Record 30 | 31 | // RecordReader is an interface representing a sequence of records. Record sets 32 | // are used in both produce and fetch requests to represent the sequence of 33 | // records that are sent to or received from kafka brokers. 34 | // 35 | // RecordReader values are not safe to use concurrently from multiple goroutines. 36 | type RecordReader = protocol.RecordReader 37 | 38 | // NewRecordReader constructs a RecordReader which exposes the sequence of records 39 | // passed as arguments. 40 | func NewRecordReader(records ...Record) RecordReader { 41 | return protocol.NewRecordReader(records...) 42 | } 43 | -------------------------------------------------------------------------------- /resolver.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "net" 6 | ) 7 | 8 | // The Resolver interface is used as an abstraction to provide service discovery 9 | // of the hosts of a kafka cluster. 10 | type Resolver interface { 11 | // LookupHost looks up the given host using the local resolver. 12 | // It returns a slice of that host's addresses. 13 | LookupHost(ctx context.Context, host string) (addrs []string, err error) 14 | } 15 | 16 | // BrokerResolver is an interface implemented by types that translate host 17 | // names into a network address. 18 | // 19 | // This resolver is not intended to be a general purpose interface. Instead, 20 | // it is tailored to the particular needs of the kafka protocol, with the goal 21 | // being to provide a flexible mechanism for extending broker name resolution 22 | // while retaining context that is specific to interacting with a kafka cluster. 23 | // 24 | // Resolvers must be safe to use from multiple goroutines. 25 | type BrokerResolver interface { 26 | // Returns the IP addresses of the broker passed as argument. 27 | LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) 28 | } 29 | 30 | // NewBrokerResolver constructs a BrokerResolver from r. 31 | // 32 | // If r is nil, net.DefaultResolver is used instead.
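// A minimal usage sketch (illustrative): transport := &kafka.Transport{Resolver: kafka.NewBrokerResolver(nil)}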
33 | func NewBrokerResolver(r *net.Resolver) BrokerResolver { 34 | return brokerResolver{r} 35 | } 36 | 37 | type brokerResolver struct { 38 | *net.Resolver 39 | } 40 | 41 | func (r brokerResolver) LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) { 42 | ipAddrs, err := r.LookupIPAddr(ctx, broker.Host) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | if len(ipAddrs) == 0 { 48 | return nil, &net.DNSError{ 49 | Err: "no addresses were returned by the resolver", 50 | Name: broker.Host, 51 | IsTemporary: true, 52 | IsNotFound: true, 53 | } 54 | } 55 | 56 | return ipAddrs, nil 57 | } 58 | -------------------------------------------------------------------------------- /resource_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import "testing" 4 | 5 | func TestResourceTypeMarshal(t *testing.T) { 6 | for i := ResourceTypeUnknown; i <= ResourceTypeDelegationToken; i++ { 7 | text, err := i.MarshalText() 8 | if err != nil { 9 | t.Errorf("couldn't marshal %d to text: %s", i, err) 10 | } 11 | var got ResourceType 12 | err = got.UnmarshalText(text) 13 | if err != nil { 14 | t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err) 15 | } 16 | if got != i { 17 | t.Errorf("got %d, want %d", got, i) 18 | } 19 | } 20 | } 21 | 22 | // Verify that the text version of ResourceTypeBroker is "Cluster". 23 | // This is added since ResourceTypeBroker and ResourceTypeCluster 24 | // have the same value. 25 | func TestResourceTypeBroker(t *testing.T) { 26 | text, err := ResourceTypeBroker.MarshalText() 27 | if err != nil { 28 | t.Errorf("couldn't marshal %d to text: %s", ResourceTypeBroker, err) 29 | } 30 | if string(text) != "Cluster" { 31 | t.Errorf("got %s, want %s", string(text), "Cluster") 32 | } 33 | var got ResourceType 34 | err = got.UnmarshalText(text) 35 | if err != nil { 36 | t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err) 37 | } 38 | if got != ResourceTypeBroker { 39 | t.Errorf("got %d, want %d", got, ResourceTypeBroker) 40 | } 41 | } 42 | 43 | func TestPatternTypeMarshal(t *testing.T) { 44 | for i := PatternTypeUnknown; i <= PatternTypePrefixed; i++ { 45 | text, err := i.MarshalText() 46 | if err != nil { 47 | t.Errorf("couldn't marshal %d to text: %s", i, err) 48 | } 49 | var got PatternType 50 | err = got.UnmarshalText(text) 51 | if err != nil { 52 | t.Errorf("couldn't unmarshal %s to PatternType: %s", text, err) 53 | } 54 | if got != i { 55 | t.Errorf("got %d, want %d", got, i) 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /sasl/aws_msk_iam/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/sasl/aws_msk_iam 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.41.3 7 | github.com/segmentio/kafka-go v0.4.28 8 | ) 9 | -------------------------------------------------------------------------------- /sasl/aws_msk_iam_v2/README.md: -------------------------------------------------------------------------------- 1 | # AWS MSK IAM V2 2 | 3 | This extension provides the capability to authenticate with [AWS Managed Apache Kafka](https://aws.amazon.com/msk/) 4 | through AWS IAM. 5 | 6 | ## How to use 7 | 8 | This module is an extension for MSK users and is therefore isolated from the `kafka-go` module. 9 | You can add this module to your dependencies by running the command below.
10 | 11 | ```shell 12 | go get github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 13 | ``` 14 | 15 | Please find the sample code in [example_test.go](./example_test.go), you can use the `Mechanism` for SASL authentication of `Reader` and `Writer`. 16 | -------------------------------------------------------------------------------- /sasl/aws_msk_iam_v2/example_test.go: -------------------------------------------------------------------------------- 1 | package aws_msk_iam_v2_test 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "time" 7 | 8 | "github.com/aws/aws-sdk-go-v2/config" 9 | "github.com/segmentio/kafka-go" 10 | "github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2" 11 | ) 12 | 13 | func main() { 14 | cfg, err := config.LoadDefaultConfig(context.TODO()) 15 | if err != nil { 16 | panic(err) 17 | } 18 | mechanism := aws_msk_iam_v2.NewMechanism(cfg) 19 | _ = kafka.ReaderConfig{ 20 | Brokers: []string{"https://localhost"}, 21 | GroupID: "some-consumer-group", 22 | GroupTopics: []string{"some-topic"}, 23 | Dialer: &kafka.Dialer{ 24 | Timeout: 10 * time.Second, 25 | DualStack: true, 26 | SASLMechanism: mechanism, 27 | TLS: &tls.Config{}, 28 | }, 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /sasl/aws_msk_iam_v2/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.16.12 7 | github.com/aws/aws-sdk-go-v2/config v1.17.2 8 | github.com/aws/aws-sdk-go-v2/credentials v1.12.15 9 | github.com/segmentio/kafka-go v0.4.34 10 | github.com/stretchr/testify v1.8.0 11 | ) 12 | -------------------------------------------------------------------------------- /sasl/plain/plain.go: -------------------------------------------------------------------------------- 1 | package plain 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/segmentio/kafka-go/sasl" 8 | ) 9 | 10 | // Mechanism implements the PLAIN mechanism and passes the credentials in clear 11 | // text. 12 | type Mechanism struct { 13 | Username string 14 | Password string 15 | } 16 | 17 | func (Mechanism) Name() string { 18 | return "PLAIN" 19 | } 20 | 21 | func (m Mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) { 22 | // Mechanism is stateless, so it can also implement sasl.Session 23 | return m, []byte(fmt.Sprintf("\x00%s\x00%s", m.Username, m.Password)), nil 24 | } 25 | 26 | func (m Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) { 27 | // kafka will return error if it rejected the credentials, so we'd only 28 | // arrive here on success. 29 | return true, nil, nil 30 | } 31 | -------------------------------------------------------------------------------- /sasl/scram/scram.go: -------------------------------------------------------------------------------- 1 | package scram 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "crypto/sha512" 7 | "hash" 8 | 9 | "github.com/segmentio/kafka-go/sasl" 10 | "github.com/xdg-go/scram" 11 | ) 12 | 13 | // Algorithm determines the hash function used by SCRAM to protect the user's 14 | // credentials. 15 | type Algorithm interface { 16 | // Name returns the algorithm's name, e.g. "SCRAM-SHA-256" 17 | Name() string 18 | 19 | // Hash returns a new hash.Hash. 
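// Implementations should return a fresh hash.Hash on every call; the SCRAM conversation derives salted passwords and HMACs from independent instances.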
20 | Hash() hash.Hash 21 | } 22 | 23 | type sha256Algo struct{} 24 | 25 | func (sha256Algo) Name() string { 26 | return "SCRAM-SHA-256" 27 | } 28 | 29 | func (sha256Algo) Hash() hash.Hash { 30 | return sha256.New() 31 | } 32 | 33 | type sha512Algo struct{} 34 | 35 | func (sha512Algo) Name() string { 36 | return "SCRAM-SHA-512" 37 | } 38 | 39 | func (sha512Algo) Hash() hash.Hash { 40 | return sha512.New() 41 | } 42 | 43 | var ( 44 | SHA256 Algorithm = sha256Algo{} 45 | SHA512 Algorithm = sha512Algo{} 46 | ) 47 | 48 | type mechanism struct { 49 | algo Algorithm 50 | client *scram.Client 51 | } 52 | 53 | type session struct { 54 | convo *scram.ClientConversation 55 | } 56 | 57 | // Mechanism returns a new sasl.Mechanism that will use SCRAM with the provided 58 | // Algorithm to securely transmit the provided credentials to Kafka. 59 | // 60 | // SCRAM-SHA-256 and SCRAM-SHA-512 were added to Kafka in 0.10.2.0. These 61 | // mechanisms will not work with older versions. 62 | func Mechanism(algo Algorithm, username, password string) (sasl.Mechanism, error) { 63 | hashGen := scram.HashGeneratorFcn(algo.Hash) 64 | client, err := hashGen.NewClient(username, password, "") 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | return &mechanism{ 70 | algo: algo, 71 | client: client, 72 | }, nil 73 | } 74 | 75 | func (m *mechanism) Name() string { 76 | return m.algo.Name() 77 | } 78 | 79 | func (m *mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) { 80 | convo := m.client.NewConversation() 81 | str, err := convo.Step("") 82 | if err != nil { 83 | return nil, nil, err 84 | } 85 | return &session{convo: convo}, []byte(str), nil 86 | } 87 | 88 | func (s *session) Next(ctx context.Context, challenge []byte) (bool, []byte, error) { 89 | str, err := s.convo.Step(string(challenge)) 90 | return s.convo.Done(), []byte(str), err 91 | } 92 | -------------------------------------------------------------------------------- /saslauthenticate.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | ) 6 | 7 | type saslAuthenticateRequestV0 struct { 8 | // Data holds the SASL payload 9 | Data []byte 10 | } 11 | 12 | func (t saslAuthenticateRequestV0) size() int32 { 13 | return sizeofBytes(t.Data) 14 | } 15 | 16 | func (t *saslAuthenticateRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { 17 | return readBytes(r, sz, &t.Data) 18 | } 19 | 20 | func (t saslAuthenticateRequestV0) writeTo(wb *writeBuffer) { 21 | wb.writeBytes(t.Data) 22 | } 23 | 24 | type saslAuthenticateResponseV0 struct { 25 | // ErrorCode holds response error code 26 | ErrorCode int16 27 | 28 | ErrorMessage string 29 | 30 | Data []byte 31 | } 32 | 33 | func (t saslAuthenticateResponseV0) size() int32 { 34 | return sizeofInt16(t.ErrorCode) + sizeofString(t.ErrorMessage) + sizeofBytes(t.Data) 35 | } 36 | 37 | func (t saslAuthenticateResponseV0) writeTo(wb *writeBuffer) { 38 | wb.writeInt16(t.ErrorCode) 39 | wb.writeString(t.ErrorMessage) 40 | wb.writeBytes(t.Data) 41 | } 42 | 43 | func (t *saslAuthenticateResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { 44 | if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { 45 | return 46 | } 47 | if remain, err = readString(r, remain, &t.ErrorMessage); err != nil { 48 | return 49 | } 50 | if remain, err = readBytes(r, remain, &t.Data); err != nil { 51 | return 52 | } 53 | return 54 | } 55 | 
-------------------------------------------------------------------------------- /saslauthenticate_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "reflect" 7 | "testing" 8 | ) 9 | 10 | func TestSASLAuthenticateRequestV0(t *testing.T) { 11 | item := saslAuthenticateRequestV0{ 12 | Data: []byte("\x00user\x00pass"), 13 | } 14 | 15 | b := bytes.NewBuffer(nil) 16 | w := &writeBuffer{w: b} 17 | item.writeTo(w) 18 | 19 | var found saslAuthenticateRequestV0 20 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 21 | if err != nil { 22 | t.Error(err) 23 | t.FailNow() 24 | } 25 | if remain != 0 { 26 | t.Errorf("expected 0 remain, got %v", remain) 27 | t.FailNow() 28 | } 29 | if !reflect.DeepEqual(item, found) { 30 | t.Error("expected item and found to be the same") 31 | t.FailNow() 32 | } 33 | } 34 | 35 | func TestSASLAuthenticateResponseV0(t *testing.T) { 36 | item := saslAuthenticateResponseV0{ 37 | ErrorCode: 2, 38 | ErrorMessage: "Message", 39 | Data: []byte("bytes"), 40 | } 41 | 42 | b := bytes.NewBuffer(nil) 43 | w := &writeBuffer{w: b} 44 | item.writeTo(w) 45 | 46 | var found saslAuthenticateResponseV0 47 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 48 | if err != nil { 49 | t.Error(err) 50 | t.FailNow() 51 | } 52 | if remain != 0 { 53 | t.Errorf("expected 0 remain, got %v", remain) 54 | t.FailNow() 55 | } 56 | if !reflect.DeepEqual(item, found) { 57 | t.Error("expected item and found to be the same") 58 | t.FailNow() 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /saslhandshake.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | ) 6 | 7 | // saslHandshakeRequestV0 implements the format for V0 and V1 SASL 8 | // requests (they are identical). 9 | type saslHandshakeRequestV0 struct { 10 | // Mechanism holds the SASL Mechanism chosen by the client. 11 | Mechanism string 12 | } 13 | 14 | func (t saslHandshakeRequestV0) size() int32 { 15 | return sizeofString(t.Mechanism) 16 | } 17 | 18 | func (t *saslHandshakeRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { 19 | return readString(r, sz, &t.Mechanism) 20 | } 21 | 22 | func (t saslHandshakeRequestV0) writeTo(wb *writeBuffer) { 23 | wb.writeString(t.Mechanism) 24 | } 25 | 26 | // saslHandshakeResponseV0 implements the format for V0 and V1 SASL 27 | // responses (they are identical). 
28 | type saslHandshakeResponseV0 struct { 29 | // ErrorCode holds response error code 30 | ErrorCode int16 31 | 32 | // Array of mechanisms enabled in the server 33 | EnabledMechanisms []string 34 | } 35 | 36 | func (t saslHandshakeResponseV0) size() int32 { 37 | return sizeofInt16(t.ErrorCode) + sizeofStringArray(t.EnabledMechanisms) 38 | } 39 | 40 | func (t saslHandshakeResponseV0) writeTo(wb *writeBuffer) { 41 | wb.writeInt16(t.ErrorCode) 42 | wb.writeStringArray(t.EnabledMechanisms) 43 | } 44 | 45 | func (t *saslHandshakeResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { 46 | if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { 47 | return 48 | } 49 | if remain, err = readStringArray(r, remain, &t.EnabledMechanisms); err != nil { 50 | return 51 | } 52 | return 53 | } 54 | -------------------------------------------------------------------------------- /saslhandshake_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "reflect" 7 | "testing" 8 | ) 9 | 10 | func TestSASLHandshakeRequestV0(t *testing.T) { 11 | item := saslHandshakeRequestV0{ 12 | Mechanism: "SCRAM-SHA-512", 13 | } 14 | 15 | b := bytes.NewBuffer(nil) 16 | w := &writeBuffer{w: b} 17 | item.writeTo(w) 18 | 19 | var found saslHandshakeRequestV0 20 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 21 | if err != nil { 22 | t.Error(err) 23 | t.FailNow() 24 | } 25 | if remain != 0 { 26 | t.Errorf("expected 0 remain, got %v", remain) 27 | t.FailNow() 28 | } 29 | if !reflect.DeepEqual(item, found) { 30 | t.Error("expected item and found to be the same") 31 | t.FailNow() 32 | } 33 | } 34 | 35 | func TestSASLHandshakeResponseV0(t *testing.T) { 36 | item := saslHandshakeResponseV0{ 37 | ErrorCode: 2, 38 | EnabledMechanisms: []string{"PLAIN", "SCRAM-SHA-512"}, 39 | } 40 | 41 | b := bytes.NewBuffer(nil) 42 | w := &writeBuffer{w: b} 43 | item.writeTo(w) 44 | 45 | var found saslHandshakeResponseV0 46 | remain, err := (&found).readFrom(bufio.NewReader(b), b.Len()) 47 | if err != nil { 48 | t.Error(err) 49 | t.FailNow() 50 | } 51 | if remain != 0 { 52 | t.Errorf("expected 0 remain, got %v", remain) 53 | t.FailNow() 54 | } 55 | if !reflect.DeepEqual(item, found) { 56 | t.Error("expected item and found to be the same") 57 | t.FailNow() 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /scripts/wait-for-kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | COUNTER=0; 4 | echo foo | nc localhost 9092 5 | STATUS=$? 6 | ATTEMPTS=60 7 | until [ ${STATUS} -eq 0 ] || [ "$COUNTER" -ge "${ATTEMPTS}" ]; 8 | do 9 | let COUNTER=$COUNTER+1; 10 | sleep 1; 11 | echo "[$COUNTER] waiting for 9092 port to be open"; 12 | echo foo | nc localhost 9092 13 | STATUS=$?
14 | done 15 | 16 | if [ ${STATUS} -ne 0 ]; 17 | then 18 | echo "Kafka is not running, failing" 19 | exit 1 20 | fi -------------------------------------------------------------------------------- /sizeof.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import "fmt" 4 | 5 | type sizable interface { 6 | size() int32 7 | } 8 | 9 | func sizeof(a interface{}) int32 { 10 | switch v := a.(type) { 11 | case int8: 12 | return 1 13 | case int16: 14 | return 2 15 | case int32: 16 | return 4 17 | case int64: 18 | return 8 19 | case string: 20 | return sizeofString(v) 21 | case bool: 22 | return 1 23 | case []byte: 24 | return sizeofBytes(v) 25 | case sizable: 26 | return v.size() 27 | } 28 | panic(fmt.Sprintf("unsupported type: %T", a)) 29 | } 30 | 31 | func sizeofInt16(_ int16) int32 { 32 | return 2 33 | } 34 | 35 | func sizeofInt32(_ int32) int32 { 36 | return 4 37 | } 38 | 39 | func sizeofInt64(_ int64) int32 { 40 | return 8 41 | } 42 | 43 | func sizeofString(s string) int32 { 44 | return 2 + int32(len(s)) 45 | } 46 | 47 | func sizeofNullableString(s *string) int32 { 48 | if s == nil { 49 | return 2 50 | } 51 | return sizeofString(*s) 52 | } 53 | 54 | func sizeofBytes(b []byte) int32 { 55 | return 4 + int32(len(b)) 56 | } 57 | 58 | func sizeofArray(n int, f func(int) int32) int32 { 59 | s := int32(4) 60 | for i := 0; i != n; i++ { 61 | s += f(i) 62 | } 63 | return s 64 | } 65 | 66 | func sizeofInt32Array(a []int32) int32 { 67 | return 4 + (4 * int32(len(a))) 68 | } 69 | 70 | func sizeofStringArray(a []string) int32 { 71 | return sizeofArray(len(a), func(i int) int32 { return sizeofString(a[i]) }) 72 | } 73 | -------------------------------------------------------------------------------- /snappy/snappy.go: -------------------------------------------------------------------------------- 1 | // Package snappy does nothing; it's kept for backward compatibility to avoid 2 | // breaking the majority of programs that imported it to install the compression 3 | // codec, which is now always included.
 4 | package snappy
 5 | 
 6 | import "github.com/segmentio/kafka-go/compress/snappy"
 7 | 
 8 | type CompressionCodec = snappy.Codec
 9 | 
10 | type Framing = snappy.Framing
11 | 
12 | const (
13 | 	Code     = 2
14 | 	Framed   = snappy.Framed
15 | 	Unframed = snappy.Unframed
16 | )
17 | 
18 | func NewCompressionCodec() *CompressionCodec {
19 | 	return NewCompressionCodecFraming(Framed)
20 | }
21 | 
22 | func NewCompressionCodecFraming(framing Framing) *CompressionCodec {
23 | 	return &CompressionCodec{Framing: framing}
24 | }
25 | 
--------------------------------------------------------------------------------
/testing/conn.go:
--------------------------------------------------------------------------------
 1 | package testing
 2 | 
 3 | import (
 4 | 	"context"
 5 | 	"net"
 6 | 	"sync"
 7 | )
 8 | 
 9 | type ConnWaitGroup struct {
10 | 	DialFunc func(context.Context, string, string) (net.Conn, error)
11 | 	sync.WaitGroup
12 | }
13 | 
14 | func (g *ConnWaitGroup) Dial(ctx context.Context, network, address string) (net.Conn, error) {
15 | 	c, err := g.DialFunc(ctx, network, address)
16 | 	if err != nil {
17 | 		return nil, err
18 | 	}
19 | 	g.Add(1)
20 | 	return &groupConn{Conn: c, group: g}, nil
21 | }
22 | 
23 | type groupConn struct {
24 | 	net.Conn
25 | 	group *ConnWaitGroup
26 | 	once  sync.Once
27 | }
28 | 
29 | func (c *groupConn) Close() error {
30 | 	defer c.once.Do(c.group.Done)
31 | 	return c.Conn.Close()
32 | }
33 | 
--------------------------------------------------------------------------------
/testing/version.go:
--------------------------------------------------------------------------------
 1 | package testing
 2 | 
 3 | import (
 4 | 	"os"
 5 | 	"strconv"
 6 | 	"strings"
 7 | )
 8 | 
 9 | type semver []int
10 | 
11 | func (v semver) atLeast(other semver) bool {
12 | 	for i := range v {
13 | 		if i >= len(other) {
14 | 			break
15 | 		}
16 | 		if v[i] < other[i] {
17 | 			return false
18 | 		}
19 | 		if v[i] > other[i] {
20 | 			return true
21 | 		}
22 | 	}
23 | 	for i := len(v); i < len(other); i++ {
24 | 		if other[i] > 0 {
25 | 			return false
26 | 		}
27 | 	}
28 | 	return true
29 | }
30 | 
31 | // kafkaVersion is set in the CircleCI config. It can also be provided on the
32 | // command line in order to target a particular kafka version.
33 | var kafkaVersion = parseVersion(os.Getenv("KAFKA_VERSION"))
34 | 
35 | // KafkaIsAtLeast returns true when the test broker is running a protocol
36 | // version that is semver or newer. It determines the broker's version using
37 | // the `KAFKA_VERSION` environment variable. If the variable is unset, this
38 | // function returns true.
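//
// A typical use in a test is to skip when the broker is too old, as in this
// sketch (the version string and skip message are illustrative):
//
//	if !KafkaIsAtLeast("0.11.0") {
//		t.Skip("broker does not support the API under test")
//	}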
39 | func KafkaIsAtLeast(semver string) bool {
40 | 	return kafkaVersion.atLeast(parseVersion(semver))
41 | }
42 | 
43 | func parseVersion(semver string) semver {
44 | 	if semver == "" {
45 | 		return nil
46 | 	}
47 | 	parts := strings.Split(semver, ".")
48 | 	version := make([]int, len(parts))
49 | 	for i := range version {
50 | 		v, err := strconv.Atoi(parts[i])
51 | 		if err != nil {
52 | 			// panicking because tests should be using hard-coded version values
53 | 			panic("invalid version string: " + semver)
54 | 		}
55 | 		version[i] = v
56 | 	}
57 | 	return version
58 | }
59 | 
--------------------------------------------------------------------------------
/testing/version_test.go:
--------------------------------------------------------------------------------
 1 | package testing
 2 | 
 3 | import (
 4 | 	"testing"
 5 | )
 6 | 
 7 | func TestSemVersionAtLeastEmpty(t *testing.T) {
 8 | 	result := semver([]int{}).atLeast(semver([]int{1, 2}))
 9 | 	if result {
10 | 		t.Errorf("Empty version can't be at least 1.2")
11 | 	}
12 | }
13 | 
14 | func TestSemVersionAtLeastShorter(t *testing.T) {
15 | 	result := semver([]int{1, 1}).atLeast(semver([]int{1, 1, 2}))
16 | 	if result {
17 | 		t.Errorf("Version 1.1 can't be at least 1.1.2")
18 | 	}
19 | }
20 | 
--------------------------------------------------------------------------------
/time.go:
--------------------------------------------------------------------------------
 1 | package kafka
 2 | 
 3 | import (
 4 | 	"math"
 5 | 	"time"
 6 | )
 7 | 
 8 | const (
 9 | 	maxTimeout = time.Duration(math.MaxInt32) * time.Millisecond
10 | 	minTimeout = time.Duration(math.MinInt32) * time.Millisecond
11 | 	defaultRTT = 1 * time.Second
12 | )
13 | 
14 | func makeTime(t int64) time.Time {
15 | 	if t <= 0 {
16 | 		return time.Time{}
17 | 	}
18 | 	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)).UTC()
19 | }
20 | 
21 | func timestamp(t time.Time) int64 {
22 | 	if t.IsZero() {
23 | 		return 0
24 | 	}
25 | 	return t.UnixNano() / int64(time.Millisecond)
26 | }
27 | 
28 | func makeDuration(ms int32) time.Duration {
29 | 	return time.Duration(ms) * time.Millisecond
30 | }
31 | 
32 | func milliseconds(d time.Duration) int32 {
33 | 	switch {
34 | 	case d > maxTimeout:
35 | 		d = maxTimeout
36 | 	case d < minTimeout:
37 | 		d = minTimeout
38 | 	}
39 | 	return int32(d / time.Millisecond)
40 | }
41 | 
42 | func deadlineToTimeout(deadline time.Time, now time.Time) time.Duration {
43 | 	if deadline.IsZero() {
44 | 		return maxTimeout
45 | 	}
46 | 	return deadline.Sub(now)
47 | }
48 | 
49 | func adjustDeadlineForRTT(deadline time.Time, now time.Time, rtt time.Duration) time.Time {
50 | 	if !deadline.IsZero() {
51 | 		timeout := deadline.Sub(now)
52 | 		if timeout < rtt {
53 | 			rtt = timeout / 4
54 | 		}
55 | 		deadline = deadline.Add(-rtt)
56 | 	}
57 | 	return deadline
58 | }
59 | 
--------------------------------------------------------------------------------
/topics/list_topics.go:
--------------------------------------------------------------------------------
 1 | // Package topics is an experimental package that provides additional tooling
 2 | // around Kafka Topics. This package does not make any promises around
 3 | // backwards compatibility.
 4 | package topics
 5 | 
 6 | import (
 7 | 	"context"
 8 | 	"errors"
 9 | 	"regexp"
10 | 
11 | 	"github.com/segmentio/kafka-go"
12 | )
13 | 
14 | // List returns a slice of all the Topics.
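//
// A minimal sketch of calling it from another package (the broker address is
// illustrative, and client setup is otherwise elided):
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	all, err := topics.List(context.Background(), client)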
15 | func List(ctx context.Context, client *kafka.Client) (topics []kafka.Topic, err error) {
16 | 	if client == nil {
17 | 		return nil, errors.New("client is required")
18 | 	}
19 | 	response, err := client.Metadata(ctx, &kafka.MetadataRequest{
20 | 		Addr: client.Addr,
21 | 	})
22 | 	if err != nil {
23 | 		return nil, err
24 | 	}
25 | 
26 | 	return response.Topics, nil
27 | }
28 | 
29 | // ListRe returns a slice of Topics that match a regex.
30 | func ListRe(ctx context.Context, cli *kafka.Client, re *regexp.Regexp) (topics []kafka.Topic, err error) {
31 | 	alltopics, err := List(ctx, cli)
32 | 	if err != nil {
33 | 		return nil, err
34 | 	}
35 | 
36 | 	for _, val := range alltopics {
37 | 		if re.MatchString(val.Name) {
38 | 			topics = append(topics, val)
39 | 		}
40 | 	}
41 | 	return topics, nil
42 | }
43 | 
--------------------------------------------------------------------------------
/zstd/zstd.go:
--------------------------------------------------------------------------------
 1 | // Package zstd does nothing; it's kept for backward compatibility to avoid
 2 | // breaking the majority of programs that imported it to install the compression
 3 | // codec, which is now always included.
 4 | package zstd
 5 | 
 6 | import "github.com/segmentio/kafka-go/compress/zstd"
 7 | 
 8 | const (
 9 | 	Code                    = 4
10 | 	DefaultCompressionLevel = 3
11 | )
12 | 
13 | type CompressionCodec = zstd.Codec
14 | 
15 | func NewCompressionCodec() *CompressionCodec {
16 | 	return NewCompressionCodecWith(DefaultCompressionLevel)
17 | }
18 | 
19 | func NewCompressionCodecWith(level int) *CompressionCodec {
20 | 	return &CompressionCodec{Level: level}
21 | }
22 | 
--------------------------------------------------------------------------------