├── go.sum ├── Makefile ├── go.mod ├── test ├── mtls │ ├── ca-cert.srl │ ├── broker.p12 │ ├── kafka.server.keystore.jks │ ├── kafka.server.truststore.jks │ ├── extensions.cnf │ ├── broker-csr.pem │ ├── client-csr.pem │ ├── Makefile │ ├── broker-cert.pem │ ├── client-cert.pem │ ├── ca-cert.pem │ ├── ca-key.pem │ ├── broker-key.pem │ └── client-key.pem ├── v1_0 │ ├── README.md │ ├── Dockerfile │ ├── zookeeper.properties │ ├── docker-compose.yaml │ ├── log4j.properties │ ├── server.properties │ └── server.properties.v1_0 ├── docker-compose.yaml ├── Dockerfile ├── zookeeper.properties ├── log4j.properties ├── wait-for-it.sh └── server.properties ├── api ├── doc.go ├── Heartbeat │ ├── response.go │ └── request.go ├── SyncGroup │ ├── response.go │ ├── response.go.v2 │ ├── request.go │ └── request.go.v2 ├── ApiVersions │ ├── request.go │ └── response.go ├── FindCoordinator │ ├── response.go │ ├── request.go │ ├── util.go │ └── util_test.go ├── OffsetCommit │ ├── response.go │ └── request.go ├── JoinGroup │ ├── response.go │ ├── response.go.v4 │ ├── request.go.v4 │ └── request.go ├── OffsetFetch │ ├── response.go │ └── request.go ├── Metadata │ ├── request.go │ └── response.go ├── CreateTopics │ ├── response.go │ └── request.go ├── ListOffsets │ ├── response.go │ └── request.go ├── Produce │ ├── response.go │ └── request.go ├── request.go ├── Fetch │ ├── response.go │ ├── response.go.v11 │ ├── request.go │ └── request.go.v11 ├── response.go └── api.go ├── .travis.yml ├── .gitignore ├── varint ├── varint_test.go └── varint.go ├── compression └── compression.go ├── wire ├── wire_test.go └── wire.go ├── client ├── producer │ ├── v1_0_test.go │ ├── producer.go │ └── producer_test.go ├── fetcher │ ├── fetcher_test.go │ └── fetcher.go ├── partition_test.go ├── client_test.go ├── group_test.go ├── client.go ├── partition.go └── group.go ├── LICENSE ├── README.md ├── libkafka.go ├── record ├── record_test.go └── record.go ├── batch ├── batch_test.go └── batch.go └── errors.go /go.sum: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test 2 | test: 3 | go test ./... -run=Unit 4 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mkocikowski/libkafka 2 | 3 | go 1.13 4 | -------------------------------------------------------------------------------- /test/mtls/ca-cert.srl: -------------------------------------------------------------------------------- 1 | 62224CAC43AE31D4F981CF410E3949CA7D71DE90 2 | -------------------------------------------------------------------------------- /api/doc.go: -------------------------------------------------------------------------------- 1 | // Package api defines Kafka protocol requests and responses. 
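// Requests are framed as a big-endian int32 length followed by the request
// header and body (Request.Bytes in request.go builds the frame); responses
// are read back the same way by Read in response.go. Request and response
// structs in the subpackages are marshaled and unmarshaled field by field,
// in declaration order, by the wire package.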
2 | package api 3 | -------------------------------------------------------------------------------- /test/mtls/broker.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mkocikowski/libkafka/HEAD/test/mtls/broker.p12 -------------------------------------------------------------------------------- /test/mtls/kafka.server.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mkocikowski/libkafka/HEAD/test/mtls/kafka.server.keystore.jks -------------------------------------------------------------------------------- /test/mtls/kafka.server.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mkocikowski/libkafka/HEAD/test/mtls/kafka.server.truststore.jks -------------------------------------------------------------------------------- /api/Heartbeat/response.go: -------------------------------------------------------------------------------- 1 | package Heartbeat 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | } 7 | -------------------------------------------------------------------------------- /test/mtls/extensions.cnf: -------------------------------------------------------------------------------- 1 | [v3_ca] 2 | basicConstraints = CA:FALSE 3 | keyUsage = digitalSignature, keyEncipherment 4 | subjectAltName = DNS:localhost 5 | -------------------------------------------------------------------------------- /api/SyncGroup/response.go: -------------------------------------------------------------------------------- 1 | package SyncGroup 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | Assignment []byte 7 | } 8 | -------------------------------------------------------------------------------- /api/SyncGroup/response.go.v2: -------------------------------------------------------------------------------- 1 | package SyncGroup 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | Assignment []byte 7 | } 8 | -------------------------------------------------------------------------------- /api/ApiVersions/request.go: -------------------------------------------------------------------------------- 1 | package ApiVersions 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | func NewRequest() *api.Request { 8 | return &api.Request{ 9 | ApiKey: api.ApiVersions, 10 | ApiVersion: 0, 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /api/FindCoordinator/response.go: -------------------------------------------------------------------------------- 1 | package FindCoordinator 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | ErrorMessage string 7 | NodeId int32 8 | Host string 9 | Port int32 10 | } 11 | -------------------------------------------------------------------------------- /api/ApiVersions/response.go: -------------------------------------------------------------------------------- 1 | package ApiVersions 2 | 3 | type Response struct { 4 | ErrorCode int16 5 | ApiKeys []ApiKeyVersion // slice index same as ApiKey 6 | } 7 | 8 | type ApiKeyVersion struct { 9 | ApiKey int16 10 | MinVersion int16 11 | MaxVersion int16 12 | } 13 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - "1.13" 5 | 6 | services: 7 | - docker 8 | 9 | before_install: 10 | - docker-compose -f test/docker-compose.yaml build 11 | - docker-compose -f test/docker-compose.yaml up -d 12 | - test/wait-for-it.sh -h localhost -p 9092 -t 15 13 | 14 | script: go test -v ./... 15 | -------------------------------------------------------------------------------- /api/OffsetCommit/response.go: -------------------------------------------------------------------------------- 1 | package OffsetCommit 2 | 3 | type Response struct { 4 | Topics []TopicResponse 5 | } 6 | 7 | type TopicResponse struct { 8 | Name string 9 | Partitions []PartitionResponse 10 | } 11 | 12 | type PartitionResponse struct { 13 | PartitionIndex int32 14 | ErrorCode int16 15 | } 16 | -------------------------------------------------------------------------------- /test/v1_0/README.md: -------------------------------------------------------------------------------- 1 | This is a separate environment that, in addition to the 2.3 kafka on 9092, also spins up 1.0 on 2 | 9093. This is used to test "by hand" the temporary support for producing to kafka 1.0. This env 3 | needs to be up for running tests with `-tags=v1_0`. 4 | 5 | This is not in the CI. This is a hack that will go away (cough). 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | bin/ 3 | cmd/ 4 | 5 | # Binaries for programs and plugins 6 | *.exe 7 | *.exe~ 8 | *.dll 9 | *.so 10 | *.dylib 11 | 12 | # Test binary, built with `go test -c` 13 | *.test 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | 18 | # Dependency directories (remove the comment below to include it) 19 | # vendor/ 20 | -------------------------------------------------------------------------------- /api/JoinGroup/response.go: -------------------------------------------------------------------------------- 1 | package JoinGroup 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | GenerationId int32 7 | ProtocolName string 8 | Leader string 9 | MemberId string 10 | Members []Member // for leader this will not be empty 11 | } 12 | 13 | type Member struct { 14 | MemberId string 15 | Metadata []byte 16 | } 17 | -------------------------------------------------------------------------------- /api/JoinGroup/response.go.v4: -------------------------------------------------------------------------------- 1 | package JoinGroup 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | GenerationId int32 7 | ProtocolName string 8 | Leader string 9 | MemberId string 10 | Members []Member // for leader this will not be empty 11 | } 12 | 13 | type Member struct { 14 | MemberId string 15 | Metadata []byte 16 | } 17 | -------------------------------------------------------------------------------- /api/OffsetFetch/response.go: -------------------------------------------------------------------------------- 1 | package OffsetFetch 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | Topics []TopicResponse 6 | ErrorCode int16 7 | } 8 | 9 | type TopicResponse struct { 10 | Name string 11 | Partitions []PartitionResponse 12 | } 13 | 14 | type PartitionResponse struct { 15 | PartitionIndex int32 16 | CommitedOffset int64 17 | Metadata string 18 | ErrorCode int16 19 | } 20 | 
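The api subpackages follow a common pattern: a NewRequest constructor returns an *api.Request, and a plain Response struct mirrors the broker's reply. A request can be exercised directly over a TCP connection. The sketch below is not a file from this repo; the broker address, group, and topic are placeholders, and errors are only panicked on for brevity:

```go
package main

import (
	"fmt"
	"net"

	"github.com/mkocikowski/libkafka/api"
	"github.com/mkocikowski/libkafka/api/OffsetFetch"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:9092") // assumes a reachable broker
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	req := OffsetFetch.NewRequest("my-group", "my-topic", 0)
	if _, err := conn.Write(req.Bytes()); err != nil { // writes the length-prefixed frame
		panic(err)
	}
	resp, err := api.Read(conn) // reads one length-prefixed response
	if err != nil {
		panic(err)
	}
	v := &OffsetFetch.Response{}
	if err := resp.Unmarshal(v); err != nil { // reflection-based decoding via the wire package
		panic(err)
	}
	fmt.Printf("%+v\n", v)
}
```

Request.Bytes prepends the int32 frame length and api.Read consumes exactly one framed response (both are defined later in this listing, in api/request.go and api/response.go), so no extra framing code is needed; the client package builds on this exchange.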
-------------------------------------------------------------------------------- /api/Metadata/request.go: -------------------------------------------------------------------------------- 1 | package Metadata 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | func NewRequest(topics []string) *api.Request { 8 | return &api.Request{ 9 | ApiKey: api.Metadata, 10 | ApiVersion: 5, 11 | Body: Request{ 12 | Topics: topics, 13 | AllowAutoTopicCreation: false, 14 | }, 15 | } 16 | } 17 | 18 | type Request struct { 19 | Topics []string 20 | AllowAutoTopicCreation bool 21 | } 22 | -------------------------------------------------------------------------------- /varint/varint_test.go: -------------------------------------------------------------------------------- 1 | package varint 2 | 3 | import ( 4 | "encoding/binary" 5 | "math" 6 | "testing" 7 | ) 8 | 9 | func TestZigZag64(t *testing.T) { 10 | tests := []int64{0, 1, -1, math.MaxInt32, math.MinInt32, math.MaxInt64, math.MinInt64} 11 | for _, tt := range tests { 12 | buf := make([]byte, binary.MaxVarintLen64) 13 | var b []byte 14 | b = PutZigZag64(b, buf, tt) 15 | i, _ := DecodeZigZag64(b) 16 | if i != tt { 17 | t.Fatal(tt, i) 18 | } 19 | //t.Log(tt, b, i) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /api/Heartbeat/request.go: -------------------------------------------------------------------------------- 1 | package Heartbeat 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | func NewRequest(group, member string, generation int32) *api.Request { 8 | return &api.Request{ 9 | ApiKey: api.Heartbeat, 10 | ApiVersion: 1, 11 | Body: Request{ 12 | GroupId: group, 13 | GenerationId: generation, 14 | MemberId: member, 15 | }, 16 | } 17 | } 18 | 19 | type Request struct { 20 | GroupId string 21 | GenerationId int32 22 | MemberId string 23 | } 24 | -------------------------------------------------------------------------------- /api/CreateTopics/response.go: -------------------------------------------------------------------------------- 1 | package CreateTopics 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | Topics []TopicResponse 6 | } 7 | 8 | /* 9 | func (r *Response) Err() error { 10 | if r.Topics[0].ErrorCode == libkafka.NONE { 11 | return nil 12 | } 13 | return &libkafka.Error{ 14 | Code: r.Topics[0].ErrorCode, 15 | Message: r.Topics[0].ErrorMessage, 16 | } 17 | } 18 | */ 19 | 20 | type TopicResponse struct { 21 | Name string 22 | ErrorCode int16 23 | ErrorMessage string 24 | } 25 | -------------------------------------------------------------------------------- /api/FindCoordinator/request.go: -------------------------------------------------------------------------------- 1 | package FindCoordinator 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | const ( 8 | CoordinatorGroup = iota 9 | CoordinatorTransaction 10 | ) 11 | 12 | func NewRequest(groupId string) *api.Request { 13 | return &api.Request{ 14 | ApiKey: api.FindCoordinator, 15 | ApiVersion: 1, 16 | Body: Request{ 17 | Key: groupId, 18 | KeyType: CoordinatorGroup, 19 | }, 20 | } 21 | } 22 | 23 | type Request struct { 24 | Key string // groupId 25 | KeyType int8 26 | } 27 | -------------------------------------------------------------------------------- /api/FindCoordinator/util.go: -------------------------------------------------------------------------------- 1 | package FindCoordinator 2 | 3 | // 
https://github.com/apache/kafka/blob/2.4/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala#L1301
4 | type Group struct {
5 | 	Version   int16
6 | 	Group     string
7 | 	Topic     string
8 | 	Partition int32
9 | }
10 | 
11 | // https://github.com/apache/kafka/blob/2.4/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala#L1330
12 | type Val struct {
13 | 	Version   int16
14 | 	Offset    int64
15 | 	Metadata  string
16 | 	Timestamp int64
17 | }
--------------------------------------------------------------------------------
/api/OffsetFetch/request.go:
--------------------------------------------------------------------------------
1 | package OffsetFetch
2 | 
3 | import (
4 | 	"github.com/mkocikowski/libkafka/api"
5 | )
6 | 
7 | func NewRequest(group, topic string, partition int32) *api.Request {
8 | 	t := Topic{
9 | 		Name:             topic,
10 | 		PartitionIndexes: []int32{partition},
11 | 	}
12 | 	return &api.Request{
13 | 		ApiKey:     api.OffsetFetch,
14 | 		ApiVersion: 3,
15 | 		Body: Request{
16 | 			GroupId: group,
17 | 			Topics:  []Topic{t},
18 | 		},
19 | 	}
20 | }
21 | 
22 | type Request struct {
23 | 	GroupId string
24 | 	Topics  []Topic
25 | }
26 | 
27 | type Topic struct {
28 | 	Name             string
29 | 	PartitionIndexes []int32
30 | }
--------------------------------------------------------------------------------
/compression/compression.go:
--------------------------------------------------------------------------------
1 | package compression
2 | 
3 | // https://kafka.apache.org/documentation/#recordbatch
4 | const (
5 | 	None = iota
6 | 	Gzip
7 | 	Snappy
8 | 	Lz4
9 | 	Zstd
10 | 
11 | 	/*
12 | 		TimestampCreate    = 0b0000
13 | 		TimestampLogAppend = 0b1000
14 | 	*/
15 | )
16 | 
17 | // Nop implements the batch.Compressor and batch.Decompressor interfaces. Use
18 | // it to marshal and unmarshal uncompressed record batches.
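//
// An illustrative round trip (Nop passes bytes through unchanged; the
// Compressor and Decompressor interfaces themselves are defined in the
// batch package):
//
//	var nop Nop
//	compressed, _ := nop.Compress([]byte("abc"))  // == []byte("abc")
//	plain, _ := nop.Decompress(compressed)        // == []byte("abc")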
19 | type Nop struct{} 20 | 21 | func (*Nop) Compress(b []byte) ([]byte, error) { return b, nil } 22 | func (*Nop) Decompress(b []byte) ([]byte, error) { return b, nil } 23 | func (*Nop) Type() int16 { return None } 24 | -------------------------------------------------------------------------------- /api/ListOffsets/response.go: -------------------------------------------------------------------------------- 1 | package ListOffsets 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | Responses []TopicResponse 6 | } 7 | 8 | type TopicResponse struct { 9 | Topic string 10 | Partitions []PartitionResponse 11 | } 12 | 13 | type PartitionResponse struct { 14 | Partition int32 15 | ErrorCode int16 16 | Timestamp int64 17 | Offset int64 18 | } 19 | 20 | func (r *Response) Offset(topic string, partition int32) int64 { 21 | for _, t := range r.Responses { 22 | if t.Topic != topic { 23 | continue 24 | } 25 | for _, p := range t.Partitions { 26 | if p.Partition != partition { 27 | continue 28 | } 29 | return p.Offset 30 | } 31 | } 32 | return -1 33 | } 34 | -------------------------------------------------------------------------------- /api/Produce/response.go: -------------------------------------------------------------------------------- 1 | package Produce 2 | 3 | import ( 4 | "bytes" 5 | "reflect" 6 | 7 | "github.com/mkocikowski/libkafka/wire" 8 | ) 9 | 10 | func UnmarshalResponse(b []byte) (*Response, error) { 11 | r := &Response{} 12 | buf := bytes.NewBuffer(b) 13 | err := wire.Read(buf, reflect.ValueOf(r)) 14 | return r, err 15 | } 16 | 17 | type Response struct { 18 | TopicResponses []TopicResponse 19 | ThrottleTimeMs int32 20 | } 21 | 22 | type TopicResponse struct { 23 | Topic string 24 | PartitionResponses []PartitionResponse 25 | } 26 | 27 | type PartitionResponse struct { 28 | Partition int32 29 | ErrorCode int16 30 | BaseOffset int64 31 | LogAppendTime int64 32 | LogStartOffset int64 33 | } 34 | -------------------------------------------------------------------------------- /api/SyncGroup/request.go: -------------------------------------------------------------------------------- 1 | package SyncGroup 2 | 3 | // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal 4 | 5 | import ( 6 | "github.com/mkocikowski/libkafka/api" 7 | ) 8 | 9 | func NewRequest(group, member string, generation int32, assignments []Assignment) *api.Request { 10 | return &api.Request{ 11 | ApiKey: api.SyncGroup, 12 | ApiVersion: 1, 13 | Body: Request{ 14 | GroupId: group, 15 | GenerationId: generation, 16 | MemberId: member, 17 | Assignments: assignments, 18 | }, 19 | } 20 | } 21 | 22 | type Request struct { 23 | GroupId string 24 | GenerationId int32 25 | MemberId string 26 | Assignments []Assignment 27 | } 28 | 29 | type Assignment struct { 30 | MemberId string 31 | Assignment []byte 32 | } 33 | -------------------------------------------------------------------------------- /api/SyncGroup/request.go.v2: -------------------------------------------------------------------------------- 1 | package SyncGroup 2 | 3 | // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal 4 | 5 | import ( 6 | "github.com/mkocikowski/libkafka/api" 7 | ) 8 | 9 | func NewRequest(group, member string, generation int32, assignments []Assignment) *api.Request { 10 | return &api.Request{ 11 | ApiKey: api.SyncGroup, 12 | ApiVersion: 2, 13 | Body: Request{ 14 | GroupId: group, 15 | GenerationId: generation, 16 | MemberId: member, 17 | Assignments: assignments, 18 
| }, 19 | } 20 | } 21 | 22 | type Request struct { 23 | GroupId string 24 | GenerationId int32 25 | MemberId string 26 | Assignments []Assignment 27 | } 28 | 29 | type Assignment struct { 30 | MemberId string 31 | Assignment []byte 32 | } 33 | -------------------------------------------------------------------------------- /api/request.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "reflect" 7 | 8 | "github.com/mkocikowski/libkafka/wire" 9 | ) 10 | 11 | // https://kafka.apache.org/protocol 12 | // https://kafka.apache.org/documentation/#messageformat 13 | // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets 14 | 15 | type Request struct { 16 | ApiKey int16 17 | ApiVersion int16 18 | CorrelationId int32 19 | ClientId string 20 | Body interface{} 21 | } 22 | 23 | func (r *Request) Bytes() []byte { 24 | tmp := new(bytes.Buffer) 25 | wire.Write(tmp, reflect.ValueOf(r)) 26 | buf := new(bytes.Buffer) 27 | binary.Write(buf, binary.BigEndian, int32(tmp.Len())) 28 | tmp.WriteTo(buf) 29 | return buf.Bytes() 30 | } 31 | -------------------------------------------------------------------------------- /api/Fetch/response.go: -------------------------------------------------------------------------------- 1 | package Fetch 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | TopicResponses []TopicResponse 6 | } 7 | 8 | func (r *Response) PartitionResponse() *PartitionResponse { 9 | defer func() { recover() }() 10 | return &(r.TopicResponses[0].PartitionResponses[0]) 11 | } 12 | 13 | type TopicResponse struct { 14 | Topic string 15 | PartitionResponses []PartitionResponse 16 | } 17 | 18 | type PartitionResponse struct { 19 | Partition int32 20 | ErrorCode int16 21 | HighWatermark int64 22 | LastStableOffset int64 23 | LogStartOffset int64 24 | AbortedTransactions []AbortedTransaction 25 | // 26 | RecordSet []byte // NULLABLE_BYTES 27 | } 28 | 29 | type AbortedTransaction struct { 30 | ProducerId int64 31 | FirstOffset int64 32 | } 33 | -------------------------------------------------------------------------------- /wire/wire_test.go: -------------------------------------------------------------------------------- 1 | package wire 2 | 3 | import ( 4 | "bytes" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | type Outer struct { 10 | Int16 int16 11 | Int16Array []int16 12 | Struct Inner 13 | StructArray []Inner 14 | } 15 | 16 | type Inner struct { 17 | Int16 int16 18 | } 19 | 20 | func TestWriteRead(t *testing.T) { 21 | m := &Outer{ 22 | Int16: 1, 23 | Int16Array: []int16{2, 3}, 24 | Struct: Inner{4}, 25 | StructArray: []Inner{Inner{5}, Inner{6}}, 26 | } 27 | t.Logf("%+v", m) 28 | buf := new(bytes.Buffer) 29 | if err := Write(buf, reflect.ValueOf(m)); err != nil { 30 | t.Fatal(err) 31 | } 32 | b := buf.Bytes() 33 | t.Log(b) 34 | n := &Outer{} 35 | if err := Read(bytes.NewReader(b), reflect.ValueOf(n)); err != nil { 36 | t.Fatal(err) 37 | } 38 | t.Logf("%+v", n) 39 | } 40 | -------------------------------------------------------------------------------- /test/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | services: 3 | kafka: 4 | build: . 
5 |     ports:
6 |       - 9092:9092
7 |       - 9093:9093
8 |     volumes:
9 |       - ./server.properties:/opt/kafka/config/server.properties
10 |       - ./log4j.properties:/opt/kafka/config/log4j.properties
11 |       - ./mtls/kafka.server.keystore.jks:/opt/kafka/config/kafka.server.keystore.jks
12 |       - ./mtls/kafka.server.truststore.jks:/opt/kafka/config/kafka.server.truststore.jks
13 |     command: ./bin/kafka-server-start.sh ./config/server.properties
14 |     depends_on:
15 |       - zookeeper # need to un-register the broker before shutting down zk to be able to start back up
16 |   zookeeper:
17 |     build: .
18 |     volumes:
19 |       - ./zookeeper.properties:/opt/kafka/config/zookeeper.properties
20 |     command: ./bin/zookeeper-server-start.sh ./config/zookeeper.properties
--------------------------------------------------------------------------------
/test/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 | 
3 | #LABEL name="kafka" version="1.0.0" java="openjdk-8-jre-headless" linux="debian:stretch" owner="mkocikowski"
4 | LABEL name="kafka" version="2.3.0" java="openjdk-8-jre-headless" linux="debian:stretch" owner="mkocikowski"
5 | 
6 | RUN apt-get update && \
7 |     apt-get install -y apt-transport-https openjdk-8-jre-headless unzip wget && \
8 |     apt-get clean && \
9 |     rm -fr /var/lib/apt/lists/*
10 | 
11 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
12 | 
13 | RUN mkdir -p /opt
14 | 
15 | #RUN wget -q -O - https://archive.apache.org/dist/kafka/1.0.0/kafka_2.12-1.0.0.tgz | tar -zxf - -C /opt
16 | RUN wget -q -O - https://archive.apache.org/dist/kafka/2.3.0/kafka_2.12-2.3.0.tgz | tar -zxf - -C /opt
17 | 
18 | #RUN mv /opt/kafka_2.12-1.0.0 /opt/kafka
19 | RUN mv /opt/kafka_2.12-2.3.0 /opt/kafka
20 | 
21 | ENV PATH=$PATH:/opt/kafka
22 | 
23 | WORKDIR /opt/kafka
--------------------------------------------------------------------------------
/test/v1_0/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 | 
3 | LABEL name="kafka" version="1.0.0" java="openjdk-8-jre-headless" linux="debian:stretch" owner="mkocikowski"
4 | #LABEL name="kafka" version="2.3.0" java="openjdk-8-jre-headless" linux="debian:stretch" owner="mkocikowski"
5 | 
6 | RUN apt-get update && \
7 |     apt-get install -y apt-transport-https openjdk-8-jre-headless unzip wget && \
8 |     apt-get clean && \
9 |     rm -fr /var/lib/apt/lists/*
10 | 
11 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
12 | 
13 | RUN mkdir -p /opt
14 | 
15 | RUN wget -q -O - https://archive.apache.org/dist/kafka/1.0.0/kafka_2.12-1.0.0.tgz | tar -zxf - -C /opt
16 | #RUN wget -q -O - http://mirrors.advancedhosters.com/apache/kafka/2.3.0/kafka_2.12-2.3.0.tgz | tar -zxf - -C /opt
17 | 
18 | RUN mv /opt/kafka_2.12-1.0.0 /opt/kafka
19 | #RUN mv /opt/kafka_2.12-2.3.0 /opt/kafka
20 | 
21 | ENV PATH=$PATH:/opt/kafka
22 | 
23 | WORKDIR /opt/kafka
--------------------------------------------------------------------------------
/api/ListOffsets/request.go:
--------------------------------------------------------------------------------
1 | package ListOffsets
2 | 
3 | import (
4 | 	"github.com/mkocikowski/libkafka/api"
5 | )
6 | 
7 | // timestampMs is milliseconds since epoch
8 | func NewRequest(topic string, partition int32, timestampMs int64) *api.Request {
9 | 	p := []RequestPartition{{Partition: partition, Timestamp: timestampMs}}
10 | 	t := []RequestTopic{{Topic: topic, Partitions: p}}
11 | 	return &api.Request{
12 | 		ApiKey:     api.ListOffsets,
13 | 		ApiVersion: 2,
14 | 		Body: RequestBody{
15 | 
			ReplicaId:      -1,
16 | 			IsolationLevel: 0,
17 | 			Topics:         t,
18 | 		},
19 | 	}
20 | }
21 | 
22 | type RequestBody struct {
23 | 	ReplicaId      int32
24 | 	IsolationLevel int8
25 | 	Topics         []RequestTopic
26 | }
27 | 
28 | type RequestTopic struct {
29 | 	Topic      string
30 | 	Partitions []RequestPartition
31 | }
32 | 
33 | type RequestPartition struct {
34 | 	Partition int32
35 | 	Timestamp int64
36 | }
--------------------------------------------------------------------------------
/api/JoinGroup/request.go.v4:
--------------------------------------------------------------------------------
1 | package JoinGroup
2 | 
3 | // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
4 | 
5 | import (
6 | 	"github.com/mkocikowski/libkafka/api"
7 | )
8 | 
9 | func NewRequest(group, member, protocol string, protocols []Protocol) *api.Request {
10 | 	return &api.Request{
11 | 		ApiKey:     api.JoinGroup,
12 | 		ApiVersion: 4,
13 | 		Body: Request{
14 | 			GroupId:            group,
15 | 			SessionTimeoutMs:   10000,
16 | 			RebalanceTimeoutMs: 5000,
17 | 			MemberId:           member,
18 | 			ProtocolType:       protocol,
19 | 			Protocols:          protocols,
20 | 		},
21 | 	}
22 | }
23 | 
24 | type Request struct {
25 | 	GroupId            string
26 | 	SessionTimeoutMs   int32
27 | 	RebalanceTimeoutMs int32
28 | 	MemberId           string
29 | 	ProtocolType       string
30 | 	Protocols          []Protocol
31 | }
32 | 
33 | type Protocol struct {
34 | 	Name     string
35 | 	Metadata []byte
36 | }
--------------------------------------------------------------------------------
/api/JoinGroup/request.go:
--------------------------------------------------------------------------------
1 | package JoinGroup
2 | 
3 | // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
4 | 
5 | import (
6 | 	"github.com/mkocikowski/libkafka/api"
7 | )
8 | 
9 | func NewRequest(group, member, protocol string, protocols []Protocol) *api.Request {
10 | 	return &api.Request{
11 | 		ApiKey:     api.JoinGroup,
12 | 		ApiVersion: 2,
13 | 		Body: Request{
14 | 			GroupId:            group,
15 | 			SessionTimeoutMs:   10000, // if no heartbeat this long then rebalance
16 | 			RebalanceTimeoutMs: 5000,  // wait this long for members to join
17 | 			MemberId:           member,
18 | 			ProtocolType:       protocol,
19 | 			Protocols:          protocols,
20 | 		},
21 | 	}
22 | }
23 | 
24 | type Request struct {
25 | 	GroupId            string
26 | 	SessionTimeoutMs   int32
27 | 	RebalanceTimeoutMs int32
28 | 	MemberId           string
29 | 	ProtocolType       string
30 | 	Protocols          []Protocol
31 | }
32 | 
33 | type Protocol struct {
34 | 	Name     string
35 | 	Metadata []byte
36 | }
--------------------------------------------------------------------------------
/client/producer/v1_0_test.go:
--------------------------------------------------------------------------------
1 | // +build v1_0
2 | 
3 | package producer
4 | 
5 | import (
6 | 	"fmt"
7 | 	"math/rand"
8 | 	"testing"
9 | 	"time"
10 | 
11 | 	"github.com/mkocikowski/libkafka"
12 | 	"github.com/mkocikowski/libkafka/client"
13 | )
14 | 
15 | func init() {
16 | 	rand.Seed(time.Now().UnixNano())
17 | }
18 | 
19 | func TestIntegrationPartitionProducer_v1_0(t *testing.T) {
20 | 	bootstrap := "localhost:9093"
21 | 	topic := fmt.Sprintf("test-%x", rand.Uint32())
22 | 	if _, err := client.CallCreateTopic(bootstrap, topic, 1, 1); err != nil {
23 | 		t.Fatal(err)
24 | 	}
25 | 	p := &PartitionProducer{
26 | 		PartitionClient: client.PartitionClient{
27 | 			Bootstrap: bootstrap,
28 | 			Topic:     topic,
29 | 			Partition: 0,
30 | 		},
31 | 		Acks:      1,
32 | 		TimeoutMs: 1000,
33 | 	}
34 | 	resp, err := p.ProduceStrings(time.Now(), "foo", "bar")
35 | 	if err != nil {
36 | 		t.Fatal(err)
37 | 	}
38 | 	if code :=
resp.ErrorCode; code != libkafka.ERR_NONE { 39 | t.Fatal(&libkafka.Error{Code: code}) 40 | } 41 | t.Logf("%+v", resp) 42 | } 43 | -------------------------------------------------------------------------------- /api/FindCoordinator/util_test.go: -------------------------------------------------------------------------------- 1 | package FindCoordinator 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "reflect" 7 | "testing" 8 | 9 | "github.com/mkocikowski/libkafka/wire" 10 | ) 11 | 12 | //const key = `AAEAJ2NsaWNraG91c2UtcmVxdWVzdHMtYm90LWRldGVjdGlvbi1wcm9kMgAWcmVxdWVzdHNfYm90X2RldGVjdGlvbgAAAAE=` 13 | 14 | func TestUnitGroup(t *testing.T) { 15 | //const fixture = `AAEAIHdvcmtlcnMtcnVudGltZS1pbnNlcnRlci1zdGFnaW5nAA93b3JrZXJzX3J1bnRpbWUAAABK` 16 | const fixture = `AAEAIHdvcmtlcnMtcnVudGltZS1pbnNlcnRlci1zdGFnaW5nAA93b3JrZXJzX3J1bnRpbWUAAABK` 17 | b, _ := base64.StdEncoding.DecodeString(fixture) 18 | r := &Group{} 19 | wire.Read(bytes.NewReader(b), reflect.ValueOf(r)) 20 | t.Logf("%+v", r) 21 | } 22 | 23 | const val = `AAEAAAATknd9pgAAAAABcCByu6EAAAFwJZkXoQ==` 24 | 25 | func TestUnitVal(t *testing.T) { 26 | //const fixture = `AAEAAAAUSj2EhQAAAAABcD//s0cAAAFwRSYPRw==` 27 | const fixture = `AAEAAAATkWaDEAAAAAABcCBNVWoAAAFwJXOxag==` 28 | b, _ := base64.StdEncoding.DecodeString(fixture) 29 | r := &Val{} 30 | wire.Read(bytes.NewReader(b), reflect.ValueOf(r)) 31 | t.Logf("%+v", r) 32 | } 33 | -------------------------------------------------------------------------------- /api/Fetch/response.go.v11: -------------------------------------------------------------------------------- 1 | package Fetch 2 | 3 | type Response struct { 4 | ThrottleTimeMs int32 5 | ErrorCode int16 6 | SessionId int32 7 | TopicResponses []TopicResponse 8 | } 9 | 10 | func (r *Response) PartitionResponse() *PartitionResponse { 11 | defer func() { recover() }() 12 | return &(r.TopicResponses[0].PartitionResponses[0]) 13 | } 14 | 15 | type TopicResponse struct { 16 | Topic string 17 | PartitionResponses []PartitionResponse 18 | } 19 | 20 | type PartitionResponse struct { 21 | Partition int32 22 | ErrorCode int16 23 | HighWatermark int64 24 | LastStableOffset int64 25 | LogStartOffset int64 26 | AbortedTransactions []AbortedTransaction 27 | PreferredReadReplica int32 28 | // 29 | RecordSet []byte // NULLABLE_BYTES 30 | } 31 | 32 | /* 33 | func (p *PartitionResponse) Err() error { 34 | if p.ErrorCode == libkafka.NONE { 35 | return nil 36 | } 37 | return &libkafka.Error{ 38 | Code: p.ErrorCode, 39 | } 40 | } 41 | */ 42 | 43 | type AbortedTransaction struct { 44 | ProducerId int64 45 | FirstOffset int64 46 | } 47 | -------------------------------------------------------------------------------- /test/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # the directory where the snapshot is stored.
16 | dataDir=/tmp/zookeeper
17 | # the port at which the clients will connect
18 | clientPort=2181
19 | # disable the per-ip limit on the number of connections since this is a non-production config
20 | maxClientCnxns=0
--------------------------------------------------------------------------------
/test/v1_0/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # the directory where the snapshot is stored.
16 | dataDir=/tmp/zookeeper
17 | # the port at which the clients will connect
18 | clientPort=2181
19 | # disable the per-ip limit on the number of connections since this is a non-production config
20 | maxClientCnxns=0
--------------------------------------------------------------------------------
/test/v1_0/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.6"
2 | services:
3 |   kafka:
4 |     image: mkocikowski/kafka:2.3.0
5 |     ports:
6 |       - 9092:9092
7 |     volumes:
8 |       - ./server.properties:/opt/kafka/config/server.properties
9 |       - ./log4j.properties:/opt/kafka/config/log4j.properties
10 |     command: ./bin/kafka-server-start.sh ./config/server.properties
11 |     depends_on:
12 |       - zookeeper # need to un-register the broker before shutting down zk to be able to start back up
13 |   kafka_v1_0:
14 |     image: mkocikowski/kafka:1.0.0
15 |     ports:
16 |       - 9093:9093
17 |     volumes:
18 |       - ./server.properties.v1_0:/opt/kafka/config/server.properties
19 |       - ./log4j.properties:/opt/kafka/config/log4j.properties
20 |     command: ./bin/kafka-server-start.sh ./config/server.properties
21 |     depends_on:
22 |       - zookeeper # need to un-register the broker before shutting down zk to be able to start back up
23 |   zookeeper:
24 |     build: .
25 | volumes: 26 | - ./zookeeper.properties:/opt/kafka/config/zookeeper.properties 27 | command: ./bin/zookeeper-server-start.sh ./config/zookeeper.properties 28 | -------------------------------------------------------------------------------- /api/response.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "reflect" 9 | 10 | "github.com/mkocikowski/libkafka/wire" 11 | ) 12 | 13 | func Read(r io.Reader) (*Response, error) { 14 | var size int32 15 | err := binary.Read(r, binary.BigEndian, &size) 16 | if err != nil { 17 | return nil, fmt.Errorf("error reading response size: %v", err) 18 | } 19 | b := make([]byte, int(size)) 20 | _, err = io.ReadFull(r, b) 21 | if err != nil { 22 | return nil, fmt.Errorf("error reading response body: %v", err) 23 | } 24 | //log.Println(size, len(b)) 25 | return &Response{body: b}, nil 26 | } 27 | 28 | type Response struct { 29 | body []byte 30 | } 31 | 32 | func (r *Response) CorrelationId() int32 { 33 | var c int32 34 | err := binary.Read(bytes.NewReader(r.body), binary.BigEndian, &c) 35 | if err != nil { 36 | panic(err) 37 | } 38 | return c 39 | } 40 | 41 | func (r *Response) Unmarshal(v interface{}) error { 42 | // [4:] skips bytes used for correlation id 43 | return wire.Read(bytes.NewReader(r.body[4:]), reflect.ValueOf(v)) 44 | } 45 | 46 | func (r *Response) Bytes() []byte { 47 | // [4:] skips bytes used for correlation id 48 | return r.body[4:] 49 | } 50 | -------------------------------------------------------------------------------- /api/Produce/request.go: -------------------------------------------------------------------------------- 1 | package Produce 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | type Args struct { 8 | ClientId string 9 | Topic string 10 | Partition int32 11 | Acks int16 // 0: no, 1: leader only, -1: all ISRs (as specified by min.insync.replicas) 12 | TimeoutMs int32 13 | } 14 | 15 | func NewRequest(args *Args, recordSet []byte) *api.Request { 16 | d := Data{ 17 | Partition: args.Partition, 18 | RecordSet: recordSet, 19 | } 20 | t := TopicData{ 21 | Topic: args.Topic, 22 | Data: []Data{d}, 23 | } 24 | return &api.Request{ 25 | ApiKey: api.Produce, 26 | ApiVersion: 7, 27 | CorrelationId: 0, 28 | ClientId: args.ClientId, 29 | Body: Request{ 30 | TransactionalId: "", 31 | Acks: args.Acks, 32 | TimeoutMs: args.TimeoutMs, 33 | TopicData: []TopicData{t}, 34 | }, 35 | } 36 | } 37 | 38 | type Request struct { 39 | TransactionalId string // NULLABLE_STRING 40 | Acks int16 // 0: no, 1: leader only, -1: all ISRs (as specified by min.insync.replicas) 41 | TimeoutMs int32 42 | TopicData []TopicData 43 | } 44 | 45 | type TopicData struct { 46 | Topic string 47 | Data []Data 48 | } 49 | 50 | type Data struct { 51 | Partition int32 52 | RecordSet []byte 53 | } 54 | -------------------------------------------------------------------------------- /api/CreateTopics/request.go: -------------------------------------------------------------------------------- 1 | package CreateTopics 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | /* 8 | message.format.version 1.0 9 | compression.type uncompressed 10 | message.timestamp.type CreateTime|LogAppendTime 11 | message.timestamp.difference.max.ms 1000 ignored if LogAppendTime 12 | */ 13 | 14 | func NewRequest(topic string, numPartitions int32, replicationFactor int16, configs []Config) *api.Request { 15 | t := Topic{ 16 | Name: 
topic,
17 | 		NumPartitions:     numPartitions,
18 | 		ReplicationFactor: replicationFactor,
19 | 		Assignments:       []Assignment{},
20 | 		Configs:           configs,
21 | 	}
22 | 	return &api.Request{
23 | 		ApiKey:     api.CreateTopics,
24 | 		ApiVersion: 2,
25 | 		Body: Request{
26 | 			Topics:       []Topic{t},
27 | 			TimeoutMs:    1000,
28 | 			ValidateOnly: false,
29 | 		},
30 | 	}
31 | }
32 | 
33 | type Request struct {
34 | 	Topics       []Topic
35 | 	TimeoutMs    int32
36 | 	ValidateOnly bool
37 | }
38 | 
39 | type Topic struct {
40 | 	Name              string
41 | 	NumPartitions     int32
42 | 	ReplicationFactor int16
43 | 	Assignments       []Assignment
44 | 	Configs           []Config
45 | }
46 | 
47 | type Assignment struct {
48 | 	PartitionIndex int32
49 | 	BrokerIds      []int32
50 | }
51 | 
52 | type Config struct {
53 | 	Name  string
54 | 	Value string
55 | }
--------------------------------------------------------------------------------
/api/Fetch/request.go:
--------------------------------------------------------------------------------
1 | package Fetch
2 | 
3 | import (
4 | 	"github.com/mkocikowski/libkafka/api"
5 | )
6 | 
7 | type Args struct {
8 | 	ClientId      string
9 | 	Topic         string
10 | 	Partition     int32
11 | 	Offset        int64
12 | 	MinBytes      int32
13 | 	MaxBytes      int32
14 | 	MaxWaitTimeMs int32
15 | }
16 | 
17 | func NewRequest(args *Args) *api.Request {
18 | 	p := Partition{
19 | 		Partition:         args.Partition,
20 | 		FetchOffset:       args.Offset,
21 | 		PartitionMaxBytes: args.MaxBytes,
22 | 	}
23 | 	t := Topic{
24 | 		Topic:      args.Topic,
25 | 		Partitions: []Partition{p}, // one partition per request
26 | 	}
27 | 	return &api.Request{
28 | 		ApiKey:        api.Fetch,
29 | 		ApiVersion:    6,
30 | 		CorrelationId: 0,
31 | 		ClientId:      args.ClientId,
32 | 		Body: Request{
33 | 			ReplicaId:     -1,
34 | 			MaxWaitTimeMs: args.MaxWaitTimeMs,
35 | 			MinBytes:      args.MinBytes,
36 | 			MaxBytes:      args.MaxBytes,
37 | 			Topics:        []Topic{t},
38 | 		},
39 | 	}
40 | }
41 | 
42 | type Request struct {
43 | 	ReplicaId      int32
44 | 	MaxWaitTimeMs  int32
45 | 	MinBytes       int32
46 | 	MaxBytes       int32
47 | 	IsolationLevel int8 // not used
48 | 	Topics         []Topic
49 | }
50 | 
51 | type Topic struct {
52 | 	Topic      string
53 | 	Partitions []Partition
54 | }
55 | 
56 | type Partition struct {
57 | 	Partition         int32
58 | 	FetchOffset       int64
59 | 	LogStartOffset    int64 // not used
60 | 	PartitionMaxBytes int32
61 | }
--------------------------------------------------------------------------------
/varint/varint.go:
--------------------------------------------------------------------------------
1 | // Package varint implements varint and ZigZag encoding and decoding.
2 | package varint
3 | 
4 | // PutZigZag64 encodes an int64 into dst using buf as a scratch buffer.
5 | func PutZigZag64(dst, buf []byte, x int64) []byte {
6 | 	// use signed number to get arithmetic right shift.
7 | 	n := PutVarint(buf, uint64(x<<1^(x>>63)))
8 | 	return append(dst, buf[:n]...)
9 | }
10 | 
11 | // https://github.com/gogo/protobuf/blob/master/proto/decode.go#L242
12 | func DecodeZigZag64(buf []byte) (int64, int) {
13 | 	x, n := DecodeVarint(buf)
14 | 	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
15 | 	return int64(x), n
16 | }
17 | 
18 | // PutVarint encodes a uint64 into buf and returns the number of bytes written.
19 | // If the buffer is too small, PutVarint will panic.
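//
// Illustrative, hand-worked values:
//
//	PutVarint(buf, 5)    // writes 0x05, returns 1
//	PutVarint(buf, 300)  // writes 0xAC 0x02, returns 2
//
// Combined with the ZigZag mapping above (0->0, -1->1, 1->2, -2->3, 2->4, ...),
// PutZigZag64(dst, buf, -3) appends the single byte 0x05, since ZigZag(-3) == 5.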
20 | // Based on https://github.com/golang/protobuf/blob/master/proto/encode.go#L72
21 | func PutVarint(buf []byte, x uint64) int {
22 | 	n := 0
23 | 	for n = 0; x > 127; n++ {
24 | 		buf[n] = 0x80 | uint8(x&0x7F)
25 | 		x >>= 7
26 | 	}
27 | 	buf[n] = uint8(x)
28 | 	return n + 1
29 | }
30 | 
31 | // https://github.com/golang/protobuf/blob/master/proto/decode.go#L57
32 | func DecodeVarint(buf []byte) (x uint64, n int) {
33 | 	for shift := uint(0); shift < 64; shift += 7 {
34 | 		if n >= len(buf) {
35 | 			return 0, 0
36 | 		}
37 | 		b := uint64(buf[n])
38 | 		n++
39 | 		x |= (b & 0x7F) << shift
40 | 		if (b & 0x80) == 0 {
41 | 			return x, n
42 | 		}
43 | 	}
44 | 	// The number is too large to represent in a 64-bit value.
45 | 	return 0, 0
46 | }
--------------------------------------------------------------------------------
/api/Fetch/request.go.v11:
--------------------------------------------------------------------------------
1 | package Fetch
2 | 
3 | import (
4 | 	"github.com/mkocikowski/libkafka/api"
5 | )
6 | 
7 | func NewRequest(topic string, partition int32, offset int64) *api.Request {
8 | 	p := Partition{
9 | 		Partition:          partition,
10 | 		CurrentLeaderEpoch: -1,
11 | 		FetchOffset:        offset,
12 | 		PartitionMaxBytes:  100 << 20,
13 | 	}
14 | 	t := Topic{
15 | 		Topic:      topic,
16 | 		Partitions: []Partition{p}, // one partition per request
17 | 	}
18 | 	return &api.Request{
19 | 		ApiKey:        api.Fetch,
20 | 		ApiVersion:    11,
21 | 		CorrelationId: 0,
22 | 		ClientId:      "",
23 | 		Body: Request{
24 | 			ReplicaId:       -1,
25 | 			MaxWaitTimeMs:   1000,
26 | 			MinBytes:        1 << 20,
27 | 			MaxBytes:        100 << 20,
28 | 			Topics:          []Topic{t},
29 | 			ForgottenTopics: []ForgottenTopic{},
30 | 		},
31 | 	}
32 | }
33 | 
34 | type Request struct {
35 | 	ReplicaId       int32
36 | 	MaxWaitTimeMs   int32
37 | 	MinBytes        int32
38 | 	MaxBytes        int32
39 | 	IsolationLevel  int8 // not used
40 | 	SessionId       int32
41 | 	SessionEpoch    int32
42 | 	Topics          []Topic
43 | 	ForgottenTopics []ForgottenTopic
44 | 	RackId          string
45 | }
46 | 
47 | type Topic struct {
48 | 	Topic      string
49 | 	Partitions []Partition
50 | }
51 | 
52 | type Partition struct {
53 | 	Partition          int32
54 | 	CurrentLeaderEpoch int32
55 | 	FetchOffset        int64
56 | 	LogStartOffset     int64 // not used
57 | 	PartitionMaxBytes  int32
58 | }
59 | 
60 | type ForgottenTopic struct {
61 | 	Topic      string
62 | 	Partitions []int32
63 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 | 
3 | Copyright (c) 2020, Mik Kocikowski
4 | All rights reserved.
5 | 
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | 
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 |    list of conditions and the following disclaimer.
11 | 
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 |    this list of conditions and the following disclaimer in the documentation
14 |    and/or other materials provided with the distribution.
15 | 
16 | 3. Neither the name of the copyright holder nor the names of its
17 |    contributors may be used to endorse or promote products derived from
18 |    this software without specific prior written permission.
19 | 
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /test/mtls/broker-csr.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIEVjCCAj4CAQAwETEPMA0GA1UEAwwGYnJva2VyMIICIjANBgkqhkiG9w0BAQEF 3 | AAOCAg8AMIICCgKCAgEAvdaK3zljhDF5JphZsAM0fE8AqGOm9hOGAH3WrNnYHtaN 4 | BWV7l7jEAAHfTo24MNj/xuGG+NSYn+QZne6SlA9xD9TaoKbF5fvQK5yQkeqLdUOV 5 | vrbnQZkmO8lXPTQQx58a/w1oIj3EPTgiwSiFDXnXFPfDVIFlwselHjOsq6cs5b+w 6 | 7ty6LjE5mQeCBOzE6cEs1nw+rR7OCZxcuVIwVU3U2BrhbJn07lPd9aKrq9ghde6I 7 | klCN5z3F9+5NeL1aD38KwEjAX0lY9lZxCqQf+zI1p7O5HYGwxKPnQpEgnlHUEzwW 8 | fK/Tn48M4FBZHElLnd+X2VfAJS8DnYFZp+f+1GkKOfksD7AkhYC+FVreyoNOVeDY 9 | pyXFo+eQkvXtwyPfiDJOl7g4muVrNAWt/GbLI945KmJ1NtE3Syi6afba+CDZRQ1a 10 | T9aa21S96mSTMkLZ+WGqjtUIMjQU54mHtn5kkqjrfSz0BduAdjZVuV5VfyymLoVT 11 | 9DKNJweTDqdvjoHy1eqTBoeuF4B81z8v/ZVfFI4BYmfa9f5kgah9qYuAEoTciLFP 12 | VeqYo+atnTfzULiBb24vSrhrk6pqEI6BKcIkhAHVtAvZSoJQZmy5KgxkqNqJfw/7 13 | vzWGZwJlUV9shwXDpggeGMAHNuFW6lJftXc6xeCZQ+JLYYeUrKHD6a3w/KQUgAUC 14 | AwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQCjXT7HlRwsNru8/S4JNrQTvM+q7M2B 15 | vSWIz2/usvPnOQtLs6Pyf/KpqxZPMtIPYgrxxVu0dZH2P2TXXCintTrWUOXqFSM0 16 | 8G9Xm82zrOFZZjtOhjcp/bTjpJijU2QJbB7MJoz8VkjgEsCF7RyHnPpLG+QDGWxc 17 | PW+sf+2V1UV9wpXv8wxDYVtlg56IlVhgD2R9RfztuJDswempOqDmCbHLXYFum35x 18 | e4rRmlqOVzO3G8QPvWLhpYEPGtqCEe/xao+8NtmQX55avkoAJwqro1GwROuejX7y 19 | seWkLVBJb4/EMo7dk3ngPdGBJLIoMJRy1yCmWe9jcRUcqhcWLYxzxScqFxr3naIu 20 | za+s5wX2NiXvlQi3eCDVOCzUPR/rg8QuatdSPJc2egj/paHGJp2Podd20YqDXOSW 21 | ExGMDfk/KQR3nvFxnGKPDPyBwPAMqb5RSurpY5yaJM6RB8rTWOq5VssKxKHabzRH 22 | qOYIRmcWZOOohves61RC5tIjGDTta4ZcWy+J9LyzGrvhUzOHPVY6jUiaIGtbpxKC 23 | SrFtfx2VmUsZnuddcU9ZTBNKOXDNvkcwPxirwPQr8NUUXLRK/rHFotTNeu0RBVYj 24 | fESOunn5LLV8vQWVC0luw+9wETyd03pdCyM/5Kqxpge6QpqNeyLxl9O3LzZqr3im 25 | XuUOmpmdTKFB/w== 26 | -----END CERTIFICATE REQUEST----- 27 | -------------------------------------------------------------------------------- /test/mtls/client-csr.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIEVjCCAj4CAQAwETEPMA0GA1UEAwwGY2xpZW50MIICIjANBgkqhkiG9w0BAQEF 3 | AAOCAg8AMIICCgKCAgEAzbME7lCdG0r4F8QB3SFT1XNvsQmkcDDRi9+okp5HxKS2 4 | m9U4C3IqTMk2fEgDfOKJ1AEfSiItMgiOLsq9LBl1ckxO8vTwpo09BZlwtn5Itg99 5 | I6Ppj1Y4+NaJpgXvKlkDtpS5LfJQLhHyC1xz7tKVi2HHvOFICMIwV0+xOCZzc1ly 6 | MlUvznJmMOmrbuspIUijcQ4DJdoid863a/gK5XLOaQw35U6N64ye06MJDCjrewWt 7 | WQ8V9gnwSq1/Wl8aLF1Sd5KN/HkGhvJW37DC3hGuQsjQTMBKGtezRz1cdkBMOrlW 8 | qJTweMTvacUweYG5+9JJlOf9NxcCMGHwjfWA1z6Eq0gH91rh72V91zzFESLj3RwO 9 | KeW49q9tMAWCoFYzQJTofV7EkIcqiu2LqRYsWLYSK/WuVXW3fb7lWF8twExIllJl 10 | wfQm6zOMW8jx1ERJ/EXAtd7fjWZI/CsKM48MU4a93db99Hb+IwQ23IlP8YJOeK4i 11 | fnH7RLeAWoBPQWjFDXgmvShclIYAY4xpg6JBjY1pgpDGlZH27B9ofF2xmGCONLYQ 12 | OEDFrlMno6TFiybt2yFTCDyAXVF7vL/ScDjogTMg58iPLoGwxeByySMkMvHyg2iV 13 | IEuK54hJRRfLVCCBCkChxouTSswYP1aEd/oL8BJTtMv0iLwk9CehsXPEPeS6qNEC 14 | 
AwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQAneS82+ctN3MZYKU4kW7TF0/UHoQQq 15 | gi+Z/fWTDG1gAb6TfZLoHOwzqhbes53YB0aKTHadkk7yCDRT/m70VnT6/Z8hLnLV 16 | 2i9bAYuAGlDjps5lu6bh+phLvaQx8eJFw8NDr9X8sPxox9WZaNkvSs/0jjjXD4DX 17 | EKloGJ4hahEpKoPbs0tt/wYhmSSyHmiC8aQ8p+JKiWTxeAHGmHEZmOXXnJDUS5PF 18 | ZEMOMwx++T/h0ghogJw6VNkMnLtj52OJ1kUhTffVmdOWxw/vjnkGcgjhNy5NK1dv 19 | AqgDQGT6LXKZnrcVdHK4+FHpiOv10SxzUszWKFzUXuEuURifNnHJvepQDrST++NP 20 | KUB9cceBr1C+XFm7nVk0uN50Z44pheQJBfzspuK+1qFLi+hiDaZRgOtpGb4Tl2I6 21 | N4yC8hJxKs0FmzOHJMpiUhS1afZsKXzFAs7alxQVgbZDUcy/NUileRUUsL32dCNH 22 | aByJZUVwS0rnkj9NBODcFY9/IqXvylR5RhHHJywAM+eauHlcAt83JEBDdxBcQlfQ 23 | LM+A4exkQzgwJjGNQ/9UOnF35uVfakPvGEejZqVNPvX+b35OR7oZNC2EMomzYTL5 24 | w9NUtLOCZSWe/RtGNF9pW3R0F1yQfsWu0mcXLCo/dsU63frlu7GtG5sjdgBNTakA 25 | QNHWiFvz5ZNugA== 26 | -----END CERTIFICATE REQUEST----- 27 | -------------------------------------------------------------------------------- /test/mtls/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: default 2 | default: client-cert.pem client-key.pem kafka.server.keystore.jks kafka.server.truststore.jks 3 | 4 | .PHONY: clean 5 | clean: 6 | rm *.pem *.srl *.p12 *.jks 7 | 8 | ca-key.pem ca-cert.pem: 9 | openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes -keyout ca-key.pem -out ca-cert.pem -subj "/CN=ca" 10 | 11 | client-key.pem client-csr.pem: 12 | openssl req -new -newkey rsa:4096 -sha256 -days 3650 -nodes -keyout client-key.pem -out client-csr.pem -subj "/CN=client" 13 | 14 | client-cert.pem: client-csr.pem ca-cert.pem ca-key.pem extensions.cnf 15 | openssl x509 -req -in client-csr.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -days 3650 -sha256 -extensions v3_ca -extfile extensions.cnf 16 | 17 | broker-key.pem broker-csr.pem: 18 | openssl req -new -newkey rsa:4096 -sha256 -days 3650 -nodes -keyout broker-key.pem -out broker-csr.pem -subj "/CN=broker" 19 | 20 | broker-cert.pem: broker-csr.pem ca-cert.pem ca-key.pem extensions.cnf 21 | openssl x509 -req -in broker-csr.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -out broker-cert.pem -days 3650 -sha256 -extensions v3_ca -extfile extensions.cnf 22 | 23 | # https://smallstep.com/hello-mtls/doc/server/kafka 24 | 25 | broker.p12: broker-cert.pem broker-key.pem 26 | openssl pkcs12 -export -in broker-cert.pem -inkey broker-key.pem -name localhost -passout pass:123456 >broker.p12 27 | 28 | kafka.server.keystore.jks: broker.p12 29 | keytool -importkeystore -srckeystore broker.p12 -destkeystore kafka.server.keystore.jks -srcstoretype pkcs12 -alias localhost -srcstorepass 123456 -deststorepass 123456 -noprompt 30 | 31 | kafka.server.truststore.jks: ca-cert.pem 32 | keytool -keystore kafka.server.truststore.jks -alias CARoot -import -file ca-cert.pem -storepass 123456 -noprompt 33 | -------------------------------------------------------------------------------- /test/mtls/broker-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE3DCCAsSgAwIBAgIUYiJMrEOuMdT5gc9BDjlJyn1x3pAwDQYJKoZIhvcNAQEL 3 | BQAwDTELMAkGA1UEAwwCY2EwHhcNMjEwODI0MTUzMTE4WhcNMzEwODIyMTUzMTE4 4 | WjARMQ8wDQYDVQQDDAZicm9rZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK 5 | AoICAQC91orfOWOEMXkmmFmwAzR8TwCoY6b2E4YAfdas2dge1o0FZXuXuMQAAd9O 6 | jbgw2P/G4Yb41Jif5Bmd7pKUD3EP1NqgpsXl+9ArnJCR6ot1Q5W+tudBmSY7yVc9 7 | NBDHnxr/DWgiPcQ9OCLBKIUNedcU98NUgWXCx6UeM6yrpyzlv7Du3LouMTmZB4IE 8 | 7MTpwSzWfD6tHs4JnFy5UjBVTdTYGuFsmfTuU931oqur2CF17oiSUI3nPcX37k14 9 | 
vVoPfwrASMBfSVj2VnEKpB/7MjWns7kdgbDEo+dCkSCeUdQTPBZ8r9OfjwzgUFkc 10 | SUud35fZV8AlLwOdgVmn5/7UaQo5+SwPsCSFgL4VWt7Kg05V4NinJcWj55CS9e3D 11 | I9+IMk6XuDia5Ws0Ba38Zssj3jkqYnU20TdLKLpp9tr4INlFDVpP1prbVL3qZJMy 12 | Qtn5YaqO1QgyNBTniYe2fmSSqOt9LPQF24B2NlW5XlV/LKYuhVP0Mo0nB5MOp2+O 13 | gfLV6pMGh64XgHzXPy/9lV8UjgFiZ9r1/mSBqH2pi4AShNyIsU9V6pij5q2dN/NQ 14 | uIFvbi9KuGuTqmoQjoEpwiSEAdW0C9lKglBmbLkqDGSo2ol/D/u/NYZnAmVRX2yH 15 | BcOmCB4YwAc24VbqUl+1dzrF4JlD4kthh5SsocPprfD8pBSABQIDAQABozAwLjAJ 16 | BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ 17 | KoZIhvcNAQELBQADggIBAEv/n11HpgjtF7sW+47+do+TUIYLv4C9Kmr/rDCY5YGZ 18 | NLqpffIWq32Qx2ttEXrelK1w4oFm7Lls7EF6q3Y8YRtN6CeGgqlYCinEsN0r9K6U 19 | lrkaldETs5pJUKGTACnm6FU9rPB9L6U05S/kX4+5Davu2Bxq4I4qc/2mM4xhMD5p 20 | lWcxzVytDhcLL2ZtitMZwI+mFKTZOCxyBYUl5QeLHSONnNuZRxA1Rr0YVwv2djzX 21 | Zmqi2DWLtS8GeRtQ8/ZcEyEe0N8afICEcirG7ZCbnrflmvnZiQuTIFsaQNMzziJt 22 | FrQugfPruwYDQiKlP7uboSVLc0D8unbvTrBClT9Ewhn/HkGbXtIN5JV4JbyoNbdZ 23 | UWdBsGGa6Yb1ynnQqHFmJ/clo/3h+sJrzV0e9paXoYIUt8LxcPEV2In7jM5DdA3Q 24 | Mdu7o37J71b+0p0oSXjhJur81r1cvg9B9QOzZ0S6uM2KVxvxFdPSsx5v1lAv2t0u 25 | zFGvkukvru+bw/zoMvjUEwnrUYO0ewcX0eRJ3+u4s+TbzhQRSpb2ueSgy4Kj4Hsw 26 | pMLBqYbo6enfXet9F+nL39+0SY89h13OzurOE2ixAFK/IG9EX/WLyz6Meg4V0anl 27 | 77eWtTD4dQUa7ofZdv5RECiOa6XCtY3yNuzq+kHmBt7kyWqkdsuoDhCdO7jM5nBA 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /test/mtls/client-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE3DCCAsSgAwIBAgIUYiJMrEOuMdT5gc9BDjlJyn1x3o8wDQYJKoZIhvcNAQEL 3 | BQAwDTELMAkGA1UEAwwCY2EwHhcNMjEwODI0MTUzMTE4WhcNMzEwODIyMTUzMTE4 4 | WjARMQ8wDQYDVQQDDAZjbGllbnQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK 5 | AoICAQDNswTuUJ0bSvgXxAHdIVPVc2+xCaRwMNGL36iSnkfEpLab1TgLcipMyTZ8 6 | SAN84onUAR9KIi0yCI4uyr0sGXVyTE7y9PCmjT0FmXC2fki2D30jo+mPVjj41omm 7 | Be8qWQO2lLkt8lAuEfILXHPu0pWLYce84UgIwjBXT7E4JnNzWXIyVS/OcmYw6atu 8 | 6ykhSKNxDgMl2iJ3zrdr+Arlcs5pDDflTo3rjJ7TowkMKOt7Ba1ZDxX2CfBKrX9a 9 | XxosXVJ3ko38eQaG8lbfsMLeEa5CyNBMwEoa17NHPVx2QEw6uVaolPB4xO9pxTB5 10 | gbn70kmU5/03FwIwYfCN9YDXPoSrSAf3WuHvZX3XPMURIuPdHA4p5bj2r20wBYKg 11 | VjNAlOh9XsSQhyqK7YupFixYthIr9a5Vdbd9vuVYXy3ATEiWUmXB9CbrM4xbyPHU 12 | REn8RcC13t+NZkj8KwozjwxThr3d1v30dv4jBDbciU/xgk54riJ+cftEt4BagE9B 13 | aMUNeCa9KFyUhgBjjGmDokGNjWmCkMaVkfbsH2h8XbGYYI40thA4QMWuUyejpMWL 14 | Ju3bIVMIPIBdUXu8v9JwOOiBMyDnyI8ugbDF4HLJIyQy8fKDaJUgS4rniElFF8tU 15 | IIEKQKHGi5NKzBg/VoR3+gvwElO0y/SIvCT0J6Gxc8Q95Lqo0QIDAQABozAwLjAJ 16 | BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ 17 | KoZIhvcNAQELBQADggIBAJA1Q2keEQPtp25TAK7IlSzmjlLBDliU1ZvvxtxouMVG 18 | EVrmD0EvkQcVUj/oGWkRvp4z7u8wSq8/5UdFRh3kDeyqZOMETV5vP+FV66Iw1B+c 19 | AY6yWYSxELBNo8TANlqVIZRTiT7uUGJ6tbJ44t/4U5ncAYHE688yugFHFuTJUt3r 20 | lBsO1+vwShPi2wpho1fI007HorjclRbEx3PnflXlVKJ5gZbwR2eo+XRkILgbXnw6 21 | X6TMUTRijQzjvwXj/PH5iZpXtFQubI8Hz9Cq5e/buPV3iOsHcGwDqsFaPCrWgeFj 22 | uwFxwC2lR2PMo0Md2phFwu1S6OmPDGd2w8kKjkTxb0W0QIlSJsTbfxbucR4xqciu 23 | rOsT6s80BuYNjZF4ok6qmUm0ov1BOT0PCpv+AdYqgu/SadxFpxT944kMFO3442jk 24 | 3gJxySDKlfGWbkq5Aoceg17nak6hu2zq5f9ZZYjemCGEGu85MAF427avgj9gBjhm 25 | 3dAnmymblYhFup6mqFXSudbFHAz42cSHMZJpiSD80hTKjg1WD7GTQj4ieFP8NVjV 26 | +roWyDNtV7Q1aQPcZE3refUjR8pIZJHDSb/lBE50VUKPw4tOzI18Ux3awhOYmmMl 27 | 2K4fpfqnWQUEv7LoSleaq57tW7Zzegw9CHhOUUUAhYr+tHT0YueJTEI3MhRUqi9x 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /test/mtls/ca-cert.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE+zCCAuOgAwIBAgIUJaP1HoTAKraFQIkBLhshN5vYMmkwDQYJKoZIhvcNAQEL 3 | BQAwDTELMAkGA1UEAwwCY2EwHhcNMjEwODI0MTUzMTE4WhcNMzEwODIyMTUzMTE4 4 | WjANMQswCQYDVQQDDAJjYTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB 5 | AKbVIfzH+MUnZwE7/QgprKO/+y0Hskwp+s/wbzS64a+Tr6QJSXXkHPvLb7BrPBJO 6 | mEMBl8LbEdY2cx10BQBNURkQVIDZGQeFiHH+fdLhMGk5gOFsUmapZlgpLF1/L6n9 7 | wx27u6EKok4foHwDGx+l6KbQd1LlrydYxRJyD7C5JprA01lP7oalrgVha8jkujg4 8 | k1yfQNusSSQ7+lrwn8eDk4pKYEAQrMJYs1wvmjbzJGUN6tTdQW3lb2DG4KdcLxtO 9 | irifsqtNkEwMnO33JnTkUfF2gSsJ4108BWRTxCcBui0oCiwFvGfGAMMU+ZmrlB9D 10 | p9p2u3rfzhfq7+uT9xA5LutOghR1MORWqpynyjjxPi3wi8WdeZdaQa1/8vnvBos5 11 | 64bZ0qVQxu+aG1QAlZmj2pmAiV6KBGXBRv/JoBUPqVIVUdXqTDiPO+lpk5httQJo 12 | Ck0tPwnNqNlpz4U5YNxz60I0I4Gnb4+Ll5P93gPwNVhY0QfZsatIyP2Aikl0uTSB 13 | J9P7h9XlA7/A8wSBnE1Ab6VZiXAVbTjHKSynqDwUQL91QEXXmYKMzWiJ93yhqhqR 14 | uIQ60iD0V/4hMyZ4ZrYGwND8u436vsllf170lojh8Wslig+qy9vra+r7r0AAoa01 15 | HHZcSY4v3uvUdXttb4rw+GOn+zZfEibRE36B8adsdTkBAgMBAAGjUzBRMB0GA1Ud 16 | DgQWBBQagU7UJPX51iQCD/j/QYLvfhSRQjAfBgNVHSMEGDAWgBQagU7UJPX51iQC 17 | D/j/QYLvfhSRQjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBQ 18 | oAjKa9VXVn2VqT91sbOrUSjrzEB3EOxePZ+IMudhMGI55+SIb7Mfb8xY60B+01bd 19 | U7jNfOylmUkjzq9rBEV1wTY1G7wm8OrIwvrOwSRa35JTRQQyTnv0XJDp/8LIUrjv 20 | +LaQO9/NldhEv3QalUtYlaGuoFoT7uF9b3huO921w5xfWpQ5kwqZcSqfnD3Gk3LY 21 | dE9Kd9ASISgv7+e+2Rssf7UC4tN1Yyap1ypup+CZTwWDo90f1FKWcGmw4NqeZmJd 22 | E13kzv3ZjTRDYnvvqZbo7NNsz4+q7fTX/a+Cs1RhEE1GoI5TjuuzNWMN4DeiYKhG 23 | DcY2Wj5O+wv/deCX1x2cdH4oZcyP5lTbLHHankwRTkX/e3aGZ38baIQlYaSUTgPv 24 | tjUIg185LGjXHDpNPxiIfE7osQ8BRWWtWQdXg8hxqRcVAr6pinTZvls6CcmAzOF4 25 | VHoCHxdheHD7sq+Ah00TBjBMj9lQ6yuvUlze1a1koQKBqUcHK6rEO3DEwX0yyBcJ 26 | Poh/XGHBx2i0s//b21NkmGZhh7haubjw7IfKpfIHh2avkZvET3hN5LqBQzJ3SrJx 27 | 2k5UwjpOWZR+4e0Ccc3I0y+cotz5fDVonRI7Sns0wm6vvHOGrkk7PYQ0mVCPfl8T 28 | F68zDvzJO3e6shCo4CGwMWiLhUgvbi/srLjrxCM8OA== 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /api/Metadata/response.go: -------------------------------------------------------------------------------- 1 | package Metadata 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strconv" 7 | ) 8 | 9 | type Response struct { 10 | ThrottleTimeMs int32 11 | Brokers []Broker 12 | ClusterId string 13 | ControllerId int32 14 | TopicMetadata []TopicMetadata 15 | } 16 | 17 | type Broker struct { 18 | NodeId int32 19 | Host string 20 | Port int32 21 | Rack string 22 | } 23 | 24 | func (b *Broker) Addr() string { 25 | return net.JoinHostPort(b.Host, strconv.Itoa(int(b.Port))) 26 | } 27 | 28 | func (b *Broker) String() string { 29 | return fmt.Sprintf("%s:%d:%s:%d", b.Rack, b.NodeId, b.Host, b.Port) 30 | } 31 | 32 | type TopicMetadata struct { 33 | ErrorCode int16 34 | Topic string 35 | IsInternal bool 36 | PartitionMetadata []PartitionMetadata 37 | } 38 | 39 | type PartitionMetadata struct { 40 | ErrorCode int16 41 | Partition int32 42 | Leader int32 43 | Replicas []int32 44 | Isr []int32 45 | OfflineReplicas []int32 46 | } 47 | 48 | func (r *Response) Broker(id int32) *Broker { 49 | for _, b := range r.Brokers { 50 | if b.NodeId == id { 51 | return &b 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | func (r *Response) Leaders(topic string) map[int32]*Broker { 58 | leaders := make(map[int32]*Broker) 59 | for _, t := range r.TopicMetadata { 60 | if t.Topic != topic { 61 | continue 62 | } 63 | for _, p := range t.PartitionMetadata { 64 | broker := 
r.Broker(p.Leader) 65 | if broker != nil { 66 | leaders[p.Partition] = broker 67 | } 68 | } 69 | } 70 | return leaders 71 | } 72 | 73 | func (r *Response) Partitions(topic string) map[int32]*PartitionMetadata { 74 | partitions := make(map[int32]*PartitionMetadata) 75 | for _, t := range r.TopicMetadata { 76 | if t.Topic != topic { 77 | continue 78 | } 79 | for i := range t.PartitionMetadata { 80 | partitions[t.PartitionMetadata[i].Partition] = &t.PartitionMetadata[i] // address of the slice element, not of the (reused) loop variable 81 | } 82 | } 83 | return partitions 84 | } 85 | -------------------------------------------------------------------------------- /api/OffsetCommit/request.go: -------------------------------------------------------------------------------- 1 | package OffsetCommit 2 | 3 | import ( 4 | "github.com/mkocikowski/libkafka/api" 5 | ) 6 | 7 | // NewMultiplePartitionsRequest constructs an api.Request with ApiKey 8 | // "OffsetCommit" for committing offsets for multiple partitions at once. 9 | // retentionMs sets the time period in ms for which to retain the offsets 10 | // (-1 means no limit). 11 | func NewMultiplePartitionsRequest(group, topic string, offsets map[int32]int64, retentionMs int64) *api.Request { 12 | partitionOffsets := make([]Partition, len(offsets)) 13 | i := 0 14 | for p, o := range offsets { 15 | partitionOffsets[i] = Partition{ 16 | PartitionIndex: p, 17 | CommitedOffset: o, 18 | } 19 | i++ 20 | } 21 | t := Topic{ 22 | Name: topic, 23 | Partitions: partitionOffsets, 24 | } 25 | return &api.Request{ 26 | ApiKey: api.OffsetCommit, 27 | ApiVersion: 2, 28 | Body: Request{ 29 | GroupId: group, 30 | GenerationId: -1, 31 | MemberId: "", 32 | RetentionTimeMs: retentionMs, 33 | Topics: []Topic{t}, 34 | }, 35 | } 36 | } 37 | 38 | func NewRequest(group, topic string, partition int32, offset, retentionMs int64) *api.Request { 39 | p := Partition{ 40 | PartitionIndex: partition, 41 | CommitedOffset: offset, 42 | CommitedMetadata: "", 43 | } 44 | t := Topic{ 45 | Name: topic, 46 | Partitions: []Partition{p}, 47 | } 48 | return &api.Request{ 49 | ApiKey: api.OffsetCommit, 50 | ApiVersion: 2, 51 | Body: Request{ 52 | GroupId: group, 53 | GenerationId: -1, 54 | MemberId: "", 55 | RetentionTimeMs: retentionMs, 56 | Topics: []Topic{t}, 57 | }, 58 | } 59 | } 60 | 61 | type Request struct { 62 | GroupId string 63 | GenerationId int32 64 | MemberId string 65 | RetentionTimeMs int64 66 | Topics []Topic 67 | } 68 | 69 | type Topic struct { 70 | Name string 71 | Partitions []Partition 72 | } 73 | 74 | type Partition struct { 75 | PartitionIndex int32 76 | CommitedOffset int64 77 | CommitedMetadata string 78 | } 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Package libkafka is a low level golang library for producing to and consuming 2 | from Kafka 1.0+. It has no external dependencies. It is not modeled on the Java 3 | client. All API calls are synchronous and all code executes in the calling 4 | goroutine. 5 | 6 | 7 | Project Scope 8 | --- 9 | The library focuses on non transactional production and consumption. It 10 | implements single partition Producer and Consumer. Multi partition producers 11 | and consumers are built on top of this library (example: https://github.com/mkocikowski/kafkaclient). 12 | 13 | 14 | Development status / "roadmap" 15 | --- 16 | As of 2020-04-30 focus is on producer code.
Consumer code has been validated (I 17 | have a working multi-partition consumer with sticky consumption coordinated 18 | over group membership protocol built on top of kafkaclient library) but that 19 | was just to make sure there were no design blind alleys. Next steps will be 20 | partitioned production and mtls. Consumer work will come after that. 21 | 22 | Get Started 23 | --- 24 | Read the documentation for the "batch" and "client" packages. Everything is in 25 | [godoc](https://pkg.go.dev/github.com/mkocikowski/libkafka). 26 | 27 | 28 | Design Decisions 29 | --- 30 | 1. Focus on record batches. Kafka protocol Produce and Fetch API calls operate 31 | on sets of record batches. Record batch is the unit at which messages are 32 | produced and fetched. It also is the unit at which data is partitioned and 33 | compressed. In libkafka producers and consumers operate on batches of records. 34 | Building and parsing of record batches is separate from Producing and Fetching. 35 | Record batch compression and decompression implementations are provided by the 36 | library user. 37 | 2. Synchronous single-partition calls. Kafka wire protocol is asynchronous: on 38 | a single connection there can be multiple requests awaiting response from the 39 | Kafka broker. In addition, many API calls (such as Produce and Fetch) can 40 | combine data for multiple topics and partitions in a single call. Libkafka 41 | maintains a separate connection for every topic-partition and calls on that 42 | connection are synchronous, and each call is for only one topic-partition. That 43 | makes call handling (and failure) logic simpler. 44 | 3. Wide use of reflection. All API calls (requests and responses) are defined 45 | as structs and marshaled using reflection. This is not a performance problem, 46 | because API calls are not frequent. Marshaling and unmarshaling of individual 47 | records within record batches (which has big performance impact) is done 48 | without using reflection. 49 | 4. Limited use of data hiding. The library is not intended to be child proof. 50 | Most internal structures are exposed to make debugging and metrics collection 51 | easier. 52 | -------------------------------------------------------------------------------- /client/producer/producer.go: -------------------------------------------------------------------------------- 1 | // Package producer implements a single partition Kafka producer. 
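//
// A minimal usage sketch (illustrative only: it assumes a broker reachable at
// localhost:9092 and an existing single-partition topic "test"; error
// handling is elided):
//
//	p := &producer.PartitionProducer{
//		PartitionClient: client.PartitionClient{
//			Bootstrap: "localhost:9092",
//			Topic:     "test",
//			Partition: 0,
//		},
//		Acks:      1,    // wait for the leader only
//		TimeoutMs: 1000, // broker-side produce timeout
//	}
//	resp, err := p.ProduceStrings(time.Now(), "foo", "bar")
//	// on success, resp.BaseOffset is the offset at which the batch landed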
2 | package producer 3 | 4 | import ( 5 | "fmt" 6 | "time" 7 | 8 | "github.com/mkocikowski/libkafka/api/Metadata" 9 | "github.com/mkocikowski/libkafka/api/Produce" 10 | "github.com/mkocikowski/libkafka/batch" 11 | "github.com/mkocikowski/libkafka/client" 12 | ) 13 | 14 | func parseResponse(r *Produce.Response) (*Response, error) { 15 | if n := len(r.TopicResponses); n != 1 { 16 | return nil, fmt.Errorf("unexpected number of topic responses: %d", n) 17 | } 18 | tr := &(r.TopicResponses[0]) 19 | if n := len(tr.PartitionResponses); n != 1 { 20 | return nil, fmt.Errorf("unexpected number of partition responses: %d", n) 21 | } 22 | pr := &(tr.PartitionResponses[0]) 23 | return &Response{ 24 | ThrottleTimeMs: r.ThrottleTimeMs, 25 | Topic: tr.Topic, 26 | Partition: pr.Partition, 27 | ErrorCode: pr.ErrorCode, 28 | BaseOffset: pr.BaseOffset, 29 | LogAppendTime: pr.LogAppendTime, 30 | LogStartOffset: pr.LogStartOffset, 31 | }, nil 32 | } 33 | 34 | type Response struct { 35 | Broker *Metadata.Broker 36 | Topic string 37 | Partition int32 38 | ThrottleTimeMs int32 39 | ErrorCode int16 40 | BaseOffset int64 41 | LogAppendTime int64 42 | LogStartOffset int64 43 | } 44 | 45 | type PartitionProducer struct { 46 | client.PartitionClient 47 | Acks int16 // 0: no acks, 1: leader only, -1: all ISRs (as specified by min.insync.replicas) 48 | TimeoutMs int32 49 | } 50 | 51 | // ProduceStrings with Nop compression. 52 | func (p *PartitionProducer) ProduceStrings(now time.Time, values ...string) (*Response, error) { 53 | b, err := batch.NewBuilder(now).AddStrings(values...).Build(now) 54 | if err != nil { 55 | return nil, err 56 | } 57 | return p.Produce(b) 58 | } 59 | 60 | func produce(c *client.PartitionClient, args *Produce.Args, rs batch.RecordSet) (*Response, error) { 61 | resp, err := c.Produce(args, rs) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return parseResponse(resp) 66 | } 67 | 68 | // Produce (send) batch to Kafka. A single request is made (no retries). The call 69 | // is blocking. See the documentation for client.PartitionClient for a general 70 | // description of how request errors are handled. Specific to Produce requests: 71 | // it is possible that the batch was successfully produced even when the call 72 | // returns an error. This can happen when the connection is interrupted while 73 | // the client is reading the response. This is an edge case, but possible. 74 | func (p *PartitionProducer) Produce(b *batch.Batch) (*Response, error) { 75 | args := &Produce.Args{ 76 | ClientId: p.ClientId, 77 | Topic: p.Topic, 78 | Partition: p.Partition, 79 | Acks: p.Acks, 80 | TimeoutMs: p.TimeoutMs, 81 | } 82 | recordSet := b.Marshal() 83 | resp, err := produce(&(p.PartitionClient), args, recordSet) 84 | if err != nil { 85 | if leader := p.Leader(); leader != nil { 86 | err = fmt.Errorf("error calling %+v: %w", leader, err) 87 | } 88 | return nil, err 89 | } 90 | resp.Broker = p.Leader() 91 | return resp, nil 92 | } 93 | -------------------------------------------------------------------------------- /api/api.go: -------------------------------------------------------------------------------- 1 | // Package api defines Kafka protocol requests and responses.
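//
// For example (illustrative), the Keys map defined below turns a numeric api
// key into a human readable name, e.g. for logging:
//
//	name := api.Keys[int(api.Produce)] // "Produce"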
2 | package api 3 | 4 | const ( 5 | Produce int16 = 0 // 1_0:5 2_3:7 6 | Fetch = 1 // 1_0:6 7 | ListOffsets = 2 // 1_0:2 8 | Metadata = 3 // 1_0:5 9 | LeaderAndIsr = 4 10 | StopReplica = 5 11 | UpdateMetadata = 6 12 | ControlledShutdown = 7 13 | OffsetCommit = 8 // 1_0:3 14 | OffsetFetch = 9 // 1_0:3 15 | FindCoordinator = 10 // 1_0:1 16 | JoinGroup = 11 // 1_0:2 17 | Heartbeat = 12 // 1_0:1 18 | LeaveGroup = 13 19 | SyncGroup = 14 // 1_0:1 20 | DescribeGroups = 15 21 | ListGroups = 16 22 | SaslHandshake = 17 23 | ApiVersions = 18 // 1_0:1 24 | CreateTopics = 19 // 1_0:2 25 | DeleteTopics = 20 26 | DeleteRecords = 21 27 | InitProducerId = 22 28 | OffsetForLeaderEpoch = 23 29 | AddPartitionsToTxn = 24 30 | AddOffsetsToTxn = 25 31 | EndTxn = 26 32 | WriteTxnMarkers = 27 33 | TxnOffsetCommit = 28 34 | DescribeAcls = 29 35 | CreateAcls = 30 36 | DeleteAcls = 31 37 | DescribeConfigs = 32 38 | AlterConfigs = 33 39 | AlterReplicaLogDirs = 34 40 | DescribeLogDirs = 35 41 | SaslAuthenticate = 36 42 | CreatePartitions = 37 43 | CreateDelegationToken = 38 44 | RenewDelegationToken = 39 45 | ExpireDelegationToken = 40 46 | DescribeDelegationToken = 41 47 | DeleteGroups = 42 48 | ElectPreferredLeaders = 43 49 | ) 50 | 51 | var Keys = map[int]string{ 52 | 0: "Produce", 53 | 1: "Fetch", 54 | 2: "ListOffsets", 55 | 3: "Metadata", 56 | 4: "LeaderAndIsr", 57 | 5: "StopReplica", 58 | 6: "UpdateMetadata", 59 | 7: "ControlledShutdown", 60 | 8: "OffsetCommit", 61 | 9: "OffsetFetch", 62 | 10: "FindCoordinator", 63 | 11: "JoinGroup", 64 | 12: "Heartbeat", 65 | 13: "LeaveGroup", 66 | 14: "SyncGroup", 67 | 15: "DescribeGroups", 68 | 16: "ListGroups", 69 | 17: "SaslHandshake", 70 | 18: "ApiVersions", 71 | 19: "CreateTopics", 72 | 20: "DeleteTopics", 73 | 21: "DeleteRecords", 74 | 22: "InitProducerId", 75 | 23: "OffsetForLeaderEpoch", 76 | 24: "AddPartitionsToTxn", 77 | 25: "AddOffsetsToTxn", 78 | 26: "EndTxn", 79 | 27: "WriteTxnMarkers", 80 | 28: "TxnOffsetCommit", 81 | 29: "DescribeAcls", 82 | 30: "CreateAcls", 83 | 31: "DeleteAcls", 84 | 32: "DescribeConfigs", 85 | 33: "AlterConfigs", 86 | 34: "AlterReplicaLogDirs", 87 | 35: "DescribeLogDirs", 88 | 36: "SaslAuthenticate", 89 | 37: "CreatePartitions", 90 | 38: "CreateDelegationToken", 91 | 39: "RenewDelegationToken", 92 | 40: "ExpireDelegationToken", 93 | 41: "DescribeDelegationToken", 94 | 42: "DeleteGroups", 95 | 43: "ElectPreferredLeaders", 96 | } 97 | -------------------------------------------------------------------------------- /test/mtls/ca-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCm1SH8x/jFJ2cB 3 | O/0IKayjv/stB7JMKfrP8G80uuGvk6+kCUl15Bz7y2+wazwSTphDAZfC2xHWNnMd 4 | dAUATVEZEFSA2RkHhYhx/n3S4TBpOYDhbFJmqWZYKSxdfy+p/cMdu7uhCqJOH6B8 5 | Axsfpeim0HdS5a8nWMUScg+wuSaawNNZT+6Gpa4FYWvI5Lo4OJNcn0DbrEkkO/pa 6 | 8J/Hg5OKSmBAEKzCWLNcL5o28yRlDerU3UFt5W9gxuCnXC8bToq4n7KrTZBMDJzt 7 | 9yZ05FHxdoErCeNdPAVkU8QnAbotKAosBbxnxgDDFPmZq5QfQ6fadrt6384X6u/r 8 | k/cQOS7rToIUdTDkVqqcp8o48T4t8IvFnXmXWkGtf/L57waLOeuG2dKlUMbvmhtU 9 | AJWZo9qZgIleigRlwUb/yaAVD6lSFVHV6kw4jzvpaZOYbbUCaApNLT8JzajZac+F 10 | OWDcc+tCNCOBp2+Pi5eT/d4D8DVYWNEH2bGrSMj9gIpJdLk0gSfT+4fV5QO/wPME 11 | gZxNQG+lWYlwFW04xyksp6g8FEC/dUBF15mCjM1oifd8oaoakbiEOtIg9Ff+ITMm 12 | eGa2BsDQ/LuN+r7JZX9e9JaI4fFrJYoPqsvb62vq+69AAKGtNRx2XEmOL97r1HV7 13 | bW+K8Phjp/s2XxIm0RN+gfGnbHU5AQIDAQABAoICAQCeUep0CY1iA3dzq2r/gRTt 14 | PLXvULt6Heh/xoqx/ptH1J48y/djojuDE9cFE26pbN5/0BYl+2Ec6QUrMTvBwp2w 15 | 
bFfVArEG+0i17S2Yns0jPke5JOO5uw59oW0RuS4RYtjz8YEoXYATJV5V8VifGwIC 16 | tBhYmdEzRMGYNJgBPGOPPmgS/JC+RKI1RvNTbQPvcHkfz51sGj463ZRDEtB+NLYh 17 | 6V0XzvszX5uuYxo1BV5HbAKrSODbmnys2W6+Q/QMNOfeNucGO5AxK1MNXJqpGTdO 18 | Lwh7QGH8y4XOZNPjtHbqfXHRvwvBaxy0BMatdy8U0E6NOlnGTHc3KWPPKtauFaYY 19 | bOpWXHsSCh+P3x/uzajQSZf/VNq1qxFZql4HCbRv1Fhvg13Mr64RB2+aMMWs+YDM 20 | JemxACmDJ4U5kVLFYX9BERPkR1V5syg/BlXmxMYV14G41zUvbNcLjbvD64AsTNyn 21 | F4aktFV3XMIdqQbgnCRg6GWkig6goA1YYwCqH09E0JUeDlTcGctwlSLOXHa96PJ1 22 | pF0X5bPNpnG3S3aiE1Li8oLwJmKBtGVFxBsa8UUj8VtmO0qWsjCKF6DDoBME1u82 23 | ZSC06Ggtf3AokrKXNTMjLIpgonWtrg9+83RCfDXMPK9w+LkjNR4krpOQJhG+q0Gz 24 | XN5DPtTPbdgUPnYJ5ECrMQKCAQEA2Sz9J+fbU8hE9wl+QoiV4sdG61Pu7hs1jebD 25 | ni5DDbm6iE1E47muz56fQzblaQhvXLEDHHALvHZCUIiVki+/mihyU+wdjqpiWzNK 26 | NavsuHYGHcjBhJDSMlWiBA3nuJgYIkJy6aminvYuxfatJqNLPoNh/9Oouby5vBb7 27 | TJG1fXoHsO2tg/omn5E3/H9/P/FtDB0JunY4J+36JNjP9GpZg+CsPBO5O8e7KJEU 28 | lC4McL3mBLKfJjn6TZQ/efkSH2t6ntErgMJ42YWFmCY8pNOPvtgSDngFb//JhKNd 29 | D0ipblB0nZENNrV+2CZrBx31MeqOvVUiCXDkyPJLFuW0F7Fs7QKCAQEAxKgyQ42C 30 | rolGnAMUL+xT2biN1w6b6ghLK6F5C4BGZCY5URVZdItXoTpIvKErbjzSRsJzc24/ 31 | GkvfUYRefUMpZIXL8emzyh+bOSXrsLcj/tnkzWvbhLk9AAiQufXo4Htu+X/2mHzc 32 | F5oQC4QqEZaCQ3vbVqMqdFmpAR1y3YrGVBbqVoTbz4GgJF5pz9q5ugnPkv8yCf3o 33 | ckjknni2RR5FOK3eAWE+Av7HHyqeQOVUqpp049UV+1EEkixlc/fnhmu+pU+bQVJG 34 | JppoAEJnANVLd28E4WzX4Otc7ZnUl3kYP6aBXy7N4TIcnQrE1e0hNCvZRwbG9Rmp 35 | Dtp/Btk4hPnN5QKCAQASFHy+5b6Mqw+SwWeRgm14A78TuFkb2YgL2DpQ4OkUK3Is 36 | BsLFcp/rCPmZTuiqZdlfjh0EMQ/JEM9UhQpusSryWouqEO4lAmpNzWdQMI/0i17W 37 | +++gaa4WIqrqS34PEBpyP7XxXf373eLm+DJSFSYefjGaarSRQfT0g7VIGplRaOdb 38 | A25+nkS45A4VJ0gguqipaYF11IG4UUeR6brmTjRFll7AYaP6JhaeufVMoMjIWgbc 39 | PUKinBEKhF0EeofXtcrvjdphTc9hzIvirsw9rJ869CIMslFBkbnlIPSOYS450Njk 40 | hT7kXxfARvKCFSxki3J2sK+0uYFSeVlm/piPoGXBAoIBABlgfwP6mdd9/W+achtx 41 | Cqz4xMzwpKwBn/mkX6NdTTgVOqY89As3A/rZM5Or1JfoH1ZkN1O48UmqasnxI+RU 42 | K14+ab/w4XN+uqAYRZ7LM0jqwzzC41PwN2/uqwZkNkXph7uIiMDKs/gTuGhUCL22 43 | R17/96Dpc0wKWeW78Buokck+H8VEZb0URB8NU+gvmHHqXC/i4Ikri7hbsazFdPfc 44 | XSBgR1q0OrVWPUnHTR6guVk+j0DrOWfx334hXoIlhmnxM2vPpwA55rWKjUsjRnl0 45 | DMX/vIh+bCkpFhfPFwDo7nkgOmeW+ZhKyCh3wEi+zd8WnXYt6LSuOAOjrGqs0z9N 46 | oQUCggEAW1VNO1FMkIo7oHws7j4EXYOMJRjmtnhqTbUzL8Bi36O7wyu789sg9dm1 47 | VuEIx96EWz93eAfrA/XbHSs9c4ChnhrMsO5WvD/9kSwNCw9Lf9HdgFjDN+Qx7Bis 48 | z/HsLaXki+b9c0jrOHNOiG5EH7cWsm/RVdTV2iyTnco5ou9GRdG30wjyP+8mVgml 49 | kHN/VC9JIjrId5uGIZKNO5M7WxwzZ5Zs23np94yb9BVJByGKSuWPPOjp0mmUrRjf 50 | 9vf0UWh5otr1g5Q+pPBK7sAWUrDGDHNLBfKoVVvHPV5TT1Np3NWmoTa7YLYNf33f 51 | lrNsBeGrTm1wQ3jTnfXFiVvSeIstWQ== 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /test/mtls/broker-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC91orfOWOEMXkm 3 | mFmwAzR8TwCoY6b2E4YAfdas2dge1o0FZXuXuMQAAd9Ojbgw2P/G4Yb41Jif5Bmd 4 | 7pKUD3EP1NqgpsXl+9ArnJCR6ot1Q5W+tudBmSY7yVc9NBDHnxr/DWgiPcQ9OCLB 5 | KIUNedcU98NUgWXCx6UeM6yrpyzlv7Du3LouMTmZB4IE7MTpwSzWfD6tHs4JnFy5 6 | UjBVTdTYGuFsmfTuU931oqur2CF17oiSUI3nPcX37k14vVoPfwrASMBfSVj2VnEK 7 | pB/7MjWns7kdgbDEo+dCkSCeUdQTPBZ8r9OfjwzgUFkcSUud35fZV8AlLwOdgVmn 8 | 5/7UaQo5+SwPsCSFgL4VWt7Kg05V4NinJcWj55CS9e3DI9+IMk6XuDia5Ws0Ba38 9 | Zssj3jkqYnU20TdLKLpp9tr4INlFDVpP1prbVL3qZJMyQtn5YaqO1QgyNBTniYe2 10 | fmSSqOt9LPQF24B2NlW5XlV/LKYuhVP0Mo0nB5MOp2+OgfLV6pMGh64XgHzXPy/9 11 | lV8UjgFiZ9r1/mSBqH2pi4AShNyIsU9V6pij5q2dN/NQuIFvbi9KuGuTqmoQjoEp 12 | 
wiSEAdW0C9lKglBmbLkqDGSo2ol/D/u/NYZnAmVRX2yHBcOmCB4YwAc24VbqUl+1 13 | dzrF4JlD4kthh5SsocPprfD8pBSABQIDAQABAoICAALy5DFXeN5Cznj1AN+lDRmp 14 | OH9lHODRqNE1IzKIJ71C4+nji6ntqs29ObTEqsnHdSGnuyr+UmiZaHR1kj1OiBkQ 15 | 21yHeqd549/pjK1XkVb+98KAoNL6CAgBtqMyvdf79rVjheySPQweZRvO/1IYRGkE 16 | +DnF0MjXxQ7VCqeX7zCaa6sqLpItHZHs9e4Rp3ViFMo4H9kck2K+1Qnj5vBMRzQc 17 | JshDE/TnxJjeMAB5WPu1+tUa/zlKUlrpjraDanS3KnmWJ7bTMtiQuyzJI94JB6BV 18 | QFje/NnxGdSE4JbU+pBZ7Jd/O7BMzoxlcxznXo9TO86aeFebhVja+4OKLQErqIb7 19 | GSaBaQav2rGPW6izmXk4Kj/G+ajhPkLcDkKMFEr1CTquPt+7QSQNQAmLgt5QeQFJ 20 | UBHHVM43QWtpnxjCvBTYfdN2wn+dxi4Z+BtztXjCIjWdpyMkZdBX+vADRVcB+XLk 21 | YcFZblGgh6rww5a3hiv3FAVp5wonXgQJv4WjVfBgeoZiveITonEIZS5XVCIOpmA/ 22 | n60ex7zUF7Zq9g58aHlzwHaPGIFTugkjWYBjO7f4BYxBLgoNqSU/YowXOCXGlcv8 23 | ysyhbz29KA13k2OTwobT9nkhHe8TyM4IlPolO4b3WkmgBK633b7nrwTZXKCZ4nT8 24 | n6eOnH/m8eWRDXjddkR5AoIBAQD7t9+cJwCCa8lRKox1fZuDXEif9WVkFUlu1VXf 25 | xU5T9VH+zkuMzR28UbK/PWUbNgRyabT3I2deQGi/fZnqyApuvx1ExcMNuI0mQiy8 26 | gkLPnv72Pb7Y2y5xf/RB4SFH0jeeKIcgE85gx5MaCikqYQP30XicXdyo4zQLVaW6 27 | 0cZjT4etOOEjy+Frf68GEU9TkrSaKpPLezUad1BkvOZ1q+ZBOoGVYZMtqBWfR4IR 28 | OICwxe4/1+3DOpgjPyezwr+SzTjQQZnK4J2m1fI4gSpFmG9X4NNZU5DfAWSsZDpt 29 | 9aMe8dpNRh5QJYgK2jB22cVk3YrZA4m5t0PvJ+r0PVVIo1AnAoIBAQDBETT3cKt/ 30 | vBAsmpO9zYEewEgVzEHOov6CSZBjEuyVX+HFGFXaKJJqiFjbQNaQ44XrrjAFr+rR 31 | G0hfeO8aMYXq4qCRVsK1N7Cx8Rqx3eHsVQ4lA5YtN4ZZF1oS7oAjHxbTiSx0FuN4 32 | r/rnnnPn5LqVj2+I/n2RcSKXLq856EzIa4OQdQGsh40A5nChyyidZ2lnG1L+5F4z 33 | W7iKusLTmckbm4qCtzXh66lyygzVh3+nZ0eqzCvDLLor8CjXgDJlFOk65EiMNyi8 34 | Yk6Nw4TUyi/I4M7K1LyPDPXoFzRmGGXFJfZzLgzchK3dazt5uIYjc5UOJhmflFpA 35 | fbCHphhJDR3zAoIBADbAXPwLI7o6lbR468pWgvAEr6tTZjVpx+o6rw4YTZxHfQlO 36 | d/JtXy1qG9YrFYXgrwDv6zPiWIaQvfgA6uy9jD9ghI5D8dRXMySCOlbagovEirKm 37 | 5Y8nxadcbJWz0Vhc6koViKa6iCZW4rFESoX0SjeqMXDZnbiCivx4Ka9EF8HcN6e+ 38 | +oGdxC9su3Y3Z6jG/U+XOUSGTRuvniX7TKmFtXS0tCD481l3+RQHKsxyHNPWiacL 39 | YaUYZqUsBIe+MYtN5jCuRDRzvPbgDGqr9bTjnjxgI7iHS7imBe7VcjwetNdzRceP 40 | G+LU9w1dPjNr1NrGsX/YnClwKm+noBx2Bsng8/sCggEAFzbCb54UgN9T8okDiqbT 41 | B6eHA1fz8ayXfYLq4X69A5jrxI8Qg5KtSDHsvTAmnHU+/P+MtqJYUZw+L3yTA0tB 42 | FuT4+YC9GJ0BmeOrmgdbytbPRJQR0o4CkWPFaNFUhuSOEhFETxEReMlwGblrpk5S 43 | QPlmJ8qxvnMj/ZmXaBcOZGrI/u2SrfP+eEU89bc/wu1UNxmJ+SvsR7CXl9as5lRv 44 | Yepp1R3CVM/JKAwzTtlj9/TrIRY7jNQr7myJ/J9lb9H72SDpuDJJq50GZPVFW3CU 45 | HkspkinSIkEksCtaGYyFcBcIcwDuevVZEm56UEBnoIudOH5MRmvIDkkOmMDX2rDB 46 | EQKCAQEAnksEwQbQuXLBIIW/WV4IggVH529urPQlHRR/3gOwZiriqCVkf2hH927v 47 | vI36gt5AB+Tk0L8CMh1T+uHC7hDnv9n0T2BW0Wj9i7M6+oy0P1ZYmD6DjPhlmJui 48 | R2WkLZz+oo1n5OMAu/Jh7Fr5YtJ6UNcziPw2BBHmlHAQh3xNW93UqWN1usoIiXlH 49 | xxH28f9tfi6UDQ6zqDTl4E0nStRk+r9UU8+Gmknyh8STEN7P1VqWRrQTaplDk+5e 50 | H3OsrmmJz7LbPvXLQjAp9pYZf1g+56MHLMKyUSbFZv++9O1Corg7uJ9W6DkQWd/4 51 | cOQuXIvbmzd93FAbzrcQYiNEpF411g== 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /test/mtls/client-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDNswTuUJ0bSvgX 3 | xAHdIVPVc2+xCaRwMNGL36iSnkfEpLab1TgLcipMyTZ8SAN84onUAR9KIi0yCI4u 4 | yr0sGXVyTE7y9PCmjT0FmXC2fki2D30jo+mPVjj41ommBe8qWQO2lLkt8lAuEfIL 5 | XHPu0pWLYce84UgIwjBXT7E4JnNzWXIyVS/OcmYw6atu6ykhSKNxDgMl2iJ3zrdr 6 | +Arlcs5pDDflTo3rjJ7TowkMKOt7Ba1ZDxX2CfBKrX9aXxosXVJ3ko38eQaG8lbf 7 | sMLeEa5CyNBMwEoa17NHPVx2QEw6uVaolPB4xO9pxTB5gbn70kmU5/03FwIwYfCN 8 | 9YDXPoSrSAf3WuHvZX3XPMURIuPdHA4p5bj2r20wBYKgVjNAlOh9XsSQhyqK7Yup 9 | 
FixYthIr9a5Vdbd9vuVYXy3ATEiWUmXB9CbrM4xbyPHUREn8RcC13t+NZkj8Kwoz 10 | jwxThr3d1v30dv4jBDbciU/xgk54riJ+cftEt4BagE9BaMUNeCa9KFyUhgBjjGmD 11 | okGNjWmCkMaVkfbsH2h8XbGYYI40thA4QMWuUyejpMWLJu3bIVMIPIBdUXu8v9Jw 12 | OOiBMyDnyI8ugbDF4HLJIyQy8fKDaJUgS4rniElFF8tUIIEKQKHGi5NKzBg/VoR3 13 | +gvwElO0y/SIvCT0J6Gxc8Q95Lqo0QIDAQABAoICAEiRvM6a8CJd25L+2q16AYqP 14 | lDsALNxLzNGtEVrQrn8ooSfvHDulhljar/c+rMRVY8zArJpJ3moFbKwDaKPzQ2UU 15 | mNHMKk6IC5w7GvG3Mc3RPxPg0xh3kdfwUFWbSFpHVzEF3SLhlvn56MurTVdXQd0P 16 | nRj83Z4BbG6RNfOaVSa/yrMJLLmH0Je3CH00R6lvaAINsHydLYXZDwrvUmDKlRmo 17 | btveT+FnFe2SWjHJCfK3+QUvdk78CKM//GsUnDZEokB/GsqUpAHd41o9kTIpSLJV 18 | CG/bcwlvSdd7RXCOlJYvJuyIxyHEULafE8/6PXQjJ0R6Z/IUkvggxW4/y7mkW8cy 19 | 8eOCLolI0nyzoNjpXuANOI6HmAMbv7dXhzyuokv0bXI8CcAVgVGrsuTS3X9Qw0dG 20 | HWdy/iU+o5h5ljqVO4C75RFTHVTjAQ9Faw9+rPZ3ow+HxR9FsGN1To9YLuJm01TD 21 | S+jp1wBPSfR66PKOBSOMkJHXEiqWQImD8Le/4xJHkIY63KDXFP9GYzyK47WABhOF 22 | T1lsjRW25Y0GUNy8RhsYTJt90ZqY24Hyp7FnMfkIixLAiMkqO/0vnYfUAaqLBxr3 23 | RcQg9bixypviR1uq8AwmteoybuYcW4PIqI0/Qfd8CJkO1RozfL8vi854fly+zRWr 24 | Z8iFMRJsJfo58m2fitdxAoIBAQD+pTRpXjNCXbKfyAHNUbGHEnB7UUnYyjLq+sVD 25 | Cbyt99X1ZvEIvPOdjGR3Dg9cHIyFzL38uHPZjJP3U2Y9QETVfWi3F1px/Q5Bz3bX 26 | sJa4CynanHK/G+am/ON3j/CH8NeoZKgzwuMmdzSDDW6sI1lYJlrZujSKX8417YA+ 27 | KhO4pkTb3RqLp6DwyKSPFz/XcUhQI0aSIZNtbhJCUjpSIBLoYM2S0CHx1J/oNT3d 28 | ldzL+7M3dxwZmDvJ7THDYkZvBtc0C1SlwDbnkZkLrj9D1qJv+LMvVddk3i7RVIbn 29 | E8I+aWutTBtoZJq5sOhqEAyup3sLFnCvNpm1NgqT8m7quVm9AoIBAQDOyyf3GwUk 30 | qsh69SX8TF3TlCWRLHyOJiYXFLEOrZxobME3KiLRdwt1ouXpqe1LXBhwyu6HYi0m 31 | 8K0kGC1Pl+zIMIlsMus+zxYvDSP5O6hx7y0CEty0zHSYlziFm3iOWs7bwqZyxr8M 32 | l5ayrqXhY1GKWuPsLmY2rlog1iBswKnYGGeb18IgotnuyTJycElZ8TwnEt0iVCRm 33 | yLZZ+dW77uAV5rxVCZLGWIIwsVc1WrFMZ5reC4cjkcwlIe/Rh1pTPGCmZkt++rGA 34 | KtUih77XIm67IzV9oWBPKFeh59EGMYnjX0L4QGttaaELSy87KWoXfKd+1s8sdUWs 35 | UALzkKoeGzqlAoIBAQCsb3y1WWfGqiJaVpr5yTc5K0BmEV67Yfjm7BeGVOKiv6/w 36 | NNxFuYYSis5BXJEEJAT4WfPRXap9h9du5NfX5Fx4YSr2yOajR+ROpklot5joWg7m 37 | jYiaZy9ipt71yM/tjibLThYkrvUYyCIUGJV4FZvbuGVPCOupREUkeYadEes238jD 38 | Rc9DAKlYQ9ZDW1AM+RYpxil0rS3jLWVJ6dq6YCPNnje3Eh/aXcxG9z7EfUX+D5yF 39 | k6/AmTjrfSZ0k2j0qCI0iPOyJh5H421K3pzSuFZZEoVsKWnpURdNAzsy4Uto7gRP 40 | Xrk0kOPBmM1ZfTLJVnpYwMJfs86USlsRYlq/sfHVAoIBAQCCASQ57Em6eIebDV15 41 | FMVzy1Imx1Hyx7bwkbiSIsEOwThjJuFG0FFq/iMOWB7vXpGa72kvwZ/jODGRXIW3 42 | 4Soh3Km4VPahwO0QXXF9MW0/W37vK86G93Zhq8gD7u7Gh/4+GEwuIhZfozlBUhzE 43 | 6nyLv4JErBLkU44j0JoH5MOiMA2K4wSPIfJidSh6226x1b/cTLaq6z0LRSmmvTMK 44 | eC12d111FJSqj2AhnovV5hNKlmO9LoAh83nk6kXrcu1tIKseUXcq+A35JnRhxfdL 45 | JsF4crUhKv/yI8mb5rH489HdGLlweodO/LYa9IRX7DxfUaW3TvJl38ASiSah2xOn 46 | 47RlAoIBAGtI7Fgk3eYp8Jko2/aqYLjmS9mts4Sb/uwSV1lqGSKXYmFwK2wr2Mx+ 47 | 1qLo3YZNvM4UtqeSZm33zdhiztEA986yWuNXmzcbuY3dTVWd7GCN5S7s5YKENlHg 48 | eqtrxHG/JtQFfRfrcP4etucKx5S+ITjUi8NFeonc/sHnCA5gCveeTGFqqeeuWXLK 49 | A2KiK5TEevs9+E74Mjj9LTHHFz9vV6xyQpNe/Xmvo/0v2QT1HxFyAOcy2J8pKBrL 50 | nkBZB4NckwC9dEWzVNa4hri+sl/F3yxFSu9+IWH6ajU+tbFzNggjrm3NyUCJaCfg 51 | hSinI13bploG0ZKF8zHV84OPOf0UwsM= 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /libkafka.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package libkafka is a low level golang library for producing to and consuming 3 | from Kafka 1.0+. It has no external dependencies. It is not modeled on the Java 4 | client. All API calls are synchronous and all code executes in the calling 5 | goroutine. 
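
For example (illustrative), an individual record can be created with the
helper defined in this file (Record, Batch, Compressor, and Decompressor are
aliases for types defined in the "record" and "batch" packages):

	r := libkafka.NewRecord([]byte("key"), []byte("value"))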
6 | 7 | 8 | Project Scope 9 | 10 | The library focuses on non transactional production and consumption. It 11 | implements single partition Producer and Consumer. Multi partition producers 12 | and consumers are built on top of this library (example: https://github.com/mkocikowski/kafkaclient). 13 | 14 | 15 | Get Started 16 | 17 | Read the documentation for the "batch" and "client" packages. 18 | 19 | 20 | Design Decisions 21 | 22 | 1. Focus on record batches. Kafka protocol Produce and Fetch API calls operate 23 | on sets of record batches. Record batch is the unit at which messages are 24 | produced and fetched. It also is the unit at which data is partitioned and 25 | compressed. In libkafka producers and consumers operate on batches of records. 26 | Building and parsing of record batches is separate from Producing and Fetching. 27 | Record batch compression and decompression implementations are provided by the 28 | library user. 29 | 30 | 2. Synchronous single-partition calls. Kafka wire protocol is asynchronous: on 31 | a single connection there can be multiple requests awaiting response from the 32 | Kafka broker. In addition, many API calls (such as Produce and Fetch) can 33 | combine data for multiple topics and partitions in a single call. Libkafka 34 | maintains a separate connection for every topic-partition and calls on that 35 | connection are synchronous, and each call is for only one topic-partition. That 36 | makes call handling (and failure) logic simpler. 37 | 38 | 3. Wide use of reflection. All API calls (requests and responses) are defined 39 | as structs and marshaled using reflection. This is not a performance problem, 40 | because API calls are not frequent. Marshaling and unmarshaling of individual 41 | records within record batches (which has big performance impact) is done 42 | without using reflection. 43 | 44 | 4. Limited use of data hiding. The library is not intended to be child proof. 45 | Most internal structures are exposed to make debugging and metrics collection 46 | easier. 47 | */ 48 | package libkafka 49 | 50 | import ( 51 | "time" 52 | 53 | "github.com/mkocikowski/libkafka/batch" 54 | "github.com/mkocikowski/libkafka/record" 55 | ) 56 | 57 | func NewRecord(key, value []byte) *Record { 58 | return record.New(key, value) 59 | } 60 | 61 | type Record = record.Record 62 | 63 | type Batch = batch.Batch 64 | 65 | type Compressor = batch.Compressor 66 | type Decompressor = batch.Decompressor 67 | 68 | // Changing timeouts is not safe for concurrent use. If you want to change 69 | // them, do it once, right at the beginning. 70 | var ( 71 | // DialTimeout value is used in net.DialTimeout calls to connect to 72 | // kafka brokers (partition leaders, group coordinators, bootstrap 73 | // hosts). 74 | DialTimeout = 5 * time.Second 75 | // RequestTimeout used for setting deadlines while communicating via 76 | // TCP. Any single api call (request-response) can not take longer than 77 | // RequestTimeout. Set it to zero to prevent setting connection 78 | // deadlines. MaxWaitTimeMs for fetch requests should not be greater 79 | // than RequestTimeout. 80 | RequestTimeout = 60 * time.Second 81 | // ConnectionTTL specifies the max time a partition-client connection 82 | // to a broker will stay open (connection will be closed and re-opened 83 | // on first request after the TTL). The TTL counts from the time 84 | // connection was opened, not when it was last used. Default value of 0 85 | // means "ignore this setting" (connections will stay open "forever"). 
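	// For example (illustrative; the value is arbitrary), set it once
	// at startup, before any clients are created:
	//
	//	libkafka.ConnectionTTL = 15 * time.Minute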
86 | ConnectionTTL time.Duration = 0 87 | ) 88 | -------------------------------------------------------------------------------- /record/record_test.go: -------------------------------------------------------------------------------- 1 | package record 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "fmt" 7 | "math/rand" 8 | "testing" 9 | ) 10 | 11 | func TestUnitMarshal(t *testing.T) { 12 | tests := []struct { 13 | r *Record 14 | key []byte 15 | value []byte 16 | }{ 17 | {New([]byte(""), []byte("m1")), nil, []byte("m1")}, 18 | {New([]byte("foo"), []byte("m1")), []byte("foo"), []byte("m1")}, 19 | {New(nil, nil), nil, nil}, 20 | {New(nil, make([]byte, 10)), nil, make([]byte, 10)}, 21 | {New(nil, make([]byte, 1e5)), nil, make([]byte, 1e5)}, 22 | } 23 | 24 | x1 := make([]byte, 100) 25 | x2 := make([]byte, 100) 26 | buf := new(bytes.Buffer) 27 | 28 | for _, test := range tests { 29 | b := test.r.Marshal() 30 | // make sure Marshal2 and Marshal3 are equivalent 31 | b2 := test.r.Marshal2(make([]byte, 0, 1<<10)) 32 | if !bytes.Equal(b, b2) { 33 | t.Fatal(b, b2) 34 | } 35 | b3 := test.r.Marshal3() 36 | if !bytes.Equal(b, b3) { 37 | t.Fatal(b, b3) 38 | } 39 | buf.Reset() 40 | test.r.Marshal4(x1, x2, buf) 41 | b4 := buf.Bytes() 42 | if !bytes.Equal(b, b4) { 43 | t.Fatal(b, b4) 44 | } 45 | // 46 | //t.Logf("%v %s", b, base64.StdEncoding.EncodeToString(b)) 47 | r, _ := Unmarshal(b) 48 | if !bytes.Equal(r.Key, test.key) { 49 | t.Fatal(r.Key, test.key) 50 | } 51 | if !bytes.Equal(r.Value, test.value) { 52 | t.Fatal(r.Value, test.value) 53 | } 54 | } 55 | } 56 | 57 | const recordBodyFixture = `EAAABAEEbTMA` 58 | 59 | func TestUnitUnmarshal(t *testing.T) { 60 | b, _ := base64.StdEncoding.DecodeString(recordBodyFixture) 61 | t.Log(len(b)) 62 | r, _ := Unmarshal(b) 63 | t.Logf("%+v", r) 64 | if string(r.Value) != "m3" { 65 | t.Fatal(string(r.Value)) 66 | } 67 | } 68 | 69 | // this benchmark is not representative of workloads where record values are 70 | // >100B. it seems like value len has the biggest impact (because of slice 71 | // growing). see additional benchmarks below. 
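// To compare the Marshal variants across value sizes (an illustrative
// invocation; -run=XXX matches no tests, so only the benchmarks execute):
//
//	go test -run=XXX -bench=BenchmarkRecord_Marshal -benchmem ./record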
72 | func BenchmarkRecord_MarshalSmall(b *testing.B) { 73 | const messagesN = 1e3 74 | msgs := make([]*Record, messagesN) 75 | for i := 0; i < messagesN; i++ { 76 | key := fmt.Sprintf("key_%d", i) 77 | val := fmt.Sprintf("value_%d", i) 78 | r := New([]byte(key), []byte(val)) 79 | r.Attributes = int8(i) 80 | r.TimestampDelta = rand.Int63() 81 | r.OffsetDelta = rand.Int63() 82 | msgs[i] = r 83 | } 84 | b.ResetTimer() 85 | b.ReportAllocs() 86 | for i := 0; i < b.N; i++ { 87 | b := msgs[i%messagesN].Marshal() 88 | b = b[:] 89 | } 90 | } 91 | 92 | func BenchmarkRecord_Marshal(b *testing.B) { 93 | for i := 7; i < 17; i++ { 94 | size := 1 << i 95 | b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { 96 | r := New(make([]byte, 27), make([]byte, size)) 97 | b.ReportAllocs() 98 | b.ResetTimer() 99 | for i := 0; i < b.N; i++ { 100 | r.Marshal() 101 | } 102 | }) 103 | } 104 | } 105 | 106 | func BenchmarkRecord_Marshal2(b *testing.B) { 107 | for i := 7; i < 17; i++ { 108 | size := 1 << i 109 | b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { 110 | r := New(make([]byte, 27), make([]byte, size)) 111 | b.ReportAllocs() 112 | b.ResetTimer() 113 | tmp := make([]byte, 0, 256<<10) 114 | for i := 0; i < b.N; i++ { 115 | r.Marshal2(tmp) 116 | tmp = tmp[0:0] 117 | } 118 | }) 119 | } 120 | } 121 | 122 | func BenchmarkRecord_Marshal3(b *testing.B) { 123 | for i := 7; i < 17; i++ { 124 | size := 1 << i 125 | b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { 126 | r := New(make([]byte, 27), make([]byte, size)) 127 | b.ReportAllocs() 128 | b.ResetTimer() 129 | for i := 0; i < b.N; i++ { 130 | r.Marshal3() 131 | } 132 | }) 133 | } 134 | } 135 | 136 | func BenchmarkRecord_Marshal4(b *testing.B) { 137 | for i := 7; i < 17; i++ { 138 | size := 1 << i 139 | b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { 140 | r := New(make([]byte, 27), make([]byte, size)) 141 | x1 := make([]byte, 4<<10) 142 | x2 := make([]byte, 4<<10) 143 | buf := bytes.NewBuffer(make([]byte, 1<<20)) 144 | b.ReportAllocs() 145 | b.ResetTimer() 146 | for i := 0; i < b.N; i++ { 147 | r.Marshal4(x1, x2, buf) 148 | buf.Reset() 149 | } 150 | }) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /client/fetcher/fetcher_test.go: -------------------------------------------------------------------------------- 1 | package fetcher 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/mkocikowski/libkafka" 11 | "github.com/mkocikowski/libkafka/batch" 12 | "github.com/mkocikowski/libkafka/client" 13 | "github.com/mkocikowski/libkafka/client/producer" 14 | ) 15 | 16 | func init() { 17 | rand.Seed(time.Now().UnixNano()) 18 | } 19 | 20 | func TestIntergationPartitionFetcher(t *testing.T) { 21 | bootstrap := "localhost:9092" 22 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 23 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 24 | t.Fatal(err) 25 | } 26 | p := &producer.PartitionProducer{ 27 | PartitionClient: client.PartitionClient{ 28 | Bootstrap: bootstrap, 29 | Topic: topic, 30 | Partition: 0, 31 | }, 32 | Acks: 1, 33 | TimeoutMs: 1000, 34 | } 35 | if _, err := p.ProduceStrings(time.Now(), "foo", "bar"); err != nil { 36 | t.Fatal(err) 37 | } 38 | if _, err := p.ProduceStrings(time.Now(), "monkey", "banana"); err != nil { 39 | t.Fatal(err) 40 | } 41 | // 42 | c := &PartitionFetcher{ 43 | PartitionClient: client.PartitionClient{ 44 | Bootstrap: bootstrap, 45 | Topic: topic, 46 | Partition: 0, 47 | }, 48 | MinBytes: 10 << 10, 49 | 
MaxBytes: 10 << 20, 50 | MaxWaitTimeMs: 1000, 51 | } 52 | resp, err := c.Fetch() 53 | if err != nil { 54 | log.Fatal(err) 55 | } 56 | highWatermark := resp.HighWatermark 57 | if highWatermark != 4 { 58 | t.Fatalf("%+v", resp) 59 | } 60 | if c.offset != 0 { // offset is not advanced automatically 61 | t.Fatalf("%+v", c) 62 | } 63 | batches := resp.RecordSet.Batches() 64 | if len(batches) != 2 { 65 | t.Fatalf("%+v", resp) 66 | } 67 | b, err := batch.Unmarshal(batches[1]) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | if b.BaseOffset != 2 { 72 | t.Fatalf("%+v", b) 73 | } 74 | if b.LastOffsetDelta != 1 { 75 | t.Fatalf("%+v", b) 76 | } 77 | if s := resp.Broker.String(); s != ":0:localhost:9092" { 78 | t.Fatal(s) 79 | } 80 | // 81 | c.offset = 4 82 | resp, err = c.Fetch() 83 | if err != nil { 84 | log.Fatal(err) 85 | } 86 | batches = resp.RecordSet.Batches() 87 | if len(batches) != 0 { 88 | t.Fatalf("%+v", resp) 89 | } 90 | if resp.ErrorCode != libkafka.ERR_NONE { 91 | t.Fatalf("%+v", resp) 92 | } 93 | // 94 | if _, err := p.ProduceStrings(time.Now(), "hello"); err != nil { 95 | t.Fatal(err) 96 | } 97 | resp, err = c.Fetch() 98 | if err != nil { 99 | log.Fatal(err) 100 | } 101 | batches = resp.RecordSet.Batches() 102 | if len(batches) != 1 { 103 | t.Fatalf("%+v", resp) 104 | } 105 | // 106 | c.offset = 10 107 | resp, _ = c.Fetch() 108 | if resp.ErrorCode != libkafka.ERR_OFFSET_OUT_OF_RANGE { 109 | t.Fatalf("%+v", resp) 110 | } 111 | // 112 | c.offset = -1 113 | resp, _ = c.Fetch() 114 | if resp.ErrorCode != libkafka.ERR_OFFSET_OUT_OF_RANGE { 115 | t.Fatalf("%+v", resp) 116 | } 117 | // 118 | if err := c.Seek(MessageNewest); err != nil { 119 | t.Fatal(err) 120 | } 121 | if c.offset != 5 { 122 | t.Fatalf("%+v", c) 123 | } 124 | resp, _ = c.Fetch() 125 | if resp.ErrorCode != libkafka.ERR_NONE { 126 | t.Fatalf("%+v", resp) 127 | } 128 | batches = resp.RecordSet.Batches() 129 | if len(batches) != 0 { 130 | t.Fatalf("%+v", resp) 131 | } 132 | } 133 | 134 | // test that when fetching the newest offset (where the offset is the same as 135 | // the high watermark), if there are no new records ready to be fetched, then 136 | // there is no error, and no error code 137 | func TestIntergationPartitionFetcherEmptyPartition(t *testing.T) { 138 | bootstrap := "localhost:9092" 139 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 140 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 141 | t.Fatal(err) 142 | } 143 | // 144 | c := &PartitionFetcher{ 145 | PartitionClient: client.PartitionClient{ 146 | Bootstrap: bootstrap, 147 | Topic: topic, 148 | Partition: 0, 149 | }, 150 | MinBytes: 10 << 10, 151 | MaxBytes: 10 << 20, 152 | MaxWaitTimeMs: 1000, 153 | } 154 | resp, err := c.Fetch() 155 | if err != nil { 156 | log.Fatal(err) 157 | } 158 | if resp.ErrorCode != libkafka.ERR_NONE { 159 | log.Fatalf("%+v", resp) 160 | } 161 | if len(resp.RecordSet) != 0 { 162 | log.Fatalf("%+v", resp) 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /record/record.go: -------------------------------------------------------------------------------- 1 | // Package record implements functions for marshaling and unmarshaling individual Kafka records. 
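//
// On the wire (as written by Marshal below) a record is laid out as: length,
// attributes, timestampDelta, offsetDelta, keyLen (all zigzag varints), key
// bytes, valueLen (zigzag varint), value bytes, and a headers count (zigzag
// varint; always zero here, record headers are not yet supported).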
2 | package record 3 | 4 | import ( 5 | "encoding/binary" 6 | "io" 7 | 8 | "github.com/mkocikowski/libkafka/varint" 9 | ) 10 | 11 | func Unmarshal(b []byte) (*Record, error) { // TODO: errors 12 | r := &Record{} 13 | var offset, n int 14 | r.Len, offset = varint.DecodeZigZag64(b) 15 | r.Attributes = int8(b[offset]) 16 | offset += 1 17 | r.TimestampDelta, n = varint.DecodeZigZag64(b[offset:]) 18 | offset += n 19 | r.OffsetDelta, n = varint.DecodeZigZag64(b[offset:]) 20 | offset += n 21 | r.KeyLen, n = varint.DecodeZigZag64(b[offset:]) 22 | offset += n 23 | // TODO: remove copy 24 | if r.KeyLen > 0 { 25 | r.Key = make([]byte, r.KeyLen) 26 | } 27 | offset += copy(r.Key, b[offset:]) 28 | r.ValueLen, n = varint.DecodeZigZag64(b[offset:]) 29 | if r.ValueLen < 1 { 30 | return r, nil 31 | } 32 | offset += n 33 | r.Value = make([]byte, r.ValueLen) 34 | n += copy(r.Value, b[offset:]) 35 | return r, nil // TODO: errors 36 | } 37 | 38 | func New(key, value []byte) *Record { 39 | return &Record{ 40 | KeyLen: int64(len(key)), 41 | Key: key, 42 | ValueLen: int64(len(value)), 43 | Value: value, 44 | } 45 | } 46 | 47 | type Record struct { 48 | Len int64 49 | Attributes int8 50 | TimestampDelta int64 51 | OffsetDelta int64 52 | KeyLen int64 53 | Key []byte 54 | ValueLen int64 55 | Value []byte 56 | // TODO: headers 57 | } 58 | 59 | func (r *Record) Marshal() []byte { 60 | var b, c []byte 61 | buf := make([]byte, binary.MaxVarintLen64) 62 | b = varint.PutZigZag64(b, buf, int64(r.Attributes)) 63 | b = varint.PutZigZag64(b, buf, r.TimestampDelta) 64 | b = varint.PutZigZag64(b, buf, r.OffsetDelta) 65 | b = varint.PutZigZag64(b, buf, r.KeyLen) 66 | b = append(b, r.Key...) 67 | b = varint.PutZigZag64(b, buf, r.ValueLen) 68 | b = append(b, r.Value...) 69 | b = varint.PutZigZag64(b, buf, 0) // no headers 70 | c = varint.PutZigZag64(c, buf, int64(len(b))) 71 | c = append(c, b...) 72 | return c 73 | } 74 | 75 | func (r *Record) Marshal2(b []byte) []byte { 76 | buf := make([]byte, binary.MaxVarintLen64) 77 | b = varint.PutZigZag64(b, buf, int64(r.Attributes)) 78 | b = varint.PutZigZag64(b, buf, r.TimestampDelta) 79 | b = varint.PutZigZag64(b, buf, r.OffsetDelta) 80 | b = varint.PutZigZag64(b, buf, r.KeyLen) 81 | b = append(b, r.Key...) 82 | b = varint.PutZigZag64(b, buf, r.ValueLen) 83 | b = append(b, r.Value...) 84 | b = varint.PutZigZag64(b, buf, 0) // no headers 85 | c := make([]byte, 0, len(b)+10) 86 | c = varint.PutZigZag64(c, buf, int64(len(b))) 87 | c = append(c, b...) 88 | return c 89 | } 90 | 91 | func (r *Record) Marshal3() []byte { 92 | buf := make([]byte, binary.MaxVarintLen64) 93 | // allocate only once, but enough to fit "most cases" 94 | b := make([]byte, len(r.Key)+len(r.Value)+100) 95 | // "reserve" 10 bytes to leave room for the record length 96 | b = b[0:binary.MaxVarintLen64] 97 | // write out the record 98 | b = varint.PutZigZag64(b, buf, int64(r.Attributes)) 99 | b = varint.PutZigZag64(b, buf, r.TimestampDelta) 100 | b = varint.PutZigZag64(b, buf, r.OffsetDelta) 101 | b = varint.PutZigZag64(b, buf, r.KeyLen) 102 | b = append(b, r.Key...) 103 | b = varint.PutZigZag64(b, buf, r.ValueLen) 104 | b = append(b, r.Value...) 
105 | b = varint.PutZigZag64(b, buf, 0) // no headers 106 | // write out record length 107 | c := make([]byte, 0, binary.MaxVarintLen64) 108 | c = varint.PutZigZag64(c, buf, int64(len(b)-binary.MaxVarintLen64)) 109 | // and put the whole record length "in front" using the "reserved" space 110 | offset := binary.MaxVarintLen64 - len(c) 111 | copy(b[offset:], c) 112 | return b[offset:len(b)] 113 | } 114 | 115 | func (r *Record) Marshal4(tmp, header []byte, dst io.Writer) { 116 | header = header[:0] // reset because it will be appended to 117 | header = varint.PutZigZag64(header, tmp, int64(r.Attributes)) 118 | header = varint.PutZigZag64(header, tmp, r.TimestampDelta) 119 | header = varint.PutZigZag64(header, tmp, r.OffsetDelta) 120 | header = varint.PutZigZag64(header, tmp, r.KeyLen) 121 | header = append(header, r.Key...) 122 | header = varint.PutZigZag64(header, tmp, r.ValueLen) 123 | // 124 | length := int64(len(header) + len(r.Value) + 1) 125 | n := varint.PutVarint(tmp, uint64(length<<1^(length>>63))) // ZigZag 126 | dst.Write(tmp[:n]) 127 | dst.Write(header) 128 | dst.Write(r.Value) 129 | dst.Write([]byte{0}) // no kafka record "headers" 130 | } 131 | -------------------------------------------------------------------------------- /client/fetcher/fetcher.go: -------------------------------------------------------------------------------- 1 | // Package fetcher implements a single partition Kafka fetcher. A "fetcher", in 2 | // my nomenclature, is different from a "consumer" in that it does no offset 3 | // management of its own: it doesn't even advance the offset on successfully 4 | // reading a fetch response. The reason for this is that there are many nuanced 5 | // error scenarios (example: fetch response successful; 3rd out of 5 returned 6 | // batches is corrupted) and so it makes sense to push the error handling logic 7 | // (and the logic responsible for advancing and storing offsets) to a higher 8 | // level library or even to the user.
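//
// A minimal fetch sketch (illustrative only: it assumes a broker reachable at
// localhost:9092 and an existing single-partition topic "test"; error
// handling is elided):
//
//	f := &fetcher.PartitionFetcher{
//		PartitionClient: client.PartitionClient{
//			Bootstrap: "localhost:9092",
//			Topic:     "test",
//			Partition: 0,
//		},
//		MinBytes:      10 << 10,
//		MaxBytes:      10 << 20,
//		MaxWaitTimeMs: 1000,
//	}
//	f.Seek(fetcher.MessageOldest)
//	resp, err := f.Fetch()
//	for _, b := range resp.RecordSet.Batches() {
//		batch.Unmarshal(b)
//	}
//	// after parsing the batches advance the offset yourself, for
//	// example with f.SetOffset; the fetcher never does it for you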
9 | package fetcher 10 | 11 | import ( 12 | "fmt" 13 | "sync" 14 | "time" 15 | 16 | "github.com/mkocikowski/libkafka" 17 | "github.com/mkocikowski/libkafka/api/Fetch" 18 | "github.com/mkocikowski/libkafka/api/Metadata" 19 | "github.com/mkocikowski/libkafka/batch" 20 | "github.com/mkocikowski/libkafka/client" 21 | ) 22 | 23 | func parseResponse(r *Fetch.Response) (*Response, error) { 24 | if n := len(r.TopicResponses); n != 1 { 25 | return nil, fmt.Errorf("unexpected number of topic responses: %d", n) 26 | } 27 | topicResponse := &(r.TopicResponses[0]) 28 | if n := len(topicResponse.PartitionResponses); n != 1 { 29 | return nil, fmt.Errorf("unexpected number of partition responses: %d", n) 30 | } 31 | partitionResponse := &(topicResponse.PartitionResponses[0]) 32 | return &Response{ 33 | Topic: topicResponse.Topic, 34 | Partition: partitionResponse.Partition, 35 | ThrottleTimeMs: r.ThrottleTimeMs, 36 | ErrorCode: partitionResponse.ErrorCode, 37 | LogStartOffset: partitionResponse.LogStartOffset, 38 | HighWatermark: partitionResponse.HighWatermark, 39 | RecordSet: batch.RecordSet(partitionResponse.RecordSet), 40 | }, nil 41 | } 42 | 43 | type Response struct { 44 | Broker *Metadata.Broker 45 | Topic string 46 | Partition int32 47 | ThrottleTimeMs int32 48 | ErrorCode int16 49 | LogStartOffset int64 50 | HighWatermark int64 51 | RecordSet batch.RecordSet `json:"-"` 52 | } 53 | 54 | type PartitionFetcher struct { 55 | sync.Mutex 56 | client.PartitionClient 57 | offset int64 58 | // The minimum amount of data the server should return for a fetch 59 | // request. If insufficient data is available the request will wait for 60 | // that much data to accumulate before answering the request (up to 61 | // limit set by MaxWaitTimeMs). 62 | MinBytes int32 63 | MaxBytes int32 64 | // The maximum amount of time the server will block before answering 65 | // the fetch request if there isn't sufficient data to immediately 66 | // satisfy the requirement given by MinBytes. Keep it < libkafka.RequestTimeout. 67 | MaxWaitTimeMs int32 68 | } 69 | 70 | var ( 71 | MessageNewest = time.Unix(0, -1e6) 72 | MessageOldest = time.Unix(0, -2e6) 73 | ) 74 | 75 | // Seek looks up an offset close to specified timestamp and sets the fetcher's 76 | // offset to it. If there is any error the fetcher's offset is not modified. 77 | // MessageNewest and MessageOldest are two "magic" values for the target. 
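//
// For example (illustrative):
//
//	f.Seek(MessageOldest) // position at the log start offset
//	f.Seek(MessageNewest) // position at the high watermark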
78 | func (c *PartitionFetcher) Seek(target time.Time) error { 79 | c.Lock() 80 | defer c.Unlock() 81 | timestampMs := target.UnixNano() / int64(time.Millisecond) 82 | resp, err := c.PartitionClient.ListOffsets(timestampMs) 83 | if err != nil { 84 | return err 85 | } 86 | p := resp.Responses[0].Partitions[0] 87 | if p.ErrorCode != libkafka.ERR_NONE { 88 | return &libkafka.Error{Code: p.ErrorCode} 89 | } 90 | c.offset = p.Offset 91 | return nil 92 | } 93 | 94 | func (c *PartitionFetcher) Offset() int64 { 95 | c.Lock() 96 | defer c.Unlock() 97 | return c.offset 98 | } 99 | 100 | func (c *PartitionFetcher) SetOffset(offset int64) { 101 | c.Lock() 102 | c.offset = offset 103 | c.Unlock() 104 | } 105 | 106 | func fetch(c *client.PartitionClient, args *Fetch.Args) (*Response, error) { 107 | resp, err := c.Fetch(args) 108 | if err != nil { 109 | return nil, err 110 | } 111 | return parseResponse(resp) 112 | } 113 | 114 | func (c *PartitionFetcher) Fetch() (*Response, error) { 115 | c.Lock() 116 | defer c.Unlock() 117 | args := &Fetch.Args{ 118 | ClientId: c.ClientId, 119 | Topic: c.Topic, 120 | Partition: c.Partition, 121 | Offset: c.offset, 122 | MinBytes: c.MinBytes, 123 | MaxBytes: c.MaxBytes, 124 | MaxWaitTimeMs: c.MaxWaitTimeMs, 125 | } 126 | resp, err := fetch(&(c.PartitionClient), args) 127 | if err != nil { 128 | if leader := c.Leader(); leader != nil { 129 | err = fmt.Errorf("error calling %+v: %w", leader, err) 130 | } 131 | return nil, err 132 | } 133 | resp.Broker = c.Leader() 134 | return resp, nil 135 | } 136 | -------------------------------------------------------------------------------- /test/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | # Unspecified loggers and loggers with additivity=true output to server.log and stdout 17 | # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise 18 | log4j.rootLogger=INFO, stdout, kafkaAppender 19 | 20 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 21 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 23 | 24 | log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender 25 | log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH 26 | log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log 27 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout 28 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 29 | 30 | log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender 31 | log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH 32 | log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log 33 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout 34 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 35 | 36 | log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender 37 | log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH 38 | log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log 39 | log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout 40 | log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 41 | 42 | log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender 43 | log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH 44 | log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log 45 | log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout 46 | log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 47 | 48 | log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender 49 | log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH 50 | log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log 51 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout 52 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 53 | 54 | log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender 55 | log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH 56 | log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log 57 | log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout 58 | log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 59 | 60 | # Change the two lines below to adjust ZK client logging 61 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO 62 | log4j.logger.org.apache.zookeeper=INFO 63 | 64 | # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) 65 | log4j.logger.kafka=INFO 66 | log4j.logger.org.apache.kafka=INFO 67 | 68 | # Change to DEBUG or TRACE to enable request logging 69 | log4j.logger.kafka.request.logger=DEBUG, requestAppender 70 | log4j.additivity.kafka.request.logger=false 71 | 72 | # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output 73 | # related to the handling of requests 74 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender 75 | 
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender 76 | #log4j.additivity.kafka.server.KafkaApis=false 77 | log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender 78 | log4j.additivity.kafka.network.RequestChannel$=false 79 | 80 | log4j.logger.kafka.controller=TRACE, controllerAppender 81 | log4j.additivity.kafka.controller=false 82 | 83 | log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender 84 | log4j.additivity.kafka.log.LogCleaner=false 85 | 86 | log4j.logger.state.change.logger=TRACE, stateChangeAppender 87 | log4j.additivity.state.change.logger=false 88 | 89 | # Access denials are logged at INFO level, change to DEBUG to also log allowed accesses 90 | log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender 91 | log4j.additivity.kafka.authorizer.logger=false 92 | 93 | -------------------------------------------------------------------------------- /test/v1_0/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | # Unspecified loggers and loggers with additivity=true output to server.log and stdout 17 | # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise 18 | log4j.rootLogger=INFO, stdout, kafkaAppender 19 | 20 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 21 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 23 | 24 | log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender 25 | log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH 26 | log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log 27 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout 28 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 29 | 30 | log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender 31 | log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH 32 | log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log 33 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout 34 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 35 | 36 | log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender 37 | log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH 38 | log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log 39 | log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout 40 | log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 41 | 42 | log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender 43 | log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH 44 | log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log 45 | log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout 46 | log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 47 | 48 | log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender 49 | log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH 50 | log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log 51 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout 52 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 53 | 54 | log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender 55 | log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH 56 | log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log 57 | log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout 58 | log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 59 | 60 | # Change the two lines below to adjust ZK client logging 61 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO 62 | log4j.logger.org.apache.zookeeper=INFO 63 | 64 | # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) 65 | log4j.logger.kafka=INFO 66 | log4j.logger.org.apache.kafka=INFO 67 | 68 | # Change to DEBUG or TRACE to enable request logging 69 | log4j.logger.kafka.request.logger=DEBUG, requestAppender 70 | log4j.additivity.kafka.request.logger=false 71 | 72 | # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output 73 | # related to the handling of requests 74 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender 75 | 
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender 76 | #log4j.additivity.kafka.server.KafkaApis=false 77 | log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender 78 | log4j.additivity.kafka.network.RequestChannel$=false 79 | 80 | log4j.logger.kafka.controller=TRACE, controllerAppender 81 | log4j.additivity.kafka.controller=false 82 | 83 | log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender 84 | log4j.additivity.kafka.log.LogCleaner=false 85 | 86 | log4j.logger.state.change.logger=TRACE, stateChangeAppender 87 | log4j.additivity.state.change.logger=false 88 | 89 | # Access denials are logged at INFO level, change to DEBUG to also log allowed accesses 90 | log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender 91 | log4j.additivity.kafka.authorizer.logger=false 92 | 93 | -------------------------------------------------------------------------------- /client/producer/producer_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/mkocikowski/libkafka" 11 | "github.com/mkocikowski/libkafka/api/Produce" 12 | "github.com/mkocikowski/libkafka/batch" 13 | "github.com/mkocikowski/libkafka/client" 14 | ) 15 | 16 | func init() { 17 | rand.Seed(time.Now().UnixNano()) 18 | } 19 | 20 | func TestIntergationPartitionProducer(t *testing.T) { 21 | bootstrap := "localhost:9092" 22 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 23 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 24 | t.Fatal(err) 25 | } 26 | p := &PartitionProducer{ 27 | PartitionClient: client.PartitionClient{ 28 | Bootstrap: bootstrap, 29 | Topic: topic, 30 | Partition: 0, 31 | }, 32 | Acks: 1, 33 | TimeoutMs: 1000, 34 | } 35 | if _, err := p.ProduceStrings(time.Now(), "foo", "bar"); err != nil { 36 | t.Fatal(err) 37 | } 38 | resp, err := p.ProduceStrings(time.Now(), "monkey", "banana") 39 | if err != nil { 40 | t.Fatal(err) 41 | } 42 | if resp.BaseOffset != 2 { 43 | t.Fatal(resp.BaseOffset) 44 | } 45 | if _, err := p.ProduceStrings(time.Now(), []string{}...); err != batch.ErrEmpty { 46 | t.Fatal(err) 47 | } 48 | p.Partition = 1 49 | if resp, _ := p.ProduceStrings(time.Now(), "hello"); resp.ErrorCode != libkafka.ERR_UNKNOWN_TOPIC_OR_PARTITION { 50 | t.Fatal(&libkafka.Error{Code: resp.ErrorCode}) 51 | } 52 | } 53 | 54 | func TestIntergationPartitionProducerSingleBatch(t *testing.T) { 55 | bootstrap := "localhost:9092" 56 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 57 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 58 | t.Fatal(err) 59 | } 60 | p := &PartitionProducer{ 61 | PartitionClient: client.PartitionClient{ 62 | Bootstrap: bootstrap, 63 | Topic: topic, 64 | Partition: 0, 65 | }, 66 | Acks: 1, 67 | TimeoutMs: 1000, 68 | } 69 | now := time.Unix(1584485804, 0) 70 | b, _ := batch.NewBuilder(now).AddStrings("foo", "bar").Build(now) 71 | if b.Crc != 0 { 72 | t.Fatal(b.Crc) 73 | } 74 | resp, err := p.Produce(b) 75 | if err != nil { 76 | t.Fatal(err) 77 | } 78 | if resp.ErrorCode != libkafka.ERR_NONE { 79 | t.Fatal(resp.ErrorCode) 80 | } 81 | if b.Crc != 3094838044 { 82 | t.Fatal(b.Crc) 83 | } 84 | t.Logf("%+v", resp) 85 | // 86 | p.Acks = 2 87 | resp, err = p.Produce(b) 88 | if err != nil { 89 | t.Fatal(err) 90 | } 91 | if resp.ErrorCode != libkafka.ERR_INVALID_REQUIRED_ACKS { 92 | t.Fatalf("%+v", resp) 93 | } 94 | } 95 | 96 | func TestIntergationPartitionProducerBadTopic(t *testing.T) { 
97 | p := &PartitionProducer{ 98 | PartitionClient: client.PartitionClient{ 99 | Bootstrap: "localhost:9092", 100 | Topic: "no-such-topic", 101 | }, 102 | } 103 | resp, err := p.ProduceStrings(time.Now(), "foo", "bar") 104 | if err == nil { 105 | t.Fatalf("%+v", resp) 106 | } 107 | t.Log(err) 108 | } 109 | 110 | func TestIntegrationPartitionProducerCorruptBytes(t *testing.T) { 111 | bootstrap := "localhost:9092" 112 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 113 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 114 | t.Fatal(err) 115 | } 116 | p := &PartitionProducer{ 117 | PartitionClient: client.PartitionClient{ 118 | Bootstrap: bootstrap, 119 | Topic: topic, 120 | Partition: 0, 121 | }, 122 | } 123 | now := time.Unix(1584485804, 0) 124 | b, _ := batch.NewBuilder(now).AddStrings("foo", "bar").Build(now) 125 | corrupted := b.Marshal() 126 | corrupted[len(corrupted)-1] = math.MaxUint8 - corrupted[len(corrupted)-1] 127 | args := &Produce.Args{ 128 | Topic: topic, 129 | Partition: 0, 130 | Acks: 1, 131 | TimeoutMs: 1000, 132 | } 133 | // calling PartitionClient.Produce and not just Produce so that batch 134 | // is not re-marshaled 135 | resp, err := p.PartitionClient.Produce(args, corrupted) 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | parsed, _ := parseResponse(resp) 140 | if parsed.ErrorCode != libkafka.ERR_CORRUPT_MESSAGE { 141 | t.Fatalf("%+v", parsed) 142 | } 143 | } 144 | 145 | func TestIntegrationPartitionProducerConnectionClosed(t *testing.T) { 146 | bootstrap := "localhost:9092" 147 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 148 | if _, err := client.CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 149 | t.Fatal(err) 150 | } 151 | p := &PartitionProducer{ 152 | PartitionClient: client.PartitionClient{ 153 | Bootstrap: bootstrap, 154 | Topic: topic, 155 | Partition: 0, 156 | }, 157 | Acks: 1, 158 | TimeoutMs: 1000, 159 | } 160 | if _, err := p.ProduceStrings(time.Now(), "foo"); err != nil { 161 | t.Fatal(err) 162 | } 163 | // this is "clean" and results in reconnect on next produce 164 | p.Close() 165 | if _, err := p.ProduceStrings(time.Now(), "bar"); err != nil { 166 | t.Fatal(err) 167 | } 168 | // this is "dirty" and results in error on next produce 169 | p.Conn().Close() 170 | if _, err := p.ProduceStrings(time.Now(), "baz"); err == nil { 171 | t.Fatal("expected 'use of closed network connection' error") 172 | } else { 173 | t.Log(err) 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /client/partition_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/mkocikowski/libkafka" 11 | "github.com/mkocikowski/libkafka/api/Metadata" 12 | ) 13 | 14 | func init() { 15 | rand.Seed(time.Now().UnixNano()) 16 | } 17 | 18 | func TestIntegrationPartitionClientSuccess(t *testing.T) { 19 | bootstrap := "localhost:9092" 20 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 21 | if _, err := CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 22 | t.Fatal(err) 23 | } 24 | c := &PartitionClient{ 25 | Bootstrap: bootstrap, 26 | Topic: topic, 27 | Partition: 0, 28 | } 29 | r, err := c.ListOffsets(0) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | t.Logf("%+v", r) 34 | } 35 | 36 | func TestIntegrationPartitionClientSuccessTLS(t *testing.T) { 37 | bootstrap := "localhost:9093" 38 | tlsConfig := mTLSConfig() 39 | topic :=
fmt.Sprintf("test-%x", rand.Uint32()) 40 | if _, err := CallCreateTopic(bootstrap, tlsConfig, topic, 1, 1); err != nil { 41 | t.Fatal(err) 42 | } 43 | c := &PartitionClient{ 44 | Bootstrap: bootstrap, 45 | TLS: tlsConfig, 46 | Topic: topic, 47 | Partition: 0, 48 | } 49 | r, err := c.ListOffsets(0) 50 | if err != nil { 51 | t.Fatal(err) 52 | } 53 | t.Logf("%+v", r) 54 | } 55 | 56 | func TestIntergationPartitionClientBadBootstrap(t *testing.T) { 57 | bootstrap := "foo" 58 | topic := fmt.Sprintf("test-%x", rand.Uint32()) // do not create 59 | c := &PartitionClient{ 60 | Bootstrap: bootstrap, 61 | Topic: topic, 62 | Partition: 0, 63 | } 64 | _, err := c.ListOffsets(0) 65 | if err == nil { 66 | t.Fatal("expected 'dial tcp' error") 67 | } 68 | t.Log(err) 69 | } 70 | 71 | func TestIntergationPartitionClientTopicDoesNotExist(t *testing.T) { 72 | bootstrap := "localhost:9092" 73 | topic := fmt.Sprintf("test-%x", rand.Uint32()) // do not create 74 | c := &PartitionClient{ 75 | Bootstrap: bootstrap, 76 | Topic: topic, 77 | Partition: 0, 78 | } 79 | _, err := c.ListOffsets(0) 80 | if !errors.Is(err, ErrPartitionDoesNotExist) { 81 | t.Fatal(err) 82 | } 83 | t.Log(err) 84 | } 85 | 86 | // the purpose of this test is to test that when ConnMaxIdle is set connection 87 | // is automatically closed and reopened when the idle time is exceeded 88 | func TestIntergationPartitionClientConnectionIdleTimeout(t *testing.T) { 89 | bootstrap := "localhost:9092" 90 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 91 | if _, err := CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 92 | t.Fatal(err) 93 | } 94 | timeout := 50 * time.Millisecond 95 | c := &PartitionClient{ 96 | Bootstrap: bootstrap, 97 | Topic: topic, 98 | Partition: 0, 99 | ConnMaxIdle: timeout, 100 | } 101 | // make first call to open connection 102 | if _, err := c.ListOffsets(0); err != nil { 103 | t.Fatal(err) 104 | } 105 | // record the connection 106 | conn := c.Conn() 107 | // make second call 108 | if _, err := c.ListOffsets(0); err != nil { 109 | t.Fatal(err) 110 | } 111 | // ensure the connection is the same connection 112 | if c.Conn() != conn { 113 | t.Fatal("different connection") 114 | } 115 | // now exceed the timeout 116 | time.Sleep(timeout) 117 | // third call 118 | if _, err := c.ListOffsets(0); err != nil { 119 | t.Fatal(err) 120 | } 121 | // now there should be different connection 122 | if c.Conn() == conn { 123 | t.Fatal("same connection") 124 | } 125 | } 126 | 127 | // the purpose of this test is to test that when libkafka.ConnectionTTL is set 128 | // connection is automatically closed and reopened when the TTL is exceeded 129 | func TestIntergationPartitionClientConnectionTTL(t *testing.T) { 130 | bootstrap := "localhost:9092" 131 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 132 | if _, err := CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 133 | t.Fatal(err) 134 | } 135 | c := &PartitionClient{ 136 | Bootstrap: bootstrap, 137 | Topic: topic, 138 | Partition: 0, 139 | } 140 | // make first call to open connection 141 | if _, err := c.ListOffsets(0); err != nil { 142 | t.Fatal(err) 143 | } 144 | // record the connection 145 | conn := c.Conn() 146 | // sleep should not reset connection because no ttl set yet 147 | time.Sleep(50 * time.Millisecond) 148 | if _, err := c.ListOffsets(0); err != nil { 149 | t.Fatal(err) 150 | } 151 | if c.Conn() != conn { 152 | t.Fatal("different connection") 153 | } 154 | // set TTL (not safe for concurrent use but this just a test) 155 | defer func() { libkafka.ConnectionTTL 
= 0 }() 156 | libkafka.ConnectionTTL = time.Millisecond 157 | // make another call; this one should exceed the ttl so the connection should be closed and reopened 158 | if _, err := c.ListOffsets(0); err != nil { 159 | t.Fatal(err) 160 | } 161 | if _, err := c.ListOffsets(0); err != nil { 162 | t.Fatal(err) 163 | } 164 | // now there should be a different connection 165 | if c.Conn() == conn { 166 | t.Fatal("same connection") 167 | } 168 | } 169 | 170 | func TestUnitLeaderString(t *testing.T) { 171 | b := &Metadata.Broker{Rack: "foo", NodeId: 1, Host: "bar", Port: 9092} 172 | s := fmt.Sprintf("%v", b) 173 | if s != "foo:1:bar:9092" { 174 | t.Fatal(s) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /client/client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "errors" 7 | "fmt" 8 | "io/ioutil" 9 | "math/rand" 10 | "net" 11 | "testing" 12 | "time" 13 | 14 | "github.com/mkocikowski/libkafka" 15 | "github.com/mkocikowski/libkafka/api/CreateTopics" 16 | ) 17 | 18 | func init() { 19 | rand.Seed(time.Now().UnixNano()) 20 | } 21 | 22 | func mTLSConfig() *tls.Config { 23 | caCert, err := ioutil.ReadFile("../test/mtls/ca-cert.pem") 24 | if err != nil { 25 | panic(err) 26 | } 27 | caCertPool := x509.NewCertPool() 28 | if ok := caCertPool.AppendCertsFromPEM([]byte(caCert)); !ok { 29 | panic("!ok") 30 | } 31 | cert, err := tls.LoadX509KeyPair("../test/mtls/client-cert.pem", "../test/mtls/client-key.pem") 32 | if err != nil { 33 | panic(err) 34 | } 35 | return &tls.Config{ 36 | RootCAs: caCertPool, 37 | Certificates: []tls.Certificate{cert}, 38 | } 39 | } 40 | 41 | func TestIntegrationCallApiVersions(t *testing.T) { 42 | r, err := CallApiVersions("localhost:9092", nil) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | t.Logf("%+v", r) 47 | } 48 | 49 | func TestIntegrationCallApiVersionsMTLS(t *testing.T) { 50 | conf := mTLSConfig() 51 | // make good call with CAs and client certs in order 52 | r, err := CallApiVersions("localhost:9093", mTLSConfig()) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | t.Logf("%+v", r) 57 | // try to make TLS call on PLAINTEXT port 58 | if _, err = CallApiVersions("localhost:9092", mTLSConfig()); err == nil { 59 | t.Fatal("expected error trying to handshake on PLAINTEXT port") 60 | } 61 | t.Log(err) 62 | // try to make PLAINTEXT call on TLS port 63 | if _, err = CallApiVersions("localhost:9093", nil); err == nil { 64 | t.Fatal("expected error trying PLAINTEXT connection on TLS port") 65 | } 66 | t.Log(err) 67 | // If RootCAs is nil, TLS uses the host's root CA set. Expect cert error 68 | conf.RootCAs = nil 69 | if _, err = CallApiVersions("localhost:9093", conf); err == nil { 70 | t.Fatal("expected 'x509: certificate signed by unknown authority' error") 71 | } 72 | t.Log(err) 73 | // conf.InsecureSkipVerify=true will skip checking broker cert chain. Expect success 74 | conf.InsecureSkipVerify = true 75 | if _, err = CallApiVersions("localhost:9093", conf); err != nil { 76 | t.Fatal(err) 77 | } 78 | } 79 | 80 | // this test will pass or fail depending on the ssl.client.auth broker setting. 81 | // for mTLS to be "on" the setting must be 'ssl.client.auth=required' and that 82 | // is what it is set to for the libkafka integration tests.
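// (not asserted below, but worth noting: the failure typically surfaces
// client-side as a generic tls error on the first read, because the broker
// aborts the handshake when no client certificate is presented)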
83 | func TestIntegrationCallApiVersionsTLSNoClientCert(t *testing.T) { 84 | caCert, err := ioutil.ReadFile("../test/mtls/ca-cert.pem") 85 | if err != nil { 86 | t.Fatal(err) 87 | } 88 | pool := x509.NewCertPool() 89 | pool.AppendCertsFromPEM(caCert) 90 | _, err = CallApiVersions("localhost:9093", &tls.Config{RootCAs: pool}) 91 | if err == nil { 92 | t.Fatal("expected 'tls: unexpected message' error if 'ssl.client.auth=required' in broker config") 93 | } 94 | t.Log(err) 95 | } 96 | 97 | func TestIntegrationCallApiVersionsBadHost(t *testing.T) { 98 | _, err := CallApiVersions("foo", nil) 99 | if err == nil { 100 | t.Fatal("expected bad host error") 101 | } 102 | t.Log(err) 103 | } 104 | 105 | func TestIntegrationCallCreateTopic(t *testing.T) { 106 | brokers := "localhost:9092" 107 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 108 | var r *CreateTopics.Response 109 | r, _ = CallCreateTopic(brokers, nil, topic, 1, 2) 110 | if r.Topics[0].ErrorCode != libkafka.ERR_INVALID_REPLICATION_FACTOR { 111 | t.Fatal(&libkafka.Error{Code: r.Topics[0].ErrorCode}) 112 | } 113 | r, _ = CallCreateTopic(brokers, nil, topic, 1, 1) 114 | if r.Topics[0].ErrorCode != libkafka.ERR_NONE { 115 | t.Fatal(&libkafka.Error{Code: r.Topics[0].ErrorCode}) 116 | } 117 | r, _ = CallCreateTopic(brokers, nil, topic, 1, 1) 118 | if r.Topics[0].ErrorCode != libkafka.ERR_TOPIC_ALREADY_EXISTS { 119 | t.Fatal(&libkafka.Error{Code: r.Topics[0].ErrorCode}) 120 | } 121 | if _, err := CallCreateTopic("foo:9092", nil, topic, 1, 1); err == nil { 122 | t.Fatal("expected error calling foo broker") 123 | } 124 | // TLS 125 | r, _ = CallCreateTopic("localhost:9093", mTLSConfig(), topic, 1, 1) 126 | if r.Topics[0].ErrorCode != libkafka.ERR_TOPIC_ALREADY_EXISTS { 127 | t.Fatal(&libkafka.Error{Code: r.Topics[0].ErrorCode}) 128 | } 129 | if _, err := CallCreateTopic("localhost:9093", nil, topic, 1, 1); err == nil { 130 | t.Fatal("expected error calling TLS port without tls config") 131 | } 132 | } 133 | 134 | func TestIntegrationCallCreateTopicRequestTimeout(t *testing.T) { 135 | d := libkafka.RequestTimeout 136 | defer func() { 137 | libkafka.RequestTimeout = d 138 | }() 139 | libkafka.RequestTimeout = time.Nanosecond 140 | brokers := "localhost:9092" 141 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 142 | _, err := CallCreateTopic(brokers, nil, topic, 1, 2) 143 | for { 144 | err = errors.Unwrap(err) 145 | if err == nil { 146 | break 147 | } 148 | if err, ok := err.(net.Error); ok && err.Timeout() { 149 | return // success 150 | } 151 | } 152 | t.Fatalf("expected timeout got %v", err) 153 | } 154 | 155 | func TestUnitConnectToRandomBrokerAndCallErrorForgetSRV(t *testing.T) { 156 | srvLookupCache["foo"] = []string{"bar:1"} 157 | err := connectToRandomBrokerAndCall("foo", nil, nil, nil) 158 | if err == nil { 159 | t.Fatal("expected error") 160 | } 161 | if _, ok := srvLookupCache["foo"]; ok { 162 | t.Fatal("expected key to be deleted because of call error") 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /wire/wire.go: -------------------------------------------------------------------------------- 1 | // Package wire implements functions for marshaling and unmarshaling Kafka requests and responses. 
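//
// Both Write and Read walk values with reflection: exported struct fields are
// encoded in declaration order as big-endian Kafka primitives (int8/16/32/64,
// strings with an int16 length prefix, arrays and []byte with an int32 length
// prefix). A minimal round-trip sketch (Example is a hypothetical type, not
// part of this package):
//
//	type Example struct {
//		Id   int32
//		Name string
//	}
//	buf := new(bytes.Buffer)
//	if err := Write(buf, reflect.ValueOf(&Example{Id: 7, Name: "foo"})); err != nil {
//		log.Fatal(err)
//	}
//	decoded := &Example{}
//	if err := Read(buf, reflect.ValueOf(decoded)); err != nil {
//		log.Fatal(err)
//	}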
2 | package wire 3 | 4 | import ( 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "reflect" 9 | "strings" 10 | ) 11 | 12 | var ord = binary.BigEndian 13 | 14 | func Write(w io.Writer, val reflect.Value) error { 15 | switch val.Kind() { 16 | case reflect.Ptr, reflect.Interface: 17 | return Write(w, val.Elem()) 18 | case reflect.Struct: 19 | for i := 0; i < val.NumField(); i++ { 20 | name := val.Type().Field(i).Name 21 | if name[0:1] == strings.ToLower(name[0:1]) { 22 | continue // skip fields that start with lowercase 23 | } 24 | if val.Type().Field(i).Tag.Get("wire") == "omit" { 25 | continue 26 | } 27 | err := Write(w, val.Field(i)) 28 | if err != nil { 29 | return err 30 | } 31 | } 32 | return nil 33 | case reflect.Slice: 34 | if val.IsNil() { 35 | return binary.Write(w, ord, int32(-1)) 36 | } 37 | l := int32(val.Len()) 38 | if l == 0 { 39 | return binary.Write(w, ord, int32(0)) 40 | } 41 | if err := binary.Write(w, ord, l); err != nil { 42 | return err 43 | } 44 | typ := val.Type().Elem() 45 | if typ.Kind() == reflect.Uint8 { // []byte 46 | _, err := w.Write(val.Bytes()) 47 | return err 48 | } 49 | for i := 0; i < val.Len(); i++ { 50 | err := Write(w, val.Index(i)) 51 | if err != nil { 52 | return err 53 | } 54 | } 55 | return nil 56 | case reflect.String: 57 | l := int16(val.Len()) 58 | if l == 0 { 59 | //return binary.Write(w, ord, int16(-1)) 60 | return binary.Write(w, ord, int16(0)) 61 | } 62 | if err := binary.Write(w, ord, l); err != nil { 63 | return err 64 | } 65 | b := []byte(val.String()) 66 | _, err := w.Write(b) 67 | return err 68 | case reflect.Int8: 69 | i := int8(val.Int()) 70 | return binary.Write(w, ord, i) 71 | case reflect.Int16: 72 | i := int16(val.Int()) 73 | return binary.Write(w, ord, i) 74 | case reflect.Int32: 75 | i := int32(val.Int()) 76 | return binary.Write(w, ord, i) 77 | case reflect.Uint32: 78 | i := uint32(val.Uint()) 79 | return binary.Write(w, ord, i) 80 | case reflect.Int64: 81 | i := int64(val.Int()) 82 | return binary.Write(w, ord, i) 83 | case reflect.Bool: 84 | if val.Bool() { 85 | _, err := w.Write([]byte{1}) 86 | return err 87 | } 88 | _, err := w.Write([]byte{0}) 89 | return err 90 | } 91 | return nil 92 | } 93 | 94 | func Read(r io.Reader, val reflect.Value) error { 95 | //log.Println(val) 96 | switch val.Kind() { 97 | case reflect.Ptr, reflect.Interface: 98 | return Read(r, val.Elem()) 99 | case reflect.Struct: 100 | for i := 0; i < val.NumField(); i++ { 101 | name := val.Type().Field(i).Name 102 | if name[0:1] == strings.ToLower(name[0:1]) { 103 | continue // skip fields that start with lowercase 104 | } 105 | if val.Type().Field(i).Tag.Get("wire") == "omit" { 106 | continue 107 | } 108 | err := Read(r, val.Field(i)) 109 | if err != nil { 110 | return err 111 | } 112 | } 113 | return nil 114 | case reflect.Slice: 115 | var n int32 116 | if err := binary.Read(r, ord, &n); err != nil { 117 | return fmt.Errorf("error reading array length: %v", err) 118 | } 119 | if n == -1 { 120 | return nil // nil slice; checked before the []byte branch so a null BYTES value does not panic in make 121 | } 122 | typ := val.Type().Elem() 123 | if typ.Kind() == reflect.Uint8 { // []byte 124 | b := make([]byte, n) 125 | if _, err := io.ReadFull(r, b); err != nil { 126 | return fmt.Errorf("error reading []byte body: %v", err) 127 | } 128 | val.SetBytes(b) 129 | return nil 130 | } 131 | val.Set(reflect.MakeSlice(val.Type(), 0, 0)) // empty slice 132 | for i := 0; i < int(n); i++ { 133 | element := reflect.New(typ).Elem() 134 | if err := Read(r, element); err != nil { 135 | return fmt.Errorf("error parsing array element: %v", err) 136 | } 137
| val.Set(reflect.Append(val, element)) 138 | } 139 | return nil 140 | case reflect.String: 141 | var n int16 142 | if err := binary.Read(r, ord, &n); err != nil { 143 | return fmt.Errorf("error reading string length: %v", err) 144 | } 145 | if n < 0 { 146 | return nil 147 | } 148 | b := make([]byte, n) 149 | if _, err := io.ReadFull(r, b); err != nil { 150 | return fmt.Errorf("error reading string body: %v", err) 151 | } 152 | val.SetString(string(b)) 153 | return nil 154 | case reflect.Int8: 155 | var i int8 156 | if err := binary.Read(r, ord, &i); err != nil { 157 | return fmt.Errorf("error reading int8: %v", err) 158 | } 159 | val.SetInt(int64(i)) 160 | return nil 161 | case reflect.Int16: 162 | var i int16 163 | if err := binary.Read(r, ord, &i); err != nil { 164 | return fmt.Errorf("error reading int16: %v", err) 165 | } 166 | val.SetInt(int64(i)) 167 | return nil 168 | case reflect.Int32: 169 | var i int32 170 | if err := binary.Read(r, ord, &i); err != nil { 171 | return fmt.Errorf("error reading int32: %v", err) 172 | } 173 | val.SetInt(int64(i)) 174 | return nil 175 | case reflect.Uint32: 176 | var i uint32 177 | if err := binary.Read(r, ord, &i); err != nil { 178 | return fmt.Errorf("error reading uint32: %v", err) 179 | } 180 | val.SetUint(uint64(i)) 181 | return nil 182 | case reflect.Int64: 183 | var i int64 184 | if err := binary.Read(r, ord, &i); err != nil { 185 | return fmt.Errorf("error reading int64: %v", err) 186 | } 187 | val.SetInt(int64(i)) 188 | return nil 189 | case reflect.Bool: 190 | b := make([]byte, 1) 191 | _, err := r.Read(b) 192 | if err != nil { 193 | return fmt.Errorf("error reading bool: %v", err) 194 | } 195 | val.SetBool(b[0] != 0) 196 | return nil 197 | } 198 | return nil 199 | } 200 | -------------------------------------------------------------------------------- /test/wait-for-it.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Use this script to test if a given TCP host/port are available 3 | 4 | WAITFORIT_cmdname=${0##*/} 5 | 6 | echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } 7 | 8 | usage() 9 | { 10 | cat << USAGE >&2 11 | Usage: 12 | $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] 13 | -h HOST | --host=HOST Host or IP under test 14 | -p PORT | --port=PORT TCP port under test 15 | Alternatively, you specify the host and port as host:port 16 | -s | --strict Only execute subcommand if the test succeeds 17 | -q | --quiet Don't output any status messages 18 | -t TIMEOUT | --timeout=TIMEOUT 19 | Timeout in seconds, zero for no timeout 20 | -- COMMAND ARGS Execute command with args after the test finishes 21 | USAGE 22 | exit 1 23 | } 24 | 25 | wait_for() 26 | { 27 | if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then 28 | echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" 29 | else 30 | echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" 31 | fi 32 | WAITFORIT_start_ts=$(date +%s) 33 | while : 34 | do 35 | if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then 36 | nc -z $WAITFORIT_HOST $WAITFORIT_PORT 37 | WAITFORIT_result=$? 38 | else 39 | (echo > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 40 | WAITFORIT_result=$? 
41 | fi 42 | if [[ $WAITFORIT_result -eq 0 ]]; then 43 | WAITFORIT_end_ts=$(date +%s) 44 | echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" 45 | break 46 | fi 47 | sleep 1 48 | done 49 | return $WAITFORIT_result 50 | } 51 | 52 | wait_for_wrapper() 53 | { 54 | # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 55 | if [[ $WAITFORIT_QUIET -eq 1 ]]; then 56 | timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & 57 | else 58 | timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & 59 | fi 60 | WAITFORIT_PID=$! 61 | trap "kill -INT -$WAITFORIT_PID" INT 62 | wait $WAITFORIT_PID 63 | WAITFORIT_RESULT=$? 64 | if [[ $WAITFORIT_RESULT -ne 0 ]]; then 65 | echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" 66 | fi 67 | return $WAITFORIT_RESULT 68 | } 69 | 70 | # process arguments 71 | while [[ $# -gt 0 ]] 72 | do 73 | case "$1" in 74 | *:* ) 75 | WAITFORIT_hostport=(${1//:/ }) 76 | WAITFORIT_HOST=${WAITFORIT_hostport[0]} 77 | WAITFORIT_PORT=${WAITFORIT_hostport[1]} 78 | shift 1 79 | ;; 80 | --child) 81 | WAITFORIT_CHILD=1 82 | shift 1 83 | ;; 84 | -q | --quiet) 85 | WAITFORIT_QUIET=1 86 | shift 1 87 | ;; 88 | -s | --strict) 89 | WAITFORIT_STRICT=1 90 | shift 1 91 | ;; 92 | -h) 93 | WAITFORIT_HOST="$2" 94 | if [[ $WAITFORIT_HOST == "" ]]; then break; fi 95 | shift 2 96 | ;; 97 | --host=*) 98 | WAITFORIT_HOST="${1#*=}" 99 | shift 1 100 | ;; 101 | -p) 102 | WAITFORIT_PORT="$2" 103 | if [[ $WAITFORIT_PORT == "" ]]; then break; fi 104 | shift 2 105 | ;; 106 | --port=*) 107 | WAITFORIT_PORT="${1#*=}" 108 | shift 1 109 | ;; 110 | -t) 111 | WAITFORIT_TIMEOUT="$2" 112 | if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi 113 | shift 2 114 | ;; 115 | --timeout=*) 116 | WAITFORIT_TIMEOUT="${1#*=}" 117 | shift 1 118 | ;; 119 | --) 120 | shift 121 | WAITFORIT_CLI=("$@") 122 | break 123 | ;; 124 | --help) 125 | usage 126 | ;; 127 | *) 128 | echoerr "Unknown argument: $1" 129 | usage 130 | ;; 131 | esac 132 | done 133 | 134 | if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then 135 | echoerr "Error: you need to provide a host and port to test." 136 | usage 137 | fi 138 | 139 | WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} 140 | WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} 141 | WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} 142 | WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} 143 | 144 | # Check to see if timeout is from busybox? 145 | WAITFORIT_TIMEOUT_PATH=$(type -p timeout) 146 | WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) 147 | 148 | WAITFORIT_BUSYTIMEFLAG="" 149 | if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then 150 | WAITFORIT_ISBUSY=1 151 | # Check if busybox timeout uses -t flag 152 | # (recent Alpine versions don't support -t anymore) 153 | if timeout &>/dev/stdout | grep -q -e '-t '; then 154 | WAITFORIT_BUSYTIMEFLAG="-t" 155 | fi 156 | else 157 | WAITFORIT_ISBUSY=0 158 | fi 159 | 160 | if [[ $WAITFORIT_CHILD -gt 0 ]]; then 161 | wait_for 162 | WAITFORIT_RESULT=$? 163 | exit $WAITFORIT_RESULT 164 | else 165 | if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then 166 | wait_for_wrapper 167 | WAITFORIT_RESULT=$? 168 | else 169 | wait_for 170 | WAITFORIT_RESULT=$? 
171 | fi 172 | fi 173 | 174 | if [[ $WAITFORIT_CLI != "" ]]; then 175 | if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then 176 | echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" 177 | exit $WAITFORIT_RESULT 178 | fi 179 | exec "${WAITFORIT_CLI[@]}" 180 | else 181 | exit $WAITFORIT_RESULT 182 | fi 183 | -------------------------------------------------------------------------------- /client/group_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | 9 | "github.com/mkocikowski/libkafka" 10 | "github.com/mkocikowski/libkafka/api/SyncGroup" 11 | ) 12 | 13 | func TestIntegrationGroupClientJoin(t *testing.T) { 14 | c := &GroupClient{ 15 | Bootstrap: "localhost:9092", 16 | GroupId: fmt.Sprintf("test-group-%x", rand.Uint32()), 17 | } 18 | req := &JoinGroupRequest{ 19 | ProtocolType: "partition", 20 | ProtocolName: "random", 21 | Metadata: []byte{}, 22 | } 23 | for i := 0; i < 10; i++ { // retry in case kafka not ready in travis 24 | resp, err := c.Join(req) 25 | if err != nil || resp.MemberId == "" { 26 | time.Sleep(time.Second) 27 | continue 28 | } 29 | if resp.ErrorCode != libkafka.ERR_NONE { 30 | t.Fatalf("%+v", resp) 31 | } 32 | if resp.GenerationId != 1 { 33 | t.Fatalf("%+v", resp) 34 | } 35 | return // success 36 | } 37 | t.Fatal() 38 | } 39 | 40 | func TestIntegrationGroupClientSyncAndHeartbeat(t *testing.T) { 41 | c := &GroupClient{ 42 | Bootstrap: "localhost:9092", 43 | GroupId: fmt.Sprintf("test-group-%x", rand.Uint32()), 44 | } 45 | var memberId string 46 | var generationId int32 47 | 48 | for i := 0; i < 10; i++ { // retry in case kafka not ready in travis 49 | req := &JoinGroupRequest{ 50 | ProtocolType: "partition", 51 | ProtocolName: "random", 52 | Metadata: []byte{}, 53 | } 54 | resp, err := c.Join(req) 55 | if err == nil && resp.MemberId != "" { 56 | memberId = resp.MemberId 57 | generationId = resp.GenerationId 58 | break 59 | } 60 | time.Sleep(time.Second) 61 | } 62 | req := &SyncGroupRequest{ 63 | MemberId: memberId, 64 | GenerationId: generationId, 65 | Assignments: []SyncGroup.Assignment{ 66 | SyncGroup.Assignment{ 67 | MemberId: memberId, 68 | Assignment: []byte("foo"), 69 | }, 70 | }, 71 | } 72 | resp, _ := c.Sync(req) 73 | if resp.ErrorCode != libkafka.ERR_NONE { 74 | t.Fatalf("%+v", resp) 75 | } 76 | if string(resp.Assignment) != "foo" { 77 | t.Fatalf("%+v", resp) 78 | } 79 | for i := 0; i < 10; i++ { 80 | if resp, _ := c.Heartbeat(memberId, generationId); resp.ErrorCode != libkafka.ERR_NONE { 81 | t.Fatalf("%+v", resp) 82 | } 83 | } 84 | } 85 | 86 | func TestIntegrationGroupClientSyncUnknownMemberId(t *testing.T) { 87 | c := &GroupClient{ 88 | Bootstrap: "localhost:9092", 89 | GroupId: fmt.Sprintf("test-group-%x", rand.Uint32()), 90 | } 91 | for i := 0; i < 10; i++ { // retry in case kafka not ready in travis 92 | req := &SyncGroupRequest{Assignments: []SyncGroup.Assignment{}} 93 | resp, _ := c.Sync(req) 94 | if resp != nil && resp.ErrorCode == libkafka.ERR_UNKNOWN_MEMBER_ID { 95 | return // success 96 | } 97 | time.Sleep(time.Second) 98 | } 99 | t.Fatal() 100 | } 101 | 102 | func TestIntegrationGroupOffsets(t *testing.T) { 103 | bootstrap := "localhost:9092" 104 | c := &GroupClient{ 105 | Bootstrap: bootstrap, 106 | GroupId: fmt.Sprintf("test-group-%x", rand.Uint32()), 107 | } 108 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 109 | multiPartitionedTopic := fmt.Sprintf("test-multi-%x", rand.Uint32()) 
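// the calls below walk the whole offset lifecycle: fetch before the topic
// exists (expect -1), commit to a topic that doesn't exist (expect
// UNKNOWN_TOPIC_OR_PARTITION), create the topic, commit, fetch back the
// committed value, then repeat the exercise with multiple partitions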
110 | var offset int64 111 | var err error 112 | // topic doesn't exist and there is no offset committed 113 | offset, err = c.FetchOffset(topic, 0) 114 | if err != nil { 115 | t.Fatal(err) 116 | } 117 | if offset != -1 { 118 | t.Fatal(offset) 119 | } 120 | // try committing offset for topic that doesn't exist 121 | err = c.CommitOffset(topic, 0, 1, 1000) 122 | if err.(*libkafka.Error).Code != libkafka.ERR_UNKNOWN_TOPIC_OR_PARTITION { 123 | t.Fatal(err) 124 | } 125 | // 126 | if _, err = CallCreateTopic(bootstrap, nil, topic, 1, 1); err != nil { 127 | t.Fatal(err) 128 | } 129 | // get offset for existing topic but where no offset has been committed 130 | offset, err = c.FetchOffset(topic, 0) 131 | if err != nil { 132 | t.Fatal(err) 133 | } 134 | if offset != -1 { 135 | t.Fatal(offset) 136 | } 137 | // 138 | if err = c.CommitOffset(topic, 0, 1, 1000); err != nil { 139 | t.Fatal(err) 140 | } 141 | // 142 | offset, err = c.FetchOffset(topic, 0) 143 | if err != nil { 144 | t.Fatal(err) 145 | } 146 | if offset != 1 { 147 | t.Fatal(offset) 148 | } 149 | 150 | // CommitMultiplePartitionsOffsets 151 | offsets := map[int32]int64{ 152 | 0: 10, 153 | } 154 | // CommitMultiplePartitionsOffsets to a single-partition topic should be fine 155 | if err = c.CommitMultiplePartitionsOffsets(topic, offsets, 1000); err != nil { 156 | t.Fatal(err) 157 | } 158 | offset, err = c.FetchOffset(topic, 0) 159 | if err != nil { 160 | t.Fatal(err) 161 | } 162 | if offset != 10 { 163 | t.Fatal(offset) 164 | } 165 | offsets = map[int32]int64{ 166 | 0: 100, 167 | 1: 200, 168 | } 169 | // CommitMultiplePartitionsOffsets with two partitions to a single-partition topic should 170 | // return UNKNOWN_TOPIC_OR_PARTITION error 171 | err = c.CommitMultiplePartitionsOffsets(topic, offsets, 1000) 172 | if err.(*libkafka.Error).Code != libkafka.ERR_UNKNOWN_TOPIC_OR_PARTITION { 173 | t.Fatalf("expected UNKNOWN_TOPIC_OR_PARTITION error, got: %v", err) 174 | } 175 | if _, err = CallCreateTopic(bootstrap, nil, multiPartitionedTopic, 2, 1); err != nil { 176 | t.Fatalf("unexpected error creating multi-partitioned topic: %v", err) 177 | } 178 | // CommitMultiplePartitionsOffsets with two partitions to a multi-partitioned topic should be 179 | // fine 180 | if err = c.CommitMultiplePartitionsOffsets(multiPartitionedTopic, offsets, 1000); err != nil { 181 | t.Fatalf("unexpected error committing offsets: %v", err) 182 | } 183 | 184 | offset, err = c.FetchOffset(multiPartitionedTopic, 0) 185 | if err != nil { 186 | t.Fatal(err) 187 | } 188 | if offset != 100 { 189 | t.Fatal(offset) 190 | } 191 | offset, err = c.FetchOffset(multiPartitionedTopic, 1) 192 | if err != nil { 193 | t.Fatal(err) 194 | } 195 | if offset != 200 { 196 | t.Fatal(offset) 197 | } 198 | } 199 | 200 | func TestIntegrationGroupOffsetsTLS(t *testing.T) { 201 | bootstrap := "localhost:9093" 202 | c := &GroupClient{ 203 | Bootstrap: bootstrap, 204 | TLS: mTLSConfig(), 205 | GroupId: fmt.Sprintf("test-group-%x", rand.Uint32()), 206 | } 207 | topic := fmt.Sprintf("test-%x", rand.Uint32()) 208 | // topic doesn't exist and there is no offset committed 209 | if _, err := c.FetchOffset(topic, 0); err != nil { 210 | t.Fatal(err) 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | // Package client has code for making api calls to brokers.
It implements the 2 | // PartitionClient which maintains a connection to a single partition leader 3 | // (producers and consumers are built on top of that) and the GroupClient which 4 | // maintains a connection to the group manager (for group membership and for 5 | // offset management). Clients are synchronous and all code executes in the 6 | // calling goroutine. 7 | package client 8 | 9 | import ( 10 | "bufio" 11 | "crypto/tls" 12 | "errors" 13 | "fmt" 14 | "math/rand" 15 | "net" 16 | "strconv" 17 | "sync" 18 | "time" 19 | 20 | "github.com/mkocikowski/libkafka" 21 | "github.com/mkocikowski/libkafka/api" 22 | "github.com/mkocikowski/libkafka/api/ApiVersions" 23 | "github.com/mkocikowski/libkafka/api/CreateTopics" 24 | "github.com/mkocikowski/libkafka/api/Metadata" 25 | ) 26 | 27 | var ( 28 | srvLookupMutex sync.Mutex 29 | srvLookupCache = make(map[string][]string) // TODO: ttl 30 | 31 | errNotAnSRV = fmt.Errorf("not an SRV") 32 | ) 33 | 34 | func lookupSrv(name string) ([]string, error) { 35 | srvLookupMutex.Lock() 36 | defer srvLookupMutex.Unlock() 37 | if addrs, ok := srvLookupCache[name]; ok { 38 | addrsCopy := make([]string, len(addrs)) 39 | copy(addrsCopy, addrs) // making copy because it will be mutated 40 | return addrsCopy, nil 41 | } 42 | _, srvs, err := net.LookupSRV("", "", name) 43 | if err != nil { 44 | return nil, fmt.Errorf("%w: %s", errNotAnSRV, err) 45 | } 46 | var addrs []string 47 | for _, srv := range srvs { 48 | hosts, err := net.LookupHost(srv.Target) 49 | // skip hosts that can't be resolved 50 | if err != nil || len(hosts) == 0 { 51 | continue 52 | } 53 | host := net.JoinHostPort(hosts[0], strconv.Itoa(int(srv.Port))) 54 | addrs = append(addrs, host) 55 | } 56 | if len(addrs) < 1 { 57 | return nil, fmt.Errorf("failed to resolve SRV record %q: no hosts found", name) 58 | } 59 | srvLookupCache[name] = addrs 60 | addrsCopy := make([]string, len(addrs)) 61 | copy(addrsCopy, addrs) // making copy because it will be mutated 62 | return addrsCopy, nil 63 | } 64 | 65 | func forgetSrv(name string) { 66 | srvLookupMutex.Lock() 67 | delete(srvLookupCache, name) 68 | srvLookupMutex.Unlock() 69 | } 70 | 71 | // randomBroker tries to resolve name through a call to lookupSrv. If successful 72 | // it returns a random host:port from the list. lookupSrv in turn invokes 73 | // net.LookupSRV(); an unsuccessful lookup is treated as not an SRV record, so 74 | // you can pass "localhost:9092" for example. It returns an error if the resolved 75 | // SRV record has no hosts.
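//
// For example (the SRV name is hypothetical):
//
//	addr, _ := randomBroker("_kafka._tcp.example.com") // one of the SRV targets, chosen at random
//	addr, _ = randomBroker("localhost:9092")           // not an SRV record: returned as-is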
76 | func randomBroker(name string) (string, error) { 77 | addrs, err := lookupSrv(name) 78 | if err != nil { 79 | if errors.Is(err, errNotAnSRV) { 80 | return name, nil 81 | } 82 | return "", err 83 | } 84 | 85 | rand.Shuffle(len(addrs), func(i, j int) { 86 | addrs[i], addrs[j] = addrs[j], addrs[i] 87 | }) 88 | return addrs[0], nil 89 | } 90 | 91 | func connectToRandomBroker(bootstrap string, tlsConfig *tls.Config) (net.Conn, error) { 92 | host, err := randomBroker(bootstrap) 93 | if err != nil { 94 | return nil, fmt.Errorf("failed to get random broker: %w", err) 95 | } 96 | if tlsConfig != nil { 97 | return tls.DialWithDialer(&net.Dialer{Timeout: libkafka.DialTimeout}, "tcp", host, tlsConfig) 98 | } 99 | return net.DialTimeout("tcp", host, libkafka.DialTimeout) 100 | } 101 | 102 | func call(conn net.Conn, req *api.Request, v interface{}) error { 103 | if libkafka.RequestTimeout > 0 { 104 | if err := conn.SetDeadline(time.Now().Add(libkafka.RequestTimeout)); err != nil { 105 | return fmt.Errorf("failed to set connection deadline: %w", err) 106 | } 107 | } 108 | out := bufio.NewWriter(conn) 109 | if _, err := out.Write(req.Bytes()); err != nil { 110 | return fmt.Errorf("error sending %T request: %w", req.Body, err) 111 | } 112 | if err := out.Flush(); err != nil { 113 | return fmt.Errorf("error finalizing %T request: %w", req.Body, err) 114 | } 115 | resp, err := api.Read(bufio.NewReader(conn)) 116 | if err != nil { 117 | return fmt.Errorf("error reading %T response: %w", req.Body, err) 118 | } 119 | if err := resp.Unmarshal(v); err != nil { 120 | return fmt.Errorf("error unmarshaling %T response: %w", req.Body, err) 121 | } 122 | return nil 123 | } 124 | 125 | // this is used for calls that do not need to talk to a specific broker (such 126 | // as the Metadata call). these are the "bootstrap" calls. if bootstrap is an 127 | // srv record, that record gets resolved and that resolved value is cached. 128 | // that cached value is cleared on call error (for example: srv record pointed 129 | // to a host that used to be a kafka broker but no longer is). 
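//
// a minimal sketch of such a bootstrap call (assumes a local broker; the
// topic name is arbitrary):
//
//	resp, err := CallMetadata("localhost:9092", nil, []string{"my-topic"})
//	// on error, any cached SRV resolution for the bootstrap name is dropped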
130 | func connectToRandomBrokerAndCall(bootstrap string, tlsConfig *tls.Config, req *api.Request, v interface{}) (err error) { 131 | defer func() { 132 | if err != nil { 133 | forgetSrv(bootstrap) 134 | } 135 | }() 136 | var conn net.Conn 137 | if conn, err = connectToRandomBroker(bootstrap, tlsConfig); err != nil { 138 | return fmt.Errorf("error connecting to random broker (TLS: %v): %w", tlsConfig != nil, err) 139 | } 140 | defer conn.Close() 141 | if err := call(conn, req, v); err != nil { 142 | return fmt.Errorf("error making call to random broker (TLS: %v): %w", tlsConfig != nil, err) 143 | } 144 | return nil 145 | } 146 | 147 | func CallApiVersions(bootstrap string, tlsConfig *tls.Config) (*ApiVersions.Response, error) { 148 | req := ApiVersions.NewRequest() 149 | resp := &ApiVersions.Response{} 150 | return resp, connectToRandomBrokerAndCall(bootstrap, tlsConfig, req, resp) 151 | } 152 | 153 | func apiVersions(conn net.Conn) (*ApiVersions.Response, error) { 154 | req := ApiVersions.NewRequest() 155 | resp := &ApiVersions.Response{} 156 | return resp, call(conn, req, resp) 157 | } 158 | 159 | func CallMetadata(bootstrap string, tlsConfig *tls.Config, topics []string) (*Metadata.Response, error) { 160 | req := Metadata.NewRequest(topics) 161 | resp := &Metadata.Response{} 162 | return resp, connectToRandomBrokerAndCall(bootstrap, tlsConfig, req, resp) 163 | } 164 | 165 | func CallCreateTopic(bootstrap string, tlsConfig *tls.Config, topic string, numPartitions int32, replicationFactor int16) (*CreateTopics.Response, error) { 166 | req := CreateTopics.NewRequest(topic, numPartitions, replicationFactor, []CreateTopics.Config{}) 167 | resp := &CreateTopics.Response{} 168 | return resp, connectToRandomBrokerAndCall(bootstrap, tlsConfig, req, resp) 169 | } 170 | -------------------------------------------------------------------------------- /batch/batch_test.go: -------------------------------------------------------------------------------- 1 | package batch 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "testing" 7 | "time" 8 | 9 | "github.com/mkocikowski/libkafka/compression" 10 | "github.com/mkocikowski/libkafka/record" 11 | ) 12 | 13 | // this came from the wire from a live kafka 1.0 broker 14 | const recordBatchFixture = `AAAAAAAAAAMAAABMAAAAAAJx8ZMnAAAAAAACAAABbZh/W 15 | LMAAAFtmH9Ys/////////////8AAAAAAAAAAxAAAAABBG0xABAAAAIBBG0yABAAAAQBBG0zAA==` 16 | 17 | func TestUnitUnmarshalRecordSet(t *testing.T) { 18 | fixture, _ := base64.StdEncoding.DecodeString(recordBatchFixture) 19 | batches := RecordSet(fixture).Batches() 20 | if n := len(batches); n != 1 { 21 | t.Fatal(n) 22 | } 23 | batch, err := Unmarshal(batches[0]) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | if batch.Crc != 1911657255 { 28 | t.Fatal(batch.Crc) 29 | } 30 | } 31 | 32 | func TestUnitUnmarshalRecordSetIdempotent(t *testing.T) { 33 | fixture, _ := base64.StdEncoding.DecodeString(recordBatchFixture) 34 | b := RecordSet(fixture).Batches() 35 | if n := len(b); n != 1 { 36 | t.Fatal(n) 37 | } 38 | // verify that serialized batch is the same as RecordSet 39 | c := RecordSet(b[0]).Batches() 40 | if n := len(c); n != 1 { 41 | t.Fatal(n) 42 | } 43 | if !bytes.Equal(b[0], c[0]) { 44 | t.Fatal(b, c) 45 | } 46 | } 47 | 48 | func TestUnitUnmarshalBatchFixture(t *testing.T) { 49 | fixture, _ := base64.StdEncoding.DecodeString(recordBatchFixture) 50 | batch, err := Unmarshal(fixture) 51 | if err != nil { 52 | t.Fatal(err) 53 | } 54 | if batch.Crc != 1911657255 { 55 | t.Fatal(batch.Crc) 56 | } 57 | records := 
batch.Records() 58 | if len(records) != 3 { 59 | t.Fatal(len(records)) 60 | } 61 | fixture[86] = 0xff // corrupt the fixture 62 | if _, err = Unmarshal(fixture); err != CorruptedBatchError { 63 | t.Fatal(err) 64 | } 65 | } 66 | 67 | func TestUnitMarshalBatch(t *testing.T) { 68 | now := time.Now() 69 | batch, _ := NewBuilder(now).AddStrings("m1", "m2", "m3").Build(now) 70 | b := batch.Marshal() 71 | batch, err := Unmarshal(b) 72 | if err != nil { 73 | t.Fatal(err) 74 | } 75 | records := batch.Records() 76 | r, _ := record.Unmarshal(records[2]) 77 | if string(r.Value) != "m3" { 78 | t.Fatal(string(r.Value)) 79 | } 80 | } 81 | 82 | func TestUnitNumRecords(t *testing.T) { 83 | now := time.Now() 84 | builder := NewBuilder(now) 85 | if builder.NumRecords() != 0 { 86 | t.Fatal(builder.NumRecords()) 87 | } 88 | builder.AddStrings("foo") 89 | if builder.NumRecords() != 1 { 90 | t.Fatal(builder.NumRecords()) 91 | } 92 | batch, _ := builder.Build(now) 93 | if batch.NumRecords != 1 { 94 | t.Fatal(batch.NumRecords) 95 | } 96 | } 97 | 98 | func TestUnitBuild(t *testing.T) { 99 | now := time.Now() 100 | batch, _ := NewBuilder(now).AddStrings("m1", "m2", "m3").Build(now) 101 | if typ := batch.CompressionType(); typ != compression.None { 102 | t.Fatal(typ) 103 | } 104 | records := batch.Records() 105 | r, _ := record.Unmarshal(records[2]) 106 | if string(r.Value) != "m3" { 107 | t.Fatal(string(r.Value)) 108 | } 109 | t.Logf("%+v", r) 110 | } 111 | 112 | func TestUnitBuildEmptyBatch(t *testing.T) { 113 | now := time.Now() 114 | batch, err := NewBuilder(now).Build(now) 115 | if err != ErrEmpty { 116 | t.Fatal(batch, err) 117 | } 118 | } 119 | 120 | func TestUnitBuildBatchNilRecord(t *testing.T) { 121 | now := time.Now() 122 | builder := NewBuilder(now).AddStrings("foo") 123 | builder.Add(nil) 124 | batch, err := builder.Build(now) 125 | if err != ErrNilRecord { 126 | t.Fatal(batch, err) 127 | } 128 | } 129 | 130 | const recordBodiesFixture = `EAAAAAEEbTEAEAAAAgEEbTIAEAAABAEEbTMA` 131 | 132 | func TestUnitRecords(t *testing.T) { 133 | fixture, _ := base64.StdEncoding.DecodeString(recordBodiesFixture) 134 | batch := &Batch{MarshaledRecords: fixture} 135 | br := batch.Records() 136 | if len(br) != 3 { 137 | t.Fatal(len(br)) 138 | } 139 | r, _ := record.Unmarshal(br[2]) 140 | if string(r.Value) != "m3" { 141 | t.Fatal(string(r.Value)) 142 | } 143 | t.Logf("%+v", br) 144 | for _, b := range br { 145 | r, _ := record.Unmarshal(b) 146 | t.Logf("%+v %s", r, base64.StdEncoding.EncodeToString(b)) 147 | } 148 | } 149 | 150 | func TestUnitCompressionType(t *testing.T) { 151 | b := &Batch{Attributes: 12} 152 | if c := b.CompressionType(); c != compression.Zstd { 153 | t.Fatal(c) 154 | } 155 | } 156 | 157 | func TestUnitTimestampType(t *testing.T) { 158 | b := &Batch{Attributes: 12} 159 | if c := b.TimestampType(); c != TimestampLogAppend { 160 | t.Fatal(c) 161 | } 162 | } 163 | 164 | func BenchmarkBuild(b *testing.B) { 165 | builder := NewBuilder(time.Now().UTC()) 166 | for i := 0; i < 1000; i++ { 167 | r := record.New(make([]byte, 27), make([]byte, 3476)) 168 | builder.Add(r) 169 | } 170 | b.ReportAllocs() 171 | b.ResetTimer() 172 | for i := 0; i < b.N; i++ { 173 | _, err := builder.Build(time.Now().UTC()) 174 | if err != nil { 175 | b.Fatal(err) 176 | } 177 | } 178 | } 179 | 180 | func TestUnitUnmarshalRecordSetIncorrectMagicBytes(t *testing.T) { 181 | var encodedBatchBytes = []byte{ 182 | 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 183 | 0, 0, 0, 79, // Length 184 | 0, 0, 0, 0, // Partition Leader Epoch 185 | 0, // magic
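// magic selects the record batch wire format; this package only understands
// format 2, so the 0 above is what should make Unmarshal fail with
// UnsupportedMagicError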
186 | 184, 114, 85, 47, // CRC 187 | 0, 0, // Attributes 188 | 0, 0, 0, 0, // Last Offset Delta 189 | 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp 190 | 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 191 | 255, 255, 255, 255, 255, 255, 255, 255, // Producer ID 192 | 255, 255, // Producer Epoch 193 | 0, 0, 0, 0, // First Sequence 194 | 0, 0, 0, 1, // Number of Records 195 | //Record sequence 196 | 58, 0, 0, 0, 0, 46, 116, 101, 197 | 115, 116, 32, 98, 97, 116, 99, 198 | 104, 32, 102, 111, 114, 32, 108, 199 | 105, 98, 107, 97, 102, 107, 97, 0, 200 | } 201 | _, err := Unmarshal(encodedBatchBytes) 202 | if err != UnsupportedMagicError { 203 | t.Fatal(err) 204 | } 205 | } 206 | 207 | func TestUnitUnmarshalRecordSetCorrectMagicBytes(t *testing.T) { 208 | var encodedBatchBytes = []byte{ 209 | 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 210 | 0, 0, 0, 79, // Length 211 | 0, 0, 0, 0, // Partition Leader Epoch 212 | 2, // magic 213 | 184, 114, 85, 47, // CRC 214 | 0, 0, // Attributes 215 | 0, 0, 0, 0, // Last Offset Delta 216 | 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp 217 | 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 218 | 255, 255, 255, 255, 255, 255, 255, 255, // Producer ID 219 | 255, 255, // Producer Epoch 220 | 0, 0, 0, 0, // First Sequence 221 | 0, 0, 0, 1, // Number of Records 222 | //Record sequence 223 | 58, 0, 0, 0, 0, 46, 116, 101, 224 | 115, 116, 32, 98, 97, 116, 99, 225 | 104, 32, 102, 111, 114, 32, 108, 226 | 105, 98, 107, 97, 102, 107, 97, 0, 227 | } 228 | _, err := Unmarshal(encodedBatchBytes) 229 | if err != nil { 230 | t.Fatal(err) 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /test/v1_0/server.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker. 21 | broker.id=0 22 | 23 | ############################# Socket Server Settings ############################# 24 | 25 | # The address the socket server listens on. It will get the value returned from 26 | # java.net.InetAddress.getCanonicalHostName() if not configured. 27 | # FORMAT: 28 | # listeners = listener_name://host_name:port 29 | # EXAMPLE: 30 | # listeners = PLAINTEXT://your.host.name:9092 31 | listeners=PLAINTEXT://:9092 32 | 33 | # Hostname and port the broker will advertise to producers and consumers. If not set, 34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value 35 | # returned from java.net.InetAddress.getCanonicalHostName(). 
36 | advertised.listeners=PLAINTEXT://localhost:9092 37 | 38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details 39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL 40 | 41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network 42 | num.network.threads=3 43 | 44 | # The number of threads that the server uses for processing requests, which may include disk I/O 45 | num.io.threads=8 46 | 47 | # The send buffer (SO_SNDBUF) used by the socket server 48 | socket.send.buffer.bytes=102400 49 | 50 | # The receive buffer (SO_RCVBUF) used by the socket server 51 | socket.receive.buffer.bytes=102400 52 | 53 | # The maximum size of a request that the socket server will accept (protection against OOM) 54 | socket.request.max.bytes=104857600 55 | 56 | 57 | ############################# Log Basics ############################# 58 | 59 | # A comma seperated list of directories under which to store log files 60 | log.dirs=/tmp/kafka-logs 61 | 62 | # The default number of log partitions per topic. More partitions allow greater 63 | # parallelism for consumption, but this will also result in more files across 64 | # the brokers. 65 | num.partitions=1 66 | 67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. 68 | # This value is recommended to be increased for installations with data dirs located in RAID array. 69 | num.recovery.threads.per.data.dir=1 70 | 71 | ############################# Internal Topic Settings ############################# 72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 73 | # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3. 74 | offsets.topic.replication.factor=1 75 | transaction.state.log.replication.factor=1 76 | transaction.state.log.min.isr=1 77 | 78 | ############################# Log Flush Policy ############################# 79 | 80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync 81 | # the OS cache lazily. The following configurations control the flush of data to disk. 82 | # There are a few important trade-offs here: 83 | # 1. Durability: Unflushed data may be lost if you are not using replication. 84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. 85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. 86 | # The settings below allow one to configure the flush policy to flush data after a period of time or 87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis. 88 | 89 | # The number of messages to accept before forcing a flush of data to disk 90 | #log.flush.interval.messages=10000 91 | 92 | # The maximum amount of time a message can sit in a log before we force a flush 93 | #log.flush.interval.ms=1000 94 | 95 | ############################# Log Retention Policy ############################# 96 | 97 | # The following configurations control the disposal of log segments. The policy can 98 | # be set to delete segments after a period of time, or after a given size has accumulated. 
99 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens 100 | # from the end of the log. 101 | 102 | # The minimum age of a log file to be eligible for deletion due to age 103 | log.retention.hours=168 104 | 105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining 106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours. 107 | #log.retention.bytes=1073741824 108 | 109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created. 110 | log.segment.bytes=1073741824 111 | 112 | # The interval at which log segments are checked to see if they can be deleted according 113 | # to the retention policies 114 | log.retention.check.interval.ms=300000 115 | 116 | ############################# Zookeeper ############################# 117 | 118 | # Zookeeper connection string (see zookeeper docs for details). 119 | # This is a comma separated host:port pairs, each corresponding to a zk 120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 121 | # You can also append an optional chroot string to the urls to specify the 122 | # root directory for all kafka znodes. 123 | zookeeper.connect=zookeeper:2181 124 | 125 | # Timeout in ms for connecting to zookeeper 126 | zookeeper.connection.timeout.ms=6000 127 | 128 | 129 | ############################# Group Coordinator Settings ############################# 130 | 131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. 132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. 133 | # The default value for this is 3 seconds. 134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. 135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 136 | group.initial.rebalance.delay.ms=0 137 | -------------------------------------------------------------------------------- /test/v1_0/server.properties.v1_0: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker. 
21 | broker.id=0 22 | 23 | ############################# Socket Server Settings ############################# 24 | 25 | # The address the socket server listens on. It will get the value returned from 26 | # java.net.InetAddress.getCanonicalHostName() if not configured. 27 | # FORMAT: 28 | # listeners = listener_name://host_name:port 29 | # EXAMPLE: 30 | # listeners = PLAINTEXT://your.host.name:9092 31 | listeners=PLAINTEXT://:9093 32 | 33 | # Hostname and port the broker will advertise to producers and consumers. If not set, 34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value 35 | # returned from java.net.InetAddress.getCanonicalHostName(). 36 | advertised.listeners=PLAINTEXT://localhost:9093 37 | 38 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details 39 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL 40 | 41 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network 42 | num.network.threads=3 43 | 44 | # The number of threads that the server uses for processing requests, which may include disk I/O 45 | num.io.threads=8 46 | 47 | # The send buffer (SO_SNDBUF) used by the socket server 48 | socket.send.buffer.bytes=102400 49 | 50 | # The receive buffer (SO_RCVBUF) used by the socket server 51 | socket.receive.buffer.bytes=102400 52 | 53 | # The maximum size of a request that the socket server will accept (protection against OOM) 54 | socket.request.max.bytes=104857600 55 | 56 | 57 | ############################# Log Basics ############################# 58 | 59 | # A comma seperated list of directories under which to store log files 60 | log.dirs=/tmp/kafka-logs 61 | 62 | # The default number of log partitions per topic. More partitions allow greater 63 | # parallelism for consumption, but this will also result in more files across 64 | # the brokers. 65 | num.partitions=1 66 | 67 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. 68 | # This value is recommended to be increased for installations with data dirs located in RAID array. 69 | num.recovery.threads.per.data.dir=1 70 | 71 | ############################# Internal Topic Settings ############################# 72 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 73 | # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3. 74 | offsets.topic.replication.factor=1 75 | transaction.state.log.replication.factor=1 76 | transaction.state.log.min.isr=1 77 | 78 | ############################# Log Flush Policy ############################# 79 | 80 | # Messages are immediately written to the filesystem but by default we only fsync() to sync 81 | # the OS cache lazily. The following configurations control the flush of data to disk. 82 | # There are a few important trade-offs here: 83 | # 1. Durability: Unflushed data may be lost if you are not using replication. 84 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. 85 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. 
86 | # The settings below allow one to configure the flush policy to flush data after a period of time or 87 | # every N messages (or both). This can be done globally and overridden on a per-topic basis. 88 | 89 | # The number of messages to accept before forcing a flush of data to disk 90 | #log.flush.interval.messages=10000 91 | 92 | # The maximum amount of time a message can sit in a log before we force a flush 93 | #log.flush.interval.ms=1000 94 | 95 | ############################# Log Retention Policy ############################# 96 | 97 | # The following configurations control the disposal of log segments. The policy can 98 | # be set to delete segments after a period of time, or after a given size has accumulated. 99 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens 100 | # from the end of the log. 101 | 102 | # The minimum age of a log file to be eligible for deletion due to age 103 | log.retention.hours=168 104 | 105 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining 106 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours. 107 | #log.retention.bytes=1073741824 108 | 109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created. 110 | log.segment.bytes=1073741824 111 | 112 | # The interval at which log segments are checked to see if they can be deleted according 113 | # to the retention policies 114 | log.retention.check.interval.ms=300000 115 | 116 | ############################# Zookeeper ############################# 117 | 118 | # Zookeeper connection string (see zookeeper docs for details). 119 | # This is a comma separated host:port pairs, each corresponding to a zk 120 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 121 | # You can also append an optional chroot string to the urls to specify the 122 | # root directory for all kafka znodes. 123 | zookeeper.connect=zookeeper:2181/1_0 124 | 125 | # Timeout in ms for connecting to zookeeper 126 | zookeeper.connection.timeout.ms=6000 127 | 128 | 129 | ############################# Group Coordinator Settings ############################# 130 | 131 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. 132 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. 133 | # The default value for this is 3 seconds. 134 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. 135 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 136 | group.initial.rebalance.delay.ms=0 137 | -------------------------------------------------------------------------------- /test/server.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 | 
18 | ############################# Server Basics #############################
19 | 
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=0
22 | 
23 | ############################# Socket Server Settings #############################
24 | 
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | listeners=PLAINTEXT://:9092,SSL://:9093
32 | 
33 | # Hostname and port the broker will advertise to producers and consumers. If not set,
34 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
35 | # returned from java.net.InetAddress.getCanonicalHostName().
36 | advertised.listeners=PLAINTEXT://localhost:9092,SSL://localhost:9093
37 | 
38 | #security.inter.broker.protocol=SSL
39 | 
40 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
41 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
42 | 
43 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
44 | num.network.threads=3
45 | 
46 | # The number of threads that the server uses for processing requests, which may include disk I/O
47 | num.io.threads=8
48 | 
49 | # The send buffer (SO_SNDBUF) used by the socket server
50 | socket.send.buffer.bytes=102400
51 | 
52 | # The receive buffer (SO_RCVBUF) used by the socket server
53 | socket.receive.buffer.bytes=102400
54 | 
55 | # The maximum size of a request that the socket server will accept (protection against OOM)
56 | socket.request.max.bytes=104857600
57 | 
58 | 
59 | ############################# Log Basics #############################
60 | 
61 | # A comma-separated list of directories under which to store log files
62 | log.dirs=/tmp/kafka-logs
63 | 
64 | # The default number of log partitions per topic. More partitions allow greater
65 | # parallelism for consumption, but this will also result in more files across
66 | # the brokers.
67 | num.partitions=1
68 | 
69 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
70 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
71 | num.recovery.threads.per.data.dir=1
72 | 
73 | ############################# Internal Topic Settings #############################
74 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
75 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
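# For a production cluster the comment above suggests a replication factor
# greater than 1. For example, on a three-broker cluster one might set
# (illustrative values; the development defaults used by this test config
# follow below):
#   offsets.topic.replication.factor=3
#   transaction.state.log.replication.factor=3
#   transaction.state.log.min.isr=2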
76 | offsets.topic.replication.factor=1
77 | transaction.state.log.replication.factor=1
78 | transaction.state.log.min.isr=1
79 | 
80 | ############################# Log Flush Policy #############################
81 | 
82 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
83 | # the OS cache lazily. The following configurations control the flush of data to disk.
84 | # There are a few important trade-offs here:
85 | # 1. Durability: Unflushed data may be lost if you are not using replication.
86 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
87 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
88 | # The settings below allow one to configure the flush policy to flush data after a period of time or
89 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
90 | 
91 | # The number of messages to accept before forcing a flush of data to disk
92 | #log.flush.interval.messages=10000
93 | 
94 | # The maximum amount of time a message can sit in a log before we force a flush
95 | #log.flush.interval.ms=1000
96 | 
97 | ############################# Log Retention Policy #############################
98 | 
99 | # The following configurations control the disposal of log segments. The policy can
100 | # be set to delete segments after a period of time, or after a given size has accumulated.
101 | # A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
102 | # from the end of the log.
103 | 
104 | # The minimum age of a log file to be eligible for deletion due to age
105 | log.retention.hours=168
106 | 
107 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
108 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
109 | #log.retention.bytes=1073741824
110 | 
111 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
112 | log.segment.bytes=1073741824
113 | 
114 | # The interval at which log segments are checked to see if they can be deleted according
115 | # to the retention policies
116 | log.retention.check.interval.ms=300000
117 | 
118 | ############################# Zookeeper #############################
119 | 
120 | # Zookeeper connection string (see zookeeper docs for details).
121 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
122 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
123 | # You can also append an optional chroot string to the urls to specify the
124 | # root directory for all kafka znodes.
125 | zookeeper.connect=zookeeper:2181
126 | 
127 | # Timeout in ms for connecting to zookeeper
128 | zookeeper.connection.timeout.ms=6000
129 | 
130 | ############################# Group Coordinator Settings #############################
131 | 
132 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
133 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
134 | # The default value for this is 3 seconds.
135 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
136 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
137 | group.initial.rebalance.delay.ms=0
138 | 
139 | ############################# mTLS #############################
140 | ssl.client.auth=required
141 | ssl.keystore.location=/opt/kafka/config/kafka.server.keystore.jks
142 | ssl.keystore.password=123456
143 | ssl.key.password=123456
144 | ssl.truststore.location=/opt/kafka/config/kafka.server.truststore.jks
145 | ssl.truststore.password=123456
146 | 
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
1 | package libkafka
2 | 
3 | import "fmt"
4 | 
5 | type Error struct {
6 | 	Code    int16
7 | 	Message string
8 | }
9 | 
10 | func (e Error) Error() string {
11 | 	s := fmt.Sprintf("error code %d (%s)", e.Code, errorDescriptions[int(e.Code)])
12 | 	if e.Message != "" {
13 | 		s += ": " + e.Message
14 | 	}
15 | 	return s
16 | }
17 | 
18 | const (
19 | 	ERR_UNKNOWN_SERVER_ERROR = -1
20 | 	ERR_NONE = 0
21 | 	ERR_OFFSET_OUT_OF_RANGE = 1
22 | 	ERR_CORRUPT_MESSAGE = 2 // retriable: True
23 | 	ERR_UNKNOWN_TOPIC_OR_PARTITION = 3 // retriable: True
24 | 	ERR_INVALID_FETCH_SIZE = 4
25 | 	ERR_LEADER_NOT_AVAILABLE = 5 // retriable: True
26 | 	ERR_NOT_LEADER_FOR_PARTITION = 6 // retriable: True
27 | 	ERR_REQUEST_TIMED_OUT = 7 // retriable: True
28 | 	ERR_BROKER_NOT_AVAILABLE = 8
29 | 	ERR_REPLICA_NOT_AVAILABLE = 9
30 | 	ERR_MESSAGE_TOO_LARGE = 10
31 | 	ERR_STALE_CONTROLLER_EPOCH = 11
32 | 	ERR_OFFSET_METADATA_TOO_LARGE = 12
33 | 	ERR_NETWORK_EXCEPTION = 13 // retriable: True
34 | 	ERR_COORDINATOR_LOAD_IN_PROGRESS = 14 // retriable: True
35 | 	ERR_COORDINATOR_NOT_AVAILABLE = 15 // retriable: True
36 | 	ERR_NOT_COORDINATOR = 16 // retriable: True
37 | 	ERR_INVALID_TOPIC_EXCEPTION = 17
38 | 	ERR_RECORD_LIST_TOO_LARGE = 18
39 | 	ERR_NOT_ENOUGH_REPLICAS = 19 // retriable: True
40 | 	ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20 // retriable: True
41 | 	ERR_INVALID_REQUIRED_ACKS = 21
42 | 	ERR_ILLEGAL_GENERATION = 22
43 | 	ERR_INCONSISTENT_GROUP_PROTOCOL = 23
44 | 	ERR_INVALID_GROUP_ID = 24
45 | 	ERR_UNKNOWN_MEMBER_ID = 25
46 | 	ERR_INVALID_SESSION_TIMEOUT = 26
47 | 	ERR_REBALANCE_IN_PROGRESS = 27
48 | 	ERR_INVALID_COMMIT_OFFSET_SIZE = 28
49 | 	ERR_TOPIC_AUTHORIZATION_FAILED = 29
50 | 	ERR_GROUP_AUTHORIZATION_FAILED = 30
51 | 	ERR_CLUSTER_AUTHORIZATION_FAILED = 31
52 | 	ERR_INVALID_TIMESTAMP = 32
53 | 	ERR_UNSUPPORTED_SASL_MECHANISM = 33
54 | 	ERR_ILLEGAL_SASL_STATE = 34
55 | 	ERR_UNSUPPORTED_VERSION = 35
56 | 	ERR_TOPIC_ALREADY_EXISTS = 36
57 | 	ERR_INVALID_PARTITIONS = 37
58 | 	ERR_INVALID_REPLICATION_FACTOR = 38
59 | 	ERR_INVALID_REPLICA_ASSIGNMENT = 39
60 | 	ERR_INVALID_CONFIG = 40
61 | 	ERR_NOT_CONTROLLER = 41 // retriable: True
62 | 	ERR_INVALID_REQUEST = 42
63 | 	ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43
64 | 	ERR_POLICY_VIOLATION = 44
65 | 	ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45
66 | 	ERR_DUPLICATE_SEQUENCE_NUMBER = 46
67 | 	ERR_INVALID_PRODUCER_EPOCH = 47
68 | 	ERR_INVALID_TXN_STATE = 48
69 | 	ERR_INVALID_PRODUCER_ID_MAPPING = 49
70 | 	ERR_INVALID_TRANSACTION_TIMEOUT = 50
71 | 	ERR_CONCURRENT_TRANSACTIONS = 51
72 | 	ERR_TRANSACTION_COORDINATOR_FENCED = 52
73 | 	ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53
74 | 	ERR_SECURITY_DISABLED = 54
75 | 	ERR_OPERATION_NOT_ATTEMPTED = 55
76 | 	ERR_KAFKA_STORAGE_ERROR = 56 // retriable: True
77 | 	ERR_LOG_DIR_NOT_FOUND = 57
78 | 	ERR_SASL_AUTHENTICATION_FAILED = 58
79 | 	ERR_UNKNOWN_PRODUCER_ID = 59
80 | 	ERR_REASSIGNMENT_IN_PROGRESS = 60
81 | 	ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61
82 | 	ERR_DELEGATION_TOKEN_NOT_FOUND = 62
83 | 	ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63
84 | 	ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64
85 | 	ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65
86 | 	ERR_DELEGATION_TOKEN_EXPIRED = 66
87 | 	ERR_INVALID_PRINCIPAL_TYPE = 67
88 | 	ERR_NON_EMPTY_GROUP = 68
89 | 	ERR_GROUP_ID_NOT_FOUND = 69
90 | 	ERR_FETCH_SESSION_ID_NOT_FOUND = 70 // retriable: True
91 | 	ERR_INVALID_FETCH_SESSION_EPOCH = 71 // retriable: True
92 | 	ERR_LISTENER_NOT_FOUND = 72 // retriable: True
93 | 	ERR_TOPIC_DELETION_DISABLED = 73
94 | 	ERR_FENCED_LEADER_EPOCH = 74 // retriable: True
95 | 	ERR_UNKNOWN_LEADER_EPOCH = 75 // retriable: True
96 | 	ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
97 | 	ERR_STALE_BROKER_EPOCH = 77
98 | 	ERR_OFFSET_NOT_AVAILABLE = 78 // retriable: True
99 | 	ERR_MEMBER_ID_REQUIRED = 79
100 | 	ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80 // retriable: True
101 | 	ERR_GROUP_MAX_SIZE_REACHED = 81
102 | )
103 | 
104 | var errorDescriptions = map[int]string{
105 | 	-1: "UNKNOWN_SERVER_ERROR",
106 | 	0:  "NONE",
107 | 	1:  "OFFSET_OUT_OF_RANGE",
108 | 	2:  "CORRUPT_MESSAGE",
109 | 	3:  "UNKNOWN_TOPIC_OR_PARTITION",
110 | 	4:  "INVALID_FETCH_SIZE",
111 | 	5:  "LEADER_NOT_AVAILABLE",
112 | 	6:  "NOT_LEADER_FOR_PARTITION",
113 | 	7:  "REQUEST_TIMED_OUT",
114 | 	8:  "BROKER_NOT_AVAILABLE",
115 | 	9:  "REPLICA_NOT_AVAILABLE",
116 | 	10: "MESSAGE_TOO_LARGE",
117 | 	11: "STALE_CONTROLLER_EPOCH",
118 | 	12: "OFFSET_METADATA_TOO_LARGE",
119 | 	13: "NETWORK_EXCEPTION",
120 | 	14: "COORDINATOR_LOAD_IN_PROGRESS",
121 | 	15: "COORDINATOR_NOT_AVAILABLE",
122 | 	16: "NOT_COORDINATOR",
123 | 	17: "INVALID_TOPIC_EXCEPTION",
124 | 	18: "RECORD_LIST_TOO_LARGE",
125 | 	19: "NOT_ENOUGH_REPLICAS",
126 | 	20: "NOT_ENOUGH_REPLICAS_AFTER_APPEND",
127 | 	21: "INVALID_REQUIRED_ACKS",
128 | 	22: "ILLEGAL_GENERATION",
129 | 	23: "INCONSISTENT_GROUP_PROTOCOL",
130 | 	24: "INVALID_GROUP_ID",
131 | 	25: "UNKNOWN_MEMBER_ID",
132 | 	26: "INVALID_SESSION_TIMEOUT",
133 | 	27: "REBALANCE_IN_PROGRESS",
134 | 	28: "INVALID_COMMIT_OFFSET_SIZE",
135 | 	29: "TOPIC_AUTHORIZATION_FAILED",
136 | 	30: "GROUP_AUTHORIZATION_FAILED",
137 | 	31: "CLUSTER_AUTHORIZATION_FAILED",
138 | 	32: "INVALID_TIMESTAMP",
139 | 	33: "UNSUPPORTED_SASL_MECHANISM",
140 | 	34: "ILLEGAL_SASL_STATE",
141 | 	35: "UNSUPPORTED_VERSION",
142 | 	36: "TOPIC_ALREADY_EXISTS",
143 | 	37: "INVALID_PARTITIONS",
144 | 	38: "INVALID_REPLICATION_FACTOR",
145 | 	39: "INVALID_REPLICA_ASSIGNMENT",
146 | 	40: "INVALID_CONFIG",
147 | 	41: "NOT_CONTROLLER",
148 | 	42: "INVALID_REQUEST",
149 | 	43: "UNSUPPORTED_FOR_MESSAGE_FORMAT",
150 | 	44: "POLICY_VIOLATION",
151 | 	45: "OUT_OF_ORDER_SEQUENCE_NUMBER",
152 | 	46: "DUPLICATE_SEQUENCE_NUMBER",
153 | 	47: "INVALID_PRODUCER_EPOCH",
154 | 	48: "INVALID_TXN_STATE",
155 | 	49: "INVALID_PRODUCER_ID_MAPPING",
156 | 	50: "INVALID_TRANSACTION_TIMEOUT",
157 | 	51: "CONCURRENT_TRANSACTIONS",
158 | 	52: "TRANSACTION_COORDINATOR_FENCED",
159 | 	53: "TRANSACTIONAL_ID_AUTHORIZATION_FAILED",
160 | 	54: "SECURITY_DISABLED",
161 | 	55: "OPERATION_NOT_ATTEMPTED",
162 | 	56: "KAFKA_STORAGE_ERROR",
163 | 	57: "LOG_DIR_NOT_FOUND",
164 | 	58: "SASL_AUTHENTICATION_FAILED",
165 | 	59: "UNKNOWN_PRODUCER_ID",
166 | 	60: "REASSIGNMENT_IN_PROGRESS",
167 | 	61: "DELEGATION_TOKEN_AUTH_DISABLED",
168 | 	62: "DELEGATION_TOKEN_NOT_FOUND",
169 | 	63: "DELEGATION_TOKEN_OWNER_MISMATCH",
170 | 	64: "DELEGATION_TOKEN_REQUEST_NOT_ALLOWED",
171 | 	65: "DELEGATION_TOKEN_AUTHORIZATION_FAILED",
172 | 	66: "DELEGATION_TOKEN_EXPIRED",
173 | 	67: "INVALID_PRINCIPAL_TYPE",
67: "INVALID_PRINCIPAL_TYPE", 174 | 68: "NON_EMPTY_GROUP", 175 | 69: "GROUP_ID_NOT_FOUND", 176 | 70: "FETCH_SESSION_ID_NOT_FOUND", 177 | 71: "INVALID_FETCH_SESSION_EPOCH", 178 | 72: "LISTENER_NOT_FOUND", 179 | 73: "TOPIC_DELETION_DISABLED", 180 | 74: "FENCED_LEADER_EPOCH", 181 | 75: "UNKNOWN_LEADER_EPOCH", 182 | 76: "UNSUPPORTED_COMPRESSION_TYPE", 183 | 77: "STALE_BROKER_EPOCH", 184 | 78: "OFFSET_NOT_AVAILABLE", 185 | 79: "MEMBER_ID_REQUIRED", 186 | 80: "PREFERRED_LEADER_NOT_AVAILABLE", 187 | 81: "GROUP_MAX_SIZE_REACHED", 188 | } 189 | -------------------------------------------------------------------------------- /client/partition.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "net" 9 | "sync" 10 | "time" 11 | 12 | "github.com/mkocikowski/libkafka" 13 | "github.com/mkocikowski/libkafka/api" 14 | "github.com/mkocikowski/libkafka/api/ApiVersions" 15 | "github.com/mkocikowski/libkafka/api/Fetch" 16 | "github.com/mkocikowski/libkafka/api/ListOffsets" 17 | "github.com/mkocikowski/libkafka/api/Metadata" 18 | "github.com/mkocikowski/libkafka/api/Produce" 19 | ) 20 | 21 | var ( 22 | ErrPartitionDoesNotExist = errors.New("partition does not exist") 23 | ErrNoLeaderForPartition = errors.New("no leader for partition") 24 | ) 25 | 26 | func GetPartitionLeader(bootstrap string, tlsConfig *tls.Config, topic string, partition int32) (*Metadata.Broker, error) { 27 | meta, err := CallMetadata(bootstrap, tlsConfig, []string{topic}) 28 | if err != nil { 29 | return nil, err 30 | } 31 | partitions := meta.Partitions(topic) 32 | if p := partitions[partition]; p == nil { 33 | return nil, ErrPartitionDoesNotExist 34 | } 35 | leaders := meta.Leaders(topic) 36 | if l := leaders[partition]; l == nil { 37 | return nil, ErrNoLeaderForPartition 38 | } 39 | return leaders[partition], nil 40 | } 41 | 42 | // PartitionClient maintains a connection to the leader of a single topic 43 | // partition. The client uses the Bootstrap value to look up topic metadata and 44 | // to connect to the Leader of given topic partition. This happens on the first 45 | // API call. Connections are persistent. All client API calls are synchronous. 46 | // If an API call can't complete the request-response round trip or if Kafka 47 | // response can't be parsed then the API call returns an error and the 48 | // underlying connection is closed (it will be re-opened on next call). If 49 | // response is parsed successfully no error is returned but this means only 50 | // that the request-response round trip was completed: there could be an error 51 | // code returned in the Kafka response itself. Checking for and interpreting 52 | // that error (and possibly calling Close) is up to the user. Retries are up to 53 | // the user. All PartitionClient calls are safe for concurrent use. 54 | type PartitionClient struct { 55 | sync.Mutex 56 | Bootstrap string // srv or host:port 57 | TLS *tls.Config 58 | ClientId string 59 | Topic string 60 | Partition int32 61 | // ConnMaxIdle corresponds to connections.max.idle.ms broker setting. 62 | // Kafka broker will close connections that have been idle (no api 63 | // calls have been made; this is not about tcp keep alives) for this 64 | // long. Making a PartitionClient call on a closed connection will 65 | // result in an error (and connection will be re-established on the 66 | // next call). If you don't want this error, set ConnMaxIdle to >0. 
67 | 	// This way, if more than ConnMaxIdle has passed since the last call,
68 | 	// PartitionClient will close the current connection, and open a new
69 | 	// one. The default value of 0 means that no check is made.
70 | 	ConnMaxIdle  time.Duration
71 | 	leader       *Metadata.Broker
72 | 	versions     *ApiVersions.Response
73 | 	conn         net.Conn
74 | 	connOpened   time.Time
75 | 	connLastUsed time.Time
76 | }
77 | 
78 | // if the client has an open connection, check it for libkafka.ConnectionTTL
79 | // and ConnMaxIdle. if these are exceeded, close the connection, otherwise noop. if
80 | // there is no open connection (or it was just closed because of TTL or
81 | // IdleTimeout) find partition leader, connect to it, and set c.leader
82 | func (c *PartitionClient) connect() (err error) {
83 | 	// no mutex here. connect() is called only from call(), and that is
84 | 	// where the mutex is acquired for both connect() and disconnect()
85 | 	if c.conn != nil {
86 | 		switch {
87 | 		case libkafka.ConnectionTTL > 0 && time.Since(c.connOpened) > libkafka.ConnectionTTL:
88 | 			// connection exceeded TTL
89 | 			c.disconnect()
90 | 		case c.ConnMaxIdle > 0 && time.Since(c.connLastUsed) > c.ConnMaxIdle:
91 | 			// connection exceeded MaxIdle
92 | 			c.disconnect()
93 | 		default:
94 | 			// ConnTTL and ConnMaxIdle do not apply. Leave connection open
95 | 			return nil
96 | 		}
97 | 	}
98 | 	c.leader, err = GetPartitionLeader(c.Bootstrap, c.TLS, c.Topic, c.Partition)
99 | 	if err != nil {
100 | 		return fmt.Errorf("error getting partition leader: %w", err)
101 | 	}
102 | 	if c.TLS != nil {
103 | 		conn, err := tls.DialWithDialer(&net.Dialer{Timeout: libkafka.DialTimeout}, "tcp", c.leader.Addr(), c.TLS)
104 | 		if err != nil {
105 | 			return err
106 | 		}
107 | 		c.conn = conn
108 | 	}
109 | 	if c.TLS == nil {
110 | 		conn, err := net.DialTimeout("tcp", c.leader.Addr(), libkafka.DialTimeout)
111 | 		if err != nil {
112 | 			return err
113 | 		}
114 | 		c.conn = conn
115 | 	}
116 | 	c.connOpened = time.Now().UTC()
117 | 	c.connLastUsed = c.connOpened
118 | 	// version information is needed only for the kafka 1.0 produce hack
119 | 	c.versions, err = apiVersions(c.conn)
120 | 	if err != nil {
121 | 		return fmt.Errorf("error getting api versions from broker: %w", err)
122 | 	}
123 | 	if code := c.versions.ErrorCode; code != libkafka.ERR_NONE {
124 | 		return fmt.Errorf("error response for api versions call from broker: %w", libkafka.Error{Code: code})
125 | 	}
126 | 	return nil
127 | }
128 | 
129 | // close connection to leader, but do not zero c.leader (so that it can still
130 | // be accessed with c.Leader call)
131 | func (c *PartitionClient) disconnect() error {
132 | 	// no mutex here. disconnect() is called only from call() and from
133 | 	// Close(), and that is where the mutex is acquired.
134 | 	if c.conn == nil {
135 | 		return nil
136 | 	}
137 | 	defer func() {
138 | 		if r := recover(); r != nil {
139 | 			log.Printf("recovered in PartitionClient.disconnect: %v", r)
140 | 		}
141 | 	}()
142 | 	c.conn.Close()
143 | 	c.conn = nil
144 | 	return nil
145 | }
146 | 
147 | // Close the connection to the topic partition leader. Nop if no active
148 | // connection. If there is a request in progress, blocks until the request
149 | // completes.
150 | func (c *PartitionClient) Close() error { // implement io.Closer
151 | 	c.Lock()
152 | 	defer c.Unlock()
153 | 	c.disconnect()
154 | 	return nil
155 | }
156 | 
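// An aside on the TLS field above: it takes a standard *tls.Config. A minimal
// sketch of building one for an mTLS setup like the one in
// test/server.properties (the PEM file paths are hypothetical, and this is
// plain crypto/tls, not part of this package):
//
//	func mtlsConfig() (*tls.Config, error) {
//		caPem, err := ioutil.ReadFile("ca-cert.pem") // CA that signed the broker cert
//		if err != nil {
//			return nil, err
//		}
//		pool := x509.NewCertPool()
//		pool.AppendCertsFromPEM(caPem)
//		cert, err := tls.LoadX509KeyPair("client-cert.pem", "client-key.pem")
//		if err != nil {
//			return nil, err
//		}
//		return &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}}, nil
//	}
//
// The returned config could then be assigned to PartitionClient.TLS (or
// GroupClient.TLS).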
157 | // Leader returns the last resolved partition leader, even if connection has
158 | // since been closed (as happens on error).
159 | func (c *PartitionClient) Leader() *Metadata.Broker {
160 | 	c.Lock()
161 | 	defer c.Unlock()
162 | 	return c.leader
163 | }
164 | 
165 | // Conn returns the connection that the client has to the partition leader. The
166 | // call is safe for concurrent use, but it is not safe to change the connection.
167 | // The purpose of exposing it here is mostly to make it easier to test network
168 | // errors. Be careful with this one. If you need to cleanly close the current
169 | // connection to the leader, call Close(), not Conn().Close().
170 | func (c *PartitionClient) Conn() net.Conn {
171 | 	c.Lock()
172 | 	defer c.Unlock()
173 | 	return c.conn
174 | }
175 | 
176 | func (c *PartitionClient) call(req *api.Request, v interface{}) error {
177 | 	c.Lock()
178 | 	defer c.Unlock()
179 | 	if err := c.connect(); err != nil {
180 | 		return fmt.Errorf("error connecting to partition leader (TLS: %v): %w", c.TLS != nil, err)
181 | 	}
182 | 	// TODO: remove
183 | 	if req.ApiKey == api.Produce && c.versions.ApiKeys[api.Produce].MaxVersion == 5 {
184 | 		req.ApiVersion = 5 // downgrade to be able to produce to kafka 1.0
185 | 	}
186 | 	err := call(c.conn, req, v)
187 | 	if err != nil {
188 | 		c.disconnect()
189 | 		err = fmt.Errorf("error making call to partition leader (TLS: %v): %w", c.TLS != nil, err)
190 | 	}
191 | 	c.connLastUsed = time.Now().UTC()
192 | 	return err
193 | }
194 | 
195 | func (c *PartitionClient) ListOffsets(timestampMs int64) (*ListOffsets.Response, error) {
196 | 	req := ListOffsets.NewRequest(c.Topic, c.Partition, timestampMs)
197 | 	resp := &ListOffsets.Response{}
198 | 	return resp, c.call(req, resp)
199 | }
200 | 
201 | func (c *PartitionClient) Fetch(args *Fetch.Args) (*Fetch.Response, error) {
202 | 	req := Fetch.NewRequest(args)
203 | 	resp := &Fetch.Response{}
204 | 	return resp, c.call(req, resp)
205 | }
206 | 
207 | func (c *PartitionClient) Produce(args *Produce.Args, recordSet []byte) (*Produce.Response, error) {
208 | 	req := Produce.NewRequest(args, recordSet)
209 | 	resp := &Produce.Response{}
210 | 	return resp, c.call(req, resp)
211 | }
212 | 
--------------------------------------------------------------------------------
/client/group.go:
--------------------------------------------------------------------------------
1 | package client
2 | 
3 | import (
4 | 	"crypto/tls"
5 | 	"fmt"
6 | 	"net"
7 | 	"strconv"
8 | 	"sync"
9 | 
10 | 	"github.com/mkocikowski/libkafka"
11 | 	"github.com/mkocikowski/libkafka/api"
12 | 	"github.com/mkocikowski/libkafka/api/FindCoordinator"
13 | 	"github.com/mkocikowski/libkafka/api/Heartbeat"
14 | 	"github.com/mkocikowski/libkafka/api/JoinGroup"
15 | 	"github.com/mkocikowski/libkafka/api/OffsetCommit"
16 | 	"github.com/mkocikowski/libkafka/api/OffsetFetch"
17 | 	"github.com/mkocikowski/libkafka/api/SyncGroup"
18 | )
19 | 
20 | func CallFindCoordinator(bootstrap string, tlsConfig *tls.Config, groupId string) (*FindCoordinator.Response, error) {
21 | 	req := FindCoordinator.NewRequest(groupId)
22 | 	resp := &FindCoordinator.Response{}
23 | 	return resp, connectToRandomBrokerAndCall(bootstrap, tlsConfig, req, resp)
24 | }
25 | 
26 | func GetGroupCoordinator(bootstrap string, tlsConfig *tls.Config, groupId string) (string, error) {
27 | 	resp, err := CallFindCoordinator(bootstrap, tlsConfig, groupId)
28 | 	if err != nil {
29 | 		return "", fmt.Errorf("error making FindCoordinator call: %w", err)
30 | 	}
31 | 	if resp.ErrorCode != 0 {
32 | 		return "", fmt.Errorf("error response from FindCoordinator call: %w", &libkafka.Error{Code: resp.ErrorCode})
33 | 	}
34 | 	return net.JoinHostPort(resp.Host, strconv.Itoa(int(resp.Port))), nil
35 | }
36 | 
37 | // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
38 | 
39 | type GroupClient struct {
40 | 	sync.Mutex
41 | 	Bootstrap string
42 | 	TLS       *tls.Config
43 | 	GroupId   string
44 | 	conn      net.Conn
45 | }
46 | 
47 | func (c *GroupClient) connect() error {
48 | 	if c.conn != nil {
49 | 		return nil
50 | 	}
51 | 	addr, err := GetGroupCoordinator(c.Bootstrap, c.TLS, c.GroupId)
52 | 	if err != nil {
53 | 		return err
54 | 	}
55 | 	if c.TLS != nil {
56 | 		conn, err := tls.DialWithDialer(&net.Dialer{Timeout: libkafka.DialTimeout}, "tcp", addr, c.TLS)
57 | 		if err != nil {
58 | 			return err
59 | 		}
60 | 		c.conn = conn
61 | 	}
62 | 	if c.TLS == nil {
63 | 		conn, err := net.DialTimeout("tcp", addr, libkafka.DialTimeout)
64 | 		if err != nil {
65 | 			return err
66 | 		}
67 | 		c.conn = conn
68 | 	}
69 | 	return nil
70 | }
71 | 
72 | func (c *GroupClient) disconnect() error {
73 | 	if c.conn == nil {
74 | 		return nil
75 | 	}
76 | 	c.conn.Close()
77 | 	c.conn = nil
78 | 	return nil
79 | }
80 | 
81 | // Close the connection to the group coordinator. Nop if no active connection.
82 | // If there is a request in progress, blocks until the request completes. This,
83 | // like its counterpart in PartitionClient, is intended to give higher-level
84 | // libraries the ability to respond to errors by closing the connection (and so
85 | // forcing reconnect on next call).
86 | func (c *GroupClient) Close() error { // implement io.Closer
87 | 	c.Lock()
88 | 	defer c.Unlock()
89 | 	c.disconnect()
90 | 	return nil
91 | }
92 | 
93 | // Call makes a request (connecting to the coordinator if necessary) and reads
94 | // the response. If there is an error making the request or reading the response,
95 | // it disconnects. Response is not interpreted (ie, Call does not look at the
96 | // possible error codes inside the kafka response). The purpose of this method
97 | // is to allow users to make their "own" requests - using a different api
98 | // version, or calling for multiple topic-partitions. For use, see the source
99 | // of methods such as FetchOffset (basically, you pass it a struct with the
100 | // request that will be marshaled into wire format, and a struct pointer into
101 | // which response will be unmarshaled: these structs are defined in the api
102 | // package for various api keys, but you can provide your own). This is a
103 | // low-level method that was private; I decided to make it public to give users
104 | // more flexibility. We'll see how it goes.
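//
// For example, the body of FetchOffset further down reduces to this pattern,
// which works the same way for any request/response pair from the api package
// (the topic name here is hypothetical):
//
//	req := OffsetFetch.NewRequest(c.GroupId, "my-topic", 0)
//	resp := &OffsetFetch.Response{}
//	err := c.Call(req, resp)
//	// interpreting resp, including any error codes in it, is up to the caller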
105 | func (c *GroupClient) Call(req *api.Request, respStructPtr interface{}) error {
106 | 	c.Lock()
107 | 	defer c.Unlock()
108 | 	if err := c.connect(); err != nil {
109 | 		return fmt.Errorf("error connecting to group coordinator (TLS: %v): %w", c.TLS != nil, err)
110 | 	}
111 | 	err := call(c.conn, req, respStructPtr)
112 | 	if err != nil {
113 | 		c.disconnect()
114 | 	}
115 | 	return err
116 | }
117 | 
118 | func (c *GroupClient) callJoin(memberId, protoType string, protocols []JoinGroup.Protocol) (*JoinGroup.Response, error) {
119 | 	req := JoinGroup.NewRequest(c.GroupId, memberId, protoType, protocols)
120 | 	resp := &JoinGroup.Response{}
121 | 	return resp, c.Call(req, resp)
122 | }
123 | 
124 | func (c *GroupClient) callSync(memberId string, generationId int32, assignments []SyncGroup.Assignment) (*SyncGroup.Response, error) {
125 | 	req := SyncGroup.NewRequest(c.GroupId, memberId, generationId, assignments)
126 | 	//log.Printf("%+v", req)
127 | 	resp := &SyncGroup.Response{}
128 | 	return resp, c.Call(req, resp)
129 | }
130 | 
131 | type JoinGroupRequest struct {
132 | 	MemberId     string
133 | 	ProtocolType string
134 | 	ProtocolName string
135 | 	Metadata     []byte
136 | 	//group.initial.rebalance.delay.ms
137 | }
138 | 
139 | func (c *GroupClient) Join(req *JoinGroupRequest) (*JoinGroup.Response, error) {
140 | 	p := JoinGroup.Protocol{
141 | 		Name:     req.ProtocolName,
142 | 		Metadata: req.Metadata,
143 | 	}
144 | 	return c.callJoin(req.MemberId, req.ProtocolType, []JoinGroup.Protocol{p})
145 | }
146 | 
147 | type SyncGroupRequest struct {
148 | 	MemberId     string
149 | 	GenerationId int32
150 | 	Assignments  []SyncGroup.Assignment
151 | }
152 | 
153 | func (c *GroupClient) Sync(req *SyncGroupRequest) (*SyncGroup.Response, error) {
154 | 	return c.callSync(req.MemberId, req.GenerationId, req.Assignments)
155 | }
156 | 
157 | func (c *GroupClient) Heartbeat(memberId string, generationId int32) (*Heartbeat.Response, error) {
158 | 	req := Heartbeat.NewRequest(c.GroupId, memberId, generationId)
159 | 	resp := &Heartbeat.Response{}
160 | 	return resp, c.Call(req, resp)
161 | }
162 | 
163 | func parseOffsetFetchResponse(r *OffsetFetch.Response) (int64, error) {
164 | 	if r.ErrorCode != libkafka.ERR_NONE {
165 | 		return -1, &libkafka.Error{Code: r.ErrorCode}
166 | 	}
167 | 	if n := len(r.Topics); n != 1 {
168 | 		return -1, fmt.Errorf("unexpected number of topic responses: %d", n)
169 | 	}
170 | 	t := r.Topics[0]
171 | 	if n := len(t.Partitions); n != 1 {
172 | 		return -1, fmt.Errorf("unexpected number of topic partition responses: %d", n)
173 | 	}
174 | 	p := t.Partitions[0]
175 | 	if p.ErrorCode != libkafka.ERR_NONE {
176 | 		return -1, &libkafka.Error{Code: p.ErrorCode}
177 | 	}
178 | 	return p.CommitedOffset, nil
179 | }
180 | 
181 | // Fetch the last committed offset for a topic partition. If the topic partition does
182 | // not exist, or there is no offset committed for it, returns -1 and no error.
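//
// Since the returned error may be a *libkafka.Error, callers can inspect the
// Kafka error code with errors.As from the standard library. A sketch (topic
// name hypothetical):
//
//	offset, err := c.FetchOffset("my-topic", 0)
//	var kerr *libkafka.Error
//	if errors.As(err, &kerr) && kerr.Code == libkafka.ERR_NOT_COORDINATOR {
//		c.Close() // force a reconnect (and coordinator lookup) on the next call
//	}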
183 | func (c *GroupClient) FetchOffset(topic string, partition int32) (int64, error) {
184 | 	req := OffsetFetch.NewRequest(c.GroupId, topic, partition)
185 | 	resp := &OffsetFetch.Response{}
186 | 	if err := c.Call(req, resp); err != nil {
187 | 		return -1, fmt.Errorf("error making fetch offsets call: %w", err)
188 | 	}
189 | 	return parseOffsetFetchResponse(resp)
190 | }
191 | 
192 | func parseOffsetCommitResponse(r *OffsetCommit.Response) error {
193 | 	if n := len(r.Topics); n != 1 {
194 | 		return fmt.Errorf("unexpected number of topic responses: %d", n)
195 | 	}
196 | 	t := r.Topics[0]
197 | 	if n := len(t.Partitions); n != 1 {
198 | 		return fmt.Errorf("unexpected number of topic partition responses: %d", n)
199 | 	}
200 | 	p := t.Partitions[0]
201 | 	if p.ErrorCode != libkafka.ERR_NONE {
202 | 		return &libkafka.Error{Code: p.ErrorCode}
203 | 	}
204 | 	return nil
205 | }
206 | 
207 | func (c *GroupClient) CommitOffset(topic string, partition int32, offset, retentionMs int64) error {
208 | 	req := OffsetCommit.NewRequest(c.GroupId, topic, partition, offset, retentionMs)
209 | 	resp := &OffsetCommit.Response{}
210 | 	if err := c.Call(req, resp); err != nil {
211 | 		return fmt.Errorf("error making commit offset call: %w", err)
212 | 	}
213 | 	return parseOffsetCommitResponse(resp)
214 | }
215 | 
216 | // CommitMultiplePartitionsOffsets commits offsets for multiple partitions of a
217 | // specific topic at once. Accepts a topic and a map of partition -> offset,
218 | // along with the time to retain the offsets (in ms).
219 | func (c *GroupClient) CommitMultiplePartitionsOffsets(topic string, offsets map[int32]int64, retentionMs int64) error {
220 | 	req := OffsetCommit.NewMultiplePartitionsRequest(c.GroupId, topic, offsets, retentionMs)
221 | 	resp := &OffsetCommit.Response{}
222 | 	if err := c.Call(req, resp); err != nil {
223 | 		return fmt.Errorf("error committing offsets for multiple partitions: %w", err)
224 | 	}
225 | 	return parseCommitMultiplePartitionsOffsetsResponse(resp)
226 | }
227 | 
228 | // parseCommitMultiplePartitionsOffsetsResponse reads the response of committing
229 | // offsets for multiple partitions and returns an error if the response is malformed
230 | // or if at least one of the partitions failed
231 | func parseCommitMultiplePartitionsOffsetsResponse(resp *OffsetCommit.Response) error {
232 | 	if n := len(resp.Topics); n != 1 {
233 | 		return fmt.Errorf("malformed response: unexpected number of topic responses: %d", n)
234 | 	}
235 | 	t := resp.Topics[0]
236 | 	if len(t.Partitions) < 1 {
237 | 		return fmt.Errorf("malformed response: empty list of partition responses")
238 | 	}
239 | 	for _, p := range t.Partitions {
240 | 		if p.ErrorCode != libkafka.ERR_NONE {
241 | 			return &libkafka.Error{Code: p.ErrorCode}
242 | 		}
243 | 	}
244 | 	return nil
245 | }
246 | 
--------------------------------------------------------------------------------
/batch/batch.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package batch implements functions for building, marshaling, and unmarshaling
3 | Kafka record batches.
4 | 
5 | Producing
6 | 
7 | When producing messages, call NewBuilder, and Add records to it. Call
8 | Builder.Build and pass the returned Batch to the producer. Release the
9 | reference to Builder when done with it to release references to added records.
10 | 
11 | Fetching ("consuming")
12 | 
13 | Fetch result (if successful) will contain RecordSet. Call its Batches method to
14 | get byte slices containing individual batches. Unmarshal each batch
15 | individually. To get individual records, call Batch.Records and then
16 | record.Unmarshal. Passing around batches is much more efficient than passing
17 | individual records, so save record unmarshaling until the very end.
18 | */
19 | package batch
20 | 
21 | import (
22 | 	"bytes"
23 | 	"encoding/binary"
24 | 	"errors"
25 | 	"fmt"
26 | 	"hash/crc32"
27 | 	"reflect"
28 | 	"time"
29 | 
30 | 	"github.com/mkocikowski/libkafka/compression"
31 | 	"github.com/mkocikowski/libkafka/record"
32 | 	"github.com/mkocikowski/libkafka/varint"
33 | 	"github.com/mkocikowski/libkafka/wire"
34 | )
35 | 
36 | type Compressor interface {
37 | 	Compress([]byte) ([]byte, error)
38 | 	Type() int16
39 | }
40 | 
41 | type Decompressor interface {
42 | 	Decompress([]byte) ([]byte, error)
43 | 	Type() int16
44 | }
45 | 
46 | func NewBuilder(now time.Time) *Builder {
47 | 	return &Builder{t: now}
48 | }
49 | 
50 | // Builder is used for building record batches. There is no limit on the number
51 | // of records (up to the user). Not safe for concurrent use.
52 | type Builder struct {
53 | 	t       time.Time
54 | 	records []*record.Record
55 | }
56 | 
57 | // Add records to the batch. References to added records are not released on
58 | // call to Build. This means you can add more records and call Build again.
59 | // Don't know why you would want to, but you can.
60 | func (b *Builder) Add(records ...*record.Record) {
61 | 	b.records = append(b.records, records...)
62 | }
63 | 
64 | func (b *Builder) AddStrings(values ...string) *Builder {
65 | 	for _, s := range values {
66 | 		b.records = append(b.records, record.New(nil, []byte(s)))
67 | 	}
68 | 	return b
69 | }
70 | 
71 | // NumRecords that have been added to the builder.
72 | func (b *Builder) NumRecords() int {
73 | 	return len(b.records)
74 | }
75 | 
76 | var (
77 | 	ErrEmpty     = errors.New("empty batch")
78 | 	ErrNilRecord = errors.New("nil record in batch")
79 | )
80 | 
81 | // Build a record batch (marshal individual records and set batch metadata).
82 | // Call this after adding records to the batch. Returns ErrEmpty if batch has
83 | // no records. Returns ErrNilRecord if any of the records is nil. Marshaled
84 | // records are not compressed (call Batch.Compress). Batch FirstTimestamp is
85 | // set to the time when the builder was created (with NewBuilder) and the
86 | // MaxTimestamp is set to the time passed to Build. Within the batch, each
87 | // record's TimestampDelta is 0, meaning that all records will appear to have
88 | // been produced at the time the builder was created (TODO: change? how?).
89 | // Idempotent.
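//
// A minimal produce-side sketch of the flow described in the package comment:
//
//	b := NewBuilder(time.Now())
//	b.AddStrings("foo", "bar")
//	batch, err := b.Build(time.Now())
//	if err != nil {
//		// ErrEmpty or ErrNilRecord
//	}
//	set := batch.Marshal() // optionally call batch.Compress(...) first
//
// The resulting RecordSet is the wire format that a Produce call sends.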
90 | func (b *Builder) Build(now time.Time) (*Batch, error) {
91 | 	if len(b.records) == 0 {
92 | 		return nil, ErrEmpty
93 | 	}
94 | 	tmp := make([]byte, binary.MaxVarintLen64)
95 | 	header := make([]byte, 1<<10)
96 | 	buf := new(bytes.Buffer)
97 | 	for i, r := range b.records {
98 | 		if r == nil {
99 | 			return nil, ErrNilRecord
100 | 		}
101 | 		r.OffsetDelta = int64(i)
102 | 		r.Marshal4(tmp, header, buf)
103 | 	}
104 | 	marshaledRecords := buf.Bytes()
105 | 	return &Batch{
106 | 		BatchLengthBytes: int32(49 + len(marshaledRecords)), // TODO: constant
107 | 		Magic:            2,
108 | 		Attributes:       compression.None,
109 | 		LastOffsetDelta:  int32(len(b.records) - 1),
110 | 		// TODO: base timestamps on record timestamps
111 | 		FirstTimestamp:   b.t.UnixNano() / int64(time.Millisecond),
112 | 		MaxTimestamp:     now.UnixNano() / int64(time.Millisecond),
113 | 		ProducerId:       -1,
114 | 		ProducerEpoch:    -1,
115 | 		NumRecords:       int32(len(b.records)),
116 | 		MarshaledRecords: marshaledRecords,
117 | 	}, nil
118 | }
119 | 
120 | var (
121 | 	CorruptedBatchError   = errors.New("batch crc does not match bytes")
122 | 	UnsupportedMagicError = errors.New("magic value is not 2")
123 | 	crc32c                = crc32.MakeTable(crc32.Castagnoli)
124 | )
125 | 
126 | // Prior to version 0.11 kafka used message sets
127 | // (https://kafka.apache.org/documentation/#messageset), which have magic value 0 or 1,
128 | // and starting with 0.11 it uses record batches (https://kafka.apache.org/documentation/#recordbatch),
129 | // which have magic value 2. libkafka only supports record batches. But if a pre-0.11
130 | // client writes to kafka, the old message set format is used, and libkafka can not read that.
131 | // verifyMagicByte checks that the magic byte is set to 2. In both formats the
132 | // 16th byte of the marshaled bytes always holds the Magic attribute; if it is
133 | // not 2, the bytes can not be a record batch, and UnsupportedMagicError is
134 | // returned.
135 | func verifyMagicByte(batchBytes []byte) error {
136 | 	if batchBytes[16] != 2 {
137 | 		return UnsupportedMagicError
138 | 	}
139 | 	return nil
140 | }
141 | 
142 | // Unmarshal the batch. On error batch is nil. If there is an error, it is most
143 | // likely because the crc failed. In that case there is no way to tell how many
144 | // records there were in the batch (and to adjust offsets accordingly).
145 | func Unmarshal(b []byte) (*Batch, error) {
146 | 	if err := verifyMagicByte(b); err != nil {
147 | 		return nil, err
148 | 	}
149 | 	buf := bytes.NewBuffer(b)
150 | 	batch := &Batch{}
151 | 	if err := wire.Read(buf, reflect.ValueOf(batch)); err != nil {
152 | 		return nil, err
153 | 	}
154 | 	batch.MarshaledRecords = buf.Bytes() // the remainder is the message bodies
155 | 	crc := crc32.Checksum(b[21:], crc32c)
156 | 	if crc != batch.Crc {
157 | 		return nil, CorruptedBatchError
158 | 	}
159 | 	return batch, nil
160 | }
161 | 
162 | // Batch defines Kafka record batch in wire format. Not safe for concurrent use.
163 | type Batch struct {
164 | 	BaseOffset           int64
165 | 	BatchLengthBytes     int32
166 | 	PartitionLeaderEpoch int32
167 | 	Magic                int8 // this should be =2
168 | 	Crc                  uint32
169 | 	Attributes           int16
170 | 	LastOffsetDelta      int32 // NumRecords-1 // TODO: is this always true?
171 | 	FirstTimestamp       int64 // ms since epoch
172 | 	MaxTimestamp         int64 // ms since epoch
173 | 	ProducerId           int64 // for transactions only see KIP-360
174 | 	ProducerEpoch        int16 // for transactions only see KIP-360
175 | 	BaseSequence         int32
176 | 	NumRecords           int32 // LastOffsetDelta+1
177 | 	//
178 | 	MarshaledRecords []byte `wire:"omit" json:"-"`
179 | }
180 | 
181 | func (batch *Batch) CompressionType() int16 {
182 | 	return batch.Attributes & 0b111
183 | }
184 | 
185 | const (
186 | 	TimestampCreate    = 0b0000
187 | 	TimestampLogAppend = 0b1000
188 | )
189 | 
190 | func (batch *Batch) TimestampType() int16 {
191 | 	return batch.Attributes & 0b1000
192 | }
193 | 
194 | func (batch *Batch) LastOffset() int64 {
195 | 	return batch.BaseOffset + int64(batch.LastOffsetDelta)
196 | }
197 | 
198 | // Marshal batch header and append marshaled records. If you want the batch to
199 | // be compressed call Compress before Marshal. Mutates the batch Crc.
200 | func (batch *Batch) Marshal() RecordSet {
201 | 	buf := new(bytes.Buffer)
202 | 	if err := wire.Write(buf, reflect.ValueOf(batch)); err != nil {
203 | 		panic(err)
204 | 	}
205 | 	buf.Write(batch.MarshaledRecords)
206 | 	b := buf.Bytes()
207 | 	batch.Crc = crc32.Checksum(b[21:], crc32c)
208 | 	buf = new(bytes.Buffer)
209 | 	binary.Write(buf, binary.BigEndian, batch.Crc)
210 | 	copy(b[17:], buf.Bytes())
211 | 	return b
212 | }
213 | 
214 | // Compress batch records with supplied compressor. Mutates batch on success
215 | // only. Call before Marshal. Not idempotent (on success).
216 | func (batch *Batch) Compress(c Compressor) error {
217 | 	b, err := c.Compress(batch.MarshaledRecords)
218 | 	if err != nil {
219 | 		return fmt.Errorf("error compressing batch records: %w", err)
220 | 	}
221 | 	batch.BatchLengthBytes = int32(49 + len(b)) // TODO: constant
222 | 	batch.Attributes = c.Type()
223 | 	batch.Crc = 0 // invalidate crc
224 | 	batch.MarshaledRecords = b
225 | 	return nil
226 | }
227 | 
228 | // Decompress batch with supplied decompressor. Mutates batch. Call after
229 | // Unmarshal and before Records. Not idempotent.
230 | func (batch *Batch) Decompress(d Decompressor) error {
231 | 	b, err := d.Decompress(batch.MarshaledRecords)
232 | 	if err != nil {
233 | 		return fmt.Errorf("error decompressing record batch: %w", err)
234 | 	}
235 | 	batch.BatchLengthBytes = int32(49 + len(b)) // TODO: constant
236 | 	batch.Attributes = compression.None
237 | 	batch.Crc = 0 // invalidate crc
238 | 	batch.MarshaledRecords = b
239 | 	return nil
240 | }
241 | 
242 | // Records retrieves individual records from the batch. If batch records are
243 | // compressed you must call Decompress first.
244 | func (batch *Batch) Records() [][]byte {
245 | 	var records [][]byte
246 | 	for b := batch.MarshaledRecords; len(b) > 0; {
247 | 		length, n := varint.DecodeZigZag64(b)
248 | 		n += int(length)
249 | 		records = append(records, b[0:n])
250 | 		b = b[n:]
251 | 	}
252 | 	return records
253 | }
254 | 
255 | // RecordSet is composed of 1 or more record batches. Fetch API calls respond
256 | // with record sets. Byte representation of a record set with only one record
257 | // batch is identical to the record batch.
258 | type RecordSet []byte
259 | 
260 | // Batches returns the batches in the record set. Because Kafka limits response
261 | // byte sizes, the last record batch in the set may be truncated (bytes will be
262 | // missing from the end). In such case the last batch is discarded.
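//
// A fetch-side sketch tying RecordSet, Unmarshal, Decompress, and Records
// together (d is a Decompressor for the batch's codec, assumed to be supplied
// by the caller):
//
//	for _, raw := range recordSet.Batches() {
//		batch, err := Unmarshal(raw)
//		if err != nil { // CorruptedBatchError, UnsupportedMagicError, ...
//			continue
//		}
//		if batch.CompressionType() != compression.None {
//			if err := batch.Decompress(d); err != nil {
//				continue
//			}
//		}
//		for _, rec := range batch.Records() {
//			// unmarshal individual records with the record package
//			_ = rec
//		}
//	}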
263 | func (b RecordSet) Batches() [][]byte {
264 | 	var batches [][]byte
265 | 	var offset int64
266 | 	var length int32
267 | 	for {
268 | 		if len(b) == 0 {
269 | 			break
270 | 		}
271 | 		r := bytes.NewReader(b)
272 | 		if err := binary.Read(r, binary.BigEndian, &offset); err != nil {
273 | 			break
274 | 		}
275 | 		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
276 | 			break
277 | 		}
278 | 		n := int(length + 8 + 4)
279 | 		if len(b) < n {
280 | 			break // "incomplete" batch
281 | 		}
282 | 		batches = append(batches, b[:n])
283 | 		b = b[n:]
284 | 	}
285 | 	return batches
286 | }
287 | 
--------------------------------------------------------------------------------
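An end-to-end sketch connecting the batch and client packages shown above (illustrative only: the Produce.Args fields are defined in api/Produce and elided here, and the broker address and topic name are hypothetical):

	package main

	import (
		"time"

		"github.com/mkocikowski/libkafka/api/Produce"
		"github.com/mkocikowski/libkafka/batch"
		"github.com/mkocikowski/libkafka/client"
	)

	func main() {
		b := batch.NewBuilder(time.Now())
		b.AddStrings("hello", "world")
		bat, err := b.Build(time.Now())
		if err != nil {
			panic(err) // ErrEmpty or ErrNilRecord
		}
		c := &client.PartitionClient{Bootstrap: "localhost:9092", Topic: "test", Partition: 0}
		defer c.Close()
		// the parsed response may still carry a Kafka error code;
		// checking it (and retrying) is up to the caller
		resp, err := c.Produce(&Produce.Args{ /* see api/Produce for fields */ }, bat.Marshal())
		_, _ = resp, err
	}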