├── .gitignore ├── .travis.yml ├── semaphore.go ├── messages.proto ├── loggers ├── kplogrus │ └── logrus.go └── kpzap │ └── zap.go ├── LICENSE ├── example_test.go ├── logger.go ├── go.mod ├── aggregator_test.go ├── aggregation-format.md ├── aggregator.go ├── README.md ├── config.go ├── producer_test.go ├── go.sum ├── producer.go └── messages.pb.go /.gitignore: -------------------------------------------------------------------------------- 1 | draft 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go: 3 | - 1.14 4 | - 1.15 5 | - tip 6 | script: 7 | - go test -v 8 | -------------------------------------------------------------------------------- /semaphore.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | // channel based semaphore 4 | // used to limit the number of concurrent goroutines 5 | type semaphore chan struct{} 6 | 7 | // acquire a lock, blocking or non-blocking 8 | func (s semaphore) acquire() { 9 | s <- struct{}{} 10 | } 11 | 12 | // release a lock 13 | func (s semaphore) release() { 14 | <-s 15 | } 16 | 17 | // wait block until the last goroutine release the lock 18 | func (s semaphore) wait() { 19 | for i := 0; i < cap(s); i++ { 20 | s <- struct{}{} 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /messages.proto: -------------------------------------------------------------------------------- 1 | // protoc --go_out=.
--go_opt=paths=source_relative messages.proto 2 | syntax = "proto2"; 3 | package producer; 4 | option go_package = "./producer"; 5 | 6 | message AggregatedRecord { 7 | repeated string partition_key_table = 1; 8 | repeated string explicit_hash_key_table = 2; 9 | repeated Record records = 3; 10 | } 11 | 12 | message Tag { 13 | required string key = 1; 14 | optional string value = 2; 15 | } 16 | 17 | message Record { 18 | required uint64 partition_key_index = 1; 19 | optional uint64 explicit_hash_key_index = 2; 20 | required bytes data = 3; 21 | repeated Tag tags = 4; 22 | } 23 | -------------------------------------------------------------------------------- /loggers/kplogrus/logrus.go: -------------------------------------------------------------------------------- 1 | package kplogrus 2 | 3 | import ( 4 | producer "github.com/a8m/kinesis-producer" 5 | "github.com/sirupsen/logrus" 6 | ) 7 | 8 | // Logger implements a logurs.Logger logger for kinesis-producer 9 | type Logger struct { 10 | Logger *logrus.Logger 11 | } 12 | 13 | // Info logs a message 14 | func (l *Logger) Info(msg string, args ...producer.LogValue) { 15 | l.Logger.WithFields(l.valuesToFields(args...)).Info(msg) 16 | } 17 | 18 | // Error logs an error 19 | func (l *Logger) Error(msg string, err error, args ...producer.LogValue) { 20 | l.Logger.WithError(err).WithFields(l.valuesToFields(args...)).Error(msg) 21 | } 22 | 23 | func (l *Logger) valuesToFields(values ...producer.LogValue) logrus.Fields { 24 | fields := logrus.Fields{} 25 | for _, v := range values { 26 | fields[v.Name] = v.Value 27 | } 28 | return fields 29 | } 30 | -------------------------------------------------------------------------------- /loggers/kpzap/zap.go: -------------------------------------------------------------------------------- 1 | package kpzap 2 | 3 | import ( 4 | "go.uber.org/zap" 5 | 6 | producer "github.com/a8m/kinesis-producer" 7 | ) 8 | 9 | // Logger implements a zap.Logger logger for kinesis-producer 10 | type 
Logger struct { 11 | Logger *zap.Logger 12 | } 13 | 14 | // Info logs a message 15 | func (l *Logger) Info(msg string, values ...producer.LogValue) { 16 | l.Logger.Info(msg, l.valuesToFields(values)...) 17 | } 18 | 19 | // Error logs a message and its error at error level. 20 | func (l *Logger) Error(msg string, err error, values ...producer.LogValue) { 21 | fields := l.valuesToFields(values) 22 | fields = append(fields, zap.Error(err)) 23 | l.Logger.Error(msg, fields...) 24 | } 25 | 26 | // valuesToFields converts kinesis-producer log values to zap fields. 27 | func (l *Logger) valuesToFields(values []producer.LogValue) []zap.Field { 28 | fields := make([]zap.Field, len(values)) 29 | for i, v := range values { 30 | fields[i] = zap.Any(v.Name, v.Value) 31 | } 32 | return fields 33 | } 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
15 | 16 | -------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go-v2/config" 10 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 11 | ) 12 | 13 | func ExampleSimple() { 14 | logger := &StdLogger{log.New(os.Stdout, "", log.LstdFlags)} 15 | cfg, _ := config.LoadDefaultConfig(context.TODO()) 16 | client := kinesis.NewFromConfig(cfg) 17 | pr := New(&Config{ 18 | StreamName: "test", 19 | BacklogCount: 2000, 20 | Client: client, 21 | Logger: logger, 22 | }) 23 | 24 | pr.Start() 25 | 26 | // Handle failures 27 | go func() { 28 | for r := range pr.NotifyFailures() { 29 | // r contains `Data`, `PartitionKey` and `Error()` 30 | logger.Error("detected put failure", r.error) 31 | } 32 | }() 33 | 34 | go func() { 35 | for i := 0; i < 5000; i++ { 36 | err := pr.Put([]byte("foo"), "bar") 37 | if err != nil { 38 | logger.Error("error producing", err) 39 | } 40 | } 41 | }() 42 | 43 | time.Sleep(3 * time.Second) 44 | pr.Stop() 45 | } 46 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | ) 8 | 9 | // Logger represents a simple interface used by kinesis-producer to handle logging 10 | type Logger interface { 11 | Info(msg string, values ...LogValue) 12 | Error(msg string, err error, values ...LogValue) 13 | } 14 | 15 | // LogValue represents a key:value pair used by the Logger interface 16 | type LogValue struct { 17 | Name string 18 | Value interface{} 19 | } 20 | 21 | func (v LogValue) String() string { 22 | return fmt.Sprintf(" %s=%s", v.Name, v.Value) 23 | } 24 | 25 | // StdLogger implements the Logger interface using standard library loggers 26 | 
type StdLogger struct { 27 | Logger *log.Logger 28 | } 29 | 30 | // Info prints log message 31 | func (l *StdLogger) Info(msg string, values ...LogValue) { 32 | l.Logger.Print(msg, l.valuesToString(values...)) 33 | } 34 | 35 | // Error prints log message 36 | func (l *StdLogger) Error(msg string, err error, values ...LogValue) { 37 | l.Logger.Print(msg, l.valuesToString(values...), err) 38 | } 39 | 40 | func (l *StdLogger) valuesToString(values ...LogValue) string { 41 | parts := make([]string, len(values)) 42 | for i, v := range values { 43 | parts[i] = fmt.Sprint(v) 44 | } 45 | return strings.Join(parts, ", ") 46 | } 47 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/a8m/kinesis-producer 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.30.5 7 | github.com/aws/aws-sdk-go-v2/config v1.27.33 8 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.7 9 | github.com/jpillora/backoff v1.0.0 10 | github.com/sirupsen/logrus v1.9.3 11 | go.uber.org/zap v1.27.0 12 | google.golang.org/protobuf v1.34.2 13 | ) 14 | 15 | require ( 16 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect 17 | github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect 18 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect 19 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect 20 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect 21 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 22 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect 23 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect 24 | github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect 25 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect 26 | github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 
// indirect 27 | github.com/aws/smithy-go v1.20.4 // indirect 28 | github.com/jmespath/go-jmespath v0.4.0 // indirect 29 | go.uber.org/multierr v1.11.0 // indirect 30 | golang.org/x/sys v0.25.0 // indirect 31 | ) 32 | -------------------------------------------------------------------------------- /aggregator_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "sync" 7 | "testing" 8 | ) 9 | 10 | func assert(t *testing.T, val bool, msg string) { 11 | if !val { 12 | t.Error(msg) 13 | } 14 | } 15 | 16 | func TestSizeAndCount(t *testing.T) { 17 | a := new(Aggregator) 18 | assert(t, a.Size()+a.Count() == 0, "size and count should equal to 0 at the beginning") 19 | data := []byte("hello") 20 | pkey := "world" 21 | n := rand.Intn(100) 22 | for i := 0; i < n; i++ { 23 | a.Put(data, pkey) 24 | } 25 | assert(t, a.Size() == 5*n+5+8*n, "size should equal to the data and the partition-key") 26 | assert(t, a.Count() == n, "count should be equal to the number of Put calls") 27 | } 28 | 29 | func TestAggregation(t *testing.T) { 30 | var wg sync.WaitGroup 31 | a := new(Aggregator) 32 | n := 50 33 | wg.Add(n) 34 | for i := 0; i < n; i++ { 35 | c := strconv.Itoa(i) 36 | data := []byte("hello-" + c) 37 | a.Put(data, c) 38 | wg.Done() 39 | } 40 | wg.Wait() 41 | record, err := a.Drain() 42 | if err != nil { 43 | t.Error(err) 44 | } 45 | assert(t, isAggregated(record), "should return an agregated record") 46 | records := extractRecords(record) 47 | for i := 0; i < n; i++ { 48 | c := strconv.Itoa(i) 49 | found := false 50 | for _, record := range records { 51 | if string(record.Data) == "hello-"+c { 52 | assert(t, string(record.Data) == "hello-"+c, "`Data` field contains invalid value") 53 | found = true 54 | } 55 | } 56 | assert(t, found, "record not found after extracting: "+c) 57 | } 58 | } 59 | 60 | func TestDrainEmptyAggregator(t *testing.T) { 61 | a := new(Aggregator) 62 | 
_, err := a.Drain() 63 | assert(t, err == nil, "should not return an error") 64 | } 65 | -------------------------------------------------------------------------------- /aggregation-format.md: -------------------------------------------------------------------------------- 1 | # KPL Aggregated Record Format 2 | > Note: This file taken from: [amazon-kinesis-producer](https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md) 3 | 4 | ## Intro 5 | 6 | The Amazon Kinesis Producer Library (KPL) aggregates multiple logical user records into a single Amazon Kinesis record for efficient puts. 7 | 8 | We use Google protocol buffers (protobuf) to create a binary file format for this. The Amazon Kinesis Client Library (KCL) implements deaggregation based on this format on the consumer side. 9 | 10 | This document contains the format used. Developers may use this information to produce aggregated records from their own code that will be compatible with the KCL's deaggregation logic. 11 | 12 | ## Format 13 | 14 | All of the user data is contained in a protobuf message. To this, we add a magic number and a checksum. The overall format is as follows: 15 | 16 | ``` 17 | 0 4 N N+15 18 | +---+---+---+---+==================+---+...+---+ 19 | | MAGIC NUMBER | PROTOBUF MESSAGE | MD5 | 20 | +---+---+---+---+==================+---+...+---+ 21 | 22 | ``` 23 | 24 | The magic number contains the 4 bytes `0xF3 0x89 0x9A 0xC2`. 
25 | 26 | The protobuf message is as follows: 27 | 28 | ``` 29 | message AggregatedRecord { 30 | repeated string partition_key_table = 1; 31 | repeated string explicit_hash_key_table = 2; 32 | repeated Record records = 3; 33 | } 34 | ``` 35 | 36 | The sub-messages are as follows: 37 | 38 | ``` 39 | message Tag { 40 | required string key = 1; 41 | optional string value = 2; 42 | } 43 | 44 | message Record { 45 | required uint64 partition_key_index = 1; 46 | optional uint64 explicit_hash_key_index = 2; 47 | required bytes data = 3; 48 | repeated Tag tags = 4; 49 | } 50 | ``` 51 | 52 | Note: we use the proto2 language (not proto3). 53 | 54 | The protobuf message allows more efficient partition and explicit hash key packing by allowing multiple records to point to the same key in a table. This feature is optional; implementations can simply store the keys of every record as a separate entry in the tables, even if two or more of them are the same. 55 | 56 | The key tables are zero-indexed; they are simply arrays, and the key indices are indices into those arrays. 57 | 58 | Tags are not yet implemented in the KPL and KCL APIs. 59 | 60 | Lastly, the 16-byte MD5 checksum is computed over the bytes of the serialized protobuf message. 61 | -------------------------------------------------------------------------------- /aggregator.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "bytes" 5 | "crypto/md5" 6 | 7 | "github.com/aws/aws-sdk-go-v2/aws" 8 | ktypes "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 9 | "google.golang.org/protobuf/proto" 10 | ) 11 | 12 | var ( 13 | magicNumber = []byte{0xF3, 0x89, 0x9A, 0xC2} 14 | ) 15 | 16 | type Aggregator struct { 17 | buf []*Record 18 | pkeys []string 19 | nbytes int 20 | } 21 | 22 | // Size return how many bytes stored in the aggregator. 23 | // including partition keys. 
24 | func (a *Aggregator) Size() int { 25 | return a.nbytes 26 | } 27 | 28 | // Count returns the number of records currently buffered in the aggregator. 29 | func (a *Aggregator) Count() int { 30 | return len(a.buf) 31 | } 32 | 33 | // Put buffers a record built from `data` and `partitionKey`. NOTE(review): there is no internal locking in this type, so despite the original "thread-safe" claim this is only safe when callers serialize access — confirm how Producer invokes it. 34 | func (a *Aggregator) Put(data []byte, partitionKey string) { 35 | // For now, all records in the aggregated record will have 36 | // the same partition key (only the first key seen is recorded). 37 | // later, we will add shard-mapper same as the KPL use. 38 | // see: https://github.com/a8m/kinesis-producer/issues/1 39 | if len(a.pkeys) == 0 { 40 | a.pkeys = []string{partitionKey} 41 | a.nbytes += len([]byte(partitionKey)) 42 | } 43 | keyIndex := uint64(len(a.pkeys) - 1) 44 | 45 | a.nbytes += partitionKeyIndexSize 46 | a.buf = append(a.buf, &Record{ 47 | Data: data, 48 | PartitionKeyIndex: &keyIndex, 49 | }) 50 | a.nbytes += len(data) 51 | } 52 | 53 | // Drain creates an aggregated `kinesis.PutRecordsRequestEntry` 54 | // that is compatible with the KCL's deaggregation logic, then resets the aggregator. 55 | // It returns (nil, nil) when the aggregator is empty, so callers must handle a nil entry. 56 | // If you are interested to know more about the format (magic number + protobuf + MD5 checksum), see: aggregation-format.md 57 | func (a *Aggregator) Drain() (*ktypes.PutRecordsRequestEntry, error) { 58 | if a.nbytes == 0 { 59 | return nil, nil 60 | } 61 | data, err := proto.Marshal(&AggregatedRecord{ 62 | PartitionKeyTable: a.pkeys, 63 | Records: a.buf, 64 | }) 65 | if err != nil { 66 | return nil, err 67 | } 68 | h := md5.New() 69 | h.Write(data) 70 | checkSum := h.Sum(nil) 71 | aggData := append(magicNumber, data...) 72 | aggData = append(aggData, checkSum...) 73 | entry := &ktypes.PutRecordsRequestEntry{ 74 | Data: aggData, 75 | PartitionKey: aws.String(a.pkeys[0]), 76 | } 77 | a.clear() 78 | return entry, nil 79 | } 80 | 81 | func (a *Aggregator) clear() { 82 | a.buf = make([]*Record, 0) 83 | a.pkeys = make([]string, 0) 84 | a.nbytes = 0 85 | } 86 | 87 | // isAggregated reports whether a given entry is an aggregated record (i.e. carries the KPL magic-number prefix).
88 | func isAggregated(entry *ktypes.PutRecordsRequestEntry) bool { 89 | return bytes.HasPrefix(entry.Data, magicNumber) 90 | } 91 | 92 | func extractRecords(entry *ktypes.PutRecordsRequestEntry) (out []ktypes.PutRecordsRequestEntry) { 93 | src := entry.Data[len(magicNumber) : len(entry.Data)-md5.Size] 94 | dest := new(AggregatedRecord) 95 | err := proto.Unmarshal(src, dest) 96 | if err != nil { 97 | return 98 | } 99 | for i := range dest.Records { 100 | r := dest.Records[i] 101 | out = append(out, ktypes.PutRecordsRequestEntry{ 102 | Data: r.GetData(), 103 | PartitionKey: &dest.PartitionKeyTable[r.GetPartitionKeyIndex()], 104 | }) 105 | } 106 | return 107 | } 108 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Amazon kinesis producer [![Build status][travis-image]][travis-url] [![License][license-image]][license-url] [![GoDoc][godoc-img]][godoc-url] 2 | > A KPL-like batch producer for Amazon Kinesis built on top of the official Go AWS SDK 3 | and using the same aggregation format that [KPL][kpl-url] use. 
4 | 5 | ### Useful links 6 | - [Documentation][godoc-url] 7 | - [Aggregation format][aggregation-format-url] 8 | - [Considerations When Using KPL Aggregation][kpl-aggregation] 9 | - [Consumer De-aggregation][de-aggregation] 10 | 11 | ### Example 12 | ```go 13 | package main 14 | 15 | import ( 16 | "time" 17 | 18 | "github.com/sirupsen/logrus" 19 | "github.com/a8m/kinesis-producer" 20 | "github.com/aws/aws-sdk-go/aws" 21 | "github.com/aws/aws-sdk-go/aws/session" 22 | "github.com/aws/aws-sdk-go/service/kinesis" 23 | ) 24 | 25 | func main() { 26 | client := kinesis.New(session.New(aws.NewConfig())) 27 | pr := producer.New(&producer.Config{ 28 | StreamName: "test", 29 | BacklogCount: 2000, 30 | Client: client 31 | }) 32 | 33 | pr.Start() 34 | 35 | // Handle failures 36 | go func() { 37 | for r := range pr.NotifyFailures() { 38 | // r contains `Data`, `PartitionKey` and `Error()` 39 | log.Error(r) 40 | } 41 | }() 42 | 43 | go func() { 44 | for i := 0; i < 5000; i++ { 45 | err := pr.Put([]byte("foo"), "bar") 46 | if err != nil { 47 | log.WithError(err).Fatal("error producing") 48 | } 49 | } 50 | }() 51 | 52 | time.Sleep(3 * time.Second) 53 | pr.Stop() 54 | } 55 | ``` 56 | 57 | #### Specifying logger implementation 58 | `producer.Config` takes an optional `logging.Logger` implementation. 59 | 60 | ##### Using a custom logger 61 | ```go 62 | customLogger := &CustomLogger{} 63 | 64 | &producer.Config{ 65 | StreamName: "test", 66 | BacklogCount: 2000, 67 | Client: client, 68 | Logger: customLogger, 69 | } 70 | ``` 71 | 72 | #### Using logrus 73 | 74 | ```go 75 | import ( 76 | "github.com/sirupsen/logrus" 77 | producer "github.com/a8m/kinesis-producer" 78 | "github.com/a8m/kinesis-producer/loggers" 79 | ) 80 | 81 | log := logrus.New() 82 | 83 | &producer.Config{ 84 | StreamName: "test", 85 | BacklogCount: 2000, 86 | Client: client, 87 | Logger: loggers.Logrus(log), 88 | } 89 | ``` 90 | 91 | kinesis-producer ships with three logger implementations. 
92 | 93 | - `producer.StdLogger` uses the standard library logger 94 | - `kplogrus.Logger` uses the logrus logger 95 | - `kpzap.Logger` uses the zap logger 96 | 97 | ### License 98 | MIT 99 | 100 | [godoc-url]: https://godoc.org/github.com/a8m/kinesis-producer 101 | [godoc-img]: https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square 102 | [kpl-url]: https://github.com/awslabs/amazon-kinesis-producer 103 | [de-aggregation]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html 104 | [kpl-aggregation]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-producer-adv-aggregation.html 105 | [aggregation-format-url]: https://github.com/a8m/kinesis-producer/blob/master/aggregation-format.md 106 | [license-image]: https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square 107 | [license-url]: LICENSE 108 | [travis-image]: https://img.shields.io/travis/a8m/kinesis-producer.svg?style=flat-square 109 | [travis-url]: https://travis-ci.org/a8m/kinesis-producer 110 | 111 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | k "github.com/aws/aws-sdk-go-v2/service/kinesis" 10 | ) 11 | 12 | // Constants and default configuration take from: 13 | // github.com/awslabs/amazon-kinesis-producer/.../KinesisProducerConfiguration.java 14 | const ( 15 | maxRecordSize = 1 << 20 // 1MiB 16 | maxRequestSize = 5 << 20 // 5MiB 17 | maxRecordsPerRequest = 500 18 | maxAggregationSize = 1048576 // 1MiB 19 | maxAggregationCount = 4294967295 20 | defaultAggregationSize = 51200 // 50k 21 | defaultMaxConnections = 24 22 | defaultFlushInterval = 5 * time.Second 23 | partitionKeyIndexSize = 8 24 | ) 25 | 26 | // Putter is the interface that wraps the KinesisAPI.PutRecords method.
27 | type Putter interface { 28 | PutRecords(context.Context, *k.PutRecordsInput, ...func(*k.Options)) (*k.PutRecordsOutput, error) 29 | } 30 | 31 | // Config is the Producer configuration. 32 | type Config struct { 33 | // StreamName is the Kinesis stream. 34 | StreamName string 35 | 36 | // FlushInterval is a regular interval for flushing the buffer. Defaults to 5s. 37 | FlushInterval time.Duration 38 | 39 | // BatchCount determine the maximum number of items to pack in batch. 40 | // Must not exceed length. Defaults to 500. 41 | BatchCount int 42 | 43 | // BatchSize determine the maximum number of bytes to send with a PutRecords request. 44 | // Must not exceed 5MiB; Default to 5MiB. 45 | BatchSize int 46 | 47 | // AggregateBatchCount determine the maximum number of items to pack into an aggregated record. 48 | AggregateBatchCount int 49 | 50 | // AggregationBatchSize determine the maximum number of bytes to pack into an aggregated record. User records larger 51 | // than this will bypass aggregation. 52 | AggregateBatchSize int 53 | 54 | // BacklogCount determines the channel capacity before Put() will begin blocking. Default to `BatchCount`. 55 | BacklogCount int 56 | 57 | // Number of requests to sent concurrently. Default to 24. 58 | MaxConnections int 59 | 60 | // Logger is the logger used. Default to producer.Logger. 61 | Logger Logger 62 | 63 | // Enabling verbose logging. Default to false. 64 | Verbose bool 65 | 66 | // Client is the Putter interface implementation. 
67 | Client Putter 68 | } 69 | 70 | // defaults for configuration 71 | func (c *Config) defaults() { 72 | if c.Logger == nil { 73 | c.Logger = &StdLogger{log.New(os.Stdout, "", log.LstdFlags)} 74 | } 75 | if c.BatchCount == 0 { 76 | c.BatchCount = maxRecordsPerRequest 77 | } 78 | falseOrPanic(c.BatchCount > maxRecordsPerRequest, "kinesis: BatchCount exceeds 500") 79 | if c.BatchSize == 0 { 80 | c.BatchSize = maxRequestSize 81 | } 82 | falseOrPanic(c.BatchSize > maxRequestSize, "kinesis: BatchSize exceeds 5MiB") 83 | if c.BacklogCount == 0 { 84 | c.BacklogCount = maxRecordsPerRequest 85 | } 86 | if c.AggregateBatchCount == 0 { 87 | c.AggregateBatchCount = maxAggregationCount 88 | } 89 | falseOrPanic(c.AggregateBatchCount > maxAggregationCount, "kinesis: AggregateBatchCount exceeds 4294967295") 90 | if c.AggregateBatchSize == 0 { 91 | c.AggregateBatchSize = defaultAggregationSize 92 | } 93 | falseOrPanic(c.AggregateBatchSize > maxAggregationSize, "kinesis: AggregateBatchSize exceeds 50KB") 94 | if c.MaxConnections == 0 { 95 | c.MaxConnections = defaultMaxConnections 96 | } 97 | falseOrPanic(c.MaxConnections < 1 || c.MaxConnections > 256, "kinesis: MaxConnections must be between 1 and 256") 98 | if c.FlushInterval == 0 { 99 | c.FlushInterval = defaultFlushInterval 100 | } 101 | falseOrPanic(len(c.StreamName) == 0, "kinesis: StreamName length must be at least 1") 102 | } 103 | 104 | func falseOrPanic(p bool, msg string) { 105 | if p { 106 | panic(msg) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /producer_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | "testing" 8 | 9 | "github.com/aws/aws-sdk-go-v2/aws" 10 | k "github.com/aws/aws-sdk-go-v2/service/kinesis" 11 | ktypes "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 12 | ) 13 | 14 | type responseMock struct { 15 | Response 
*k.PutRecordsOutput 16 | Error error 17 | } 18 | 19 | type clientMock struct { 20 | calls int 21 | responses []responseMock 22 | incoming map[int][]string 23 | } 24 | 25 | func (c *clientMock) PutRecords(_ context.Context, input *k.PutRecordsInput, _ ...func(*k.Options)) (*k.PutRecordsOutput, error) { 26 | res := c.responses[c.calls] 27 | for _, r := range input.Records { 28 | c.incoming[c.calls] = append(c.incoming[c.calls], *r.PartitionKey) 29 | } 30 | c.calls++ 31 | if res.Error != nil { 32 | return nil, res.Error 33 | } 34 | return res.Response, nil 35 | } 36 | 37 | type testCase struct { 38 | // configuration 39 | name string // test name 40 | config *Config // test config 41 | records []string // all outgoing records(partition keys and data too) 42 | putter *clientMock // mocked client 43 | 44 | // expectations 45 | outgoing map[int][]string // [call number][partition keys] 46 | } 47 | 48 | func genBulk(n int, s string) (ret []string) { 49 | for i := 0; i < n; i++ { 50 | ret = append(ret, s) 51 | } 52 | return 53 | } 54 | 55 | var testCases = []testCase{ 56 | { 57 | "one record with batch count 1", 58 | &Config{BatchCount: 1}, 59 | []string{"hello"}, 60 | &clientMock{ 61 | incoming: make(map[int][]string), 62 | responses: []responseMock{ 63 | { 64 | Error: nil, 65 | Response: &k.PutRecordsOutput{ 66 | FailedRecordCount: aws.Int32(0), 67 | }, 68 | }, 69 | }}, 70 | map[int][]string{ 71 | 0: []string{"hello"}, 72 | }, 73 | }, 74 | { 75 | "two records with batch count 1", 76 | &Config{BatchCount: 1, AggregateBatchCount: 1}, 77 | []string{"hello", "world"}, 78 | &clientMock{ 79 | incoming: make(map[int][]string), 80 | responses: []responseMock{ 81 | { 82 | Error: nil, 83 | Response: &k.PutRecordsOutput{ 84 | FailedRecordCount: aws.Int32(0), 85 | }, 86 | }, 87 | { 88 | Error: nil, 89 | Response: &k.PutRecordsOutput{ 90 | FailedRecordCount: aws.Int32(0), 91 | }, 92 | }, 93 | }}, 94 | map[int][]string{ 95 | 0: []string{"hello"}, 96 | 1: []string{"world"}, 97 | }, 98 
| }, 99 | { 100 | "two records with batch count 2, simulating retries", 101 | &Config{BatchCount: 2, AggregateBatchCount: 1}, 102 | []string{"hello", "world"}, 103 | &clientMock{ 104 | incoming: make(map[int][]string), 105 | responses: []responseMock{ 106 | { 107 | Error: nil, 108 | Response: &k.PutRecordsOutput{ 109 | FailedRecordCount: aws.Int32(1), 110 | Records: []ktypes.PutRecordsResultEntry{ 111 | {SequenceNumber: aws.String("3"), ShardId: aws.String("1")}, 112 | {ErrorCode: aws.String("400")}, 113 | }, 114 | }, 115 | }, 116 | { 117 | Error: nil, 118 | Response: &k.PutRecordsOutput{ 119 | FailedRecordCount: aws.Int32(0), 120 | }, 121 | }, 122 | }}, 123 | map[int][]string{ 124 | 0: []string{"hello", "world"}, 125 | 1: []string{"world"}, 126 | }, 127 | }, 128 | { 129 | "2 bulks of 10 records", 130 | &Config{BatchCount: 10, AggregateBatchCount: 1, BacklogCount: 1}, 131 | genBulk(20, "foo"), 132 | &clientMock{ 133 | incoming: make(map[int][]string), 134 | responses: []responseMock{ 135 | { 136 | Error: nil, 137 | Response: &k.PutRecordsOutput{ 138 | FailedRecordCount: aws.Int32(0), 139 | }, 140 | }, 141 | { 142 | Error: nil, 143 | Response: &k.PutRecordsOutput{ 144 | FailedRecordCount: aws.Int32(0), 145 | }, 146 | }, 147 | }}, 148 | map[int][]string{ 149 | 0: genBulk(10, "foo"), 150 | 1: genBulk(10, "foo"), 151 | }, 152 | }, 153 | } 154 | 155 | func TestProducer(t *testing.T) { 156 | for _, test := range testCases { 157 | test.config.StreamName = test.name 158 | test.config.MaxConnections = 1 159 | test.config.Client = test.putter 160 | p := New(test.config) 161 | p.Start() 162 | var wg sync.WaitGroup 163 | wg.Add(len(test.records)) 164 | for _, r := range test.records { 165 | go func(s string) { 166 | p.Put([]byte(s), s) 167 | wg.Done() 168 | }(r) 169 | } 170 | wg.Wait() 171 | p.Stop() 172 | for k, v := range test.putter.incoming { 173 | if len(v) != len(test.outgoing[k]) { 174 | t.Errorf("failed test: %s\n\texcpeted:%v\n\tactual: %v", test.name, 175 | 
test.outgoing, test.putter.incoming) 176 | } 177 | } 178 | } 179 | } 180 | 181 | func TestNotify(t *testing.T) { 182 | kError := errors.New("ResourceNotFoundException: Stream foo under account X not found") 183 | p := New(&Config{ 184 | StreamName: "foo", 185 | MaxConnections: 1, 186 | BatchCount: 1, 187 | AggregateBatchCount: 10, 188 | Client: &clientMock{ 189 | incoming: make(map[int][]string), 190 | responses: []responseMock{{Error: kError}}, 191 | }, 192 | }) 193 | p.Start() 194 | records := genBulk(10, "bar") 195 | var wg sync.WaitGroup 196 | wg.Add(len(records)) 197 | failed := 0 198 | done := make(chan bool, 1) 199 | go func() { 200 | for _ = range p.NotifyFailures() { 201 | failed++ 202 | wg.Done() 203 | } 204 | // expect producer close the failures channel 205 | done <- true 206 | }() 207 | for _, r := range records { 208 | p.Put([]byte(r), r) 209 | } 210 | wg.Wait() 211 | p.Stop() 212 | 213 | if failed != len(records) { 214 | t.Errorf("failed test: NotifyFailure\n\texcpeted:%v\n\tactual:%v", failed, len(records)) 215 | } 216 | 217 | if !<-done { 218 | t.Error("failed test: NotifyFailure\n\texpect failures channel to be closed") 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= 2 | github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= 3 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= 4 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= 5 | github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= 6 | github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= 7 | 
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= 8 | github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= 9 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= 10 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= 11 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= 12 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= 13 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= 14 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= 15 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= 16 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= 17 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= 18 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= 19 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= 20 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= 21 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.7 h1:vIyT3PV/OTjhi3mY6wWDpHQ0sbp7zB7lH6g/63N5ZlY= 22 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.7/go.mod h1:URGOU9fStCYx2LYLwT0g8XpsIa5CAk8mq+MbrxCgJDc= 23 | github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= 24 | github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod 
h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= 25 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= 26 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= 27 | github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= 28 | github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= 29 | github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= 30 | github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 31 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 32 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 33 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 34 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 35 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 36 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 37 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 38 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 39 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 40 | github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 41 | github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 42 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 43 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 44 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 45 | 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 46 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 47 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 48 | github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 49 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 50 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 51 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 52 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 53 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 54 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 55 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 56 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 57 | golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 58 | golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 59 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 60 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 61 | google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 62 | google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 63 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 64 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 65 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 66 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 67 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 68 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 69 | -------------------------------------------------------------------------------- /producer.go: -------------------------------------------------------------------------------- 1 | // Amazon kinesis producer 2 | // A KPL-like batch producer for Amazon Kinesis built on top of the official Go AWS SDK 3 | // and using the same aggregation format that KPL use. 4 | // 5 | // Note: this project start as a fork of `tj/go-kinesis`. if you are not intersting in the 6 | // KPL aggregation logic, you probably want to check it out. 7 | package producer 8 | 9 | import ( 10 | "context" 11 | "crypto/md5" 12 | "errors" 13 | "fmt" 14 | "sync" 15 | "time" 16 | 17 | "github.com/aws/aws-sdk-go-v2/aws" 18 | k "github.com/aws/aws-sdk-go-v2/service/kinesis" 19 | ktypes "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 20 | "github.com/jpillora/backoff" 21 | ) 22 | 23 | // Errors 24 | var ( 25 | ErrStoppedProducer = errors.New("Unable to Put record. Producer is already stopped") 26 | ErrIllegalPartitionKey = errors.New("Invalid parition key. Length must be at least 1 and at most 256") 27 | ErrRecordSizeExceeded = errors.New("Data must be less than or equal to 1MB in size") 28 | ) 29 | 30 | // Producer batches records. 31 | type Producer struct { 32 | sync.RWMutex 33 | *Config 34 | aggregator *Aggregator 35 | semaphore semaphore 36 | records chan *ktypes.PutRecordsRequestEntry 37 | failure chan *FailureRecord 38 | done chan struct{} 39 | 40 | // Current state of the Producer 41 | // notify set to true after calling to `NotifyFailures` 42 | notify bool 43 | // stopped set to true after `Stop`ing the Producer. 44 | // This will prevent from user to `Put` any new data. 45 | stopped bool 46 | } 47 | 48 | // New creates new producer with the given config. 
49 | func New(config *Config) *Producer { 50 | config.defaults() 51 | return &Producer{ 52 | Config: config, 53 | done: make(chan struct{}), 54 | records: make(chan *ktypes.PutRecordsRequestEntry, config.BacklogCount), 55 | semaphore: make(chan struct{}, config.MaxConnections), 56 | aggregator: new(Aggregator), 57 | } 58 | } 59 | 60 | // Put `data` using `partitionKey` asynchronously. This method is thread-safe. 61 | // 62 | // Under the covers, the Producer will automatically re-attempt puts in case of 63 | // transient errors. 64 | // When unrecoverable error has detected(e.g: trying to put to in a stream that 65 | // doesn't exist), the message will returned by the Producer. 66 | // Add a listener with `Producer.NotifyFailures` to handle undeliverable messages. 67 | func (p *Producer) Put(data []byte, partitionKey string) error { 68 | p.RLock() 69 | stopped := p.stopped 70 | p.RUnlock() 71 | if stopped { 72 | return ErrStoppedProducer 73 | } 74 | if len(data) > maxRecordSize { 75 | return ErrRecordSizeExceeded 76 | } 77 | if l := len(partitionKey); l < 1 || l > 256 { 78 | return ErrIllegalPartitionKey 79 | } 80 | nbytes := len(data) + len([]byte(partitionKey)) 81 | // if the record size is bigger than aggregation size 82 | // handle it as a simple kinesis record 83 | if nbytes > p.AggregateBatchSize { 84 | p.records <- &ktypes.PutRecordsRequestEntry{ 85 | Data: data, 86 | PartitionKey: &partitionKey, 87 | } 88 | } else { 89 | p.Lock() 90 | needToDrain := nbytes+p.aggregator.Size()+md5.Size+len(magicNumber)+partitionKeyIndexSize > maxRecordSize || p.aggregator.Count() >= p.AggregateBatchCount 91 | var ( 92 | record *ktypes.PutRecordsRequestEntry 93 | err error 94 | ) 95 | if needToDrain { 96 | if record, err = p.aggregator.Drain(); err != nil { 97 | p.Logger.Error("drain aggregator", err) 98 | } 99 | } 100 | p.aggregator.Put(data, partitionKey) 101 | p.Unlock() 102 | // release the lock and then pipe the record to the records channel 103 | // we did it, because 
the "send" operation blocks when the backlog is full 104 | // and this can cause deadlock(when we never release the lock) 105 | if needToDrain && record != nil { 106 | p.records <- record 107 | } 108 | } 109 | return nil 110 | } 111 | 112 | // Failure record type 113 | type FailureRecord struct { 114 | error 115 | Data []byte 116 | PartitionKey string 117 | } 118 | 119 | // NotifyFailures registers and return listener to handle undeliverable messages. 120 | // The incoming struct has a copy of the Data and the PartitionKey along with some 121 | // error information about why the publishing failed. 122 | func (p *Producer) NotifyFailures() <-chan *FailureRecord { 123 | p.Lock() 124 | defer p.Unlock() 125 | if !p.notify { 126 | p.notify = true 127 | p.failure = make(chan *FailureRecord, p.BacklogCount) 128 | } 129 | return p.failure 130 | } 131 | 132 | // Start the producer 133 | func (p *Producer) Start() { 134 | p.Logger.Info("starting producer", LogValue{"stream", p.StreamName}) 135 | go p.loop() 136 | } 137 | 138 | // Stop the producer gracefully. Flushes any in-flight data. 139 | func (p *Producer) Stop() { 140 | p.Lock() 141 | p.stopped = true 142 | p.Unlock() 143 | p.Logger.Info("stopping producer", LogValue{"backlog", len(p.records)}) 144 | 145 | // drain 146 | if record, ok := p.drainIfNeed(); ok { 147 | p.records <- record 148 | } 149 | p.done <- struct{}{} 150 | close(p.records) 151 | 152 | // wait 153 | <-p.done 154 | p.semaphore.wait() 155 | 156 | // close the failures channel if we notify 157 | p.RLock() 158 | if p.notify { 159 | close(p.failure) 160 | } 161 | p.RUnlock() 162 | p.Logger.Info("stopped producer") 163 | } 164 | 165 | // loop and flush at the configured interval, or when the buffer is exceeded. 
// loop is the producer's single background goroutine: it batches records from
// the backlog channel and flushes them on size, count, interval, or drain.
// It exits only after Stop closes the records channel and the backlog is empty.
func (p *Producer) loop() {
	size := 0    // running byte size of buf
	drain := false // set once Stop signals via p.done
	buf := make([]ktypes.PutRecordsRequestEntry, 0, p.BatchCount)
	tick := time.NewTicker(p.FlushInterval)

	// flush hands the current buffer to a new goroutine (bounded by the
	// semaphore) and starts a fresh, empty batch.
	flush := func(msg string) {
		p.semaphore.acquire()
		go p.flush(buf, msg)
		buf = nil
		size = 0
	}

	// bufAppend adds one record, flushing first if it would exceed the batch
	// byte size, and flushing after if it reaches the batch record count.
	bufAppend := func(record *ktypes.PutRecordsRequestEntry) {
		// the record size limit applies to the total size of the
		// partition key and data blob.
		rsize := len(record.Data) + len([]byte(*record.PartitionKey))
		if size+rsize > p.BatchSize {
			flush("batch size")
		}
		size += rsize
		buf = append(buf, *record)
		if len(buf) >= p.BatchCount {
			flush("batch length")
		}
	}

	defer tick.Stop()
	// closing p.done tells Stop that the drain has completed
	defer close(p.done)

	for {
		select {
		case record, ok := <-p.records:
			// Stop sends on p.done (setting drain) before closing p.records,
			// so ok==false with drain==true marks the end of the backlog.
			if drain && !ok {
				if size > 0 {
					flush("drain")
				}
				p.Logger.Info("backlog drained")
				return
			}
			bufAppend(record)
		case <-tick.C:
			// on each tick, pull any partially-filled aggregate as well
			if record, ok := p.drainIfNeed(); ok {
				bufAppend(record)
			}
			// if the buffer is still containing records
			if size > 0 {
				flush("interval")
			}
		case <-p.done:
			drain = true
		}
	}
}

// drainIfNeed drains the aggregator into a single PutRecords entry when it
// holds any buffered data; the bool reports whether a record was produced.
// Drain errors are logged and reported as "no record".
func (p *Producer) drainIfNeed() (*ktypes.PutRecordsRequestEntry, bool) {
	// cheap read-locked check first; upgrade to a write lock only when needed
	// (NOTE(review): the aggregator can change between the two locks, which is
	// tolerated — Drain is assumed safe on an empty aggregator; confirm there)
	p.RLock()
	needToDrain := p.aggregator.Size() > 0
	p.RUnlock()
	if needToDrain {
		p.Lock()
		record, err := p.aggregator.Drain()
		p.Unlock()
		if err != nil {
			p.Logger.Error("drain aggregator", err)
		} else {
			return record, true
		}
	}
	return nil, false
}

// flush records and retry failures if necessary.
// for example: when we get "ProvisionedThroughputExceededException"
// flush keeps retrying (with jittered backoff) until every record in the
// batch is accepted, or until the whole request fails with an error, in
// which case the records are dispatched to the failures channel (if anyone
// subscribed) and dropped otherwise.
func (p *Producer) flush(records []ktypes.PutRecordsRequestEntry, reason string) {
	b := &backoff.Backoff{
		Jitter: true,
	}

	// free one slot for the next concurrent flush, no matter how we exit
	defer p.semaphore.release()

	for {
		p.Logger.Info("flushing records", LogValue{"reason", reason}, LogValue{"records", len(records)})
		out, err := p.Client.PutRecords(context.Background(), &k.PutRecordsInput{
			StreamName: aws.String(p.StreamName),
			Records:    records,
		})

		if err != nil {
			// whole-request failure: treated as unrecoverable — report to
			// subscribers instead of retrying
			p.Logger.Error("flush", err)
			p.RLock()
			notify := p.notify
			p.RUnlock()
			if notify {
				p.dispatchFailures(records, err)
			}
			return
		}

		if p.Verbose {
			// log the per-record outcome of this attempt
			for i, r := range out.Records {
				values := make([]LogValue, 2)
				if r.ErrorCode != nil {
					values[0] = LogValue{"ErrorCode", *r.ErrorCode}
					values[1] = LogValue{"ErrorMessage", *r.ErrorMessage}
				} else {
					values[0] = LogValue{"ShardId", *r.ShardId}
					values[1] = LogValue{"SequenceNumber", *r.SequenceNumber}
				}
				p.Logger.Info(fmt.Sprintf("Result[%d]", i), values...)
			}
		}

		failed := *out.FailedRecordCount
		if failed == 0 {
			return
		}

		duration := b.Duration()

		p.Logger.Info(
			"put failures",
			LogValue{"failures", failed},
			LogValue{"backoff", duration.String()},
		)
		time.Sleep(duration)

		// change the logging state for the next iteration,
		// and retry only the records the service rejected
		reason = "retry"
		records = failures(records, out.Records)
	}
}

// dispatchFailures gets batch of records, extract them, and push them
// into the failure channel. Aggregated records are recursively unpacked so
// subscribers receive the original user records, not the aggregate blob.
func (p *Producer) dispatchFailures(records []ktypes.PutRecordsRequestEntry, err error) {
	for _, r := range records {
		if isAggregated(&r) {
			p.dispatchFailures(extractRecords(&r), err)
		} else {
			p.failure <- &FailureRecord{err, r.Data, *r.PartitionKey}
		}
	}
}

// failures returns the failed records as indicated in the response.
// The response entries are positionally aligned with the request records,
// so index i in the response corresponds to records[i].
func failures(records []ktypes.PutRecordsRequestEntry,
	response []ktypes.PutRecordsResultEntry) (out []ktypes.PutRecordsRequestEntry) {
	for i, record := range response {
		if record.ErrorCode != nil {
			out = append(out, records[i])
		}
	}
	return
}
20 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 21 | // Verify that runtime/protoimpl is sufficiently up-to-date. 22 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 23 | ) 24 | 25 | type AggregatedRecord struct { 26 | state protoimpl.MessageState 27 | sizeCache protoimpl.SizeCache 28 | unknownFields protoimpl.UnknownFields 29 | 30 | PartitionKeyTable []string `protobuf:"bytes,1,rep,name=partition_key_table,json=partitionKeyTable" json:"partition_key_table,omitempty"` 31 | ExplicitHashKeyTable []string `protobuf:"bytes,2,rep,name=explicit_hash_key_table,json=explicitHashKeyTable" json:"explicit_hash_key_table,omitempty"` 32 | Records []*Record `protobuf:"bytes,3,rep,name=records" json:"records,omitempty"` 33 | } 34 | 35 | func (x *AggregatedRecord) Reset() { 36 | *x = AggregatedRecord{} 37 | if protoimpl.UnsafeEnabled { 38 | mi := &file_messages_proto_msgTypes[0] 39 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 40 | ms.StoreMessageInfo(mi) 41 | } 42 | } 43 | 44 | func (x *AggregatedRecord) String() string { 45 | return protoimpl.X.MessageStringOf(x) 46 | } 47 | 48 | func (*AggregatedRecord) ProtoMessage() {} 49 | 50 | func (x *AggregatedRecord) ProtoReflect() protoreflect.Message { 51 | mi := &file_messages_proto_msgTypes[0] 52 | if protoimpl.UnsafeEnabled && x != nil { 53 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 54 | if ms.LoadMessageInfo() == nil { 55 | ms.StoreMessageInfo(mi) 56 | } 57 | return ms 58 | } 59 | return mi.MessageOf(x) 60 | } 61 | 62 | // Deprecated: Use AggregatedRecord.ProtoReflect.Descriptor instead. 
63 | func (*AggregatedRecord) Descriptor() ([]byte, []int) { 64 | return file_messages_proto_rawDescGZIP(), []int{0} 65 | } 66 | 67 | func (x *AggregatedRecord) GetPartitionKeyTable() []string { 68 | if x != nil { 69 | return x.PartitionKeyTable 70 | } 71 | return nil 72 | } 73 | 74 | func (x *AggregatedRecord) GetExplicitHashKeyTable() []string { 75 | if x != nil { 76 | return x.ExplicitHashKeyTable 77 | } 78 | return nil 79 | } 80 | 81 | func (x *AggregatedRecord) GetRecords() []*Record { 82 | if x != nil { 83 | return x.Records 84 | } 85 | return nil 86 | } 87 | 88 | type Tag struct { 89 | state protoimpl.MessageState 90 | sizeCache protoimpl.SizeCache 91 | unknownFields protoimpl.UnknownFields 92 | 93 | Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` 94 | Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` 95 | } 96 | 97 | func (x *Tag) Reset() { 98 | *x = Tag{} 99 | if protoimpl.UnsafeEnabled { 100 | mi := &file_messages_proto_msgTypes[1] 101 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 102 | ms.StoreMessageInfo(mi) 103 | } 104 | } 105 | 106 | func (x *Tag) String() string { 107 | return protoimpl.X.MessageStringOf(x) 108 | } 109 | 110 | func (*Tag) ProtoMessage() {} 111 | 112 | func (x *Tag) ProtoReflect() protoreflect.Message { 113 | mi := &file_messages_proto_msgTypes[1] 114 | if protoimpl.UnsafeEnabled && x != nil { 115 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 116 | if ms.LoadMessageInfo() == nil { 117 | ms.StoreMessageInfo(mi) 118 | } 119 | return ms 120 | } 121 | return mi.MessageOf(x) 122 | } 123 | 124 | // Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
125 | func (*Tag) Descriptor() ([]byte, []int) { 126 | return file_messages_proto_rawDescGZIP(), []int{1} 127 | } 128 | 129 | func (x *Tag) GetKey() string { 130 | if x != nil && x.Key != nil { 131 | return *x.Key 132 | } 133 | return "" 134 | } 135 | 136 | func (x *Tag) GetValue() string { 137 | if x != nil && x.Value != nil { 138 | return *x.Value 139 | } 140 | return "" 141 | } 142 | 143 | type Record struct { 144 | state protoimpl.MessageState 145 | sizeCache protoimpl.SizeCache 146 | unknownFields protoimpl.UnknownFields 147 | 148 | PartitionKeyIndex *uint64 `protobuf:"varint,1,req,name=partition_key_index,json=partitionKeyIndex" json:"partition_key_index,omitempty"` 149 | ExplicitHashKeyIndex *uint64 `protobuf:"varint,2,opt,name=explicit_hash_key_index,json=explicitHashKeyIndex" json:"explicit_hash_key_index,omitempty"` 150 | Data []byte `protobuf:"bytes,3,req,name=data" json:"data,omitempty"` 151 | Tags []*Tag `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` 152 | } 153 | 154 | func (x *Record) Reset() { 155 | *x = Record{} 156 | if protoimpl.UnsafeEnabled { 157 | mi := &file_messages_proto_msgTypes[2] 158 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 159 | ms.StoreMessageInfo(mi) 160 | } 161 | } 162 | 163 | func (x *Record) String() string { 164 | return protoimpl.X.MessageStringOf(x) 165 | } 166 | 167 | func (*Record) ProtoMessage() {} 168 | 169 | func (x *Record) ProtoReflect() protoreflect.Message { 170 | mi := &file_messages_proto_msgTypes[2] 171 | if protoimpl.UnsafeEnabled && x != nil { 172 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 173 | if ms.LoadMessageInfo() == nil { 174 | ms.StoreMessageInfo(mi) 175 | } 176 | return ms 177 | } 178 | return mi.MessageOf(x) 179 | } 180 | 181 | // Deprecated: Use Record.ProtoReflect.Descriptor instead. 
182 | func (*Record) Descriptor() ([]byte, []int) { 183 | return file_messages_proto_rawDescGZIP(), []int{2} 184 | } 185 | 186 | func (x *Record) GetPartitionKeyIndex() uint64 { 187 | if x != nil && x.PartitionKeyIndex != nil { 188 | return *x.PartitionKeyIndex 189 | } 190 | return 0 191 | } 192 | 193 | func (x *Record) GetExplicitHashKeyIndex() uint64 { 194 | if x != nil && x.ExplicitHashKeyIndex != nil { 195 | return *x.ExplicitHashKeyIndex 196 | } 197 | return 0 198 | } 199 | 200 | func (x *Record) GetData() []byte { 201 | if x != nil { 202 | return x.Data 203 | } 204 | return nil 205 | } 206 | 207 | func (x *Record) GetTags() []*Tag { 208 | if x != nil { 209 | return x.Tags 210 | } 211 | return nil 212 | } 213 | 214 | var File_messages_proto protoreflect.FileDescriptor 215 | 216 | var file_messages_proto_rawDesc = []byte{ 217 | 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 218 | 0x12, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x22, 0xa5, 0x01, 0x0a, 0x10, 0x41, 219 | 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 220 | 0x2e, 0x0a, 0x13, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 221 | 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x70, 0x61, 222 | 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 223 | 0x35, 0x0a, 0x17, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 224 | 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 225 | 0x52, 0x14, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4b, 0x65, 226 | 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 227 | 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 228 | 0x65, 0x72, 0x2e, 0x52, 0x65, 
0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 229 | 0x64, 0x73, 0x22, 0x2d, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 230 | 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 231 | 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 232 | 0x65, 0x22, 0xa6, 0x01, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2e, 0x0a, 0x13, 233 | 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 234 | 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 235 | 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x35, 0x0a, 0x17, 236 | 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6b, 0x65, 237 | 0x79, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x65, 238 | 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4b, 0x65, 0x79, 0x49, 0x6e, 239 | 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x02, 0x28, 240 | 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 241 | 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 242 | 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 243 | 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 244 | } 245 | 246 | var ( 247 | file_messages_proto_rawDescOnce sync.Once 248 | file_messages_proto_rawDescData = file_messages_proto_rawDesc 249 | ) 250 | 251 | func file_messages_proto_rawDescGZIP() []byte { 252 | file_messages_proto_rawDescOnce.Do(func() { 253 | file_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_messages_proto_rawDescData) 254 | }) 255 | return file_messages_proto_rawDescData 256 | } 257 | 258 | var 
file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 3) 259 | var file_messages_proto_goTypes = []any{ 260 | (*AggregatedRecord)(nil), // 0: producer.AggregatedRecord 261 | (*Tag)(nil), // 1: producer.Tag 262 | (*Record)(nil), // 2: producer.Record 263 | } 264 | var file_messages_proto_depIdxs = []int32{ 265 | 2, // 0: producer.AggregatedRecord.records:type_name -> producer.Record 266 | 1, // 1: producer.Record.tags:type_name -> producer.Tag 267 | 2, // [2:2] is the sub-list for method output_type 268 | 2, // [2:2] is the sub-list for method input_type 269 | 2, // [2:2] is the sub-list for extension type_name 270 | 2, // [2:2] is the sub-list for extension extendee 271 | 0, // [0:2] is the sub-list for field type_name 272 | } 273 | 274 | func init() { file_messages_proto_init() } 275 | func file_messages_proto_init() { 276 | if File_messages_proto != nil { 277 | return 278 | } 279 | if !protoimpl.UnsafeEnabled { 280 | file_messages_proto_msgTypes[0].Exporter = func(v any, i int) any { 281 | switch v := v.(*AggregatedRecord); i { 282 | case 0: 283 | return &v.state 284 | case 1: 285 | return &v.sizeCache 286 | case 2: 287 | return &v.unknownFields 288 | default: 289 | return nil 290 | } 291 | } 292 | file_messages_proto_msgTypes[1].Exporter = func(v any, i int) any { 293 | switch v := v.(*Tag); i { 294 | case 0: 295 | return &v.state 296 | case 1: 297 | return &v.sizeCache 298 | case 2: 299 | return &v.unknownFields 300 | default: 301 | return nil 302 | } 303 | } 304 | file_messages_proto_msgTypes[2].Exporter = func(v any, i int) any { 305 | switch v := v.(*Record); i { 306 | case 0: 307 | return &v.state 308 | case 1: 309 | return &v.sizeCache 310 | case 2: 311 | return &v.unknownFields 312 | default: 313 | return nil 314 | } 315 | } 316 | } 317 | type x struct{} 318 | out := protoimpl.TypeBuilder{ 319 | File: protoimpl.DescBuilder{ 320 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 321 | RawDescriptor: file_messages_proto_rawDesc, 322 | NumEnums: 0, 323 | 
NumMessages: 3, 324 | NumExtensions: 0, 325 | NumServices: 0, 326 | }, 327 | GoTypes: file_messages_proto_goTypes, 328 | DependencyIndexes: file_messages_proto_depIdxs, 329 | MessageInfos: file_messages_proto_msgTypes, 330 | }.Build() 331 | File_messages_proto = out.File 332 | file_messages_proto_rawDesc = nil 333 | file_messages_proto_goTypes = nil 334 | file_messages_proto_depIdxs = nil 335 | } 336 | --------------------------------------------------------------------------------