├── store ├── mysql │ ├── mysql_databaseutils_test.go │ ├── mysql.go │ └── mysql_test.go ├── postgres │ ├── postgres_databaseutils_test.go │ ├── postgres.go │ └── postgres_test.go ├── redis │ ├── options.go │ ├── redis_test.go │ └── redis.go ├── ddb │ ├── retryer.go │ ├── retryer_test.go │ ├── ddb_test.go │ └── ddb.go └── memory │ ├── store_test.go │ └── store.go ├── examples ├── consumer-redis │ ├── README.md │ └── main.go ├── consumer-dynamodb │ ├── README.md │ └── main.go ├── producer │ ├── README.md │ └── main.go ├── consumer-postgres │ ├── README.md │ └── main.go ├── consumer-mysql │ ├── README.md │ └── main.go └── consumer │ └── main.go ├── .travis.yml ├── internal └── deaggregator │ ├── README.md │ ├── deaggregator.go │ └── deaggregator_test.go ├── counter.go ├── logger.go ├── CONTRIBUTING.md ├── store.go ├── .gitignore ├── client.go ├── group.go ├── kinesis.go ├── go.mod ├── LICENSE ├── CHANGELOG.md ├── options.go ├── allgroup.go ├── consumer.go ├── README.md ├── consumer_test.go └── go.sum /store/mysql/mysql_databaseutils_test.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import "database/sql" 4 | 5 | func (c *Checkpoint) SetConn(conn *sql.DB) { 6 | c.conn = conn 7 | } 8 | -------------------------------------------------------------------------------- /store/postgres/postgres_databaseutils_test.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import "database/sql" 4 | 5 | func (c *Checkpoint) SetConn(conn *sql.DB) { 6 | c.conn = conn 7 | } 8 | -------------------------------------------------------------------------------- /examples/consumer-redis/README.md: -------------------------------------------------------------------------------- 1 | # Consumer 2 | 3 | Read records from the Kinesis stream 4 | 5 | ### Run the consumer 6 | 7 | $ go run main.go --app appName --stream streamName 8 | 
-------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | services: 3 | - redis-server 4 | go: 5 | - "1.13" 6 | branches: 7 | only: 8 | - master 9 | script: 10 | - env GO111MODULE=on go test -v -race ./... 11 | -------------------------------------------------------------------------------- /examples/consumer-dynamodb/README.md: -------------------------------------------------------------------------------- 1 | # Consumer 2 | 3 | Read records from the Kinesis stream 4 | 5 | ### Run the consumer 6 | 7 | $ go run main.go --app appName --stream streamName --table tableName 8 | -------------------------------------------------------------------------------- /examples/producer/README.md: -------------------------------------------------------------------------------- 1 | # Producer 2 | 3 | A prepopulated file with JSON users is available on S3 for seeing the stream. 
4 | 5 | ## Running the code 6 | 7 | $ cat users.txt | go run main.go --stream streamName 8 | -------------------------------------------------------------------------------- /examples/consumer-postgres/README.md: -------------------------------------------------------------------------------- 1 | # Consumer with postgres checkpoint 2 | 3 | Read records from the Kinesis stream using postgres as checkpoint 4 | 5 | ## Run the consumer 6 | 7 | go run main.go --app appName --stream streamName --table tableName --connection connectionString 8 | -------------------------------------------------------------------------------- /internal/deaggregator/README.md: -------------------------------------------------------------------------------- 1 | # Temporary Deaggregator 2 | 3 | Upgrading to aws-sdk-go-v2 was blocked on a PR to introduce a new Deaggregator: 4 | https://github.com/awslabs/kinesis-aggregation/pull/143/files 5 | 6 | Once that PR is merged I'll remove this code and pull in the `awslabs/kinesis-aggregation` repo. 
-------------------------------------------------------------------------------- /counter.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | // Counter interface is used for exposing basic metrics from the scanner 4 | type Counter interface { 5 | Add(string, int64) 6 | } 7 | 8 | // noopCounter implements counter interface with discard 9 | type noopCounter struct{} 10 | 11 | func (n noopCounter) Add(string, int64) {} 12 | -------------------------------------------------------------------------------- /examples/consumer-mysql/README.md: -------------------------------------------------------------------------------- 1 | # Consumer with mysl checkpoint 2 | 3 | Read records from the Kinesis stream using mysql as checkpoint 4 | 5 | ## Run the consumer 6 | 7 | go run main.go --app --stream --table --connection 8 | 9 | Connection string should look something like 10 | 11 | user:password@/dbname 12 | -------------------------------------------------------------------------------- /store/redis/options.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis "github.com/go-redis/redis/v9" 4 | 5 | // Option is used to override defaults when creating a new Redis checkpoint 6 | type Option func(*Checkpoint) 7 | 8 | // WithClient overrides the default client 9 | func WithClient(client *redis.Client) Option { 10 | return func(c *Checkpoint) { 11 | c.client = client 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "log" 5 | ) 6 | 7 | // A Logger is a minimal interface to as a adaptor for external logging library to consumer 8 | type Logger interface { 9 | Log(...interface{}) 10 | } 11 | 12 | // noopLogger implements logger interface with discard 13 | type noopLogger 
struct { 14 | logger *log.Logger 15 | } 16 | 17 | // Log using stdlib logger. See log.Println. 18 | func (l noopLogger) Log(args ...interface{}) { 19 | l.logger.Println(args...) 20 | } 21 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Golang Kinesis Connectors 2 | 3 | Pull requests are much apprecited. Please help make the project a success! 4 | 5 | To contribute: 6 | 7 | 1. Fork the [official repository][1]. 8 | 2. Make your changes in a topic branch. 9 | 3. Squash commits and add an [excellent commit message][2]. 10 | 4. Send a pull request. 11 | 12 | [1]: https://github.com/harlow/kinesis-connectors/tree/master 13 | [2]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 14 | -------------------------------------------------------------------------------- /store.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | // Store interface used to persist scan progress 4 | type Store interface { 5 | GetCheckpoint(streamName, shardID string) (string, error) 6 | SetCheckpoint(streamName, shardID, sequenceNumber string) error 7 | } 8 | 9 | // noopStore implements the storage interface with discard 10 | type noopStore struct{} 11 | 12 | func (n noopStore) GetCheckpoint(string, string) (string, error) { return "", nil } 13 | func (n noopStore) SetCheckpoint(string, string, string) error { return nil } 14 | -------------------------------------------------------------------------------- /store/ddb/retryer.go: -------------------------------------------------------------------------------- 1 | package ddb 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 5 | ) 6 | 7 | // Retryer interface contains one method that decides whether to retry based on error 8 | type Retryer interface { 9 | ShouldRetry(error) bool 10 | } 11 | 
12 | // DefaultRetryer . 13 | type DefaultRetryer struct { 14 | Retryer 15 | } 16 | 17 | // ShouldRetry when error occured 18 | func (r *DefaultRetryer) ShouldRetry(err error) bool { 19 | switch err.(type) { 20 | case *types.ProvisionedThroughputExceededException: 21 | return true 22 | } 23 | return false 24 | } 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.test 23 | *.exe 24 | 25 | # vim temp files 26 | .*.swp 27 | .*.swo 28 | 29 | # System files 30 | .DS_Store 31 | tags* 32 | 33 | # Vendored files 34 | vendor/** 35 | 36 | # Benchmark files 37 | prof.cpu 38 | prof.mem 39 | 40 | # VSCode files 41 | /.vscode 42 | /**/debug 43 | 44 | # Goland files 45 | .idea/ 46 | tmp/** 47 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 7 | ) 8 | 9 | // kinesisClient defines the interface of functions needed for the consumer 10 | type kinesisClient interface { 11 | GetRecords(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) 12 | ListShards(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) 13 | GetShardIterator(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, 
error) 14 | } 15 | -------------------------------------------------------------------------------- /group.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 7 | ) 8 | 9 | // Group interface used to manage which shard to process 10 | type Group interface { 11 | Start(ctx context.Context, shardc chan types.Shard) error 12 | GetCheckpoint(streamName, shardID string) (string, error) 13 | SetCheckpoint(streamName, shardID, sequenceNumber string) error 14 | } 15 | 16 | type CloseableGroup interface { 17 | Group 18 | // Allows shard processors to tell the group when the shard has been 19 | // fully processed. Should be called only once per shardID. 20 | CloseShard(ctx context.Context, shardID string) error 21 | } 22 | -------------------------------------------------------------------------------- /store/memory/store_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_CheckpointLifecycle(t *testing.T) { 8 | c := New() 9 | 10 | // set 11 | c.SetCheckpoint("streamName", "shardID", "testSeqNum") 12 | 13 | // get 14 | val, err := c.GetCheckpoint("streamName", "shardID") 15 | if err != nil { 16 | t.Fatalf("get checkpoint error: %v", err) 17 | } 18 | if val != "testSeqNum" { 19 | t.Fatalf("checkpoint exists expected %s, got %s", "testSeqNum", val) 20 | } 21 | } 22 | 23 | func Test_SetEmptySeqNum(t *testing.T) { 24 | c := New() 25 | 26 | err := c.SetCheckpoint("streamName", "shardID", "") 27 | if err == nil || err.Error() != "sequence number should not be empty" { 28 | t.Fatalf("should not allow empty sequence number") 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /store/ddb/retryer_test.go: -------------------------------------------------------------------------------- 
1 | package ddb 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/aws/aws-sdk-go-v2/aws" 7 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 8 | ) 9 | 10 | func TestDefaultRetyer(t *testing.T) { 11 | retryableError := &types.ProvisionedThroughputExceededException{Message: aws.String("error not retryable")} 12 | // retryer is not nil and should returns according to what error is passed in. 13 | q := &DefaultRetryer{} 14 | if q.ShouldRetry(retryableError) != true { 15 | t.Errorf("expected ShouldRetry returns %v. got %v", false, q.ShouldRetry(retryableError)) 16 | } 17 | 18 | nonRetryableError := &types.BackupInUseException{Message: aws.String("error not retryable")} 19 | shouldRetry := q.ShouldRetry(nonRetryableError) 20 | if shouldRetry != false { 21 | t.Errorf("expected ShouldRetry returns %v. got %v", true, shouldRetry) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /kinesis.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/aws/aws-sdk-go-v2/aws" 8 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 9 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 10 | ) 11 | 12 | // listShards pulls a list of Shard IDs from the kinesis api 13 | func listShards(ctx context.Context, ksis kinesisClient, streamName string) ([]types.Shard, error) { 14 | var ss []types.Shard 15 | var listShardsInput = &kinesis.ListShardsInput{ 16 | StreamName: aws.String(streamName), 17 | } 18 | 19 | for { 20 | resp, err := ksis.ListShards(ctx, listShardsInput) 21 | if err != nil { 22 | return nil, fmt.Errorf("ListShards error: %w", err) 23 | } 24 | ss = append(ss, resp.Shards...) 
// New creates an empty in-memory checkpoint store.
func New() *Store {
	return &Store{}
}

// Store keeps checkpoints in process memory, keyed by stream name and
// shard ID. It is safe for concurrent use but offers no persistence
// beyond the lifetime of the process.
type Store struct {
	sync.Map
}

// SetCheckpoint records the sequence number for a stream/shard pair.
// An empty sequence number is rejected with an error.
func (s *Store) SetCheckpoint(streamName, shardID, sequenceNumber string) error {
	if sequenceNumber == "" {
		return fmt.Errorf("sequence number should not be empty")
	}
	s.Store(fmt.Sprintf("%s:%s", streamName, shardID), sequenceNumber)
	return nil
}

// GetCheckpoint returns the sequence number previously stored for the
// stream/shard pair, or an empty string (and nil error) when none exists.
func (s *Store) GetCheckpoint(streamName, shardID string) (string, error) {
	v, found := s.Load(fmt.Sprintf("%s:%s", streamName, shardID))
	if !found {
		return "", nil
	}
	return v.(string), nil
}
v1.5.0 13 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 14 | github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f 15 | github.com/go-redis/redis/v9 v9.0.0-rc.2 16 | github.com/go-sql-driver/mysql v1.5.0 17 | github.com/golang/protobuf v1.5.2 18 | github.com/gomodule/redigo v2.0.0+incompatible // indirect 19 | github.com/lib/pq v1.7.0 20 | github.com/pkg/errors v0.9.1 21 | github.com/stretchr/testify v1.8.1 22 | github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect 23 | ) 24 | 25 | go 1.13 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Harlow Ward 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | ## [Unreleased (`master`)][unreleased] 6 | 7 | Major changes: 8 | 9 | * Remove the concept of `ScanStatus` to simplify the scanning interface 10 | 11 | For more context on this change see: https://github.com/harlow/kinesis-consumer/issues/75 12 | 13 | ## v0.3.0 - 2018-12-28 14 | 15 | Major changes: 16 | 17 | * Remove concept of `Client` it was confusing as it wasn't a direct standin for a Kinesis client. 18 | * Rename `ScanError` to `ScanStatus` as it's not always an error. 19 | 20 | Minor changes: 21 | 22 | * Update tests to use Kinesis mock 23 | 24 | ## v0.2.0 - 2018-07-28 25 | 26 | This is the last stable release from which there is a separate Client. It has caused confusion and will be removed going forward. 27 | 28 | https://github.com/harlow/kinesis-consumer/releases/tag/v0.2.0 29 | 30 | ## v0.1.0 - 2017-11-20 31 | 32 | This is the last stable release of the consumer which aggregated records in `batch` before calling the callback func. 
33 | 34 | https://github.com/harlow/kinesis-consumer/releases/tag/v0.1.0 35 | 36 | [unreleased]: https://github.com/harlow/kinesis-consumer/compare/v0.2.0...HEAD 37 | [options]: https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis 38 | -------------------------------------------------------------------------------- /store/redis/redis_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/alicebob/miniredis" 7 | redis "github.com/go-redis/redis/v9" 8 | ) 9 | 10 | func Test_CheckpointOptions(t *testing.T) { 11 | s, err := miniredis.Run() 12 | if err != nil { 13 | panic(err) 14 | } 15 | defer s.Close() 16 | 17 | client := redis.NewClient(&redis.Options{ 18 | Addr: s.Addr(), 19 | }) 20 | 21 | _, err = New("app", WithClient(client)) 22 | if err != nil { 23 | t.Fatalf("new checkpoint error: %v", err) 24 | } 25 | } 26 | 27 | func Test_CheckpointLifecycle(t *testing.T) { 28 | // new 29 | c, err := New("app") 30 | if err != nil { 31 | t.Fatalf("new checkpoint error: %v", err) 32 | } 33 | 34 | // set 35 | c.SetCheckpoint("streamName", "shardID", "testSeqNum") 36 | 37 | // get 38 | val, err := c.GetCheckpoint("streamName", "shardID") 39 | if err != nil { 40 | t.Fatalf("get checkpoint error: %v", err) 41 | } 42 | if val != "testSeqNum" { 43 | t.Fatalf("checkpoint exists expected %s, got %s", "testSeqNum", val) 44 | } 45 | } 46 | 47 | func Test_SetEmptySeqNum(t *testing.T) { 48 | c, err := New("app") 49 | if err != nil { 50 | t.Fatalf("new checkpoint error: %v", err) 51 | } 52 | 53 | err = c.SetCheckpoint("streamName", "shardID", "") 54 | if err == nil { 55 | t.Fatalf("should not allow empty sequence number") 56 | } 57 | } 58 | 59 | func Test_key(t *testing.T) { 60 | c, err := New("app") 61 | if err != nil { 62 | t.Fatalf("new checkpoint error: %v", err) 63 | } 64 | 65 | want := "app:checkpoint:stream:shard" 66 | 67 | if got := c.key("stream", "shard"); got 
!= want { 68 | t.Fatalf("checkpoint key, want %s, got %s", want, got) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /store/redis/redis.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | redis "github.com/go-redis/redis/v9" 9 | ) 10 | 11 | const localhost = "127.0.0.1:6379" 12 | 13 | // New returns a checkpoint that uses Redis for underlying storage 14 | func New(appName string, opts ...Option) (*Checkpoint, error) { 15 | if appName == "" { 16 | return nil, fmt.Errorf("must provide app name") 17 | } 18 | 19 | c := &Checkpoint{ 20 | appName: appName, 21 | } 22 | 23 | // override defaults 24 | for _, opt := range opts { 25 | opt(c) 26 | } 27 | 28 | // default client if none provided 29 | if c.client == nil { 30 | addr := os.Getenv("REDIS_URL") 31 | if addr == "" { 32 | addr = localhost 33 | } 34 | 35 | client := redis.NewClient(&redis.Options{Addr: addr}) 36 | c.client = client 37 | } 38 | 39 | // verify we can ping server 40 | _, err := c.client.Ping(context.Background()).Result() 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | return c, nil 46 | } 47 | 48 | // Checkpoint stores and retreives the last evaluated key from a DDB scan 49 | type Checkpoint struct { 50 | appName string 51 | client *redis.Client 52 | } 53 | 54 | // GetCheckpoint fetches the checkpoint for a particular Shard. 55 | func (c *Checkpoint) GetCheckpoint(streamName, shardID string) (string, error) { 56 | ctx := context.Background() 57 | val, _ := c.client.Get(ctx, c.key(streamName, shardID)).Result() 58 | return val, nil 59 | } 60 | 61 | // SetCheckpoint stores a checkpoint for a shard (e.g. sequence number of last record processed by application). 62 | // Upon failover, record processing is resumed from this point. 
63 | func (c *Checkpoint) SetCheckpoint(streamName, shardID, sequenceNumber string) error { 64 | if sequenceNumber == "" { 65 | return fmt.Errorf("sequence number should not be empty") 66 | } 67 | ctx := context.Background() 68 | err := c.client.Set(ctx, c.key(streamName, shardID), sequenceNumber, 0).Err() 69 | if err != nil { 70 | return err 71 | } 72 | return nil 73 | } 74 | 75 | // key generates a unique Redis key for storage of Checkpoint. 76 | func (c *Checkpoint) key(streamName, shardID string) string { 77 | return fmt.Sprintf("%v:checkpoint:%v:%v", c.appName, streamName, shardID) 78 | } 79 | -------------------------------------------------------------------------------- /examples/consumer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/config" 14 | "github.com/aws/aws-sdk-go-v2/credentials" 15 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 16 | consumer "github.com/harlow/kinesis-consumer" 17 | ) 18 | 19 | // A myLogger provides a minimalistic logger satisfying the Logger interface. 20 | type myLogger struct { 21 | logger *log.Logger 22 | } 23 | 24 | // Log logs the parameters to the stdlib logger. See log.Println. 25 | func (l *myLogger) Log(args ...interface{}) { 26 | l.logger.Println(args...) 
27 | } 28 | 29 | func main() { 30 | var ( 31 | stream = flag.String("stream", "", "Stream name") 32 | kinesisEndpoint = flag.String("endpoint", "http://localhost:4567", "Kinesis endpoint") 33 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 34 | ) 35 | flag.Parse() 36 | 37 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 38 | return aws.Endpoint{ 39 | PartitionID: "aws", 40 | URL: *kinesisEndpoint, 41 | SigningRegion: *awsRegion, 42 | }, nil 43 | }) 44 | 45 | // client 46 | cfg, err := config.LoadDefaultConfig( 47 | context.TODO(), 48 | config.WithRegion(*awsRegion), 49 | config.WithEndpointResolver(resolver), 50 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 51 | ) 52 | if err != nil { 53 | log.Fatalf("unable to load SDK config, %v", err) 54 | } 55 | var client = kinesis.NewFromConfig(cfg) 56 | 57 | // consumer 58 | c, err := consumer.New( 59 | *stream, 60 | consumer.WithClient(client), 61 | ) 62 | if err != nil { 63 | log.Fatalf("consumer error: %v", err) 64 | } 65 | 66 | // scan 67 | ctx := trap() 68 | err = c.Scan(ctx, func(r *consumer.Record) error { 69 | fmt.Println(string(r.Data)) 70 | return nil // continue scanning 71 | }) 72 | if err != nil { 73 | log.Fatalf("scan error: %v", err) 74 | } 75 | } 76 | 77 | func trap() context.Context { 78 | ctx, cancel := context.WithCancel(context.Background()) 79 | sigs := make(chan os.Signal, 1) 80 | signal.Notify(sigs, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) 81 | 82 | go func() { 83 | sig := <-sigs 84 | log.Printf("received %s", sig) 85 | cancel() 86 | }() 87 | 88 | return ctx 89 | } 90 | -------------------------------------------------------------------------------- /examples/consumer-mysql/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "expvar" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "os/signal" 11 | 
12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/config" 14 | "github.com/aws/aws-sdk-go-v2/credentials" 15 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 16 | consumer "github.com/harlow/kinesis-consumer" 17 | store "github.com/harlow/kinesis-consumer/store/mysql" 18 | ) 19 | 20 | func main() { 21 | var ( 22 | app = flag.String("app", "", "Consumer app name") 23 | stream = flag.String("stream", "", "Stream name") 24 | table = flag.String("table", "", "Table name") 25 | connStr = flag.String("connection", "", "Connection Str") 26 | kinesisEndpoint = flag.String("endpoint", "http://localhost:4567", "Kinesis endpoint") 27 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 28 | ) 29 | flag.Parse() 30 | 31 | // mysql checkpoint 32 | store, err := store.New(*app, *table, *connStr) 33 | if err != nil { 34 | log.Fatalf("checkpoint error: %v", err) 35 | } 36 | 37 | var counter = expvar.NewMap("counters") 38 | 39 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 40 | return aws.Endpoint{ 41 | PartitionID: "aws", 42 | URL: *kinesisEndpoint, 43 | SigningRegion: *awsRegion, 44 | }, nil 45 | }) 46 | 47 | // client 48 | cfg, err := config.LoadDefaultConfig( 49 | context.TODO(), 50 | config.WithRegion(*awsRegion), 51 | config.WithEndpointResolver(resolver), 52 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 53 | ) 54 | if err != nil { 55 | log.Fatalf("unable to load SDK config, %v", err) 56 | } 57 | var client = kinesis.NewFromConfig(cfg) 58 | 59 | // consumer 60 | c, err := consumer.New( 61 | *stream, 62 | consumer.WithClient(client), 63 | consumer.WithStore(store), 64 | consumer.WithCounter(counter), 65 | ) 66 | if err != nil { 67 | log.Fatalf("consumer error: %v", err) 68 | } 69 | 70 | // use cancel func to signal shutdown 71 | ctx, cancel := context.WithCancel(context.Background()) 72 | 73 | // trap SIGINT, wait to trigger shutdown 74 | 
signals := make(chan os.Signal, 1) 75 | signal.Notify(signals, os.Interrupt) 76 | 77 | go func() { 78 | <-signals 79 | cancel() 80 | }() 81 | 82 | // scan stream 83 | err = c.Scan(ctx, func(r *consumer.Record) error { 84 | fmt.Println(string(r.Data)) 85 | return nil 86 | }) 87 | 88 | if err != nil { 89 | log.Fatalf("scan error: %v", err) 90 | } 91 | 92 | if err := store.Shutdown(); err != nil { 93 | log.Fatalf("store shutdown error: %v", err) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /examples/consumer-postgres/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "expvar" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "os/signal" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/config" 14 | "github.com/aws/aws-sdk-go-v2/credentials" 15 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 16 | consumer "github.com/harlow/kinesis-consumer" 17 | store "github.com/harlow/kinesis-consumer/store/postgres" 18 | ) 19 | 20 | func main() { 21 | var ( 22 | app = flag.String("app", "", "Consumer app name") 23 | stream = flag.String("stream", "", "Stream name") 24 | table = flag.String("table", "", "Table name") 25 | connStr = flag.String("connection", "", "Connection Str") 26 | kinesisEndpoint = flag.String("endpoint", "http://localhost:4567", "Kinesis endpoint") 27 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 28 | ) 29 | flag.Parse() 30 | 31 | // postgres checkpoint 32 | store, err := store.New(*app, *table, *connStr) 33 | if err != nil { 34 | log.Fatalf("checkpoint error: %v", err) 35 | } 36 | 37 | var counter = expvar.NewMap("counters") 38 | 39 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 40 | return aws.Endpoint{ 41 | PartitionID: "aws", 42 | URL: *kinesisEndpoint, 43 | SigningRegion: *awsRegion, 44 | }, nil 45 | }) 46 | 47 | // client 48 | 
cfg, err := config.LoadDefaultConfig( 49 | context.TODO(), 50 | config.WithRegion(*awsRegion), 51 | config.WithEndpointResolver(resolver), 52 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 53 | ) 54 | if err != nil { 55 | log.Fatalf("unable to load SDK config, %v", err) 56 | } 57 | var client = kinesis.NewFromConfig(cfg) 58 | 59 | // consumer 60 | c, err := consumer.New( 61 | *stream, 62 | consumer.WithClient(client), 63 | consumer.WithStore(store), 64 | consumer.WithCounter(counter), 65 | ) 66 | if err != nil { 67 | log.Fatalf("consumer error: %v", err) 68 | } 69 | 70 | // use cancel func to signal shutdown 71 | ctx, cancel := context.WithCancel(context.Background()) 72 | 73 | // trap SIGINT, wait to trigger shutdown 74 | signals := make(chan os.Signal, 1) 75 | signal.Notify(signals, os.Interrupt) 76 | 77 | go func() { 78 | <-signals 79 | cancel() 80 | }() 81 | 82 | // scan stream 83 | err = c.Scan(ctx, func(r *consumer.Record) error { 84 | fmt.Println(string(r.Data)) 85 | return nil // continue scanning 86 | }) 87 | 88 | if err != nil { 89 | log.Fatalf("scan error: %v", err) 90 | } 91 | 92 | if err := store.Shutdown(); err != nil { 93 | log.Fatalf("store shutdown error: %v", err) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /options.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 7 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 8 | ) 9 | 10 | // Option is used to override defaults when creating a new Consumer 11 | type Option func(*Consumer) 12 | 13 | // WithGroup overrides the default storage 14 | func WithGroup(group Group) Option { 15 | return func(c *Consumer) { 16 | c.group = group 17 | } 18 | } 19 | 20 | // WithStore overrides the default storage 21 | func WithStore(store Store) Option { 22 | return 
func(c *Consumer) { 23 | c.store = store 24 | } 25 | } 26 | 27 | // WithLogger overrides the default logger 28 | func WithLogger(logger Logger) Option { 29 | return func(c *Consumer) { 30 | c.logger = logger 31 | } 32 | } 33 | 34 | // WithCounter overrides the default counter 35 | func WithCounter(counter Counter) Option { 36 | return func(c *Consumer) { 37 | c.counter = counter 38 | } 39 | } 40 | 41 | // WithClient overrides the default client 42 | func WithClient(client kinesisClient) Option { 43 | return func(c *Consumer) { 44 | c.client = client 45 | } 46 | } 47 | 48 | // WithShardIteratorType overrides the starting point for the consumer 49 | func WithShardIteratorType(t string) Option { 50 | return func(c *Consumer) { 51 | c.initialShardIteratorType = types.ShardIteratorType(t) 52 | } 53 | } 54 | 55 | // WithTimestamp overrides the starting point for the consumer 56 | func WithTimestamp(t time.Time) Option { 57 | return func(c *Consumer) { 58 | c.initialTimestamp = &t 59 | } 60 | } 61 | 62 | // WithScanInterval overrides the scan interval for the consumer 63 | func WithScanInterval(d time.Duration) Option { 64 | return func(c *Consumer) { 65 | c.scanInterval = d 66 | } 67 | } 68 | 69 | // WithMaxRecords overrides the maximum number of records to be 70 | // returned in a single GetRecords call for the consumer (specify a 71 | // value of up to 10,000) 72 | func WithMaxRecords(n int64) Option { 73 | return func(c *Consumer) { 74 | c.maxRecords = n 75 | } 76 | } 77 | 78 | // WithGetRecordsOptions passes the given option functions to the 79 | // kinesis client's GetRecords call 80 | func WithGetRecordsOptions(opts ...func(*kinesis.Options)) Option { 81 | return func(c *Consumer) { 82 | c.getRecordsOpts = opts 83 | } 84 | } 85 | 86 | func WithAggregation(a bool) Option { 87 | return func(c *Consumer) { 88 | c.isAggregated = a 89 | } 90 | } 91 | 92 | // ShardClosedHandler is a handler that will be called when the consumer has reached the end of a closed shard. 
93 | // No more records for that shard will be provided by the consumer. 94 | // An error can be returned to stop the consumer. 95 | type ShardClosedHandler = func(streamName, shardID string) error 96 | 97 | func WithShardClosedHandler(h ShardClosedHandler) Option { 98 | return func(c *Consumer) { 99 | c.shardClosedHandler = h 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /examples/consumer-redis/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | 11 | "github.com/aws/aws-sdk-go-v2/aws" 12 | "github.com/aws/aws-sdk-go-v2/config" 13 | "github.com/aws/aws-sdk-go-v2/credentials" 14 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 15 | consumer "github.com/harlow/kinesis-consumer" 16 | store "github.com/harlow/kinesis-consumer/store/redis" 17 | ) 18 | 19 | // A myLogger provides a minimalistic logger satisfying the Logger interface. 20 | type myLogger struct { 21 | logger *log.Logger 22 | } 23 | 24 | // Log logs the parameters to the stdlib logger. See log.Println. 25 | func (l *myLogger) Log(args ...interface{}) { 26 | l.logger.Println(args...) 
27 | } 28 | 29 | func main() { 30 | var ( 31 | app = flag.String("app", "", "Consumer app name") 32 | stream = flag.String("stream", "", "Stream name") 33 | kinesisEndpoint = flag.String("endpoint", "http://localhost:4567", "Kinesis endpoint") 34 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 35 | ) 36 | flag.Parse() 37 | 38 | // redis checkpoint store 39 | store, err := store.New(*app) 40 | if err != nil { 41 | log.Fatalf("store error: %v", err) 42 | } 43 | 44 | // logger 45 | logger := &myLogger{ 46 | logger: log.New(os.Stdout, "consumer-example: ", log.LstdFlags), 47 | } 48 | 49 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 50 | return aws.Endpoint{ 51 | PartitionID: "aws", 52 | URL: *kinesisEndpoint, 53 | SigningRegion: *awsRegion, 54 | }, nil 55 | }) 56 | 57 | // client 58 | cfg, err := config.LoadDefaultConfig( 59 | context.TODO(), 60 | config.WithRegion(*awsRegion), 61 | config.WithEndpointResolver(resolver), 62 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 63 | ) 64 | if err != nil { 65 | log.Fatalf("unable to load SDK config, %v", err) 66 | } 67 | var client = kinesis.NewFromConfig(cfg) 68 | 69 | // consumer 70 | c, err := consumer.New( 71 | *stream, 72 | consumer.WithClient(client), 73 | consumer.WithStore(store), 74 | consumer.WithLogger(logger), 75 | ) 76 | if err != nil { 77 | log.Fatalf("consumer error: %v", err) 78 | } 79 | 80 | // use cancel func to signal shutdown 81 | ctx, cancel := context.WithCancel(context.Background()) 82 | 83 | // trap SIGINT, wait to trigger shutdown 84 | signals := make(chan os.Signal, 1) 85 | signal.Notify(signals, os.Interrupt) 86 | 87 | go func() { 88 | <-signals 89 | fmt.Println("caught exit signal, cancelling context!") 90 | cancel() 91 | }() 92 | 93 | // scan stream 94 | err = c.Scan(ctx, func(r *consumer.Record) error { 95 | fmt.Println(string(r.Data)) 96 | return nil // continue scanning 97 | }) 98 | 
if err != nil { 99 | log.Fatalf("scan error: %v", err) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /internal/deaggregator/deaggregator.go: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | package deaggregator 4 | 5 | import ( 6 | "crypto/md5" 7 | "fmt" 8 | 9 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 10 | "github.com/golang/protobuf/proto" 11 | 12 | rec "github.com/awslabs/kinesis-aggregation/go/records" 13 | ) 14 | 15 | // Magic File Header for a KPL Aggregated Record 16 | var KplMagicHeader = fmt.Sprintf("%q", []byte("\xf3\x89\x9a\xc2")) 17 | 18 | const ( 19 | KplMagicLen = 4 // Length of magic header for KPL Aggregate Record checking. 20 | DigestSize = 16 // MD5 Message size for protobuf. 21 | ) 22 | 23 | // DeaggregateRecords takes an array of Kinesis records and expands any Protobuf 24 | // records within that array, returning an array of all records 25 | func DeaggregateRecords(records []*types.Record) ([]*types.Record, error) { 26 | var isAggregated bool 27 | allRecords := make([]*types.Record, 0) 28 | for _, record := range records { 29 | isAggregated = true 30 | 31 | var dataMagic string 32 | var decodedDataNoMagic []byte 33 | // Check if record is long enough to have magic file header 34 | if len(record.Data) >= KplMagicLen { 35 | dataMagic = fmt.Sprintf("%q", record.Data[:KplMagicLen]) 36 | decodedDataNoMagic = record.Data[KplMagicLen:] 37 | } else { 38 | isAggregated = false 39 | } 40 | 41 | // Check if record has KPL Aggregate Record Magic Header and data length 42 | // is correct size 43 | if KplMagicHeader != dataMagic || len(decodedDataNoMagic) <= DigestSize { 44 | isAggregated = false 45 | } 46 | 47 | if isAggregated { 48 | messageDigest := fmt.Sprintf("%x", decodedDataNoMagic[len(decodedDataNoMagic)-DigestSize:]) 49 | 
messageData := decodedDataNoMagic[:len(decodedDataNoMagic)-DigestSize] 50 | 51 | calculatedDigest := fmt.Sprintf("%x", md5.Sum(messageData)) 52 | 53 | // Check protobuf MD5 hash matches MD5 sum of record 54 | if messageDigest != calculatedDigest { 55 | isAggregated = false 56 | } else { 57 | aggRecord := &rec.AggregatedRecord{} 58 | err := proto.Unmarshal(messageData, aggRecord) 59 | 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | partitionKeys := aggRecord.PartitionKeyTable 65 | 66 | for _, aggrec := range aggRecord.Records { 67 | newRecord := createUserRecord(partitionKeys, aggrec, record) 68 | allRecords = append(allRecords, newRecord) 69 | } 70 | } 71 | } 72 | 73 | if !isAggregated { 74 | allRecords = append(allRecords, record) 75 | } 76 | } 77 | 78 | return allRecords, nil 79 | } 80 | 81 | // createUserRecord takes in the partitionKeys of the aggregated record, the individual 82 | // deaggregated record, and the original aggregated record builds a kinesis.Record and 83 | // returns it 84 | func createUserRecord(partitionKeys []string, aggRec *rec.Record, record *types.Record) *types.Record { 85 | partitionKey := partitionKeys[*aggRec.PartitionKeyIndex] 86 | 87 | return &types.Record{ 88 | ApproximateArrivalTimestamp: record.ApproximateArrivalTimestamp, 89 | Data: aggRec.Data, 90 | EncryptionType: record.EncryptionType, 91 | PartitionKey: &partitionKey, 92 | SequenceNumber: record.SequenceNumber, 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /examples/producer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "time" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/config" 14 | "github.com/aws/aws-sdk-go-v2/credentials" 15 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 16 | 
"github.com/aws/aws-sdk-go-v2/service/kinesis/types" 17 | ) 18 | 19 | func main() { 20 | var ( 21 | streamName = flag.String("stream", "", "Stream name") 22 | kinesisEndpoint = flag.String("endpoint", "http://localhost:4567", "Kinesis endpoint") 23 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 24 | ) 25 | flag.Parse() 26 | 27 | var records []types.PutRecordsRequestEntry 28 | 29 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 30 | return aws.Endpoint{ 31 | PartitionID: "aws", 32 | URL: *kinesisEndpoint, 33 | SigningRegion: *awsRegion, 34 | }, nil 35 | }) 36 | 37 | cfg, err := config.LoadDefaultConfig( 38 | context.TODO(), 39 | config.WithRegion(*awsRegion), 40 | config.WithEndpointResolver(resolver), 41 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 42 | ) 43 | if err != nil { 44 | log.Fatalf("unable to load SDK config, %v", err) 45 | } 46 | var client = kinesis.NewFromConfig(cfg) 47 | 48 | // create stream if doesn't exist 49 | if err := createStream(client, *streamName); err != nil { 50 | log.Fatalf("create stream error: %v", err) 51 | } 52 | 53 | // loop over file data 54 | b := bufio.NewScanner(os.Stdin) 55 | 56 | for b.Scan() { 57 | records = append(records, types.PutRecordsRequestEntry{ 58 | Data: b.Bytes(), 59 | PartitionKey: aws.String(time.Now().Format(time.RFC3339Nano)), 60 | }) 61 | 62 | if len(records) > 250 { 63 | putRecords(client, streamName, records) 64 | records = nil 65 | } 66 | } 67 | 68 | if len(records) > 0 { 69 | putRecords(client, streamName, records) 70 | } 71 | } 72 | 73 | func createStream(client *kinesis.Client, streamName string) error { 74 | resp, err := client.ListStreams(context.Background(), &kinesis.ListStreamsInput{}) 75 | if err != nil { 76 | return fmt.Errorf("list streams error: %v", err) 77 | } 78 | 79 | for _, val := range resp.StreamNames { 80 | if streamName == val { 81 | return nil 82 | } 83 | } 84 | 85 | _, err 
= client.CreateStream( 86 | context.Background(), 87 | &kinesis.CreateStreamInput{ 88 | StreamName: aws.String(streamName), 89 | ShardCount: aws.Int32(2), 90 | }, 91 | ) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | waiter := kinesis.NewStreamExistsWaiter(client) 97 | return waiter.Wait( 98 | context.Background(), 99 | &kinesis.DescribeStreamInput{ 100 | StreamName: aws.String(streamName), 101 | }, 102 | 30*time.Second, 103 | ) 104 | } 105 | 106 | func putRecords(client *kinesis.Client, streamName *string, records []types.PutRecordsRequestEntry) { 107 | _, err := client.PutRecords(context.Background(), &kinesis.PutRecordsInput{ 108 | StreamName: streamName, 109 | Records: records, 110 | }) 111 | if err != nil { 112 | log.Fatalf("error putting records: %v", err) 113 | } 114 | 115 | fmt.Print(".") 116 | } 117 | -------------------------------------------------------------------------------- /store/ddb/ddb_test.go: -------------------------------------------------------------------------------- 1 | package ddb 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "testing" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go-v2/config" 10 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 11 | ) 12 | 13 | type fakeRetryer struct { 14 | Name string 15 | } 16 | 17 | func (r *fakeRetryer) ShouldRetry(err error) bool { 18 | r.Name = "fakeRetryer" 19 | return false 20 | } 21 | 22 | func TestNewCheckpoint(t *testing.T) { 23 | c, err := New("", "") 24 | if c == nil { 25 | t.Errorf("expected checkpoint client instance. got %v", c) 26 | } 27 | if err != nil { 28 | t.Errorf("new checkpoint error expected nil. 
got %v", err) 29 | } 30 | } 31 | 32 | func TestCheckpointSetting(t *testing.T) { 33 | var ck Checkpoint 34 | ckPtr := &ck 35 | 36 | // Test WithMaxInterval 37 | setInterval := WithMaxInterval(time.Duration(2 * time.Minute)) 38 | setInterval(ckPtr) 39 | 40 | // Test WithRetryer 41 | var r fakeRetryer 42 | setRetryer := WithRetryer(&r) 43 | setRetryer(ckPtr) 44 | 45 | // Test WithDyanmoDBClient 46 | cfg, err := config.LoadDefaultConfig(context.TODO()) 47 | if err != nil { 48 | log.Fatalf("unable to load SDK config, %v", err) 49 | } 50 | var fakeDbClient = dynamodb.NewFromConfig(cfg) 51 | 52 | setDDBClient := WithDynamoClient(fakeDbClient) 53 | setDDBClient(ckPtr) 54 | 55 | if ckPtr.maxInterval != time.Duration(2*time.Minute) { 56 | t.Errorf("new checkpoint maxInterval expected 2 minute. got %v", ckPtr.maxInterval) 57 | } 58 | if ckPtr.retryer.ShouldRetry(nil) != false { 59 | t.Errorf("new checkpoint retryer ShouldRetry always returns %v . got %v", false, ckPtr.retryer.ShouldRetry(nil)) 60 | } 61 | if ckPtr.client != fakeDbClient { 62 | t.Errorf("new checkpoint dynamodb client reference should be %p. got %v", &fakeDbClient, ckPtr.client) 63 | } 64 | } 65 | 66 | func TestNewCheckpointWithOptions(t *testing.T) { 67 | // Test WithMaxInterval 68 | setInterval := WithMaxInterval(time.Duration(2 * time.Minute)) 69 | 70 | // Test WithRetryer 71 | var r fakeRetryer 72 | setRetryer := WithRetryer(&r) 73 | 74 | // Test WithDyanmoDBClient 75 | cfg, err := config.LoadDefaultConfig(context.TODO()) 76 | if err != nil { 77 | log.Fatalf("unable to load SDK config, %v", err) 78 | } 79 | var fakeDbClient = dynamodb.NewFromConfig(cfg) 80 | 81 | setDDBClient := WithDynamoClient(fakeDbClient) 82 | 83 | ckPtr, err := New("testapp", "testtable", setInterval, setRetryer, setDDBClient) 84 | if ckPtr == nil { 85 | t.Errorf("expected checkpoint client instance. got %v", ckPtr) 86 | } 87 | if err != nil { 88 | t.Errorf("new checkpoint error expected nil. 
got %v", err) 89 | } 90 | if ckPtr.appName != "testapp" { 91 | t.Errorf("new checkpoint app name expected %v. got %v", "testapp", ckPtr.appName) 92 | } 93 | if ckPtr.tableName != "testtable" { 94 | t.Errorf("new checkpoint table expected %v. got %v", "testtable", ckPtr.maxInterval) 95 | } 96 | 97 | if ckPtr.maxInterval != time.Duration(2*time.Minute) { 98 | t.Errorf("new checkpoint maxInterval expected 2 minute. got %v", ckPtr.maxInterval) 99 | } 100 | if ckPtr.retryer.ShouldRetry(nil) != false { 101 | t.Errorf("new checkpoint retryer ShouldRetry always returns %v . got %v", false, ckPtr.retryer.ShouldRetry(nil)) 102 | } 103 | if ckPtr.client != fakeDbClient { 104 | t.Errorf("new checkpoint dynamodb client reference should be %p. got %v", &fakeDbClient, ckPtr.client) 105 | } 106 | 107 | } 108 | -------------------------------------------------------------------------------- /store/mysql/mysql.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | "sync" 8 | "time" 9 | 10 | _ "github.com/go-sql-driver/mysql" 11 | ) 12 | 13 | type key struct { 14 | streamName string 15 | shardID string 16 | } 17 | 18 | // Option is used to override defaults when creating a new Checkpoint 19 | type Option func(*Checkpoint) 20 | 21 | // WithMaxInterval sets the flush interval 22 | func WithMaxInterval(maxInterval time.Duration) Option { 23 | return func(c *Checkpoint) { 24 | c.maxInterval = maxInterval 25 | } 26 | } 27 | 28 | // Checkpoint stores and retrieves the last evaluated key from a DDB scan 29 | type Checkpoint struct { 30 | appName string 31 | tableName string 32 | conn *sql.DB 33 | mu *sync.Mutex // protects the checkpoints 34 | done chan struct{} 35 | checkpoints map[key]string 36 | maxInterval time.Duration 37 | } 38 | 39 | // New returns a checkpoint that uses Mysql for underlying storage 40 | // Using connectionStr turn it more flexible to use specific db configs 
41 | func New(appName, tableName, connectionStr string, opts ...Option) (*Checkpoint, error) { 42 | if appName == "" { 43 | return nil, errors.New("application name not defined") 44 | } 45 | 46 | if tableName == "" { 47 | return nil, errors.New("table name not defined") 48 | } 49 | 50 | conn, err := sql.Open("mysql", connectionStr) 51 | 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | ck := &Checkpoint{ 57 | conn: conn, 58 | appName: appName, 59 | tableName: tableName, 60 | done: make(chan struct{}), 61 | maxInterval: 1 * time.Minute, 62 | mu: new(sync.Mutex), 63 | checkpoints: map[key]string{}, 64 | } 65 | 66 | for _, opt := range opts { 67 | opt(ck) 68 | } 69 | 70 | go ck.loop() 71 | 72 | return ck, nil 73 | } 74 | 75 | // GetMaxInterval returns the maximum interval before the checkpoint 76 | func (c *Checkpoint) GetMaxInterval() time.Duration { 77 | return c.maxInterval 78 | } 79 | 80 | // GetCheckpoint determines if a checkpoint for a particular Shard exists. 81 | // Typically used to determine whether we should start processing the shard with 82 | // TRIM_HORIZON or AFTER_SEQUENCE_NUMBER (if checkpoint exists). 83 | func (c *Checkpoint) GetCheckpoint(streamName, shardID string) (string, error) { 84 | namespace := fmt.Sprintf("%s-%s", c.appName, streamName) 85 | 86 | var sequenceNumber string 87 | getCheckpointQuery := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=? AND shard_id=?;`, c.tableName) //nolint: gas, it replaces only the table name 88 | err := c.conn.QueryRow(getCheckpointQuery, namespace, shardID).Scan(&sequenceNumber) 89 | 90 | if err != nil { 91 | if err == sql.ErrNoRows { 92 | return "", nil 93 | } 94 | return "", err 95 | } 96 | 97 | return sequenceNumber, nil 98 | } 99 | 100 | // SetCheckpoint stores a checkpoint for a shard (e.g. sequence number of last record processed by application). 101 | // Upon failover, record processing is resumed from this point. 
102 | func (c *Checkpoint) SetCheckpoint(streamName, shardID, sequenceNumber string) error { 103 | c.mu.Lock() 104 | defer c.mu.Unlock() 105 | 106 | if sequenceNumber == "" { 107 | return fmt.Errorf("sequence number should not be empty") 108 | } 109 | 110 | key := key{ 111 | streamName: streamName, 112 | shardID: shardID, 113 | } 114 | 115 | c.checkpoints[key] = sequenceNumber 116 | 117 | return nil 118 | } 119 | 120 | // Shutdown the checkpoint. Save any in-flight data. 121 | func (c *Checkpoint) Shutdown() error { 122 | defer c.conn.Close() 123 | 124 | c.done <- struct{}{} 125 | 126 | return c.save() 127 | } 128 | 129 | func (c *Checkpoint) loop() { 130 | tick := time.NewTicker(c.maxInterval) 131 | defer tick.Stop() 132 | defer close(c.done) 133 | 134 | for { 135 | select { 136 | case <-tick.C: 137 | c.save() 138 | case <-c.done: 139 | return 140 | } 141 | } 142 | } 143 | 144 | func (c *Checkpoint) save() error { 145 | c.mu.Lock() 146 | defer c.mu.Unlock() 147 | 148 | //nolint: gas, it replaces only the table name 149 | upsertCheckpoint := fmt.Sprintf(`REPLACE INTO %s (namespace, shard_id, sequence_number) VALUES (?, ?, ?)`, c.tableName) 150 | 151 | for key, sequenceNumber := range c.checkpoints { 152 | if _, err := c.conn.Exec(upsertCheckpoint, fmt.Sprintf("%s-%s", c.appName, key.streamName), key.shardID, sequenceNumber); err != nil { 153 | return err 154 | } 155 | } 156 | 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /store/postgres/postgres.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | "sync" 8 | "time" 9 | 10 | // this is the postgres package so it makes sense to be here 11 | _ "github.com/lib/pq" 12 | ) 13 | 14 | type key struct { 15 | streamName string 16 | shardID string 17 | } 18 | 19 | // Option is used to override defaults when creating a new Checkpoint 20 | type 
Option func(*Checkpoint) 21 | 22 | // WithMaxInterval sets the flush interval 23 | func WithMaxInterval(maxInterval time.Duration) Option { 24 | return func(c *Checkpoint) { 25 | c.maxInterval = maxInterval 26 | } 27 | } 28 | 29 | // Checkpoint stores and retrieves the last evaluated key from a DDB scan 30 | type Checkpoint struct { 31 | appName string 32 | tableName string 33 | conn *sql.DB 34 | mu *sync.Mutex // protects the checkpoints 35 | done chan struct{} 36 | checkpoints map[key]string 37 | maxInterval time.Duration 38 | } 39 | 40 | // New returns a checkpoint that uses PostgresDB for underlying storage 41 | // Using connectionStr turn it more flexible to use specific db configs 42 | func New(appName, tableName, connectionStr string, opts ...Option) (*Checkpoint, error) { 43 | if appName == "" { 44 | return nil, errors.New("application name not defined") 45 | } 46 | 47 | if tableName == "" { 48 | return nil, errors.New("table name not defined") 49 | } 50 | 51 | conn, err := sql.Open("postgres", connectionStr) 52 | 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | ck := &Checkpoint{ 58 | conn: conn, 59 | appName: appName, 60 | tableName: tableName, 61 | done: make(chan struct{}), 62 | maxInterval: 1 * time.Minute, 63 | mu: new(sync.Mutex), 64 | checkpoints: map[key]string{}, 65 | } 66 | 67 | for _, opt := range opts { 68 | opt(ck) 69 | } 70 | 71 | go ck.loop() 72 | 73 | return ck, nil 74 | } 75 | 76 | // GetMaxInterval returns the maximum interval before the checkpoint 77 | func (c *Checkpoint) GetMaxInterval() time.Duration { 78 | return c.maxInterval 79 | } 80 | 81 | // GetCheckpoint determines if a checkpoint for a particular Shard exists. 82 | // Typically used to determine whether we should start processing the shard with 83 | // TRIM_HORIZON or AFTER_SEQUENCE_NUMBER (if checkpoint exists). 
84 | func (c *Checkpoint) GetCheckpoint(streamName, shardID string) (string, error) { 85 | namespace := fmt.Sprintf("%s-%s", c.appName, streamName) 86 | 87 | var sequenceNumber string 88 | getCheckpointQuery := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=$1 AND shard_id=$2;`, c.tableName) //nolint: gas, it replaces only the table name 89 | err := c.conn.QueryRow(getCheckpointQuery, namespace, shardID).Scan(&sequenceNumber) 90 | 91 | if err != nil { 92 | if err == sql.ErrNoRows { 93 | return "", nil 94 | } 95 | return "", err 96 | } 97 | 98 | return sequenceNumber, nil 99 | } 100 | 101 | // SetCheckpoint stores a checkpoint for a shard (e.g. sequence number of last record processed by application). 102 | // Upon failover, record processing is resumed from this point. 103 | func (c *Checkpoint) SetCheckpoint(streamName, shardID, sequenceNumber string) error { 104 | c.mu.Lock() 105 | defer c.mu.Unlock() 106 | 107 | if sequenceNumber == "" { 108 | return fmt.Errorf("sequence number should not be empty") 109 | } 110 | 111 | key := key{ 112 | streamName: streamName, 113 | shardID: shardID, 114 | } 115 | 116 | c.checkpoints[key] = sequenceNumber 117 | 118 | return nil 119 | } 120 | 121 | // Shutdown the checkpoint. Save any in-flight data. 
122 | func (c *Checkpoint) Shutdown() error { 123 | defer c.conn.Close() 124 | 125 | c.done <- struct{}{} 126 | 127 | return c.save() 128 | } 129 | 130 | func (c *Checkpoint) loop() { 131 | tick := time.NewTicker(c.maxInterval) 132 | defer tick.Stop() 133 | defer close(c.done) 134 | 135 | for { 136 | select { 137 | case <-tick.C: 138 | c.save() 139 | case <-c.done: 140 | return 141 | } 142 | } 143 | } 144 | 145 | func (c *Checkpoint) save() error { 146 | c.mu.Lock() 147 | defer c.mu.Unlock() 148 | 149 | //nolint: gas, it replaces only the table name 150 | upsertCheckpoint := fmt.Sprintf(`INSERT INTO %s (namespace, shard_id, sequence_number) 151 | VALUES($1, $2, $3) 152 | ON CONFLICT (namespace, shard_id) 153 | DO 154 | UPDATE 155 | SET sequence_number= $3;`, c.tableName) 156 | 157 | for key, sequenceNumber := range c.checkpoints { 158 | if _, err := c.conn.Exec(upsertCheckpoint, fmt.Sprintf("%s-%s", c.appName, key.streamName), key.shardID, sequenceNumber); err != nil { 159 | return err 160 | } 161 | } 162 | 163 | return nil 164 | } 165 | -------------------------------------------------------------------------------- /allgroup.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 10 | ) 11 | 12 | // NewAllGroup returns an initialized AllGroup for consuming 13 | // all shards on a stream 14 | func NewAllGroup(ksis kinesisClient, store Store, streamName string, logger Logger) *AllGroup { 15 | return &AllGroup{ 16 | ksis: ksis, 17 | shards: make(map[string]types.Shard), 18 | shardsClosed: make(map[string]chan struct{}), 19 | streamName: streamName, 20 | logger: logger, 21 | Store: store, 22 | } 23 | } 24 | 25 | // AllGroup is used to consume all shards from a single consumer. 
It 26 | // caches a local list of the shards we are already processing 27 | // and routinely polls the stream looking for new shards to process. 28 | type AllGroup struct { 29 | ksis kinesisClient 30 | streamName string 31 | logger Logger 32 | Store 33 | 34 | shardMu sync.Mutex 35 | shards map[string]types.Shard 36 | shardsClosed map[string]chan struct{} 37 | } 38 | 39 | // Start is a blocking operation which will loop and attempt to find new 40 | // shards on a regular cadence. 41 | func (g *AllGroup) Start(ctx context.Context, shardC chan types.Shard) error { 42 | // Note: while ticker is a rather naive approach to this problem, 43 | // it actually simplifies a few things. I.e. If we miss a new shard 44 | // while AWS is resharding, we'll pick it up max 30 seconds later. 45 | 46 | // It might be worth refactoring this flow to allow the consumer 47 | // to notify the broker when a shard is closed. However, shards don't 48 | // necessarily close at the same time, so we could potentially get a 49 | // thundering heard of notifications from the consumer. 50 | 51 | var ticker = time.NewTicker(30 * time.Second) 52 | 53 | for { 54 | if err := g.findNewShards(ctx, shardC); err != nil { 55 | ticker.Stop() 56 | return err 57 | } 58 | 59 | select { 60 | case <-ctx.Done(): 61 | ticker.Stop() 62 | return nil 63 | case <-ticker.C: 64 | } 65 | } 66 | } 67 | 68 | func (g *AllGroup) CloseShard(_ context.Context, shardID string) error { 69 | g.shardMu.Lock() 70 | defer g.shardMu.Unlock() 71 | c, ok := g.shardsClosed[shardID] 72 | if !ok { 73 | return fmt.Errorf("closing unknown shard ID %q", shardID) 74 | } 75 | close(c) 76 | return nil 77 | } 78 | 79 | func waitForCloseChannel(ctx context.Context, c <-chan struct{}) bool { 80 | if c == nil { 81 | // no channel means we haven't seen this shard in listShards, so it 82 | // probably fell off the TRIM_HORIZON, and we can assume it's fully processed. 
83 | return true 84 | } 85 | select { 86 | case <-ctx.Done(): 87 | return false 88 | case <-c: 89 | // the channel has been processed and closed by the consumer (CloseShard has been called) 90 | return true 91 | } 92 | } 93 | 94 | // findNewShards pulls the list of shards from the Kinesis API 95 | // and uses a local cache to determine if we are already processing 96 | // a particular shard. 97 | func (g *AllGroup) findNewShards(ctx context.Context, shardC chan types.Shard) error { 98 | g.shardMu.Lock() 99 | defer g.shardMu.Unlock() 100 | 101 | g.logger.Log("[GROUP]", "fetching shards") 102 | 103 | shards, err := listShards(ctx, g.ksis, g.streamName) 104 | if err != nil { 105 | g.logger.Log("[GROUP] error:", err) 106 | return err 107 | } 108 | 109 | // We do two `for` loops, since we have to set up all the `shardClosed` 110 | // channels before we start using any of them. It's highly probable 111 | // that Kinesis provides us the shards in dependency order (parents 112 | // before children), but it doesn't appear to be a guarantee. 113 | newShards := make(map[string]types.Shard) 114 | for _, shard := range shards { 115 | if _, ok := g.shards[*shard.ShardId]; ok { 116 | continue 117 | } 118 | g.shards[*shard.ShardId] = shard 119 | g.shardsClosed[*shard.ShardId] = make(chan struct{}) 120 | newShards[*shard.ShardId] = shard 121 | } 122 | // only new shards need to be checked for parent dependencies 123 | for _, shard := range newShards { 124 | shard := shard // Shadow shard, since we use it in goroutine 125 | var parent1, parent2 <-chan struct{} 126 | if shard.ParentShardId != nil { 127 | parent1 = g.shardsClosed[*shard.ParentShardId] 128 | } 129 | if shard.AdjacentParentShardId != nil { 130 | parent2 = g.shardsClosed[*shard.AdjacentParentShardId] 131 | } 132 | go func() { 133 | // Asynchronously wait for all parents of this shard to be processed 134 | // before providing it out to our client. 
Kinesis guarantees that a 135 | // given partition key's data will be provided to clients in-order, 136 | // but when splits or joins happen, we need to process all parents prior 137 | // to processing children or that ordering guarantee is not maintained. 138 | if waitForCloseChannel(ctx, parent1) && waitForCloseChannel(ctx, parent2) { 139 | shardC <- shard 140 | } 141 | }() 142 | } 143 | return nil 144 | } 145 | -------------------------------------------------------------------------------- /store/ddb/ddb.go: -------------------------------------------------------------------------------- 1 | package ddb 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "sync" 8 | "time" 9 | 10 | "github.com/aws/aws-sdk-go-v2/aws" 11 | "github.com/aws/aws-sdk-go-v2/config" 12 | "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" 13 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 14 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 15 | ) 16 | 17 | // Option is used to override defaults when creating a new Checkpoint 18 | type Option func(*Checkpoint) 19 | 20 | // WithMaxInterval sets the flush interval 21 | func WithMaxInterval(maxInterval time.Duration) Option { 22 | return func(c *Checkpoint) { 23 | c.maxInterval = maxInterval 24 | } 25 | } 26 | 27 | // WithDynamoClient sets the dynamoDb client 28 | func WithDynamoClient(svc *dynamodb.Client) Option { 29 | return func(c *Checkpoint) { 30 | c.client = svc 31 | } 32 | } 33 | 34 | // WithRetryer sets the retryer 35 | func WithRetryer(r Retryer) Option { 36 | return func(c *Checkpoint) { 37 | c.retryer = r 38 | } 39 | } 40 | 41 | // New returns a checkpoint that uses DynamoDB for underlying storage 42 | func New(appName, tableName string, opts ...Option) (*Checkpoint, error) { 43 | ck := &Checkpoint{ 44 | tableName: tableName, 45 | appName: appName, 46 | maxInterval: time.Duration(1 * time.Minute), 47 | done: make(chan struct{}), 48 | mu: &sync.Mutex{}, 49 | checkpoints: map[key]string{}, 50 | retryer: 
&DefaultRetryer{}, 51 | } 52 | 53 | for _, opt := range opts { 54 | opt(ck) 55 | } 56 | 57 | // default client 58 | if ck.client == nil { 59 | cfg, err := config.LoadDefaultConfig(context.TODO()) 60 | if err != nil { 61 | log.Fatalf("unable to load SDK config, %v", err) 62 | } 63 | ck.client = dynamodb.NewFromConfig(cfg) 64 | } 65 | 66 | go ck.loop() 67 | 68 | return ck, nil 69 | } 70 | 71 | // Checkpoint stores and retreives the last evaluated key from a DDB scan 72 | type Checkpoint struct { 73 | tableName string 74 | appName string 75 | client *dynamodb.Client 76 | maxInterval time.Duration 77 | mu *sync.Mutex // protects the checkpoints 78 | checkpoints map[key]string 79 | done chan struct{} 80 | retryer Retryer 81 | } 82 | 83 | type key struct { 84 | StreamName string 85 | ShardID string 86 | } 87 | 88 | type item struct { 89 | Namespace string `json:"namespace" dynamodbav:"namespace"` 90 | ShardID string `json:"shard_id" dynamodbav:"shard_id"` 91 | SequenceNumber string `json:"sequence_number" dynamodbav:"sequence_number"` 92 | } 93 | 94 | // GetCheckpoint determines if a checkpoint for a particular Shard exists. 95 | // Typically used to determine whether we should start processing the shard with 96 | // TRIM_HORIZON or AFTER_SEQUENCE_NUMBER (if checkpoint exists). 
97 | func (c *Checkpoint) GetCheckpoint(streamName, shardID string) (string, error) { 98 | namespace := fmt.Sprintf("%s-%s", c.appName, streamName) 99 | 100 | params := &dynamodb.GetItemInput{ 101 | TableName: aws.String(c.tableName), 102 | ConsistentRead: aws.Bool(true), 103 | Key: map[string]types.AttributeValue{ 104 | "namespace": &types.AttributeValueMemberS{Value: namespace}, 105 | "shard_id": &types.AttributeValueMemberS{Value: shardID}, 106 | }, 107 | } 108 | 109 | resp, err := c.client.GetItem(context.Background(), params) 110 | if err != nil { 111 | if c.retryer.ShouldRetry(err) { 112 | return c.GetCheckpoint(streamName, shardID) 113 | } 114 | return "", err 115 | } 116 | 117 | var i item 118 | attributevalue.UnmarshalMap(resp.Item, &i) 119 | return i.SequenceNumber, nil 120 | } 121 | 122 | // SetCheckpoint stores a checkpoint for a shard (e.g. sequence number of last record processed by application). 123 | // Upon failover, record processing is resumed from this point. 124 | func (c *Checkpoint) SetCheckpoint(streamName, shardID, sequenceNumber string) error { 125 | c.mu.Lock() 126 | defer c.mu.Unlock() 127 | 128 | if sequenceNumber == "" { 129 | return fmt.Errorf("sequence number should not be empty") 130 | } 131 | 132 | key := key{ 133 | StreamName: streamName, 134 | ShardID: shardID, 135 | } 136 | c.checkpoints[key] = sequenceNumber 137 | 138 | return nil 139 | } 140 | 141 | // Shutdown the checkpoint. Save any in-flight data. 
142 | func (c *Checkpoint) Shutdown() error { 143 | c.done <- struct{}{} 144 | return c.save() 145 | } 146 | 147 | func (c *Checkpoint) loop() { 148 | tick := time.NewTicker(c.maxInterval) 149 | defer tick.Stop() 150 | defer close(c.done) 151 | 152 | for { 153 | select { 154 | case <-tick.C: 155 | c.save() 156 | case <-c.done: 157 | return 158 | } 159 | } 160 | } 161 | 162 | func (c *Checkpoint) save() error { 163 | c.mu.Lock() 164 | defer c.mu.Unlock() 165 | 166 | for key, sequenceNumber := range c.checkpoints { 167 | item, err := attributevalue.MarshalMap(item{ 168 | Namespace: fmt.Sprintf("%s-%s", c.appName, key.StreamName), 169 | ShardID: key.ShardID, 170 | SequenceNumber: sequenceNumber, 171 | }) 172 | if err != nil { 173 | log.Printf("marshal map error: %v", err) 174 | return nil 175 | } 176 | 177 | _, err = c.client.PutItem( 178 | context.TODO(), 179 | &dynamodb.PutItemInput{ 180 | TableName: aws.String(c.tableName), 181 | Item: item, 182 | }) 183 | if err != nil { 184 | if !c.retryer.ShouldRetry(err) { 185 | return err 186 | } 187 | return c.save() 188 | } 189 | } 190 | 191 | return nil 192 | } 193 | -------------------------------------------------------------------------------- /examples/consumer-dynamodb/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "expvar" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "net" 10 | "net/http" 11 | "os" 12 | "os/signal" 13 | "time" 14 | 15 | alog "github.com/apex/log" 16 | "github.com/apex/log/handlers/text" 17 | "github.com/aws/aws-sdk-go-v2/aws" 18 | "github.com/aws/aws-sdk-go-v2/config" 19 | "github.com/aws/aws-sdk-go-v2/credentials" 20 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 21 | ddbtypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 22 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 23 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 24 | consumer "github.com/harlow/kinesis-consumer" 25 | storage 
"github.com/harlow/kinesis-consumer/store/ddb" 26 | ) 27 | 28 | // kick off a server for exposing scan metrics 29 | func init() { 30 | sock, err := net.Listen("tcp", "localhost:8080") 31 | if err != nil { 32 | log.Printf("net listen error: %v", err) 33 | } 34 | go func() { 35 | fmt.Println("Metrics available at http://localhost:8080/debug/vars") 36 | http.Serve(sock, nil) 37 | }() 38 | } 39 | 40 | // A myLogger provides a minimalistic logger satisfying the Logger interface. 41 | type myLogger struct { 42 | logger alog.Logger 43 | } 44 | 45 | // Log logs the parameters to the stdlib logger. See log.Println. 46 | func (l *myLogger) Log(args ...interface{}) { 47 | l.logger.Infof("producer: %v", args...) 48 | } 49 | 50 | func main() { 51 | // Wrap myLogger around apex logger 52 | mylog := &myLogger{ 53 | logger: alog.Logger{ 54 | Handler: text.New(os.Stdout), 55 | Level: alog.DebugLevel, 56 | }, 57 | } 58 | 59 | var ( 60 | app = flag.String("app", "", "Consumer app name") 61 | stream = flag.String("stream", "", "Stream name") 62 | tableName = flag.String("table", "", "Checkpoint table name") 63 | ddbEndpoint = flag.String("ddb-endpoint", "http://localhost:8000", "DynamoDB endpoint") 64 | kinesisEndpoint = flag.String("ksis-endpoint", "http://localhost:4567", "Kinesis endpoint") 65 | awsRegion = flag.String("region", "us-west-2", "AWS Region") 66 | ) 67 | flag.Parse() 68 | 69 | // set up clients 70 | kcfg, err := newConfig(*kinesisEndpoint, *awsRegion) 71 | if err != nil { 72 | log.Fatalf("new kinesis config error: %v", err) 73 | } 74 | var myKsis = kinesis.NewFromConfig(kcfg) 75 | 76 | dcfg, err := newConfig(*ddbEndpoint, *awsRegion) 77 | if err != nil { 78 | log.Fatalf("new ddb config error: %v", err) 79 | } 80 | var myDdbClient = dynamodb.NewFromConfig(dcfg) 81 | 82 | // ddb checkpoint table 83 | if err := createTable(myDdbClient, *tableName); err != nil { 84 | log.Fatalf("create ddb table error: %v", err) 85 | } 86 | 87 | // ddb persitance 88 | ddb, err := 
storage.New(*app, *tableName, storage.WithDynamoClient(myDdbClient), storage.WithRetryer(&MyRetryer{})) 89 | if err != nil { 90 | log.Fatalf("checkpoint error: %v", err) 91 | } 92 | 93 | // expvar counter 94 | var counter = expvar.NewMap("counters") 95 | 96 | // consumer 97 | c, err := consumer.New( 98 | *stream, 99 | consumer.WithStore(ddb), 100 | consumer.WithLogger(mylog), 101 | consumer.WithCounter(counter), 102 | consumer.WithClient(myKsis), 103 | ) 104 | if err != nil { 105 | log.Fatalf("consumer error: %v", err) 106 | } 107 | 108 | // use cancel func to signal shutdown 109 | ctx, cancel := context.WithCancel(context.Background()) 110 | 111 | // trap SIGINT, wait to trigger shutdown 112 | signals := make(chan os.Signal, 1) 113 | signal.Notify(signals, os.Interrupt) 114 | 115 | go func() { 116 | <-signals 117 | cancel() 118 | }() 119 | 120 | // scan stream 121 | err = c.Scan(ctx, func(r *consumer.Record) error { 122 | fmt.Println(string(r.Data)) 123 | return nil // continue scanning 124 | }) 125 | if err != nil { 126 | log.Fatalf("scan error: %v", err) 127 | } 128 | 129 | if err := ddb.Shutdown(); err != nil { 130 | log.Fatalf("storage shutdown error: %v", err) 131 | } 132 | } 133 | 134 | func createTable(client *dynamodb.Client, tableName string) error { 135 | resp, err := client.ListTables(context.Background(), &dynamodb.ListTablesInput{}) 136 | if err != nil { 137 | return fmt.Errorf("list streams error: %v", err) 138 | } 139 | 140 | for _, val := range resp.TableNames { 141 | if tableName == val { 142 | return nil 143 | } 144 | } 145 | 146 | _, err = client.CreateTable( 147 | context.Background(), 148 | &dynamodb.CreateTableInput{ 149 | TableName: aws.String(tableName), 150 | AttributeDefinitions: []ddbtypes.AttributeDefinition{ 151 | {AttributeName: aws.String("namespace"), AttributeType: "S"}, 152 | {AttributeName: aws.String("shard_id"), AttributeType: "S"}, 153 | }, 154 | KeySchema: []ddbtypes.KeySchemaElement{ 155 | {AttributeName: 
aws.String("namespace"), KeyType: ddbtypes.KeyTypeHash}, 156 | {AttributeName: aws.String("shard_id"), KeyType: ddbtypes.KeyTypeRange}, 157 | }, 158 | ProvisionedThroughput: &ddbtypes.ProvisionedThroughput{ 159 | ReadCapacityUnits: aws.Int64(1), 160 | WriteCapacityUnits: aws.Int64(1), 161 | }, 162 | }, 163 | ) 164 | if err != nil { 165 | return err 166 | } 167 | 168 | waiter := dynamodb.NewTableExistsWaiter(client) 169 | return waiter.Wait( 170 | context.Background(), 171 | &dynamodb.DescribeTableInput{ 172 | TableName: aws.String(tableName), 173 | }, 174 | 5*time.Second, 175 | ) 176 | } 177 | 178 | // MyRetryer used for storage 179 | type MyRetryer struct { 180 | storage.Retryer 181 | } 182 | 183 | // ShouldRetry implements custom logic for when errors should retry 184 | func (r *MyRetryer) ShouldRetry(err error) bool { 185 | switch err.(type) { 186 | case *types.ProvisionedThroughputExceededException, *types.LimitExceededException: 187 | return true 188 | } 189 | return false 190 | } 191 | 192 | func newConfig(url, region string) (aws.Config, error) { 193 | resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { 194 | return aws.Endpoint{ 195 | PartitionID: "aws", 196 | URL: url, 197 | SigningRegion: region, 198 | }, nil 199 | }) 200 | 201 | return config.LoadDefaultConfig( 202 | context.TODO(), 203 | config.WithRegion(region), 204 | config.WithEndpointResolver(resolver), 205 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("user", "pass", "token")), 206 | ) 207 | } 208 | -------------------------------------------------------------------------------- /internal/deaggregator/deaggregator_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | package deaggregator_test 4 | 5 | import ( 6 | "crypto/md5" 7 | "fmt" 8 | "math/rand" 9 | "testing" 10 | "time" 11 | 12 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 13 | "github.com/golang/protobuf/proto" 14 | "github.com/stretchr/testify/assert" 15 | 16 | rec "github.com/awslabs/kinesis-aggregation/go/records" 17 | deagg "github.com/harlow/kinesis-consumer/internal/deaggregator" 18 | ) 19 | 20 | // Generate an aggregate record in the correct AWS-specified format 21 | // https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md 22 | func generateAggregateRecord(numRecords int) []byte { 23 | 24 | aggr := &rec.AggregatedRecord{} 25 | // Start with the magic header 26 | aggRecord := []byte("\xf3\x89\x9a\xc2") 27 | partKeyTable := make([]string, 0) 28 | 29 | // Create proto record with numRecords length 30 | for i := 0; i < numRecords; i++ { 31 | var partKey uint64 32 | var hashKey uint64 33 | partKey = uint64(i) 34 | hashKey = uint64(i) * uint64(10) 35 | r := &rec.Record{ 36 | PartitionKeyIndex: &partKey, 37 | ExplicitHashKeyIndex: &hashKey, 38 | Data: []byte("Some test data string"), 39 | Tags: make([]*rec.Tag, 0), 40 | } 41 | 42 | aggr.Records = append(aggr.Records, r) 43 | partKeyVal := "test" + fmt.Sprint(i) 44 | partKeyTable = append(partKeyTable, partKeyVal) 45 | } 46 | 47 | aggr.PartitionKeyTable = partKeyTable 48 | // Marshal to protobuf record, create md5 sum from proto record 49 | // and append both to aggRecord with magic header 50 | data, _ := proto.Marshal(aggr) 51 | md5Hash := md5.Sum(data) 52 | aggRecord = append(aggRecord, data...) 53 | aggRecord = append(aggRecord, md5Hash[:]...) 
54 | return aggRecord 55 | } 56 | 57 | // Generate a generic kinesis.Record using whatever []byte 58 | // is passed in as the data (can be normal []byte or proto record) 59 | func generateKinesisRecord(data []byte) *types.Record { 60 | currentTime := time.Now() 61 | encryptionType := types.EncryptionTypeNone 62 | partitionKey := "1234" 63 | sequenceNumber := "21269319989900637946712965403778482371" 64 | return &types.Record{ 65 | ApproximateArrivalTimestamp: ¤tTime, 66 | Data: data, 67 | EncryptionType: encryptionType, 68 | PartitionKey: &partitionKey, 69 | SequenceNumber: &sequenceNumber, 70 | } 71 | } 72 | 73 | // This tests to make sure that the data is at least larger than the length 74 | // of the magic header to do some array slicing with index out of bounds 75 | func TestSmallLengthReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) { 76 | var err error 77 | var kr *types.Record 78 | 79 | krs := make([]*types.Record, 0, 1) 80 | 81 | smallByte := []byte("No") 82 | kr = generateKinesisRecord(smallByte) 83 | krs = append(krs, kr) 84 | dars, err := deagg.DeaggregateRecords(krs) 85 | if err != nil { 86 | panic(err) 87 | } 88 | 89 | // Small byte test, since this is not a deaggregated record, should return 1 90 | // record in the array. 91 | assert.Equal(t, 1, len(dars), "Small Byte test should return length of 1.") 92 | } 93 | 94 | // This function tests to make sure that the data starts with the correct magic header 95 | // according to KPL aggregate documentation. 
96 | func TestNonMatchingMagicHeaderReturnsSingleRecord(t *testing.T) { 97 | var err error 98 | var kr *types.Record 99 | 100 | krs := make([]*types.Record, 0, 1) 101 | 102 | min := 1 103 | max := 10 104 | n := rand.Intn(max-min) + min 105 | aggData := generateAggregateRecord(n) 106 | mismatchAggData := aggData[1:] 107 | kr = generateKinesisRecord(mismatchAggData) 108 | 109 | krs = append(krs, kr) 110 | 111 | dars, err := deagg.DeaggregateRecords(krs) 112 | if err != nil { 113 | panic(err) 114 | } 115 | 116 | // A byte record with a magic header that does not match 0xF3 0x89 0x9A 0xC2 117 | // should return a single record. 118 | assert.Equal(t, 1, len(dars), "Mismatch magic header test should return length of 1.") 119 | } 120 | 121 | // This function tests that the DeaggregateRecords function returns the correct number of 122 | // deaggregated records from a single aggregated record. 123 | func TestVariableLengthRecordsReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) { 124 | var err error 125 | var kr *types.Record 126 | 127 | krs := make([]*types.Record, 0, 1) 128 | 129 | min := 1 130 | max := 10 131 | n := rand.Intn(max-min) + min 132 | aggData := generateAggregateRecord(n) 133 | kr = generateKinesisRecord(aggData) 134 | krs = append(krs, kr) 135 | 136 | dars, err := deagg.DeaggregateRecords(krs) 137 | if err != nil { 138 | panic(err) 139 | } 140 | 141 | // Variable Length Aggregate Record test has aggregaterd records and should return 142 | // n length. 143 | assertMsg := fmt.Sprintf("Variable Length Aggregate Record should return length %v.", len(dars)) 144 | assert.Equal(t, n, len(dars), assertMsg) 145 | } 146 | 147 | // This function tests the length of the message after magic file header. If length is less than 148 | // the digest size (16 bytes), it is not an aggregated record. 
149 | func TestRecordAfterMagicHeaderWithLengthLessThanDigestSizeReturnsSingleRecord(t *testing.T) { 150 | var err error 151 | var kr *types.Record 152 | 153 | krs := make([]*types.Record, 0, 1) 154 | 155 | min := 1 156 | max := 10 157 | n := rand.Intn(max-min) + min 158 | aggData := generateAggregateRecord(n) 159 | // Change size of proto message to 15 160 | reducedAggData := aggData[:19] 161 | kr = generateKinesisRecord(reducedAggData) 162 | 163 | krs = append(krs, kr) 164 | 165 | dars, err := deagg.DeaggregateRecords(krs) 166 | if err != nil { 167 | panic(err) 168 | } 169 | 170 | // A byte record with length less than 16 after the magic header should return 171 | // a single record from DeaggregateRecords 172 | assert.Equal(t, 1, len(dars), "Digest size test should return length of 1.") 173 | } 174 | 175 | // This function tests the MD5 Sum at the end of the record by comparing MD5 sum 176 | // at end of proto record with MD5 Sum of Proto message. If they do not match, 177 | // it is not an aggregated record. 
178 | func TestRecordWithMismatchMd5SumReturnsSingleRecord(t *testing.T) { 179 | var err error 180 | var kr *types.Record 181 | 182 | krs := make([]*types.Record, 0, 1) 183 | 184 | min := 1 185 | max := 10 186 | n := rand.Intn(max-min) + min 187 | aggData := generateAggregateRecord(n) 188 | // Remove last byte from array to mismatch the MD5 sums 189 | mismatchAggData := aggData[:len(aggData)-1] 190 | kr = generateKinesisRecord(mismatchAggData) 191 | 192 | krs = append(krs, kr) 193 | 194 | dars, err := deagg.DeaggregateRecords(krs) 195 | if err != nil { 196 | panic(err) 197 | } 198 | 199 | // A byte record with an MD5 sum that does not match with the md5.Sum(record) 200 | // will be marked as a non-aggregate record and return a single record 201 | assert.Equal(t, 1, len(dars), "Mismatch md5 sum test should return length of 1.") 202 | } 203 | -------------------------------------------------------------------------------- /consumer.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log" 9 | "sync" 10 | "time" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/config" 14 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 15 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 16 | "github.com/harlow/kinesis-consumer/internal/deaggregator" 17 | ) 18 | 19 | // Record wraps the record returned from the Kinesis library and 20 | // extends to include the shard id. 21 | type Record struct { 22 | types.Record 23 | ShardID string 24 | MillisBehindLatest *int64 25 | } 26 | 27 | // New creates a kinesis consumer with default settings. Use Option to override 28 | // any of the optional attributes. 
29 | func New(streamName string, opts ...Option) (*Consumer, error) { 30 | if streamName == "" { 31 | return nil, errors.New("must provide stream name") 32 | } 33 | 34 | // new consumer with noop storage, counter, and logger 35 | c := &Consumer{ 36 | streamName: streamName, 37 | initialShardIteratorType: types.ShardIteratorTypeLatest, 38 | store: &noopStore{}, 39 | counter: &noopCounter{}, 40 | getRecordsOpts: []func(*kinesis.Options){}, 41 | logger: &noopLogger{ 42 | logger: log.New(io.Discard, "", log.LstdFlags), 43 | }, 44 | scanInterval: 250 * time.Millisecond, 45 | maxRecords: 10000, 46 | } 47 | 48 | // override defaults 49 | for _, opt := range opts { 50 | opt(c) 51 | } 52 | 53 | // default client 54 | if c.client == nil { 55 | cfg, err := config.LoadDefaultConfig(context.TODO()) 56 | if err != nil { 57 | log.Fatalf("unable to load SDK config, %v", err) 58 | } 59 | c.client = kinesis.NewFromConfig(cfg) 60 | } 61 | 62 | // default group consumes all shards 63 | if c.group == nil { 64 | c.group = NewAllGroup(c.client, c.store, streamName, c.logger) 65 | } 66 | 67 | return c, nil 68 | } 69 | 70 | // Consumer wraps the interaction with the Kinesis stream 71 | type Consumer struct { 72 | streamName string 73 | initialShardIteratorType types.ShardIteratorType 74 | initialTimestamp *time.Time 75 | client kinesisClient 76 | counter Counter 77 | group Group 78 | logger Logger 79 | store Store 80 | scanInterval time.Duration 81 | maxRecords int64 82 | isAggregated bool 83 | shardClosedHandler ShardClosedHandler 84 | getRecordsOpts []func(*kinesis.Options) 85 | } 86 | 87 | // ScanFunc is the type of the function called for each message read 88 | // from the stream. The record argument contains the original record 89 | // returned from the AWS Kinesis library. 90 | // If an error is returned, scanning stops. The sole exception is when the 91 | // function returns the special value ErrSkipCheckpoint. 
92 | type ScanFunc func(*Record) error 93 | 94 | // ErrSkipCheckpoint is used as a return value from ScanFunc to indicate that 95 | // the current checkpoint should be skipped. It is not returned 96 | // as an error by any function. 97 | var ErrSkipCheckpoint = errors.New("skip checkpoint") 98 | 99 | // Scan launches a goroutine to process each of the shards in the stream. The ScanFunc 100 | // is passed through to each of the goroutines and called with each message pulled from 101 | // the stream. 102 | func (c *Consumer) Scan(ctx context.Context, fn ScanFunc) error { 103 | ctx, cancel := context.WithCancel(ctx) 104 | defer cancel() 105 | 106 | var ( 107 | errC = make(chan error, 1) 108 | shardC = make(chan types.Shard, 1) 109 | ) 110 | 111 | go func() { 112 | err := c.group.Start(ctx, shardC) 113 | if err != nil { 114 | errC <- fmt.Errorf("error starting scan: %w", err) 115 | cancel() 116 | } 117 | <-ctx.Done() 118 | close(shardC) 119 | }() 120 | 121 | wg := new(sync.WaitGroup) 122 | // process each of the shards 123 | s := newShardsInProcess() 124 | for shard := range shardC { 125 | shardId := aws.ToString(shard.ShardId) 126 | if s.doesShardExist(shardId) { 127 | // safetynet: if shard already in process by another goroutine, just skipping the request 128 | continue 129 | } 130 | wg.Add(1) 131 | go func(shardID string) { 132 | s.addShard(shardID) 133 | defer func() { 134 | s.deleteShard(shardID) 135 | }() 136 | defer wg.Done() 137 | var err error 138 | if err = c.ScanShard(ctx, shardID, fn); err != nil { 139 | err = fmt.Errorf("shard %s error: %w", shardID, err) 140 | } else if closeable, ok := c.group.(CloseableGroup); !ok { 141 | // group doesn't allow closure, skip calling CloseShard 142 | } else if err = closeable.CloseShard(ctx, shardID); err != nil { 143 | err = fmt.Errorf("shard closed CloseableGroup error: %w", err) 144 | } 145 | if err != nil { 146 | select { 147 | case errC <- fmt.Errorf("shard %s error: %w", shardID, err): 148 | cancel() 149 | 
default: 150 | } 151 | } 152 | }(shardId) 153 | } 154 | 155 | go func() { 156 | wg.Wait() 157 | close(errC) 158 | }() 159 | 160 | return <-errC 161 | } 162 | 163 | // ScanShard loops over records on a specific shard, calls the callback func 164 | // for each record and checkpoints the progress of scan. 165 | func (c *Consumer) ScanShard(ctx context.Context, shardID string, fn ScanFunc) error { 166 | // get last seq number from checkpoint 167 | lastSeqNum, err := c.group.GetCheckpoint(c.streamName, shardID) 168 | if err != nil { 169 | return fmt.Errorf("get checkpoint error: %w", err) 170 | } 171 | 172 | // get shard iterator 173 | shardIterator, err := c.getShardIterator(ctx, c.streamName, shardID, lastSeqNum) 174 | if err != nil { 175 | return fmt.Errorf("get shard iterator error: %w", err) 176 | } 177 | 178 | c.logger.Log("[CONSUMER] start scan:", shardID, lastSeqNum) 179 | defer func() { 180 | c.logger.Log("[CONSUMER] stop scan:", shardID) 181 | }() 182 | 183 | scanTicker := time.NewTicker(c.scanInterval) 184 | defer scanTicker.Stop() 185 | 186 | for { 187 | resp, err := c.client.GetRecords(ctx, &kinesis.GetRecordsInput{ 188 | Limit: aws.Int32(int32(c.maxRecords)), 189 | ShardIterator: shardIterator, 190 | }, c.getRecordsOpts...) 
191 | 192 | // attempt to recover from GetRecords error 193 | if err != nil { 194 | c.logger.Log("[CONSUMER] get records error:", err.Error()) 195 | 196 | if !isRetriableError(err) { 197 | return fmt.Errorf("get records error: %w", err) 198 | } 199 | 200 | shardIterator, err = c.getShardIterator(ctx, c.streamName, shardID, lastSeqNum) 201 | if err != nil { 202 | return fmt.Errorf("get shard iterator error: %w", err) 203 | } 204 | } else { 205 | // loop over records, call callback func 206 | var records []types.Record 207 | 208 | // deaggregate records 209 | if c.isAggregated { 210 | records, err = deaggregateRecords(resp.Records) 211 | if err != nil { 212 | return err 213 | } 214 | } else { 215 | records = resp.Records 216 | } 217 | 218 | for _, r := range records { 219 | select { 220 | case <-ctx.Done(): 221 | return nil 222 | default: 223 | err := fn(&Record{r, shardID, resp.MillisBehindLatest}) 224 | if err != nil && !errors.Is(err, ErrSkipCheckpoint) { 225 | return err 226 | } 227 | 228 | if err := c.group.SetCheckpoint(c.streamName, shardID, *r.SequenceNumber); err != nil { 229 | return err 230 | } 231 | 232 | c.counter.Add("records", 1) 233 | lastSeqNum = *r.SequenceNumber 234 | } 235 | } 236 | 237 | if isShardClosed(resp.NextShardIterator, shardIterator) { 238 | c.logger.Log("[CONSUMER] shard closed:", shardID) 239 | 240 | if c.shardClosedHandler != nil { 241 | if err := c.shardClosedHandler(c.streamName, shardID); err != nil { 242 | return fmt.Errorf("shard closed handler error: %w", err) 243 | } 244 | } 245 | return nil 246 | } 247 | 248 | shardIterator = resp.NextShardIterator 249 | } 250 | 251 | // Wait for next scan 252 | select { 253 | case <-ctx.Done(): 254 | return nil 255 | case <-scanTicker.C: 256 | continue 257 | } 258 | } 259 | } 260 | 261 | // temporary conversion func of []types.Record -> DeaggregateRecords([]*types.Record) -> []types.Record 262 | func deaggregateRecords(in []types.Record) ([]types.Record, error) { 263 | var recs 
[]*types.Record 264 | for _, rec := range in { 265 | recs = append(recs, &rec) 266 | } 267 | 268 | deagg, err := deaggregator.DeaggregateRecords(recs) 269 | if err != nil { 270 | return nil, err 271 | } 272 | 273 | var out []types.Record 274 | for _, rec := range deagg { 275 | out = append(out, *rec) 276 | } 277 | return out, nil 278 | } 279 | 280 | func (c *Consumer) getShardIterator(ctx context.Context, streamName, shardID, seqNum string) (*string, error) { 281 | params := &kinesis.GetShardIteratorInput{ 282 | ShardId: aws.String(shardID), 283 | StreamName: aws.String(streamName), 284 | } 285 | 286 | if seqNum != "" { 287 | params.ShardIteratorType = types.ShardIteratorTypeAfterSequenceNumber 288 | params.StartingSequenceNumber = aws.String(seqNum) 289 | } else if c.initialTimestamp != nil { 290 | params.ShardIteratorType = types.ShardIteratorTypeAtTimestamp 291 | params.Timestamp = c.initialTimestamp 292 | } else { 293 | params.ShardIteratorType = c.initialShardIteratorType 294 | } 295 | 296 | res, err := c.client.GetShardIterator(ctx, params) 297 | if err != nil { 298 | return nil, err 299 | } 300 | return res.ShardIterator, nil 301 | } 302 | 303 | func isRetriableError(err error) bool { 304 | if oe := (*types.ExpiredIteratorException)(nil); errors.As(err, &oe) { 305 | return true 306 | } 307 | if oe := (*types.ProvisionedThroughputExceededException)(nil); errors.As(err, &oe) { 308 | return true 309 | } 310 | return false 311 | } 312 | 313 | func isShardClosed(nextShardIterator, currentShardIterator *string) bool { 314 | return nextShardIterator == nil || currentShardIterator == nextShardIterator 315 | } 316 | 317 | type shards struct { 318 | shardsInProcess sync.Map 319 | } 320 | 321 | func newShardsInProcess() *shards { 322 | return &shards{} 323 | } 324 | 325 | func (s *shards) addShard(shardId string) { 326 | s.shardsInProcess.Store(shardId, struct{}{}) 327 | } 328 | 329 | func (s *shards) doesShardExist(shardId string) bool { 330 | _, ok := 
s.shardsInProcess.Load(shardId) 331 | return ok 332 | } 333 | 334 | func (s *shards) deleteShard(shardId string) { 335 | s.shardsInProcess.Delete(shardId) 336 | } 337 | -------------------------------------------------------------------------------- /store/mysql/mysql_test.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | sqlmock "github.com/DATA-DOG/go-sqlmock" 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | func TestNew(t *testing.T) { 14 | appName := "streamConsumer" 15 | tableName := "checkpoint" 16 | connString := "user:password@/dbname" 17 | ck, err := New(appName, tableName, connString) 18 | 19 | if ck == nil { 20 | t.Errorf("expected checkpointer not equal nil, but got %v", ck) 21 | } 22 | if err != nil { 23 | t.Errorf("expected error equals nil, but got %v", err) 24 | } 25 | ck.Shutdown() 26 | } 27 | 28 | func TestNew_AppNameEmpty(t *testing.T) { 29 | appName := "" 30 | tableName := "checkpoint" 31 | connString := "" 32 | ck, err := New(appName, tableName, connString) 33 | 34 | if ck != nil { 35 | t.Errorf("expected checkpointer equal nil, but got %v", ck) 36 | } 37 | if err == nil { 38 | t.Errorf("expected error equals not nil, but got %v", err) 39 | } 40 | } 41 | 42 | func TestNew_TableNameEmpty(t *testing.T) { 43 | appName := "streamConsumer" 44 | tableName := "" 45 | connString := "" 46 | ck, err := New(appName, tableName, connString) 47 | 48 | if ck != nil { 49 | t.Errorf("expected checkpointer equal nil, but got %v", ck) 50 | } 51 | if err == nil { 52 | t.Errorf("expected error equals not nil, but got %v", err) 53 | } 54 | } 55 | 56 | func TestNew_WithMaxIntervalOption(t *testing.T) { 57 | appName := "streamConsumer" 58 | tableName := "checkpoint" 59 | connString := "user:password@/dbname" 60 | maxInterval := time.Second 61 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 62 | 63 | if ck == 
nil { 64 | t.Errorf("expected checkpointer not equal nil, but got %v", ck) 65 | } 66 | if ck.GetMaxInterval() != time.Second { 67 | t.Errorf("expected max interval equals %v, but got %v", maxInterval, ck.GetMaxInterval()) 68 | } 69 | if err != nil { 70 | t.Errorf("expected error equals nil, but got %v", err) 71 | } 72 | ck.Shutdown() 73 | } 74 | 75 | func TestCheckpoint_GetCheckpoint(t *testing.T) { 76 | appName := "streamConsumer" 77 | tableName := "checkpoint" 78 | connString := "user:password@/dbname" 79 | streamName := "myStreamName" 80 | shardID := "shardId-00000000" 81 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 82 | maxInterval := time.Second 83 | connMock, mock, err := sqlmock.New() 84 | if err != nil { 85 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 86 | } 87 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 88 | if err != nil { 89 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 90 | } 91 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 92 | 93 | rows := []string{"sequence_number"} 94 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 95 | expectedRows := sqlmock.NewRows(rows) 96 | expectedRows.AddRow(expectedSequenceNumber) 97 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\? 
AND shard_id=\?;`, 98 | tableName) 99 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnRows(expectedRows) 100 | 101 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 102 | 103 | if gotSequenceNumber != expectedSequenceNumber { 104 | t.Errorf("expected sequence number equals %v, but got %v", expectedSequenceNumber, gotSequenceNumber) 105 | } 106 | if err != nil { 107 | t.Errorf("expected error equals nil, but got %v", err) 108 | } 109 | if err := mock.ExpectationsWereMet(); err != nil { 110 | t.Errorf("there were unfulfilled expectations: %s", err) 111 | } 112 | ck.Shutdown() 113 | } 114 | 115 | func TestCheckpoint_Get_NoRows(t *testing.T) { 116 | appName := "streamConsumer" 117 | tableName := "checkpoint" 118 | connString := "user:password@/dbname" 119 | streamName := "myStreamName" 120 | shardID := "shardId-00000000" 121 | maxInterval := time.Second 122 | connMock, mock, err := sqlmock.New() 123 | if err != nil { 124 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 125 | } 126 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 127 | if err != nil { 128 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 129 | } 130 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 131 | 132 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 133 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\? 
AND shard_id=\?;`, 134 | tableName) 135 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnError(sql.ErrNoRows) 136 | 137 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 138 | 139 | if gotSequenceNumber != "" { 140 | t.Errorf("expected sequence number equals empty, but got %v", gotSequenceNumber) 141 | } 142 | if err != nil { 143 | t.Errorf("expected error equals nil, but got %v", err) 144 | } 145 | if err := mock.ExpectationsWereMet(); err != nil { 146 | t.Errorf("there were unfulfilled expectations: %s", err) 147 | } 148 | ck.Shutdown() 149 | } 150 | 151 | func TestCheckpoint_Get_QueryError(t *testing.T) { 152 | appName := "streamConsumer" 153 | tableName := "checkpoint" 154 | connString := "user:password@/dbname" 155 | streamName := "myStreamName" 156 | shardID := "shardId-00000000" 157 | maxInterval := time.Second 158 | connMock, mock, err := sqlmock.New() 159 | if err != nil { 160 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 161 | } 162 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 163 | if err != nil { 164 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 165 | } 166 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 167 | 168 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 169 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\? 
AND shard_id=\?;`, 170 | tableName) 171 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnError(errors.New("an error")) 172 | 173 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 174 | 175 | if gotSequenceNumber != "" { 176 | t.Errorf("expected sequence number equals empty, but got %v", gotSequenceNumber) 177 | } 178 | if err == nil { 179 | t.Errorf("expected error equals not nil, but got %v", err) 180 | } 181 | if err := mock.ExpectationsWereMet(); err != nil { 182 | t.Errorf("there were unfulfilled expectations: %s", err) 183 | } 184 | ck.Shutdown() 185 | } 186 | 187 | func TestCheckpoint_SetCheckpoint(t *testing.T) { 188 | appName := "streamConsumer" 189 | tableName := "checkpoint" 190 | connString := "user:password@/dbname" 191 | streamName := "myStreamName" 192 | shardID := "shardId-00000000" 193 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 194 | maxInterval := time.Second 195 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 196 | if err != nil { 197 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 198 | } 199 | 200 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 201 | 202 | if err != nil { 203 | t.Errorf("expected error equals nil, but got %v", err) 204 | } 205 | ck.Shutdown() 206 | } 207 | 208 | func TestCheckpoint_Set_SequenceNumberEmpty(t *testing.T) { 209 | appName := "streamConsumer" 210 | tableName := "checkpoint" 211 | connString := "user:password@/dbname" 212 | streamName := "myStreamName" 213 | shardID := "shardId-00000000" 214 | expectedSequenceNumber := "" 215 | maxInterval := time.Second 216 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 217 | if err != nil { 218 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 219 | } 220 | 221 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 222 | 223 | if err == nil { 224 | t.Errorf("expected error equals not nil, but got %v", err) 225 | } 226 | ck.Shutdown() 227 | } 228 | 229 | func TestCheckpoint_Shutdown(t *testing.T) { 230 | appName := "streamConsumer" 231 | tableName := "checkpoint" 232 | connString := "user:password@/dbname" 233 | streamName := "myStreamName" 234 | shardID := "shardId-00000000" 235 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 236 | maxInterval := time.Second 237 | connMock, mock, err := sqlmock.New() 238 | if err != nil { 239 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 240 | } 241 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 242 | if err != nil { 243 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 244 | } 245 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 246 | 247 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 248 | expectedSQLRegexString := fmt.Sprintf(`REPLACE INTO %s \(namespace, shard_id, sequence_number\) VALUES \(\?, \?, \?\)`, tableName) 249 | result := sqlmock.NewResult(0, 1) 250 | mock.ExpectExec(expectedSQLRegexString).WithArgs(namespace, shardID, expectedSequenceNumber).WillReturnResult(result) 251 | 252 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 253 | 254 | if err != nil { 255 | t.Fatalf("unable to set checkpoint for data initialization. 
cause: %v", err) 256 | } 257 | 258 | err = ck.Shutdown() 259 | 260 | if err != nil { 261 | t.Errorf("expected error equals not nil, but got %v", err) 262 | } 263 | if err := mock.ExpectationsWereMet(); err != nil { 264 | t.Errorf("there were unfulfilled expectations: %s", err) 265 | } 266 | } 267 | 268 | func TestCheckpoint_Shutdown_SaveError(t *testing.T) { 269 | appName := "streamConsumer" 270 | tableName := "checkpoint" 271 | connString := "user:password@/dbname" 272 | streamName := "myStreamName" 273 | shardID := "shardId-00000000" 274 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 275 | maxInterval := time.Second 276 | connMock, mock, err := sqlmock.New() 277 | if err != nil { 278 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 279 | } 280 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 281 | if err != nil { 282 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 283 | } 284 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 285 | 286 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 287 | expectedSQLRegexString := fmt.Sprintf(`REPLACE INTO %s \(namespace, shard_id, sequence_number\) VALUES \(\?, \?, \?\)`, tableName) 288 | mock.ExpectExec(expectedSQLRegexString).WithArgs(namespace, shardID, expectedSequenceNumber).WillReturnError(errors.New("an error")) 289 | 290 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 291 | 292 | if err != nil { 293 | t.Fatalf("unable to set checkpoint for data initialization. 
cause: %v", err) 294 | } 295 | 296 | err = ck.Shutdown() 297 | 298 | if err == nil { 299 | t.Errorf("expected error equals nil, but got %v", err) 300 | } 301 | if err := mock.ExpectationsWereMet(); err != nil { 302 | t.Errorf("there were unfulfilled expectations: %s", err) 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /store/postgres/postgres_test.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | sqlmock "github.com/DATA-DOG/go-sqlmock" 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | func TestNew(t *testing.T) { 14 | appName := "streamConsumer" 15 | tableName := "checkpoint" 16 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 17 | ck, err := New(appName, tableName, connString) 18 | 19 | if ck == nil { 20 | t.Errorf("expected checkpointer not equal nil, but got %v", ck) 21 | } 22 | if err != nil { 23 | t.Errorf("expected error equals nil, but got %v", err) 24 | } 25 | ck.Shutdown() 26 | } 27 | 28 | func TestNew_AppNameEmpty(t *testing.T) { 29 | appName := "" 30 | tableName := "checkpoint" 31 | connString := "" 32 | ck, err := New(appName, tableName, connString) 33 | 34 | if ck != nil { 35 | t.Errorf("expected checkpointer equal nil, but got %v", ck) 36 | } 37 | if err == nil { 38 | t.Errorf("expected error equals not nil, but got %v", err) 39 | } 40 | } 41 | 42 | func TestNew_TableNameEmpty(t *testing.T) { 43 | appName := "streamConsumer" 44 | tableName := "" 45 | connString := "" 46 | ck, err := New(appName, tableName, connString) 47 | 48 | if ck != nil { 49 | t.Errorf("expected checkpointer equal nil, but got %v", ck) 50 | } 51 | if err == nil { 52 | t.Errorf("expected error equals not nil, but got %v", err) 53 | } 54 | } 55 | 56 | func TestNew_WithMaxIntervalOption(t *testing.T) { 57 | appName := "streamConsumer" 58 | tableName := 
"checkpoint" 59 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 60 | maxInterval := time.Second 61 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 62 | 63 | if ck == nil { 64 | t.Errorf("expected checkpointer not equal nil, but got %v", ck) 65 | } 66 | if ck.GetMaxInterval() != time.Second { 67 | t.Errorf("expected max interval equals %v, but got %v", maxInterval, ck.GetMaxInterval()) 68 | } 69 | if err != nil { 70 | t.Errorf("expected error equals nil, but got %v", err) 71 | } 72 | ck.Shutdown() 73 | } 74 | 75 | func TestCheckpoint_GetCheckpoint(t *testing.T) { 76 | appName := "streamConsumer" 77 | tableName := "checkpoint" 78 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 79 | streamName := "myStreamName" 80 | shardID := "shardId-00000000" 81 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 82 | maxInterval := time.Second 83 | connMock, mock, err := sqlmock.New() 84 | if err != nil { 85 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 86 | } 87 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 88 | if err != nil { 89 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 90 | } 91 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 92 | 93 | rows := []string{"sequence_number"} 94 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 95 | expectedRows := sqlmock.NewRows(rows) 96 | expectedRows.AddRow(expectedSequenceNumber) 97 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\$1 AND shard_id=\$2;`, 98 | tableName) 99 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnRows(expectedRows) 100 | 101 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 102 | 103 | if gotSequenceNumber != expectedSequenceNumber { 104 | t.Errorf("expected sequence number equals %v, but got %v", expectedSequenceNumber, gotSequenceNumber) 105 | } 106 | if err != nil { 107 | t.Errorf("expected error equals nil, but got %v", err) 108 | } 109 | if err := mock.ExpectationsWereMet(); err != nil { 110 | t.Errorf("there were unfulfilled expectations: %s", err) 111 | } 112 | ck.Shutdown() 113 | } 114 | 115 | func TestCheckpoint_Get_NoRows(t *testing.T) { 116 | appName := "streamConsumer" 117 | tableName := "checkpoint" 118 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 119 | streamName := "myStreamName" 120 | shardID := "shardId-00000000" 121 | maxInterval := time.Second 122 | connMock, mock, err := sqlmock.New() 123 | if err != nil { 124 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 125 | } 126 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 127 | if err != nil { 128 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 129 | } 130 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 131 | 132 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 133 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\$1 AND shard_id=\$2;`, 134 | tableName) 135 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnError(sql.ErrNoRows) 136 | 137 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 138 | 139 | if gotSequenceNumber != "" { 140 | t.Errorf("expected sequence number equals empty, but got %v", gotSequenceNumber) 141 | } 142 | if err != nil { 143 | t.Errorf("expected error equals nil, but got %v", err) 144 | } 145 | if err := mock.ExpectationsWereMet(); err != nil { 146 | t.Errorf("there were unfulfilled expectations: %s", err) 147 | } 148 | ck.Shutdown() 149 | } 150 | 151 | func TestCheckpoint_Get_QueryError(t *testing.T) { 152 | appName := "streamConsumer" 153 | tableName := "checkpoint" 154 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 155 | streamName := "myStreamName" 156 | shardID := "shardId-00000000" 157 | maxInterval := time.Second 158 | connMock, mock, err := sqlmock.New() 159 | if err != nil { 160 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 161 | } 162 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 163 | if err != nil { 164 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 165 | } 166 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 167 | 168 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 169 | expectedSQLRegexString := fmt.Sprintf(`SELECT sequence_number FROM %s WHERE namespace=\$1 AND shard_id=\$2;`, 170 | tableName) 171 | mock.ExpectQuery(expectedSQLRegexString).WithArgs(namespace, shardID).WillReturnError(errors.New("an error")) 172 | 173 | gotSequenceNumber, err := ck.GetCheckpoint(streamName, shardID) 174 | 175 | if gotSequenceNumber != "" { 176 | t.Errorf("expected sequence number equals empty, but got %v", gotSequenceNumber) 177 | } 178 | if err == nil { 179 | t.Errorf("expected error equals not nil, but got %v", err) 180 | } 181 | if err := mock.ExpectationsWereMet(); err != nil { 182 | t.Errorf("there were unfulfilled expectations: %s", err) 183 | } 184 | ck.Shutdown() 185 | } 186 | 187 | func TestCheckpoint_SetCheckpoint(t *testing.T) { 188 | appName := "streamConsumer" 189 | tableName := "checkpoint" 190 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 191 | streamName := "myStreamName" 192 | shardID := "shardId-00000000" 193 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 194 | maxInterval := time.Second 195 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 196 | if err != nil { 197 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 198 | } 199 | 200 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 201 | 202 | if err != nil { 203 | t.Errorf("expected error equals nil, but got %v", err) 204 | } 205 | ck.Shutdown() 206 | } 207 | 208 | func TestCheckpoint_Set_SequenceNumberEmpty(t *testing.T) { 209 | appName := "streamConsumer" 210 | tableName := "checkpoint" 211 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 212 | streamName := "myStreamName" 213 | shardID := "shardId-00000000" 214 | expectedSequenceNumber := "" 215 | maxInterval := time.Second 216 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 217 | if err != nil { 218 | t.Fatalf("error occurred during the checkpoint creation. cause: %v", err) 219 | } 220 | 221 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 222 | 223 | if err == nil { 224 | t.Errorf("expected error equals not nil, but got %v", err) 225 | } 226 | ck.Shutdown() 227 | } 228 | 229 | func TestCheckpoint_Shutdown(t *testing.T) { 230 | appName := "streamConsumer" 231 | tableName := "checkpoint" 232 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 233 | streamName := "myStreamName" 234 | shardID := "shardId-00000000" 235 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 236 | maxInterval := time.Second 237 | connMock, mock, err := sqlmock.New() 238 | if err != nil { 239 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 240 | } 241 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 242 | if err != nil { 243 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 244 | } 245 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 246 | 247 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 248 | expectedSQLRegexString := fmt.Sprintf(`INSERT INTO %s \(namespace, shard_id, sequence_number\) VALUES\(\$1, \$2, \$3\) ON CONFLICT \(namespace, shard_id\) DO UPDATE SET sequence_number= \$3;`, tableName) 249 | result := sqlmock.NewResult(0, 1) 250 | mock.ExpectExec(expectedSQLRegexString).WithArgs(namespace, shardID, expectedSequenceNumber).WillReturnResult(result) 251 | 252 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 253 | 254 | if err != nil { 255 | t.Fatalf("unable to set checkpoint for data initialization. cause: %v", err) 256 | } 257 | 258 | err = ck.Shutdown() 259 | 260 | if err != nil { 261 | t.Errorf("expected error equals not nil, but got %v", err) 262 | } 263 | if err := mock.ExpectationsWereMet(); err != nil { 264 | t.Errorf("there were unfulfilled expectations: %s", err) 265 | } 266 | } 267 | 268 | func TestCheckpoint_Shutdown_SaveError(t *testing.T) { 269 | appName := "streamConsumer" 270 | tableName := "checkpoint" 271 | connString := "UserID=root;Password=myPassword;Host=localhost;Port=5432;Database=myDataBase;" 272 | streamName := "myStreamName" 273 | shardID := "shardId-00000000" 274 | expectedSequenceNumber := "49578481031144599192696750682534686652010819674221576194" 275 | maxInterval := time.Second 276 | connMock, mock, err := sqlmock.New() 277 | if err != nil { 278 | t.Fatalf("error occurred during the sqlmock creation. cause: %v", err) 279 | } 280 | ck, err := New(appName, tableName, connString, WithMaxInterval(maxInterval)) 281 | if err != nil { 282 | t.Fatalf("error occurred during the checkpoint creation. 
cause: %v", err) 283 | } 284 | ck.SetConn(connMock) // nolint: gotypex, the function available only in test 285 | 286 | namespace := fmt.Sprintf("%s-%s", appName, streamName) 287 | expectedSQLRegexString := fmt.Sprintf(`INSERT INTO %s \(namespace, shard_id, sequence_number\) VALUES\(\$1, \$2, \$3\) ON CONFLICT \(namespace, shard_id\) DO UPDATE SET sequence_number= \$3;`, tableName) 288 | mock.ExpectExec(expectedSQLRegexString).WithArgs(namespace, shardID, expectedSequenceNumber).WillReturnError(errors.New("an error")) 289 | 290 | err = ck.SetCheckpoint(streamName, shardID, expectedSequenceNumber) 291 | 292 | if err != nil { 293 | t.Fatalf("unable to set checkpoint for data initialization. cause: %v", err) 294 | } 295 | 296 | err = ck.Shutdown() 297 | 298 | if err == nil { 299 | t.Errorf("expected error equals nil, but got %v", err) 300 | } 301 | if err := mock.ExpectationsWereMet(); err != nil { 302 | t.Errorf("there were unfulfilled expectations: %s", err) 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Golang Kinesis Consumer 2 | 3 | ![technology Go](https://img.shields.io/badge/technology-go-blue.svg) [![Build Status](https://travis-ci.com/harlow/kinesis-consumer.svg?branch=master)](https://travis-ci.com/harlow/kinesis-consumer) [![GoDoc](https://godoc.org/github.com/harlow/kinesis-consumer?status.svg)](https://godoc.org/github.com/harlow/kinesis-consumer) [![GoReportCard](https://goreportcard.com/badge/github.com/harlow/kinesis-consumer)](https://goreportcard.com/report/harlow/kinesis-consumer) 4 | 5 | Kinesis consumer applications written in Go. This library is intended to be a lightweight wrapper around the Kinesis API to read records, save checkpoints (with swappable backends), and gracefully recover from service timeouts/errors. 
6 | 7 | __Alternate serverless options:__ 8 | 9 | * [Kinesis to Firehose](http://docs.aws.amazon.com/firehose/latest/dev/writing-with-kinesis-streams.html) can be used to archive data directly to S3, Redshift, or Elasticsearch without running a consumer application. 10 | 11 | * [Process Kinesis Streams with Golang and AWS Lambda](https://medium.com/@harlow/processing-kinesis-streams-w-aws-lambda-and-golang-264efc8f979a) for serverless processing and checkpoint management. 12 | 13 | ## Installation 14 | 15 | Get the package source: 16 | 17 | $ go get github.com/harlow/kinesis-consumer 18 | 19 | Note: This repo now requires the AWS SDK V2 package. If you are still using 20 | AWS SDK V1 then use: https://github.com/harlow/kinesis-consumer/releases/tag/v0.3.5 21 | 22 | ## Overview 23 | 24 | The consumer leverages a handler func that accepts a Kinesis record. The `Scan` method will consume all shards concurrently and call the callback func as it receives records from the stream. 25 | 26 | _Important 1: The `Scan` func will also poll the stream to check for new shards, it will automatically start consuming new shards added to the stream._ 27 | 28 | _Important 2: The default Log, Counter, and Checkpoint are no-op which means no logs, counts, or checkpoints will be emitted when scanning the stream. See the options below to override these defaults._ 29 | 30 | ```go 31 | import( 32 | // ... 
33 | 34 | consumer "github.com/harlow/kinesis-consumer" 35 | ) 36 | 37 | func main() { 38 | var stream = flag.String("stream", "", "Stream name") 39 | flag.Parse() 40 | 41 | // consumer 42 | c, err := consumer.New(*stream) 43 | if err != nil { 44 | log.Fatalf("consumer error: %v", err) 45 | } 46 | 47 | // start scan 48 | err = c.Scan(context.TODO(), func(r *consumer.Record) error { 49 | fmt.Println(string(r.Data)) 50 | return nil // continue scanning 51 | }) 52 | if err != nil { 53 | log.Fatalf("scan error: %v", err) 54 | } 55 | 56 | // Note: If you need to aggregate based on a specific shard 57 | // the `ScanShard` function should be used instead. 58 | } 59 | ``` 60 | 61 | ## ScanFunc 62 | 63 | ScanFunc is the type of the function called for each message read 64 | from the stream. The record argument contains the original record 65 | returned from the AWS Kinesis library. 66 | 67 | ```go 68 | type ScanFunc func(r *Record) error 69 | ``` 70 | 71 | If an error is returned, scanning stops. The sole exception is when the 72 | function returns the special value SkipCheckpoint. 73 | 74 | ```go 75 | // continue scanning 76 | return nil 77 | 78 | // continue scanning, skip checkpoint 79 | return consumer.SkipCheckpoint 80 | 81 | // stop scanning, return error 82 | return errors.New("my error, exit all scans") 83 | ``` 84 | 85 | Use context cancel to signal the scan to exit without error. For example if we wanted to gracefully exit the scan on interrupt. 
86 | 87 | ```go 88 | // trap SIGINT, wait to trigger shutdown 89 | signals := make(chan os.Signal, 1) 90 | signal.Notify(signals, os.Interrupt) 91 | 92 | // context with cancel 93 | ctx, cancel := context.WithCancel(context.Background()) 94 | 95 | go func() { 96 | <-signals 97 | cancel() // call cancellation 98 | }() 99 | 100 | err := c.Scan(ctx, func(r *consumer.Record) error { 101 | fmt.Println(string(r.Data)) 102 | return nil // continue scanning 103 | }) 104 | ``` 105 | 106 | ## Options 107 | 108 | The consumer allows the following optional overrides. 109 | 110 | ### Store 111 | 112 | To record the progress of the consumer in the stream (checkpoint) we use a storage layer to persist the last sequence number the consumer has read from a particular shard. The boolean value ErrSkipCheckpoint of consumer.ScanError determines if the checkpoint will be activated. ScanError is returned by the record processing callback. 113 | 114 | This will allow consumers to re-launch and pick up at the position in the stream where they left off. 115 | 116 | The unique identifier for a consumer is `[appName, streamName, shardID]` 117 | 118 | kinesis-checkpoints 119 | 120 | Note: The default storage is in-memory (no-op), which means the scan will not persist any state and the consumer will start from the beginning of the stream each time it is re-started. 
121 | 122 | The consumer accepts a `WithStore` option to set the storage layer: 123 | 124 | ```go 125 | c, err := consumer.New(*stream, consumer.WithStore(db)) 126 | if err != nil { 127 | log.Log("consumer error: %v", err) 128 | } 129 | ``` 130 | 131 | To persist scan progress choose one of the following storage layers: 132 | 133 | #### Redis 134 | 135 | The Redis checkpoint requires App Name, and Stream Name: 136 | 137 | ```go 138 | import store "github.com/harlow/kinesis-consumer/store/redis" 139 | 140 | // redis checkpoint 141 | db, err := store.New(appName) 142 | if err != nil { 143 | log.Fatalf("new checkpoint error: %v", err) 144 | } 145 | ``` 146 | 147 | #### DynamoDB 148 | 149 | The DynamoDB checkpoint requires Table Name, App Name, and Stream Name: 150 | 151 | ```go 152 | import store "github.com/harlow/kinesis-consumer/store/ddb" 153 | 154 | // ddb checkpoint 155 | db, err := store.New(appName, tableName) 156 | if err != nil { 157 | log.Fatalf("new checkpoint error: %v", err) 158 | } 159 | 160 | // Override the Kinesis if any needs on session (e.g. assume role) 161 | myDynamoDbClient := dynamodb.New(session.New(aws.NewConfig())) 162 | 163 | // For versions of AWS sdk that fixed config being picked up properly, the example of 164 | // setting region should work. 
165 | // myDynamoDbClient := dynamodb.New(session.New(aws.NewConfig()), &aws.Config{ 166 | // Region: aws.String("us-west-2"), 167 | // }) 168 | 169 | db, err := store.New(*app, *table, checkpoint.WithDynamoClient(myDynamoDbClient)) 170 | if err != nil { 171 | log.Fatalf("new checkpoint error: %v", err) 172 | } 173 | 174 | // Or we can provide your own Retryer to customize what triggers a retry inside checkpoint 175 | // See code in examples 176 | // ck, err := checkpoint.New(*app, *table, checkpoint.WithDynamoClient(myDynamoDbClient), checkpoint.WithRetryer(&MyRetryer{})) 177 | ``` 178 | 179 | To leverage the DDB checkpoint we'll also need to create a table: 180 | 181 | ``` 182 | Partition key: namespace 183 | Sort key: shard_id 184 | ``` 185 | 186 | screen shot 2017-11-22 at 7 59 36 pm 187 | 188 | #### Postgres 189 | 190 | The Postgres checkpoint requires Table Name, App Name, Stream Name and ConnectionString: 191 | 192 | ```go 193 | import store "github.com/harlow/kinesis-consumer/store/postgres" 194 | 195 | // postgres checkpoint 196 | db, err := store.New(app, table, connStr) 197 | if err != nil { 198 | log.Fatalf("new checkpoint error: %v", err) 199 | } 200 | 201 | ``` 202 | 203 | To leverage the Postgres checkpoint we'll also need to create a table: 204 | 205 | ```sql 206 | CREATE TABLE kinesis_consumer ( 207 | namespace text NOT NULL, 208 | shard_id text NOT NULL, 209 | sequence_number numeric NOT NULL, 210 | CONSTRAINT kinesis_consumer_pk PRIMARY KEY (namespace, shard_id) 211 | ); 212 | ``` 213 | 214 | The table name has to be the same as the one you specify when creating the checkpoint. The primary key composed of namespace and shard_id is mandatory in order for the checkpoint to run without issues and also to ensure data integrity. 
215 | 216 | #### Mysql 217 | 218 | The Mysql checkpoint requires Table Name, App Name, Stream Name and ConnectionString (just like the Postgres checkpoint!): 219 | 220 | ```go 221 | import store "github.com/harlow/kinesis-consumer/store/mysql" 222 | 223 | // mysql checkpoint 224 | db, err := store.New(app, table, connStr) 225 | if err != nil { 226 | log.Fatalf("new checkpoint error: %v", err) 227 | } 228 | 229 | ``` 230 | 231 | To leverage the Mysql checkpoint we'll also need to create a table: 232 | 233 | ```sql 234 | CREATE TABLE kinesis_consumer ( 235 | namespace varchar(255) NOT NULL, 236 | shard_id varchar(255) NOT NULL, 237 | sequence_number numeric(65,0) NOT NULL, 238 | CONSTRAINT kinesis_consumer_pk PRIMARY KEY (namespace, shard_id) 239 | ); 240 | ``` 241 | 242 | The table name has to be the same as the one you specify when creating the checkpoint. The primary key composed of namespace and shard_id is mandatory in order for the checkpoint to run without issues and also to ensure data integrity. 243 | 244 | ### Kinesis Client 245 | 246 | Override the Kinesis client if there is any special config needed: 247 | 248 | ```go 249 | // client 250 | client := kinesis.New(session.NewSession(aws.NewConfig())) 251 | 252 | // consumer 253 | c, err := consumer.New(streamName, consumer.WithClient(client)) 254 | ``` 255 | 256 | ### Metrics 257 | 258 | Add optional counter for exposing counts for checkpoints and records processed: 259 | 260 | ```go 261 | // counter 262 | counter := expvar.NewMap("counters") 263 | 264 | // consumer 265 | c, err := consumer.New(streamName, consumer.WithCounter(counter)) 266 | ``` 267 | 268 | The [expvar package](https://golang.org/pkg/expvar/) will display consumer counts: 269 | 270 | ```json 271 | "counters": { 272 | "checkpoints": 3, 273 | "records": 13005 274 | }, 275 | ``` 276 | 277 | ### Consumer starting point 278 | 279 | Kinesis allows consumers to specify where on the stream they'd like to start consuming from. 
The default in this library is `LATEST` (Start reading just after the most recent record in the shard). 280 | 281 | This can be adjusted by using the `WithShardIteratorType` option in the library: 282 | 283 | ```go 284 | // override starting place on stream to use TRIM_HORIZON 285 | c, err := consumer.New( 286 | *stream, 287 | consumer.WithShardIteratorType(kinesis.ShardIteratorTypeTrimHorizon) 288 | ) 289 | ``` 290 | 291 | [See AWS Docs for more options.](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html) 292 | 293 | ### Logging 294 | 295 | Logging supports the basic built-in logging library or a third-party external one, so long as 296 | it implements the Logger interface. 297 | 298 | For example, to use the builtin logging package, we wrap it with the myLogger structure. 299 | 300 | ```go 301 | // A myLogger provides a minimalistic logger satisfying the Logger interface. 302 | type myLogger struct { 303 | logger *log.Logger 304 | } 305 | 306 | // Log logs the parameters to the stdlib logger. See log.Println. 307 | func (l *myLogger) Log(args ...interface{}) { 308 | l.logger.Println(args...) 309 | } 310 | ``` 311 | 312 | The package defaults to `ioutil.Discard` to swallow all logs. This can be customized with the preferred logging strategy: 313 | 314 | ```go 315 | // logger 316 | logger := &myLogger{ 317 | logger: log.New(os.Stdout, "consumer-example: ", log.LstdFlags), 318 | } 319 | 320 | // consumer 321 | c, err := consumer.New(streamName, consumer.WithLogger(logger)) 322 | ``` 323 | 324 | To use a more complicated logging library, e.g. apex log 325 | 326 | ```go 327 | type myLogger struct { 328 | logger *log.Logger 329 | } 330 | 331 | func (l *myLogger) Log(args ...interface{}) { 332 | l.logger.Infof("producer", args...) 
333 | } 334 | 335 | func main() { 336 | log := &myLogger{ 337 | logger: alog.Logger{ 338 | Handler: text.New(os.Stderr), 339 | Level: alog.DebugLevel, 340 | }, 341 | } 342 | } 343 | ``` 344 | 345 | # Examples 346 | 347 | There are examples of producer and consumer in the `/examples` directory. These should help give end-to-end examples of setting up consumers with different checkpoint strategies. 348 | 349 | The examples run locally against [Kinesis Lite](https://github.com/mhart/kinesalite). 350 | 351 | $ kinesalite & 352 | 353 | Produce data to the stream: 354 | 355 | $ cat examples/producer/users.txt | go run examples/producer/main.go --stream myStream 356 | 357 | Consume data from the stream: 358 | 359 | $ go run examples/consumer/main.go --stream myStream 360 | 361 | ## Contributing 362 | 363 | Please see [CONTRIBUTING.md] for more information. Thank you, [contributors]! 364 | 365 | [LICENSE]: /MIT-LICENSE 366 | [CONTRIBUTING.md]: /CONTRIBUTING.md 367 | 368 | ## License 369 | 370 | Copyright (c) 2015 Harlow Ward. It is free software, and may 
371 | 372 | [contributors]: https://github.com/harlow/kinesis-connectors/graphs/contributors 373 | 374 | > [www.hward.com](http://www.hward.com)  ·  375 | > GitHub [@harlow](https://github.com/harlow)  ·  376 | > Twitter [@harlow_ward](https://twitter.com/harlow_ward) 377 | -------------------------------------------------------------------------------- /consumer_test.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/service/kinesis" 14 | "github.com/aws/aws-sdk-go-v2/service/kinesis/types" 15 | 16 | store "github.com/harlow/kinesis-consumer/store/memory" 17 | ) 18 | 19 | var records = []types.Record{ 20 | { 21 | Data: []byte("firstData"), 22 | SequenceNumber: aws.String("firstSeqNum"), 23 | }, 24 | { 25 | Data: []byte("lastData"), 26 | SequenceNumber: aws.String("lastSeqNum"), 27 | }, 28 | } 29 | 30 | // Implement logger to wrap testing.T.Log. 31 | type testLogger struct { 32 | t *testing.T 33 | } 34 | 35 | func (t *testLogger) Log(args ...interface{}) { 36 | t.t.Log(args...) 
37 | } 38 | 39 | func TestNew(t *testing.T) { 40 | if _, err := New("myStreamName"); err != nil { 41 | t.Fatalf("new consumer error: %v", err) 42 | } 43 | } 44 | 45 | func TestScan(t *testing.T) { 46 | client := &kinesisClientMock{ 47 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 48 | return &kinesis.GetShardIteratorOutput{ 49 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 50 | }, nil 51 | }, 52 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 53 | return &kinesis.GetRecordsOutput{ 54 | NextShardIterator: nil, 55 | Records: records, 56 | }, nil 57 | }, 58 | listShardsMock: func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 59 | return &kinesis.ListShardsOutput{ 60 | Shards: []types.Shard{ 61 | {ShardId: aws.String("myShard")}, 62 | }, 63 | }, nil 64 | }, 65 | } 66 | var ( 67 | cp = store.New() 68 | ctr = &fakeCounter{} 69 | ) 70 | 71 | c, err := New("myStreamName", 72 | WithClient(client), 73 | WithCounter(ctr), 74 | WithStore(cp), 75 | WithLogger(&testLogger{t}), 76 | ) 77 | if err != nil { 78 | t.Fatalf("new consumer error: %v", err) 79 | } 80 | 81 | var ( 82 | ctx, cancel = context.WithCancel(context.Background()) 83 | res string 84 | ) 85 | 86 | var fn = func(r *Record) error { 87 | res += string(r.Data) 88 | 89 | if string(r.Data) == "lastData" { 90 | cancel() 91 | } 92 | 93 | return nil 94 | } 95 | 96 | if err := c.Scan(ctx, fn); err != nil { 97 | t.Errorf("scan returned unexpected error %v", err) 98 | } 99 | 100 | if res != "firstDatalastData" { 101 | t.Errorf("callback error expected %s, got %s", "firstDatalastData", res) 102 | } 103 | 104 | if val := ctr.Get(); val != 2 { 105 | t.Errorf("counter error expected %d, got %d", 
2, val) 106 | } 107 | 108 | val, err := cp.GetCheckpoint("myStreamName", "myShard") 109 | if err != nil && val != "lastSeqNum" { 110 | t.Errorf("checkout error expected %s, got %s", "lastSeqNum", val) 111 | } 112 | } 113 | 114 | func TestScan_ListShardsError(t *testing.T) { 115 | mockError := errors.New("mock list shards error") 116 | client := &kinesisClientMock{ 117 | listShardsMock: func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 118 | return nil, mockError 119 | }, 120 | } 121 | 122 | // use cancel func to signal shutdown 123 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) 124 | 125 | var res string 126 | var fn = func(r *Record) error { 127 | res += string(r.Data) 128 | cancel() // simulate cancellation while processing first record 129 | return nil 130 | } 131 | 132 | c, err := New("myStreamName", WithClient(client)) 133 | if err != nil { 134 | t.Fatalf("new consumer error: %v", err) 135 | } 136 | 137 | err = c.Scan(ctx, fn) 138 | if !errors.Is(err, mockError) { 139 | t.Errorf("expected an error from listShards, but instead got %v", err) 140 | } 141 | } 142 | 143 | func TestScan_GetShardIteratorError(t *testing.T) { 144 | mockError := errors.New("mock get shard iterator error") 145 | client := &kinesisClientMock{ 146 | listShardsMock: func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 147 | return &kinesis.ListShardsOutput{ 148 | Shards: []types.Shard{ 149 | {ShardId: aws.String("myShard")}, 150 | }, 151 | }, nil 152 | }, 153 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 154 | return nil, mockError 155 | }, 156 | } 157 | 158 | // use cancel func to signal shutdown 159 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) 160 | 161 | var 
res string 162 | var fn = func(r *Record) error { 163 | res += string(r.Data) 164 | cancel() // simulate cancellation while processing first record 165 | return nil 166 | } 167 | 168 | c, err := New("myStreamName", WithClient(client)) 169 | if err != nil { 170 | t.Fatalf("new consumer error: %v", err) 171 | } 172 | 173 | err = c.Scan(ctx, fn) 174 | if !errors.Is(err, mockError) { 175 | t.Errorf("expected an error from getShardIterator, but instead got %v", err) 176 | } 177 | } 178 | 179 | func TestScanShard(t *testing.T) { 180 | var client = &kinesisClientMock{ 181 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 182 | return &kinesis.GetShardIteratorOutput{ 183 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 184 | }, nil 185 | }, 186 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 187 | return &kinesis.GetRecordsOutput{ 188 | NextShardIterator: nil, 189 | Records: records, 190 | }, nil 191 | }, 192 | } 193 | 194 | var ( 195 | cp = store.New() 196 | ctr = &fakeCounter{} 197 | ) 198 | 199 | c, err := New("myStreamName", 200 | WithClient(client), 201 | WithCounter(ctr), 202 | WithStore(cp), 203 | WithLogger(&testLogger{t}), 204 | ) 205 | if err != nil { 206 | t.Fatalf("new consumer error: %v", err) 207 | } 208 | 209 | // callback fn appends record data 210 | var ( 211 | ctx, cancel = context.WithCancel(context.Background()) 212 | res string 213 | ) 214 | 215 | var fn = func(r *Record) error { 216 | res += string(r.Data) 217 | 218 | if string(r.Data) == "lastData" { 219 | cancel() 220 | } 221 | 222 | return nil 223 | } 224 | 225 | if err := c.ScanShard(ctx, "myShard", fn); err != nil { 226 | t.Errorf("scan returned unexpected error %v", err) 227 | } 228 | 229 | // runs callback func 230 | if res != 
"firstDatalastData" { 231 | t.Fatalf("callback error expected %s, got %s", "firstDatalastData", res) 232 | } 233 | 234 | // increments counter 235 | if val := ctr.Get(); val != 2 { 236 | t.Fatalf("counter error expected %d, got %d", 2, val) 237 | } 238 | 239 | // sets checkpoint 240 | val, err := cp.GetCheckpoint("myStreamName", "myShard") 241 | if err != nil && val != "lastSeqNum" { 242 | t.Fatalf("checkout error expected %s, got %s", "lastSeqNum", val) 243 | } 244 | } 245 | 246 | func TestScanShard_Cancellation(t *testing.T) { 247 | var client = &kinesisClientMock{ 248 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 249 | return &kinesis.GetShardIteratorOutput{ 250 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 251 | }, nil 252 | }, 253 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 254 | return &kinesis.GetRecordsOutput{ 255 | NextShardIterator: nil, 256 | Records: records, 257 | }, nil 258 | }, 259 | } 260 | 261 | // use cancel func to signal shutdown 262 | ctx, cancel := context.WithCancel(context.Background()) 263 | 264 | var res string 265 | var fn = func(r *Record) error { 266 | res += string(r.Data) 267 | cancel() // simulate cancellation while processing first record 268 | return nil 269 | } 270 | 271 | c, err := New("myStreamName", WithClient(client)) 272 | if err != nil { 273 | t.Fatalf("new consumer error: %v", err) 274 | } 275 | 276 | err = c.ScanShard(ctx, "myShard", fn) 277 | if err != nil { 278 | t.Fatalf("scan shard error: %v", err) 279 | } 280 | 281 | if res != "firstData" { 282 | t.Fatalf("callback error expected %s, got %s", "firstData", res) 283 | } 284 | } 285 | 286 | func TestScanShard_SkipCheckpoint(t *testing.T) { 287 | var client = &kinesisClientMock{ 288 | getShardIteratorMock: 
func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 289 | return &kinesis.GetShardIteratorOutput{ 290 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 291 | }, nil 292 | }, 293 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 294 | return &kinesis.GetRecordsOutput{ 295 | NextShardIterator: nil, 296 | Records: records, 297 | }, nil 298 | }, 299 | } 300 | 301 | var cp = store.New() 302 | 303 | c, err := New("myStreamName", WithClient(client), WithStore(cp)) 304 | if err != nil { 305 | t.Fatalf("new consumer error: %v", err) 306 | } 307 | 308 | var ctx, cancel = context.WithCancel(context.Background()) 309 | 310 | var fn = func(r *Record) error { 311 | if aws.ToString(r.SequenceNumber) == "lastSeqNum" { 312 | cancel() 313 | return ErrSkipCheckpoint 314 | } 315 | 316 | return nil 317 | } 318 | 319 | err = c.ScanShard(ctx, "myShard", fn) 320 | if err != nil { 321 | t.Fatalf("scan shard error: %v", err) 322 | } 323 | 324 | val, err := cp.GetCheckpoint("myStreamName", "myShard") 325 | if err != nil && val != "firstSeqNum" { 326 | t.Fatalf("checkout error expected %s, got %s", "firstSeqNum", val) 327 | } 328 | } 329 | 330 | func TestScanShard_ShardIsClosed(t *testing.T) { 331 | var client = &kinesisClientMock{ 332 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 333 | return &kinesis.GetShardIteratorOutput{ 334 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 335 | }, nil 336 | }, 337 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 338 | return &kinesis.GetRecordsOutput{ 339 | NextShardIterator: 
nil, 340 | Records: make([]types.Record, 0), 341 | }, nil 342 | }, 343 | } 344 | 345 | c, err := New("myStreamName", WithClient(client)) 346 | if err != nil { 347 | t.Fatalf("new consumer error: %v", err) 348 | } 349 | 350 | var fn = func(r *Record) error { 351 | return nil 352 | } 353 | 354 | err = c.ScanShard(context.Background(), "myShard", fn) 355 | if err != nil { 356 | t.Fatalf("scan shard error: %v", err) 357 | } 358 | } 359 | 360 | func TestScanShard_ShardIsClosed_WithShardClosedHandler(t *testing.T) { 361 | var client = &kinesisClientMock{ 362 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 363 | return &kinesis.GetShardIteratorOutput{ 364 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 365 | }, nil 366 | }, 367 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 368 | return &kinesis.GetRecordsOutput{ 369 | NextShardIterator: nil, 370 | Records: make([]types.Record, 0), 371 | }, nil 372 | }, 373 | } 374 | 375 | var fn = func(r *Record) error { 376 | return nil 377 | } 378 | 379 | c, err := New("myStreamName", 380 | WithClient(client), 381 | WithShardClosedHandler(func(streamName, shardID string) error { 382 | return fmt.Errorf("closed shard error") 383 | }), 384 | WithLogger(&testLogger{t})) 385 | if err != nil { 386 | t.Fatalf("new consumer error: %v", err) 387 | } 388 | 389 | err = c.ScanShard(context.Background(), "myShard", fn) 390 | if err == nil { 391 | t.Fatal("expected an error but didn't get one") 392 | } 393 | if err.Error() != "shard closed handler error: closed shard error" { 394 | t.Fatalf("unexpected error: %s", err.Error()) 395 | } 396 | } 397 | 398 | func TestScanShard_GetRecordsError(t *testing.T) { 399 | getRecordsError := &types.InvalidArgumentException{Message: aws.String("aws error 
message")} 400 | var client = &kinesisClientMock{ 401 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 402 | return &kinesis.GetShardIteratorOutput{ 403 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 404 | }, nil 405 | }, 406 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 407 | return &kinesis.GetRecordsOutput{ 408 | NextShardIterator: nil, 409 | Records: nil, 410 | }, getRecordsError 411 | }, 412 | } 413 | 414 | var fn = func(r *Record) error { 415 | return nil 416 | } 417 | 418 | c, err := New("myStreamName", WithClient(client), WithLogger(&testLogger{t})) 419 | if err != nil { 420 | t.Fatalf("new consumer error: %v", err) 421 | } 422 | 423 | err = c.ScanShard(context.Background(), "myShard", fn) 424 | if err.Error() != "get records error: InvalidArgumentException: aws error message" { 425 | t.Fatalf("unexpected error: %v", err) 426 | } 427 | 428 | if !errors.Is(err, getRecordsError) { 429 | t.Fatalf("unexpected error: %v", err) 430 | } 431 | } 432 | 433 | type kinesisClientMock struct { 434 | kinesis.Client 435 | getShardIteratorMock func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) 436 | getRecordsMock func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) 437 | listShardsMock func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) 438 | } 439 | 440 | func (c *kinesisClientMock) ListShards(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 441 | return c.listShardsMock(ctx, params) 442 | } 443 | 444 | 
func (c *kinesisClientMock) GetRecords(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 445 | return c.getRecordsMock(ctx, params) 446 | } 447 | 448 | func (c *kinesisClientMock) GetShardIterator(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 449 | return c.getShardIteratorMock(ctx, params) 450 | } 451 | 452 | // implementation of counter 453 | type fakeCounter struct { 454 | counter int64 455 | mu sync.Mutex 456 | } 457 | 458 | func (fc *fakeCounter) Get() int64 { 459 | fc.mu.Lock() 460 | defer fc.mu.Unlock() 461 | 462 | return fc.counter 463 | } 464 | 465 | func (fc *fakeCounter) Add(streamName string, count int64) { 466 | fc.mu.Lock() 467 | defer fc.mu.Unlock() 468 | 469 | fc.counter += count 470 | } 471 | 472 | func TestScan_PreviousParentsBeforeTrimHorizon(t *testing.T) { 473 | client := &kinesisClientMock{ 474 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 475 | return &kinesis.GetShardIteratorOutput{ 476 | ShardIterator: aws.String("49578481031144599192696750682534686652010819674221576194"), 477 | }, nil 478 | }, 479 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 480 | return &kinesis.GetRecordsOutput{ 481 | NextShardIterator: nil, 482 | Records: records, 483 | }, nil 484 | }, 485 | listShardsMock: func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 486 | return &kinesis.ListShardsOutput{ 487 | Shards: []types.Shard{ 488 | { 489 | ShardId: aws.String("myShard"), 490 | ParentShardId: aws.String("myOldParent"), 491 | AdjacentParentShardId: aws.String("myOldAdjacentParent"), 492 | }, 493 | }, 494 | }, nil 
495 | }, 496 | } 497 | var ( 498 | cp = store.New() 499 | ctr = &fakeCounter{} 500 | ) 501 | 502 | c, err := New("myStreamName", 503 | WithClient(client), 504 | WithCounter(ctr), 505 | WithStore(cp), 506 | WithLogger(&testLogger{t}), 507 | ) 508 | if err != nil { 509 | t.Fatalf("new consumer error: %v", err) 510 | } 511 | 512 | var ( 513 | ctx, cancel = context.WithCancel(context.Background()) 514 | res string 515 | ) 516 | 517 | var fn = func(r *Record) error { 518 | res += string(r.Data) 519 | 520 | if string(r.Data) == "lastData" { 521 | cancel() 522 | } 523 | 524 | return nil 525 | } 526 | 527 | if err := c.Scan(ctx, fn); err != nil { 528 | t.Errorf("scan returned unexpected error %v", err) 529 | } 530 | 531 | if res != "firstDatalastData" { 532 | t.Errorf("callback error expected %s, got %s", "firstDatalastData", res) 533 | } 534 | 535 | if val := ctr.Get(); val != 2 { 536 | t.Errorf("counter error expected %d, got %d", 2, val) 537 | } 538 | 539 | val, err := cp.GetCheckpoint("myStreamName", "myShard") 540 | if err != nil && val != "lastSeqNum" { 541 | t.Errorf("checkout error expected %s, got %s", "lastSeqNum", val) 542 | } 543 | } 544 | 545 | func TestScan_ParentChildOrdering(t *testing.T) { 546 | // We create a set of shards where shard1 split into (shard2,shard3), then (shard2,shard3) merged into shard4. 
547 | client := &kinesisClientMock{ 548 | getShardIteratorMock: func(ctx context.Context, params *kinesis.GetShardIteratorInput, optFns ...func(*kinesis.Options)) (*kinesis.GetShardIteratorOutput, error) { 549 | return &kinesis.GetShardIteratorOutput{ 550 | ShardIterator: aws.String(*params.ShardId + "iter"), 551 | }, nil 552 | }, 553 | getRecordsMock: func(ctx context.Context, params *kinesis.GetRecordsInput, optFns ...func(*kinesis.Options)) (*kinesis.GetRecordsOutput, error) { 554 | switch *params.ShardIterator { 555 | case "shard1iter": 556 | return &kinesis.GetRecordsOutput{ 557 | NextShardIterator: nil, 558 | Records: []types.Record{ 559 | { 560 | Data: []byte("shard1data"), 561 | SequenceNumber: aws.String("shard1num"), 562 | }, 563 | }, 564 | }, nil 565 | case "shard2iter": 566 | return &kinesis.GetRecordsOutput{ 567 | NextShardIterator: nil, 568 | Records: []types.Record{}, 569 | }, nil 570 | case "shard3iter": 571 | return &kinesis.GetRecordsOutput{ 572 | NextShardIterator: nil, 573 | Records: []types.Record{ 574 | { 575 | Data: []byte("shard3data"), 576 | SequenceNumber: aws.String("shard3num"), 577 | }, 578 | }, 579 | }, nil 580 | case "shard4iter": 581 | return &kinesis.GetRecordsOutput{ 582 | NextShardIterator: nil, 583 | Records: []types.Record{ 584 | { 585 | Data: []byte("shard4data"), 586 | SequenceNumber: aws.String("shard4num"), 587 | }, 588 | }, 589 | }, nil 590 | default: 591 | panic("got unexpected iterator") 592 | } 593 | }, 594 | listShardsMock: func(ctx context.Context, params *kinesis.ListShardsInput, optFns ...func(*kinesis.Options)) (*kinesis.ListShardsOutput, error) { 595 | // Intentionally misorder these to test resiliance to ordering issues from ListShards. 
596 | return &kinesis.ListShardsOutput{ 597 | Shards: []types.Shard{ 598 | { 599 | ShardId: aws.String("shard3"), 600 | ParentShardId: aws.String("shard1"), 601 | }, 602 | { 603 | ShardId: aws.String("shard1"), 604 | ParentShardId: aws.String("shard0"), // not otherwise referenced, parent ordering should ignore this 605 | }, 606 | { 607 | ShardId: aws.String("shard4"), 608 | ParentShardId: aws.String("shard2"), 609 | AdjacentParentShardId: aws.String("shard3"), 610 | }, 611 | { 612 | ShardId: aws.String("shard2"), 613 | ParentShardId: aws.String("shard1"), 614 | }, 615 | }, 616 | }, nil 617 | }, 618 | } 619 | var ( 620 | cp = store.New() 621 | ctr = &fakeCounter{} 622 | ) 623 | 624 | c, err := New("myStreamName", 625 | WithClient(client), 626 | WithCounter(ctr), 627 | WithStore(cp), 628 | WithLogger(&testLogger{t}), 629 | ) 630 | if err != nil { 631 | t.Fatalf("new consumer error: %v", err) 632 | } 633 | 634 | var ( 635 | ctx, cancel = context.WithCancel(context.Background()) 636 | res string 637 | ) 638 | 639 | rand.Seed(time.Now().UnixNano()) 640 | 641 | var fn = func(r *Record) error { 642 | res += string(r.Data) 643 | time.Sleep(time.Duration(rand.Int()%100) * time.Millisecond) 644 | 645 | if string(r.Data) == "shard4data" { 646 | cancel() 647 | } 648 | 649 | return nil 650 | } 651 | 652 | if err := c.Scan(ctx, fn); err != nil { 653 | t.Errorf("scan returned unexpected error %v", err) 654 | } 655 | 656 | if want := "shard1datashard3datashard4data"; res != want { 657 | t.Errorf("callback error expected %s, got %s", want, res) 658 | } 659 | 660 | if val := ctr.Get(); val != 3 { 661 | t.Errorf("counter error expected %d, got %d", 2, val) 662 | } 663 | 664 | val, err := cp.GetCheckpoint("myStreamName", "shard4data") 665 | if err != nil && val != "shard4num" { 666 | t.Errorf("checkout error expected %s, got %s", "shard4num", val) 667 | } 668 | } 669 | -------------------------------------------------------------------------------- /go.sum: 
-------------------------------------------------------------------------------- 1 | github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= 2 | github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= 3 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= 4 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= 5 | github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= 6 | github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= 7 | github.com/apex/log v1.6.0 h1:Y50wF1PBIIexIgTm0/7G6gcLitkO5jHK5Mb6wcMY0UI= 8 | github.com/apex/log v1.6.0/go.mod h1:x7s+P9VtvFBXge9Vbn+8TrqKmuzmD35TTkeBHul8UtY= 9 | github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= 10 | github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= 11 | github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= 12 | github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 13 | github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 14 | github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= 15 | github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= 16 | github.com/aws/aws-sdk-go-v2 v1.11.2 h1:SDiCYqxdIYi6HgQfAWRhgdZrdnOuGyLDJVRSWLeHWvs= 17 | github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= 18 | github.com/aws/aws-sdk-go-v2/config v1.6.1 h1:qrZINaORyr78syO1zfD4l7r4tZjy0Z1l0sy4jiysyOM= 19 | github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= 20 | 
github.com/aws/aws-sdk-go-v2/credentials v1.3.3 h1:A13QPatmUl41SqUfnuT3V0E3XiNGL6qNTOINbE8cZL4= 21 | github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= 22 | github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= 23 | github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= 24 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1 h1:rc+fRGvlKbeSd9IFhFS1KWBs0XjTkq0CfK5xqyLgIp0= 25 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= 26 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs= 27 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= 28 | github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1 h1:IkqRRUZTKaS16P2vpX+FNc2jq3JWa3c478gykQp4ow4= 29 | github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= 30 | github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE= 31 | github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= 32 | github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= 33 | github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= 34 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= 35 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= 36 | github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 h1:QCPbsMPMcM4iGbui5SH6O4uxvZffPoBJ4CIGX7dU0l4= 37 | 
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= 38 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3 h1:VxFCgxsqWe7OThOwJ5IpFX3xrObtuIH9Hg/NW7oot1Y= 39 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= 40 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 h1:hb+NupVMUzINGUCfDs2+YqMkWKu47dBIQHpulM0XWh4= 41 | github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= 42 | github.com/aws/aws-sdk-go-v2/service/sso v1.3.3 h1:K2gCnGvAASpz+jqP9iyr+F/KNjmTYf8aWOtTQzhmZ5w= 43 | github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= 44 | github.com/aws/aws-sdk-go-v2/service/sts v1.6.2 h1:l504GWCoQi1Pk68vSUFGLmDIEMzRfVGNgLakDK+Uj58= 45 | github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= 46 | github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 47 | github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 48 | github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58= 49 | github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 50 | github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= 51 | github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= 52 | github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= 53 | github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= 54 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 55 | github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 56 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 57 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 58 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 59 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 60 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 61 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 62 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 63 | github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 64 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 65 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 66 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 67 | github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 68 | github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= 69 | github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 70 | github.com/go-redis/redis/v9 v9.0.0-rc.2 h1:IN1eI8AvJJeWHjMW/hlFAv2sAfvTun2DVksDDJ3a6a0= 71 | github.com/go-redis/redis/v9 v9.0.0-rc.2/go.mod h1:cgBknjwcBJa2prbnuHH/4k/Mlj4r0pWNV2HBanHujfY= 72 | github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= 73 | github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= 74 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= 75 | github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 76 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 77 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 78 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 79 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 80 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 81 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 82 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 83 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 84 | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 85 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 86 | github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= 87 | github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= 88 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 89 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 90 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 91 | github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 92 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 93 | github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 94 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 95 | github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 96 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 97 | github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 98 | github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 99 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 100 | github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 101 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 102 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 103 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 104 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 105 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 106 | github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= 107 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 108 | github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= 109 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 110 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 111 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 112 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 113 | github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= 114 | github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 115 | github.com/mattn/go-colorable v0.1.1/go.mod 
h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= 116 | github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 117 | github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 118 | github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 119 | github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= 120 | github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 121 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 122 | github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 123 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 124 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 125 | github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 126 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 127 | github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 128 | github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= 129 | github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= 130 | github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= 131 | github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= 132 | github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= 133 | github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls= 134 | github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= 135 | github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 136 | github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 137 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 138 | github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= 139 | github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= 140 | github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= 141 | github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= 142 | github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= 143 | github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= 144 | github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= 145 | github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= 146 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 147 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 148 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 149 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 150 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 151 | github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 152 | github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 153 | github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= 154 | github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= 155 | github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= 156 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 157 | github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 158 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 159 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 160 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 161 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 162 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 163 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 164 | github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 165 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 166 | github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= 167 | github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= 168 | github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= 169 | github.com/tj/go-buffer v1.0.1/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= 170 | github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= 171 | github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= 172 | github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= 173 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 174 | github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 175 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 176 | github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e h1:oIpIX9VKxSCFrfjsKpluGbNPBGq9iNnT9crH781j9wY= 177 | github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod 
h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= 178 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 179 | golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 180 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 181 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 182 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 183 | golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= 184 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 185 | golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= 186 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 187 | golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= 188 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 189 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 190 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 191 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 192 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 193 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 194 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= 195 | golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 196 | golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 197 | golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 198 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 199 | golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= 200 | golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= 201 | golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= 202 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 203 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 204 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 205 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 206 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 207 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 208 | golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 209 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 210 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 211 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 212 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 213 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 214 
| golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 215 | golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 216 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 217 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 218 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 219 | golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 220 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 221 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 222 | golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 223 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 224 | golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 225 | golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 226 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 227 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 228 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 229 | golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= 230 | golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 231 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 232 | golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 233 | golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 234 | golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= 235 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 236 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 237 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 238 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 239 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 240 | golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= 241 | golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 242 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 243 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 244 | golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 245 | golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= 246 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 247 | golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= 248 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 249 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 250 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 251 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 252 | google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 253 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 254 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 255 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 256 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 257 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 258 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 259 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 260 | google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 261 | google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 262 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 263 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 264 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 265 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 266 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 267 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 268 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 269 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 270 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 271 | gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 272 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 273 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 274 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 275 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 276 | gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 277 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 278 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 279 | --------------------------------------------------------------------------------