├── .github └── workflows │ ├── codecov.yml │ ├── codeql-analysis.yml │ └── gotest.yml ├── .gitignore ├── LICENSE ├── README.md ├── admin ├── kafka_admin.go ├── kafka_admin_test.go ├── mock_kafka_admin.go ├── mock_topics.go └── mock_topics_test.go ├── backend ├── backend.go ├── iterator.go ├── memory │ ├── memory.go │ ├── memory_bench_test.go │ ├── memory_test.go │ ├── partition_memory.go │ ├── partition_memory_bench_test.go │ └── partitioner.go ├── mock_backend.go └── rocks │ └── rocks.go ├── consumer ├── builder.go ├── config.go ├── consumer.go ├── events.go ├── group_handler.go ├── mock_consumer.go ├── mock_consumer_partition.go ├── mock_partition_consumer.go ├── mock_partition_consumer_test.go ├── partition.go └── partition_consumer.go ├── data ├── record.go └── record_test.go ├── examples ├── example_1 │ ├── encoders │ │ ├── encoders.go │ │ ├── int64_encoder.go │ │ └── transaction_encoder.go │ ├── events │ │ ├── account_credited.go │ │ ├── account_debited.go │ │ ├── account_details_updated.go │ │ ├── customer_profile.go │ │ └── message.go │ ├── main.go │ └── stream │ │ ├── account_credited.go │ │ ├── account_debited.go │ │ ├── account_details_global_table.go │ │ ├── customer_profile_global_table.go │ │ ├── init.go │ │ ├── mock-stream │ │ └── mock_stream.go │ │ └── transaction_stream.go └── example_2 │ ├── domain │ └── variables.go │ ├── encoders │ ├── common_encoder.go │ ├── encoders.go │ └── int64_encoder.go │ ├── events │ ├── a.go │ ├── ab.go │ ├── abc.go │ ├── b.go │ └── c.go │ ├── main.go │ └── stream │ ├── a.go │ ├── abcCommonStream.go │ ├── init.go │ └── mock-stream │ └── mock_stream.go ├── go.mod ├── go.sum ├── kstream ├── branch │ └── branch.go ├── builder.go ├── builder_config.go ├── changelog │ ├── buffer.go │ ├── buffer_test.go │ ├── cache.go │ ├── changelog.go │ ├── mock_changelog.go │ ├── options.go │ ├── replica_manager.go │ ├── replica_syncer.go │ ├── state_changelog.go │ ├── state_changelog_test.go │ └── store_changelog.go ├── context │ ├── 
context.go │ └── context_test.go ├── default_builders.go ├── dlq │ ├── dlq.go │ └── options.go ├── encoding │ ├── encoder.go │ ├── int_encoder.go │ ├── int_encoder_test.go │ ├── json_encoder.go │ └── string_encoder.go ├── global_table.go ├── global_table_stream.go ├── global_table_stream_instance.go ├── global_table_stream_test.go ├── graph │ └── graph.go ├── k_flow.go ├── k_sink.go ├── k_source.go ├── k_stream.go ├── k_table.go ├── offsets │ ├── manager.go │ ├── mock_manager.go │ └── resetter.go ├── processor.go ├── processor_pool.go ├── processors │ ├── filter.go │ ├── filter_test.go │ ├── join │ │ ├── global_table_joiner.go │ │ ├── global_table_joiner_bench_test.go │ │ ├── global_table_joiner_test.go │ │ ├── global_table_star_joiner.go │ │ ├── joiner.go │ │ ├── repartition.go │ │ ├── side_joiner.go │ │ ├── stream_joiner.go │ │ └── window.go │ ├── key_selector.go │ ├── materializer.go │ ├── processor.go │ ├── processor_test.go │ ├── transformer.go │ ├── transformer_test.go │ └── value_transformer.go ├── rebelance_handler.go ├── store │ ├── hash_index.go │ ├── hash_index_test.go │ ├── http.go │ ├── index.go │ ├── indexed_bench_test.go │ ├── indexed_store.go │ ├── indexed_store_bench_test.go │ ├── indexed_store_test.go │ ├── iterator.go │ ├── meta.go │ ├── mock_store.go │ ├── option.go │ ├── recoverable_store.go │ ├── registry.go │ ├── rpc.go │ ├── state_store.go │ ├── store.go │ ├── store_bench_test.go │ ├── store_test.go │ ├── uuid_hash_index.go │ └── uuid_hash_index_test.go ├── streams.go ├── topic_builder.go ├── topology │ ├── node.go │ ├── source.go │ └── topology.go ├── window │ └── sliding.go └── worker_pool │ ├── pool.go │ ├── pool_bench_test.go │ └── pool_test.go ├── producer ├── config.go ├── mock-producer.go ├── pool.go ├── producer.go └── producer_test.go └── util └── struct_to_map.go /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: coverage 2 | on: 3 | push: 4 | branches: 5 | - 
'*' 6 | paths-ignore: 7 | - '**.md' 8 | pull_request: 9 | paths-ignore: 10 | - '**.md' 11 | jobs: 12 | run: 13 | name: Go 14 | runs-on: ${{ matrix.operating-system }} 15 | strategy: 16 | matrix: 17 | go-version: [1.13.x] 18 | operating-system: [ubuntu-latest] 19 | env: 20 | GO111MODULE: on 21 | steps: 22 | 23 | - name: Install Go 24 | uses: actions/setup-go@v1 25 | with: 26 | go-version: ${{ matrix.go-version }} 27 | 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | 31 | - name: Go Test 32 | run: go test -v -race -coverprofile=coverage.txt -covermode=atomic ./... 33 | 34 | - name: Code Cov 35 | run: CODECOV_TOKEN="7749df23-a010-4716-91fb-277130eb51bd" bash <(curl -s https://codecov.io/bash) -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | name: "CodeQL" 7 | 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | # The branches below must be a subset of the branches above 13 | branches: [master] 14 | schedule: 15 | - cron: '0 11 * * 0' 16 | 17 | jobs: 18 | analyze: 19 | name: Analyze 20 | runs-on: ubuntu-latest 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | # Override automatic language detection by changing the below list 26 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 27 | language: ['go'] 28 | # Learn more... 
29 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 30 | 31 | steps: 32 | - name: Checkout repository 33 | uses: actions/checkout@v2 34 | with: 35 | # We must fetch at least the immediate parents so that if this is 36 | # a pull request then we can checkout the head. 37 | fetch-depth: 2 38 | 39 | # If this run was triggered by a pull request event, then checkout 40 | # the head of the pull request instead of the merge commit. 41 | - run: git checkout HEAD^2 42 | if: ${{ github.event_name == 'pull_request' }} 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.github/workflows/gotest.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: 3 | push: 4 | branches: 5 | - '*' 6 | paths-ignore: 7 | - '**.md' 8 | pull_request: 9 | paths-ignore: 10 | - '**.md' 11 | jobs: 12 | run: 13 | name: Go 14 | runs-on: ${{ matrix.operating-system }} 15 | strategy: 16 | matrix: 17 | go-version: [1.13.x, 1.14.x] 18 | operating-system: [ubuntu-latest, windows-latest, macos-latest] 19 | env: 20 | GO111MODULE: on 21 | steps: 22 | 23 | - name: Install Go 24 | uses: actions/setup-go@v1 25 | with: 26 | go-version: ${{ matrix.go-version }} 27 | 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | 31 | - name: Go Test 32 | run: go test -v -race ./... 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 TryFix Engineering 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## KStream - Kafka Streams for Golang 2 | 3 | [![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tryfix/kstream) 4 | ![build](https://github.com/tryfix/kstream/workflows/build/badge.svg) 5 | [![Coverage](https://codecov.io/gh/tryfix/kstream/branch/master/graph/badge.svg)](https://codecov.io/gh/tryfix/kstream) 6 | [![Releases](https://img.shields.io/github/release/tryfix/kstream/all.svg?style=flat-square)](https://github.com/tryfix/kstream/releases) 7 | [![Go Report Card](https://goreportcard.com/badge/github.com/tryfix/kstream)](https://goreportcard.com/report/github.com/tryfix/kstream) 8 | [![LICENSE](https://img.shields.io/github/license/tryfix/kstream.svg?style=flat-square)](https://github.com/tryfix/kstream/blob/master/LICENSE) 9 | 10 | KStream is a light-weight implementation of [kafka streams](https://kafka.apache.org/documentation/streams/). 11 | It is heavily inspired by Kafka-Streams(Java) library. KStream 12 | implements features like Internal Stores, Remote Stores, Local Tables, Global Tables and it guarantees Exactly Once 13 | Processing with its de-duplicator. It can process message as micro batches or one by one depending on the throughput 14 | required. 
KStream can handle a good amount of throughput (50,000 messages per second with micro batch enabled) in a fault 15 | tolerant manner with a very minimal amount of latency (2 ~ 5 ms) 16 | 17 | Project uses two external libraries 18 | [sarama](https://github.com/Shopify/sarama) for consumers and producers 19 | 20 | ## Stream Components 21 | 22 | ### Stream Topology 23 | 24 | Stream Topology is a set of processor nodes that typically starts with a source node and ends with a sink node 25 | 26 | ### Stream Builder 27 | 28 | Stream builder is responsible for building stream topologies into kafka streams with their dependencies like changelog 29 | topics, re-partition topics, etc... 30 | 31 | ### KStream 32 | 33 | KStream is a regular kafka stream which takes an input topic as an upstream and processes the record stream to another 34 | kafka topic(downstream topic) 35 | It supports several functions like transforming, merging to another stream, joins with other streams etc. 36 | 37 | ### KTable 38 | 39 | @TODO 40 | 41 | ### Global KTable 42 | 43 | Global KTable is also a KTable, except that each instance of the application has its own 44 | copy of all the partitions and it will be running on a separate thread, so applications don't have to worry 45 | about handling failures. 46 | 47 | ### Store 48 | 49 | Store is a pluggable local key-val store which is used by KTable, Global KTable and other operations like joins, merges 50 | and removing duplicates. 51 | 52 | ### Store Backend 53 | 54 | Store is a pluggable local key-val store which is used by KTable, Global KTable and other operations like joins, merges 55 | and removing duplicates.
56 | 57 | ### RPC layer for Store 58 | 59 | @TODO 60 | 61 | ### Key discovery service 62 | 63 | @TODO 64 | 65 | 66 | -------------------------------------------------------------------------------- /admin/kafka_admin_test.go: -------------------------------------------------------------------------------- 1 | package admin 2 | 3 | import ( 4 | "github.com/Shopify/sarama" 5 | "github.com/tryfix/log" 6 | "testing" 7 | ) 8 | 9 | func TestKafkaAdmin_FetchInfo(t *testing.T) { 10 | seedBroker := sarama.NewMockBroker(t, 1) 11 | defer seedBroker.Close() 12 | 13 | seedBroker.SetHandlerByMap(map[string]sarama.MockResponse{ 14 | "MetadataRequest": sarama.NewMockMetadataResponse(t). 15 | SetController(seedBroker.BrokerID()). 16 | SetLeader("my_topic", 0, seedBroker.BrokerID()). 17 | SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), 18 | "DescribeConfigsRequest": sarama.NewMockDescribeConfigsResponse(t), 19 | }) 20 | 21 | config := sarama.NewConfig() 22 | config.Version = sarama.V1_0_0_0 23 | saramaAdmin, err := sarama.NewClusterAdmin([]string{seedBroker.Addr()}, config) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | 28 | topic := `my_topic` 29 | admin := &kafkaAdmin{ 30 | admin: saramaAdmin, 31 | logger: log.NewNoopLogger(), 32 | } 33 | tps, err := admin.FetchInfo([]string{topic}) 34 | if err != nil { 35 | t.Error(err) 36 | } 37 | 38 | if tps[topic].NumPartitions != 1 { 39 | t.Fail() 40 | } 41 | 42 | err = saramaAdmin.Close() 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | } 47 | 48 | func TestKafkaAdmin_CreateTopics(t *testing.T) { 49 | seedBroker := sarama.NewMockBroker(t, 1) 50 | defer seedBroker.Close() 51 | 52 | seedBroker.SetHandlerByMap(map[string]sarama.MockResponse{ 53 | "MetadataRequest": sarama.NewMockMetadataResponse(t). 54 | SetController(seedBroker.BrokerID()). 
55 | SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), 56 | "CreateTopicsRequest": sarama.NewMockCreateTopicsResponse(t), 57 | }) 58 | 59 | config := sarama.NewConfig() 60 | config.Version = sarama.V0_10_2_0 61 | saramaAdmin, err := sarama.NewClusterAdmin([]string{seedBroker.Addr()}, config) 62 | if err != nil { 63 | t.Fatal(err) 64 | } 65 | 66 | topic := `my_topic` 67 | admin := &kafkaAdmin{ 68 | admin: saramaAdmin, 69 | logger: log.NewNoopLogger(), 70 | } 71 | 72 | err = admin.CreateTopics(map[string]*Topic{ 73 | topic: { 74 | Name: topic, 75 | NumPartitions: 1, 76 | ReplicationFactor: 1, 77 | }, 78 | }) 79 | if err != nil { 80 | t.Fatal(err) 81 | } 82 | err = saramaAdmin.Close() 83 | if err != nil { 84 | t.Fatal(err) 85 | } 86 | } 87 | 88 | func TestKafkaAdmin_DeleteTopics(t *testing.T) { 89 | seedBroker := sarama.NewMockBroker(t, 1) 90 | defer seedBroker.Close() 91 | 92 | seedBroker.SetHandlerByMap(map[string]sarama.MockResponse{ 93 | "MetadataRequest": sarama.NewMockMetadataResponse(t). 94 | SetController(seedBroker.BrokerID()). 95 | SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), 96 | "DeleteTopicsRequest": sarama.NewMockDeleteTopicsResponse(t), 97 | }) 98 | 99 | config := sarama.NewConfig() 100 | config.Version = sarama.V0_10_2_0 101 | saramaAdmin, err := sarama.NewClusterAdmin([]string{seedBroker.Addr()}, config) 102 | if err != nil { 103 | t.Fatal(err) 104 | } 105 | 106 | topic := `my_topic` 107 | admin := &kafkaAdmin{ 108 | admin: saramaAdmin, 109 | logger: log.NewNoopLogger(), 110 | } 111 | 112 | _, err = admin.DeleteTopics([]string{topic}) 113 | if err != nil { 114 | t.Fatal(err) 115 | } 116 | err = saramaAdmin.Close() 117 | if err != nil { 118 | t.Fatal(err) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /admin/mock_kafka_admin.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package admin 9 | 10 | type MockKafkaAdmin struct { 11 | Topics *Topics 12 | } 13 | 14 | func NewMockAdminWithTopics(tps map[string]*Topic) *MockKafkaAdmin { 15 | topics := NewMockTopics() 16 | admin := &MockKafkaAdmin{Topics: topics} 17 | admin.CreateTopics(tps) 18 | return admin 19 | } 20 | 21 | func (m *MockKafkaAdmin) FetchInfo(topics []string) (map[string]*Topic, error) { 22 | tps := make(map[string]*Topic) 23 | for _, topic := range topics { 24 | info, err := m.Topics.Topic(topic) 25 | if err != nil { 26 | return nil, err 27 | } 28 | tps[topic] = info.Meta 29 | } 30 | 31 | return tps, nil 32 | } 33 | 34 | func (m *MockKafkaAdmin) CreateTopics(topics map[string]*Topic) error { 35 | for name, topic := range topics { 36 | if err := m.createTopic(name, topic); err != nil { 37 | return err 38 | } 39 | } 40 | return nil 41 | } 42 | 43 | func (m *MockKafkaAdmin) createTopic(name string, info *Topic) error { 44 | topic := &MockTopic{ 45 | Name: name, 46 | Meta: info, 47 | } 48 | 49 | err := m.Topics.AddTopic(topic) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | return nil 55 | } 56 | 57 | func (m *MockKafkaAdmin) DeleteTopics(topics []string) (map[string]error, error) { 58 | for _, tp := range topics { 59 | if err := m.Topics.RemoveTopic(tp); err != nil { 60 | return nil, err 61 | } 62 | } 63 | 64 | return nil, nil 65 | } 66 | 67 | func (m *MockKafkaAdmin) Close() {} 68 | -------------------------------------------------------------------------------- /admin/mock_topics_test.go: -------------------------------------------------------------------------------- 1 | package admin 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/kstream/data" 6 | "testing" 7 | ) 8 | 9 | func TestMockPartition_Latest(t *testing.T) { 10 | mocksTopics := NewMockTopics() 11 | kafkaAdmin := &MockKafkaAdmin{ 12 | Topics: mocksTopics, 13 | } 14 | if err := kafkaAdmin.CreateTopics(map[string]*Topic{ 15 | `tp1`: { 16 | 
Name: "tp1", 17 | NumPartitions: 1, 18 | ReplicationFactor: 1, 19 | }, 20 | }); err != nil { 21 | t.Error(err) 22 | } 23 | tp, _ := mocksTopics.Topic(`tp1`) 24 | pt, _ := tp.Partition(0) 25 | for i := 1; i <= 3333; i++ { 26 | err := pt.Append(&data.Record{ 27 | Key: []byte(fmt.Sprint(i)), 28 | Value: []byte(`v`), 29 | Topic: "tp1", 30 | }) 31 | if err != nil { 32 | t.Error(err) 33 | } 34 | } 35 | 36 | if pt.Latest() != 3332 { 37 | t.Fail() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /backend/backend.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package backend 9 | 10 | import ( 11 | "time" 12 | ) 13 | 14 | type Builder func(name string) (Backend, error) 15 | 16 | type Backend interface { 17 | Name() string 18 | Set(key []byte, value []byte, expiry time.Duration) error 19 | Get(key []byte) ([]byte, error) 20 | RangeIterator(fromKy []byte, toKey []byte) Iterator 21 | Iterator() Iterator 22 | Delete(key []byte) error 23 | SetExpiry(time time.Duration) 24 | String() string 25 | Persistent() bool 26 | Close() error 27 | Destroy() error 28 | } 29 | -------------------------------------------------------------------------------- /backend/iterator.go: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | type Iterator interface { 4 | SeekToFirst() 5 | SeekToLast() 6 | Seek(key []byte) 7 | Next() 8 | Prev() 9 | Close() 10 | Key() []byte 11 | Value() []byte 12 | Valid() bool 13 | Error() error 14 | } 15 | -------------------------------------------------------------------------------- /backend/memory/memory_bench_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package memory 9 | 10 | import ( 11 | "fmt" 12 | "github.com/tryfix/log" 13 | "github.com/tryfix/metrics" 14 | "math/rand" 15 | "testing" 16 | ) 17 | 18 | func BenchmarkMemory_Set(b *testing.B) { 19 | conf := NewConfig() 20 | conf.Logger = log.NewNoopLogger() 21 | conf.MetricsReporter = metrics.NoopReporter() 22 | backend := NewMemoryBackend(conf) 23 | 24 | b.ResetTimer() 25 | b.ReportAllocs() 26 | b.RunParallel(func(pb *testing.PB) { 27 | for pb.Next() { 28 | if err := backend.Set([]byte(`100`), []byte(`100`), 0); err != nil { 29 | log.Fatal(err) 30 | } 31 | } 32 | }) 33 | } 34 | 35 | func BenchmarkMemory_Get(b *testing.B) { 36 | conf := NewConfig() 37 | conf.Logger = log.NewNoopLogger() 38 | conf.MetricsReporter = metrics.NoopReporter() 39 | backend := NewMemoryBackend(conf) 40 | numOfRecs := 1000000 41 | for i := 1; i <= numOfRecs; i++ { 42 | if err := backend.Set([]byte(fmt.Sprint(i)), []byte(`100`), 0); err != nil { 43 | log.Fatal(err) 44 | } 45 | } 46 | 47 | b.ResetTimer() 48 | b.ReportAllocs() 49 | 50 | b.RunParallel(func(pb *testing.PB) { 51 | for pb.Next() { 52 | k := fmt.Sprint(rand.Intn(numOfRecs-1) + 1) 53 | 54 | if _, err := backend.Get([]byte(k)); err != nil { 55 | b.Error(err) 56 | } 57 | } 58 | }) 59 | } 60 | 61 | func BenchmarkMemory_GetSet(b *testing.B) { 62 | conf := NewConfig() 63 | conf.Logger = log.NewNoopLogger() 64 | conf.MetricsReporter = metrics.NoopReporter() 65 | backend := NewMemoryBackend(conf) 66 | 67 | for i := 1; i <= 99999; i++ { 68 | if err := backend.Set([]byte(fmt.Sprint(rand.Intn(1000)+1)), []byte(`100`), 0); err != nil { 69 | log.Fatal(err) 70 | } 71 | } 72 | b.ResetTimer() 73 | go func() { 74 | for { 75 | if err := backend.Set([]byte(fmt.Sprint(rand.Intn(1000)+1)), []byte(`100`), 0); err != nil { 76 | b.Fatal(err) 77 | } 78 | } 79 | }() 80 | 81 | b.RunParallel(func(pb *testing.PB) { 82 | for pb.Next() { 83 | if _, err := 
backend.Get([]byte(fmt.Sprint(rand.Intn(1000) + 1))); err != nil { 84 | b.Fatal(err) 85 | } 86 | } 87 | }) 88 | } 89 | 90 | func BenchmarkMemory_Iterator(b *testing.B) { 91 | conf := NewConfig() 92 | conf.Logger = log.NewNoopLogger() 93 | conf.MetricsReporter = metrics.NoopReporter() 94 | backend := NewMemoryBackend(conf) 95 | 96 | for i := 1; i <= 999999; i++ { 97 | if err := backend.Set([]byte(fmt.Sprint(rand.Intn(999999)+1)), []byte(`100`), 0); err != nil { 98 | log.Fatal(err) 99 | } 100 | } 101 | b.ResetTimer() 102 | 103 | b.RunParallel(func(pb *testing.PB) { 104 | for pb.Next() { 105 | i := backend.Iterator() 106 | for i.Valid() { 107 | 108 | if i.Error() != nil { 109 | i.Next() 110 | continue 111 | } 112 | 113 | i.Next() 114 | } 115 | } 116 | }) 117 | } 118 | -------------------------------------------------------------------------------- /backend/memory/memory_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package memory 9 | 10 | import ( 11 | "fmt" 12 | "github.com/tryfix/log" 13 | "github.com/tryfix/metrics" 14 | "testing" 15 | "time" 16 | ) 17 | 18 | func TestMemory_Set_Expiry(t *testing.T) { 19 | conf := NewConfig() 20 | conf.ExpiredRecordCleanupInterval = 1 * time.Millisecond 21 | conf.Logger = log.NewNoopLogger() 22 | conf.MetricsReporter = metrics.NoopReporter() 23 | backend := NewMemoryBackend(conf) 24 | if err := backend.Set([]byte(`100`), []byte(`100`), 10*time.Millisecond); err != nil { 25 | log.Fatal(err) 26 | } 27 | 28 | time.Sleep(1 * time.Second) 29 | 30 | r, err := backend.Get([]byte(`100`)) 31 | if err != nil { 32 | t.Error(err) 33 | } 34 | 35 | if r != nil { 36 | t.Error(`record exist`) 37 | } 38 | } 39 | 40 | func TestMemory_Get(t *testing.T) { 41 | conf := NewConfig() 42 | conf.Logger = log.NewNoopLogger() 43 | conf.MetricsReporter = metrics.NoopReporter() 44 | backend := NewMemoryBackend(conf) 45 | 46 | for i := 1; i <= 1000; i++ { 47 | if err := backend.Set([]byte(fmt.Sprint(i)), []byte(`100`), 0); err != nil { 48 | t.Fatal(err) 49 | } 50 | } 51 | 52 | for i := 1; i <= 1000; i++ { 53 | val, err := backend.Get([]byte(fmt.Sprint(i))) 54 | if err != nil { 55 | t.Error(err) 56 | } 57 | 58 | if string(val) != `100` { 59 | t.Fail() 60 | } 61 | } 62 | 63 | } 64 | 65 | func TestMemory_Delete(t *testing.T) { 66 | conf := NewConfig() 67 | conf.Logger = log.NewNoopLogger() 68 | conf.MetricsReporter = metrics.NoopReporter() 69 | backend := NewMemoryBackend(conf) 70 | 71 | if err := backend.Set([]byte(`100`), []byte(`100`), 0); err != nil { 72 | t.Fatal(err) 73 | } 74 | 75 | if err := backend.Delete([]byte(`100`)); err != nil { 76 | t.Fatal(err) 77 | } 78 | 79 | val, err := backend.Get([]byte(`100`)) 80 | if err != nil { 81 | t.Error(err) 82 | } 83 | 84 | if val != nil { 85 | t.Fail() 86 | } 87 | 88 | } 89 | -------------------------------------------------------------------------------- 
/backend/memory/partition_memory.go: -------------------------------------------------------------------------------- 1 | package memory 2 | 3 | import ( 4 | "github.com/tryfix/kstream/backend" 5 | "github.com/tryfix/log" 6 | "github.com/tryfix/metrics" 7 | "strconv" 8 | "time" 9 | ) 10 | 11 | type PartitionMemory interface { 12 | backend.Backend 13 | Partitions() []backend.Iterator 14 | } 15 | 16 | type partitionMemory struct { 17 | partitionCount int 18 | partitions map[int]backend.Backend 19 | } 20 | 21 | func NewPartitionMemoryBackend(partitions int, logger log.Logger, reporter metrics.Reporter) PartitionMemory { 22 | partitionedBackend := &partitionMemory{ 23 | partitionCount: partitions, 24 | partitions: make(map[int]backend.Backend), 25 | } 26 | 27 | for i := 0; i < partitions; i++ { 28 | conf := NewConfig() 29 | conf.Logger = logger 30 | conf.MetricsReporter = reporter 31 | backend := NewMemoryBackend(conf) 32 | partitionedBackend.partitions[i] = backend 33 | } 34 | 35 | return partitionedBackend 36 | } 37 | 38 | func (pm *partitionMemory) Name() string { 39 | return `partitioned_memory_backend` 40 | } 41 | 42 | func (pm *partitionMemory) Set(key []byte, value []byte, expiry time.Duration) error { 43 | k, err := strconv.Atoi(string(key)) 44 | if err != nil { 45 | return err 46 | } 47 | partitionId := k % pm.partitionCount 48 | 49 | pm.partitions[partitionId].Set(key, value, expiry) 50 | 51 | return nil 52 | } 53 | 54 | func (pm *partitionMemory) Get(key []byte) ([]byte, error) { 55 | k, err := strconv.Atoi(string(key)) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | partitionId := k % pm.partitionCount 61 | 62 | return pm.partitions[partitionId].Get(key) 63 | } 64 | 65 | func (pm *partitionMemory) RangeIterator(fromKy []byte, toKey []byte) backend.Iterator { 66 | panic("implement me") 67 | } 68 | 69 | func (pm *partitionMemory) Iterator() backend.Iterator { 70 | panic("implement me") 71 | } 72 | 73 | func (pm *partitionMemory) Delete(key []byte) 
error { 74 | 75 | partitionId, err := strconv.Atoi(string(key)) 76 | if err != nil { 77 | return err 78 | } 79 | partitionId = partitionId % pm.partitionCount 80 | 81 | return pm.partitions[partitionId].Delete(key) 82 | } 83 | 84 | func (m *partitionMemory) Destroy() error { return nil } 85 | 86 | func (pm *partitionMemory) SetExpiry(time time.Duration) {} 87 | 88 | func (pm *partitionMemory) String() string { 89 | return `partition memory` 90 | } 91 | 92 | func (pm *partitionMemory) Persistent() bool { 93 | return false 94 | } 95 | 96 | func (pm *partitionMemory) Close() error { 97 | for i := 0; i < pm.partitionCount; i++ { 98 | pm.partitions[i].Close() 99 | } 100 | return nil 101 | } 102 | 103 | func (pm *partitionMemory) Partitions() []backend.Iterator { 104 | var iterators []backend.Iterator 105 | for _, partition := range pm.partitions { 106 | iterators = append(iterators, partition.Iterator()) 107 | } 108 | return iterators 109 | } 110 | -------------------------------------------------------------------------------- /backend/memory/partition_memory_bench_test.go: -------------------------------------------------------------------------------- 1 | package memory 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/kstream/backend" 6 | "github.com/tryfix/log" 7 | "github.com/tryfix/metrics" 8 | "math/rand" 9 | "sync" 10 | "testing" 11 | ) 12 | 13 | var pMemory = NewPartitionMemoryBackend(1000, log.Constructor.Log(), metrics.NoopReporter()) 14 | 15 | func BenchmarkPartitionMemory_Set(b *testing.B) { 16 | pMemory := NewPartitionMemoryBackend(1000, log.Constructor.Log(), metrics.NoopReporter()) 17 | 18 | b.RunParallel(func(pb *testing.PB) { 19 | for pb.Next() { 20 | if err := pMemory.Set([]byte(fmt.Sprint(rand.Intn(1000)+1)), []byte(`100`), 0); err != nil { 21 | log.Fatal(err) 22 | } 23 | } 24 | }) 25 | } 26 | 27 | func BenchmarkPartitionMemory_Get(b *testing.B) { 28 | pMemory := NewPartitionMemoryBackend(100, log.Constructor.Log(), metrics.NoopReporter()) 29 | 30 | 
for i := 1; i <= 10000; i++ { 31 | if err := pMemory.Set([]byte(fmt.Sprint(i)), []byte(`100`), 0); err != nil { 32 | log.Fatal(err) 33 | } 34 | } 35 | 36 | b.RunParallel(func(pb *testing.PB) { 37 | for pb.Next() { 38 | if _, err := pMemory.Get([]byte(fmt.Sprint(rand.Intn(100) + 1))); err != nil { 39 | log.Fatal(err) 40 | } 41 | } 42 | }) 43 | } 44 | 45 | func BenchmarkPartitionMemory_SetGet(b *testing.B) { 46 | pMemory := NewPartitionMemoryBackend(100, log.Constructor.Log(), metrics.NoopReporter()) 47 | 48 | for i := 1; i <= 99999; i++ { 49 | if err := pMemory.Set([]byte(fmt.Sprint(rand.Intn(1000)+1)), []byte(`100`), 0); err != nil { 50 | log.Fatal(err) 51 | } 52 | } 53 | 54 | go func() { 55 | for { 56 | if err := pMemory.Set([]byte(fmt.Sprint(rand.Intn(1000)+1)), []byte(`100`), 0); err != nil { 57 | b.Fatal(err) 58 | } 59 | } 60 | }() 61 | 62 | b.RunParallel(func(pb *testing.PB) { 63 | for pb.Next() { 64 | if _, err := pMemory.Get([]byte(fmt.Sprint(rand.Intn(1000) + 1))); err != nil { 65 | b.Fatal(err) 66 | } 67 | } 68 | }) 69 | } 70 | 71 | func BenchmarkPartitionMemory_Iterator(b *testing.B) { 72 | 73 | //var backend = NewPartitionMemoryBackend(1000) 74 | //var backend = NewPartitionMemoryBackend(100) 75 | 76 | for i := 1; i <= 99999; i++ { 77 | if err := pMemory.Set([]byte(fmt.Sprint(rand.Intn(9999)+1)), []byte(`100`), 0); err != nil { 78 | log.Fatal(err) 79 | } 80 | } 81 | 82 | //iterators := pMemory.Partitions() 83 | 84 | b.RunParallel(func(pb *testing.PB) { 85 | for pb.Next() { 86 | /*it := pMemory.Iterator() 87 | for it.Valid() { 88 | 89 | if it.Error() != nil { 90 | it.Next() 91 | continue 92 | } 93 | 94 | _, err := encoders.DriverLocationSyncEncoderBuilder().Decode(it.Value()) 95 | if err != nil { 96 | it.Next() 97 | continue 98 | } 99 | 100 | it.Next() 101 | }*/ 102 | iterators := pMemory.Partitions() 103 | wg := new(sync.WaitGroup) 104 | for _, i := range iterators { 105 | wg.Add(1) 106 | go func(it backend.Iterator, wg *sync.WaitGroup) { 107 | defer 
wg.Done() 108 | for it.Valid() { 109 | 110 | if it.Error() != nil { 111 | it.Next() 112 | continue 113 | } 114 | 115 | it.Next() 116 | continue 117 | 118 | it.Next() 119 | } 120 | }(i, wg) 121 | } 122 | 123 | wg.Wait() 124 | } 125 | }) 126 | 127 | } 128 | -------------------------------------------------------------------------------- /backend/memory/partitioner.go: -------------------------------------------------------------------------------- 1 | package memory 2 | 3 | //import "github.com/coreos/etcd/mvcc/backend" 4 | // 5 | //type Partitioner interface { 6 | // Partition(key interface{}) backend.Backend 7 | //} 8 | // 9 | // 10 | // 11 | //func (p *Partitioner) Partition() backend.Backend { 12 | // 13 | //} 14 | -------------------------------------------------------------------------------- /backend/mock_backend.go: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type MockBackend struct { 9 | name string 10 | data map[string][]byte 11 | mu *sync.Mutex 12 | expiry time.Duration 13 | } 14 | 15 | func NewMockBackend(name string, expiry time.Duration) Backend { 16 | b := &MockBackend{ 17 | name: name, 18 | data: make(map[string][]byte), 19 | mu: new(sync.Mutex), 20 | } 21 | 22 | if expiry > 0 { 23 | b.expiry = expiry 24 | } 25 | 26 | return b 27 | } 28 | 29 | func (b *MockBackend) Name() string { 30 | return b.name 31 | } 32 | 33 | func (b *MockBackend) Persistent() bool { 34 | return false 35 | } 36 | 37 | func (b *MockBackend) Set(key []byte, value []byte, expiry time.Duration) error { 38 | b.mu.Lock() 39 | defer b.mu.Unlock() 40 | 41 | var del = func() { 42 | time.Sleep(expiry) 43 | b.Delete(key) 44 | } 45 | 46 | if expiry > 0 { 47 | go del() 48 | } else { 49 | if b.expiry > 0 { 50 | go del() 51 | } 52 | } 53 | 54 | b.data[string(key)] = value 55 | return nil 56 | } 57 | 58 | func (b *MockBackend) Get(key []byte) ([]byte, error) { 59 | b.mu.Lock() 60 | 
defer b.mu.Unlock() 61 | v, ok := b.data[string(key)] 62 | if !ok { 63 | return nil, nil 64 | } 65 | 66 | return v, nil 67 | } 68 | 69 | func (b *MockBackend) RangeIterator(fromKy []byte, toKey []byte) Iterator { 70 | panic("implement me") 71 | } 72 | 73 | func (*MockBackend) Iterator() Iterator { 74 | panic("implement me") 75 | } 76 | 77 | func (b *MockBackend) Delete(key []byte) error { 78 | b.mu.Lock() 79 | defer b.mu.Unlock() 80 | delete(b.data, string(key)) 81 | return nil 82 | } 83 | 84 | func (b *MockBackend) SetExpiry(time time.Duration) { 85 | b.expiry = time 86 | } 87 | 88 | func (b *MockBackend) String() string { 89 | return b.name 90 | } 91 | 92 | func (*MockBackend) Close() error { 93 | panic("implement me") 94 | } 95 | 96 | func (*MockBackend) Destroy() error { 97 | panic("implement me") 98 | } 99 | -------------------------------------------------------------------------------- /consumer/builder.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "github.com/tryfix/log" 5 | "github.com/tryfix/metrics" 6 | ) 7 | 8 | type BuilderOption func(config *Config) 9 | 10 | func BuilderWithId(id string) BuilderOption { 11 | return func(config *Config) { 12 | config.Id = id 13 | } 14 | } 15 | 16 | func BuilderWithOptions(options ...Option) BuilderOption { 17 | return func(config *Config) { 18 | config.options.apply(options...) 
19 | } 20 | } 21 | 22 | func BuilderWithGroupId(id string) BuilderOption { 23 | return func(config *Config) { 24 | config.GroupId = id 25 | } 26 | } 27 | 28 | func BuilderWithMetricsReporter(reporter metrics.Reporter) BuilderOption { 29 | return func(config *Config) { 30 | config.MetricsReporter = reporter 31 | } 32 | } 33 | 34 | func BuilderWithLogger(logger log.Logger) BuilderOption { 35 | return func(config *Config) { 36 | config.Logger = logger 37 | } 38 | } 39 | 40 | type Builder interface { 41 | Config() *Config 42 | Build(options ...BuilderOption) (Consumer, error) 43 | } 44 | 45 | type builder struct { 46 | config *Config 47 | } 48 | 49 | func NewBuilder() Builder { 50 | return &builder{ 51 | config: NewConsumerConfig(), 52 | } 53 | } 54 | 55 | func (b *builder) Config() *Config { 56 | return b.config 57 | } 58 | 59 | func (b *builder) Build(options ...BuilderOption) (Consumer, error) { 60 | conf := *b.config 61 | for _, option := range options { 62 | option(&conf) 63 | } 64 | return NewConsumer(&conf) 65 | } 66 | 67 | type PartitionConsumerBuilder interface { 68 | Config() *Config 69 | Build(options ...BuilderOption) (PartitionConsumer, error) 70 | } 71 | 72 | type partitionConsumerBuilder struct { 73 | config *Config 74 | } 75 | 76 | func NewPartitionConsumerBuilder() PartitionConsumerBuilder { 77 | return &partitionConsumerBuilder{ 78 | config: NewConsumerConfig(), 79 | } 80 | } 81 | 82 | func (b *partitionConsumerBuilder) Config() *Config { 83 | return &*b.config 84 | } 85 | 86 | func (b *partitionConsumerBuilder) Configure(c *Config) PartitionConsumerBuilder { 87 | return &partitionConsumerBuilder{ 88 | config: c, 89 | } 90 | } 91 | 92 | func (b *partitionConsumerBuilder) Build(options ...BuilderOption) (PartitionConsumer, error) { 93 | conf := *b.config 94 | for _, option := range options { 95 | option(&conf) 96 | } 97 | return NewPartitionConsumer(&conf) 98 | } 99 | -------------------------------------------------------------------------------- 
/consumer/config.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "github.com/Shopify/sarama" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/log" 7 | "github.com/tryfix/metrics" 8 | ) 9 | 10 | type Config struct { 11 | Id string 12 | GroupId string 13 | BootstrapServers []string 14 | MetricsReporter metrics.Reporter 15 | Logger log.Logger 16 | options *consumerOptions 17 | *sarama.Config 18 | } 19 | 20 | func (c *Config) validate() error { 21 | if err := c.Config.Validate(); err != nil { 22 | return err 23 | } 24 | 25 | if c.GroupId == `` { 26 | return errors.New(`k-stream.consumer.Config: Consumer.groupId cannot be empty`) 27 | } 28 | 29 | if len(c.BootstrapServers) < 1 { 30 | return errors.New(`k-stream.consumer.Config: Consumer.BootstrapServers cannot be empty`) 31 | } 32 | 33 | return nil 34 | } 35 | 36 | func NewConsumerConfig() *Config { 37 | c := new(Config) 38 | c.setDefaults() 39 | return c 40 | } 41 | 42 | func (c *Config) setDefaults() { 43 | c.Config = sarama.NewConfig() 44 | c.Config.Version = sarama.V2_3_0_0 45 | c.Consumer.Return.Errors = true 46 | c.ChannelBufferSize = 100 47 | c.MetricsReporter = metrics.NoopReporter() 48 | c.Logger = log.NewNoopLogger() 49 | c.options = new(consumerOptions) 50 | c.options.applyDefault() 51 | } 52 | -------------------------------------------------------------------------------- /consumer/events.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package consumer 9 | 10 | import "fmt" 11 | 12 | type Event interface { 13 | String() string 14 | } 15 | 16 | type PartitionAllocated struct { 17 | tps []TopicPartition 18 | } 19 | 20 | func (p *PartitionAllocated) String() string { 21 | return fmt.Sprintf(`%v`, p.tps) 22 | } 23 | 24 | func (p *PartitionAllocated) TopicPartitions() []TopicPartition { 25 | return p.tps 26 | } 27 | 28 | type PartitionRemoved struct { 29 | tps []TopicPartition 30 | } 31 | 32 | func (p *PartitionRemoved) String() string { 33 | return fmt.Sprintf(`%v`, p.tps) 34 | } 35 | 36 | func (p *PartitionRemoved) TopicPartitions() []TopicPartition { 37 | return p.tps 38 | } 39 | 40 | type PartitionEnd struct { 41 | tps []TopicPartition 42 | } 43 | 44 | func (p *PartitionEnd) String() string { 45 | return fmt.Sprintf(`%v`, p.tps) 46 | } 47 | 48 | func (p *PartitionEnd) TopicPartitions() []TopicPartition { 49 | return p.tps 50 | } 51 | 52 | type Error struct { 53 | err error 54 | } 55 | 56 | func (p *Error) String() string { 57 | return fmt.Sprint(`consumer error`, p.err) 58 | } 59 | 60 | func (p *Error) Error() string { 61 | return fmt.Sprint(`consumer error`, p.err) 62 | } 63 | -------------------------------------------------------------------------------- /consumer/group_handler.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/Shopify/sarama" 7 | "github.com/tryfix/kstream/data" 8 | "github.com/tryfix/log" 9 | "github.com/tryfix/metrics" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | type ReBalanceHandler interface { 15 | OnPartitionRevoked(ctx context.Context, revoked []TopicPartition) error 16 | OnPartitionAssigned(ctx context.Context, assigned []TopicPartition) error 17 | } 18 | 19 | type groupHandler struct { 20 | reBalanceHandler ReBalanceHandler 21 | partitionMap map[string]*partition 22 | partitions chan Partition 23 
| logger log.Logger 24 | recordUuidExtractFunc RecordUuidExtractFunc 25 | mu *sync.Mutex 26 | metrics struct { 27 | reporter metrics.Reporter 28 | reBalancing metrics.Gauge 29 | commitLatency metrics.Observer 30 | reBalanceLatency metrics.Observer 31 | endToEndLatency metrics.Observer 32 | } 33 | } 34 | 35 | func (h *groupHandler) Setup(session sarama.ConsumerGroupSession) error { 36 | tps := h.extractTps(session.Claims()) 37 | h.logger.Info(fmt.Sprintf(`setting up partitions [%#v]`, tps)) 38 | if err := h.reBalanceHandler.OnPartitionAssigned(session.Context(), tps); err != nil { 39 | return err 40 | } 41 | 42 | h.mu.Lock() 43 | defer h.mu.Unlock() 44 | 45 | for _, tp := range tps { 46 | p := newPartition(tp) 47 | h.partitionMap[tp.String()] = p 48 | h.partitions <- p 49 | } 50 | 51 | return nil 52 | } 53 | 54 | func (h *groupHandler) Cleanup(session sarama.ConsumerGroupSession) error { 55 | tps := h.extractTps(session.Claims()) 56 | h.logger.Info(fmt.Sprintf(`cleaning up partitions [%#v]`, tps)) 57 | 58 | h.mu.Lock() 59 | for _, tp := range tps { 60 | h.partitionMap[tp.String()].close() 61 | delete(h.partitionMap, tp.String()) 62 | } 63 | h.mu.Unlock() 64 | 65 | return h.reBalanceHandler.OnPartitionRevoked(session.Context(), tps) 66 | } 67 | 68 | func (h *groupHandler) ConsumeClaim(g sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error { 69 | tp := TopicPartition{ 70 | Topic: c.Topic(), 71 | Partition: c.Partition(), 72 | } 73 | 74 | h.mu.Lock() 75 | h.partitionMap[tp.String()].groupSession = g 76 | ch := h.partitionMap[tp.String()].records 77 | h.mu.Unlock() 78 | 79 | for msg := range c.Messages() { 80 | t := time.Since(msg.Timestamp) 81 | h.metrics.endToEndLatency.Observe(float64(t.Nanoseconds()/1e3), map[string]string{ 82 | `topic`: msg.Topic, 83 | `partition`: fmt.Sprint(msg.Partition), 84 | }) 85 | 86 | record := &data.Record{ 87 | Key: msg.Key, 88 | Value: msg.Value, 89 | Offset: msg.Offset, 90 | Topic: msg.Topic, 91 | Partition: msg.Partition, 92 
| Timestamp: msg.Timestamp, 93 | Headers: data.RecordHeaders(msg.Headers), 94 | } 95 | 96 | uuid := h.recordUuidExtractFunc(record) 97 | record.UUID = uuid 98 | 99 | h.logger.Trace("record received after " + 100 | t.String() + 101 | " for " + tp.String() + 102 | " with key: " + string(msg.Key) + 103 | " and value: " + string(msg.Value) + 104 | " with record-id [" + record.UUID.String() + "]") 105 | 106 | ch <- record 107 | } 108 | 109 | return nil 110 | } 111 | 112 | func (h *groupHandler) extractTps(kafkaTps map[string][]int32) []TopicPartition { 113 | tps := make([]TopicPartition, 0) 114 | for topic, partitions := range kafkaTps { 115 | for _, p := range partitions { 116 | tps = append(tps, TopicPartition{ 117 | Topic: topic, 118 | Partition: p, 119 | }) 120 | } 121 | } 122 | return tps 123 | } 124 | -------------------------------------------------------------------------------- /consumer/mock_consumer.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "context" 5 | "github.com/google/uuid" 6 | "github.com/tryfix/kstream/admin" 7 | "github.com/tryfix/kstream/data" 8 | "github.com/tryfix/kstream/kstream/offsets" 9 | "log" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | type MockConsumerBuilder struct { 15 | Builder 16 | topics *admin.Topics 17 | } 18 | 19 | func NewMockConsumerBuilder(topics *admin.Topics) Builder { 20 | return &MockConsumerBuilder{ 21 | Builder: NewBuilder(), 22 | topics: topics, 23 | } 24 | } 25 | 26 | func (mb *MockConsumerBuilder) Build(options ...BuilderOption) (Consumer, error) { 27 | return NewMockConsumer(mb.topics), nil 28 | } 29 | 30 | type MockPartitionConsumerBuilder struct { 31 | PartitionConsumerBuilder 32 | offsets offsets.Manager 33 | topics *admin.Topics 34 | } 35 | 36 | func NewMockPartitionConsumerBuilder(topics *admin.Topics, offsets offsets.Manager) PartitionConsumerBuilder { 37 | return &MockPartitionConsumerBuilder{ 38 | PartitionConsumerBuilder: 
NewPartitionConsumerBuilder(), 39 | topics: topics, 40 | offsets: offsets, 41 | } 42 | } 43 | 44 | func (mb *MockPartitionConsumerBuilder) Build(options ...BuilderOption) (PartitionConsumer, error) { 45 | return NewMockPartitionConsumer(mb.topics, mb.offsets), nil 46 | } 47 | 48 | type mockConsumer struct { 49 | topics *admin.Topics 50 | wg *sync.WaitGroup 51 | fetchInterval time.Duration 52 | fetchBatchSize int 53 | partitions chan Partition 54 | closing chan bool 55 | InitialOffset Offset 56 | } 57 | 58 | func NewMockConsumer(topics *admin.Topics) *mockConsumer { 59 | return &mockConsumer{ 60 | topics: topics, 61 | fetchInterval: 100 * time.Microsecond, 62 | fetchBatchSize: 50, 63 | wg: new(sync.WaitGroup), 64 | InitialOffset: Earliest, 65 | closing: make(chan bool, 1), 66 | } 67 | } 68 | 69 | func (m *mockConsumer) Consume(topics []string, handler ReBalanceHandler) (chan Partition, error) { 70 | tps := make(map[string]*mockConsumerPartition) 71 | var assigned []TopicPartition 72 | 73 | for _, topic := range topics { 74 | tp, err := m.topics.Topic(topic) 75 | if err != nil { 76 | return nil, err 77 | } 78 | for p := range tp.Partitions() { 79 | tp := TopicPartition{ 80 | Topic: topic, 81 | Partition: int32(p), 82 | } 83 | assigned = append(assigned, tp) 84 | } 85 | } 86 | if err := handler.OnPartitionAssigned(context.Background(), assigned); err != nil { 87 | return nil, err 88 | } 89 | m.partitions = make(chan Partition, len(assigned)) 90 | for _, tp := range assigned { 91 | consumerPartition := &mockConsumerPartition{ 92 | tp: tp, 93 | records: make(chan *data.Record, 10000), 94 | } 95 | tps[tp.String()] = consumerPartition 96 | m.partitions <- consumerPartition 97 | m.wg.Add(1) 98 | go m.consume(consumerPartition) 99 | } 100 | 101 | return m.partitions, nil 102 | } 103 | 104 | func (m *mockConsumer) Errors() <-chan *Error { 105 | return make(<-chan *Error, 1) 106 | } 107 | 108 | func (m *mockConsumer) Close() error { 109 | m.closing <- true 110 | m.wg.Wait() 
111 | close(m.partitions) 112 | return nil 113 | } 114 | 115 | func (m *mockConsumer) consume(partition *mockConsumerPartition) { 116 | pt := m.topics.Topics()[partition.tp.Topic].Partitions()[partition.tp.Partition] 117 | 118 | offset := int64(m.InitialOffset) 119 | LOOP: 120 | for { 121 | select { 122 | case <-m.closing: 123 | break LOOP 124 | default: 125 | } 126 | 127 | time.Sleep(m.fetchInterval) 128 | 129 | records, err := pt.Fetch(offset, m.fetchBatchSize) 130 | if err != nil { 131 | log.Fatal(err) 132 | } 133 | 134 | if len(records) < 1 { 135 | continue 136 | } 137 | 138 | for _, msg := range records { 139 | partition.records <- &data.Record{ 140 | Key: msg.Key, 141 | Value: msg.Value, 142 | Offset: msg.Offset, 143 | Topic: msg.Topic, 144 | Partition: msg.Partition, 145 | Timestamp: msg.Timestamp, 146 | UUID: uuid.New(), 147 | Headers: msg.Headers, 148 | } 149 | } 150 | 151 | offset = records[len(records)-1].Offset + 1 152 | 153 | } 154 | close(partition.records) 155 | m.wg.Done() 156 | } 157 | -------------------------------------------------------------------------------- /consumer/mock_consumer_partition.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "github.com/tryfix/kstream/data" 5 | ) 6 | 7 | type mockConsumerPartition struct { 8 | tp TopicPartition 9 | records chan *data.Record 10 | } 11 | 12 | func (m *mockConsumerPartition) Wait() chan<- bool { 13 | return nil 14 | } 15 | 16 | func (m *mockConsumerPartition) Records() <-chan *data.Record { 17 | return m.records 18 | } 19 | 20 | func (m *mockConsumerPartition) Partition() TopicPartition { 21 | return m.tp 22 | } 23 | 24 | func (m *mockConsumerPartition) MarkOffset(offset int64) {} 25 | 26 | func (m *mockConsumerPartition) CommitOffset(*data.Record) error { 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /consumer/mock_partition_consumer.go: 
-------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "github.com/google/uuid" 5 | "github.com/tryfix/kstream/admin" 6 | "github.com/tryfix/kstream/data" 7 | "github.com/tryfix/kstream/kstream/offsets" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type mockPartitionConsumer struct { 13 | topics *admin.Topics 14 | offsets offsets.Manager 15 | fetchInterval time.Duration 16 | closing chan bool 17 | closed chan bool 18 | fetchBatchSize int 19 | events chan Event 20 | } 21 | 22 | func NewMockPartitionConsumer(topics *admin.Topics, offsets offsets.Manager) *mockPartitionConsumer { 23 | return &mockPartitionConsumer{ 24 | topics: topics, 25 | fetchInterval: 100 * time.Microsecond, 26 | //fetchInterval: 1 * time.Second, 27 | fetchBatchSize: 1000, 28 | closed: make(chan bool, 1), 29 | closing: make(chan bool, 1), 30 | offsets: offsets, 31 | events: make(chan Event, 100), 32 | } 33 | } 34 | 35 | func (m *mockPartitionConsumer) Consume(topic string, partition int32, offset Offset) (<-chan Event, error) { 36 | go m.consume(topic, partition, offset) 37 | return m.events, nil 38 | } 39 | 40 | func (m *mockPartitionConsumer) consume(topic string, partition int32, offset Offset) { 41 | pt := m.topics.Topics()[topic].Partitions()[int(partition)] 42 | 43 | var currentOffset = int64(offset) 44 | 45 | if offset == -1 { 46 | currentOffset = pt.Latest() + 1 47 | } 48 | 49 | LOOP: 50 | for { 51 | select { 52 | case <-m.closing: 53 | break LOOP 54 | default: 55 | 56 | } 57 | 58 | time.Sleep(m.fetchInterval) 59 | 60 | records, err := pt.Fetch(currentOffset, m.fetchBatchSize) 61 | if err != nil { 62 | log.Fatal(err) 63 | } 64 | 65 | if len(records) < 1 { 66 | m.events <- &PartitionEnd{} 67 | continue 68 | } 69 | 70 | partitionEnd, err := m.offsets.GetOffsetLatest(topic, partition) 71 | if err != nil { 72 | log.Fatal(err) 73 | } 74 | 75 | for _, msg := range records { 76 | m.events <- &data.Record{ 77 | Key: msg.Key, 78 | Value: 
msg.Value, 79 | Offset: msg.Offset, 80 | Topic: msg.Topic, 81 | Partition: msg.Partition, 82 | Timestamp: msg.Timestamp, 83 | UUID: uuid.New(), 84 | Headers: msg.Headers, 85 | } 86 | 87 | //if highWatermark == 0 || highWatermark-1 == msg.Offset { 88 | if msg.Offset == partitionEnd { 89 | m.events <- &PartitionEnd{} 90 | } 91 | } 92 | 93 | currentOffset = records[len(records)-1].Offset + 1 94 | } 95 | 96 | m.closed <- true 97 | } 98 | 99 | func (m *mockPartitionConsumer) Errors() <-chan *Error { 100 | return make(chan *Error) 101 | } 102 | 103 | func (m *mockPartitionConsumer) Close() error { 104 | m.closing <- true 105 | <-m.closed 106 | close(m.events) 107 | return nil 108 | } 109 | 110 | func (m *mockPartitionConsumer) Id() string { 111 | panic("implement me") 112 | } 113 | -------------------------------------------------------------------------------- /consumer/mock_partition_consumer_test.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/kstream/admin" 6 | "github.com/tryfix/kstream/data" 7 | "github.com/tryfix/kstream/kstream/offsets" 8 | "testing" 9 | ) 10 | 11 | func TestMockPartitionConsumer_Consume(t *testing.T) { 12 | mocksTopics := admin.NewMockTopics() 13 | kafkaAdmin := &admin.MockKafkaAdmin{ 14 | Topics: mocksTopics, 15 | } 16 | if err := kafkaAdmin.CreateTopics(map[string]*admin.Topic{ 17 | `tp1`: { 18 | Name: "tp1", 19 | NumPartitions: 1, 20 | ReplicationFactor: 1, 21 | }, 22 | }); err != nil { 23 | t.Error(err) 24 | } 25 | tp, _ := mocksTopics.Topic(`tp1`) 26 | pt, _ := tp.Partition(0) 27 | 28 | t.Run(`ZeroMessage`, func(t *testing.T) { 29 | con := NewMockPartitionConsumer(mocksTopics, &offsets.MockManager{Topics: mocksTopics}) 30 | ch, err := con.Consume(`tp1`, 0, Earliest) 31 | if err != nil { 32 | t.Error(err) 33 | } 34 | 35 | var count int 36 | 37 | L: 38 | for msg := range ch { 39 | if _, ok := msg.(*PartitionEnd); ok { 40 | break L 41 | 
} 42 | count++ 43 | } 44 | 45 | if count != 0 { 46 | t.Error(`expected 0 have `, count) 47 | t.Fail() 48 | } 49 | }) 50 | 51 | for i := 1; i <= 3333; i++ { 52 | err := pt.Append(&data.Record{ 53 | Key: []byte(fmt.Sprint(i)), 54 | Value: []byte(`v`), 55 | Topic: "tp1", 56 | }) 57 | if err != nil { 58 | t.Error(err) 59 | } 60 | } 61 | 62 | t.Run(`Earliest`, func(t *testing.T) { 63 | con := NewMockPartitionConsumer(mocksTopics, &offsets.MockManager{Topics: mocksTopics}) 64 | ch, err := con.Consume(`tp1`, 0, Earliest) 65 | if err != nil { 66 | t.Error(err) 67 | } 68 | 69 | var count int 70 | 71 | L: 72 | for msg := range ch { 73 | if _, ok := msg.(*PartitionEnd); ok { 74 | break L 75 | } 76 | count++ 77 | } 78 | 79 | if count != 3333 { 80 | t.Error(`expected 3333 have `, count) 81 | t.Fail() 82 | } 83 | }) 84 | 85 | t.Run(`Latest`, func(t *testing.T) { 86 | con := NewMockPartitionConsumer(mocksTopics, &offsets.MockManager{Topics: mocksTopics}) 87 | ch, err := con.Consume(`tp1`, 0, Latest) 88 | if err != nil { 89 | t.Error(err) 90 | } 91 | 92 | var count int 93 | 94 | L: 95 | for msg := range ch { 96 | if _, ok := msg.(*PartitionEnd); ok { 97 | break L 98 | } 99 | count++ 100 | } 101 | 102 | if count != 0 { 103 | t.Error(`expected 0 have `, count) 104 | t.Fail() 105 | } 106 | }) 107 | 108 | } 109 | -------------------------------------------------------------------------------- /consumer/partition.go: -------------------------------------------------------------------------------- 1 | package consumer 2 | 3 | import ( 4 | "github.com/Shopify/sarama" 5 | "github.com/tryfix/kstream/data" 6 | ) 7 | 8 | type Partition interface { 9 | Records() <-chan *data.Record 10 | Partition() TopicPartition 11 | MarkOffset(offset int64) 12 | CommitOffset(*data.Record) error 13 | } 14 | 15 | type partition struct { 16 | records chan *data.Record 17 | groupSession sarama.ConsumerGroupSession 18 | partition TopicPartition 19 | } 20 | 21 | func newPartition(tp TopicPartition) *partition { 
22 | return &partition{ 23 | records: make(chan *data.Record, 1), 24 | partition: tp, 25 | } 26 | } 27 | 28 | func (p *partition) Records() <-chan *data.Record { 29 | return p.records 30 | } 31 | 32 | func (p *partition) Partition() TopicPartition { 33 | return p.partition 34 | } 35 | 36 | func (p *partition) MarkOffset(offset int64) { 37 | p.groupSession.MarkOffset(p.partition.Topic, p.partition.Partition, offset+1, ``) 38 | } 39 | 40 | func (p *partition) CommitOffset(r *data.Record) error { 41 | p.groupSession.MarkOffset(r.Topic, r.Partition, r.Offset+1, ``) 42 | return nil 43 | } 44 | 45 | func (p *partition) close() { 46 | close(p.records) 47 | } 48 | -------------------------------------------------------------------------------- /data/record.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/Shopify/sarama" 7 | "github.com/google/uuid" 8 | "time" 9 | ) 10 | 11 | //type RecordHeaders interface { 12 | // Read(name []byte) []byte 13 | // All() []*sarama.RecordHeader 14 | //} 15 | 16 | type RecordHeaders []*sarama.RecordHeader 17 | 18 | func (h RecordHeaders) Read(name []byte) []byte { 19 | for _, header := range h { 20 | if bytes.Equal(header.Key, name) { 21 | return header.Value 22 | } 23 | } 24 | 25 | return nil 26 | } 27 | 28 | func (h RecordHeaders) All() []*sarama.RecordHeader { 29 | return h 30 | } 31 | 32 | type Record struct { 33 | Key, Value []byte 34 | Topic string 35 | Partition int32 36 | Offset int64 37 | Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp 38 | BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp 39 | Headers RecordHeaders // only set if kafka is version 0.11+ 40 | UUID uuid.UUID 41 | } 42 | 43 | func (r *Record) String() string { 44 | return fmt.Sprintf(`%s_%d_%d`, r.Topic, r.Partition, r.Offset) 45 | } 46 | 47 | func (r *Record) RecordKey() 
interface{} { 48 | return r.Key 49 | } 50 | 51 | func (r *Record) RecordValue() interface{} { 52 | return r.Value 53 | } 54 | -------------------------------------------------------------------------------- /data/record_test.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestRecord_RecordKey(t *testing.T) { 10 | rec := Record{ 11 | Key: []byte(`k`), 12 | Value: []byte(`v`), 13 | } 14 | if !reflect.DeepEqual(rec.RecordKey(), []byte(`k`)) { 15 | t.Fail() 16 | } 17 | } 18 | 19 | func TestRecord_RecordValue(t *testing.T) { 20 | rec := Record{ 21 | Key: []byte(`k`), 22 | Value: []byte(`v`), 23 | } 24 | if !reflect.DeepEqual(rec.RecordValue(), []byte(`v`)) { 25 | t.Fail() 26 | } 27 | } 28 | 29 | func TestRecord_String(t *testing.T) { 30 | r := Record{ 31 | Key: []byte(`k`), 32 | Value: []byte(`v`), 33 | Offset: 1000, 34 | Topic: `test`, 35 | Partition: 1, 36 | } 37 | if r.String() != fmt.Sprintf(`%s_%d_%d`, r.Topic, r.Partition, r.Offset) { 38 | t.Fail() 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /examples/example_1/encoders/encoders.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_1/events" 5 | "github.com/tryfix/kstream/kstream/encoding" 6 | ) 7 | 8 | var KeyEncoder = func() encoding.Encoder { return Int64Encoder{} } 9 | 10 | var UuidKeyEncoder = func() encoding.Encoder { return UuidEncoder{} } 11 | 12 | var TransactionReceivedEncoder = func() encoding.Encoder { return TransactionEncoder{} } 13 | 14 | var AccountCreditedEncoder = func() encoding.Encoder { return events.AccountCredited{} } 15 | 16 | var AccountDebitedEncoder = func() encoding.Encoder { return events.AccountDebited{} } 17 | 18 | var AccountDetailsUpdatedEncoder = func() encoding.Encoder { return 
events.AccountDetailsUpdated{} } 19 | 20 | var CustomerProfileUpdatedEncoder = func() encoding.Encoder { return events.CustomerProfileUpdated{} } 21 | 22 | var MessageEncoder = func() encoding.Encoder { return events.MessageCreated{} } 23 | -------------------------------------------------------------------------------- /examples/example_1/encoders/int64_encoder.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | "github.com/google/uuid" 5 | "github.com/tryfix/errors" 6 | "reflect" 7 | "strconv" 8 | ) 9 | 10 | type Int64Encoder struct{} 11 | 12 | func (Int64Encoder) Encode(v interface{}) ([]byte, error) { 13 | 14 | i, ok := v.(int64) 15 | if !ok { 16 | j, k := v.(int) 17 | if !k { 18 | return nil, errors.Errorf(`invalid type [%v] expected int64`, reflect.TypeOf(v)) 19 | } 20 | i = int64(j) 21 | } 22 | 23 | return []byte(strconv.FormatInt(i, 10)), nil 24 | } 25 | 26 | func (Int64Encoder) Decode(data []byte) (interface{}, error) { 27 | i, err := strconv.ParseInt(string(data), 10, 64) 28 | if err != nil { 29 | return nil, errors.WithPrevious(err, `cannot decode data`) 30 | } 31 | 32 | return i, nil 33 | } 34 | 35 | type UuidEncoder struct{} 36 | 37 | func (UuidEncoder) Encode(v interface{}) ([]byte, error) { 38 | i, ok := v.(uuid.UUID) 39 | if !ok { 40 | return nil, errors.Errorf(`invalid type [%v] expected int64`, reflect.TypeOf(v)) 41 | } 42 | 43 | return i.MarshalText() 44 | } 45 | 46 | func (UuidEncoder) Decode(data []byte) (interface{}, error) { 47 | uid := uuid.UUID{} 48 | err := uid.UnmarshalText(data) 49 | if err != nil { 50 | return nil, errors.WithPrevious(err, `cannot decode data`) 51 | } 52 | 53 | return uid, nil 54 | } 55 | -------------------------------------------------------------------------------- /examples/example_1/encoders/transaction_encoder.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | 
"encoding/json" 5 | "fmt" 6 | "github.com/tryfix/errors" 7 | "github.com/tryfix/kstream/examples/example_1/events" 8 | ) 9 | 10 | type TransactionEncoder struct { 11 | ID string `json:"id"` 12 | Type string `json:"type"` 13 | Body interface{} `json:"body"` 14 | Timestamp int64 `json:"timestamp"` 15 | } 16 | 17 | func (t TransactionEncoder) Encode(data interface{}) ([]byte, error) { 18 | panic("implement me") 19 | } 20 | 21 | func (t TransactionEncoder) Decode(data []byte) (interface{}, error) { 22 | te := TransactionEncoder{} 23 | err := json.Unmarshal(data, &te) 24 | if err != nil { 25 | return nil, err 26 | } 27 | switch te.Type { 28 | case `account_credited`: 29 | ac := events.AccountCredited{} 30 | err := json.Unmarshal(data, &ac) 31 | if err != nil { 32 | return nil, err 33 | } 34 | return ac, nil 35 | 36 | case `account_debited`: 37 | ad := events.AccountDebited{} 38 | err := json.Unmarshal(data, &ad) 39 | if err != nil { 40 | return nil, err 41 | } 42 | return ad, nil 43 | 44 | default: 45 | return nil, errors.New(fmt.Sprintf(`unexpected type received :- %v`, te.Type)) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /examples/example_1/events/account_credited.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import "encoding/json" 4 | 5 | type AccountCredited struct { 6 | ID string `json:"id"` 7 | Type string `json:"type"` 8 | Body struct { 9 | AccountNo int64 `json:"account_no"` 10 | TransactionId int64 `json:"transaction_id"` 11 | Amount float64 `json:"amount"` 12 | Reason string `json:"reason"` 13 | DebitedFrom int64 `json:"debited_from"` 14 | CreditedAt int64 `json:"credited_at"` 15 | Location string `json:"location"` 16 | } `json:"body"` 17 | Timestamp int64 `json:"timestamp"` 18 | } 19 | 20 | func (a AccountCredited) Encode(data interface{}) ([]byte, error) { 21 | b, err := json.Marshal(data) 22 | if err != nil { 23 | return nil, err 24 | } 
25 | 26 | return b, nil 27 | } 28 | 29 | func (a AccountCredited) Decode(data []byte) (interface{}, error) { 30 | ac := AccountCredited{} 31 | err := json.Unmarshal(data, &ac) 32 | if err != nil { 33 | return nil, err 34 | } 35 | return ac, nil 36 | } 37 | -------------------------------------------------------------------------------- /examples/example_1/events/account_debited.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import "encoding/json" 4 | 5 | type AccountDebited struct { 6 | ID string `json:"id"` 7 | Type string `json:"type"` 8 | Body struct { 9 | AccountNo int64 `json:"account_no"` 10 | Amount float64 `json:"amount"` 11 | TransactionId int64 `json:"transaction_id"` 12 | Reason string `json:"reason"` 13 | CreditedTo int64 `json:"credited_to"` 14 | DebitedAt int64 `json:"debited_at"` 15 | Location string `json:"location"` 16 | } `json:"body"` 17 | Timestamp int64 `json:"timestamp"` 18 | } 19 | 20 | func (ad AccountDebited) Encode(data interface{}) ([]byte, error) { 21 | b, err := json.Marshal(data) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | return b, nil 27 | } 28 | 29 | func (ad AccountDebited) Decode(data []byte) (interface{}, error) { 30 | debited := AccountDebited{} 31 | err := json.Unmarshal(data, &debited) 32 | if err != nil { 33 | return nil, err 34 | } 35 | return debited, nil 36 | } 37 | -------------------------------------------------------------------------------- /examples/example_1/events/account_details_updated.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/google/uuid" 6 | ) 7 | 8 | type AccountDetailsUpdated struct { 9 | ID string `json:"id"` 10 | Type string `json:"type"` 11 | Body struct { 12 | AccountNo int64 `json:"account_no"` 13 | AccountType string `json:"account_type"` 14 | CustomerID uuid.UUID `json:"customer_id"` 15 | Branch string 
`json:"branch"` 16 | BranchCode int `json:"branch_code"` 17 | UpdatedAt int64 `json:"updated_at"` 18 | } `json:"body"` 19 | Timestamp int64 `json:"timestamp"` 20 | } 21 | 22 | func (a AccountDetailsUpdated) Encode(data interface{}) ([]byte, error) { 23 | b, err := json.Marshal(data) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return b, nil 29 | } 30 | 31 | func (a AccountDetailsUpdated) Decode(data []byte) (interface{}, error) { 32 | ad := AccountDetailsUpdated{} 33 | err := json.Unmarshal(data, &ad) 34 | if err != nil { 35 | return nil, err 36 | } 37 | return ad, nil 38 | } 39 | -------------------------------------------------------------------------------- /examples/example_1/events/customer_profile.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/google/uuid" 6 | ) 7 | 8 | type CustomerProfileUpdated struct { 9 | ID uuid.UUID `json:"id"` 10 | Type string `json:"type"` 11 | Body struct { 12 | CustomerID uuid.UUID `json:"customer_id"` 13 | CustomerName string `json:"customer_name"` 14 | NIC string `json:"nic"` 15 | ContactDetails struct { 16 | Phone string `json:"phone"` 17 | Email string `json:"email"` 18 | Address string `json:"address"` 19 | } `json:"contact_details"` 20 | DateOfBirth string `json:"date_of_birth"` 21 | UpdatedAt int64 `json:"updated_at"` 22 | } `json:"body"` 23 | Timestamp int64 `json:"timestamp"` 24 | } 25 | 26 | func (c CustomerProfileUpdated) Encode(data interface{}) ([]byte, error) { 27 | b, err := json.Marshal(data) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | return b, nil 33 | } 34 | 35 | func (c CustomerProfileUpdated) Decode(data []byte) (interface{}, error) { 36 | cp := CustomerProfileUpdated{} 37 | err := json.Unmarshal(data, &cp) 38 | if err != nil { 39 | return nil, err 40 | } 41 | return cp, nil 42 | } 43 | -------------------------------------------------------------------------------- 
/examples/example_1/events/message.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/google/uuid" 6 | ) 7 | 8 | type MessageCreated struct { 9 | ID string `json:"id"` 10 | Type string `json:"type"` 11 | Body struct { 12 | CustomerID uuid.UUID `json:"customer_id"` 13 | Text string `json:"text"` 14 | Phone string `json:"phone"` 15 | Email string `json:"email"` 16 | Address string `json:"address"` 17 | } `json:"body"` 18 | Timestamp int64 `json:"timestamp"` 19 | } 20 | 21 | func (m MessageCreated) Encode(data interface{}) ([]byte, error) { 22 | b, err := json.Marshal(data) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | return b, nil 28 | } 29 | 30 | func (m MessageCreated) Decode(data []byte) (interface{}, error) { 31 | mc := MessageCreated{} 32 | err := json.Unmarshal(data, &mc) 33 | if err != nil { 34 | return nil, err 35 | } 36 | return mc, nil 37 | } 38 | -------------------------------------------------------------------------------- /examples/example_1/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/tryfix/kstream/examples/example_1/stream" 4 | 5 | func main() { 6 | stream.Init() 7 | } 8 | -------------------------------------------------------------------------------- /examples/example_1/stream/account_credited.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/google/uuid" 7 | "github.com/tryfix/kstream/examples/example_1/events" 8 | kstream "github.com/tryfix/kstream/kstream" 9 | "github.com/tryfix/kstream/kstream/branch" 10 | "github.com/tryfix/kstream/kstream/encoding" 11 | "time" 12 | ) 13 | 14 | type AccountCredited struct { 15 | Upstream kstream.Stream 16 | AccountDetailTable kstream.GlobalTable 17 | CustomerProfileTable kstream.GlobalTable 18 | 
KeyEncoder func() encoding.Encoder 19 | MessageEncoder func() encoding.Encoder 20 | } 21 | 22 | func (ac AccountCredited) Init() { 23 | accountCreditedBranches := ac.Upstream.Branch([]branch.Details{ 24 | { 25 | Name: `account_credited`, 26 | Predicate: func(ctx context.Context, key interface{}, val interface{}) (b bool, e error) { 27 | _, ok := val.(events.AccountCredited) 28 | return ok, nil 29 | }, 30 | }}) 31 | 32 | accountCreditedBranch := accountCreditedBranches[0] 33 | 34 | filteredAccountCredited := accountCreditedBranch.Filter(ac.filterFromTimestamp) 35 | 36 | joinedCreditedAccountDetails := filteredAccountCredited.JoinGlobalTable(ac.AccountDetailTable, ac.accountCreditedAccountDetailsKeyMapping, ac.accountCreditedAccountDetailsMapping, 1) //1 for inner join 37 | 38 | joinedCreditedCustomerProfile := joinedCreditedAccountDetails.JoinGlobalTable(ac.CustomerProfileTable, ac.accountCreditedMessageCustomerProfileKeyMapping, ac.accountMessageCustomerProfileDetailsMapping, 1) 39 | 40 | joinedCreditedCustomerProfile.To(`message`, ac.KeyEncoder, ac.MessageEncoder) 41 | } 42 | 43 | func (ac AccountCredited) filterFromTimestamp(ctx context.Context, key, value interface{}) (b bool, e error) { 44 | 45 | accCredited, _ := value.(events.AccountCredited) 46 | if time.Now().UnixNano()/1e6-accCredited.Timestamp > 300000 { 47 | return false, nil 48 | } 49 | 50 | return true, nil 51 | } 52 | 53 | func (ac AccountCredited) accountCreditedAccountDetailsKeyMapping(_, value interface{}) (interface{}, error) { 54 | 55 | accCredited, _ := value.(events.AccountCredited) 56 | 57 | return accCredited.Body.AccountNo, nil 58 | } 59 | 60 | func (ac AccountCredited) accountCreditedAccountDetailsMapping(left interface{}, right interface{}) (joined interface{}, err error) { 61 | 62 | l, _ := left.(events.AccountCredited) 63 | r, _ := right.(events.AccountDetailsUpdated) 64 | 65 | dateTime := time.Unix(l.Body.CreditedAt, 0).Format(time.RFC1123) 66 | text := fmt.Sprintf(`Your a/c %d is 
credited with %v USD on %v at %v`, l.Body.AccountNo, l.Body.Amount, dateTime, l.Body.Location) 67 | 68 | message := events.MessageCreated{ 69 | ID: uuid.New().String(), 70 | Type: "message_created", 71 | Timestamp: time.Now().UnixNano() / 1e6, 72 | } 73 | 74 | message.Body.CustomerID = r.Body.CustomerID 75 | message.Body.Text = text 76 | 77 | return message, nil 78 | } 79 | 80 | func (ac AccountCredited) accountCreditedMessageCustomerProfileKeyMapping(key interface{}, value interface{}) (mappedKey interface{}, err error) { 81 | 82 | message, _ := value.(events.MessageCreated) 83 | 84 | return message.Body.CustomerID, nil 85 | } 86 | 87 | func (ac AccountCredited) accountMessageCustomerProfileDetailsMapping(left interface{}, right interface{}) (joined interface{}, err error) { 88 | 89 | l, _ := left.(events.MessageCreated) 90 | r, _ := right.(events.CustomerProfileUpdated) 91 | 92 | l.Body.Address = r.Body.ContactDetails.Address 93 | l.Body.Phone = r.Body.ContactDetails.Phone 94 | l.Body.Email = r.Body.ContactDetails.Email 95 | 96 | return l, nil 97 | } 98 | -------------------------------------------------------------------------------- /examples/example_1/stream/account_debited.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/google/uuid" 7 | "github.com/tryfix/kstream/examples/example_1/events" 8 | "github.com/tryfix/kstream/kstream" 9 | "github.com/tryfix/kstream/kstream/branch" 10 | "github.com/tryfix/kstream/kstream/encoding" 11 | "time" 12 | ) 13 | 14 | type AccountDebited struct { 15 | Upstream kstream.Stream 16 | AccountDetailTable kstream.GlobalTable 17 | CustomerProfileTable kstream.GlobalTable 18 | KeyEncoder func() encoding.Encoder 19 | MessageEncoder func() encoding.Encoder 20 | } 21 | 22 | func (ad AccountDebited) Init() { 23 | accountDebitedBranches := ad.Upstream.Branch([]branch.Details{{Name: `account_debited`, Predicate: func(ctx 
context.Context, key interface{}, val interface{}) (b bool, e error) { 24 | _, ok := val.(events.AccountDebited) 25 | return ok, nil 26 | }}}) 27 | 28 | accountDebitedBranch := accountDebitedBranches[0] 29 | 30 | filteredAccountDebited := accountDebitedBranch.Filter(ad.filterFromTimestamp) 31 | 32 | joinedDebitedAccountDetails := filteredAccountDebited.JoinGlobalTable(ad.AccountDetailTable, ad.accountDebitedAccountDetailsKeyMapping, ad.accountDebitedAccountDetailsMapping, 1) //1 for inner join 33 | 34 | joinedDebitedCustomerProfile := joinedDebitedAccountDetails.JoinGlobalTable(ad.CustomerProfileTable, ad.accountDebitedMessageCustomerProfileKeyMapping, ad.accountMessageCustomerProfileDetailsMapping, 1) 35 | 36 | joinedDebitedCustomerProfile.To(`message`, ad.KeyEncoder, ad.MessageEncoder) 37 | } 38 | 39 | func (ad AccountDebited) filterFromTimestamp(ctx context.Context, key, value interface{}) (b bool, e error) { 40 | 41 | accDebited, _ := value.(events.AccountDebited) 42 | if time.Now().UnixNano()/1e6-accDebited.Timestamp > 300000 { 43 | return false, nil 44 | } 45 | 46 | return true, nil 47 | } 48 | 49 | func (ad AccountDebited) accountDebitedAccountDetailsKeyMapping(key interface{}, value interface{}) (mappedKey interface{}, err error) { 50 | 51 | accDebited, _ := value.(events.AccountDebited) 52 | 53 | return accDebited.Body.AccountNo, nil 54 | } 55 | 56 | func (ad AccountDebited) accountDebitedAccountDetailsMapping(left interface{}, right interface{}) (joined interface{}, err error) { 57 | 58 | l, _ := left.(events.AccountDebited) 59 | r, _ := right.(events.AccountDetailsUpdated) 60 | 61 | dateTime := time.Unix(l.Body.DebitedAt, 0).Format(time.RFC1123) 62 | text := fmt.Sprintf(`Your a/c %d is debited with %v USD on %v at %v`, l.Body.AccountNo, l.Body.Amount, dateTime, l.Body.Location) 63 | 64 | message := events.MessageCreated{ 65 | ID: uuid.New().String(), 66 | Type: "message_created", 67 | Timestamp: time.Now().UnixNano() / 1e6, 68 | } 69 | 70 | 
message.Body.CustomerID = r.Body.CustomerID 71 | message.Body.Text = text 72 | 73 | return message, nil 74 | } 75 | 76 | func (ad AccountDebited) accountDebitedMessageCustomerProfileKeyMapping(key interface{}, value interface{}) (mappedKey interface{}, err error) { 77 | 78 | message, _ := value.(events.MessageCreated) 79 | 80 | return message.Body.CustomerID, nil 81 | } 82 | 83 | func (ad AccountDebited) accountMessageCustomerProfileDetailsMapping(left interface{}, right interface{}) (joined interface{}, err error) { 84 | 85 | l, _ := left.(events.MessageCreated) 86 | r, _ := right.(events.CustomerProfileUpdated) 87 | 88 | l.Body.Address = r.Body.ContactDetails.Address 89 | l.Body.Phone = r.Body.ContactDetails.Phone 90 | l.Body.Email = r.Body.ContactDetails.Email 91 | 92 | return l, nil 93 | } 94 | -------------------------------------------------------------------------------- /examples/example_1/stream/account_details_global_table.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_1/encoders" 5 | kstream "github.com/tryfix/kstream/kstream" 6 | ) 7 | 8 | func initAccountDetailTable(builder *kstream.StreamBuilder) kstream.GlobalTable { 9 | 10 | return builder.GlobalTable( 11 | `account_detail`, 12 | encoders.KeyEncoder, 13 | encoders.AccountDetailsUpdatedEncoder, 14 | `account_detail_store`) 15 | } 16 | -------------------------------------------------------------------------------- /examples/example_1/stream/customer_profile_global_table.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_1/encoders" 5 | kstream "github.com/tryfix/kstream/kstream" 6 | ) 7 | 8 | func initCustomerProfileTable(builder *kstream.StreamBuilder) kstream.GlobalTable { 9 | 10 | return builder.GlobalTable( 11 | `customer_profile`, 12 | encoders.KeyEncoder, 13 
| encoders.CustomerProfileUpdatedEncoder, 14 | `customer_profile_store`) 15 | } 16 | -------------------------------------------------------------------------------- /examples/example_1/stream/transaction_stream.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_1/encoders" 5 | kstream "github.com/tryfix/kstream/kstream" 6 | ) 7 | 8 | func initTransactionStream(builder *kstream.StreamBuilder) kstream.Stream { 9 | return builder.Stream( 10 | `transaction`, 11 | encoders.KeyEncoder, 12 | encoders.TransactionReceivedEncoder, 13 | kstream.WithConfig(map[string]interface{}{ 14 | //`stream.processor.retry`: 2, 15 | //`stream.processor.retry.interval`: 3000, 16 | //`stream.processor.changelog`: false, 17 | //`stream.processor.changelog.minInSyncReplicas`: 2, 18 | //`stream.processor.changelog.replicationFactor`: 3, 19 | //`stream.processor.changelog.buffered`: true, 20 | //`stream.processor.changelog.BufferedSize`: 100, 21 | })) 22 | } 23 | -------------------------------------------------------------------------------- /examples/example_2/domain/variables.go: -------------------------------------------------------------------------------- 1 | package domain 2 | 3 | const ( 4 | ABCTopic = `common.abc` 5 | ) 6 | -------------------------------------------------------------------------------- /examples/example_2/encoders/common_encoder.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/tryfix/errors" 7 | "github.com/tryfix/kstream/examples/example_2/events" 8 | ) 9 | 10 | type CommonEncoder struct { 11 | ID string `json:"id"` 12 | Type string `json:"type"` 13 | Body interface{} `json:"body"` 14 | Timestamp int64 `json:"timestamp"` 15 | } 16 | 17 | func (t CommonEncoder) Encode(data interface{}) ([]byte, error) { 18 | panic("implement me") 19 
| } 20 | 21 | func (t CommonEncoder) Decode(data []byte) (interface{}, error) { 22 | te := CommonEncoder{} 23 | err := json.Unmarshal(data, &te) 24 | if err != nil { 25 | return nil, err 26 | } 27 | switch te.Type { 28 | case `aa`: 29 | ac := events.AA{} 30 | err := json.Unmarshal(data, &ac) 31 | if err != nil { 32 | return nil, err 33 | } 34 | return ac, nil 35 | 36 | case `bb`: 37 | ad := events.BB{} 38 | err := json.Unmarshal(data, &ad) 39 | if err != nil { 40 | return nil, err 41 | } 42 | return ad, nil 43 | 44 | case `cc`: 45 | ad := events.CC{} 46 | err := json.Unmarshal(data, &ad) 47 | if err != nil { 48 | return nil, err 49 | } 50 | return ad, nil 51 | 52 | default: 53 | return nil, errors.New(fmt.Sprintf(`unexpected type received :- %v`, te.Type)) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /examples/example_2/encoders/encoders.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_2/events" 5 | "github.com/tryfix/kstream/kstream/encoding" 6 | ) 7 | 8 | //var KeyEncoder = func() encoding.Encoder { return Int64Encoder{} } 9 | var StringEncoder = func() encoding.Encoder { return encoding.StringEncoder{} } 10 | 11 | var CommonABEncoder = func() encoding.Encoder { return CommonEncoder{} } 12 | 13 | var AAEncoder = func() encoding.Encoder { return events.AA{} } 14 | var BBEncoder = func() encoding.Encoder { return events.BB{} } 15 | var CCEncoder = func() encoding.Encoder { return events.CC{} } 16 | -------------------------------------------------------------------------------- /examples/example_2/encoders/int64_encoder.go: -------------------------------------------------------------------------------- 1 | package encoders 2 | 3 | import ( 4 | "github.com/tryfix/errors" 5 | "reflect" 6 | "strconv" 7 | ) 8 | 9 | type Int64Encoder struct{} 10 | 11 | func (Int64Encoder) Encode(v interface{}) 
// AA is the example event of type `aa`. The type doubles as its own JSON
// codec via Encode/Decode.
type AA struct {
	ID        string `json:"id"`
	Type      string `json:"type"`
	AAA       string `json:"aaa"`
	Timestamp int64  `json:"timestamp"`
}

// Encode serialises the given value to JSON.
func (a AA) Encode(data interface{}) ([]byte, error) {
	return json.Marshal(data)
}

// Decode parses JSON bytes into an AA value.
func (a AA) Decode(data []byte) (interface{}, error) {
	var event AA
	if err := json.Unmarshal(data, &event); err != nil {
		return nil, err
	}
	return event, nil
}
// ABC is the fully-joined example event combining the fields of AA, BB and
// CC. The type doubles as its own JSON codec via Encode/Decode.
type ABC struct {
	ID         string `json:"id"`
	Type       string `json:"type"`
	AAA        string `json:"aaa"`
	BBB        string `json:"bbb"`
	CCC        string `json:"ccc"`
	TimestampA int64  `json:"timestamp_a"`
	TimestampB int64  `json:"timestamp_b"`
	TimestampC int64  `json:"timestamp_c"`
}

// Encode serialises the given value to JSON.
func (a ABC) Encode(data interface{}) ([]byte, error) {
	return json.Marshal(data)
}

// Decode parses JSON bytes into an ABC value.
func (a ABC) Decode(data []byte) (interface{}, error) {
	var event ABC
	if err := json.Unmarshal(data, &event); err != nil {
		return nil, err
	}
	return event, nil
}
| return debited, nil 28 | } 29 | -------------------------------------------------------------------------------- /examples/example_2/events/c.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import "encoding/json" 4 | 5 | type CC struct { 6 | ID string `json:"id"` 7 | Type string `json:"type"` 8 | CCC string `json:"ccc"` 9 | Timestamp int64 `json:"timestamp"` 10 | } 11 | 12 | func (ad CC) Encode(data interface{}) ([]byte, error) { 13 | b, err := json.Marshal(data) 14 | if err != nil { 15 | return nil, err 16 | } 17 | 18 | return b, nil 19 | } 20 | 21 | func (ad CC) Decode(data []byte) (interface{}, error) { 22 | debited := CC{} 23 | err := json.Unmarshal(data, &debited) 24 | if err != nil { 25 | return nil, err 26 | } 27 | return debited, nil 28 | } 29 | -------------------------------------------------------------------------------- /examples/example_2/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/tryfix/kstream/examples/example_1/stream" 4 | 5 | func main() { 6 | 7 | stream.Init() 8 | } 9 | -------------------------------------------------------------------------------- /examples/example_2/stream/abcCommonStream.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "github.com/tryfix/kstream/examples/example_2/domain" 5 | "github.com/tryfix/kstream/examples/example_2/encoders" 6 | "github.com/tryfix/kstream/kstream" 7 | ) 8 | 9 | func initCommonStream(builder *kstream.StreamBuilder) kstream.Stream { 10 | str := builder.Stream( 11 | domain.ABCTopic, 12 | encoders.StringEncoder, 13 | encoders.CommonABEncoder, 14 | kstream.WithConfig(map[string]interface{}{ 15 | //`stream.processor.retry`: 2, 16 | //`stream.processor.retry.interval`: 3000, 17 | //`stream.processor.changelog`: false, 18 | //`stream.processor.changelog.minInSyncReplicas`: 2, 19 | 
// Init configures and starts the example_2 KStream application: it builds the
// stream topology returned by InitStreams, registers the two global-table
// backing stores, and blocks in stream.Start() until SIGINT triggers a
// graceful shutdown.
func Init() {

	// Global logger used by the kstream library itself.
	log.StdLogger = log.Constructor.Log(
		log.WithLevel(`TRACE`),
		log.WithColors(true),
	)

	// Separate logger instance handed to the stream builder below.
	Logger := log.NewLog(
		log.WithLevel(`TRACE`),
		log.WithColors(true),
	).Log()

	// Builder configuration: single consumer against a local broker, with
	// asynchronous processing and the changelog feature disabled.
	builderConfig := kstream.NewStreamBuilderConfig()
	builderConfig.BootstrapServers = []string{`localhost:9092`}
	builderConfig.ApplicationId = `k_stream_example_2`
	builderConfig.ConsumerCount = 1
	builderConfig.Host = `localhost:8100`
	builderConfig.AsyncProcessing = true
	//builderConfig.Store.StorageDir = `storage`
	builderConfig.Store.Http.Host = `:9002`
	builderConfig.ChangeLog.Enabled = false
	//builderConfig.ChangeLog.Buffer.Enabled = true
	//builderConfig.ChangeLog.Buffer.Size = 100
	//builderConfig.ChangeLog.ReplicationFactor = 3
	//builderConfig.ChangeLog.MinInSycReplicas = 2

	// Worker pool: per-key ordering with 100 workers of buffer size 10.
	builderConfig.WorkerPool.Order = worker_pool.OrderByKey
	builderConfig.WorkerPool.NumOfWorkers = 100
	builderConfig.WorkerPool.WorkerBufferSize = 10

	builderConfig.MetricsReporter = metrics.PrometheusReporter(metrics.ReporterConf{`streams`, `k_stream_test`, nil})
	builderConfig.Logger = Logger

	//builderConfig.Producer.Pool.NumOfWorkers = 1

	builder := kstream.NewStreamBuilder(builderConfig)

	// NOTE(review): these store registrations use encoders imported from
	// examples/example_1/encoders (see this file's import block), and the
	// stores themselves mirror example_1's tables — this looks copy-pasted
	// from example_1/stream/init.go; confirm example_2 really needs them.
	builder.StoreRegistry().New(
		`account_detail_store`,
		encoders.KeyEncoder,
		encoders.AccountDetailsUpdatedEncoder)

	builder.StoreRegistry().New(
		`customer_profile_store`,
		encoders.KeyEncoder,
		encoders.CustomerProfileUpdatedEncoder)

	err := builder.Build(InitStreams(builder)...)
	if err != nil {
		log.Fatal(log.WithPrefix(`boot.boot.Init`, `error in stream building`), err)
	}

	// Signalled by the streams runtime once startup has completed.
	synced := make(chan bool, 1)

	// trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	stream := kstream.NewStreams(builder,
		kstream.NotifyOnStart(synced),
		kstream.WithConsumerOptions(consumer.WithRecordUuidExtractFunc(func(message *data.Record) uuid.UUID {
			// extract uuid from header
			id, err := uuid.Parse(string(message.Key))
			if err != nil {
				// Fall back to a fresh uuid when the key is not parseable.
				return uuid.New()
			}
			return id
		})),
	)
	// Stop the streams runtime when SIGINT arrives.
	go func() {
		select {
		case <-signals:
			stream.Stop()
		}
	}()

	// Blocks until the streams runtime terminates.
	if err := stream.Start(); err != nil {
		log.Fatal(log.WithPrefix(`boot.boot.Init`, `error in stream starting`), err)
	}

}

// InitStreams builds every stream of this application; currently only the
// common abc stream.
func InitStreams(builder *kstream.StreamBuilder) []kstream.Stream {

	commonStream := initCommonStream(builder)
	return []kstream.Stream{commonStream}
}
10 | github.com/gorilla/mux v1.7.4 11 | github.com/olekukonko/tablewriter v0.0.4 12 | github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 13 | github.com/tryfix/errors v1.0.0 14 | github.com/tryfix/log v1.0.2 15 | github.com/tryfix/metrics v1.0.1 16 | github.com/tryfix/traceable-context v1.0.1 17 | ) 18 | -------------------------------------------------------------------------------- /kstream/branch/branch.go: -------------------------------------------------------------------------------- 1 | package branch 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/topology" 7 | ) 8 | 9 | type Predicate func(ctx context.Context, key interface{}, val interface{}) (bool, error) 10 | 11 | type Details struct { 12 | Name string 13 | Predicate Predicate 14 | } 15 | 16 | type Splitter struct { 17 | Id int32 18 | Branches []topology.Node 19 | BranchBuilders []topology.NodeBuilder 20 | } 21 | 22 | func (bs *Splitter) ChildBuilders() []topology.NodeBuilder { 23 | return bs.BranchBuilders 24 | } 25 | 26 | func (bs *Splitter) Childs() []topology.Node { 27 | return bs.Branches 28 | } 29 | 30 | func (bs *Splitter) AddChildBuilder(builder topology.NodeBuilder) { 31 | bs.BranchBuilders = append(bs.BranchBuilders, builder) 32 | } 33 | 34 | func (bs *Splitter) AddChild(node topology.Node) { 35 | bs.Branches = append(bs.Branches, node) 36 | } 37 | 38 | func (bs *Splitter) Build() (topology.Node, error) { 39 | var branches []topology.Node 40 | //var childBuilders []node.NodeBuilder 41 | 42 | for _, childBuilder := range bs.BranchBuilders { 43 | branch, err := childBuilder.Build() 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | branches = append(branches, branch) 49 | } 50 | 51 | return &Splitter{ 52 | Branches: branches, 53 | Id: bs.Id, 54 | }, nil 55 | } 56 | 57 | func (bs *Splitter) Next() bool { 58 | return true 59 | } 60 | 61 | func (bs *Splitter) ID() int32 { 62 | return bs.Id 63 | } 64 | 65 | func (bs 
*Splitter) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 66 | for _, b := range bs.Branches { 67 | branch, _ := b.(*Branch) 68 | 69 | ok, err := branch.Predicate(ctx, kIn, vIn) 70 | if err != nil { 71 | return nil, nil, false, errors.WithPrevious(err, `predicate error`) 72 | } 73 | 74 | if ok { 75 | _, _, next, err := branch.Run(ctx, kIn, vIn) 76 | if err != nil || !next { 77 | return nil, nil, false, err 78 | } 79 | break 80 | } 81 | } 82 | 83 | return kIn, kOut, true, nil 84 | } 85 | 86 | func (bs *Splitter) Type() topology.Type { 87 | return topology.Type(`branch_splitter`) 88 | } 89 | 90 | type Branch struct { 91 | Id int32 92 | Name string 93 | Predicate Predicate 94 | childBuilders []topology.NodeBuilder 95 | childs []topology.Node 96 | } 97 | 98 | func (b *Branch) Childs() []topology.Node { 99 | return b.childs 100 | } 101 | 102 | func (b *Branch) ChildBuilders() []topology.NodeBuilder { 103 | return b.childBuilders 104 | } 105 | 106 | func (b *Branch) AddChildBuilder(builder topology.NodeBuilder) { 107 | b.childBuilders = append(b.childBuilders, builder) 108 | } 109 | 110 | func (b *Branch) AddChild(node topology.Node) { 111 | b.childs = append(b.childs, node) 112 | } 113 | 114 | func (b *Branch) Build() (topology.Node, error) { 115 | var childs []topology.Node 116 | //var childBuilders []node.NodeBuilder 117 | 118 | for _, childBuilder := range b.childBuilders { 119 | child, err := childBuilder.Build() 120 | if err != nil { 121 | return nil, err 122 | } 123 | 124 | childs = append(childs, child) 125 | } 126 | 127 | return &Branch{ 128 | Name: b.Name, 129 | Predicate: b.Predicate, 130 | childs: childs, 131 | Id: b.Id, 132 | }, nil 133 | } 134 | 135 | func (b *Branch) Next() bool { 136 | return true 137 | } 138 | 139 | func (b *Branch) ID() int32 { 140 | return b.Id 141 | } 142 | 143 | func (b *Branch) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 144 | for _, 
child := range b.childs { 145 | _, _, next, err := child.Run(ctx, kIn, vIn) 146 | if err != nil || !next { 147 | return nil, nil, false, err 148 | } 149 | } 150 | return kIn, kOut, true, nil 151 | } 152 | 153 | func (b *Branch) Type() topology.Type { 154 | return topology.TypeBranch 155 | } 156 | -------------------------------------------------------------------------------- /kstream/changelog/buffer.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package changelog 9 | 10 | import ( 11 | "context" 12 | "github.com/tryfix/kstream/data" 13 | "github.com/tryfix/kstream/producer" 14 | "github.com/tryfix/log" 15 | "github.com/tryfix/metrics" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | // Buffer holds a temporary changelog Buffer 21 | type Buffer struct { 22 | records []*data.Record 23 | mu *sync.Mutex 24 | shouldFlush chan bool 25 | flushInterval time.Duration 26 | bufferSize int 27 | logger log.Logger 28 | producer producer.Producer 29 | lastFlushed time.Time 30 | metrics struct { 31 | flushLatency metrics.Observer 32 | } 33 | } 34 | 35 | // NewBuffer creates a new Buffer object 36 | func NewBuffer(p producer.Producer, size int, flushInterval time.Duration, logger log.Logger) *Buffer { 37 | flush := 1 * time.Second 38 | if flushInterval != 0 { 39 | flush = flushInterval 40 | } 41 | 42 | b := &Buffer{ 43 | records: make([]*data.Record, 0, size), 44 | mu: new(sync.Mutex), 45 | producer: p, 46 | bufferSize: size, 47 | logger: logger, 48 | shouldFlush: make(chan bool, 1), 49 | flushInterval: flush, 50 | lastFlushed: time.Now(), 51 | } 52 | 53 | go b.runFlusher() 54 | 55 | return b 56 | } 57 | 58 | // Clear clears the Buffer 59 | func (b *Buffer) Clear() { 60 | b.mu.Lock() 61 | defer b.mu.Unlock() 62 | if err := b.flushAll(); err != nil { 63 | b.logger.ErrorContext(context.Background(), 
`k-stream.changelog.buffer`, err) 64 | } 65 | 66 | } 67 | 68 | func (b *Buffer) Records() []*data.Record { 69 | b.mu.Lock() 70 | defer b.mu.Unlock() 71 | return b.records 72 | } 73 | 74 | // Store stores the record in Buffer 75 | func (b *Buffer) Store(record *data.Record) { 76 | b.mu.Lock() 77 | defer b.mu.Unlock() 78 | 79 | b.records = append(b.records, record) 80 | 81 | if len(b.records) >= b.bufferSize { 82 | b.flush() 83 | } 84 | } 85 | 86 | func (b *Buffer) runFlusher() { 87 | //tic := time.NewTicker(b.flushInterval) 88 | //defer tic.Stop() 89 | // 90 | //for range tic.C { 91 | // 92 | // if time.Since(b.lastFlushed) <= b.flushInterval { 93 | // continue 94 | // } 95 | // 96 | // b.mu.Lock() 97 | // if len(b.records) > 0 { 98 | // b.flush() 99 | // } 100 | // b.mu.Unlock() 101 | // 102 | //} 103 | } 104 | 105 | func (b *Buffer) flush() { 106 | if err := b.flushAll(); err != nil { 107 | b.logger.ErrorContext(context.Background(), `k-stream.changelog.buffer`, err) 108 | } 109 | 110 | b.logger.Trace(`k-stream.changelog.buffer`, `buffer flushed`) 111 | } 112 | 113 | func (b *Buffer) flushAll() error { 114 | begin := time.Now() 115 | defer func(t time.Time) { 116 | b.metrics.flushLatency.Observe(float64(time.Since(begin).Nanoseconds()/1e3), nil) 117 | }(begin) 118 | 119 | // publish buffer to kafka and clear on success 120 | //deDuplicated := deDuplicate(b.records) 121 | //if len(deDuplicated) > 0 { 122 | // if err := b.producer.ProduceBatch(context.Background(), deDuplicated); err != nil { 123 | // return err 124 | // } 125 | //} 126 | 127 | if err := b.producer.ProduceBatch(context.Background(), b.records); err != nil { 128 | return err 129 | } 130 | 131 | b.reset() 132 | 133 | return nil 134 | } 135 | 136 | func (b *Buffer) Delete(record *data.Record) { 137 | record.Value = nil 138 | b.Store(record) 139 | } 140 | 141 | func (b *Buffer) reset() { 142 | b.records = make([]*data.Record, 0, b.bufferSize) 143 | b.lastFlushed = time.Now() 144 | } 145 | 146 | func (b 
*Buffer) Close() { 147 | // flush existing buffer 148 | b.logger.Info(`k-stream.changelog.buffer`, `flushing buffer...`) 149 | if err := b.flushAll(); err != nil { 150 | b.logger.ErrorContext(context.Background(), `k-stream.changelog.buffer`, err) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /kstream/changelog/buffer_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package changelog 9 | 10 | //import ( 11 | // "github.com/tryfix/kstream/consumer" 12 | // "github.com/tryfix/kstream/data" 13 | // "github.com/tryfix/kstream/producer" 14 | // "testing" 15 | // "time" 16 | //) 17 | // 18 | //func TestNewBuffer(t *testing.T) { 19 | // b := NewBuffer(producer.NewMockProducer(t), 10, 10*time.Second) 20 | // if b.records == nil { 21 | // t.Fail() 22 | // } 23 | // 24 | // if b.mu == nil { 25 | // t.Fail() 26 | // } 27 | //} 28 | // 29 | //func TestBufferStore(t *testing.T) { 30 | // b := NewBuffer(producer.NewMockProducer(t), 10, 10*time.Second) 31 | // 32 | // rec := new(data.Record) 33 | // rec.Key = []byte(`key`) 34 | // b.Store(rec) 35 | // 36 | // if string(b.records[0].Key) != string(rec.Key) { 37 | // t.Fail() 38 | // } 39 | //} 40 | // 41 | //func TestBufferClear(t *testing.T) { 42 | // b := NewBuffer(producer.NewMockProducer(t), 10, 10*time.Second) 43 | // 44 | // rec := new(data.Record) 45 | // rec.Key = []byte(``) 46 | // rec.Value = []byte(``) 47 | // b.Store(rec) 48 | // 49 | // b.Clear() 50 | // 51 | // if len(b.records) > 0 { 52 | // t.Fail() 53 | // } 54 | //} 55 | // 56 | //func TestBufferShouldClearOnceFull(t *testing.T) { 57 | // size := 5 58 | // 59 | // b := NewBuffer(producer.NewMockProducer(t), size, 10*time.Millisecond) 60 | // go b.runFlusher() 61 | // 62 | // time.Sleep(1 * time.Second) 63 | // 64 | // rec := 
new(data.Record) 65 | // for i := 0; i < size*20+1; i++ { 66 | // b.Store(rec) 67 | // } 68 | // 69 | // if len(b.records) != size { 70 | // t.Fail() 71 | // } 72 | //} 73 | // 74 | //func TestBufferFlushInterval(t *testing.T) { 75 | // d := 100 * time.Millisecond 76 | // b := NewBuffer(producer.NewMockProducer(t), 10, d) 77 | // go b.runFlusher() 78 | // 79 | // time.Sleep(d) 80 | // 81 | // rec := new(data.Record) 82 | // rec.Key = []byte(`100`) 83 | // rec.Value = []byte(`200`) 84 | // b.Store(rec) 85 | // 86 | // time.Sleep(d + 1*time.Second) 87 | // 88 | // if len(b.records) > 0 { 89 | // t.Fail() 90 | // } 91 | //} 92 | -------------------------------------------------------------------------------- /kstream/changelog/cache.go: -------------------------------------------------------------------------------- 1 | package changelog 2 | 3 | import ( 4 | "encoding/binary" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/backend" 7 | "github.com/tryfix/kstream/consumer" 8 | "github.com/tryfix/kstream/data" 9 | "sync" 10 | ) 11 | 12 | type cacheManager struct { 13 | caches map[string]*cache 14 | mu *sync.Mutex 15 | backendBuilder backend.Builder 16 | cacheOffsetStorage backend.Backend 17 | } 18 | 19 | func newCacheManager(backendBuilder backend.Builder) (*cacheManager, error) { 20 | m := &cacheManager{ 21 | caches: make(map[string]*cache), 22 | mu: new(sync.Mutex), 23 | backendBuilder: backendBuilder, 24 | } 25 | 26 | offsetBackend, err := backendBuilder(`__changelog_cache_offsets`) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | //if !offsetBackend.Persistent() { 32 | // return nil, errors.New( `only persistent backend are supported`) 33 | //} 34 | 35 | m.cacheOffsetStorage = offsetBackend 36 | return m, nil 37 | } 38 | 39 | func (m *cacheManager) getCache(tp consumer.TopicPartition) (*cache, error) { 40 | if c, ok := m.caches[tp.String()]; ok { 41 | return c, nil 42 | } 43 | 44 | b, err := m.backendBuilder(`__changelog_cache_` + 
tp.String()) 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | cache := new(cache) 50 | cache.tp = tp 51 | cache.backend = b 52 | cache.offsetBackend = m.cacheOffsetStorage 53 | 54 | m.mu.Lock() 55 | m.caches[tp.String()] = cache 56 | m.mu.Unlock() 57 | 58 | return cache, nil 59 | } 60 | 61 | type cache struct { 62 | backend backend.Backend 63 | offsetBackend backend.Backend 64 | tp consumer.TopicPartition 65 | } 66 | 67 | func (c *cache) Flush() error { 68 | itr := c.backend.Iterator() 69 | for itr.Valid() { 70 | if err := c.backend.Delete(itr.Key()); err != nil { 71 | return errors.WithPrevious(err, `cache flush failed`) 72 | } 73 | itr.Next() 74 | } 75 | return nil 76 | } 77 | 78 | func (c *cache) Put(record *data.Record) error { 79 | 80 | if len(record.Value) < 1 { 81 | if err := c.backend.Delete(record.Key); err != nil { 82 | return err 83 | } 84 | } else { 85 | if err := c.backend.Set(record.Key, record.Value, 0); err != nil { 86 | return err 87 | } 88 | } 89 | // update current offset on backend 90 | return c.offsetBackend.Set([]byte(c.offsetKeyPrefix()), c.encodeOffset(record.Offset), 0) 91 | 92 | } 93 | 94 | func (c *cache) offsetKeyPrefix() string { 95 | return `__changelog_offset_cache_last_synced_` + c.tp.String() 96 | } 97 | 98 | func (c *cache) ReadAll() []*data.Record { 99 | var records []*data.Record 100 | 101 | i := c.backend.Iterator() 102 | i.SeekToFirst() 103 | for i.Valid() { 104 | record := &data.Record{ 105 | Key: i.Key(), 106 | Value: i.Value(), 107 | Topic: c.tp.Topic, 108 | Partition: c.tp.Partition, 109 | } 110 | records = append(records, record) 111 | i.Next() 112 | } 113 | 114 | return records 115 | } 116 | 117 | func (c *cache) Delete(record *data.Record) error { 118 | return c.backend.Delete(record.Key) 119 | } 120 | 121 | func (c *cache) decodeOffset(offset []byte) int64 { 122 | return int64(binary.LittleEndian.Uint64(offset)) 123 | } 124 | 125 | func (c *cache) encodeOffset(offset int64) []byte { 126 | byt := make([]byte, 
// Builder constructs a Changelog for the given application id, backing
// topic and partition, applying any supplied Options.
type Builder func(id string, topic string, partition int32, opts ...Options) (Changelog, error)

// Changelog records state mutations so state can be replayed and rebuilt
// (e.g. on restart or partition rebalance).
type Changelog interface {
	// ReadAll replays every record currently held in the changelog.
	ReadAll(ctx context.Context) ([]*data.Record, error)
	// Put writes a single record to the changelog.
	Put(ctx context.Context, record *data.Record) error
	// PutAll writes a batch of records to the changelog.
	PutAll(ctx context.Context, record []*data.Record) error
	// Delete removes the given record (by key) from the changelog.
	Delete(ctx context.Context, record *data.Record) error
	// DeleteAll removes a batch of records from the changelog.
	DeleteAll(ctx context.Context, record []*data.Record) error
	// Close releases any resources held by the changelog.
	Close()
}
*mockChangelog) ReadAll(ctx context.Context) ([]*data.Record, error) { 27 | var data []*data.Record 28 | for _, rec := range c.data { 29 | data = append(data, rec) 30 | } 31 | return data, nil 32 | } 33 | 34 | func (c *mockChangelog) Put(ctx context.Context, record *data.Record) error { 35 | c.mu.Lock() 36 | defer c.mu.Unlock() 37 | 38 | if len(c.buffer.records) >= c.bufferSize { 39 | c.PutAll(ctx, c.buffer.records) 40 | return nil 41 | } 42 | 43 | c.buffer.Store(record) 44 | return nil 45 | } 46 | 47 | func (c *mockChangelog) PutAll(ctx context.Context, records []*data.Record) error { 48 | c.mu.Lock() 49 | defer c.mu.Unlock() 50 | for _, rec := range records { 51 | c.data[c.hash(rec.Key)] = rec 52 | } 53 | return nil 54 | } 55 | 56 | func (c *mockChangelog) Delete(ctx context.Context, record *data.Record) error { 57 | c.mu.Lock() 58 | defer c.mu.Unlock() 59 | delete(c.data, c.hash(record.Key)) 60 | return nil 61 | } 62 | 63 | func (c *mockChangelog) DeleteAll(ctx context.Context, records []*data.Record) error { 64 | c.mu.Lock() 65 | defer c.mu.Unlock() 66 | for _, rec := range records { 67 | delete(c.data, c.hash(rec.Key)) 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func (c *mockChangelog) Info() map[string]interface{} { 74 | panic("implement me") 75 | } 76 | 77 | func (c *mockChangelog) Close() { 78 | c.buffer = nil 79 | c.mu = nil 80 | c.data = nil 81 | } 82 | 83 | func (c *mockChangelog) hash(k []byte) string { 84 | ha := sha1.New() 85 | ha.Write(k) 86 | return string(ha.Sum(nil)) 87 | } 88 | -------------------------------------------------------------------------------- /kstream/changelog/options.go: -------------------------------------------------------------------------------- 1 | package changelog 2 | 3 | import ( 4 | "github.com/tryfix/kstream/producer" 5 | "time" 6 | ) 7 | 8 | type options struct { 9 | buffered bool 10 | bufferSize int 11 | flushInterval time.Duration 12 | producer producer.Producer 13 | } 14 | 15 | type Options func(config *options) 
16 | 17 | func (c *options) apply(id string, options ...Options) error { 18 | 19 | if err := c.applyDefaults(id); err != nil { 20 | return err 21 | } 22 | 23 | for _, opt := range options { 24 | opt(c) 25 | } 26 | 27 | return nil 28 | } 29 | 30 | func (c *options) applyDefaults(id string) error { 31 | return nil 32 | } 33 | 34 | func Producer(p producer.Producer) Options { 35 | return func(config *options) { 36 | config.producer = p 37 | } 38 | } 39 | 40 | func Buffered(size int) Options { 41 | return func(config *options) { 42 | config.buffered = true 43 | config.bufferSize = size 44 | } 45 | } 46 | 47 | func FlushInterval(d time.Duration) Options { 48 | return func(config *options) { 49 | config.flushInterval = d 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /kstream/changelog/replica_syncer.go: -------------------------------------------------------------------------------- 1 | package changelog 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/consumer" 7 | "github.com/tryfix/kstream/data" 8 | "github.com/tryfix/log" 9 | ) 10 | 11 | type replicaSyncer struct { 12 | cache *cache 13 | tp consumer.TopicPartition 14 | consumer consumer.PartitionConsumer 15 | syncing bool 16 | logger log.Logger 17 | running bool 18 | } 19 | 20 | func (rs *replicaSyncer) Sync(startOffset int64) (started chan bool, syncErrors chan error) { 21 | started = make(chan bool) 22 | syncErrors = make(chan error) 23 | 24 | go rs.initSync(startOffset, started, syncErrors) 25 | 26 | return started, syncErrors 27 | } 28 | 29 | func (rs *replicaSyncer) initSync(startOffset int64, started chan bool, syncErrors chan error) { 30 | if startOffset == 0 { 31 | startOffset = int64(consumer.Earliest) 32 | } 33 | 34 | events, err := rs.consumer.Consume(rs.tp.Topic, rs.tp.Partition, consumer.Offset(startOffset)) 35 | if err != nil { 36 | syncErrors <- errors.WithPrevious(err, fmt.Sprintf(`cannot read partition 
%s[%d]`, 37 | rs.tp.Topic, rs.tp.Partition)) 38 | return 39 | } 40 | 41 | rs.logger.Info(fmt.Sprintf(`partition consumer started at offset [%d]`, startOffset)) 42 | 43 | started <- true 44 | rs.syncing = true 45 | 46 | for event := range events { 47 | switch ev := event.(type) { 48 | case *data.Record: 49 | if err := rs.cache.Put(ev); err != nil { 50 | syncErrors <- errors.WithPrevious(err, `writing to cache failed`) 51 | } 52 | case *consumer.PartitionEnd: 53 | rs.logger.Info(fmt.Sprintf(`replica sync completed for [%s]`, rs.tp)) 54 | case *consumer.Error: 55 | rs.logger.Error(err) 56 | } 57 | } 58 | 59 | close(started) 60 | close(syncErrors) 61 | rs.syncing = false 62 | rs.running = true 63 | } 64 | 65 | func (rs *replicaSyncer) Stop() error { 66 | if rs.running { 67 | rs.logger.Info(`sync not running`) 68 | return nil 69 | } 70 | 71 | return rs.consumer.Close() 72 | } 73 | -------------------------------------------------------------------------------- /kstream/changelog/state_changelog_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package changelog 9 | 10 | import "testing" 11 | 12 | func TestStateChangelog_ReadAll(t *testing.T) { 13 | 14 | } 15 | 16 | func TestStateChangelog_Put(t *testing.T) { 17 | 18 | } 19 | -------------------------------------------------------------------------------- /kstream/changelog/store_changelog.go: -------------------------------------------------------------------------------- 1 | package changelog 2 | 3 | func NewStoreChangelog(applicationId string, topic string, partition int32, opts ...Options) (Changelog, error) { 4 | 5 | options := new(options) 6 | options.apply(applicationId, opts...) 
// recordMeta is the (address-based) context key under which RecordMeta is
// stored; the pointer to this variable is used as the key to avoid
// collisions with other packages' context values.
var recordMeta = `rc_meta`

// RecordMeta carries the kafka record metadata (origin topic, partition,
// offset, timestamp and headers) through the processing context.
type RecordMeta struct {
	Topic     string
	Partition int32
	Offset    int64
	Timestamp time.Time
	Headers   data.RecordHeaders
}

// Context is the k-stream processing context; it wraps a standard
// context.Context so record metadata can travel with it.
type Context struct {
	context.Context
}

// FromRecord derives a traceable context from parent carrying the record's
// metadata (topic, partition, offset, timestamp, headers) under recordMeta.
func FromRecord(parent context.Context, record *data.Record) context.Context {
	return traceable_context.WithValue(parent, &recordMeta, &RecordMeta{
		Topic:     record.Topic,
		Offset:    record.Offset,
		Partition: record.Partition,
		Timestamp: record.Timestamp,
		Headers:   record.Headers,
	})
}

// RecordFromContext rebuilds a data.Record with the given key/value, taking
// topic, partition, offset, timestamp and headers from the metadata stored
// in ctx. ctx must be a *Context (as produced by this package); any other
// context type yields an error.
func RecordFromContext(ctx context.Context, key []byte, val []byte) (*data.Record, error) {
	if c, ok := ctx.(*Context); ok {

		meta := Meta(c)

		return &data.Record{
			Topic:     meta.Topic,
			Partition: meta.Partition,
			Offset:    meta.Offset,
			Timestamp: meta.Timestamp,
			Key:       key,
			Value:     val,
			Headers:   meta.Headers,
		}, nil
	}

	return nil, errors.New(`invalid context expected [k-stream.context]`)
}

// Meta returns the RecordMeta stored in ctx. It panics when ctx does not
// carry record metadata — callers must only pass contexts produced by
// FromRecord.
func Meta(ctx context.Context) *RecordMeta {
	if meta, ok := ctx.Value(&recordMeta).(*RecordMeta); ok {
		return meta
	}
	panic(`k-stream.context meta not available`)
}
-------------------------------------------------------------------------------- 1 | package context 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/data" 6 | "reflect" 7 | "testing" 8 | ) 9 | 10 | func TestFromRecord(t *testing.T) { 11 | //type args struct { 12 | // parent context.Context 13 | // record *data.Record 14 | //} 15 | // 16 | //ctx := context.WithValue(context.Background(), `foo`, `bar`) 17 | //kafkaRec := &data.Record{ 18 | // Key: nil, 19 | // Value: nil, 20 | // Topic: "test", 21 | // Partition: 0, 22 | // Offset: 0, 23 | // Timestamp: time.Time{}, 24 | // BlockTimestamp: time.Time{}, 25 | // RecordHeaders: nil, 26 | // UUID: uuid.UUID{}, 27 | //} 28 | // 29 | //tests := []struct { 30 | // name string 31 | // args args 32 | // want context.Context 33 | //}{ 34 | // {name: `default`, args: args{ 35 | // parent: ctx, 36 | // record: nil, 37 | // }, want: Context{}}, 38 | //} 39 | //for _, tt := range tests { 40 | // t.Run(tt.name, func(t *testing.T) { 41 | // if got := FromRecord(tt.args.parent, tt.args.record); !reflect.DeepEqual(got, tt.want) { 42 | // t.Errorf("FromRecord() = %v, want %v", got, tt.want) 43 | // } 44 | // }) 45 | //} 46 | } 47 | 48 | func TestMeta(t *testing.T) { 49 | type args struct { 50 | ctx context.Context 51 | } 52 | tests := []struct { 53 | name string 54 | args args 55 | want *RecordMeta 56 | }{ 57 | // TODO: Add test cases. 58 | } 59 | for _, tt := range tests { 60 | t.Run(tt.name, func(t *testing.T) { 61 | if got := Meta(tt.args.ctx); !reflect.DeepEqual(got, tt.want) { 62 | t.Errorf("Meta() = %v, want %v", got, tt.want) 63 | } 64 | }) 65 | } 66 | } 67 | 68 | func TestRecordFromContext(t *testing.T) { 69 | type args struct { 70 | ctx context.Context 71 | key []byte 72 | val []byte 73 | } 74 | tests := []struct { 75 | name string 76 | args args 77 | want *data.Record 78 | wantErr bool 79 | }{ 80 | // TODO: Add test cases. 
// prepareMessage builds the DLQ record for a failed message, copying the
// origin partition and topic from the k-stream context metadata. ctx must
// be a *kContext.Context; any other context type is rejected.
func (dq *dlq) prepareMessage(ctx context.Context, key []byte, value []byte) (*data.Record, error) {
	kCtx, ok := ctx.(*kContext.Context)
	if !ok {
		return nil, errors.New(`k-stream.DLQ.Publish: published message context should be the type of kstream.Context`)
	}

	return &data.Record{
		Key:       key,
		Value:     value,
		Partition: kContext.Meta(kCtx).Partition,
		Topic:     dq.topic(kContext.Meta(kCtx).Topic),
	}, nil
}

// topic resolves the destination DLQ topic: when Type is DqlPerTopic the
// name is derived from TopicFormat and the source topic, otherwise the
// single global Topic is used.
func (dq *dlq) topic(topic string) string {
	if dq.options.Type == DqlPerTopic {
		return fmt.Sprintf(dq.options.TopicFormat, topic)
	}

	return dq.options.Topic
}
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package encoding 9 | 10 | type Builder func() Encoder 11 | 12 | type Encoder interface { 13 | Encode(data interface{}) ([]byte, error) 14 | Decode(data []byte) (interface{}, error) 15 | } 16 | -------------------------------------------------------------------------------- /kstream/encoding/int_encoder.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "github.com/tryfix/errors" 5 | "reflect" 6 | "strconv" 7 | ) 8 | 9 | type IntEncoder struct{} 10 | 11 | func (IntEncoder) Encode(v interface{}) ([]byte, error) { 12 | 13 | i, ok := v.(int) 14 | if !ok { 15 | return nil, errors.Errorf(`invalid type [%v] expected int`, reflect.TypeOf(v)) 16 | } 17 | 18 | return []byte(strconv.Itoa(i)), nil 19 | } 20 | 21 | func (IntEncoder) Decode(data []byte) (interface{}, error) { 22 | i, err := strconv.Atoi(string(data)) 23 | if err != nil { 24 | return nil, errors.WithPrevious(err, `cannot decode data`) 25 | } 26 | 27 | return i, nil 28 | } 29 | -------------------------------------------------------------------------------- /kstream/encoding/int_encoder_test.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestIntEncoder_Decode(t *testing.T) { 9 | type args struct { 10 | data []byte 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | want interface{} 16 | wantErr bool 17 | }{ 18 | {name: `should_decode`, args: args{data: []byte(`1`)}, want: 1, wantErr: false}, 19 | {name: `should_return_error`, args: args{data: []byte(`ss`)}, want: nil, wantErr: true}, 20 | } 21 | for _, tt := range tests { 22 | t.Run(tt.name, func(t *testing.T) { 23 | in := IntEncoder{} 24 | got, err := in.Decode(tt.args.data) 25 | if (err != nil) != tt.wantErr { 26 | t.Errorf("Decode() error = %v, wantErr %v", err, tt.wantErr) 27 | 
// JsonSerializer marshals and unmarshals values via encoding/json.
//
// Note: Decode writes into a caller-supplied target rather than returning a
// value, so this type does not satisfy the package's Encoder interface.
type JsonSerializer struct{}

// NewJsonSerDes returns a ready-to-use JsonSerializer.
func NewJsonSerDes() *JsonSerializer {
	serDes := &JsonSerializer{}
	return serDes
}

// Encode marshals data into its JSON byte form.
func (s *JsonSerializer) Encode(data interface{}) ([]byte, error) {
	encoded, err := json.Marshal(data)
	return encoded, err
}

// Decode unmarshals raw JSON into the value held by target.
func (s *JsonSerializer) Decode(byt []byte, v interface{}) error {
	// json.Unmarshal indirects through the interface to reach the caller's
	// pointer; keep &v to preserve the original's exact behavior.
	return json.Unmarshal(byt, &v)
}
// Decode converts the raw bytes to a string. It never fails; the error
// return exists only to satisfy the Encoder interface.
func (s StringEncoder) Decode(data []byte) (interface{}, error) {
	return string(data), nil
}
logger.DefaultLogger.Fatal(`k-stream.kFlow`, `must be the type of kSource`) 60 | // } 61 | // 62 | // kFlow.source = kSource 63 | // } 64 | // 65 | // for _, branch := range b.branches { 66 | // 67 | // // Build branch 68 | // flowBranch, err := branch.Builder.Build() 69 | // if err != nil { 70 | // logger.DefaultLogger.Fatal(`k-stream.kFlow`, err) 71 | // } 72 | // 73 | // kFlow.branches = append(kFlow.branches, &flow.Branch{ 74 | // Predicate: branch.Predicate, 75 | // Flow: flowBranch, 76 | // }) 77 | // } 78 | // 79 | // if b.sinkBuilder != nil { 80 | // sink, err := b.sinkBuilder.Build() 81 | // if err != nil { 82 | // return nil, err 83 | // } 84 | // 85 | // kSink, ok := sink.(*kSink) 86 | // if !ok { 87 | // logger.DefaultLogger.Fatal(`k-stream.kFlow`, `must be the type of kSource`) 88 | // } 89 | // 90 | // kFlow.sink = kSink 91 | // } 92 | // 93 | // return kFlow, nil 94 | //} 95 | // 96 | //func (f *KFlow) Source() source_sink.Source { 97 | // return f.source 98 | //} 99 | // 100 | //func (f *KFlow) Sink() source_sink.Sink { 101 | // return f.sink 102 | //} 103 | // 104 | //func (f *KFlow) Processors() []processors.Processor { 105 | // return f.processors 106 | //} 107 | // 108 | //func (f *KFlow) Branches() []*flow.Branch { 109 | // return f.branches 110 | //} 111 | // 112 | //func (f *KFlow) Sinkable() bool { 113 | // return f.sink != nil 114 | //} 115 | // 116 | //func (f *KFlow) OnError() errors.ErrorHandler { 117 | // return f.errorHandler 118 | //} 119 | // 120 | //func (f *KFlow) RetryCount() int { 121 | // return f.retryCount 122 | //} 123 | // 124 | //func (f *KFlow) RetryInterval() time.Duration { 125 | // return f.retryInterval 126 | //} 127 | -------------------------------------------------------------------------------- /kstream/k_source.go: -------------------------------------------------------------------------------- 1 | package kstream 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | 
// decodeRecord turns the raw kafka key and value bytes into typed values
// using the source's configured encoders. Failures are wrapped with a hint
// identifying whether the key or the value failed to decode.
func (s *kSource) decodeRecord(key []byte, val []byte) (interface{}, interface{}, error) {
	k, err := s.keyEncoder.Decode(key)
	if err != nil {
		return nil, nil, errors.WithPrevious(err, `key decode error`)
	}

	v, err := s.valEncoder.Decode(val)
	if err != nil {
		return nil, nil, errors.WithPrevious(err, `value decode error`)
	}

	return k, v, nil
}
// Manager exposes kafka offset queries for a topic partition: validity
// checks against the retained range and lookups of the oldest/latest
// offsets.
type Manager interface {
	// OffsetValid reports whether offset lies within the partition's
	// currently retained offset range.
	OffsetValid(topic string, partition int32, offset int64) (isValid bool, err error)
	// GetOffsetLatest returns the next offset that will be produced
	// (sarama.OffsetNewest).
	GetOffsetLatest(topic string, partition int32) (offset int64, err error)
	// GetOffsetOldest returns the oldest retained offset
	// (sarama.OffsetOldest).
	GetOffsetOldest(topic string, partition int32) (offset int64, err error)
	// Close releases the underlying kafka client.
	Close() error
}

// Config carries the sarama client configuration, broker addresses and
// logger used to build a Manager.
type Config struct {
	Config           *sarama.Config
	BootstrapServers []string
	Logger           log.Logger
}

// manager implements Manager on top of a sarama.Client.
type manager struct {
	client sarama.Client
}
&manager{client: client} 33 | } 34 | 35 | func (m *manager) OffsetValid(topic string, partition int32, offset int64) (isValid bool, err error) { 36 | isValid, _, err = m.validate(topic, partition, offset) 37 | return 38 | } 39 | 40 | func (m *manager) GetOffsetLatest(topic string, partition int32) (offset int64, err error) { 41 | partitionStart, err := m.client.GetOffset(topic, partition, sarama.OffsetNewest) 42 | if err != nil { 43 | return offset, fmt.Errorf(`cannot get latest offset for %s-%d due to %w`, topic, partition, err) 44 | } 45 | 46 | return partitionStart, nil 47 | } 48 | 49 | func (m *manager) GetOffsetOldest(topic string, partition int32) (offset int64, err error) { 50 | partitionStart, err := m.client.GetOffset(topic, partition, sarama.OffsetOldest) 51 | if err != nil { 52 | return offset, fmt.Errorf(`cannot get oldes offset for %s-%d due to %w`, topic, partition, err) 53 | } 54 | 55 | return partitionStart, nil 56 | } 57 | 58 | func (m *manager) Close() error { 59 | return m.client.Close() 60 | } 61 | 62 | func offsetValid(offset, bkStart, bkEnd int64) bool { 63 | return offset >= bkStart && offset < bkEnd 64 | } 65 | 66 | func (m *manager) validate(topic string, partition int32, offset int64) (isValid bool, valid int64, err error) { 67 | 68 | startOffset, err := m.GetOffsetLatest(topic, partition) 69 | if err != nil { 70 | return false, 0, fmt.Errorf(`offset validate failed for %s-%d due to %w`, topic, partition, err) 71 | } 72 | 73 | endOffset, err := m.client.GetOffset(topic, partition, sarama.OffsetNewest) 74 | if err != nil { 75 | return false, 0, fmt.Errorf(`offset validate failed for %s-%d due to %w`, topic, partition, err) 76 | } 77 | 78 | return offsetValid(offset, startOffset, endOffset), startOffset, nil 79 | } 80 | -------------------------------------------------------------------------------- /kstream/offsets/mock_manager.go: -------------------------------------------------------------------------------- 1 | package offsets 2 | 3 | 
import "github.com/tryfix/kstream/admin" 4 | 5 | type MockManager struct { 6 | Topics *admin.Topics 7 | } 8 | 9 | func (m *MockManager) OffsetValid(topic string, partition int32, offset int64) (isValid bool, err error) { 10 | oldest, err := m.GetOffsetOldest(topic, partition) 11 | if err != nil { 12 | return 13 | } 14 | 15 | latest, err := m.GetOffsetLatest(topic, partition) 16 | if err != nil { 17 | return 18 | } 19 | return offsetValid(offset, oldest, latest), nil 20 | } 21 | 22 | func (m *MockManager) GetOffsetLatest(topic string, partition int32) (offset int64, err error) { 23 | tp, err := m.Topics.Topic(topic) 24 | if err != nil { 25 | return 26 | } 27 | 28 | pt, err := tp.Partition(int(partition)) 29 | if err != nil { 30 | return 31 | } 32 | 33 | return pt.Latest(), nil 34 | } 35 | 36 | func (m *MockManager) GetOffsetOldest(topic string, partition int32) (offset int64, err error) { 37 | return 0, nil 38 | } 39 | 40 | func (m *MockManager) Close() error { 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /kstream/offsets/resetter.go: -------------------------------------------------------------------------------- 1 | package offsets 2 | 3 | //import ( 4 | // "github.com/Shopify/sarama" 5 | // "github.com/tryfix/kstream/consumer" 6 | //) 7 | // 8 | //type Resetter struct { 9 | // client sarama.Client 10 | //} 11 | // 12 | //func (r *Resetter) Reset([]consumer.TopicPartition) { 13 | // //b, err := r.client.Config() 14 | // 15 | //} 16 | -------------------------------------------------------------------------------- /kstream/processor_pool.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 
4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package kstream 9 | 10 | import ( 11 | "fmt" 12 | "github.com/tryfix/errors" 13 | "github.com/tryfix/kstream/consumer" 14 | "github.com/tryfix/kstream/kstream/changelog" 15 | "github.com/tryfix/log" 16 | "github.com/tryfix/metrics" 17 | "sync" 18 | ) 19 | 20 | type processorPool struct { 21 | id string 22 | processors map[consumer.TopicPartition]*processor 23 | mu *sync.Mutex 24 | topologies map[string]*kStream 25 | logger log.Logger 26 | metrics metrics.Reporter 27 | changelog changelog.Builder 28 | } 29 | 30 | func newProcessorPool(id string, flows map[string]*kStream, changelog changelog.Builder, logger log.Logger, reporter metrics.Reporter) *processorPool { 31 | return &processorPool{ 32 | id: id, 33 | processors: make(map[consumer.TopicPartition]*processor), 34 | mu: &sync.Mutex{}, 35 | topologies: flows, 36 | logger: logger, 37 | metrics: reporter, 38 | changelog: changelog, 39 | } 40 | } 41 | 42 | func (p *processorPool) Processor(tp consumer.TopicPartition) *processor { 43 | p.mu.Lock() 44 | defer p.mu.Unlock() 45 | 46 | return p.processors[tp] 47 | } 48 | 49 | func (p *processorPool) addProcessor(tp consumer.TopicPartition) error { 50 | 51 | processorId := fmt.Sprintf(`%s_%s_%d`, p.id, tp.Topic, tp.Partition) 52 | processor, err := newProcessor(processorId, tp, p.changelog, p.logger, p.metrics) 53 | if err != nil { 54 | return errors.WithPrevious(err, `cannot start stream processor`) 55 | } 56 | 57 | processor.topologyBuilder = p.topologies[tp.Topic].topology 58 | //processor.changelogEnabled = p.topologies[tp.Topic].changelog.enabled 59 | processor.taskPoolConfig = p.topologies[tp.Topic].config.workerPool 60 | p.processors[tp] = processor 61 | 62 | return nil 63 | } 64 | 65 | func (p *processorPool) Stop() { 66 | p.mu.Lock() 67 | defer p.mu.Unlock() 68 | 69 | for _, processor := range p.processors { 70 | processor.Stop() 71 | } 72 | } 73 | 74 | func (p *processorPool) Remove(tp 
consumer.TopicPartition) { 75 | p.mu.Lock() 76 | defer p.mu.Unlock() 77 | 78 | p.processors[tp].Stop() 79 | logger.Info( 80 | fmt.Sprintf(`processor for %s stopped`, tp.String())) 81 | } 82 | -------------------------------------------------------------------------------- /kstream/processors/filter.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/topology" 7 | ) 8 | 9 | type FilterFunc func(ctx context.Context, key, value interface{}) (bool, error) 10 | 11 | type Filter struct { 12 | Id int32 13 | FilterFunc FilterFunc 14 | next bool 15 | childs []topology.Node 16 | childBuilders []topology.NodeBuilder 17 | } 18 | 19 | func (f *Filter) ChildBuilders() []topology.NodeBuilder { 20 | return f.childBuilders 21 | } 22 | 23 | func (f *Filter) Childs() []topology.Node { 24 | return f.childs 25 | } 26 | 27 | func (f *Filter) AddChildBuilder(builder topology.NodeBuilder) { 28 | f.childBuilders = append(f.childBuilders, builder) 29 | } 30 | 31 | func (f *Filter) AddChild(node topology.Node) { 32 | f.childs = append(f.childs, node) 33 | } 34 | 35 | func (f *Filter) Build() (topology.Node, error) { 36 | var childs []topology.Node 37 | //var childBuilders []node.NodeBuilder 38 | 39 | for _, childBuilder := range f.childBuilders { 40 | child, err := childBuilder.Build() 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | childs = append(childs, child) 46 | } 47 | 48 | return &Filter{ 49 | FilterFunc: f.FilterFunc, 50 | childs: childs, 51 | next: f.next, 52 | Id: f.Id, 53 | }, nil 54 | } 55 | 56 | func (f *Filter) Name() string { 57 | return `filter` 58 | } 59 | 60 | func (f *Filter) Next() bool { 61 | return f.next 62 | } 63 | 64 | func (f *Filter) Type() topology.Type { 65 | return topology.Type(`filter`) 66 | } 67 | 68 | func (f *Filter) ID() int32 { 69 | return f.Id 70 | } 71 | 72 | func (f *Filter) Run(ctx 
context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, next bool, err error) { 73 | 74 | ok, err := f.FilterFunc(ctx, kIn, vIn) 75 | if err != nil { 76 | return nil, nil, false, errors.WithPrevious(err, `process error`) 77 | } 78 | 79 | if ok { 80 | for _, child := range f.childs { 81 | _, _, next, err := child.Run(ctx, kIn, vIn) 82 | if err != nil || !next { 83 | return nil, nil, false, err 84 | } 85 | } 86 | } 87 | 88 | return kIn, vIn, ok, nil 89 | } 90 | -------------------------------------------------------------------------------- /kstream/processors/filter_test.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | ) 8 | 9 | var f FilterFunc = func(ctx context.Context, key, value interface{}) (b bool, err error) { 10 | k, ok := key.(int) 11 | if !ok { 12 | return false, errors.New(`invalid type`) 13 | } 14 | return k == 1, nil 15 | } 16 | 17 | var filter = &Filter{ 18 | FilterFunc: f, 19 | } 20 | 21 | func TestFilter_Process_Should_Filter(t *testing.T) { 22 | k, v, next, err := filter.Run(context.Background(), 1, nil) 23 | if err != nil { 24 | t.Error(err) 25 | } 26 | if !next { 27 | t.Fail() 28 | } 29 | 30 | if k != 1 { 31 | t.Fail() 32 | } 33 | 34 | if v != nil { 35 | t.Fail() 36 | } 37 | } 38 | 39 | func TestFilter_Process_Should_Return_Org_Vals_On_Error(t *testing.T) { 40 | kOrg := `100` 41 | vOrg := `100` 42 | 43 | k, v, next, err := filter.Run(context.Background(), kOrg, vOrg) 44 | if err == nil { 45 | t.Fail() 46 | } 47 | 48 | if next { 49 | t.Fail() 50 | } 51 | 52 | if k != nil { 53 | t.Fail() 54 | } 55 | 56 | if v != nil { 57 | t.Fail() 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kstream/processors/join/global_table_joiner.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 
"github.com/tryfix/errors" 7 | "github.com/tryfix/kstream/kstream/store" 8 | "github.com/tryfix/kstream/kstream/topology" 9 | ) 10 | 11 | type GlobalTableJoiner struct { 12 | //Topic string 13 | Id int32 14 | Typ Type 15 | Store string 16 | KeyMapper KeyMapper 17 | ValueMapper ValueMapper 18 | store store.Store 19 | Registry store.Registry 20 | childBuilders []topology.NodeBuilder 21 | childs []topology.Node 22 | } 23 | 24 | func (j *GlobalTableJoiner) ChildBuilders() []topology.NodeBuilder { 25 | return j.childBuilders 26 | } 27 | 28 | func (j *GlobalTableJoiner) Childs() []topology.Node { 29 | return j.childs 30 | } 31 | 32 | func (j *GlobalTableJoiner) AddChildBuilder(builder topology.NodeBuilder) { 33 | j.childBuilders = append(j.childBuilders, builder) 34 | } 35 | 36 | func (j *GlobalTableJoiner) AddChild(node topology.Node) { 37 | j.childs = append(j.childs, node) 38 | } 39 | 40 | func (j *GlobalTableJoiner) Next() bool { 41 | return true 42 | } 43 | 44 | func (j *GlobalTableJoiner) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, next bool, err error) { 45 | joined, err := j.Join(ctx, kIn, vIn) 46 | if err != nil { 47 | return 48 | } 49 | 50 | for _, child := range j.childs { 51 | _, _, next, err := child.Run(ctx, kIn, joined) 52 | if err != nil || !next { 53 | return nil, nil, false, err 54 | } 55 | } 56 | return kIn, joined, true, err 57 | } 58 | 59 | func (j *GlobalTableJoiner) Type() topology.Type { 60 | return topology.TypeJoiner 61 | } 62 | 63 | func (j *GlobalTableJoiner) Build() (topology.Node, error) { //TODO: write new build 64 | s, err := j.Registry.Store(j.Store) 65 | if err != nil || s == nil { 66 | return nil, errors.New(`store [` + j.Store + `] dose not exist`) 67 | } 68 | j.store = s 69 | 70 | var childs []topology.Node 71 | //var childBuilders []node.NodeBuilder 72 | 73 | for _, childBuilder := range j.childBuilders { 74 | child, err := childBuilder.Build() 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | 
childs = append(childs, child) 80 | } 81 | 82 | return &GlobalTableJoiner{ 83 | Id: j.Id, 84 | Typ: j.Typ, 85 | Store: j.Store, 86 | KeyMapper: j.KeyMapper, 87 | ValueMapper: j.ValueMapper, 88 | store: j.store, 89 | Registry: j.Registry, 90 | childs: childs, 91 | }, nil 92 | } 93 | 94 | func (j *GlobalTableJoiner) Join(ctx context.Context, key interface{}, leftVal interface{}) (joinedVal interface{}, err error) { 95 | 96 | // get key from key mapper 97 | k, err := j.KeyMapper(key, leftVal) 98 | if err != nil { 99 | return nil, errors.WithPrevious(err, `KeyMapper error`) 100 | } 101 | 102 | // get value from store 103 | rightValue, err := j.store.Get(ctx, k) 104 | if err != nil { 105 | return nil, errors.WithPrevious(err, 106 | fmt.Sprintf(`cannot get value from [%s] store`, j.Store)) 107 | } 108 | 109 | // for InnerJoin joins if right side lookup nil ignore the join 110 | if j.Typ == InnerJoin && rightValue == nil { 111 | return nil, errors.New( 112 | fmt.Sprintf(`right value lookup failed due to [key [%+v] dose not exist in %s store]`, k, j.store.Name())) 113 | } 114 | 115 | // send LeftJoin value and right value to ValueJoiner and get the joined value 116 | valJoined, err := j.ValueMapper(leftVal, rightValue) 117 | if err != nil { 118 | return nil, errors.WithPrevious(err, 119 | `value mapper failed`) 120 | } 121 | 122 | return valJoined, nil 123 | 124 | } 125 | 126 | func (j *GlobalTableJoiner) Name() string { 127 | return j.Store 128 | } 129 | 130 | func (j *GlobalTableJoiner) ID() int32 { 131 | return j.Id 132 | } 133 | -------------------------------------------------------------------------------- /kstream/processors/join/global_table_joiner_bench_test.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | ) 7 | 8 | func BenchmarkGlobalTableJoiner(b *testing.B) { 9 | ctx := context.Background() 10 | if err := testStore.Set(ctx, 200, rightRecord{ 11 | PrimaryKey: 
100, 12 | ForeignKey: 200, 13 | }, 0); err != nil { 14 | b.Error(err) 15 | } 16 | 17 | b.RunParallel(func(pb *testing.PB) { 18 | for pb.Next() { 19 | j := makeJoiner(InnerJoin) 20 | 21 | _, err := j.Join(ctx, 100, leftRecord{ 22 | PrimaryKey: 100, 23 | ForeignKey: 200, 24 | }) 25 | if err != nil { 26 | b.Error(err) 27 | } 28 | } 29 | }) 30 | 31 | } 32 | -------------------------------------------------------------------------------- /kstream/processors/join/global_table_joiner_test.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "github.com/tryfix/kstream/backend" 8 | "github.com/tryfix/kstream/kstream/encoding" 9 | "github.com/tryfix/kstream/kstream/store" 10 | "reflect" 11 | "testing" 12 | ) 13 | 14 | type rightRecord struct { 15 | PrimaryKey int `json:"primary_key"` 16 | ForeignKey int `json:"foreign_key"` 17 | } 18 | 19 | type leftRecord struct { 20 | PrimaryKey int `json:"primary_key"` 21 | ForeignKey int `json:"foreign_key"` 22 | } 23 | 24 | type joinedRecord struct { 25 | left leftRecord 26 | right rightRecord 27 | } 28 | 29 | func (e rightRecord) Decode(data []byte) (interface{}, error) { 30 | v := rightRecord{} 31 | if err := json.Unmarshal(data, &v); err != nil { 32 | return nil, err 33 | } 34 | return v, nil 35 | } 36 | 37 | func (rightRecord) Encode(data interface{}) ([]byte, error) { 38 | return json.Marshal(data) 39 | } 40 | 41 | var testStore = store.NewMockStore( 42 | `test_store`, 43 | encoding.IntEncoder{}, 44 | rightRecord{}, 45 | backend.NewMockBackend(`test_backend`, 0)) 46 | 47 | func makeJoiner(typ Type) *GlobalTableJoiner { 48 | return &GlobalTableJoiner{ 49 | store: testStore, 50 | KeyMapper: func(key interface{}, value interface{}) (mappedKey interface{}, err error) { 51 | v, _ := value.(leftRecord) 52 | return v.ForeignKey, nil 53 | 54 | }, 55 | ValueMapper: func(left interface{}, right interface{}) (joined interface{}, 
err error) { 56 | l, _ := left.(leftRecord) 57 | r, _ := right.(rightRecord) 58 | 59 | return joinedRecord{ 60 | left: l, 61 | right: r, 62 | }, nil 63 | }, 64 | Typ: typ, 65 | } 66 | } 67 | 68 | func TestGlobalTableJoiner_Join_Inner(t *testing.T) { 69 | 70 | leftRecord := leftRecord{ 71 | PrimaryKey: 1000, 72 | ForeignKey: 2000, 73 | } 74 | 75 | rightRecord := rightRecord{ 76 | PrimaryKey: 1000, 77 | ForeignKey: 2000, 78 | } 79 | 80 | err := testStore.Set(context.Background(), 2000, rightRecord, 0) 81 | if err != nil { 82 | t.Error(err) 83 | } 84 | 85 | defer testStore.Delete(context.Background(), 2000) 86 | 87 | joiner := makeJoiner(InnerJoin) 88 | 89 | v, err := joiner.Join(context.Background(), 1000, leftRecord) 90 | if err != nil { 91 | t.Error(err) 92 | } 93 | 94 | if _, ok := v.(joinedRecord); !ok { 95 | t.Error(`invalid record`) 96 | } 97 | 98 | } 99 | 100 | func TestGlobalTableJoiner_Join_Inner_Should_Return_Error_When_Right_Null(t *testing.T) { 101 | 102 | leftRecord := leftRecord{ 103 | PrimaryKey: 1000, 104 | ForeignKey: 2000, 105 | } 106 | 107 | joiner := makeJoiner(InnerJoin) 108 | 109 | v, err := joiner.Join(context.Background(), 1000, leftRecord) 110 | if err == nil { 111 | t.Error(err) 112 | } 113 | 114 | //log.Fatal(v) 115 | 116 | if v != nil { 117 | t.Error(`joined value must null when right lookup failed`) 118 | } 119 | 120 | } 121 | 122 | func TestGlobalTableJoiner_Join_Inner_Should_Return_Error_When_Left_Null(t *testing.T) { 123 | 124 | rightRecord := rightRecord{ 125 | PrimaryKey: 1000, 126 | ForeignKey: 2000, 127 | } 128 | 129 | err := testStore.Set(context.Background(), 2000, rightRecord, 0) 130 | if err != nil { 131 | t.Error(err) 132 | } 133 | 134 | defer testStore.Delete(context.Background(), 2000) 135 | 136 | joiner := makeJoiner(InnerJoin) 137 | 138 | v, err := joiner.Join(context.Background(), 1000, nil) 139 | if err == nil { 140 | t.Error(err) 141 | } 142 | 143 | if v != nil { 144 | t.Error(`joined value must null when right lookup 
failed`) 145 | } 146 | 147 | } 148 | 149 | func TestGlobalTableJoiner_Join_Left(t *testing.T) { 150 | 151 | leftRecord := leftRecord{ 152 | PrimaryKey: 1000, 153 | ForeignKey: 2000, 154 | } 155 | 156 | joiner := makeJoiner(LeftJoin) 157 | 158 | v, err := joiner.Join(context.Background(), 1000, leftRecord) 159 | if err != nil { 160 | t.Error(err) 161 | return 162 | } 163 | 164 | if _, ok := v.(joinedRecord); !ok { 165 | t.Error(fmt.Sprintf(`want [joinedRecord] have [%+v]`, reflect.TypeOf(v))) 166 | } 167 | 168 | } 169 | -------------------------------------------------------------------------------- /kstream/processors/join/global_table_star_joiner.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | /*import ( 4 | "context" 5 | "fmt" 6 | "github.com/tryfix/errors" 7 | "github.com/tryfix/kstream/kstream/store" 8 | "sync" 9 | ) 10 | 11 | type GlobalTableStarJoiner struct { 12 | //Topic string 13 | Joins []GlobalTableJoiner 14 | } 15 | 16 | func (j *GlobalTableStarJoiner) Join(ctx context.Context, key interface{}, leftVal interface{}) (joinedVal interface{}, err error) { 17 | 18 | wg := &sync.WaitGroup{} 19 | 20 | for _, join := range j.Joins{ 21 | 22 | } 23 | 24 | 25 | return valJoined, nil 26 | 27 | } 28 | 29 | func (j *GlobalTableJoiner) Process(ctx context.Context, key interface{}, value interface{}) (interface{}, interface{}, error) { 30 | v, err := j.Join(ctx, key, value) 31 | return key, v, err 32 | } 33 | 34 | func (j *GlobalTableJoiner) Name() string { 35 | return j.Store 36 | } 37 | 38 | func (j *GlobalTableJoiner) Type() string { 39 | return `GlobalTableJoiner` 40 | }*/ 41 | -------------------------------------------------------------------------------- /kstream/processors/join/joiner.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/kstream/topology" 6 | ) 7 | 8 | type Type int 9 | 10 | 
const ( 11 | LeftJoin Type = iota 12 | InnerJoin 13 | ) 14 | 15 | type Joiner interface { 16 | topology.Node 17 | Join(ctx context.Context, key, val interface{}) (joinedVal interface{}, err error) 18 | } 19 | 20 | type KeyMapper func(key, value interface{}) (mappedKey interface{}, err error) 21 | 22 | type ValueMapper func(left, right interface{}) (joined interface{}, err error) 23 | -------------------------------------------------------------------------------- /kstream/processors/join/repartition.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/encoding" 7 | ) 8 | 9 | type Side int 10 | 11 | const ( 12 | LeftSide Side = iota + 1 13 | RightSide 14 | ) 15 | 16 | type RepartitionTopic struct { 17 | Name string 18 | Suffix string 19 | ReplicationFactor int 20 | NumOfPartitions int 21 | MinInSycReplicas int 22 | } 23 | 24 | type Repartition struct { 25 | Enable bool 26 | StreamSide Side 27 | KeyEncoder encoding.Builder 28 | ValueEncoder encoding.Builder 29 | Topic RepartitionTopic 30 | } 31 | 32 | type RepartitionOptions struct { 33 | LeftTopic func(string) string 34 | RightTopic func(string) string 35 | LeftRepartition Repartition 36 | RightRepartition Repartition 37 | } 38 | 39 | type RepartitionOption func(sink *RepartitionOptions) 40 | 41 | func RepartitionLeftStream(keyEncodingBuilder, valueEncodingBuilder encoding.Builder) RepartitionOption { 42 | return func(opts *RepartitionOptions) { 43 | opts.LeftRepartition = Repartition{ 44 | Enable: true, 45 | StreamSide: LeftSide, 46 | KeyEncoder: keyEncodingBuilder, 47 | ValueEncoder: valueEncodingBuilder, 48 | } 49 | } 50 | } 51 | 52 | func RepartitionRightStream(keyEncodingBuilder, valueEncodingBuilder encoding.Builder) RepartitionOption { 53 | return func(opts *RepartitionOptions) { 54 | opts.RightRepartition = Repartition{ 55 | Enable: true, 56 | StreamSide: 
RightSide, 57 | KeyEncoder: keyEncodingBuilder, 58 | ValueEncoder: valueEncodingBuilder, 59 | } 60 | } 61 | } 62 | 63 | func (iOpts *RepartitionOptions) Apply(options ...RepartitionOption) { 64 | for _, o := range options { 65 | o(iOpts) 66 | } 67 | } 68 | 69 | func (r Repartition) Validate(s Side) error { 70 | fmt.Println(r) 71 | if r.StreamSide != s { 72 | return errors.New(`stream side is not compatible`) 73 | } 74 | if r.KeyEncoder == nil { 75 | return errors.New(`repartition key encoder can not be nil`) 76 | } 77 | if r.ValueEncoder == nil { 78 | return errors.New(`repartition value encoder can not be nil`) 79 | } 80 | //if r.Topic.Name == `` { 81 | // return errors.New( `repartition topic can not be empty`) 82 | //} 83 | 84 | return nil 85 | } 86 | -------------------------------------------------------------------------------- /kstream/processors/join/side_joiner.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/topology" 7 | ) 8 | 9 | type SideJoiner struct { 10 | Id int32 11 | Side string 12 | LeftWindow *Window 13 | RightWindow *Window 14 | ValueMapper ValueMapper 15 | childs []topology.Node 16 | childBuilders []topology.NodeBuilder 17 | } 18 | 19 | func (sj *SideJoiner) Build() (topology.Node, error) { 20 | var childs []topology.Node 21 | //var childBuilders []node.NodeBuilder 22 | 23 | for _, childBuilder := range sj.childBuilders { 24 | child, err := childBuilder.Build() 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | childs = append(childs, child) 30 | } 31 | 32 | return &SideJoiner{ 33 | Side: sj.Side, 34 | LeftWindow: sj.LeftWindow, 35 | RightWindow: sj.RightWindow, 36 | ValueMapper: sj.ValueMapper, 37 | childs: childs, 38 | Id: sj.Id, 39 | }, nil 40 | } 41 | 42 | func (sj *SideJoiner) ChildBuilders() []topology.NodeBuilder { 43 | return sj.childBuilders 44 | } 45 | 46 | func (sj 
*SideJoiner) AddChildBuilder(builder topology.NodeBuilder) { 47 | sj.childBuilders = append(sj.childBuilders, builder) 48 | } 49 | 50 | func (sj *SideJoiner) Next() bool { 51 | return true 52 | } 53 | 54 | func (sj *SideJoiner) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 55 | 56 | var joinedValue interface{} 57 | 58 | switch sj.Side { 59 | case `left`: 60 | v, ok := sj.RightWindow.Read(kIn) 61 | if !ok { 62 | sj.LeftWindow.Write(kIn, vIn) 63 | return nil, nil, false, nil 64 | } 65 | joinedValue, err = sj.ValueMapper(vIn, v) 66 | if err != nil { 67 | return nil, nil, false, errors.WithPrevious(err, 68 | `value mapper failed`) 69 | } 70 | case `right`: 71 | v, ok := sj.LeftWindow.Read(kIn) 72 | if !ok { 73 | sj.RightWindow.Write(kIn, vIn) 74 | return nil, nil, false, nil 75 | } 76 | joinedValue, err = sj.ValueMapper(v, vIn) 77 | if err != nil { 78 | return nil, nil, false, errors.WithPrevious(err, 79 | `value mapper failed`) 80 | } 81 | default: 82 | return nil, nil, false, errors.New(`stream joiner sides should be only "left" and "right"`) 83 | } 84 | 85 | for _, child := range sj.childs { 86 | _, _, _, err := child.Run(ctx, kIn, joinedValue) 87 | if err != nil { 88 | return nil, nil, false, err 89 | } 90 | } 91 | 92 | return kIn, joinedValue, true, nil 93 | } 94 | 95 | func (sj *SideJoiner) Type() topology.Type { 96 | return topology.Type(sj.Side + `_side_joiner`) 97 | } 98 | 99 | func (sj *SideJoiner) Childs() []topology.Node { 100 | return sj.childs 101 | } 102 | 103 | func (sj *SideJoiner) AddChild(node topology.Node) { 104 | sj.childs = append(sj.childs, node) 105 | } 106 | 107 | func (sj *SideJoiner) ID() int32 { 108 | return sj.Id 109 | } 110 | -------------------------------------------------------------------------------- /kstream/processors/join/stream_joiner.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "context" 5 | 
"github.com/tryfix/kstream/kstream/topology" 6 | ) 7 | 8 | type StreamJoiner struct { 9 | Id int32 10 | childs []topology.Node 11 | childBuilders []topology.NodeBuilder 12 | } 13 | 14 | func (j *StreamJoiner) Build() (topology.Node, error) { 15 | var childs []topology.Node 16 | //var childBuilders []node.NodeBuilder 17 | 18 | for _, childBuilder := range j.childBuilders { 19 | child, err := childBuilder.Build() 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | childs = append(childs, child) 25 | } 26 | 27 | return &StreamJoiner{ 28 | childs: childs, 29 | Id: j.Id, 30 | }, nil 31 | } 32 | 33 | func (j *StreamJoiner) ChildBuilders() []topology.NodeBuilder { 34 | return j.childBuilders 35 | } 36 | 37 | func (j *StreamJoiner) AddChildBuilder(builder topology.NodeBuilder) { 38 | j.childBuilders = append(j.childBuilders, builder) 39 | } 40 | 41 | func (j *StreamJoiner) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 42 | for _, child := range j.childs { 43 | _, _, _, err := child.Run(ctx, kIn, vIn) 44 | if err != nil { 45 | return nil, nil, false, err 46 | } 47 | } 48 | return kIn, vIn, true, nil 49 | } 50 | 51 | func (j *StreamJoiner) Childs() []topology.Node { 52 | return j.childs 53 | } 54 | 55 | func (j *StreamJoiner) AddChild(node topology.Node) { 56 | j.childs = append(j.childs, node) 57 | } 58 | 59 | func (j *StreamJoiner) Next() bool { 60 | return true 61 | } 62 | 63 | func (j *StreamJoiner) Type() topology.Type { 64 | return topology.Type(`stream_joiner`) 65 | } 66 | 67 | func (j *StreamJoiner) Name() string { 68 | return `stream_joiner` 69 | } 70 | 71 | func (j *StreamJoiner) ID() int32 { 72 | return j.Id 73 | } 74 | 75 | //type StreamJoinEncoder struct { 76 | // typ reflect.Type 77 | //} 78 | // 79 | //func (s *StreamJoinEncoder) Encode(data interface{}) ([]byte, error) { 80 | // s.typ = reflect.TypeOf(data) 81 | // var buf bytes.Buffer 82 | // enc := gob.NewEncoder(&buf) 83 | // err := enc.Encode(data) 
// Window is a concurrency-safe key/value buffer used to hold one side of a
// stream-stream join while waiting for the matching record on the other side.
type Window struct {
	// RWMutex (embedded by value, not a pointer) lets concurrent Read
	// calls proceed in parallel; the previous *sync.Mutex serialized them
	// and required a separate allocation.
	mu     sync.RWMutex
	window map[interface{}]interface{}
}

// NewWindow creates an empty join window.
func NewWindow() *Window {
	return &Window{
		window: make(map[interface{}]interface{}),
	}
}

// Write stores value under key, overwriting any previous entry.
func (w *Window) Write(key, value interface{}) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.window[key] = value
}

// Read returns the value stored under key and whether such an entry exists.
func (w *Window) Read(key interface{}) (interface{}, bool) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	v, ok := w.window[key]
	return v, ok
}
_, childBuilder := range ks.childBuilders { 23 | child, err := childBuilder.Build() 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | childs = append(childs, child) 29 | } 30 | 31 | return &KeySelector{ 32 | SelectKeyFunc: ks.SelectKeyFunc, 33 | childs: childs, 34 | Id: ks.Id, 35 | }, nil 36 | } 37 | 38 | func (ks *KeySelector) ChildBuilders() []topology.NodeBuilder { 39 | return ks.childBuilders 40 | } 41 | 42 | func (ks *KeySelector) AddChildBuilder(builder topology.NodeBuilder) { 43 | ks.childBuilders = append(ks.childBuilders, builder) 44 | } 45 | 46 | func (ks *KeySelector) Next() bool { 47 | return true 48 | } 49 | 50 | func (ks *KeySelector) ID() int32 { 51 | return ks.Id 52 | } 53 | 54 | func (ks *KeySelector) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 55 | k, err := ks.SelectKeyFunc(ctx, kIn, vIn) 56 | if err != nil { 57 | return nil, nil, false, errors.WithPrevious(err, `error in select key function`) 58 | } 59 | 60 | for _, child := range ks.childs { 61 | _, _, next, err := child.Run(ctx, k, vIn) 62 | if err != nil || !next { 63 | return nil, nil, false, err 64 | } 65 | } 66 | 67 | return k, vIn, true, err 68 | } 69 | 70 | func (ks *KeySelector) Type() topology.Type { 71 | return topology.Type(`key_selector`) 72 | } 73 | 74 | func (ks *KeySelector) Childs() []topology.Node { 75 | return ks.childs 76 | } 77 | 78 | func (ks *KeySelector) AddChild(node topology.Node) { 79 | ks.childs = append(ks.childs, node) 80 | } 81 | -------------------------------------------------------------------------------- /kstream/processors/materializer.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/store" 7 | "github.com/tryfix/kstream/kstream/topology" 8 | ) 9 | 10 | type RecordVersionExtractor func(ctx context.Context, key, value interface{}) (version 
int64, err error) 11 | type RecordVersionWriter func(ctx context.Context, version int64, vIn interface{}) (vOut interface{}, err error) 12 | 13 | type Materializer struct { 14 | Topic string 15 | Id int32 16 | Store string 17 | VersionExtractor RecordVersionExtractor 18 | VersionWriter RecordVersionWriter 19 | store store.Store 20 | Registry store.Registry 21 | childBuilders []topology.NodeBuilder 22 | childs []topology.Node 23 | } 24 | 25 | func NewMaterializeBuilder(topic, store string, registry store.Registry, id int32, options ...MaterializeOption) *Materializer { 26 | builder := &Materializer{ 27 | Topic: topic, 28 | Id: id, 29 | Store: store, 30 | VersionWriter: func(ctx context.Context, version int64, vIn interface{}) (vOut interface{}, err error) { 31 | return vIn, nil 32 | }, 33 | Registry: registry, 34 | //VersionExtractor: func(key, value interface{}) (version int64, err error) { 35 | // return 1, nil 36 | //}, 37 | } 38 | 39 | builder.applyOptions(options...) 40 | return builder 41 | } 42 | 43 | func (m *Materializer) Build() (topology.Node, error) { 44 | s, err := m.Registry.Store(m.Store) 45 | if err != nil || s == nil { 46 | return nil, errors.New(`store [` + m.Store + `] dose not exist`) 47 | } 48 | m.store = s 49 | 50 | var childs []topology.Node 51 | 52 | for _, childBuilder := range m.childBuilders { 53 | child, err := childBuilder.Build() 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | childs = append(childs, child) 59 | } 60 | m.childs = childs 61 | 62 | return m, nil 63 | } 64 | 65 | func (m *Materializer) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 66 | vOut = vIn 67 | if m.VersionExtractor != nil { 68 | storeValue, err := m.store.Get(ctx, kIn) 69 | if err != nil { 70 | return nil, nil, false, errors.WithPrevious(err, `materializer store read error`) 71 | } 72 | newVersion, err := m.VersionExtractor(ctx, kIn, storeValue) 73 | if err != nil { 74 | return nil, nil, false, 
errors.WithPrevious(err, `materializer version extractor error`) 75 | } 76 | vOut, err = m.VersionWriter(ctx, newVersion+1, vIn) 77 | if err != nil { 78 | return nil, nil, false, errors.WithPrevious(err, `materializer version writer error`) 79 | } 80 | } 81 | 82 | err = m.store.Set(ctx, kIn, vOut, 0) 83 | if err != nil { 84 | return nil, nil, false, errors.WithPrevious(err, `materializer store write error`) 85 | } 86 | 87 | for _, child := range m.childs { 88 | _, _, next, err := child.Run(ctx, kIn, vOut) 89 | if err != nil || !next { 90 | return nil, nil, false, err 91 | } 92 | } 93 | return kIn, vOut, true, err 94 | } 95 | 96 | func (m *Materializer) ChildBuilders() []topology.NodeBuilder { 97 | return m.childBuilders 98 | } 99 | 100 | func (m *Materializer) AddChildBuilder(builder topology.NodeBuilder) { 101 | m.childBuilders = append(m.childBuilders, builder) 102 | } 103 | 104 | func (m *Materializer) Type() topology.Type { 105 | return topology.TypeMaterialize 106 | } 107 | 108 | func (m *Materializer) Childs() []topology.Node { 109 | return m.childs 110 | } 111 | 112 | func (m *Materializer) AddChild(node topology.Node) { 113 | m.childs = append(m.childs, node) 114 | } 115 | 116 | type MaterializeOption func(sink *Materializer) 117 | 118 | func (m *Materializer) applyOptions(options ...MaterializeOption) { 119 | for _, option := range options { 120 | option(m) 121 | } 122 | } 123 | 124 | func WithVersionExtractor(ve RecordVersionExtractor) MaterializeOption { 125 | return func(mat *Materializer) { 126 | mat.VersionExtractor = ve 127 | } 128 | } 129 | 130 | func WithVersionWriter(vi RecordVersionWriter) MaterializeOption { 131 | return func(mat *Materializer) { 132 | mat.VersionWriter = vi 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /kstream/processors/processor.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 
3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package processors 9 | 10 | import ( 11 | "context" 12 | "github.com/tryfix/errors" 13 | "github.com/tryfix/kstream/kstream/topology" 14 | ) 15 | 16 | type ProcessFunc func(ctx context.Context, key, value interface{}) error 17 | 18 | type Processor struct { 19 | Id int32 20 | ProcessFunc ProcessFunc 21 | childBuilders []topology.NodeBuilder 22 | childs []topology.Node 23 | } 24 | 25 | func (p *Processor) Childs() []topology.Node { 26 | return p.childs 27 | } 28 | 29 | func (p *Processor) ChildBuilders() []topology.NodeBuilder { 30 | return p.childBuilders 31 | } 32 | 33 | func (p *Processor) AddChildBuilder(builder topology.NodeBuilder) { 34 | p.childBuilders = append(p.childBuilders, builder) 35 | } 36 | 37 | func (p *Processor) AddChild(node topology.Node) { 38 | p.childs = append(p.childs, node) 39 | } 40 | 41 | func (p *Processor) Run(ctx context.Context, kIn, vIn interface{}) (interface{}, interface{}, bool, error) { 42 | err := p.ProcessFunc(ctx, kIn, vIn) 43 | if err != nil { 44 | return kIn, vIn, false, errors.WithPrevious(err, `process error`) 45 | } 46 | 47 | for _, child := range p.childs { 48 | _, _, next, err := child.Run(ctx, kIn, vIn) 49 | if err != nil || !next { 50 | return nil, nil, false, err 51 | } 52 | } 53 | 54 | return kIn, vIn, true, nil 55 | } 56 | 57 | func (p *Processor) Build() (topology.Node, error) { 58 | var childs []topology.Node 59 | //var childBuilders []node.NodeBuilder 60 | 61 | for _, childBuilder := range p.childBuilders { 62 | child, err := childBuilder.Build() 63 | if err != nil { 64 | return nil, err 65 | } 66 | 67 | childs = append(childs, child) 68 | } 69 | 70 | return &Processor{ 71 | ProcessFunc: p.ProcessFunc, 72 | childs: childs, 73 | Id: p.Id, 74 | }, nil 75 | } 76 | 77 | func (p *Processor) Next() bool { 78 | return true 79 | } 80 | 81 | func (p *Processor) Name() string { 82 | return `processor` 83 | } 84 | 85 | func (p 
*Processor) Type() topology.Type { 86 | return topology.Type(`processor`) 87 | } 88 | 89 | func (p *Processor) ID() int32 { 90 | return p.Id 91 | } 92 | -------------------------------------------------------------------------------- /kstream/processors/processor_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 TryFix Engineering. 3 | * All rights reserved. 4 | * Authors: 5 | * Gayan Yapa (gmbyapa@gmail.com) 6 | */ 7 | 8 | package processors 9 | 10 | import ( 11 | "context" 12 | "testing" 13 | ) 14 | 15 | var p ProcessFunc = func(ctx context.Context, key interface{}, value interface{}) error { 16 | return nil 17 | } 18 | 19 | func TestProcessFunc_Process(t *testing.T) { 20 | if err := p(context.Background(), nil, nil); err != nil { 21 | t.Fail() 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /kstream/processors/transformer.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/topology" 7 | ) 8 | 9 | type TransFunc func(ctx context.Context, key, value interface{}) (kOut, vOut interface{}, err error) 10 | 11 | type Transformer struct { 12 | Id int32 13 | TransFunc TransFunc 14 | childBuilders []topology.NodeBuilder 15 | childs []topology.Node 16 | } 17 | 18 | func (t *Transformer) Childs() []topology.Node { 19 | return t.childs 20 | } 21 | 22 | func (t *Transformer) ChildBuilders() []topology.NodeBuilder { 23 | return t.childBuilders 24 | } 25 | 26 | func (t *Transformer) Build() (topology.Node, error) { 27 | var childs []topology.Node 28 | //var childBuilders []node.NodeBuilder 29 | 30 | for _, childBuilder := range t.childBuilders { 31 | child, err := childBuilder.Build() 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | childs = append(childs, child) 37 | } 38 | 39 | return 
&Transformer{ 40 | TransFunc: t.TransFunc, 41 | childs: childs, 42 | Id: t.Id, 43 | }, nil 44 | } 45 | 46 | func (t *Transformer) Next() bool { 47 | return true 48 | } 49 | 50 | func (t *Transformer) ID() int32 { 51 | return t.Id 52 | } 53 | 54 | func (t *Transformer) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, next bool, err error) { 55 | k, v, err := t.TransFunc(ctx, kIn, vIn) 56 | if err != nil { 57 | return nil, nil, false, errors.WithPrevious(err, `transformer error`) 58 | } 59 | 60 | for _, child := range t.childs { 61 | _, _, next, err := child.Run(ctx, k, v) 62 | if err != nil || !next { 63 | return nil, nil, false, err 64 | } 65 | } 66 | 67 | return k, v, true, err 68 | } 69 | 70 | func (t *Transformer) Type() topology.Type { 71 | return topology.Type(`transformer`) 72 | } 73 | 74 | func (t *Transformer) Name() string { 75 | return `transformer` 76 | } 77 | 78 | func (t *Transformer) AddChildBuilder(builder topology.NodeBuilder) { 79 | t.childBuilders = append(t.childBuilders, builder) 80 | } 81 | 82 | func (t *Transformer) AddChild(node topology.Node) { 83 | t.childs = append(t.childs, node) 84 | } 85 | -------------------------------------------------------------------------------- /kstream/processors/transformer_test.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | ) 8 | 9 | var tr TransFunc = func(ctx context.Context, key interface{}, value interface{}) (interface{}, interface{}, error) { 10 | k, ok := key.(int) 11 | if !ok { 12 | return nil, nil, errors.New(`invalid key`) 13 | } 14 | 15 | v, ok := value.(string) 16 | if !ok { 17 | return nil, nil, errors.New(`invalid key`) 18 | } 19 | 20 | k *= 10 21 | v += `test` 22 | 23 | return k, v, nil 24 | } 25 | 26 | var transformer = Transformer{ 27 | TransFunc: tr, 28 | } 29 | 30 | func TestTransformer_Process_Should_Transform(t *testing.T) { 31 | 32 | k, v, _, 
err := transformer.Run(context.Background(), 1, `1`) 33 | if err != nil { 34 | t.Fail() 35 | } 36 | 37 | if k != 10 { 38 | t.Fail() 39 | } 40 | 41 | if v != `1test` { 42 | t.Fail() 43 | } 44 | 45 | } 46 | 47 | func TestTransformer_Process_Should_Not_Transform_On_Error(t *testing.T) { 48 | keyOrg := `10` 49 | valOrg := 10 50 | k, v, _, err := transformer.Run(context.Background(), keyOrg, valOrg) 51 | if err == nil { 52 | t.Fail() 53 | } 54 | 55 | if k != nil || v != nil { 56 | t.Fail() 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /kstream/processors/value_transformer.go: -------------------------------------------------------------------------------- 1 | package processors 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/errors" 6 | "github.com/tryfix/kstream/kstream/topology" 7 | ) 8 | 9 | type ValueTransformFunc func(ctx context.Context, key, value interface{}) (vOut interface{}, err error) 10 | 11 | type ValueTransformer struct { 12 | Id int32 13 | ValueTransformFunc ValueTransformFunc 14 | childBuilders []topology.NodeBuilder 15 | childs []topology.Node 16 | } 17 | 18 | func (vt *ValueTransformer) Build() (topology.Node, error) { 19 | var childs []topology.Node 20 | //var childBuilders []node.NodeBuilder 21 | 22 | for _, childBuilder := range vt.childBuilders { 23 | child, err := childBuilder.Build() 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | childs = append(childs, child) 29 | } 30 | 31 | return &ValueTransformer{ 32 | ValueTransformFunc: vt.ValueTransformFunc, 33 | childs: childs, 34 | Id: vt.Id, 35 | }, nil 36 | } 37 | 38 | func (vt *ValueTransformer) ChildBuilders() []topology.NodeBuilder { 39 | return vt.childBuilders 40 | } 41 | 42 | func (vt *ValueTransformer) AddChildBuilder(builder topology.NodeBuilder) { 43 | vt.childBuilders = append(vt.childBuilders, builder) 44 | } 45 | 46 | func (vt *ValueTransformer) Next() bool { 47 | return true 48 | } 49 | 50 | func (vt 
*ValueTransformer) ID() int32 { 51 | return vt.Id 52 | } 53 | 54 | func (vt *ValueTransformer) Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) { 55 | v, err := vt.ValueTransformFunc(ctx, kIn, vIn) 56 | if err != nil { 57 | return nil, nil, false, errors.WithPrevious(err, `error in value transform function`) 58 | } 59 | 60 | for _, child := range vt.childs { 61 | _, _, next, err := child.Run(ctx, kIn, v) 62 | if err != nil || !next { 63 | return nil, nil, false, err 64 | } 65 | } 66 | 67 | return kIn, v, true, err 68 | } 69 | 70 | func (vt *ValueTransformer) Type() topology.Type { 71 | return topology.Type(`value_transformer`) 72 | } 73 | 74 | func (vt *ValueTransformer) Childs() []topology.Node { 75 | return vt.childs 76 | } 77 | 78 | func (vt *ValueTransformer) AddChild(node topology.Node) { 79 | vt.childs = append(vt.childs, node) 80 | } 81 | -------------------------------------------------------------------------------- /kstream/rebelance_handler.go: -------------------------------------------------------------------------------- 1 | package kstream 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/tryfix/kstream/consumer" 7 | "github.com/tryfix/log" 8 | ) 9 | 10 | type reBalanceHandler struct { 11 | userHandler consumer.ReBalanceHandler 12 | processors *processorPool 13 | logger log.Logger 14 | builder *StreamBuilder 15 | rebalancedCount int 16 | } 17 | 18 | func (s *reBalanceHandler) OnPartitionRevoked(ctx context.Context, revoked []consumer.TopicPartition) error { 19 | s.logger.Info(fmt.Sprintf(`partitions %v revoking...`, revoked)) 20 | defer s.logger.Info(fmt.Sprintf(`partitions %v revoked`, revoked)) 21 | for _, tp := range revoked { 22 | s.processors.Processor(tp).Stop() 23 | } 24 | 25 | if s.userHandler != nil { 26 | return s.userHandler.OnPartitionRevoked(ctx, revoked) 27 | } 28 | 29 | if err := s.startChangelogReplicas(revoked); err != nil { 30 | return err 31 | } 32 | 33 | return nil 34 | } 35 | 36 
| func (s *reBalanceHandler) OnPartitionAssigned(ctx context.Context, assigned []consumer.TopicPartition) error { 37 | 38 | s.logger.Info(fmt.Sprintf(`partitions %v assigning...`, assigned)) 39 | defer s.logger.Info(fmt.Sprintf(`partitions %v assigned`, assigned)) 40 | 41 | for _, tp := range assigned { 42 | if err := s.processors.addProcessor(tp); err != nil { 43 | return err 44 | } 45 | 46 | if err := s.processors.Processor(tp).boot(); err != nil { 47 | return err 48 | } 49 | 50 | if err := s.stopChangelogReplicas(assigned); err != nil { 51 | return err 52 | } 53 | } 54 | 55 | s.logger.Info(`streams assigned`) 56 | if s.userHandler != nil { 57 | return s.userHandler.OnPartitionAssigned(ctx, assigned) 58 | } 59 | s.rebalancedCount++ 60 | return nil 61 | } 62 | 63 | func (s *reBalanceHandler) stopChangelogReplicas(allocated []consumer.TopicPartition) error { 64 | if len(allocated) > 0 && s.rebalancedCount > 0 { 65 | for _, tp := range allocated { 66 | // stop started replicas 67 | if s.builder.streams[tp.Topic].config.changelog.replicated { 68 | if err := s.builder.changelogReplicaManager.StopReplicas([]consumer.TopicPartition{ 69 | {Topic: s.builder.streams[tp.Topic].config.changelog.topic.Name, Partition: tp.Partition}, 70 | }); err != nil { 71 | return err 72 | } 73 | } 74 | } 75 | } 76 | 77 | return nil 78 | } 79 | 80 | func (s *reBalanceHandler) startChangelogReplicas(allocated []consumer.TopicPartition) error { 81 | if len(allocated) > 0 { 82 | for _, tp := range allocated { 83 | // stop started replicas 84 | if s.builder.streams[tp.Topic].config.changelog.replicated { 85 | if err := s.builder.changelogReplicaManager.StartReplicas([]consumer.TopicPartition{ 86 | {Topic: s.builder.streams[tp.Topic].config.changelog.topic.Name, Partition: tp.Partition}, 87 | }); err != nil { 88 | return err 89 | } 90 | } 91 | } 92 | } 93 | 94 | return nil 95 | } 96 | -------------------------------------------------------------------------------- /kstream/store/hash_index.go: 
-------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "reflect" 7 | "sync" 8 | ) 9 | 10 | type KeyMapper func(key, val interface{}) (idx string) 11 | 12 | var UnknownIndex = errors.New(`index does not exist`) 13 | 14 | type stringHashIndex struct { 15 | indexes map[string]map[interface{}]bool // indexKey:recordKey:bool 16 | mapper KeyMapper 17 | mu *sync.Mutex 18 | name string 19 | } 20 | 21 | func NewStringHashIndex(name string, mapper KeyMapper) Index { 22 | return &stringHashIndex{ 23 | indexes: make(map[string]map[interface{}]bool), 24 | mapper: mapper, 25 | mu: new(sync.Mutex), 26 | name: name, 27 | } 28 | } 29 | 30 | func (s *stringHashIndex) String() string { 31 | return s.name 32 | } 33 | 34 | func (s *stringHashIndex) Write(key, value interface{}) error { 35 | s.mu.Lock() 36 | defer s.mu.Unlock() 37 | hashKey := s.mapper(key, value) 38 | _, ok := s.indexes[hashKey] 39 | if !ok { 40 | s.indexes[hashKey] = make(map[interface{}]bool) 41 | } 42 | s.indexes[hashKey][key] = true 43 | 44 | return nil 45 | } 46 | 47 | func (s *stringHashIndex) ValueIndexed(index, value interface{}) (bool, error) { 48 | hStr, ok := index.(string) 49 | if !ok { 50 | return false, errors.New(fmt.Sprintf(`unsupported hash type expected [string] given [%s]`, reflect.TypeOf(index))) 51 | } 52 | _, ok = s.indexes[hStr] 53 | if !ok { 54 | return false, nil 55 | } 56 | 57 | _, ok = s.indexes[hStr][value] 58 | return ok, nil 59 | } 60 | 61 | func (s *stringHashIndex) Hash(key, val interface{}) (hash interface{}) { 62 | return s.mapper(key, val) 63 | } 64 | 65 | func (s *stringHashIndex) WriteHash(hash, key interface{}) error { 66 | hStr, ok := hash.(string) 67 | if !ok { 68 | return errors.New(fmt.Sprintf(`unsupported hash type expected [string] given [%s]`, reflect.TypeOf(hash))) 69 | } 70 | _, ok = s.indexes[hStr] 71 | if !ok { 72 | s.indexes[hStr] = make(map[interface{}]bool) 73 | } 74 | 
s.indexes[hStr][key] = true 75 | 76 | return nil 77 | } 78 | 79 | func (s *stringHashIndex) Delete(key, value interface{}) error { 80 | s.mu.Lock() 81 | defer s.mu.Unlock() 82 | hashKey := s.mapper(key, value) 83 | if _, ok := s.indexes[hashKey]; !ok { 84 | return fmt.Errorf(`hashKey [%s] does not exist for [%s]`, hashKey, s.name) 85 | } 86 | 87 | delete(s.indexes[hashKey], key) 88 | return nil 89 | } 90 | 91 | func (s *stringHashIndex) Keys() []interface{} { 92 | s.mu.Lock() 93 | defer s.mu.Unlock() 94 | var keys []interface{} 95 | 96 | for key := range s.indexes { 97 | keys = append(keys, key) 98 | } 99 | 100 | return keys 101 | } 102 | 103 | func (s *stringHashIndex) Values() map[interface{}][]interface{} { 104 | s.mu.Lock() 105 | defer s.mu.Unlock() 106 | values := make(map[interface{}][]interface{}) 107 | 108 | for idx, keys := range s.indexes { 109 | for key := range keys { 110 | values[idx] = append(values[idx], key) 111 | } 112 | } 113 | 114 | return values 115 | } 116 | 117 | func (s *stringHashIndex) Read(key interface{}) ([]interface{}, error) { 118 | s.mu.Lock() 119 | defer s.mu.Unlock() 120 | var indexes []interface{} 121 | index, ok := s.indexes[key.(string)] 122 | if !ok { 123 | return nil, UnknownIndex 124 | } 125 | for k := range index { 126 | indexes = append(indexes, k) 127 | } 128 | 129 | return indexes, nil 130 | } 131 | -------------------------------------------------------------------------------- /kstream/store/hash_index_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/google/uuid" 5 | "reflect" 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | func TestNewIndex(t *testing.T) { 11 | var mapper func(key, val interface{}) (idx string) 12 | index := NewStringHashIndex(`foo`, mapper) 13 | type args struct { 14 | name string 15 | mapper KeyMapper 16 | } 17 | tests := []struct { 18 | name string 19 | args args 20 | want Index 21 | }{ 22 | {name: `new`, args: 
struct { 23 | name string 24 | mapper KeyMapper 25 | }{name: `foo`, mapper: mapper}, want: index}, 26 | } 27 | for _, tt := range tests { 28 | t.Run(tt.name, func(t *testing.T) { 29 | if got := NewStringHashIndex(tt.args.name, tt.args.mapper); !reflect.DeepEqual(got, tt.want) { 30 | t.Errorf("NewStringHashIndex() = %#v, want %#v", got, tt.want) 31 | } 32 | }) 33 | } 34 | } 35 | 36 | func TestHashIndex_Delete(t *testing.T) { 37 | index := NewStringHashIndex(`foo9`, func(key, val interface{}) (idx string) { 38 | return strings.Split(val.(string), `,`)[0] 39 | }) 40 | 41 | if err := index.Write(`100`, `111,222`); err != nil { 42 | t.Error(err) 43 | } 44 | 45 | if err := index.Delete(`100`, `111,222`); err != nil { 46 | t.Error(err) 47 | } 48 | 49 | data, err := index.Read(`111`) 50 | if err != nil { 51 | t.Error(err) 52 | } 53 | 54 | if len(data) > 0 { 55 | t.Fail() 56 | } 57 | } 58 | 59 | func TestHashIndex_Name(t *testing.T) { 60 | tests := []struct { 61 | name string 62 | idx Index 63 | want string 64 | }{ 65 | { 66 | name: `name`, 67 | idx: NewStringHashIndex(`foo`, nil), 68 | want: `foo`}, 69 | } 70 | for _, tt := range tests { 71 | t.Run(tt.name, func(t *testing.T) { 72 | if got := tt.idx.String(); got != tt.want { 73 | t.Errorf("Name() = %v, want %v", got, tt.want) 74 | } 75 | }) 76 | } 77 | } 78 | 79 | func TestHashIndex_Read(t *testing.T) { 80 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 81 | return strings.Split(val.(string), `,`)[0] 82 | }) 83 | 84 | if err := index.Write(`100`, `111,222`); err != nil { 85 | t.Error(err) 86 | } 87 | 88 | data, err := index.Read(`111`) 89 | if err != nil { 90 | t.Error(err) 91 | } 92 | 93 | if !reflect.DeepEqual(data, []interface{}{`100`}) { 94 | t.Errorf("expect []interface{}{`100`} have %#v", data) 95 | } 96 | } 97 | 98 | func TestHashIndex_Write(t *testing.T) { 99 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 100 | return strings.Split(val.(string), 
`,`)[0] 101 | }) 102 | 103 | if err := index.Write(`100`, `111,222`); err != nil { 104 | t.Error(err) 105 | } 106 | 107 | data, err := index.Read(`111`) 108 | if err != nil { 109 | t.Error(err) 110 | } 111 | 112 | if !reflect.DeepEqual(data, []interface{}{`100`}) { 113 | t.Errorf("expect []interface{}{`100`} have %#v", data) 114 | } 115 | } 116 | 117 | func TestHashIndex_WriteUuidKey(t *testing.T) { 118 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 119 | return strings.Split(val.(string), `,`)[0] 120 | }) 121 | 122 | uid := uuid.New() 123 | 124 | if err := index.Write(uid, `111,222`); err != nil { 125 | t.Error(err) 126 | } 127 | 128 | data, err := index.Read(`111`) 129 | if err != nil { 130 | t.Error(err) 131 | } 132 | 133 | if !reflect.DeepEqual(data, []interface{}{uid}) { 134 | t.Errorf("expect []interface{}{`100`} have %#v", data) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /kstream/store/index.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | type index struct { 9 | indexes map[interface{}]map[interface{}]bool // indexKey:recordKey:bool 10 | mapper func(key, val interface{}) (idx interface{}) 11 | mu *sync.Mutex 12 | name string 13 | } 14 | 15 | func NewIndex(name string, mapper func(key, val interface{}) (idx interface{})) Index { 16 | return &index{ 17 | indexes: make(map[interface{}]map[interface{}]bool), 18 | mapper: mapper, 19 | mu: new(sync.Mutex), 20 | name: name, 21 | } 22 | } 23 | 24 | func (s *index) String() string { 25 | return s.name 26 | } 27 | 28 | func (s *index) Write(key, value interface{}) error { 29 | s.mu.Lock() 30 | defer s.mu.Unlock() 31 | hashKey := s.mapper(key, value) 32 | _, ok := s.indexes[hashKey] 33 | if !ok { 34 | s.indexes[hashKey] = make(map[interface{}]bool) 35 | } 36 | s.indexes[hashKey][key] = true 37 | 38 | return nil 39 | } 40 | 
41 | func (s *index) ValueIndexed(index, value interface{}) (bool, error) { 42 | _, ok := s.indexes[index] 43 | if !ok { 44 | return false, nil 45 | } 46 | 47 | _, ok = s.indexes[index][value] 48 | return ok, nil 49 | } 50 | 51 | func (s *index) Hash(key, val interface{}) (hash interface{}) { 52 | return s.mapper(key, val) 53 | } 54 | 55 | func (s *index) WriteHash(hash, key interface{}) error { 56 | 57 | _, ok := s.indexes[hash] 58 | if !ok { 59 | s.indexes[hash] = make(map[interface{}]bool) 60 | } 61 | s.indexes[hash][key] = true 62 | 63 | return nil 64 | } 65 | 66 | func (s *index) Delete(key, value interface{}) error { 67 | s.mu.Lock() 68 | defer s.mu.Unlock() 69 | hashKey := s.mapper(key, value) 70 | if _, ok := s.indexes[hashKey]; !ok { 71 | return fmt.Errorf(`hashKey [%s] does not exist for [%s]`, hashKey, s.name) 72 | } 73 | 74 | delete(s.indexes[hashKey], key) 75 | return nil 76 | } 77 | 78 | func (s *index) Keys() []interface{} { 79 | s.mu.Lock() 80 | defer s.mu.Unlock() 81 | var keys []interface{} 82 | 83 | for key := range s.indexes { 84 | keys = append(keys, key) 85 | } 86 | 87 | return keys 88 | } 89 | 90 | func (s *index) Values() map[interface{}][]interface{} { 91 | s.mu.Lock() 92 | defer s.mu.Unlock() 93 | values := make(map[interface{}][]interface{}) 94 | 95 | for idx, keys := range s.indexes { 96 | for key := range keys { 97 | values[idx] = append(values[idx], key) 98 | } 99 | } 100 | 101 | return values 102 | } 103 | 104 | func (s *index) Read(key interface{}) ([]interface{}, error) { 105 | s.mu.Lock() 106 | defer s.mu.Unlock() 107 | var indexes []interface{} 108 | index, ok := s.indexes[key] 109 | if !ok { 110 | return nil, UnknownIndex 111 | } 112 | for k := range index { 113 | indexes = append(indexes, k) 114 | } 115 | 116 | return indexes, nil 117 | } 118 | -------------------------------------------------------------------------------- /kstream/store/indexed_bench_test.go: 
-------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | func BenchmarkHashIndex_Write(b *testing.B) { 11 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 12 | return strings.Split(val.(string), `,`)[0] 13 | }) 14 | b.ResetTimer() 15 | b.RunParallel(func(pb *testing.PB) { 16 | for pb.Next() { 17 | if err := index.Write(strconv.Itoa(rand.Intn(100000)+1), `111,222`); err != nil { 18 | b.Error(err) 19 | } 20 | } 21 | 22 | }) 23 | } 24 | 25 | func BenchmarkHashIndex_Read(b *testing.B) { 26 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 27 | return strings.Split(val.(string), `,`)[0] 28 | }) 29 | 30 | for i := 1; i < 1000; i++ { 31 | if err := index.Write(strconv.Itoa(i), `111,222`); err != nil { 32 | b.Error(err) 33 | } 34 | } 35 | b.ResetTimer() 36 | b.RunParallel(func(pb *testing.PB) { 37 | for pb.Next() { 38 | if _, err := index.Read(`111`); err != nil { 39 | b.Error(err) 40 | } 41 | } 42 | 43 | }) 44 | } 45 | -------------------------------------------------------------------------------- /kstream/store/indexed_store_bench_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/backend" 6 | "github.com/tryfix/kstream/backend/memory" 7 | "github.com/tryfix/kstream/kstream/encoding" 8 | "github.com/tryfix/log" 9 | "github.com/tryfix/metrics" 10 | "math/rand" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "testing" 15 | ) 16 | 17 | func BenchmarkIndexedStore_Set(b *testing.B) { 18 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 19 | return strings.Split(val.(string), `,`)[0] 20 | }) 21 | 22 | i := &indexedStore{ 23 | Store: NewMockStore(`foo`, encoding.StringEncoder{}, encoding.StringEncoder{}, backend.NewMockBackend(`foo`, 0)), 24 | 
indexes: map[string]Index{`foo`: index}, 25 | mu: new(sync.Mutex), 26 | } 27 | b.ResetTimer() 28 | b.RunParallel(func(pb *testing.PB) { 29 | for pb.Next() { 30 | if err := i.Set(context.Background(), strconv.Itoa(rand.Intn(99999)+1), `111,222`, 0); err != nil { 31 | b.Error(err) 32 | } 33 | } 34 | }) 35 | } 36 | 37 | func BenchmarkIndexedStore_GetIndexedRecords(b *testing.B) { 38 | indexedStore := NewMockStore(`foo`, encoding.StringEncoder{}, encoding.StringEncoder{}, backend.NewMockBackend(`foo`, 0)) 39 | for i := 1; i < 99909; i++ { 40 | compKey := strconv.Itoa(rand.Intn(4)+1) + `:` + strconv.Itoa(i) 41 | if err := indexedStore.Set(context.Background(), strconv.Itoa(i), compKey, 0); err != nil { 42 | b.Error(err) 43 | } 44 | } 45 | 46 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 47 | return strings.Split(val.(string), `:`)[0] 48 | }) 49 | 50 | conf := memory.NewConfig() 51 | conf.Logger = log.NewNoopLogger() 52 | conf.MetricsReporter = metrics.NoopReporter() 53 | st, err := NewIndexedStore( 54 | `foo`, 55 | encoding.StringEncoder{}, 56 | encoding.StringEncoder{}, 57 | []Index{index}, 58 | WithBackend(memory.NewMemoryBackend(conf))) 59 | if err != nil { 60 | b.Error(err) 61 | } 62 | 63 | for i := 1; i < 99909; i++ { 64 | compKey := strconv.Itoa(rand.Intn(4)+1) + `:` + strconv.Itoa(i) 65 | if err := st.Set(context.Background(), strconv.Itoa(i), compKey, 0); err != nil { 66 | b.Error(err) 67 | } 68 | } 69 | 70 | b.ResetTimer() 71 | b.RunParallel(func(pb *testing.PB) { 72 | for pb.Next() { 73 | if _, err := st.GetIndexedRecords(context.Background(), `foo`, strconv.Itoa(rand.Intn(4)+1)); err != nil { 74 | b.Error(err) 75 | } 76 | } 77 | }) 78 | } 79 | -------------------------------------------------------------------------------- /kstream/store/indexed_store_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/backend" 
6 | "github.com/tryfix/kstream/kstream/encoding" 7 | "reflect" 8 | "strings" 9 | "sync" 10 | "testing" 11 | ) 12 | 13 | func Test_indexedStore_Delete(t *testing.T) { 14 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 15 | return strings.Split(val.(string), `,`)[0] 16 | }) 17 | 18 | i := &indexedStore{ 19 | Store: NewMockStore(`foo`, encoding.StringEncoder{}, encoding.StringEncoder{}, backend.NewMockBackend(`foo`, 0)), 20 | indexes: map[string]Index{`foo`: index}, 21 | mu: new(sync.Mutex), 22 | } 23 | 24 | if err := i.Set(context.Background(), `200`, `111,222`, 0); err != nil { 25 | t.Error(err) 26 | } 27 | 28 | if err := i.Set(context.Background(), `300`, `111,333`, 0); err != nil { 29 | t.Error(err) 30 | } 31 | 32 | if err := i.Delete(context.Background(), `200`); err != nil { 33 | t.Error(err) 34 | } 35 | 36 | data, err := index.Read(`111`) 37 | if err != nil { 38 | t.Error(err) 39 | } 40 | 41 | if !reflect.DeepEqual(data, []interface{}{`300`}) { 42 | t.Errorf(`want []string{300}, have %#v`, data) 43 | } 44 | } 45 | 46 | func Test_indexedStore_Set(t *testing.T) { 47 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 48 | return strings.Split(val.(string), `,`)[0] 49 | }) 50 | 51 | i := &indexedStore{ 52 | Store: NewMockStore(`foo`, encoding.StringEncoder{}, encoding.StringEncoder{}, backend.NewMockBackend(`foo`, 0)), 53 | indexes: map[string]Index{`foo`: index}, 54 | mu: new(sync.Mutex), 55 | } 56 | 57 | if err := i.Set(context.Background(), `200`, `111,222`, 0); err != nil { 58 | t.Error(err) 59 | } 60 | 61 | if err := i.Set(context.Background(), `300`, `111,333`, 0); err != nil { 62 | t.Error(err) 63 | } 64 | 65 | data, err := index.Read(`111`) 66 | if err != nil { 67 | t.Error(data) 68 | } 69 | 70 | var want []interface{} 71 | for _, r := range data { 72 | if r.(string) == `200` || r.(string) == `300` { 73 | want = append(want, r) 74 | } 75 | } 76 | 77 | if len(want) < 2 { 78 | t.Fail() 79 | } 80 | } 
81 | 82 | func TestIndexedStore_GetIndexedRecords(t *testing.T) { 83 | index := NewStringHashIndex(`foo`, func(key, val interface{}) (idx string) { 84 | return strings.Split(val.(string), `,`)[0] 85 | }) 86 | 87 | i := &indexedStore{ 88 | Store: NewMockStore(`foo`, encoding.StringEncoder{}, encoding.StringEncoder{}, backend.NewMockBackend(`foo`, 0)), 89 | indexes: map[string]Index{`foo`: index}, 90 | mu: new(sync.Mutex), 91 | } 92 | 93 | if err := i.Set(context.Background(), `200`, `111,222`, 0); err != nil { 94 | t.Error(err) 95 | } 96 | if err := i.Set(context.Background(), `300`, `111,333`, 0); err != nil { 97 | t.Error(err) 98 | } 99 | if err := i.Set(context.Background(), `400`, `222,333`, 0); err != nil { 100 | t.Error(err) 101 | } 102 | 103 | data, err := i.GetIndexedRecords(context.Background(), `foo`, `111`) 104 | if err != nil { 105 | t.Error(data) 106 | } 107 | 108 | var want []interface{} 109 | for _, r := range data { 110 | if r.(string) == `111,222` || r.(string) == `111,333` { 111 | want = append(want, r) 112 | } 113 | } 114 | 115 | if len(want) < 2 { 116 | t.Fail() 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /kstream/store/iterator.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/tryfix/kstream/backend" 5 | "github.com/tryfix/kstream/kstream/encoding" 6 | ) 7 | 8 | type Iterator interface { 9 | SeekToFirst() 10 | SeekToLast() 11 | Seek(key interface{}) error 12 | Next() 13 | Prev() 14 | Close() 15 | Key() (interface{}, error) 16 | Value() (interface{}, error) 17 | Valid() bool 18 | Error() error 19 | } 20 | 21 | type iterator struct { 22 | iterator backend.Iterator 23 | keyEncoder encoding.Encoder 24 | valEncoder encoding.Encoder 25 | } 26 | 27 | func (i *iterator) SeekToFirst() { 28 | i.iterator.SeekToFirst() 29 | } 30 | 31 | func (i *iterator) SeekToLast() { 32 | i.iterator.SeekToLast() 33 | } 34 | 35 | 
func (i *iterator) Seek(key interface{}) error { 36 | k, err := i.keyEncoder.Encode(key) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | i.iterator.Seek(k) 42 | return nil 43 | } 44 | 45 | func (i *iterator) Next() { 46 | i.iterator.Next() 47 | } 48 | 49 | func (i *iterator) Prev() { 50 | i.iterator.Prev() 51 | } 52 | 53 | func (i *iterator) Close() { 54 | i.iterator.Close() 55 | } 56 | 57 | func (i *iterator) Key() (interface{}, error) { 58 | k := i.iterator.Key() 59 | if len(k) < 1 { 60 | return nil, nil 61 | } 62 | 63 | return i.keyEncoder.Decode(k) 64 | } 65 | 66 | func (i *iterator) Value() (interface{}, error) { 67 | v := i.iterator.Value() 68 | if len(v) < 1 { 69 | return nil, nil 70 | } 71 | 72 | return i.valEncoder.Decode(v) 73 | } 74 | 75 | func (i *iterator) Valid() bool { 76 | return i.iterator.Valid() 77 | } 78 | 79 | func (i *iterator) Error() error { 80 | return i.iterator.Error() 81 | } 82 | -------------------------------------------------------------------------------- /kstream/store/meta.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Shopify/sarama" 6 | "github.com/tryfix/log" 7 | "time" 8 | ) 9 | 10 | type Meta struct { 11 | client sarama.Client 12 | hostMappings map[string]string 13 | group string 14 | } 15 | 16 | func NewMata(c sarama.Client, group string) *Meta { 17 | m := &Meta{ 18 | client: c, 19 | hostMappings: make(map[string]string), 20 | group: group, 21 | } 22 | 23 | go m.runRefresher() 24 | return m 25 | } 26 | 27 | func (m *Meta) GetMeta(tp string) string { 28 | return m.hostMappings[tp] 29 | } 30 | 31 | func (m *Meta) Refresh() { 32 | 33 | b, err := m.client.Coordinator(m.group) 34 | if err != nil { 35 | log.Fatal(err) 36 | } 37 | 38 | res, err := b.DescribeGroups(&sarama.DescribeGroupsRequest{ 39 | Groups: []string{m.group}, 40 | }) 41 | if err != nil { 42 | log.Fatal(err) 43 | } 44 | 45 | for _, group := range res.Groups { 46 | 
if group.GroupId == m.group { 47 | for _, member := range group.Members { 48 | 49 | // get host port through following function 50 | //mt , _ := member.GetMemberMetadata() 51 | //mt.UserData 52 | 53 | ass, err := member.GetMemberAssignment() 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | 58 | for topic, partitions := range ass.Topics { 59 | for _, p := range partitions { 60 | m.hostMappings[fmt.Sprintf(`%s_%d`, topic, p)] = member.ClientHost 61 | } 62 | } 63 | } 64 | } 65 | } 66 | 67 | log.Info(fmt.Sprintf(`host meta refreshed %+v`, m.hostMappings)) 68 | } 69 | 70 | func (m *Meta) runRefresher() { 71 | t := time.NewTicker(30 * time.Second) 72 | 73 | for range t.C { 74 | m.Refresh() 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /kstream/store/mock_store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/backend" 6 | "github.com/tryfix/kstream/kstream/encoding" 7 | "time" 8 | ) 9 | 10 | type MockStore struct { 11 | name string 12 | backend backend.Backend 13 | kEncoder encoding.Encoder 14 | vEncoder encoding.Encoder 15 | } 16 | 17 | type MockRecord struct { 18 | ctx context.Context 19 | key interface{} 20 | value interface{} 21 | expiry time.Duration 22 | } 23 | 24 | func NewMockStore(name string, kEncode encoding.Encoder, vEncoder encoding.Encoder, backend backend.Backend, records ...MockRecord) Store { 25 | store := &MockStore{ 26 | name: name, 27 | kEncoder: kEncode, 28 | vEncoder: vEncoder, 29 | backend: backend, 30 | } 31 | 32 | for _, record := range records { 33 | if err := store.Set(record.ctx, record.key, record.value, record.expiry); err != nil { 34 | panic(err) 35 | } 36 | } 37 | 38 | return store 39 | } 40 | 41 | func (s *MockStore) Name() string { 42 | return s.name 43 | } 44 | 45 | func (s *MockStore) Backend() backend.Backend { 46 | return s.backend 47 | } 48 | 49 | func (s 
*MockStore) KeyEncoder() encoding.Encoder { 50 | return s.kEncoder 51 | } 52 | 53 | func (s *MockStore) ValEncoder() encoding.Encoder { 54 | return s.vEncoder 55 | } 56 | 57 | func (s *MockStore) Set(ctx context.Context, key interface{}, value interface{}, expiry time.Duration) error { 58 | k, err := s.kEncoder.Encode(key) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | v, err := s.ValEncoder().Encode(value) 64 | if err != nil { 65 | return err 66 | } 67 | return s.backend.Set(k, v, expiry) 68 | } 69 | 70 | func (s *MockStore) Get(ctx context.Context, key interface{}) (value interface{}, err error) { 71 | k, err := s.kEncoder.Encode(key) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | v, err := s.backend.Get(k) 77 | if err != nil { 78 | return nil, err 79 | } 80 | 81 | if v == nil { 82 | return nil, nil 83 | } 84 | 85 | val, err := s.vEncoder.Decode(v) 86 | if err != nil { 87 | return nil, err 88 | } 89 | 90 | return val, nil 91 | } 92 | 93 | func (*MockStore) GetRange(ctx context.Context, fromKey interface{}, toKey interface{}) (map[interface{}]interface{}, error) { 94 | panic("implement me") 95 | } 96 | 97 | func (*MockStore) GetAll(ctx context.Context) (Iterator, error) { 98 | panic("implement me") 99 | } 100 | 101 | func (s *MockStore) Delete(ctx context.Context, key interface{}) error { 102 | k, err := s.kEncoder.Encode(key) 103 | if err != nil { 104 | return err 105 | } 106 | 107 | return s.backend.Delete(k) 108 | } 109 | 110 | func (s *MockStore) String() string { 111 | return s.name 112 | } 113 | -------------------------------------------------------------------------------- /kstream/store/option.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/tryfix/kstream/backend" 5 | "github.com/tryfix/kstream/kstream/changelog" 6 | "github.com/tryfix/log" 7 | "time" 8 | ) 9 | 10 | type storeOptions struct { 11 | changelog changelog.Changelog 12 | changelogEnable 
bool 13 | backend backend.Backend 14 | backendBuilder backend.Builder 15 | expiry time.Duration 16 | buffered bool 17 | bufferSize int 18 | compactionEnabled bool 19 | logger log.Logger 20 | } 21 | 22 | type Options func(config *storeOptions) 23 | 24 | func (c *storeOptions) apply(options ...Options) { 25 | c.logger = log.NewNoopLogger() 26 | for _, opt := range options { 27 | opt(c) 28 | } 29 | } 30 | 31 | func ChangelogEnabled() Options { 32 | return func(config *storeOptions) { 33 | config.changelogEnable = true 34 | } 35 | } 36 | 37 | func WithChangelog(changelog changelog.Changelog) Options { 38 | return func(config *storeOptions) { 39 | config.changelog = changelog 40 | config.changelogEnable = true 41 | } 42 | } 43 | 44 | func Compacated() Options { 45 | return func(options *storeOptions) { 46 | options.compactionEnabled = true 47 | } 48 | } 49 | 50 | func Expire(d time.Duration) Options { 51 | return func(options *storeOptions) { 52 | options.expiry = d 53 | } 54 | } 55 | 56 | func Buffered(size int) Options { 57 | return func(options *storeOptions) { 58 | options.buffered = true 59 | options.bufferSize = size 60 | } 61 | } 62 | 63 | func WithBackend(backend backend.Backend) Options { 64 | return func(config *storeOptions) { 65 | config.backend = backend 66 | } 67 | } 68 | 69 | func WithBackendBuilder(builder backend.Builder) Options { 70 | return func(config *storeOptions) { 71 | config.backendBuilder = builder 72 | } 73 | } 74 | 75 | func WithLogger(logger log.Logger) Options { 76 | return func(config *storeOptions) { 77 | config.logger = logger 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /kstream/store/recoverable_store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/tryfix/errors" 7 | "github.com/tryfix/kstream/kstream/changelog" 8 | "github.com/tryfix/log" 9 | ) 10 | 11 | type 
RecoverableStore interface { 12 | Store 13 | Recover(ctx context.Context) error 14 | } 15 | 16 | type recoverableStore struct { 17 | Store 18 | logger log.Logger 19 | recovering bool 20 | topic string 21 | changelog changelog.Changelog 22 | } 23 | 24 | func (s *recoverableStore) Recover(ctx context.Context) error { 25 | 26 | s.logger.Info( 27 | fmt.Sprintf(`recovering from store [%s]...`, s.Name())) 28 | var c int 29 | records, err := s.changelog.ReadAll(ctx) 30 | if err != nil { 31 | return errors.WithPrevious(err, 32 | fmt.Sprintf(`cannot recover data for store [%s]`, s.Name())) 33 | } 34 | 35 | for _, record := range records { 36 | if err := s.Backend().Set(record.Key, record.Value, 0); err != nil { 37 | return err 38 | } 39 | } 40 | 41 | s.logger.Info( 42 | fmt.Sprintf(`[%d] records recovered for store [%s]...`, c, s.Name())) 43 | 44 | return nil 45 | } 46 | 47 | func (s *recoverableStore) String() string { 48 | return fmt.Sprintf("Backend: %s\nChangelogInfo: %s", s.Backend().Name(), s.changelog) 49 | } 50 | -------------------------------------------------------------------------------- /kstream/store/rpc.go: -------------------------------------------------------------------------------- 1 | package store 2 | -------------------------------------------------------------------------------- /kstream/store/state_store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/tryfix/errors" 5 | "github.com/tryfix/kstream/backend" 6 | "github.com/tryfix/kstream/data" 7 | "github.com/tryfix/kstream/kstream/encoding" 8 | ) 9 | 10 | type stateStore struct { 11 | name string 12 | options *storeOptions 13 | backend backend.Backend 14 | keyEncoder encoding.Encoder 15 | valEncoder encoding.Encoder 16 | } 17 | 18 | func NewStateStore(name string, keyEncoder encoding.Encoder, valEncoder encoding.Encoder, options ...Options) StateStore { 19 | 20 | configs := storeOptions{} 21 | 
configs.apply(options...) 22 | 23 | return &stateStore{ 24 | name: name, 25 | keyEncoder: keyEncoder, 26 | valEncoder: valEncoder, 27 | } 28 | } 29 | 30 | func (s *stateStore) Name() string { 31 | return s.name 32 | } 33 | 34 | func (s *stateStore) Set(key interface{}, value interface{}) error { 35 | k, err := s.keyEncoder.Encode(key) 36 | if err != nil { 37 | return errors.WithPrevious(err, `key encode err `) 38 | } 39 | 40 | v, err := s.valEncoder.Encode(value) 41 | if err != nil { 42 | return errors.WithPrevious(err, `key encode err `) 43 | } 44 | 45 | return s.backend.Set(k, v, 0) 46 | } 47 | 48 | func (s *stateStore) Get(key interface{}) (value interface{}, err error) { 49 | k, err := s.keyEncoder.Encode(key) 50 | if err != nil { 51 | return nil, errors.WithPrevious(err, `key encode err `) 52 | } 53 | 54 | byts, err := s.options.backend.Get(k) 55 | if err != nil { 56 | return nil, errors.WithPrevious(err, `key encode err `) 57 | } 58 | 59 | v, err := s.valEncoder.Decode(byts) 60 | if err != nil { 61 | return nil, errors.WithPrevious(err, `value decode err `) 62 | } 63 | 64 | return v, nil 65 | } 66 | 67 | func (s *stateStore) GetAll() ([]*data.Record, error) { 68 | panic("implement me") 69 | } 70 | -------------------------------------------------------------------------------- /kstream/store/store_bench_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "testing" 7 | ) 8 | 9 | func BenchmarkDefaultStore_Set(b *testing.B) { 10 | 11 | store := makeTestStore(0) 12 | ctx := context.Background() 13 | b.RunParallel(func(pb *testing.PB) { 14 | for pb.Next() { 15 | if err := store.Set(ctx, rand.Intn(10000000), `100`, 0); err != nil { 16 | b.Error(err) 17 | } 18 | } 19 | }) 20 | } 21 | 22 | func BenchmarkDefaultStore_Get(b *testing.B) { 23 | store := makeTestStore(0) 24 | ctx := context.Background() 25 | 26 | for i := 1; i < 999999; i++ { 27 | if err := 
store.Set(ctx, rand.Intn(i), `100`, 0); err != nil { 28 | b.Error(err) 29 | } 30 | } 31 | 32 | b.ResetTimer() 33 | b.StartTimer() 34 | b.RunParallel(func(pb *testing.PB) { 35 | for pb.Next() { 36 | if _, err := store.Get(ctx, rand.Intn(999998)+1); err != nil { 37 | b.Error(err) 38 | } 39 | } 40 | }) 41 | } 42 | 43 | func BenchmarkDefaultStore_Delete(b *testing.B) { 44 | store := makeTestStore(0) 45 | ctx := context.Background() 46 | 47 | for i := 1; i <= 999999; i++ { 48 | if err := store.Set(ctx, rand.Intn(i), `100`, 0); err != nil { 49 | b.Error(err) 50 | } 51 | } 52 | 53 | b.ResetTimer() 54 | b.StartTimer() 55 | b.RunParallel(func(pb *testing.PB) { 56 | for pb.Next() { 57 | if err := store.Delete(ctx, rand.Intn(999998)+1); err != nil { 58 | b.Error(err) 59 | } 60 | } 61 | }) 62 | } 63 | -------------------------------------------------------------------------------- /kstream/store/store_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/backend" 6 | "github.com/tryfix/kstream/kstream/encoding" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func makeTestStore(expiry time.Duration) Store { 12 | return &store{ 13 | backend: backend.NewMockBackend(`test_backend`, expiry), 14 | name: `test_store`, 15 | keyEncoder: encoding.IntEncoder{}, 16 | valEncoder: encoding.StringEncoder{}, 17 | } 18 | } 19 | 20 | func TestDefaultStore_Get(t *testing.T) { 21 | ctx := context.Background() 22 | st := makeTestStore(0) 23 | testValue := `test_value` 24 | err := st.Set(ctx, 100, testValue, 0) 25 | if err != nil { 26 | t.Error(err) 27 | return 28 | } 29 | 30 | v, err := st.Get(ctx, 100) 31 | if err != nil { 32 | t.Error(err) 33 | } 34 | 35 | if v != testValue { 36 | t.Fail() 37 | } 38 | } 39 | 40 | func TestDefaultStore_Get_Should_return_Nul_For_Invalid_Key(t *testing.T) { 41 | ctx := context.Background() 42 | st := makeTestStore(0) 43 | testValue := `test_value` 44 | 
testKey := 100 45 | err := st.Set(ctx, testKey, testValue, 0) 46 | if err != nil { 47 | t.Error(err) 48 | return 49 | } 50 | 51 | v, err := st.Get(ctx, 200) 52 | if err != nil { 53 | t.Error(err) 54 | } 55 | 56 | if v != nil { 57 | t.Fail() 58 | } 59 | } 60 | 61 | func TestDefaultStore_Set(t *testing.T) { 62 | ctx := context.Background() 63 | st := makeTestStore(0) 64 | testValue := `test_value` 65 | testKey := 100 66 | err := st.Set(ctx, testKey, testValue, 0) 67 | if err != nil { 68 | t.Error(err) 69 | return 70 | } 71 | 72 | v, err := st.Get(ctx, testKey) 73 | if err != nil { 74 | t.Error(err) 75 | } 76 | 77 | if v != testValue { 78 | t.Fail() 79 | } 80 | } 81 | 82 | func TestDefaultStore_Delete(t *testing.T) { 83 | ctx := context.Background() 84 | st := makeTestStore(0) 85 | testValue := `test_value` 86 | testKey := 100 87 | err := st.Set(ctx, testKey, testValue, 0) 88 | if err != nil { 89 | t.Error(err) 90 | return 91 | } 92 | 93 | v, err := st.Get(ctx, testKey) 94 | if err != nil { 95 | t.Error(err) 96 | } 97 | 98 | if v != testValue { 99 | t.Fail() 100 | } 101 | } 102 | 103 | func TestDefaultStore_Set_Record_Expiry(t *testing.T) { 104 | ctx := context.Background() 105 | st := makeTestStore(0) 106 | testValue := `test_value` 107 | testKey := 100 108 | expiry := 100 * time.Millisecond 109 | err := st.Set(ctx, testKey, testValue, expiry) 110 | if err != nil { 111 | t.Error(err) 112 | return 113 | } 114 | 115 | time.Sleep(expiry * 2) 116 | 117 | v, err := st.Get(ctx, testKey) 118 | if err != nil { 119 | t.Error(err) 120 | } 121 | 122 | if v != nil { 123 | t.Fail() 124 | } 125 | } 126 | 127 | func TestDefaultStore_Set_Store_Expiry(t *testing.T) { 128 | ctx := context.Background() 129 | expiry := 100 * time.Millisecond 130 | st := makeTestStore(expiry) 131 | testValue := `test_value` 132 | testKey := 100 133 | err := st.Set(ctx, testKey, testValue, 0) 134 | if err != nil { 135 | t.Error(err) 136 | return 137 | } 138 | 139 | time.Sleep(expiry * 2) 140 | 141 | v, 
err := st.Get(ctx, testKey) 142 | if err != nil { 143 | t.Error(err) 144 | } 145 | 146 | if v != nil { 147 | t.Fail() 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /kstream/store/uuid_hash_index.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "github.com/google/uuid" 6 | "github.com/tryfix/errors" 7 | "reflect" 8 | "sync" 9 | ) 10 | 11 | type uuidHashIndex struct { 12 | indexes map[uuid.UUID]map[interface{}]bool // indexKey:recordKey:bool 13 | mapper func(key, val interface{}) (idx uuid.UUID) 14 | mu *sync.Mutex 15 | name string 16 | } 17 | 18 | func NewUuidHashIndex(name string, mapper func(key, val interface{}) (idx uuid.UUID)) Index { 19 | return &uuidHashIndex{ 20 | indexes: make(map[uuid.UUID]map[interface{}]bool), 21 | mapper: mapper, 22 | mu: new(sync.Mutex), 23 | name: name, 24 | } 25 | } 26 | 27 | func (s *uuidHashIndex) String() string { 28 | return s.name 29 | } 30 | 31 | func (s *uuidHashIndex) Write(key, value interface{}) error { 32 | s.mu.Lock() 33 | defer s.mu.Unlock() 34 | hashKey := s.mapper(key, value) 35 | _, ok := s.indexes[hashKey] 36 | if !ok { 37 | s.indexes[hashKey] = make(map[interface{}]bool) 38 | } 39 | s.indexes[hashKey][key] = true 40 | return nil 41 | } 42 | 43 | func (s *uuidHashIndex) ValueIndexed(index, value interface{}) (bool, error) { 44 | hStr, ok := index.(uuid.UUID) 45 | if !ok { 46 | return false, errors.New(fmt.Sprintf(`unsupported hash type expected [string] given [%s]`, reflect.TypeOf(index))) 47 | } 48 | _, ok = s.indexes[hStr] 49 | if !ok { 50 | return false, nil 51 | } 52 | 53 | _, ok = s.indexes[hStr][value] 54 | return ok, nil 55 | } 56 | 57 | func (s *uuidHashIndex) Hash(key, val interface{}) (hash interface{}) { 58 | return s.mapper(key, val) 59 | } 60 | 61 | func (s *uuidHashIndex) WriteHash(hash, key interface{}) error { 62 | hStr, ok := hash.(uuid.UUID) 63 | if !ok { 64 | 
return errors.New(fmt.Sprintf(`unsupported hash type expected [string] given [%s]`, reflect.TypeOf(hash))) 65 | } 66 | _, ok = s.indexes[hStr] 67 | if !ok { 68 | s.indexes[hStr] = make(map[interface{}]bool) 69 | } 70 | s.indexes[hStr][key] = true 71 | 72 | return nil 73 | } 74 | 75 | func (s *uuidHashIndex) Delete(key, value interface{}) error { 76 | s.mu.Lock() 77 | defer s.mu.Unlock() 78 | hashKey := s.mapper(key, value) 79 | if _, ok := s.indexes[hashKey]; !ok { 80 | return fmt.Errorf(`hashKey %s does not exist for %s`, hashKey, s.name) 81 | } 82 | 83 | delete(s.indexes[hashKey], key) 84 | return nil 85 | } 86 | 87 | func (s *uuidHashIndex) Keys() []interface{} { 88 | s.mu.Lock() 89 | defer s.mu.Unlock() 90 | var keys []interface{} 91 | 92 | for key := range s.indexes { 93 | keys = append(keys, key) 94 | } 95 | 96 | return keys 97 | } 98 | 99 | func (s *uuidHashIndex) Values() map[interface{}][]interface{} { 100 | s.mu.Lock() 101 | defer s.mu.Unlock() 102 | values := make(map[interface{}][]interface{}) 103 | 104 | for idx, keys := range s.indexes { 105 | for key := range keys { 106 | values[idx] = append(values[idx], key) 107 | } 108 | } 109 | 110 | return values 111 | } 112 | 113 | func (s *uuidHashIndex) Read(key interface{}) ([]interface{}, error) { 114 | s.mu.Lock() 115 | defer s.mu.Unlock() 116 | var indexes []interface{} 117 | index, ok := s.indexes[key.(uuid.UUID)] 118 | if !ok { 119 | return nil, UnknownIndex 120 | } 121 | for k := range index { 122 | indexes = append(indexes, k) 123 | } 124 | 125 | return indexes, nil 126 | } 127 | -------------------------------------------------------------------------------- /kstream/store/uuid_hash_index_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "github.com/google/uuid" 6 | "reflect" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func TestNewUUIDHashIndexIndex(t *testing.T) { 12 | var mapper func(key, val interface{}) (idx 
uuid.UUID) 13 | index := NewUuidHashIndex(`foo`, mapper) 14 | type args struct { 15 | name string 16 | mapper func(key, val interface{}) (idx uuid.UUID) 17 | } 18 | tests := []struct { 19 | name string 20 | args args 21 | want Index 22 | }{ 23 | {name: `new`, args: struct { 24 | name string 25 | mapper func(key, val interface{}) (idx uuid.UUID) 26 | }{name: `foo`, mapper: mapper}, want: index}, 27 | } 28 | for _, tt := range tests { 29 | t.Run(tt.name, func(t *testing.T) { 30 | if got := NewUuidHashIndex(tt.args.name, tt.args.mapper); !reflect.DeepEqual(got, tt.want) { 31 | t.Errorf("NewStringHashIndex() = %#v, want %#v", got, tt.want) 32 | } 33 | }) 34 | } 35 | } 36 | 37 | func TestUUIDHashIndex_Delete(t *testing.T) { 38 | uid1 := uuid.New() 39 | uid2 := uuid.New() 40 | index := NewUuidHashIndex(`foo`, func(key, val interface{}) (idx uuid.UUID) { 41 | uid, _ := uuid.Parse(strings.Split(val.(string), `,`)[0]) 42 | return uid 43 | }) 44 | 45 | if err := index.Write(uid1, fmt.Sprintf(`%s,%s`, uid1.String(), uid2.String())); err != nil { 46 | t.Error(err) 47 | } 48 | 49 | if err := index.Delete(uid1, fmt.Sprintf(`%s,%s`, uid1.String(), uid2.String())); err != nil { 50 | t.Error(err) 51 | } 52 | 53 | data, err := index.Read(uid1) 54 | if err != nil { 55 | t.Error(err) 56 | } 57 | 58 | if len(data) > 0 { 59 | t.Fail() 60 | } 61 | } 62 | 63 | func TestUUIDHashIndex_Name(t *testing.T) { 64 | tests := []struct { 65 | name string 66 | idx Index 67 | want string 68 | }{ 69 | { 70 | name: `name`, 71 | idx: NewUuidHashIndex(`foo`, nil), 72 | want: `foo`}, 73 | } 74 | for _, tt := range tests { 75 | t.Run(tt.name, func(t *testing.T) { 76 | if got := tt.idx.String(); got != tt.want { 77 | t.Errorf("Name() = %v, want %v", got, tt.want) 78 | } 79 | }) 80 | } 81 | } 82 | 83 | func TestUUIDHashIndex_Read(t *testing.T) { 84 | uid1 := uuid.New() 85 | uid2 := uuid.New() 86 | index := NewUuidHashIndex(`foo`, func(key, val interface{}) (idx uuid.UUID) { 87 | uid, _ := 
uuid.Parse(strings.Split(val.(string), `,`)[0]) 88 | return uid 89 | }) 90 | 91 | if err := index.Write(uid1, fmt.Sprintf(`%s,%s`, uid1.String(), uid2.String())); err != nil { 92 | t.Error(err) 93 | } 94 | 95 | data, err := index.Read(uid1) 96 | if err != nil { 97 | t.Error(err) 98 | } 99 | 100 | if !reflect.DeepEqual(data, []interface{}{uid1}) { 101 | t.Errorf("expect []interface{}{`100`} have %#v", data) 102 | } 103 | } 104 | 105 | func TestUUIDHashIndex_Write(t *testing.T) { 106 | uid1 := uuid.New() 107 | uid2 := uuid.New() 108 | index := NewUuidHashIndex(`foo`, func(key, val interface{}) (idx uuid.UUID) { 109 | uid, _ := uuid.Parse(strings.Split(val.(string), `,`)[0]) 110 | return uid 111 | }) 112 | 113 | if err := index.Write(uid1, fmt.Sprintf(`%s,%s`, uid1.String(), uid2.String())); err != nil { 114 | t.Error(err) 115 | } 116 | 117 | data, err := index.Read(uid1) 118 | if err != nil { 119 | t.Error(err) 120 | } 121 | 122 | if !reflect.DeepEqual(data, []interface{}{uid1}) { 123 | t.Errorf("expect []interface{}{`100`} have %#v", data) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /kstream/topic_builder.go: -------------------------------------------------------------------------------- 1 | package kstream 2 | 3 | import ( 4 | "fmt" 5 | "github.com/tryfix/kstream/admin" 6 | "github.com/tryfix/log" 7 | ) 8 | 9 | type topicBuilder struct { 10 | topics map[string]*admin.Topic 11 | admin admin.KafkaAdmin 12 | logger log.Logger 13 | } 14 | 15 | func (tb *topicBuilder) apply(config *admin.Topic) { 16 | if _, ok := tb.topics[config.Name]; ok { 17 | tb.logger.Fatal(fmt.Sprintf(`topic [%s] already exst`, config.Name)) 18 | } 19 | 20 | tb.topics[config.Name] = config 21 | } 22 | 23 | func (tb *topicBuilder) build() { 24 | if len(tb.topics) < 1 { 25 | return 26 | } 27 | 28 | tb.logger.Info(`creating changelog topics...`) 29 | 30 | if err := tb.admin.CreateTopics(tb.topics); err != nil { 31 | tb.logger.Fatal(err) 32 | 
} 33 | } 34 | -------------------------------------------------------------------------------- /kstream/topology/node.go: -------------------------------------------------------------------------------- 1 | package topology 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | type Type string 8 | 9 | const TypeSource Type = `source` 10 | const TypeSink Type = `sink` 11 | const TypeBranch Type = `branch` 12 | const TypeThrough Type = `through` 13 | const TypeJoiner Type = `joiner` 14 | const TypeMaterialize Type = `materializer` 15 | 16 | type Node interface { 17 | Run(ctx context.Context, kIn, vIn interface{}) (kOut, vOut interface{}, cont bool, err error) 18 | Type() Type 19 | Childs() []Node 20 | AddChild(node Node) 21 | } 22 | 23 | type NodeBuilder interface { 24 | Build() (Node, error) 25 | Type() Type 26 | ChildBuilders() []NodeBuilder 27 | AddChildBuilder(builder NodeBuilder) 28 | } 29 | -------------------------------------------------------------------------------- /kstream/topology/source.go: -------------------------------------------------------------------------------- 1 | package topology 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | type SourceBuilder interface { 8 | Name() string 9 | Info() map[string]string 10 | SourceType() string 11 | Build() (Source, error) 12 | } 13 | 14 | type SinkBuilder interface { 15 | NodeBuilder 16 | Name() string 17 | ID() int32 18 | Info() map[string]string 19 | SinkType() string 20 | } 21 | 22 | type Source interface { 23 | Run(ctx context.Context, kIn, vIn []byte) (kOut, vOut interface{}, err error) 24 | Name() string 25 | Close() 26 | } 27 | 28 | type Sink interface { 29 | Node 30 | Name() string 31 | Close() error 32 | } 33 | -------------------------------------------------------------------------------- /kstream/topology/topology.go: -------------------------------------------------------------------------------- 1 | package topology 2 | 3 | import "context" 4 | 5 | type TopologyBuilder struct { 6 | Source SourceBuilder 7 | 
SourceNodeBuilder NodeBuilder 8 | } 9 | 10 | func (tb TopologyBuilder) Build() (Topology, error) { 11 | 12 | topology := Topology{} 13 | 14 | sourceNode, err := tb.SourceNodeBuilder.Build() 15 | if err != nil { 16 | return topology, err 17 | } 18 | 19 | source, err := tb.Source.Build() 20 | if err != nil { 21 | return topology, err 22 | } 23 | 24 | topology.SourceNode = sourceNode 25 | topology.Source = source 26 | 27 | return topology, nil 28 | } 29 | 30 | type Topology struct { 31 | Source Source 32 | SourceNode Node 33 | } 34 | 35 | func (t Topology) Run(ctx context.Context, kIn, vIn []byte) (kOut, vOut interface{}, err error) { 36 | kOut, vOut, err = t.Source.Run(ctx, kIn, vIn) 37 | if err != nil { 38 | return nil, nil, err 39 | } 40 | 41 | _, _, _, err = t.SourceNode.Run(ctx, kOut, vOut) 42 | if err != nil { 43 | return nil, nil, err 44 | } 45 | 46 | return 47 | } 48 | -------------------------------------------------------------------------------- /kstream/window/sliding.go: -------------------------------------------------------------------------------- 1 | package window 2 | 3 | import "github.com/tryfix/kstream/kstream/context" 4 | 5 | type Window interface { 6 | Store(ctx context.Context, key, value interface{}) error 7 | Get(ctx context.Context, key interface{}) (value interface{}, err error) 8 | } 9 | 10 | //type slidingWindow 11 | -------------------------------------------------------------------------------- /kstream/worker_pool/pool_bench_test.go: -------------------------------------------------------------------------------- 1 | package worker_pool 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "testing" 8 | ) 9 | 10 | var pOrdered *Pool 11 | var pRandom *Pool 12 | 13 | func init() { 14 | //builder := node.NewMockTopologyBuilder(10, 10) 15 | // 16 | //pl := NewPool(`1`, builder, metrics.NoopReporter(), &PoolConfig{ 17 | // NumOfWorkers: 1000, 18 | // Order: OrderByKey, 19 | // WorkerBufferSize: 10000, 20 | //}) 21 | //pOrdered = pl 
22 | // 23 | //plRd := NewPool(`2`, builder, metrics.NoopReporter(), &PoolConfig{ 24 | // NumOfWorkers: 1000, 25 | // Order: OrderRandom, 26 | // WorkerBufferSize: 10000, 27 | //}) 28 | //pRandom = plRd 29 | } 30 | 31 | func BenchmarkPool_Run_Random(b *testing.B) { 32 | 33 | b.RunParallel(func(pb *testing.PB) { 34 | k := rand.Intn(8) + 1 35 | for pb.Next() { 36 | pRandom.Run(context.Background(), []byte(`200`), []byte(fmt.Sprintf(`br_%d`, k)), func() {}) 37 | } 38 | }) 39 | 40 | } 41 | 42 | func BenchmarkPool_Run_Ordered(b *testing.B) { 43 | 44 | b.RunParallel(func(pb *testing.PB) { 45 | k := rand.Intn(8) + 1 46 | for pb.Next() { 47 | pOrdered.Run(context.Background(), []byte(`200`), []byte(fmt.Sprintf(`br_%d`, k)), func() {}) 48 | } 49 | }) 50 | 51 | } 52 | 53 | //func BenchmarkPool_Run_Ordered(b *testing.B) { 54 | // 55 | // var count int 56 | // 57 | // f := new(flow.MockFlow) 58 | // var processor processors.ProcessFunc = func(ctx context.Context, key interface{}, value interface{}) error { 59 | // 60 | // v, ok := key.(int) 61 | // if !ok { 62 | // b.Error(fmt.Sprintf(`expected [int] have [%+v]`, reflect.TypeOf(key))) 63 | // } 64 | // 65 | // count = v 66 | // 67 | // return nil 68 | // } 69 | // 70 | // f.ProcessorsM = append(f.ProcessorsM, processor) 71 | // executor := flow.NewPlowExecutor(f, logger.DefaultLogger) 72 | // 73 | // p := NewPool(executor, &encoding.IntEncoder{}, &encoding.IntEncoder{}, &PoolConfig{ 74 | // NumOfWorkers: 20, 75 | // Order: OrderByKey, 76 | // WorkerBufferSize: 10, 77 | // }) 78 | // 79 | // b.RunParallel(func(pb *testing.PB) { 80 | // for pb.Next() { 81 | // done := p.Run(context.Background(), []byte(string(`100`)), []byte(string(`100`))) 82 | // <-done 83 | // } 84 | // }) 85 | // 86 | // if count != 100 { 87 | // b.Fail() 88 | // } 89 | // 90 | //} 91 | -------------------------------------------------------------------------------- /kstream/worker_pool/pool_test.go: 
-------------------------------------------------------------------------------- 1 | package worker_pool 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestPool_Run(t *testing.T) { 8 | 9 | } 10 | -------------------------------------------------------------------------------- /producer/config.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "github.com/Shopify/sarama" 5 | "github.com/tryfix/log" 6 | "github.com/tryfix/metrics" 7 | ) 8 | 9 | type Config struct { 10 | Id string 11 | *sarama.Config 12 | Pool struct { 13 | NumOfWorkers int 14 | } 15 | BootstrapServers []string 16 | RequiredAcks RequiredAcks 17 | Partitioner Partitioner 18 | Logger log.Logger 19 | MetricsReporter metrics.Reporter 20 | } 21 | 22 | func NewConfig() *Config { 23 | c := new(Config) 24 | c.setDefaults() 25 | return c 26 | } 27 | 28 | func (c *Config) validate() error { 29 | if err := c.Config.Validate(); err != nil { 30 | return err 31 | } 32 | return nil 33 | } 34 | 35 | func (c *Config) setDefaults() { 36 | c.Config = sarama.NewConfig() 37 | c.Producer.RequiredAcks = sarama.RequiredAcks(WaitForAll) 38 | c.Producer.Return.Errors = true 39 | c.Producer.Return.Successes = true 40 | c.Logger = log.NewNoopLogger() 41 | //c.Config.Version = sarama.V2_3_0_0 42 | c.MetricsReporter = metrics.NoopReporter() 43 | 44 | c.Producer.Compression = sarama.CompressionSnappy 45 | 46 | if c.Partitioner == Manual { 47 | c.Producer.Partitioner = sarama.NewManualPartitioner 48 | } 49 | 50 | if c.Partitioner == HashBased { 51 | c.Producer.Partitioner = sarama.NewHashPartitioner 52 | } 53 | 54 | if c.Partitioner == Random { 55 | c.Producer.Partitioner = sarama.NewRandomPartitioner 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /producer/mock-producer.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | 
"context" 5 | "github.com/tryfix/kstream/admin" 6 | "github.com/tryfix/kstream/data" 7 | "hash" 8 | "hash/fnv" 9 | ) 10 | 11 | type MockStreamProducer struct { 12 | hasher hash.Hash32 13 | topics *admin.Topics 14 | } 15 | 16 | func NewMockProducer(topics *admin.Topics) *MockStreamProducer { 17 | return &MockStreamProducer{ 18 | hasher: fnv.New32a(), 19 | topics: topics, 20 | } 21 | } 22 | 23 | func (msp *MockStreamProducer) Produce(ctx context.Context, message *data.Record) (partition int32, offset int64, err error) { 24 | msp.hasher.Reset() 25 | _, err = msp.hasher.Write(message.Key) 26 | if err != nil { 27 | return partition, offset, err 28 | } 29 | 30 | topic, err := msp.topics.Topic(message.Topic) 31 | if err != nil { 32 | return partition, offset, err 33 | } 34 | 35 | p := int64(msp.hasher.Sum32()) % int64(len(topic.Partitions())) 36 | pt, err := topic.Partition(int(p)) 37 | if err != nil { 38 | return 39 | } 40 | 41 | message.Partition = int32(p) 42 | if err = pt.Append(message); err != nil { 43 | return 44 | } 45 | 46 | return int32(p), message.Offset, nil 47 | } 48 | 49 | func (msp *MockStreamProducer) ProduceBatch(ctx context.Context, messages []*data.Record) error { 50 | for _, msg := range messages { 51 | if _, _, err := msp.Produce(ctx, msg); err != nil { 52 | return err 53 | } 54 | } 55 | return nil 56 | } 57 | 58 | func (msp *MockStreamProducer) Close() error { 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /producer/pool.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/data" 6 | "hash" 7 | "hash/fnv" 8 | ) 9 | 10 | type Pool struct { 11 | NumOfWorkers int64 12 | producers map[int64]Producer 13 | hasher hash.Hash32 14 | } 15 | 16 | func NewPool(NumOfWorkers int, builder Builder) (*Pool, error) { 17 | 18 | producers := make(map[int64]Producer) 19 | 20 | pool := &Pool{ 21 | 
NumOfWorkers: int64(NumOfWorkers), 22 | producers: producers, 23 | hasher: fnv.New32a(), 24 | } 25 | 26 | for i := 0; i < NumOfWorkers; i++ { 27 | p, err := builder(new(Config)) 28 | if err != nil { 29 | return nil, err 30 | } 31 | pool.producers[int64(i)] = p 32 | } 33 | 34 | return pool, nil 35 | } 36 | 37 | func (p *Pool) Produce(ctx context.Context, message *data.Record) (partition int32, offset int64, err error) { 38 | producer, err := p.producer(message.Key) 39 | if err != nil { 40 | return 0, 0, err 41 | } 42 | 43 | return producer.Produce(ctx, message) 44 | } 45 | 46 | func (p *Pool) ProduceBatch(ctx context.Context, messages []*data.Record) error { 47 | producer, err := p.producer(messages[0].Key) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | return producer.ProduceBatch(ctx, messages) 53 | } 54 | 55 | func (p *Pool) Close() error { 56 | for _, producer := range p.producers { 57 | if err := producer.Close(); err != nil { 58 | println(err) 59 | } 60 | } 61 | 62 | return nil 63 | } 64 | 65 | func (p *Pool) producer(key []byte) (Producer, error) { 66 | p.hasher.Reset() 67 | _, err := p.hasher.Write(key) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | w := int64(p.hasher.Sum32()) % p.NumOfWorkers 73 | 74 | return p.producers[w], nil 75 | } 76 | -------------------------------------------------------------------------------- /producer/producer_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "context" 5 | "github.com/tryfix/kstream/admin" 6 | "github.com/tryfix/kstream/data" 7 | "testing" 8 | ) 9 | 10 | func setupMockTopics(t *testing.T, topics *admin.Topics) { 11 | if err := topics.AddTopic(&admin.MockTopic{ 12 | Name: "", 13 | Meta: &admin.Topic{ 14 | Name: "testing", 15 | NumPartitions: 2, 16 | }, 17 | }); err != nil { 18 | t.Error(err) 19 | } 20 | 21 | } 22 | 23 | func TestMockProducer_Produce(t *testing.T) { 24 | topics := admin.NewMockTopics() 25 | 
// strToMap flattens a struct into dotted-path -> string pairs.
type strToMap struct {
	normalized map[string]string
}

// StrToMap walks v (a struct or pointer to struct) and returns its exported
// fields as [path, value] pairs sorted by path, with nested fields joined by
// dots under the given path prefix. Non-struct or nil input yields no pairs.
func StrToMap(path string, v interface{}) [][]string {
	m := strToMap{normalized: make(map[string]string)}
	return m.sortAndConvert(path, v)
}

// sortAndConvert flattens v and returns the pairs in sorted key order.
func (n *strToMap) sortAndConvert(parent string, v interface{}) [][]string {
	n.split(parent, reflect.ValueOf(v))

	return n.sort()
}

// sort returns the accumulated map as [key, value] pairs sorted by key.
func (n *strToMap) sort() [][]string {
	var keyVals [][]string
	keys := make([]string, 0, len(n.normalized))
	for k := range n.normalized {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		keyVals = append(keyVals, []string{k, n.normalized[k]})
	}
	return keyVals
}

// split recursively walks the struct value v, recording each exported leaf
// field under its dotted path.
func (n *strToMap) split(parent string, v reflect.Value) {
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	// BUG FIX: a nil pointer input produces an invalid Value after Elem();
	// calling IsZero on it panicked.
	if !v.IsValid() || v.IsZero() {
		return
	}

	// BUG FIX: guard non-struct input; NumField panics on other kinds.
	if v.Kind() != reflect.Struct {
		return
	}

	types := v.Type()

	for i := 0; i < v.NumField(); i++ {

		f := v.Field(i)
		path := types.Field(i).Name

		// unexported fields cannot be read through reflection
		if !f.CanInterface() {
			continue
		}

		if parent != `` {
			path = parent + `.` + path
		}

		// nil interfaces are recorded as empty strings
		if f.Kind() == reflect.Interface && f.IsNil() {
			n.normalized[path] = ``
			continue
		}

		// prefer a String() or Name() method when the field offers one.
		// (the nil-interface and CanInterface cases are already excluded above)
		if f.NumMethod() > 0 {
			emty := reflect.Value{}
			if vv := f.MethodByName(`String`); vv != emty {
				n.normalized[path] = vv.Call(nil)[0].String()
				continue
			} else if vv := f.MethodByName(`Name`); vv != emty {
				n.normalized[path] = vv.Call(nil)[0].String()
				continue
			}
		}

		// recurse into pointers and nested structs
		if f.Kind() == reflect.Ptr || f.Kind() == reflect.Struct {
			n.split(path, f)
			continue
		}

		if path != `` {
			n.normalized[path] = n.toString(f)
		}
	}
}

// toString renders a leaf field value as a string.
func (n *strToMap) toString(value reflect.Value) string {
	switch value.Kind() {
	case reflect.Map, reflect.Array, reflect.Slice:
		return fmt.Sprintf(`%+v`, value)
	case reflect.Int, reflect.Int32, reflect.Int16, reflect.Int64:
		return fmt.Sprintf(`%d`, value.Int())
	case reflect.Bool:
		return fmt.Sprint(value.Bool())
	case reflect.Float64, reflect.Float32:
		return fmt.Sprint(value.Float())
	case reflect.Func:
		return runtime.FuncForPC(value.Pointer()).Name()
	default:
		return value.String()
	}
}