├── .github
│   └── workflows
│       ├── build-test.yml
│       └── golangci-lint.yml
├── .gitignore
├── .golangci.yml
├── .idea
│   └── .gitignore
├── LICENSE
├── README.md
├── bench
│   ├── encode_decode_test.go
│   ├── insert_redis_test.go
│   └── insert_simple_test.go
├── client.go
├── example
│   ├── README.md
│   ├── cmd
│   │   ├── advanced
│   │   │   └── main.go
│   │   ├── advanced_redis
│   │   │   └── main.go
│   │   ├── redis
│   │   │   └── main.go
│   │   ├── redis_safe
│   │   │   └── main.go
│   │   ├── redis_sql
│   │   │   └── main.go
│   │   ├── simple
│   │   │   └── main.go
│   │   ├── simple_2
│   │   │   └── main.go
│   │   ├── simple_safe
│   │   │   └── main.go
│   │   └── simple_sql
│   │       └── main.go
│   └── pkg
│       └── tables
│           ├── advanced.go
│           └── example.go
├── go.mod
├── go.sum
├── src
│   ├── buffer
│   │   ├── cxmem
│   │   │   └── buffer.go
│   │   ├── cxredis
│   │   │   ├── buffer.go
│   │   │   └── connection.go
│   │   └── cxsyncmem
│   │       └── buffer.go
│   ├── cx
│   │   ├── buffer.go
│   │   ├── buffer_batch.go
│   │   ├── db.go
│   │   ├── log.go
│   │   └── support.go
│   ├── db
│   │   ├── cxnative
│   │   │   └── impl.go
│   │   └── cxsql
│   │       └── impl.go
│   └── retry
│       ├── retry.go
│       ├── retry_memory.go
│       ├── retry_stat.go
│       └── retry_writer.go
├── tests
│   ├── buffer_row_test.go
│   ├── client_impl_test.go
│   ├── integration_memory_test.go
│   └── integration_test.go
├── write.go
├── write_blocking.go
└── write_options.go
/.github/workflows/build-test.yml:
--------------------------------------------------------------------------------
1 | name: build_and_tests
2 | on:
3 | push:
4 | paths-ignore:
5 | - 'bin/**'
6 | - 'images/**'
7 | - 'share/**'
8 | - '.github/**'
9 | - '**.md'
10 | - '.gitignore'
11 | tags:
12 | - v*
13 | branches:
14 | - master
15 | - main
16 | - feature/**
17 | - bugfix/**
18 | pull_request:
19 | types:
20 | - opened
21 | jobs:
22 | build:
23 | name: Build
24 | strategy:
25 | matrix:
26 | go-version: [1.18.x]
27 | platform: [ubuntu-latest, macos-latest, windows-latest]
28 | runs-on: ${{ matrix.platform }}
29 | steps:
30 | - name: Set up Go
31 | uses: actions/setup-go@v2
32 | with:
33 | go-version: ${{ matrix.go-version }}
34 |
35 | - name: Check out source code
36 | uses: actions/checkout@v2
37 |
38 | - name: Build
39 | env:
40 | GOPROXY: "https://proxy.golang.org"
41 | run: go build ./example/cmd/simple/main.go
42 |
43 | - name: Test
44 | env:
45 | GOPROXY: "https://proxy.golang.org"
46 | run: go test -v ./...
--------------------------------------------------------------------------------
/.github/workflows/golangci-lint.yml:
--------------------------------------------------------------------------------
1 | name: golangci_lint
2 | on:
3 | push:
4 | paths-ignore:
5 | - 'bin/**'
6 | - 'images/**'
7 | - 'share/**'
8 | - '.github/**'
9 | - '**.md'
10 | - '.gitignore'
11 | tags:
12 | - v*
13 | branches:
14 | - master
15 | - main
16 | - feature/**
17 | - bugfix/**
18 | pull_request:
19 | types:
20 | - opened
21 | jobs:
22 | golangci:
23 | name: lint
24 | runs-on: ubuntu-latest
25 | steps:
26 | - uses: actions/checkout@v2
27 | - name: golangci-lint
28 | uses: golangci/golangci-lint-action@v2
29 | with:
30 | # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
31 | version: latest
32 |
33 | # Optional: working directory, useful for monorepos
34 | # working-directory: somedir
35 |
36 | # Optional: golangci-lint command line arguments.
37 | # args: --issues-exit-code=0
38 |
39 | # Optional: show only new issues if it's a pull request. The default value is `false`.
40 | # only-new-issues: true
41 |
42 | # Optional: if set to true then the action will use pre-installed Go.
43 | # skip-go-installation: true
44 |
45 | # Optional: if set to true then the action don't cache or restore ~/go/pkg.
46 | # skip-pkg-cache: true
47 |
48 | # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
49 | # skip-build-cache: true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 |
17 | .idea
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | # This file contains only configs which differ from defaults.
2 | # All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml
3 | linters-settings:
4 | cyclop:
5 | # The maximal code complexity to report.
6 | # Default: 10
7 | max-complexity: 30
8 | # The maximal average package complexity.
9 | # If it's higher than 0.0 (float) the check is enabled
10 | # Default: 0.0
11 | package-average: 10.0
12 |
13 | errcheck:
14 | # Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
15 | # Such cases aren't reported by default.
16 | # Default: false
17 | check-type-assertions: true
18 |
19 | exhaustive:
20 | # Program elements to check for exhaustiveness.
21 | # Default: [ switch ]
22 | check:
23 | - switch
24 | - map
25 |
26 | funlen:
27 | # Checks the number of lines in a function.
28 | # If lower than 0, disable the check.
29 | # Default: 60
30 | lines: 100
31 | # Checks the number of statements in a function.
32 | # If lower than 0, disable the check.
33 | # Default: 40
34 | statements: 50
35 |
36 | gocognit:
37 | # Minimal code complexity to report
38 | # Default: 30 (but we recommend 10-20)
39 | min-complexity: 20
40 |
41 | gocritic:
42 | # Settings passed to gocritic.
43 | # The settings key is the name of a supported gocritic checker.
44 | # The list of supported checkers can be find in https://go-critic.github.io/overview.
45 | settings:
46 | captLocal:
47 | # Whether to restrict checker to params only.
48 | # Default: true
49 | paramsOnly: false
50 | underef:
51 | # Whether to skip (*x).method() calls where x is a pointer receiver.
52 | # Default: true
53 | skipRecvDeref: false
54 |
55 | gomnd:
56 | # List of function patterns to exclude from analysis.
57 | # Values always ignored: `time.Date`,
58 | # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
59 | # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
60 | # Default: []
61 | ignored-functions:
62 | - os.Chmod
63 | - os.Mkdir
64 | - os.MkdirAll
65 | - os.OpenFile
66 | - os.WriteFile
67 | - prometheus.ExponentialBuckets
68 | - prometheus.ExponentialBucketsRange
69 | - prometheus.LinearBuckets
70 |
71 | gomodguard:
72 | blocked:
73 | # List of blocked modules.
74 | # Default: []
75 | modules:
76 | - github.com/golang/protobuf:
77 | recommendations:
78 | - google.golang.org/protobuf
79 | reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules"
80 | - github.com/satori/go.uuid:
81 | recommendations:
82 | - github.com/google/uuid
83 | reason: "satori's package is not maintained"
84 | - github.com/gofrs/uuid:
85 | recommendations:
86 | - github.com/google/uuid
87 | reason: "see recommendation from dev-infra team: https://confluence.gtforge.com/x/gQI6Aw"
88 |
89 | govet:
90 | # Enable all analyzers.
91 | # Default: false
92 | enable-all: true
93 | # Disable analyzers by name.
94 | # Run `go tool vet help` to see all analyzers.
95 | # Default: []
96 | disable:
97 | - fieldalignment # too strict
98 | # Settings per analyzer.
99 | settings:
100 | shadow:
101 | # Whether to be strict about shadowing; can be noisy.
102 | # Default: false
103 | strict: true
104 |
105 | nakedret:
106 | # Make an issue if func has more lines of code than this setting, and it has naked returns.
107 | # Default: 30
108 | max-func-lines: 0
109 |
110 | nolintlint:
111 | # Exclude following linters from requiring an explanation.
112 | # Default: []
113 | allow-no-explanation: [ funlen, gocognit, lll ]
114 | # Enable to require an explanation of nonzero length after each nolint directive.
115 | # Default: false
116 | require-explanation: true
117 | # Enable to require nolint directives to mention the specific linter being suppressed.
118 | # Default: false
119 | require-specific: true
120 |
121 | rowserrcheck:
122 | # database/sql is always checked
123 | # Default: []
124 | packages:
125 | - github.com/jmoiron/sqlx
126 |
127 | tenv:
128 | # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures.
129 | # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.
130 | # Default: false
131 | all: true
132 | lll:
133 | line-length: 140
134 |
135 | linters:
136 | disable-all: true
137 | enable:
138 | ## enabled by default
139 | - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases
140 | - gosimple # specializes in simplifying a code
141 | - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
142 | - ineffassign # detects when assignments to existing variables are not used
143 | - staticcheck # is a go vet on steroids, applying a ton of static analysis checks
144 | - typecheck # like the front-end of a Go compiler, parses and type-checks Go code
145 | - unused # checks for unused constants, variables, functions and types
146 | ## disabled by default
147 | - asasalint # checks for pass []any as any in variadic func(...any)
148 | - asciicheck # checks that your code does not contain non-ASCII identifiers
149 | - bidichk # checks for dangerous unicode character sequences
150 | - bodyclose # checks whether HTTP response body is closed successfully
151 | - cyclop # checks function and package cyclomatic complexity
152 | - dupl # tool for code clone detection
153 | - durationcheck # checks for two durations multiplied together
154 | - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
155 | - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13
156 | - execinquery # checks query string in Query function which reads your Go src files and warning it finds
157 | - exhaustive # checks exhaustiveness of enum switch statements
158 | - exportloopref # checks for pointers to enclosing loop variables
159 | # - forbidigo # forbids identifiers
160 | - funlen # tool for detection of long functions
161 | - gochecknoglobals # checks that no global variables exist
162 | - gochecknoinits # checks that no init functions are present in Go code
163 | - gocognit # computes and checks the cognitive complexity of functions
164 | - goconst # finds repeated strings that could be replaced by a constant
165 | - gocritic # provides diagnostics that check for bugs, performance and style issues
166 | - gocyclo # computes and checks the cyclomatic complexity of functions
167 | # - godot # checks if comments end in a period
168 | - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt
169 | # - gomnd # detects magic numbers
170 | - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
171 | - gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations
172 | - goprintffuncname # checks that printf-like functions are named with f at the end
173 | - gosec # inspects source code for security problems
174 | - lll # reports long lines
175 | # - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap)
176 | - makezero # finds slice declarations with non-zero initial length
177 | - nakedret # finds naked returns in functions greater than a specified function length
178 | - nestif # reports deeply nested if statements
179 | - nilerr # finds the code that returns nil even if it checks that the error is not nil
180 | - nilnil # checks that there is no simultaneous return of nil error and an invalid value
181 | - noctx # finds sending http request without context.Context
182 | # - nolintlint # reports ill-formed or insufficient nolint directives
183 | # - nonamedreturns # reports all named returns
184 | - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL
185 | - predeclared # finds code that shadows one of Go's predeclared identifiers
186 | - promlinter # checks Prometheus metrics naming via promlint
187 | - reassign # checks that package variables are not reassigned
188 | - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint
189 | - rowserrcheck # checks whether Err of rows is checked successfully
190 | - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
191 | - stylecheck # is a replacement for golint
192 | - tenv # detects using os.Setenv instead of t.Setenv since Go1.17
193 | # - testableexamples # checks if examples are testable (have an expected output)
194 | # - testpackage # makes you use a separate _test package
195 | - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
196 | - unconvert # removes unnecessary type conversions
197 | - unparam # reports unused function parameters
198 | - usestdlibvars # detects the possibility to use variables/constants from the Go standard library
199 | - wastedassign # finds wasted assignment statements
200 | - whitespace # detects leading and trailing whitespace
201 |
202 | ## you may want to enable
203 | #- decorder # checks declaration order and count of types, constants, variables and functions
204 | #- exhaustruct # checks if all structure fields are initialized
205 | #- gci # controls golang package import order and makes it always deterministic
206 | #- godox # detects FIXME, TODO and other comment keywords
207 | #- goheader # checks is file header matches to pattern
208 | #- interfacebloat # checks the number of methods inside an interface
209 | #- ireturn # accept interfaces, return concrete types
210 | #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated
211 | #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope
212 | #- wrapcheck # checks that errors returned from external packages are wrapped
213 |
214 | ## disabled
215 | #- containedctx # detects struct contained context.Context field
216 | #- contextcheck # [too many false positives] checks the function whether use a non-inherited context
217 | #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages
218 | #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
219 | #- dupword # [useless without config] checks for duplicate words in the source code
220 | #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted
221 | #- forcetypeassert # [replaced by errcheck] finds forced type assertions
222 | #- goerr113 # [too strict] checks the errors handling expressions
223 | #- gofmt # [replaced by goimports] checks whether code was gofmt-ed
224 | #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed
225 | #- grouper # analyzes expression groups
226 | #- importas # enforces consistent import aliases
227 | #- maintidx # measures the maintainability index of each function
228 | #- misspell # [useless] finds commonly misspelled English words in comments
229 | #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity
230 | #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test
231 | #- tagliatelle # checks the struct tags
232 | #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers
233 | #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines
234 |
235 | ## deprecated
236 | #- deadcode # [deprecated, replaced by unused] finds unused code
237 | #- exhaustivestruct # [deprecated, replaced by exhaustruct] checks if all struct's fields are initialized
238 | #- golint # [deprecated, replaced by revive] golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
239 | #- ifshort # [deprecated] checks that your code uses short syntax for if-statements whenever possible
240 | #- interfacer # [deprecated] suggests narrower interface types
241 | #- maligned # [deprecated, replaced by govet fieldalignment] detects Go structs that would take less memory if their fields were sorted
242 | #- nosnakecase # [deprecated, replaced by revive var-naming] detects snake case of variable naming and function name
243 | #- scopelint # [deprecated, replaced by exportloopref] checks for unpinned variables in go programs
244 | #- structcheck # [deprecated, replaced by unused] finds unused struct fields
245 | #- varcheck # [deprecated, replaced by unused] finds unused global variables and constants
246 |
247 |
248 | issues:
249 | # Maximum count of issues with the same text.
250 | # Set to 0 to disable.
251 | # Default: 3
252 | max-same-issues: 50
253 |
254 | exclude-rules:
255 | - source: "^//\\s*go:generate\\s"
256 | linters: [ lll ]
257 | - source: "(noinspection|TODO)"
258 | linters: [ godot ]
259 | - source: "//noinspection"
260 | linters: [ gocritic ]
261 | - source: "^\\s+if _, ok := err\\.\\([^.]+\\.InternalError\\); ok {"
262 | linters: [ errorlint ]
263 | - path: "_test\\.go"
264 | linters:
265 | - bodyclose
266 | - dupl
267 | - funlen
268 | - goconst
269 | - gosec
270 | - noctx
271 | - wrapcheck
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/zikwall/clickhouse-buffer/v4/actions)
2 | [](https://github.com/zikwall/clickhouse-buffer/v4/actions)
3 |
4 |
5 | # Clickhouse Buffer
6 | An easy-to-use, powerful and productive package for writing data to Clickhouse columnar database
7 |
8 |
9 | ## Install
10 |
11 | - for go-clickhouse v1 `$ go get -u github.com/zikwall/clickhouse-buffer`
12 | - for go-clickhouse v2 `$ go get -u github.com/zikwall/clickhouse-buffer/v4`
13 |
14 | ### Why and what for
15 |
16 | When using the Clickhouse database in real projects,
17 | you often have to build your own ~~bicycles~~ in the form of queues and buffers
18 | that accumulate a certain amount of data, or wait for a certain period of time,
19 | and then send one large batch to the Clickhouse database.
20 |
21 | This is because Clickhouse is designed to process new data most efficiently in batches
22 | (and batch inserts are what its authors themselves recommend).
23 |
24 | ### Features
25 |
26 | - [x] **non-blocking** - (recommended) the async write client uses implicit batching.
27 | Data is asynchronously written to the underlying buffer and automatically sent to the server
28 | when the buffer reaches the batch size (default 5000) or when the flush interval
29 | (default 1s) times out. The asynchronous write client is recommended for frequent periodic writes.
30 | - [x] **blocking** - writes batches synchronously, directly to Clickhouse.
31 |
32 | **Client buffer engines:**
33 |
34 | - [x] **in-memory** - uses native channels and slices
35 | - [x] **redis** - uses a Redis server as the queue and buffer
36 | - [x] **in-memory-sync** - use it when you need direct access to the buffer, it helps to avoid data races
37 | - [x] **retries** - resends "broken" packets, or packets that were not sent for some reason
38 |
39 | ### Usage
40 |
41 | ```go
42 | import (
43 | "database/sql"
44 |
45 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
46 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
47 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxsql"
48 | )
49 |
50 | // if you already have a connection to Clickhouse you can just use wrappers
51 | // with native interface
52 | ch := cxnative.NewClickhouseWithConn(driver.Conn, &cx.RuntimeOptions{})
53 | // or use database/sql interface
54 | ch := cxsql.NewClickhouseWithConn(*sql.DB, &cx.RuntimeOptions{})
55 | ```
56 |
57 | ```go
58 | // if you don't want to create connections yourself,
59 | // the package can do it for you, just call the connection constructor you need:
60 |
61 | // with native interface
62 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
63 | Addr: ctx.StringSlice("clickhouse-host"),
64 | Auth: clickhouse.Auth{
65 | Database: ctx.String("clickhouse-database"),
66 | Username: ctx.String("clickhouse-username"),
67 | Password: ctx.String("clickhouse-password"),
68 | },
69 | Settings: clickhouse.Settings{
70 | "max_execution_time": 60,
71 | },
72 | DialTimeout: 5 * time.Second,
73 | Compression: &clickhouse.Compression{
74 | Method: clickhouse.CompressionLZ4,
75 | },
76 | Debug: ctx.Bool("debug"),
77 | }, &cx.RuntimeOptions{})
78 |
79 | // or with database/sql interface
80 | ch, conn, err := cxsql.NewClickhouse(ctx, &clickhouse.Options{
81 | Addr: ctx.StringSlice("clickhouse-host"),
82 | Auth: clickhouse.Auth{
83 | Database: ctx.String("clickhouse-database"),
84 | Username: ctx.String("clickhouse-username"),
85 | Password: ctx.String("clickhouse-password"),
86 | },
87 | Settings: clickhouse.Settings{
88 | "max_execution_time": 60,
89 | },
90 | DialTimeout: 5 * time.Second,
91 | Compression: &clickhouse.Compression{
92 | Method: clickhouse.CompressionLZ4,
93 | },
94 | Debug: ctx.Bool("debug"),
95 | }, &cx.RuntimeOptions{})
96 | ```
97 |
98 | #### Create main data streamer client and write data
99 |
100 | ```go
101 | import (
102 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
103 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
104 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
105 | )
106 | // create root client
107 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
108 | clickhousebuffer.WithFlushInterval(2000),
109 | clickhousebuffer.WithBatchSize(5000),
110 | clickhousebuffer.WithDebugMode(true),
111 | clickhousebuffer.WithRetry(true),
112 | ))
113 | // create buffer engine
114 | buffer := cxmem.NewBuffer(
115 | client.Options().BatchSize(),
116 | )
117 | // or use redis
118 | buffer := cxredis.NewBuffer(
119 | ctx, *redis.Client, "bucket", client.Options().BatchSize(),
120 | )
121 | // create new writer api: table name with columns
122 | writeAPI := client.Writer(
123 | ctx,
124 | // order of the values []string{"id", "uuid", "insert_ts"}
125 | // must correspond to the return values in the (*MyTable).Row() method, which will be shown below
126 | cx.NewView("clickhouse_database.my_table", []string{"id", "uuid", "insert_ts"}),
127 | buffer,
128 | )
129 |
130 | // define your custom data structure
131 | type MyTable struct {
132 | id int
133 | uuid string
134 | insertTS time.Time
135 | }
136 | // the structure above is a reflection of the entity-table in the database
137 | // CREATE TABLE IF NOT EXISTS MyTable (
138 | // id Int32,
139 | // uuid String,
140 | // insert_ts String
141 | // ) engine=Memory
142 |
143 | // and implement cx.Vectorable interface
144 | // the (*MyTable).Row() method describes how the data will be written to the table and in what order,
145 | // similar to the INSERT INTO (id, uuid, insert_ts) VALUES (...), (...), (...) query
146 | // the order of the returned values must correspond to the schema described above: []string{"id", "uuid", "insert_ts"}
147 | func (t *MyTable) Row() cx.Vector {
148 | return cx.Vector{
149 | t.id,
150 | t.uuid,
151 | t.insertTS.Format(time.RFC822),
152 | }
153 | }
154 |
155 | // async write your data
156 | writeAPI.WriteRow(&MyTable{
157 | id: 1, uuid: "1", insertTS: time.Now(),
158 | })
159 | // or use a safe way (same as WriteRow, but safer)
160 | writeAPI.TryWriteRow(&MyTable{
161 | id: 1, uuid: "1", insertTS: time.Now(),
162 | })
163 | // or faster
164 | writeAPI.WriteVector(cx.Vector{
165 | 1, "1", time.Now(),
166 | })
167 | // safe way
168 | writeAPI.TryWriteVector(cx.Vector{
169 | 1, "1", time.Now(),
170 | })
171 | ```
172 |
173 | When using the non-blocking writer, you can track errors through a dedicated error channel
174 |
175 | ```go
176 | errorsCh := writeAPI.Errors()
177 | go func() {
178 | for err := range errorsCh {
179 | log.Warning(fmt.Sprintf("clickhouse write error: %s", err.Error()))
180 | }
181 | }()
182 | ```
183 |
184 | Using the blocking writer interface
185 |
186 | ```go
187 | // create new writer api: table name with columns
188 | writerBlocking := client.WriterBlocking(cx.View{
189 | Name: "clickhouse_database.my_table",
190 | Columns: []string{"id", "uuid", "insert_ts"},
191 | })
192 | // synchronous writing of data directly to Clickhouse
193 | err := writerBlocking.WriteRow(ctx, []*MyTable{
194 | {
195 | id: 1, uuid: "1", insertTS: time.Now(),
196 | },
197 | {
198 | id: 2, uuid: "2", insertTS: time.Now(),
199 | },
200 | {
201 | id: 3, uuid: "3", insertTS: time.Now(),
202 | },
203 | }...)
204 | ```
205 |
206 | ### More
207 |
208 | #### Buffer engine:
209 |
210 | You can implement your own buffer engine (`File`, `Rabbitmq`, `CustomMemory`, etc.) by implementing the `Buffer` interface:
211 |
212 | ```go
213 | type Buffer interface {
214 | Write(vec Vector)
215 | Read() []Vector
216 | Len() int
217 | Flush()
218 | }
219 | ```
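
For illustration only, below is a minimal sketch of a custom engine that satisfies this interface. It assumes the interface and the `Vector` type live in the `src/cx` package, as in the usage examples above; the package, type, and constructor names here are made up:

```go
package mybuffer

import (
	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
)

// SimpleBuffer keeps rows in a plain slice.
// It is not safe for concurrent use; see the cxsyncmem engine for a synchronized variant.
type SimpleBuffer struct {
	rows []cx.Vector
}

// NewSimpleBuffer pre-allocates space for size rows (e.g. the configured batch size).
func NewSimpleBuffer(size int) *SimpleBuffer {
	return &SimpleBuffer{rows: make([]cx.Vector, 0, size)}
}

// Write appends a single row to the buffer.
func (s *SimpleBuffer) Write(vec cx.Vector) {
	s.rows = append(s.rows, vec)
}

// Read returns a snapshot of the accumulated rows.
func (s *SimpleBuffer) Read() []cx.Vector {
	snapshot := make([]cx.Vector, len(s.rows))
	copy(snapshot, s.rows)
	return snapshot
}

// Len reports how many rows are currently buffered.
func (s *SimpleBuffer) Len() int {
	return len(s.rows)
}

// Flush drops the accumulated rows after they have been sent to Clickhouse.
func (s *SimpleBuffer) Flush() {
	s.rows = s.rows[:0]
}
```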
220 |
221 | #### Retries:
222 |
223 | > By default, packet resending is disabled; to enable it, call `(*Options).SetRetryIsEnabled(true)`.
224 |
225 | - [x] in-memory use channels (default)
226 | - [ ] redis
227 | - [ ] rabbitMQ
228 | - [ ] kafka
229 |
230 | You can implement your own queue engine by implementing the `Queueable` interface:
231 |
232 | ```go
233 | type Queueable interface {
234 | Queue(packet *Packet)
235 | Retries() <-chan *Packet
236 | }
237 |
238 | // and set it as an engine:
239 | clickhousebuffer.NewOptions(
240 | clickhousebuffer.WithRetry(false),
241 | clickhousebuffer.WithRetryQueueEngine(CustomQueueable),
242 | )
243 | ```
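
As a rough sketch, a channel-backed queue engine could look like the following. It assumes `Packet` is exported by the `src/retry` package; the `ChannelQueue` type and its constructor are made up for this example:

```go
import (
	"github.com/zikwall/clickhouse-buffer/v4/src/retry"
)

// ChannelQueue is a toy Queueable engine backed by a buffered channel.
type ChannelQueue struct {
	packets chan *retry.Packet
}

func NewChannelQueue(size int) *ChannelQueue {
	return &ChannelQueue{packets: make(chan *retry.Packet, size)}
}

// Queue stores a failed packet for a later retry attempt,
// dropping it when the queue is already full.
func (q *ChannelQueue) Queue(packet *retry.Packet) {
	select {
	case q.packets <- packet:
	default:
	}
}

// Retries exposes the stored packets to the retry worker.
func (q *ChannelQueue) Retries() <-chan *retry.Packet {
	return q.packets
}
```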
244 |
245 | #### Logs:
246 |
247 | You can plug in your own logger by implementing the `Logger` interface and passing it in the options:
248 |
249 | ```go
250 | type Logger interface {
251 | Log(message interface{})
252 | Logf(format string, v ...interface{})
253 | }
254 |
255 | // example with options
256 | clickhousebuffer.NewOptions(
257 | clickhousebuffer.WithDebugMode(true),
258 | clickhousebuffer.WithLogger(SomeLogger),
259 | )
260 | ```
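
For example, a minimal adapter over the standard library logger (only the `Logger` interface shown above is assumed):

```go
import "log"

// StdLogger forwards all messages to the standard library logger.
type StdLogger struct{}

func (l *StdLogger) Log(message interface{}) {
	log.Println(message)
}

func (l *StdLogger) Logf(format string, v ...interface{}) {
	log.Printf(format, v...)
}
```

and pass it in the options: `clickhousebuffer.WithLogger(&StdLogger{})`.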
261 |
262 | #### Tests:
263 |
264 | - `$ go test -v ./...` - run all tests, excluding the integration tests
265 | - `$ go test -race -v ./...` - run all tests, excluding the integration tests, in race detection mode
266 | - `$ golangci-lint run --config ./.golangci.yml` - check code quality with linters
267 |
268 | **Integration Tests:**
269 |
270 | ```shell
271 | export CLICKHOUSE_HOST=111.11.11.11:9000
272 | export REDIS_HOST=111.11.11.11:6379
273 | export REDIS_PASS=password_if_needed
274 |
275 | $ go test -v ./... -tags=integration
276 |
277 | or with race detection mode
278 |
279 | $ go test -race -v ./... -tags=integration
280 | ```
281 |
282 | **Benchmarks**
283 |
284 | **Ubuntu 20.04/Intel Core i7-8750H**
285 |
286 | ```shell
287 | goos: linux
288 | goarch: amd64
289 | pkg: github.com/zikwall/clickhouse-buffer/v4/bench
290 | cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
291 | ```
292 |
293 | ```shell
294 | // memory
295 |
296 | $ go test ./bench -bench=BenchmarkInsertSimplestPreallocateVectors -benchmem -benchtime=1000x
297 |
298 | BenchmarkInsertSimplestPreallocateVectors/1000000-12 1000 142919 ns/op 0 B/op 0 allocs/op
299 | BenchmarkInsertSimplestPreallocateVectors/100000-12 1000 12498 ns/op 0 B/op 0 allocs/op
300 | BenchmarkInsertSimplestPreallocateVectors/10000-12 1000 1265 ns/op 0 B/op 0 allocs/op
301 | BenchmarkInsertSimplestPreallocateVectors/1000-12 1000 143.1 ns/op 0 B/op 0 allocs/op
302 | BenchmarkInsertSimplestPreallocateVectors/100-12 1000 5.700 ns/op 2 B/op 0 allocs/op
303 |
304 | $ go test ./bench -bench=BenchmarkInsertSimplestPreallocateObjects -benchmem -benchtime=1000x
305 |
306 | BenchmarkInsertSimplestPreallocateObjects/1000000-12 1000 399110 ns/op 88000 B/op 3000 allocs/op
307 | BenchmarkInsertSimplestPreallocateObjects/100000-12 1000 37527 ns/op 8800 B/op 300 allocs/op
308 | BenchmarkInsertSimplestPreallocateObjects/10000-12 1000 3880 ns/op 880 B/op 30 allocs/op
309 | BenchmarkInsertSimplestPreallocateObjects/1000-12 1000 419.5 ns/op 88 B/op 3 allocs/op
310 | BenchmarkInsertSimplestPreallocateObjects/100-12 1000 58.90 ns/op 11 B/op 0 allocs/op
311 |
312 | $ go test ./bench -bench=BenchmarkInsertSimplestObjects -benchmem -benchtime=1000x
313 |
314 | BenchmarkInsertSimplestObjects/1000000-12 1000 454794 ns/op 160002 B/op 4000 allocs/op
315 | BenchmarkInsertSimplestObjects/100000-12 1000 41879 ns/op 16000 B/op 400 allocs/op
316 | BenchmarkInsertSimplestObjects/10000-12 1000 4174 ns/op 1605 B/op 40 allocs/op
317 | BenchmarkInsertSimplestObjects/1000-12 1000 479.5 ns/op 160 B/op 4 allocs/op
318 | BenchmarkInsertSimplestObjects/100-12 1000 39.40 ns/op 16 B/op 0 allocs/op
319 |
320 | $ go test ./bench -bench=BenchmarkInsertSimplestVectors -benchmem -benchtime=1000x
321 |
322 | BenchmarkInsertSimplestVectors/1000000-12 1000 182548 ns/op 72002 B/op 1000 allocs/op
323 | BenchmarkInsertSimplestVectors/100000-12 1000 16291 ns/op 7200 B/op 100 allocs/op
324 | BenchmarkInsertSimplestVectors/10000-12 1000 1638 ns/op 725 B/op 10 allocs/op
325 | BenchmarkInsertSimplestVectors/1000-12 1000 208.4 ns/op 72 B/op 1 allocs/op
326 | BenchmarkInsertSimplestVectors/100-12 1000 20.00 ns/op 7 B/op 0 allocs/op
327 |
328 | $ go test ./bench -bench=BenchmarkInsertSimplestEmptyVectors -benchmem -benchtime=1000x
329 |
330 | BenchmarkInsertSimplestEmptyVectors/1000000-12 1000 132887 ns/op 24002 B/op 0 allocs/op
331 | BenchmarkInsertSimplestEmptyVectors/100000-12 1000 13404 ns/op 2400 B/op 0 allocs/op
332 | BenchmarkInsertSimplestEmptyVectors/10000-12 1000 1299 ns/op 245 B/op 0 allocs/op
333 | BenchmarkInsertSimplestEmptyVectors/1000-12 1000 122.1 ns/op 0 B/op 0 allocs/op
334 | BenchmarkInsertSimplestEmptyVectors/100-12 1000 6.800 ns/op 0 B/op 0 allocs/op
335 |
336 | // redis
337 |
338 | $ go test ./bench -bench=BenchmarkInsertRedisObjects -benchmem -benchtime=100x
339 |
340 | BenchmarkInsertRedisObjects/1000-12 100 22404356 ns/op 96095 B/op 2322 allocs/op
341 | BenchmarkInsertRedisObjects/100-12 100 2243544 ns/op 9673 B/op 233 allocs/op
342 | BenchmarkInsertRedisObjects/10-12 100 271749 ns/op 1033 B/op 25 allocs/op
343 |
344 | $ go test ./bench -bench=BenchmarkInsertRedisVectors -benchmem -benchtime=100x
345 |
346 | BenchmarkInsertRedisVectors/1000-12 100 22145258 ns/op 92766 B/op 2274 allocs/op
347 | BenchmarkInsertRedisVectors/100-12 100 2320692 ns/op 9339 B/op 229 allocs/op
348 | BenchmarkInsertRedisVectors/10-12 100 202146 ns/op 157 B/op 2 allocs/op
349 | ```
350 |
351 | **MacBook Pro M1**
352 |
353 | ```shell
354 | goos: darwin
355 | goarch: arm64
356 | pkg: github.com/zikwall/clickhouse-buffer/v4/bench
357 | ```
358 |
359 | ```shell
360 | $ akm@MacBook-Pro-andrey clickhouse-buffer % go test ./bench -bench=BenchmarkInsertSimplestPreallocateVectors -benchmem -benchtime=1000x
361 |
362 | BenchmarkInsertSimplestPreallocateVectors/1000000-8 1000 206279 ns/op 0 B/op 0 allocs/op
363 | BenchmarkInsertSimplestPreallocateVectors/100000-8 1000 24612 ns/op 0 B/op 0 allocs/op
364 | BenchmarkInsertSimplestPreallocateVectors/10000-8 1000 2047 ns/op 0 B/op 0 allocs/op
365 | BenchmarkInsertSimplestPreallocateVectors/1000-8 1000 204.0 ns/op 0 B/op 0 allocs/op
366 | BenchmarkInsertSimplestPreallocateVectors/100-8 1000 22.83 ns/op 0 B/op 0 allocs/op
367 |
368 | $ akm@MacBook-Pro-andrey clickhouse-buffer % go test ./bench -bench=BenchmarkInsertSimplestPreallocateObjects -benchmem -benchtime=1000x
369 |
370 | BenchmarkInsertSimplestPreallocateObjects/1000000-8 1000 410757 ns/op 88000 B/op 3000 allocs/op
371 | BenchmarkInsertSimplestPreallocateObjects/100000-8 1000 40885 ns/op 8800 B/op 300 allocs/op
372 | BenchmarkInsertSimplestPreallocateObjects/10000-8 1000 4059 ns/op 880 B/op 30 allocs/op
373 | BenchmarkInsertSimplestPreallocateObjects/1000-8 1000 407.2 ns/op 88 B/op 3 allocs/op
374 | BenchmarkInsertSimplestPreallocateObjects/100-8 1000 46.29 ns/op 11 B/op 0 allocs/op
375 |
376 | $ akm@MacBook-Pro-andrey clickhouse-buffer % go test ./bench -bench=BenchmarkInsertSimplestObjects -benchmem -benchtime=1000x
377 |
378 | BenchmarkInsertSimplestObjects/1000000-8 1000 454083 ns/op 160002 B/op 4000 allocs/op
379 | BenchmarkInsertSimplestObjects/100000-8 1000 44329 ns/op 16000 B/op 400 allocs/op
380 | BenchmarkInsertSimplestObjects/10000-8 1000 4401 ns/op 1360 B/op 40 allocs/op
381 | BenchmarkInsertSimplestObjects/1000-8 1000 437.8 ns/op 160 B/op 4 allocs/op
382 | BenchmarkInsertSimplestObjects/100-8 1000 44.71 ns/op 16 B/op 0 allocs/op
383 |
384 |
385 | $ akm@MacBook-Pro-andrey clickhouse-buffer % go test ./bench -bench=BenchmarkInsertSimplestVectors -benchmem -benchtime=1000x
386 |
387 | BenchmarkInsertSimplestVectors/1000000-8 1000 244064 ns/op 72002 B/op 1000 allocs/op
388 | BenchmarkInsertSimplestVectors/100000-8 1000 24013 ns/op 7200 B/op 100 allocs/op
389 | BenchmarkInsertSimplestVectors/10000-8 1000 2335 ns/op 725 B/op 10 allocs/op
390 | BenchmarkInsertSimplestVectors/1000-8 1000 240.4 ns/op 48 B/op 1 allocs/op
391 | BenchmarkInsertSimplestVectors/100-8 1000 22.17 ns/op 4 B/op 0 allocs/op
392 |
393 | $ akm@MacBook-Pro-andrey clickhouse-buffer % go test ./bench -bench=BenchmarkInsertSimplestEmptyVectors -benchmem -benchtime=1000x
394 |
395 | BenchmarkInsertSimplestEmptyVectors/1000000-8 1000 215240 ns/op 24002 B/op 0 allocs/op
396 | BenchmarkInsertSimplestEmptyVectors/100000-8 1000 20736 ns/op 2400 B/op 0 allocs/op
397 | BenchmarkInsertSimplestEmptyVectors/10000-8 1000 2109 ns/op 0 B/op 0 allocs/op
398 | BenchmarkInsertSimplestEmptyVectors/1000-8 1000 198.3 ns/op 24 B/op 0 allocs/op
399 | BenchmarkInsertSimplestEmptyVectors/100-8 1000 19.83 ns/op 2 B/op 0 allocs/op
400 | ```
401 |
402 | **Conclusion:**
403 |
404 | - the redis buffer is expectedly slower than the in-memory buffer: data has to be serialized and deserialized back, which adds a lot of overhead, and there is network overhead on top of that
405 | - writing through a vector causes less overhead (fewer allocations) and works faster
406 | - writing pre-allocated vectors is very fast with zero memory allocation; writing to the buffer and then flushing it to Clickhouse adds almost no overhead
407 | - the same can be said about writing objects, but with a small overhead
408 | - writing vectors is faster, allocates less memory, and is preferable to writing objects
409 | - an in-memory buffer is preferable to a redis buffer
410 |
411 | ### TODO:
412 |
413 | - [ ] rewrite Buffer interface, simplify it
414 | - [ ] rewrite Options, simplify it
415 | - [ ] optimize the redis buffer and the encode/decode functions
416 | - [ ] buffer interfaces
417 | - [ ] more retry buffer interfaces
418 | - [ ] rewrite retry lib, simplify it
419 | - [ ] create binary app for streaming data to clickhouse
420 | - [ ] client and server with HTTP interface
421 | - [ ] client and server with gRPC interface
--------------------------------------------------------------------------------
/bench/encode_decode_test.go:
--------------------------------------------------------------------------------
1 | package bench
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
8 | )
9 |
10 | type row struct {
11 | id int
12 | uuid string
13 | insertTS time.Time
14 | }
15 |
16 | func (r *row) Row() cx.Vector {
17 | return cx.Vector{r.id, r.uuid, r.insertTS.Format(time.RFC822)}
18 | }
19 |
20 | // goos: linux
21 | // goarch: amd64
22 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
23 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
24 | // BenchmarkEncodeRow/1000000-12 100 30361854 ns/op 15847294 B/op 240014 allocs/op
25 | // BenchmarkEncodeRow/100000-12 100 2946954 ns/op 1584748 B/op 24001 allocs/op
26 | // BenchmarkEncodeRow/10000-12 100 289346 ns/op 158465 B/op 2400 allocs/op
27 | // BenchmarkEncodeRow/1000-12 100 31659 ns/op 15857 B/op 240 allocs/op
28 | // BenchmarkEncodeRow/100-12 100 3089 ns/op 1584 B/op 24 allocs/op
29 | // BenchmarkEncodeRow/10-12 100 383.0 ns/op 158 B/op 2 allocs/op
30 | // PASS
31 | // ok
32 | // nolint:dupl // it's OK
33 | func BenchmarkEncodeRow(b *testing.B) {
34 | b.Run("1000000", func(b *testing.B) {
35 | for i := 0; i < b.N; i++ {
36 | encode(1000000, b)
37 | }
38 | })
39 | b.Run("100000", func(b *testing.B) {
40 | for i := 0; i < b.N; i++ {
41 | encode(100000, b)
42 | }
43 | })
44 | b.Run("10000", func(b *testing.B) {
45 | for i := 0; i < b.N; i++ {
46 | encode(10000, b)
47 | }
48 | })
49 | b.Run("1000", func(b *testing.B) {
50 | for i := 0; i < b.N; i++ {
51 | encode(1000, b)
52 | }
53 | })
54 | b.Run("100", func(b *testing.B) {
55 | for i := 0; i < b.N; i++ {
56 | encode(100, b)
57 | }
58 | })
59 | b.Run("10", func(b *testing.B) {
60 | for i := 0; i < b.N; i++ {
61 | encode(10, b)
62 | }
63 | })
64 | }
65 |
66 | // goos: linux
67 | // goarch: amd64
68 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
69 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
70 | // BenchmarkDecodeRow/100000-12 100 17739535 ns/op 7653390 B/op 200064 allocs/op
71 | // BenchmarkDecodeRow/10000-12 100 1867818 ns/op 765345 B/op 20006 allocs/op
72 | // BenchmarkDecodeRow/1000-12 100 181877 ns/op 76521 B/op 2000 allocs/op
73 | // BenchmarkDecodeRow/100-12 100 16230 ns/op 7656 B/op 200 allocs/op
74 | // BenchmarkDecodeRow/10-12 100 1661 ns/op 764 B/op 20 allocs/op
75 | // BenchmarkDecodeRow/1-12 100 227.0 ns/op 76 B/op 2 allocs/op
76 | // PASS
77 | // ok
78 | // nolint:dupl // it's OK
79 | func BenchmarkDecodeRow(b *testing.B) {
80 | b.Run("100000", func(b *testing.B) {
81 | for i := 0; i < b.N; i++ {
82 | decode(100000, b)
83 | }
84 | })
85 | b.Run("10000", func(b *testing.B) {
86 | for i := 0; i < b.N; i++ {
87 | decode(10000, b)
88 | }
89 | })
90 | b.Run("1000", func(b *testing.B) {
91 | for i := 0; i < b.N; i++ {
92 | decode(1000, b)
93 | }
94 | })
95 | b.Run("100", func(b *testing.B) {
96 | for i := 0; i < b.N; i++ {
97 | decode(100, b)
98 | }
99 | })
100 | b.Run("10", func(b *testing.B) {
101 | for i := 0; i < b.N; i++ {
102 | decode(10, b)
103 | }
104 | })
105 | b.Run("1", func(b *testing.B) {
106 | for i := 0; i < b.N; i++ {
107 | decode(1, b)
108 | }
109 | })
110 | }
111 |
112 | func encode(x int, b *testing.B) {
113 | now := time.Now()
114 | b.ResetTimer()
115 | for i := 0; i < x; i++ {
116 | r := row{
117 | id: 1,
118 | uuid: "uuid_here",
119 | insertTS: now,
120 | }
121 | _, _ = r.Row().Encode()
122 | }
123 | }
124 |
125 | func decode(x int, b *testing.B) {
126 | now := time.Now()
127 | encodes := make([]cx.VectorDecoded, 0, x)
128 | for i := 0; i < x; i++ {
129 | r := row{
130 | id: 1,
131 | uuid: "uuid_here",
132 | insertTS: now,
133 | }
134 | enc, _ := r.Row().Encode()
135 | encodes = append(encodes, cx.VectorDecoded(enc))
136 | }
137 | b.ResetTimer()
138 | for i := range encodes {
139 | _, _ = encodes[i].Decode()
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/bench/insert_redis_test.go:
--------------------------------------------------------------------------------
1 | package bench
2 |
3 | import (
4 | "context"
5 | "log"
6 | "os"
7 | "testing"
8 |
9 | "github.com/go-redis/redis/v8"
10 |
11 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
12 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
13 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
14 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
15 | )
16 |
17 | // x100
18 | // goos: linux
19 | // goarch: amd64
20 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
21 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
22 | // BenchmarkInsertRedisObjects/1000-12 100 22404356 ns/op 96095 B/op 2322 allocs/op
23 | // BenchmarkInsertRedisObjects/100-12 100 2243544 ns/op 9673 B/op 233 allocs/op
24 | // BenchmarkInsertRedisObjects/10-12 100 271749 ns/op 1033 B/op 25 allocs/op
25 | // PASS
26 | // ok
27 | // nolint:funlen,dupl // it's not important here
28 | func BenchmarkInsertRedisObjects(b *testing.B) {
29 | ctx, cancel := context.WithCancel(context.Background())
30 | defer cancel()
31 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
32 | clickhousebuffer.WithFlushInterval(10000),
33 | clickhousebuffer.WithBatchSize(1000),
34 | ))
35 | redisHost := os.Getenv("REDIS_HOST")
36 | redisPass := os.Getenv("REDIS_PASS")
37 | var writeAPI clickhousebuffer.Writer
38 | b.ResetTimer()
39 |
40 | b.Run("1000", func(b *testing.B) {
41 | client.Options().SetBatchSize(1001)
42 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
43 | Addr: redisHost,
44 | Password: redisPass,
45 | }), "bucket", client.Options().BatchSize())
46 | if err != nil {
47 | log.Panicln(err)
48 | }
49 | writeAPI = client.Writer(
50 | ctx,
51 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
52 | rxbuffer,
53 | )
54 | b.ResetTimer()
55 |
56 | for i := 0; i < b.N; i++ {
57 | insertObjects(writeAPI, 1000, b)
58 | }
59 | })
60 |
61 | b.Run("100", func(b *testing.B) {
62 | client.Options().SetBatchSize(101)
63 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
64 | Addr: redisHost,
65 | Password: redisPass,
66 | }), "bucket", client.Options().BatchSize())
67 | if err != nil {
68 | log.Panicln(err)
69 | }
70 | writeAPI = client.Writer(
71 | ctx,
72 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
73 | rxbuffer,
74 | )
75 | b.ResetTimer()
76 |
77 | for i := 0; i < b.N; i++ {
78 | insertObjects(writeAPI, 100, b)
79 | }
80 | })
81 |
82 | b.Run("10", func(b *testing.B) {
83 | client.Options().SetBatchSize(11)
84 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
85 | Addr: redisHost,
86 | Password: redisPass,
87 | }), "bucket", client.Options().BatchSize())
88 | if err != nil {
89 | log.Panicln(err)
90 | }
91 | writeAPI = client.Writer(
92 | ctx,
93 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
94 | rxbuffer,
95 | )
96 | b.ResetTimer()
97 |
98 | for i := 0; i < b.N; i++ {
99 | insertObjects(writeAPI, 10, b)
100 | }
101 | })
102 |
103 | b.StopTimer()
104 | writeAPI.Close()
105 | client.Close()
106 | }
107 |
108 | // x100
109 | // goos: linux
110 | // goarch: amd64
111 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
112 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
113 | // BenchmarkInsertRedisVectors/1000-12 100 22145258 ns/op 92766 B/op 2274 allocs/op
114 | // BenchmarkInsertRedisVectors/100-12 100 2320692 ns/op 9339 B/op 229 allocs/op
115 | // BenchmarkInsertRedisVectors/10-12 100 202146 ns/op 157 B/op 2 allocs/op
116 | // PASS
117 | // ok
118 | // nolint:funlen,dupl // it's not important here
119 | func BenchmarkInsertRedisVectors(b *testing.B) {
120 | ctx, cancel := context.WithCancel(context.Background())
121 | defer cancel()
122 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
123 | clickhousebuffer.WithFlushInterval(10000),
124 | clickhousebuffer.WithBatchSize(1000),
125 | ))
126 | redisHost := os.Getenv("REDIS_HOST")
127 | redisPass := os.Getenv("REDIS_PASS")
128 | var writeAPI clickhousebuffer.Writer
129 | b.ResetTimer()
130 |
131 | b.Run("1000", func(b *testing.B) {
132 | b.StopTimer()
133 | client.Options().SetBatchSize(1001)
134 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
135 | Addr: redisHost,
136 | Password: redisPass,
137 | }), "bucket", client.Options().BatchSize())
138 | if err != nil {
139 | log.Panicln(err)
140 | }
141 | writeAPI = client.Writer(
142 | ctx,
143 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
144 | rxbuffer,
145 | )
146 | b.StartTimer()
147 |
148 | for i := 0; i < b.N; i++ {
149 | insertVectors(writeAPI, 1000, b)
150 | }
151 | })
152 |
153 | b.Run("100", func(b *testing.B) {
154 | b.StopTimer()
155 | client.Options().SetBatchSize(101)
156 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
157 | Addr: redisHost,
158 | Password: redisPass,
159 | }), "bucket", client.Options().BatchSize())
160 | if err != nil {
161 | log.Panicln(err)
162 | }
163 | writeAPI = client.Writer(
164 | ctx,
165 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
166 | rxbuffer,
167 | )
168 | b.StartTimer()
169 |
170 | for i := 0; i < b.N; i++ {
171 | insertVectors(writeAPI, 100, b)
172 | }
173 | })
174 |
175 | b.Run("10", func(b *testing.B) {
176 | b.StopTimer()
177 | client.Options().SetBatchSize(11)
178 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
179 | Addr: redisHost,
180 | Password: redisPass,
181 | }), "bucket", client.Options().BatchSize())
182 | if err != nil {
183 | log.Panicln(err)
184 | }
185 | writeAPI = client.Writer(
186 | ctx,
187 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
188 | rxbuffer,
189 | )
190 | b.StartTimer()
191 |
192 | for i := 0; i < b.N; i++ {
193 | insertVectors(writeAPI, 10, b)
194 | }
195 | })
196 |
197 | b.StopTimer()
198 | writeAPI.Close()
199 | client.Close()
200 | }
201 |
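202 | // Reproduction note (assumption, not part of the original file): the figures above were
203 | // collected on the author's machine. Given a reachable Redis instance, something along
204 | // these lines should reproduce them:
205 | //
206 | //	REDIS_HOST=127.0.0.1:6379 go test -run=^$ -bench=BenchmarkInsertRedis -benchmem -benchtime=100x ./bench/
207 | //
208 | // where -benchtime=100x corresponds to the "x100" marker in the result blocks.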
--------------------------------------------------------------------------------
/bench/insert_simple_test.go:
--------------------------------------------------------------------------------
1 | package bench
2 |
3 | import (
4 | "context"
5 | "testing"
6 | "time"
7 |
8 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
9 |
10 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
11 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
12 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
13 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
14 | )
15 |
16 | type BenchTable struct {
17 | ID int32
18 | UUID string
19 | InsertTS time.Time
20 | }
21 |
22 | func (t *BenchTable) Row() cx.Vector {
23 | return cx.Vector{t.ID, t.UUID, t.InsertTS.Format(time.RFC822)}
24 | }
25 |
26 | type clickhouseMock struct{}
27 |
28 | func (c *clickhouseMock) Insert(_ context.Context, _ cx.View, _ []cx.Vector) (uint64, error) {
29 | return 0, nil
30 | }
31 |
32 | func (c *clickhouseMock) Close() error {
33 | return nil
34 | }
35 |
36 | func (c *clickhouseMock) Conn() driver.Conn {
37 | return nil
38 | }
39 |
40 | // x50
41 | // goos: linux
42 | // goarch: amd64
43 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
44 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
45 | // BenchmarkInsertSimplestPreallocateVectors/1000000-12 1000 142919 ns/op 0 B/op 0 allocs/op
46 | // BenchmarkInsertSimplestPreallocateVectors/100000-12 1000 12498 ns/op 0 B/op 0 allocs/op
47 | // BenchmarkInsertSimplestPreallocateVectors/10000-12 1000 1265 ns/op 0 B/op 0 allocs/op
48 | // BenchmarkInsertSimplestPreallocateVectors/1000-12 1000 143.1 ns/op 0 B/op 0 allocs/op
49 | // BenchmarkInsertSimplestPreallocateVectors/100-12 1000 5.700 ns/op 2 B/op 0 allocs/op
50 | // PASS
51 | // ok
52 | // nolint:lll,dupl // it's OK
53 | func BenchmarkInsertSimplestPreallocateVectors(b *testing.B) {
54 | ctx, cancel := context.WithCancel(context.Background())
55 | defer cancel()
56 |
57 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
58 | clickhousebuffer.WithFlushInterval(10000000),
59 | clickhousebuffer.WithBatchSize(10000000),
60 | ))
61 |
62 | var writeAPI clickhousebuffer.Writer
63 | b.ResetTimer()
64 |
65 | b.Run("1000000", func(b *testing.B) {
66 | client.Options().SetBatchSize(1000001)
67 | writeAPI = client.Writer(
68 | ctx,
69 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
70 | cxmem.NewBuffer(client.Options().BatchSize()),
71 | )
72 | b.ResetTimer()
73 | for i := 0; i < b.N; i++ {
74 | insertPreAllocatedVectors(writeAPI, 1000000, b)
75 | }
76 | })
77 |
78 | b.Run("100000", func(b *testing.B) {
79 | client.Options().SetBatchSize(100001)
80 | writeAPI = client.Writer(
81 | ctx,
82 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
83 | cxmem.NewBuffer(client.Options().BatchSize()),
84 | )
85 | b.ResetTimer()
86 | for i := 0; i < b.N; i++ {
87 | insertPreAllocatedVectors(writeAPI, 100000, b)
88 | }
89 | })
90 |
91 | b.Run("10000", func(b *testing.B) {
92 | client.Options().SetBatchSize(10001)
93 | writeAPI = client.Writer(
94 | ctx,
95 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
96 | cxmem.NewBuffer(client.Options().BatchSize()),
97 | )
98 | b.ResetTimer()
99 | for i := 0; i < b.N; i++ {
100 | insertPreAllocatedVectors(writeAPI, 10000, b)
101 | }
102 | })
103 |
104 | b.Run("1000", func(b *testing.B) {
105 | client.Options().SetBatchSize(1001)
106 | writeAPI = client.Writer(
107 | ctx,
108 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
109 | cxmem.NewBuffer(client.Options().BatchSize()),
110 | )
111 | b.ResetTimer()
112 | for i := 0; i < b.N; i++ {
113 | insertPreAllocatedVectors(writeAPI, 1000, b)
114 | }
115 | })
116 |
117 | b.Run("100", func(b *testing.B) {
118 | client.Options().SetBatchSize(101)
119 | writeAPI = client.Writer(
120 | ctx,
121 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
122 | cxmem.NewBuffer(client.Options().BatchSize()),
123 | )
124 | b.ResetTimer()
125 | for i := 0; i < b.N; i++ {
126 | insertPreAllocatedVectors(writeAPI, 100, b)
127 | }
128 | })
129 |
130 | b.StopTimer()
131 | writeAPI.Close()
132 | client.Close()
133 | }
134 |
135 | // x1000
136 | // goos: linux
137 | // goarch: amd64
138 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
139 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
140 | // BenchmarkInsertSimplestPreallocateObjects/1000000-12 1000 399110 ns/op 88000 B/op 3000 allocs/op
141 | // BenchmarkInsertSimplestPreallocateObjects/100000-12 1000 37527 ns/op 8800 B/op 300 allocs/op
142 | // BenchmarkInsertSimplestPreallocateObjects/10000-12 1000 3880 ns/op 880 B/op 30 allocs/op
143 | // BenchmarkInsertSimplestPreallocateObjects/1000-12 1000 419.5 ns/op 88 B/op 3 allocs/op
144 | // BenchmarkInsertSimplestPreallocateObjects/100-12 1000 58.90 ns/op 11 B/op 0 allocs/op
145 | // PASS
146 | // ok
147 | // nolint:lll,dupl // it's OK
148 | func BenchmarkInsertSimplestPreallocateObjects(b *testing.B) {
149 | ctx, cancel := context.WithCancel(context.Background())
150 | defer cancel()
151 |
152 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
153 | clickhousebuffer.WithFlushInterval(10000000),
154 | clickhousebuffer.WithBatchSize(10000000),
155 | ))
156 |
157 | var writeAPI clickhousebuffer.Writer
158 | b.ResetTimer()
159 |
160 | b.Run("1000000", func(b *testing.B) {
161 | client.Options().SetBatchSize(1000001)
162 | writeAPI = client.Writer(
163 | ctx,
164 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
165 | cxmem.NewBuffer(client.Options().BatchSize()),
166 | )
167 | b.ResetTimer()
168 | for i := 0; i < b.N; i++ {
169 | insertPreAllocatedObjects(writeAPI, 1000000, b)
170 | }
171 | })
172 |
173 | b.Run("100000", func(b *testing.B) {
174 | client.Options().SetBatchSize(100001)
175 | writeAPI = client.Writer(
176 | ctx,
177 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
178 | cxmem.NewBuffer(client.Options().BatchSize()),
179 | )
180 | b.ResetTimer()
181 | for i := 0; i < b.N; i++ {
182 | insertPreAllocatedObjects(writeAPI, 100000, b)
183 | }
184 | })
185 |
186 | b.Run("10000", func(b *testing.B) {
187 | client.Options().SetBatchSize(10001)
188 | writeAPI = client.Writer(
189 | ctx,
190 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
191 | cxmem.NewBuffer(client.Options().BatchSize()),
192 | )
193 | b.ResetTimer()
194 | for i := 0; i < b.N; i++ {
195 | insertPreAllocatedObjects(writeAPI, 10000, b)
196 | }
197 | })
198 |
199 | b.Run("1000", func(b *testing.B) {
200 | client.Options().SetBatchSize(1001)
201 | writeAPI = client.Writer(
202 | ctx,
203 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
204 | cxmem.NewBuffer(client.Options().BatchSize()),
205 | )
206 | b.ResetTimer()
207 | for i := 0; i < b.N; i++ {
208 | insertPreAllocatedObjects(writeAPI, 1000, b)
209 | }
210 | })
211 |
212 | b.Run("100", func(b *testing.B) {
213 | client.Options().SetBatchSize(101)
214 | writeAPI = client.Writer(
215 | ctx,
216 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
217 | cxmem.NewBuffer(client.Options().BatchSize()),
218 | )
219 | b.ResetTimer()
220 | for i := 0; i < b.N; i++ {
221 | insertPreAllocatedObjects(writeAPI, 100, b)
222 | }
223 | })
224 |
225 | b.StopTimer()
226 | writeAPI.Close()
227 | client.Close()
228 | }
229 |
230 | // x1000
231 | // goos: linux
232 | // goarch: amd64
233 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
234 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
235 | // BenchmarkInsertSimplestObjects/1000000-12 1000 454794 ns/op 160002 B/op 4000 allocs/op
236 | // BenchmarkInsertSimplestObjects/100000-12 1000 41879 ns/op 16000 B/op 400 allocs/op
237 | // BenchmarkInsertSimplestObjects/10000-12 1000 4174 ns/op 1605 B/op 40 allocs/op
238 | // BenchmarkInsertSimplestObjects/1000-12 1000 479.5 ns/op 160 B/op 4 allocs/op
239 | // BenchmarkInsertSimplestObjects/100-12 1000 39.40 ns/op 16 B/op 0 allocs/op
240 | // PASS
241 | // ok
242 | // nolint:lll,dupl // it's OK
243 | func BenchmarkInsertSimplestObjects(b *testing.B) {
244 | ctx, cancel := context.WithCancel(context.Background())
245 | defer cancel()
246 |
247 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
248 | clickhousebuffer.WithFlushInterval(10000000),
249 | clickhousebuffer.WithBatchSize(10000000),
250 | ))
251 |
252 | var writeAPI clickhousebuffer.Writer
253 | b.ResetTimer()
254 |
255 | b.Run("1000000", func(b *testing.B) {
256 | client.Options().SetBatchSize(1000001)
257 | writeAPI = client.Writer(
258 | ctx,
259 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
260 | cxmem.NewBuffer(client.Options().BatchSize()),
261 | )
262 | b.ResetTimer()
263 | for i := 0; i < b.N; i++ {
264 | insertObjects(writeAPI, 1000000, b)
265 | }
266 | })
267 |
268 | b.Run("100000", func(b *testing.B) {
269 | client.Options().SetBatchSize(100001)
270 | writeAPI = client.Writer(
271 | ctx,
272 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
273 | cxmem.NewBuffer(client.Options().BatchSize()),
274 | )
275 | b.ResetTimer()
276 | for i := 0; i < b.N; i++ {
277 | insertObjects(writeAPI, 100000, b)
278 | }
279 | })
280 |
281 | b.Run("10000", func(b *testing.B) {
282 | client.Options().SetBatchSize(10001)
283 | writeAPI = client.Writer(
284 | ctx,
285 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
286 | cxmem.NewBuffer(client.Options().BatchSize()),
287 | )
288 | b.ResetTimer()
289 | for i := 0; i < b.N; i++ {
290 | insertObjects(writeAPI, 10000, b)
291 | }
292 | })
293 |
294 | b.Run("1000", func(b *testing.B) {
295 | client.Options().SetBatchSize(1001)
296 | writeAPI = client.Writer(
297 | ctx,
298 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
299 | cxmem.NewBuffer(client.Options().BatchSize()),
300 | )
301 | b.ResetTimer()
302 | for i := 0; i < b.N; i++ {
303 | insertObjects(writeAPI, 1000, b)
304 | }
305 | })
306 |
307 | b.Run("100", func(b *testing.B) {
308 | client.Options().SetBatchSize(101)
309 | writeAPI = client.Writer(
310 | ctx,
311 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
312 | cxmem.NewBuffer(client.Options().BatchSize()),
313 | )
314 | b.ResetTimer()
315 | for i := 0; i < b.N; i++ {
316 | insertObjects(writeAPI, 100, b)
317 | }
318 | })
319 |
320 | b.StopTimer()
321 | writeAPI.Close()
322 | client.Close()
323 | }
324 |
325 | // X1000
326 | // goos: linux
327 | // goarch: amd64
328 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
329 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
330 | // BenchmarkInsertSimplestObjectsJust/10000000-12 1000 4705290 ns/op 1360000 B/op 40000 allocs/op
331 | // BenchmarkInsertSimplestObjectsJust/1000000-12 1000 410051 ns/op 136000 B/op 4000 allocs/op
332 | // BenchmarkInsertSimplestObjectsJust/100000-12 1000 45773 ns/op 13600 B/op 400 allocs/op
333 | // BenchmarkInsertSimplestObjectsJust/10000-12 1000 4851 ns/op 1360 B/op 40 allocs/op
334 | // BenchmarkInsertSimplestObjectsJust/1000-12 1000 431.4 ns/op 136 B/op 4 allocs/op
335 | // BenchmarkInsertSimplestObjectsJust/100-12 1000 66.40 ns/op 13 B/op 0 allocs/op
336 | // PASS
337 | // ok
338 | func BenchmarkInsertSimplestObjectsJust(b *testing.B) {
339 | ctx, cancel := context.WithCancel(context.Background())
340 | defer cancel()
341 |
342 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
343 | clickhousebuffer.WithFlushInterval(10000000),
344 | clickhousebuffer.WithBatchSize(10000000),
345 | ))
346 |
347 | var writeAPI clickhousebuffer.Writer
348 | b.ResetTimer()
349 |
350 | b.Run("10000000", func(b *testing.B) {
351 | client.Options().SetBatchSize(10000001)
352 | writeAPI = client.Writer(
353 | ctx,
354 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
355 | cxmem.NewBuffer(client.Options().BatchSize()),
356 | )
357 | b.ResetTimer()
358 | insertObjects(writeAPI, 10000000, b)
359 | })
360 |
361 | b.Run("1000000", func(b *testing.B) {
362 | client.Options().SetBatchSize(1000001)
363 | writeAPI = client.Writer(
364 | ctx,
365 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
366 | cxmem.NewBuffer(client.Options().BatchSize()),
367 | )
368 | b.ResetTimer()
369 | insertObjects(writeAPI, 1000000, b)
370 | })
371 |
372 | b.Run("100000", func(b *testing.B) {
373 | client.Options().SetBatchSize(100001)
374 | writeAPI = client.Writer(
375 | ctx,
376 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
377 | cxmem.NewBuffer(client.Options().BatchSize()),
378 | )
379 | b.ResetTimer()
380 | insertObjects(writeAPI, 100000, b)
381 | })
382 |
383 | b.Run("10000", func(b *testing.B) {
384 | client.Options().SetBatchSize(10001)
385 | writeAPI = client.Writer(
386 | ctx,
387 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
388 | cxmem.NewBuffer(client.Options().BatchSize()),
389 | )
390 | b.ResetTimer()
391 | insertObjects(writeAPI, 10000, b)
392 | })
393 |
394 | b.Run("1000", func(b *testing.B) {
395 | client.Options().SetBatchSize(1001)
396 | writeAPI = client.Writer(
397 | ctx,
398 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
399 | cxmem.NewBuffer(client.Options().BatchSize()),
400 | )
401 | b.ResetTimer()
402 | insertObjects(writeAPI, 1000, b)
403 | })
404 |
405 | b.Run("100", func(b *testing.B) {
406 | client.Options().SetBatchSize(101)
407 | writeAPI = client.Writer(
408 | ctx,
409 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
410 | cxmem.NewBuffer(client.Options().BatchSize()),
411 | )
412 | b.ResetTimer()
413 | insertObjects(writeAPI, 100, b)
414 | })
415 |
416 | b.StopTimer()
417 | writeAPI.Close()
418 | client.Close()
419 | }
420 |
421 | // x1000
422 | // goos: linux
423 | // goarch: amd64
424 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
425 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
426 | // BenchmarkInsertSimplestVectors/1000000-12 1000 182548 ns/op 72002 B/op 1000 allocs/op
427 | // BenchmarkInsertSimplestVectors/100000-12 1000 16291 ns/op 7200 B/op 100 allocs/op
428 | // BenchmarkInsertSimplestVectors/10000-12 1000 1638 ns/op 725 B/op 10 allocs/op
429 | // BenchmarkInsertSimplestVectors/1000-12 1000 208.4 ns/op 72 B/op 1 allocs/op
430 | // BenchmarkInsertSimplestVectors/100-12 1000 20.00 ns/op 7 B/op 0 allocs/op
431 | // PASS
432 | // ok
433 | // nolint:lll,dupl // it's OK
434 | func BenchmarkInsertSimplestVectors(b *testing.B) {
435 | ctx, cancel := context.WithCancel(context.Background())
436 | defer cancel()
437 |
438 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
439 | clickhousebuffer.WithFlushInterval(10000000),
440 | clickhousebuffer.WithBatchSize(100),
441 | ))
442 |
443 | var writeAPI clickhousebuffer.Writer
444 | b.ResetTimer()
445 |
446 | b.Run("1000000", func(b *testing.B) {
447 | client.Options().SetBatchSize(1000001)
448 | writeAPI = client.Writer(
449 | ctx,
450 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
451 | cxmem.NewBuffer(client.Options().BatchSize()),
452 | )
453 | b.ResetTimer()
454 | for i := 0; i < b.N; i++ {
455 | insertVectors(writeAPI, 1000000, b)
456 | }
457 | })
458 |
459 | b.Run("100000", func(b *testing.B) {
460 | client.Options().SetBatchSize(100001)
461 | writeAPI = client.Writer(
462 | ctx,
463 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
464 | cxmem.NewBuffer(client.Options().BatchSize()),
465 | )
466 | b.ResetTimer()
467 | for i := 0; i < b.N; i++ {
468 | insertVectors(writeAPI, 100000, b)
469 | }
470 | })
471 |
472 | b.Run("10000", func(b *testing.B) {
473 | client.Options().SetBatchSize(10001)
474 | writeAPI = client.Writer(
475 | ctx,
476 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
477 | cxmem.NewBuffer(client.Options().BatchSize()),
478 | )
479 | b.ResetTimer()
480 | for i := 0; i < b.N; i++ {
481 | insertVectors(writeAPI, 10000, b)
482 | }
483 | })
484 |
485 | b.Run("1000", func(b *testing.B) {
486 | client.Options().SetBatchSize(1001)
487 | writeAPI = client.Writer(
488 | ctx,
489 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
490 | cxmem.NewBuffer(client.Options().BatchSize()),
491 | )
492 | b.ResetTimer()
493 | for i := 0; i < b.N; i++ {
494 | insertVectors(writeAPI, 1000, b)
495 | }
496 | })
497 |
498 | b.Run("100", func(b *testing.B) {
499 | client.Options().SetBatchSize(101)
500 | writeAPI = client.Writer(
501 | ctx,
502 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
503 | cxmem.NewBuffer(client.Options().BatchSize()),
504 | )
505 | b.ResetTimer()
506 | for i := 0; i < b.N; i++ {
507 | insertVectors(writeAPI, 100, b)
508 | }
509 | })
510 |
511 | b.StopTimer()
512 | writeAPI.Close()
513 | client.Close()
514 | }
515 |
516 | // X1000
517 | // goos: linux
518 | // goarch: amd64
519 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
520 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
521 | // BenchmarkInsertSimplestVectorsJust/10000000-12 1000 2059182 ns/op 480000 B/op 10000 allocs/op
522 | // BenchmarkInsertSimplestVectorsJust/1000000-12 1000 176129 ns/op 48000 B/op 1000 allocs/op
523 | // BenchmarkInsertSimplestVectorsJust/100000-12 1000 17398 ns/op 4800 B/op 100 allocs/op
524 | // BenchmarkInsertSimplestVectorsJust/10000-12 1000 1937 ns/op 480 B/op 10 allocs/op
525 | // BenchmarkInsertSimplestVectorsJust/1000-12 1000 243.9 ns/op 48 B/op 1 allocs/op
526 | // BenchmarkInsertSimplestVectorsJust/100-12 1000 10.50 ns/op 4 B/op 0 allocs/op
527 | // PASS
528 | // ok
529 | func BenchmarkInsertSimplestVectorsJust(b *testing.B) {
530 | ctx, cancel := context.WithCancel(context.Background())
531 | defer cancel()
532 |
533 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
534 | clickhousebuffer.WithFlushInterval(10000000),
535 | clickhousebuffer.WithBatchSize(10000000),
536 | ))
537 |
538 | var writeAPI clickhousebuffer.Writer
539 | b.ResetTimer()
540 |
541 | b.Run("10000000", func(b *testing.B) {
542 | client.Options().SetBatchSize(10000001)
543 | writeAPI = client.Writer(
544 | ctx,
545 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
546 | cxmem.NewBuffer(client.Options().BatchSize()),
547 | )
548 | b.ResetTimer()
549 | insertVectors(writeAPI, 10000000, b)
550 | })
551 |
552 | b.Run("1000000", func(b *testing.B) {
553 | client.Options().SetBatchSize(1000001)
554 | writeAPI = client.Writer(
555 | ctx,
556 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
557 | cxmem.NewBuffer(client.Options().BatchSize()),
558 | )
559 | b.ResetTimer()
560 | insertVectors(writeAPI, 1000000, b)
561 | })
562 |
563 | b.Run("100000", func(b *testing.B) {
564 | client.Options().SetBatchSize(100001)
565 | writeAPI = client.Writer(
566 | ctx,
567 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
568 | cxmem.NewBuffer(client.Options().BatchSize()),
569 | )
570 | b.ResetTimer()
571 | insertVectors(writeAPI, 100000, b)
572 | })
573 |
574 | b.Run("10000", func(b *testing.B) {
575 | client.Options().SetBatchSize(10001)
576 | writeAPI = client.Writer(
577 | ctx,
578 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
579 | cxmem.NewBuffer(client.Options().BatchSize()),
580 | )
581 | b.ResetTimer()
582 | insertVectors(writeAPI, 10000, b)
583 | })
584 |
585 | b.Run("1000", func(b *testing.B) {
586 | client.Options().SetBatchSize(1001)
587 | writeAPI = client.Writer(
588 | ctx,
589 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
590 | cxmem.NewBuffer(client.Options().BatchSize()),
591 | )
592 | b.ResetTimer()
593 | insertVectors(writeAPI, 1000, b)
594 | })
595 |
596 | b.Run("100", func(b *testing.B) {
597 | client.Options().SetBatchSize(101)
598 | writeAPI = client.Writer(
599 | ctx,
600 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
601 | cxmem.NewBuffer(client.Options().BatchSize()),
602 | )
603 | b.ResetTimer()
604 | insertVectors(writeAPI, 100, b)
605 | })
606 |
607 | b.StopTimer()
608 | writeAPI.Close()
609 | client.Close()
610 | }
611 |
612 | // X1000
613 | // goos: linux
614 | // goarch: amd64
615 | // pkg: github.com/zikwall/clickhouse-buffer/v4/bench
616 | // cpu: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
617 | // BenchmarkInsertSimplestEmptyVectors/1000000-12 1000 132887 ns/op 24002 B/op 0 allocs/op
618 | // BenchmarkInsertSimplestEmptyVectors/100000-12 1000 13404 ns/op 2400 B/op 0 allocs/op
619 | // BenchmarkInsertSimplestEmptyVectors/10000-12 1000 1299 ns/op 245 B/op 0 allocs/op
620 | // BenchmarkInsertSimplestEmptyVectors/1000-12 1000 122.1 ns/op 0 B/op 0 allocs/op
621 | // BenchmarkInsertSimplestEmptyVectors/100-12 1000 6.800 ns/op 0 B/op 0 allocs/op
622 | // PASS
623 | // ok
624 | // nolint:lll,dupl // it's OK
625 | func BenchmarkInsertSimplestEmptyVectors(b *testing.B) {
626 | ctx, cancel := context.WithCancel(context.Background())
627 | defer cancel()
628 |
629 | client := clickhousebuffer.NewClientWithOptions(ctx, &clickhouseMock{}, clickhousebuffer.NewOptions(
630 | clickhousebuffer.WithFlushInterval(10000000),
631 | clickhousebuffer.WithBatchSize(100),
632 | ))
633 |
634 | var writeAPI clickhousebuffer.Writer
635 | b.ResetTimer()
636 |
637 | b.Run("1000000", func(b *testing.B) {
638 | client.Options().SetBatchSize(1000001)
639 | writeAPI = client.Writer(
640 | ctx,
641 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
642 | cxmem.NewBuffer(client.Options().BatchSize()),
643 | )
644 | b.ResetTimer()
645 | for i := 0; i < b.N; i++ {
646 | insertEmptyVectors(writeAPI, 1000000, b)
647 | }
648 | })
649 |
650 | b.Run("100000", func(b *testing.B) {
651 | client.Options().SetBatchSize(100001)
652 | writeAPI = client.Writer(
653 | ctx,
654 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
655 | cxmem.NewBuffer(client.Options().BatchSize()),
656 | )
657 | b.ResetTimer()
658 | for i := 0; i < b.N; i++ {
659 | insertEmptyVectors(writeAPI, 100000, b)
660 | }
661 | })
662 |
663 | b.Run("10000", func(b *testing.B) {
664 | client.Options().SetBatchSize(10001)
665 | writeAPI = client.Writer(
666 | ctx,
667 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
668 | cxmem.NewBuffer(client.Options().BatchSize()),
669 | )
670 | b.ResetTimer()
671 | for i := 0; i < b.N; i++ {
672 | insertEmptyVectors(writeAPI, 10000, b)
673 | }
674 | })
675 |
676 | b.Run("1000", func(b *testing.B) {
677 | client.Options().SetBatchSize(1001)
678 | writeAPI = client.Writer(
679 | ctx,
680 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
681 | cxmem.NewBuffer(client.Options().BatchSize()),
682 | )
683 | b.ResetTimer()
684 | for i := 0; i < b.N; i++ {
685 | insertEmptyVectors(writeAPI, 1000, b)
686 | }
687 | })
688 |
689 | b.Run("100", func(b *testing.B) {
690 | client.Options().SetBatchSize(101)
691 | writeAPI = client.Writer(
692 | ctx,
693 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
694 | cxmem.NewBuffer(client.Options().BatchSize()),
695 | )
696 | b.ResetTimer()
697 | for i := 0; i < b.N; i++ {
698 | insertEmptyVectors(writeAPI, 100, b)
699 | }
700 | })
701 |
702 | b.StopTimer()
703 | writeAPI.Close()
704 | client.Close()
705 | }
706 |
707 | func insertObjects(writeAPI clickhousebuffer.Writer, x int, b *testing.B) {
708 | b.ResetTimer()
709 | var object *BenchTable
710 | for i := 0; i < x; i++ {
711 | object = &BenchTable{ID: 1}
712 | writeAPI.WriteRow(object)
713 | }
714 | // write one extra (empty) vector so the buffer reaches the batch size and flushes
715 | b.StopTimer()
716 | writeAPI.WriteVector(cx.Vector{})
717 | b.StartTimer()
718 | }
719 |
720 | func insertVectors(writeAPI clickhousebuffer.Writer, x int, b *testing.B) {
721 | b.ResetTimer()
722 | var vector cx.Vector
723 | for i := 0; i < x; i++ {
724 | vector = cx.Vector{1, "", ""}
725 | writeAPI.WriteVector(vector)
726 | }
727 | // write one extra (empty) vector so the buffer reaches the batch size and flushes
728 | b.StopTimer()
729 | writeAPI.WriteVector(cx.Vector{})
730 | b.StartTimer()
731 | }
732 |
733 | func insertEmptyVectors(writeAPI clickhousebuffer.Writer, x int, b *testing.B) {
734 | b.ResetTimer()
735 | for i := 0; i < x; i++ {
736 | writeAPI.WriteVector(cx.Vector{})
737 | }
738 | // write one extra (empty) vector so the buffer reaches the batch size and flushes
739 | b.StopTimer()
740 | writeAPI.WriteVector(cx.Vector{})
741 | b.StartTimer()
742 | }
743 |
744 | func insertPreAllocatedObjects(writeAPI clickhousebuffer.Writer, x int, b *testing.B) {
745 | objects := make([]cx.Vectorable, 0, x+1)
746 | for i := 0; i < x; i++ {
747 | objects = append(objects, &BenchTable{ID: 1})
748 | }
749 | b.ResetTimer()
750 | for i := range objects {
751 | writeAPI.WriteRow(objects[i])
752 | }
753 | // write one extra (empty) vector so the buffer reaches the batch size and flushes
754 | b.StopTimer()
755 | // nolint:staticcheck // it's OK
756 | objects = objects[:0]
757 | objects = nil
758 | writeAPI.WriteVector(cx.Vector{})
759 | b.StartTimer()
760 | }
761 |
762 | func insertPreAllocatedVectors(writeAPI clickhousebuffer.Writer, x int, b *testing.B) {
763 | vectors := make([]cx.Vector, 0, x+1)
764 | for i := 0; i < x; i++ {
765 | vectors = append(vectors, cx.Vector{1, "", ""})
766 | }
767 | b.ResetTimer()
768 | for i := range vectors {
769 | writeAPI.WriteVector(vectors[i])
770 | }
771 | // write one extra (empty) vector so the buffer reaches the batch size and flushes
772 | b.StopTimer()
773 | // nolint:staticcheck // it's OK
774 | vectors = vectors[:0]
775 | vectors = nil
776 | writeAPI.WriteVector(cx.Vector{})
777 | b.StartTimer()
778 | }
779 |
--------------------------------------------------------------------------------
/client.go:
--------------------------------------------------------------------------------
1 | package clickhousebuffer
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
8 | "github.com/zikwall/clickhouse-buffer/v4/src/retry"
9 | )
10 |
11 | // Client is the main interface and provides the top-level API.
12 | // Client creates child Writer-s and holds all of the necessary configuration.
13 | // Client owns the Clickhouse database connection.
14 | // Client exposes a retry.Retryable interface for re-processing failed packets.
15 | type Client interface {
16 | // Options returns the options associated with client
17 | Options() *Options
18 | // WriteBatch writes a batch of rows to Clickhouse; it is used implicitly by the
19 | // non-blocking Writer and explicitly by the blocking WriterBlocking.
20 | WriteBatch(context.Context, cx.View, *cx.Batch) error
21 | // Writer returns the asynchronous, non-blocking Writer client.
22 | // Ensures that a single Writer instance is reused for each table view.
23 | Writer(context.Context, cx.View, cx.Buffer) Writer
24 | // WriterBlocking returns the synchronous, blocking WriterBlocking client.
25 | // Ensures that a single WriterBlocking instance is reused for each table view.
26 | WriterBlocking(cx.View) WriterBlocking
27 | // RetryClient returns the retry client
28 | RetryClient() retry.Retryable
29 | // Close ensures all ongoing asynchronous write clients finish.
30 | Close()
31 | }
32 |
33 | // Implementation of the Client interface
34 | type clientImpl struct {
35 | context context.Context
36 | clickhouse cx.Clickhouse
37 | options *Options
38 | writeAPIs map[string]Writer
39 | syncWriteAPIs map[string]WriterBlocking
40 | mu sync.RWMutex
41 | retry retry.Retryable
42 | logger cx.Logger
43 | }
44 |
45 | // NewClient creates an object implementing the Client interface with default options
46 | func NewClient(ctx context.Context, clickhouse cx.Clickhouse) Client {
47 | return NewClientWithOptions(ctx, clickhouse, NewOptions())
48 | }
49 |
50 | // NewClientWithOptions is similar to NewClient, except that it accepts an explicit
51 | // Options object carrying the desired configuration.
52 | // NewClientWithOptions returns an implementation of the Client interface.
53 | func NewClientWithOptions(ctx context.Context, clickhouse cx.Clickhouse, options *Options) Client {
54 | if options.logger == nil {
55 | options.logger = cx.NewDefaultLogger()
56 | }
57 | client := &clientImpl{
58 | context: ctx,
59 | clickhouse: clickhouse,
60 | options: options,
61 | writeAPIs: map[string]Writer{},
62 | syncWriteAPIs: map[string]WriterBlocking{},
63 | logger: options.logger,
64 | }
65 | // if resending of undelivered messages is enabled, make sure all the required settings are in place
66 | if options.isRetryEnabled {
67 | // if no custom queue engine is specified, fall back to the default in-memory engine,
68 | // which covers the vast majority of use cases.
69 | if options.queue == nil {
70 | options.queue = retry.NewImMemoryQueueEngine()
71 | }
72 | client.retry = retry.NewRetry(
73 | ctx, options.queue, retry.NewDefaultWriter(clickhouse), options.logger, options.isDebug,
74 | )
75 | }
76 | return client
77 | }
78 |
79 | // Options returns the global options object
80 | func (c *clientImpl) Options() *Options {
81 | return c.options
82 | }
83 |
84 | // Writer returns the asynchronous, non-blocking Writer client.
85 | // Ensures that a single Writer instance is reused for each table view.
86 | func (c *clientImpl) Writer(ctx context.Context, view cx.View, buf cx.Buffer) Writer {
87 | key := view.Name
88 | c.mu.Lock()
89 | if _, ok := c.writeAPIs[key]; !ok {
90 | c.writeAPIs[key] = NewWriter(ctx, c, view, buf)
91 | }
92 | writer := c.writeAPIs[key]
93 | c.mu.Unlock()
94 | return writer
95 | }
96 |
97 | // WriterBlocking returns the synchronous, blocking WriterBlocking client.
98 | // Ensures that a single WriterBlocking instance is reused for each table view.
99 | func (c *clientImpl) WriterBlocking(view cx.View) WriterBlocking {
100 | key := view.Name
101 | c.mu.Lock()
102 | if _, ok := c.syncWriteAPIs[key]; !ok {
103 | c.syncWriteAPIs[key] = NewWriterBlocking(c, view)
104 | }
105 | writer := c.syncWriteAPIs[key]
106 | c.mu.Unlock()
107 | return writer
108 | }
109 |
110 | // Close is the top-level API method that safely closes all child asynchronous and synchronous Writer-s
111 | func (c *clientImpl) Close() {
112 | if c.options.isDebug {
113 | c.logger.Log("close clickhouse buffer client")
114 | c.logger.Log("close async writers")
115 | }
116 | // closing and destroying all asynchronous writers
117 | c.mu.Lock()
118 | for key, w := range c.writeAPIs {
119 | w.Close()
120 | delete(c.writeAPIs, key)
121 | }
122 | c.mu.Unlock()
123 | // closing and destroying all synchronous writers
124 | if c.options.isDebug {
125 | c.logger.Log("close sync writers")
126 | }
127 | c.mu.Lock()
128 | for key := range c.syncWriteAPIs {
129 | delete(c.syncWriteAPIs, key)
130 | }
131 | c.mu.Unlock()
132 | }
133 |
134 | // WriteBatch is the top-level API method for writing to the Clickhouse database.
135 | // All child Writer-s use this method to write their accumulated data.
136 | func (c *clientImpl) WriteBatch(ctx context.Context, view cx.View, batch *cx.Batch) error {
137 | _, err := c.clickhouse.Insert(ctx, view, batch.Rows())
138 | if err != nil {
139 | // if there is an acceptable error and if the functionality of resending data is activated,
140 | // try to repeat the operation
141 | if c.options.isRetryEnabled && cx.IsResendAvailable(err) {
142 | c.retry.Retry(retry.NewPacket(view, batch))
143 | }
144 | return err
145 | }
146 | return nil
147 | }
148 |
149 | // RetryClient returns an implementation of the retry.Retryable interface
150 | func (c *clientImpl) RetryClient() retry.Retryable {
151 | return c.retry
152 | }
153 |
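154 | // Usage sketch (illustrative only; condensed from the programs under example/cmd,
155 | // which remain the authoritative reference):
156 | //
157 | //	client := clickhousebuffer.NewClient(ctx, ch) // ch implements cx.Clickhouse
158 | //	writeAPI := client.Writer(
159 | //		ctx,
160 | //		cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
161 | //		cxmem.NewBuffer(client.Options().BatchSize()),
162 | //	)
163 | //	writeAPI.WriteRow(row) // row must implement cx.Vectorable
164 | //	client.Close()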
--------------------------------------------------------------------------------
/example/README.md:
--------------------------------------------------------------------------------
1 | ### Example
2 |
3 | ```shell
4 | export CLICKHOUSE_HOST=111.11.11.11:9000 CLICKHOUSE_DB=default
5 | export CLICKHOUSE_USER=default CLICKHOUSE_PASS=password_if_needed
6 | export REDIS_HOST=111.11.11.11:6379
7 | export REDIS_PASS=password_if_needed
8 | ```
9 |
10 | - `$ go run ./cmd/simple/main.go`
11 | - `$ go run ./cmd/redis/main.go`
12 |
13 | ```shell
14 | clickhouse-client -h <host>
15 |
16 | SELECT
17 | id,
18 | uuid,
19 | insert_ts
20 | FROM default.example
21 | ORDER BY id ASC
22 |
23 | Query id: 074f42ff-0ea7-44ca-9cd1-735e8fb5ce54
24 |
25 | ┌─id─┬─uuid─────┬─insert_ts───────────┐
26 | │ 1 │ uuidf 1 │ 09 Jun 22 13:42 MSK │
27 | │ 2 │ uuidf 2 │ 09 Jun 22 13:42 MSK │
28 | │ 3 │ uuidf 3 │ 09 Jun 22 13:42 MSK │
29 | │ 4 │ uuidf 4 │ 09 Jun 22 13:42 MSK │
30 | │ 5 │ uuidf 5 │ 09 Jun 22 13:42 MSK │
31 | │ 6 │ uuidf 6 │ 09 Jun 22 13:42 MSK │
32 | │ 7 │ uuidf 7 │ 09 Jun 22 13:42 MSK │
33 | │ 8 │ uuidf 8 │ 09 Jun 22 13:42 MSK │
34 | │ 9 │ uuidf 9 │ 09 Jun 22 13:42 MSK │
35 | │ 10 │ uuidf 10 │ 09 Jun 22 13:42 MSK │
36 | └────┴──────────┴─────────────────────┘
37 |
38 | 10 rows in set. Elapsed: 0.015 sec.
39 | ```
40 |
41 | - `$ go run ./cmd/advanced/main.go`
42 | - `$ go run ./cmd/advanced_redis/main.go`
43 |
44 | ```sql
45 | SELECT * FROM default.advanced_example;
46 | ```
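47 | 
48 | All of the example programs share the same core write flow. A condensed sketch
49 | (mirroring `cmd/simple_2/main.go`; table creation, most connection options and
50 | error handling are omitted — see the sources above for the complete versions):
51 | 
52 | ```go
53 | package main
54 | 
55 | import (
56 | 	"context"
57 | 	"os"
58 | 	"time"
59 | 
60 | 	"github.com/ClickHouse/clickhouse-go/v2"
61 | 
62 | 	clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
63 | 	"github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
64 | 	"github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
65 | 	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
66 | 	"github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
67 | )
68 | 
69 | func main() {
70 | 	ctx, cancel := context.WithCancel(context.Background())
71 | 	defer cancel()
72 | 
73 | 	// connect over the native protocol using the environment variables exported above
74 | 	ch, _, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
75 | 		Addr: []string{os.Getenv("CLICKHOUSE_HOST")},
76 | 		Auth: clickhouse.Auth{
77 | 			Database: os.Getenv("CLICKHOUSE_DB"),
78 | 			Username: os.Getenv("CLICKHOUSE_USER"),
79 | 			Password: os.Getenv("CLICKHOUSE_PASS"),
80 | 		},
81 | 	}, &cx.RuntimeOptions{WriteTimeout: 15 * time.Second})
82 | 	if err != nil {
83 | 		panic(err)
84 | 	}
85 | 
86 | 	// buffered client: rows are flushed when the batch is full or the flush interval elapses
87 | 	client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
88 | 		clickhousebuffer.WithFlushInterval(1000),
89 | 		clickhousebuffer.WithBatchSize(5),
90 | 	))
91 | 	writeAPI := client.Writer(
92 | 		ctx,
93 | 		cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
94 | 		cxmem.NewBuffer(client.Options().BatchSize()),
95 | 	)
96 | 
97 | 	// rows accumulate in the in-memory buffer and are written to ClickHouse in batches
98 | 	for i := int32(1); i <= 10; i++ {
99 | 		writeAPI.WriteRow(&tables.ExampleTable{ID: i, UUID: "uuid", InsertTS: time.Now()})
100 | 	}
101 | 
102 | 	<-time.After(2 * time.Second) // give the background writer time to flush
103 | 	client.Close()
104 | }
105 | ```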
--------------------------------------------------------------------------------
/example/cmd/advanced/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "log"
6 | "os"
7 | "sync"
8 | "time"
9 |
10 | "github.com/ClickHouse/clickhouse-go/v2"
11 | "github.com/google/uuid"
12 |
13 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
14 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
18 | )
19 |
20 | func main() {
21 | hostname := os.Getenv("CLICKHOUSE_HOST")
22 | username := os.Getenv("CLICKHOUSE_USER")
23 | database := os.Getenv("CLICKHOUSE_DB")
24 | password := os.Getenv("CLICKHOUSE_PASS")
25 |
26 | ctx, cancel := context.WithCancel(context.Background())
27 | defer cancel()
28 |
29 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
30 | Addr: []string{hostname},
31 | Auth: clickhouse.Auth{
32 | Database: database,
33 | Username: username,
34 | Password: password,
35 | },
36 | Settings: clickhouse.Settings{
37 | "max_execution_time": 60,
38 | },
39 | DialTimeout: 5 * time.Second,
40 | Compression: &clickhouse.Compression{
41 | Method: clickhouse.CompressionLZ4,
42 | },
43 | Debug: true,
44 | }, &cx.RuntimeOptions{
45 | WriteTimeout: 15 * time.Second,
46 | })
47 | if err != nil {
48 | log.Panicln(err)
49 | }
50 | if err = tables.CreateAdvancedTableNative(ctx, conn); err != nil {
51 | log.Panicln(err)
52 | }
53 |
54 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
55 | clickhousebuffer.WithFlushInterval(1000),
56 | clickhousebuffer.WithBatchSize(10),
57 | clickhousebuffer.WithDebugMode(true),
58 | clickhousebuffer.WithRetry(false),
59 | ))
60 | writeAPI := client.Writer(
61 | ctx,
62 | cx.NewView(tables.AdvancedTableName(), tables.AdvancedTableColumns()),
63 | cxmem.NewBuffer(client.Options().BatchSize()),
64 | )
65 |
66 | wg := sync.WaitGroup{}
67 | wg.Add(1)
68 | go func() {
69 | errorsCh := writeAPI.Errors()
70 | for chErr := range errorsCh {
71 | log.Printf("clickhouse write error: %s\n", chErr.Error())
72 | }
73 | wg.Done()
74 | }()
75 |
76 | write(writeAPI)
77 |
78 | <-time.After(time.Second * 2)
79 | client.Close()
80 | wg.Wait()
81 | }
82 |
83 | // nolint:gocritic // it's OK
84 | func write(writeAPI clickhousebuffer.Writer) {
85 | for i := 0; i < 50; i++ {
86 | writeAPI.WriteRow(&tables.AdvancedTable{
87 | Col1: uint8(42),
88 | Col2: "ClickHouse",
89 | Col3: "Inc",
90 | Col4: uuid.New(),
91 | // Map(String, UInt8)
92 | // Col5: map[string]uint8{"key": 1},
93 | // Array(String)
94 | Col6: []string{"Q", "W", "E", "R", "T", "Y"},
95 | // Tuple(String, UInt8, Array(Map(String, String)))
96 | Col7: []interface{}{
97 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
98 | time.Now(),
99 | uint32(5),
100 | },
101 | },
102 | Col8: time.Now(),
103 | Col9: "hello",
104 | Col10: time.Now(),
105 | Col11: b2i8(i%2 == 0),
106 | Col12: time.Now(),
107 | Col13: [][]interface{}{
108 | {
109 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
110 | time.Now(),
111 | uint32(5),
112 | },
113 | },
114 | {
115 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
116 | time.Now(),
117 | uint32(5),
118 | },
119 | },
120 | },
121 | })
122 | }
123 | }
124 |
125 | // the example table stores booleans as Int8 (older ClickHouse versions have no Bool type)
126 | func b2i8(b bool) int8 {
127 | if b {
128 | return 1
129 | }
130 | return 0
131 | }
132 |
--------------------------------------------------------------------------------
/example/cmd/advanced_redis/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "encoding/gob"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/go-redis/redis/v8"
13 | "github.com/google/uuid"
14 |
15 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
16 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
18 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
19 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
20 | )
21 |
22 | func main() {
23 | hostname := os.Getenv("CLICKHOUSE_HOST")
24 | username := os.Getenv("CLICKHOUSE_USER")
25 | database := os.Getenv("CLICKHOUSE_DB")
26 | password := os.Getenv("CLICKHOUSE_PASS")
27 | redisHost := os.Getenv("REDIS_HOST")
28 | redisPass := os.Getenv("REDIS_PASS")
29 |
30 | ctx, cancel := context.WithCancel(context.Background())
31 | defer cancel()
32 |
33 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
34 | Addr: []string{hostname},
35 | Auth: clickhouse.Auth{
36 | Database: database,
37 | Username: username,
38 | Password: password,
39 | },
40 | Settings: clickhouse.Settings{
41 | "max_execution_time": 60,
42 | },
43 | DialTimeout: 5 * time.Second,
44 | Compression: &clickhouse.Compression{
45 | Method: clickhouse.CompressionLZ4,
46 | },
47 | Debug: true,
48 | }, &cx.RuntimeOptions{
49 | WriteTimeout: 15 * time.Second,
50 | })
51 | if err != nil {
52 | log.Panicln(err)
53 | }
54 |
55 | if err = tables.CreateAdvancedTableNative(ctx, conn); err != nil {
56 | log.Panicln(err)
57 | }
58 |
59 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
60 | clickhousebuffer.WithFlushInterval(1000),
61 | clickhousebuffer.WithBatchSize(10),
62 | clickhousebuffer.WithDebugMode(true),
63 | clickhousebuffer.WithRetry(false),
64 | ))
65 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
66 | Addr: redisHost,
67 | Password: redisPass,
68 | DB: 10,
69 | }), "bucket", client.Options().BatchSize())
70 | if err != nil {
71 | log.Panicln(err)
72 | }
73 | writeAPI := client.Writer(
74 | ctx,
75 | cx.NewView(tables.AdvancedTableName(), tables.AdvancedTableColumns()),
76 | rxbuffer,
77 | )
78 |
79 | wg := sync.WaitGroup{}
80 | wg.Add(1)
81 | go func() {
82 | errorsCh := writeAPI.Errors()
83 | for chErr := range errorsCh {
84 | log.Printf("clickhouse write error: %s\n", chErr.Error())
85 | }
86 | wg.Done()
87 | }()
88 |
89 | // register concrete data types with gob before writing rows to the Redis buffer
90 | gob.Register(uuid.UUID{})
91 | // Tuple
92 | gob.Register([]interface{}{})
93 | gob.Register(time.Time{})
94 | // array of Tuple
95 | gob.Register([][]interface{}{})
96 |
97 | write(writeAPI)
98 |
99 | <-time.After(time.Second * 2)
100 | client.Close()
101 | wg.Wait()
102 | }
103 |
104 | // nolint:gocritic // it's OK
105 | func write(writeAPI clickhousebuffer.Writer) {
106 | for i := 0; i < 50; i++ {
107 | writeAPI.WriteRow(&tables.AdvancedTable{
108 | Col1: uint8(42),
109 | Col2: "ClickHouse",
110 | Col3: "Inc",
111 | Col4: uuid.New(),
112 | // Map(String, UInt8)
113 | // Col5: map[string]uint8{"key": 1},
114 | // Array(String)
115 | Col6: []string{"Q", "W", "E", "R", "T", "Y"},
116 | // Tuple(String, UInt8, Array(Map(String, String)))
117 | Col7: []interface{}{
118 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
119 | time.Now(),
120 | uint32(5),
121 | },
122 | },
123 | Col8: time.Now(),
124 | Col9: "hello",
125 | Col10: time.Now(),
126 | Col11: b2i8(i%2 == 0),
127 | Col12: time.Now(),
128 | Col13: [][]interface{}{
129 | {
130 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
131 | time.Now(),
132 | uint32(5),
133 | },
134 | },
135 | {
136 | "String Value", uint8(5), []string{"val1", "val2", "val3"}, []interface{}{
137 | time.Now(),
138 | uint32(5),
139 | },
140 | },
141 | },
142 | })
143 | }
144 | }
145 |
146 | // the example table stores booleans as Int8 (older ClickHouse versions have no Bool type)
147 | func b2i8(b bool) int8 {
148 | if b {
149 | return 1
150 | }
151 | return 0
152 | }
153 |
--------------------------------------------------------------------------------
/example/cmd/redis/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/go-redis/redis/v8"
13 |
14 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
15 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
18 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
19 | )
20 |
21 | func main() {
22 | hostname := os.Getenv("CLICKHOUSE_HOST")
23 | username := os.Getenv("CLICKHOUSE_USER")
24 | database := os.Getenv("CLICKHOUSE_DB")
25 | password := os.Getenv("CLICKHOUSE_PASS")
26 | redisHost := os.Getenv("REDIS_HOST")
27 | redisPass := os.Getenv("REDIS_PASS")
28 |
29 | ctx, cancel := context.WithCancel(context.Background())
30 | defer cancel()
31 |
32 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
33 | Addr: []string{hostname},
34 | Auth: clickhouse.Auth{
35 | Database: database,
36 | Username: username,
37 | Password: password,
38 | },
39 | Settings: clickhouse.Settings{
40 | "max_execution_time": 60,
41 | },
42 | DialTimeout: 5 * time.Second,
43 | Compression: &clickhouse.Compression{
44 | Method: clickhouse.CompressionLZ4,
45 | },
46 | Debug: true,
47 | }, &cx.RuntimeOptions{
48 | WriteTimeout: 15 * time.Second,
49 | })
50 | if err != nil {
51 | log.Panicln(err)
52 | }
53 | if err = tables.CreateTableNative(ctx, conn); err != nil {
54 | log.Panicln(err)
55 | }
56 |
57 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
58 | clickhousebuffer.WithFlushInterval(1000),
59 | clickhousebuffer.WithBatchSize(5),
60 | clickhousebuffer.WithDebugMode(true),
61 | clickhousebuffer.WithRetry(false),
62 | ))
63 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
64 | Addr: redisHost,
65 | Password: redisPass,
66 | }), "bucket", client.Options().BatchSize())
67 | if err != nil {
68 | log.Panicln(err)
69 | }
70 |
71 | writeAPI := client.Writer(ctx, cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()), rxbuffer)
72 | wg := sync.WaitGroup{}
73 | wg.Add(1)
74 | go func() {
75 | errorsCh := writeAPI.Errors()
76 | for chErr := range errorsCh {
77 | log.Printf("clickhouse write error: %s\n", chErr.Error())
78 | }
79 | wg.Done()
80 | }()
81 |
82 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
83 | for _, val := range int32s {
84 | writeAPI.WriteRow(&tables.ExampleTable{
85 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
86 | })
87 | }
88 |
89 | <-time.After(time.Second * 2)
90 | client.Close()
91 | wg.Wait()
92 | }
93 |
--------------------------------------------------------------------------------
/example/cmd/redis_safe/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/go-redis/redis/v8"
13 |
14 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
15 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
18 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
19 | )
20 |
21 | func main() {
22 | hostname := os.Getenv("CLICKHOUSE_HOST")
23 | username := os.Getenv("CLICKHOUSE_USER")
24 | database := os.Getenv("CLICKHOUSE_DB")
25 | password := os.Getenv("CLICKHOUSE_PASS")
26 | redisHost := os.Getenv("REDIS_HOST")
27 | redisPass := os.Getenv("REDIS_PASS")
28 |
29 | ctx, cancel := context.WithCancel(context.Background())
30 | defer cancel()
31 |
32 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
33 | Addr: []string{hostname},
34 | Auth: clickhouse.Auth{
35 | Database: database,
36 | Username: username,
37 | Password: password,
38 | },
39 | Settings: clickhouse.Settings{
40 | "max_execution_time": 60,
41 | },
42 | DialTimeout: 5 * time.Second,
43 | Compression: &clickhouse.Compression{
44 | Method: clickhouse.CompressionLZ4,
45 | },
46 | Debug: true,
47 | }, &cx.RuntimeOptions{
48 | WriteTimeout: 15 * time.Second,
49 | })
50 | if err != nil {
51 | log.Panicln(err)
52 | }
53 | if err = tables.CreateTableNative(ctx, conn); err != nil {
54 | log.Panicln(err)
55 | }
56 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
57 | clickhousebuffer.WithFlushInterval(1000),
58 | clickhousebuffer.WithBatchSize(5),
59 | clickhousebuffer.WithDebugMode(true),
60 | clickhousebuffer.WithRetry(false),
61 | ))
62 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
63 | Addr: redisHost,
64 | Password: redisPass,
65 | }), "bucket", client.Options().BatchSize())
66 | if err != nil {
67 | log.Panicln(err)
68 | }
69 | writeAPI := client.Writer(ctx, cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()), rxbuffer)
70 | wg := sync.WaitGroup{}
71 | wg.Add(1)
72 | go func() {
73 | errorsCh := writeAPI.Errors()
74 | for chErr := range errorsCh {
75 | log.Printf("clickhouse write error: %s\n", chErr.Error())
76 | }
77 | wg.Done()
78 | }()
79 |
80 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
81 | for _, val := range int32s {
82 | writeAPI.TryWriteRow(&tables.ExampleTable{
83 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
84 | })
85 | }
86 |
87 | <-time.After(time.Second * 2)
88 | client.Close()
89 | wg.Wait()
90 | }
91 |
--------------------------------------------------------------------------------
/example/cmd/redis_sql/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/go-redis/redis/v8"
13 |
14 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
15 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
18 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxsql"
19 | )
20 |
21 | func main() {
22 | hostname := os.Getenv("CLICKHOUSE_HOST")
23 | username := os.Getenv("CLICKHOUSE_USER")
24 | database := os.Getenv("CLICKHOUSE_DB")
25 | password := os.Getenv("CLICKHOUSE_PASS")
26 | redisHost := os.Getenv("REDIS_HOST")
27 | redisPass := os.Getenv("REDIS_PASS")
28 |
29 | ctx, cancel := context.WithCancel(context.Background())
30 | defer cancel()
31 |
32 | ch, conn, err := cxsql.NewClickhouse(ctx, &clickhouse.Options{
33 | Addr: []string{hostname},
34 | Auth: clickhouse.Auth{
35 | Database: database,
36 | Username: username,
37 | Password: password,
38 | },
39 | Settings: clickhouse.Settings{
40 | "max_execution_time": 60,
41 | },
42 | DialTimeout: 5 * time.Second,
43 | Compression: &clickhouse.Compression{
44 | Method: clickhouse.CompressionLZ4,
45 | },
46 | Debug: true,
47 | }, &cx.RuntimeOptions{})
48 | if err != nil {
49 | log.Panicln(err)
50 | }
51 |
52 | if err = tables.CreateTableSQL(ctx, conn); err != nil {
53 | log.Panicln(err)
54 | }
55 |
56 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
57 | clickhousebuffer.WithFlushInterval(1000),
58 | clickhousebuffer.WithBatchSize(5),
59 | clickhousebuffer.WithDebugMode(true),
60 | clickhousebuffer.WithRetry(false),
61 | ))
62 |
63 | rxbuffer, err := cxredis.NewBuffer(ctx, redis.NewClient(&redis.Options{
64 | Addr: redisHost,
65 | Password: redisPass,
66 | }), "bucket", client.Options().BatchSize())
67 | if err != nil {
68 | log.Panicln(err)
69 | }
70 |
71 | writeAPI := client.Writer(ctx, cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()), rxbuffer)
72 |
73 | wg := sync.WaitGroup{}
74 | wg.Add(1)
75 | go func() {
76 | errorsCh := writeAPI.Errors()
77 | for err := range errorsCh {
78 | log.Printf("clickhouse write error: %s\n", err.Error())
79 | }
80 | wg.Done()
81 | }()
82 |
83 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
84 | for _, val := range int32s {
85 | writeAPI.WriteRow(&tables.ExampleTable{
86 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
87 | })
88 | }
89 |
90 | <-time.After(time.Second * 2)
91 | client.Close()
92 | wg.Wait()
93 | }
94 |
--------------------------------------------------------------------------------
/example/cmd/simple/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 |
13 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
14 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
18 | )
19 |
20 | func main() {
21 | hostname := os.Getenv("CLICKHOUSE_HOST")
22 | username := os.Getenv("CLICKHOUSE_USER")
23 | database := os.Getenv("CLICKHOUSE_DB")
24 | password := os.Getenv("CLICKHOUSE_PASS")
25 |
26 | ctx, cancel := context.WithCancel(context.Background())
27 | defer cancel()
28 |
29 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
30 | Addr: []string{hostname},
31 | Auth: clickhouse.Auth{
32 | Database: database,
33 | Username: username,
34 | Password: password,
35 | },
36 | Settings: clickhouse.Settings{
37 | "max_execution_time": 60,
38 | },
39 | DialTimeout: 5 * time.Second,
40 | Compression: &clickhouse.Compression{
41 | Method: clickhouse.CompressionLZ4,
42 | },
43 | Debug: true,
44 | }, &cx.RuntimeOptions{
45 | WriteTimeout: 15 * time.Second,
46 | })
47 | if err != nil {
48 | log.Panicln(err)
49 | }
50 |
51 | if err = tables.CreateTableNative(ctx, conn); err != nil {
52 | log.Panicln(err)
53 | }
54 |
55 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
56 | clickhousebuffer.WithFlushInterval(1000),
57 | clickhousebuffer.WithBatchSize(5),
58 | clickhousebuffer.WithDebugMode(true),
59 | clickhousebuffer.WithRetry(false),
60 | ))
61 | writeAPI := client.Writer(
62 | ctx,
63 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
64 | cxmem.NewBuffer(client.Options().BatchSize()),
65 | )
66 |
67 | wg := sync.WaitGroup{}
68 | wg.Add(1)
69 | go func() {
70 | errorsCh := writeAPI.Errors()
71 | for chErr := range errorsCh {
72 | log.Printf("clickhouse write error: %s\n", chErr.Error())
73 | }
74 | wg.Done()
75 | }()
76 |
77 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
78 | for _, val := range int32s {
79 | writeAPI.WriteRow(&tables.ExampleTable{
80 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
81 | })
82 | }
83 |
84 | <-time.After(time.Second * 2)
85 | client.Close()
86 | wg.Wait()
87 | }
88 |
--------------------------------------------------------------------------------
/example/cmd/simple_2/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "time"
9 |
10 | "github.com/ClickHouse/clickhouse-go/v2"
11 |
12 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
13 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
14 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
17 | )
18 |
19 | func main() {
20 | hostname := os.Getenv("CLICKHOUSE_HOST")
21 | username := os.Getenv("CLICKHOUSE_USER")
22 | database := os.Getenv("CLICKHOUSE_DB")
23 | password := os.Getenv("CLICKHOUSE_PASS")
24 |
25 | ctx, cancel := context.WithCancel(context.Background())
26 | defer cancel()
27 |
28 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
29 | Addr: []string{hostname},
30 | Auth: clickhouse.Auth{
31 | Database: database,
32 | Username: username,
33 | Password: password,
34 | },
35 | Settings: clickhouse.Settings{
36 | "max_execution_time": 60,
37 | },
38 | DialTimeout: 5 * time.Second,
39 | Compression: &clickhouse.Compression{
40 | Method: clickhouse.CompressionLZ4,
41 | },
42 | Debug: true,
43 | }, &cx.RuntimeOptions{
44 | WriteTimeout: 15 * time.Second,
45 | })
46 | if err != nil {
47 | log.Panicln(err)
48 | }
49 |
50 | if err = tables.CreateTableNative(ctx, conn); err != nil {
51 | log.Panicln(err)
52 | }
53 |
54 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
55 | clickhousebuffer.WithFlushInterval(1000),
56 | clickhousebuffer.WithBatchSize(5),
57 | clickhousebuffer.WithDebugMode(true),
58 | clickhousebuffer.WithRetry(false),
59 | ))
60 | writeAPI := client.Writer(
61 | ctx,
62 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
63 | cxmem.NewBuffer(client.Options().BatchSize()),
64 | )
65 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
66 | for _, val := range int32s {
67 | writeAPI.WriteRow(&tables.ExampleTable{
68 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
69 | })
70 | }
71 | <-time.After(time.Second * 2)
72 | client.Close()
73 | }
74 |
--------------------------------------------------------------------------------
/example/cmd/simple_safe/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 |
13 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
14 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
18 | )
19 |
20 | func main() {
21 | hostname := os.Getenv("CLICKHOUSE_HOST")
22 | username := os.Getenv("CLICKHOUSE_USER")
23 | database := os.Getenv("CLICKHOUSE_DB")
24 | password := os.Getenv("CLICKHOUSE_PASS")
25 |
26 | ctx, cancel := context.WithCancel(context.Background())
27 | defer cancel()
28 |
29 | ch, conn, err := cxnative.NewClickhouse(ctx, &clickhouse.Options{
30 | Addr: []string{hostname},
31 | Auth: clickhouse.Auth{
32 | Database: database,
33 | Username: username,
34 | Password: password,
35 | },
36 | Settings: clickhouse.Settings{
37 | "max_execution_time": 60,
38 | },
39 | DialTimeout: 5 * time.Second,
40 | Compression: &clickhouse.Compression{
41 | Method: clickhouse.CompressionLZ4,
42 | },
43 | Debug: true,
44 | }, &cx.RuntimeOptions{
45 | WriteTimeout: 15 * time.Second,
46 | })
47 | if err != nil {
48 | log.Panicln(err)
49 | }
50 |
51 | if err = tables.CreateTableNative(ctx, conn); err != nil {
52 | log.Panicln(err)
53 | }
54 |
55 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
56 | clickhousebuffer.WithFlushInterval(1000),
57 | clickhousebuffer.WithBatchSize(5),
58 | clickhousebuffer.WithDebugMode(true),
59 | clickhousebuffer.WithRetry(false),
60 | ))
61 | writeAPI := client.Writer(
62 | ctx,
63 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
64 | cxmem.NewBuffer(client.Options().BatchSize()),
65 | )
66 |
67 | wg := sync.WaitGroup{}
68 | wg.Add(1)
69 | go func() {
70 | errorsCh := writeAPI.Errors()
71 | for chErr := range errorsCh {
72 | log.Printf("clickhouse write error: %s\n", chErr.Error())
73 | }
74 | wg.Done()
75 | }()
76 |
77 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
78 | for _, val := range int32s {
79 | writeAPI.TryWriteRow(&tables.ExampleTable{
80 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
81 | })
82 | }
83 |
84 | <-time.After(time.Second * 2)
85 | client.Close()
86 | wg.Wait()
87 | }
88 |
--------------------------------------------------------------------------------
/example/cmd/simple_sql/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 |
13 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
14 | "github.com/zikwall/clickhouse-buffer/v4/example/pkg/tables"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
17 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxsql"
18 | )
19 |
20 | func main() {
21 | hostname := os.Getenv("CLICKHOUSE_HOST")
22 | username := os.Getenv("CLICKHOUSE_USER")
23 | database := os.Getenv("CLICKHOUSE_DB")
24 | password := os.Getenv("CLICKHOUSE_PASS")
25 |
26 | ctx, cancel := context.WithCancel(context.Background())
27 | defer cancel()
28 |
29 | ch, conn, err := cxsql.NewClickhouse(ctx, &clickhouse.Options{
30 | Addr: []string{hostname},
31 | Auth: clickhouse.Auth{
32 | Database: database,
33 | Username: username,
34 | Password: password,
35 | },
36 | Settings: clickhouse.Settings{
37 | "max_execution_time": 60,
38 | },
39 | DialTimeout: 5 * time.Second,
40 | Compression: &clickhouse.Compression{
41 | Method: clickhouse.CompressionLZ4,
42 | },
43 | Debug: true,
44 | }, &cx.RuntimeOptions{})
45 | if err != nil {
46 | log.Panicln(err)
47 | }
48 |
49 | if err = tables.CreateTableSQL(ctx, conn); err != nil {
50 | log.Panicln(err)
51 | }
52 |
53 | client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
54 | clickhousebuffer.WithFlushInterval(1000),
55 | clickhousebuffer.WithBatchSize(5),
56 | clickhousebuffer.WithDebugMode(true),
57 | clickhousebuffer.WithRetry(false),
58 | ))
59 | writeAPI := client.Writer(
60 | ctx,
61 | cx.NewView(tables.ExampleTableName(), tables.ExampleTableColumns()),
62 | cxmem.NewBuffer(client.Options().BatchSize()),
63 | )
64 |
65 | wg := sync.WaitGroup{}
66 | wg.Add(1)
67 | go func() {
68 | errorsCh := writeAPI.Errors()
69 | for chErr := range errorsCh {
70 | log.Printf("clickhouse write error: %s\n", chErr.Error())
71 | }
72 | wg.Done()
73 | }()
74 |
75 | int32s := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
76 | for _, val := range int32s {
77 | writeAPI.WriteRow(&tables.ExampleTable{
78 | ID: val, UUID: fmt.Sprintf("uuidf %d", val), InsertTS: time.Now(),
79 | })
80 | }
81 |
82 | <-time.After(time.Second * 2)
83 | client.Close()
84 | wg.Wait()
85 | }
86 |
--------------------------------------------------------------------------------
/example/pkg/tables/advanced.go:
--------------------------------------------------------------------------------
1 | package tables
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
9 | "github.com/google/uuid"
10 |
11 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
12 | )
13 |
14 | type AdvancedTable struct {
15 | Col1 uint8
16 | Col2 string
17 | Col3 string
18 | Col4 uuid.UUID
19 | // Map type is not allowed. Set 'allow_experimental_map_type = 1'
20 | // Col5 map[string]uint8
21 | Col6 []string
22 | Col7 []interface{}
23 | Col8 time.Time
24 | Col9 string
25 | Col10 time.Time
26 | Col11 int8
27 | Col12 time.Time
28 | Col13 [][]interface{}
29 | }
30 |
31 | func (a *AdvancedTable) Row() cx.Vector {
32 | return cx.Vector{
33 | a.Col1, a.Col2, a.Col3, a.Col4, a.Col6, a.Col7, a.Col8, a.Col9, a.Col10, a.Col11, a.Col12, a.Col13,
34 | }
35 | }
36 |
37 | func AdvancedTableName() string {
38 | return "default.advanced_example"
39 | }
40 |
41 | func AdvancedTableColumns() []string {
42 | return []string{"Col1", "Col2", "Col3", "Col4", "Col6", "Col7", "Col8", "Col9", "Col10", "Col11", "Col12", "Col13"}
43 | }
44 |
45 | // nolint:gochecknoglobals // it's OK
46 | // SELECT DISTINCT alias_to
47 | // FROM system.data_type_families
48 | //
49 | // ┌─alias_to────┐
50 | // │ │
51 | // │ IPv6 │
52 | // │ IPv4 │
53 | // │ FixedString │
54 | // │ String │
55 | // │ Float64 │
56 | // │ UInt8 │
57 | // │ UInt16 │
58 | // │ DateTime │
59 | // │ Decimal │
60 | // │ UInt32 │
61 | // │ Int8 │
62 | // │ Int16 │
63 | // │ Int32 │
64 | // │ Int64 │
65 | // │ UInt64 │
66 | // │ Float32 │
67 | // └─────────────┘
68 | //
69 | // 17 rows in set. Elapsed: 0.028 sec.
70 | var createAdvancedTableQuery = fmt.Sprintf(`
71 | CREATE TABLE IF NOT EXISTS %s (
72 | Col1 UInt8
73 | , Col2 String
74 | , Col3 FixedString(3)
75 | , Col4 UUID
76 | , Col6 Array(String)
77 | , Col7 Tuple(String, UInt8, Array(String), Tuple(DateTime, UInt32))
78 | , Col8 DateTime
79 | , Col9 Enum('hello' = 1, 'world' = 2)
80 | , Col10 DateTime64
81 | , Col11 Bool
82 | , Col12 Date
83 | , Col13 Array(Tuple(String, UInt8, Array(String), Tuple(DateTime, UInt32)))
84 | ) Engine = Memory
85 | `, AdvancedTableName())
86 |
87 | func CreateAdvancedTableNative(ctx context.Context, conn driver.Conn) error {
88 | return conn.Exec(ctx, createAdvancedTableQuery)
89 | }
90 |
--------------------------------------------------------------------------------
/example/pkg/tables/example.go:
--------------------------------------------------------------------------------
1 | package tables
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
10 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
11 | )
12 |
13 | type ExampleTable struct {
14 | ID int32
15 | UUID string
16 | InsertTS time.Time
17 | }
18 |
19 | func (t *ExampleTable) Row() cx.Vector {
20 | return cx.Vector{t.ID, t.UUID, t.InsertTS.Format(time.RFC822)}
21 | }
22 |
23 | func ExampleTableName() string {
24 | return "default.example"
25 | }
26 |
27 | func ExampleTableColumns() []string {
28 | return []string{"id", "uuid", "insert_ts"}
29 | }
30 |
31 | // nolint:gochecknoglobals // it's OK
32 | var createTableQuery = fmt.Sprintf(`
33 | CREATE TABLE IF NOT EXISTS %s (
34 | id Int32,
35 | uuid String,
36 | insert_ts String
37 | ) engine=Memory
38 | `, ExampleTableName())
39 |
40 | func CreateTableNative(ctx context.Context, conn driver.Conn) error {
41 | return conn.Exec(ctx, createTableQuery)
42 | }
43 |
44 | func CreateTableSQL(ctx context.Context, conn *sql.DB) error {
45 | _, err := conn.ExecContext(ctx, createTableQuery)
46 | return err
47 | }
48 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/zikwall/clickhouse-buffer/v4
2 |
3 | go 1.18
4 |
5 | require (
6 | github.com/ClickHouse/clickhouse-go/v2 v2.15.0
7 | github.com/Rican7/retry v0.3.1
8 | github.com/go-redis/redis/v8 v8.11.5
9 | github.com/google/uuid v1.3.1
10 | )
11 |
12 | require (
13 | github.com/ClickHouse/ch-go v0.58.2 // indirect
14 | github.com/andybalholm/brotli v1.0.6 // indirect
15 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
16 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
17 | github.com/go-faster/city v1.0.1 // indirect
18 | github.com/go-faster/errors v0.6.1 // indirect
19 | github.com/klauspost/compress v1.16.7 // indirect
20 | github.com/paulmach/orb v0.10.0 // indirect
21 | github.com/pierrec/lz4/v4 v4.1.18 // indirect
22 | github.com/pkg/errors v0.9.1 // indirect
23 | github.com/segmentio/asm v1.2.0 // indirect
24 | github.com/shopspring/decimal v1.3.1 // indirect
25 | go.opentelemetry.io/otel v1.19.0 // indirect
26 | go.opentelemetry.io/otel/trace v1.19.0 // indirect
27 | golang.org/x/sys v0.13.0 // indirect
28 | golang.org/x/text v0.13.0 // indirect
29 | gopkg.in/yaml.v3 v3.0.1 // indirect
30 | )
31 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
2 | github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
3 | github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
4 | github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ=
5 | github.com/Rican7/retry v0.3.1 h1:scY4IbO8swckzoA/11HgBwaZRJEyY9vaNJshcdhp1Mc=
6 | github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0=
7 | github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
8 | github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
9 | github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
10 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
13 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
14 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
16 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
17 | github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
18 | github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
19 | github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
20 | github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
21 | github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
22 | github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
23 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
24 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
25 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
26 | github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
27 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
28 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
29 | github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
30 | github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
31 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
32 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
33 | github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
34 | github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
35 | github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
36 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
37 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
38 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
39 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
40 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
41 | github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
42 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
43 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
44 | github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
45 | github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
46 | github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
47 | github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
48 | github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
49 | github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
50 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
51 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
52 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
53 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
54 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
55 | github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
56 | github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
57 | github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
58 | github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
59 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
60 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
61 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
62 | github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
63 | github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
64 | github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
65 | github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
66 | github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
67 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
68 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
69 | go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
70 | go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
71 | go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
72 | go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
73 | go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
74 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
75 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
76 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
77 | golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
78 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
79 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
80 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
81 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
82 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
83 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
84 | golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
85 | golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
86 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
87 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
88 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
89 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
90 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
91 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
92 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
93 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
94 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
95 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
96 | golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
97 | golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
98 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
99 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
100 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
101 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
102 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
103 | golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
104 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
105 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
106 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
107 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
108 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
109 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
110 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
111 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
112 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
113 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
114 | google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
115 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
116 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
117 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
118 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
119 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
120 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
121 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
122 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
123 |
--------------------------------------------------------------------------------
/src/buffer/cxmem/buffer.go:
--------------------------------------------------------------------------------
1 | package cxmem
2 |
3 | import (
4 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
5 | )
6 |
7 | type memory struct {
8 | buffer []cx.Vector
9 | size uint
10 | }
11 |
12 | func NewBuffer(bufferSize uint) cx.Buffer {
13 | return &memory{
14 | buffer: make([]cx.Vector, 0, bufferSize+1),
15 | size: bufferSize + 1,
16 | }
17 | }
18 |
19 | func (i *memory) Write(row cx.Vector) {
20 | i.buffer = append(i.buffer, row)
21 | }
22 |
23 | func (i *memory) Read() []cx.Vector {
24 | snapshot := make([]cx.Vector, len(i.buffer))
25 | copy(snapshot, i.buffer)
26 | return snapshot
27 | }
28 |
29 | func (i *memory) Len() int {
30 | return len(i.buffer)
31 | }
32 |
33 | func (i *memory) Flush() {
34 | i.buffer = i.buffer[:0]
35 | }
36 |
--------------------------------------------------------------------------------
/src/buffer/cxredis/buffer.go:
--------------------------------------------------------------------------------
1 | package cxredis
2 |
3 | import (
4 | "log"
5 | "sync/atomic"
6 |
7 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
8 | )
9 |
10 | func (r *redisBuffer) Write(row cx.Vector) {
11 | var err error
12 | var buf []byte
13 | if buf, err = row.Encode(); err != nil {
14 | log.Printf("redis buffer value encode err: %v\n", err.Error())
15 | return
16 | }
17 | if err = r.client.RPush(r.context, r.bucket, buf).Err(); err != nil {
18 | if !r.isContextClosedErr(err) {
19 | log.Printf("redis buffer write err: %v\n", err.Error())
20 | }
21 | return
22 | }
23 | atomic.AddInt64(&r.size, 1)
24 | }
25 |
26 | func (r *redisBuffer) Read() []cx.Vector {
27 | values := r.client.LRange(r.context, r.bucket, 0, atomic.LoadInt64(&r.size)).Val()
28 | slices := make([]cx.Vector, 0, len(values))
29 | for _, value := range values {
30 | if v, err := cx.VectorDecoded(value).Decode(); err == nil {
31 | slices = append(slices, v)
32 | } else {
33 | log.Printf("redis buffer read err: %v\n", err.Error())
34 | }
35 | }
36 | return slices
37 | }
38 |
39 | func (r *redisBuffer) Len() int {
40 | return int(atomic.LoadInt64(&r.size))
41 | }
42 |
43 | func (r *redisBuffer) Flush() {
44 | r.client.LTrim(r.context, r.bucket, r.bufferSize, -1).Val()
45 | atomic.StoreInt64(&r.size, 0)
46 | }
47 |
--------------------------------------------------------------------------------
/src/buffer/cxredis/connection.go:
--------------------------------------------------------------------------------
1 | package cxredis
2 |
3 | import (
4 | "context"
5 | "errors"
6 |
7 | "github.com/go-redis/redis/v8"
8 |
9 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
10 | )
11 |
12 | const prefix = "ch_buffer"
13 |
14 | func key(bucket string) string {
15 | return prefix + ":" + bucket
16 | }
17 |
18 | type redisBuffer struct {
19 | client *redis.Client
20 | context context.Context
21 | bucket string
22 | bufferSize int64
23 | size int64
24 | }
25 |
26 | func NewBuffer(ctx context.Context, rdb *redis.Client, bucket string, bufferSize uint) (cx.Buffer, error) {
27 | return &redisBuffer{
28 | client: rdb,
29 | context: ctx,
30 | bucket: key(bucket),
31 | bufferSize: int64(bufferSize),
32 | 		size:       rdb.LLen(ctx, key(bucket)).Val(),
33 | }, nil
34 | }
35 |
36 | func (r *redisBuffer) isContextClosedErr(err error) bool {
37 | return errors.Is(err, redis.ErrClosed) && r.context.Err() != nil && errors.Is(r.context.Err(), context.Canceled)
38 | }
39 |
--------------------------------------------------------------------------------
/src/buffer/cxsyncmem/buffer.go:
--------------------------------------------------------------------------------
1 | package cxsyncmem
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
7 | )
8 |
9 | // memory buffer used in tests; access is guarded by a mutex
10 | type memory struct {
11 | buffer []cx.Vector
12 | size uint
13 | mu *sync.RWMutex
14 | }
15 |
16 | func NewBuffer(bufferSize uint) cx.Buffer {
17 | return &memory{
18 | buffer: make([]cx.Vector, 0, bufferSize+1),
19 | size: bufferSize + 1,
20 | mu: &sync.RWMutex{},
21 | }
22 | }
23 |
24 | func (i *memory) Write(row cx.Vector) {
25 | i.mu.Lock()
26 | i.buffer = append(i.buffer, row)
27 | i.mu.Unlock()
28 | }
29 |
30 | func (i *memory) Read() []cx.Vector {
31 | i.mu.RLock()
32 | snapshot := make([]cx.Vector, len(i.buffer))
33 | copy(snapshot, i.buffer)
34 | i.mu.RUnlock()
35 | return snapshot
36 | }
37 |
38 | func (i *memory) Len() int {
39 | i.mu.RLock()
40 | defer i.mu.RUnlock()
41 | return len(i.buffer)
42 | }
43 |
44 | func (i *memory) Flush() {
45 | i.mu.Lock()
46 | i.buffer = i.buffer[:0]
47 | i.mu.Unlock()
48 | }
49 |
--------------------------------------------------------------------------------
/src/cx/buffer.go:
--------------------------------------------------------------------------------
1 | package cx
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | )
7 |
8 | // Buffer is the interface for a data buffer (temporary storage).
9 | // Implementing this interface is enough to plug your own temporary storage into the client
10 | type Buffer interface {
11 | Write(Vector)
12 | Read() []Vector
13 | Len() int
14 | Flush()
15 | }
16 |
17 | // Vectorable describes a row that knows how to lay out its fields in the correct column order
18 | // before being sent to Clickhouse
19 | type Vectorable interface {
20 | Row() Vector
21 | }
22 |
23 | // Vector is the basic structure used for writing: it is nothing more than a slice of empty interfaces
24 | type Vector []interface{}
25 |
26 | // Encode turns the Vector type into an array of bytes.
27 | // Encode is used to serialize rows for storage in remote buffers, such as the Redis-backed buffer
28 | func (v Vector) Encode() ([]byte, error) {
29 | var buf bytes.Buffer
30 | err := gob.NewEncoder(&buf).Encode(v)
31 | if err != nil {
32 | return nil, err
33 | }
34 | return buf.Bytes(), nil
35 | }
36 |
37 | // VectorDecoded is a string type that holds the binary (gob-encoded) representation of a Vector
38 | type VectorDecoded string
39 |
40 | // Decode deserializes the encoded bytes back into a Vector
41 | func (d VectorDecoded) Decode() (Vector, error) {
42 | var v Vector
43 | err := gob.NewDecoder(bytes.NewReader([]byte(d))).Decode(&v)
44 | if err != nil {
45 | return nil, err
46 | }
47 | return v, nil
48 | }
49 |
--------------------------------------------------------------------------------
/src/cx/buffer_batch.go:
--------------------------------------------------------------------------------
1 | package cx
2 |
3 | // Batch holds the rows to be sent as a single batch
4 | type Batch struct {
5 | rows []Vector
6 | }
7 |
8 | // NewBatch creates a new batch
9 | func NewBatch(rows []Vector) *Batch {
10 | return &Batch{
11 | rows: rows,
12 | }
13 | }
14 |
15 | func (b *Batch) Rows() []Vector {
16 | return b.rows
17 | }
18 |
--------------------------------------------------------------------------------
/src/cx/db.go:
--------------------------------------------------------------------------------
1 | package cx
2 |
3 | import (
4 | "context"
5 | )
6 |
7 | // View is a representation of an entity (table) in the Clickhouse database
8 | type View struct {
9 | Name string
10 | Columns []string
11 | }
12 |
13 | // NewView returns a View
14 | func NewView(name string, columns []string) View {
15 | return View{Name: name, Columns: columns}
16 | }
17 |
18 | // Clickhouse is the base interface on which the top-level Client API and all of its child Writers are built
19 | type Clickhouse interface {
20 | Insert(context.Context, View, []Vector) (uint64, error)
21 | Close() error
22 | }
23 |
--------------------------------------------------------------------------------
/src/cx/log.go:
--------------------------------------------------------------------------------
1 | package cx
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | )
7 |
8 | type Logger interface {
9 | Log(message interface{})
10 | Logf(format string, v ...interface{})
11 | }
12 |
13 | type defaultLogger struct{}
14 |
15 | func NewDefaultLogger() Logger {
16 | d := &defaultLogger{}
17 | return d
18 | }
19 |
20 | func (d *defaultLogger) Log(message interface{}) {
21 | log.Printf("[CLICKHOUSE BUFFER] %s \n", message)
22 | }
23 |
24 | func (d *defaultLogger) Logf(message string, v ...interface{}) {
25 | d.Log(fmt.Sprintf(message, v...))
26 | }
27 |
--------------------------------------------------------------------------------
/src/cx/support.go:
--------------------------------------------------------------------------------
1 | package cx
2 |
3 | import (
4 | "errors"
5 | "time"
6 |
7 | "github.com/ClickHouse/clickhouse-go/v2"
8 | )
9 |
10 | const defaultInsertDurationTimeout = time.Millisecond * 15000
11 |
12 | // getDefaultInsertDurationTimeout returns a hard-coded default; the intention is to move away from this decision in the near future
13 | func getDefaultInsertDurationTimeout() time.Duration {
14 | return defaultInsertDurationTimeout
15 | }
16 |
17 | // nolint:gochecknoglobals // it's OK, readonly variable
18 | // noRetryErrors lists Clickhouse error codes for which a resend is pointless;
19 | // checking the code is necessary to keep the retry queue finite.
20 | // see: https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/ErrorCodes.cpp
21 | var noRetryErrors = map[int32]struct{}{
22 | 1: {}, // UNSUPPORTED_METHOD
23 | 2: {}, // UNSUPPORTED_PARAMETER
24 | 20: {}, // NUMBER_OF_COLUMNS_DOESNT_MATCH
25 | 60: {}, // UNKNOWN_TABLE
26 | 62: {}, // SYNTAX_ERROR
27 | 80: {}, // INCORRECT_QUERY
28 | 81: {}, // UNKNOWN_DATABASE
29 | 108: {}, // NO_DATA_TO_INSERT
30 | 158: {}, // TOO_MANY_ROWS
31 | 161: {}, // TOO_MANY_COLUMNS
32 | 164: {}, // READONLY
33 | 192: {}, // UNKNOWN_USER,
34 | 193: {}, // WRONG_PASSWORD
35 | 195: {}, // IP_ADDRESS_NOT_ALLOWED
36 | 229: {}, // QUERY_IS_TOO_LARGE
37 | 241: {}, // MEMORY_LIMIT_EXCEEDED
38 | 242: {}, // TABLE_IS_READ_ONLY
39 | 291: {}, // DATABASE_ACCESS_DENIED
40 | 372: {}, // SESSION_NOT_FOUND
41 | 373: {}, // SESSION_IS_LOCKED
42 | }
43 |
44 | // IsResendAvailable checks whether it is possible to resend undelivered messages to the Clickhouse database
45 | // based on the error received from Clickhouse
46 | func IsResendAvailable(err error) bool {
47 | var e *clickhouse.Exception
48 | if errors.As(err, &e) {
49 | if _, ok := noRetryErrors[e.Code]; ok {
50 | return false
51 | }
52 | }
53 | return true
54 | }
55 |
56 | type RuntimeOptions struct {
57 | WriteTimeout time.Duration
58 | }
59 |
60 | func (r *RuntimeOptions) GetWriteTimeout() time.Duration {
61 | if r.WriteTimeout != 0 {
62 | return r.WriteTimeout
63 | }
64 | return getDefaultInsertDurationTimeout()
65 | }
66 |
--------------------------------------------------------------------------------
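The sketch below is illustrative only (it is not a file from the repository) and shows how IsResendAvailable separates fatal Clickhouse errors from transient ones; the error codes are taken from the noRetryErrors table above.

// hedged example: not part of the library
package main

import (
	"fmt"

	"github.com/ClickHouse/clickhouse-go/v2"

	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
)

func main() {
	// UNKNOWN_TABLE (60) is in the no-retry list: resending the same batch can never succeed
	fatal := &clickhouse.Exception{Code: 60, Name: "UNKNOWN_TABLE"}
	fmt.Println(cx.IsResendAvailable(fatal)) // false

	// a code that is not listed is treated as transient, so the batch may be queued for retry
	transient := &clickhouse.Exception{Code: 1002, Name: "UNKNOWN_EXCEPTION"}
	fmt.Println(cx.IsResendAvailable(transient)) // true
}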
/src/db/cxnative/impl.go:
--------------------------------------------------------------------------------
1 | package cxnative
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "strings"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
13 |
14 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
15 | )
16 |
17 | type clickhouseNative struct {
18 | conn driver.Conn
19 | insertTimeout time.Duration
20 | }
21 |
22 | // creates a template for preparing the query
23 | func nativeInsertQuery(table string, cols []string) string {
24 | prepared := fmt.Sprintf("INSERT INTO %s (%s)", table, strings.Join(cols, ", "))
25 | return prepared
26 | }
27 |
28 | func (c *clickhouseNative) Insert(ctx context.Context, view cx.View, rows []cx.Vector) (uint64, error) {
29 | var err error
30 | timeoutContext, cancel := context.WithTimeout(ctx, c.insertTimeout)
31 | defer cancel()
32 | batch, err := c.conn.PrepareBatch(timeoutContext, nativeInsertQuery(view.Name, view.Columns))
33 | if err != nil {
34 | return 0, err
35 | }
36 | var affected uint64
37 | for _, row := range rows {
38 | if err = batch.Append(row...); err != nil {
39 | log.Println(err)
40 | } else {
41 | affected++
42 | }
43 | }
44 | if err = batch.Send(); err != nil {
45 | return 0, err
46 | }
47 | return affected, nil
48 | }
49 |
50 | func (c *clickhouseNative) Close() error {
51 | return c.conn.Close()
52 | }
53 |
54 | func NewClickhouse(
55 | ctx context.Context,
56 | options *clickhouse.Options,
57 | runtime *cx.RuntimeOptions,
58 | ) (
59 | cx.Clickhouse,
60 | driver.Conn,
61 | error,
62 | ) {
63 | conn, err := clickhouse.Open(options)
64 | if err != nil {
65 | return nil, nil, err
66 | }
67 | if err = conn.Ping(clickhouse.Context(ctx,
68 | clickhouse.WithSettings(clickhouse.Settings{
69 | "max_block_size": 10,
70 | }),
71 | clickhouse.WithProgress(func(p *clickhouse.Progress) {
72 | fmt.Println("progress: ", p)
73 | }),
74 | )); err != nil {
75 | var e *clickhouse.Exception
76 | if errors.As(err, &e) {
77 | fmt.Printf("catch exception [%d] %s \n%s\n", e.Code, e.Message, e.StackTrace)
78 | }
79 | return nil, nil, err
80 | }
81 | return &clickhouseNative{
82 | conn: conn,
83 | insertTimeout: runtime.GetWriteTimeout(),
84 | }, conn, nil
85 | }
86 |
87 | func NewClickhouseWithConn(conn driver.Conn, runtime *cx.RuntimeOptions) cx.Clickhouse {
88 | return &clickhouseNative{
89 | conn: conn,
90 | insertTimeout: runtime.GetWriteTimeout(),
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
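A hedged sketch (not part of the repository; the address and table are placeholders) of NewClickhouseWithConn: it wraps an already opened driver.Conn instead of opening a new one, which is useful when the application manages the connection itself.

package main

import (
	"context"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2"

	clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
	"github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxmem"
	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
	"github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
)

func main() {
	ctx := context.Background()
	// the application opens and owns the connection; "127.0.0.1:9000" is a placeholder address
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"127.0.0.1:9000"}})
	if err != nil {
		panic(err)
	}
	// wrap the existing connection; only the write timeout is taken from RuntimeOptions
	ch := cxnative.NewClickhouseWithConn(conn, &cx.RuntimeOptions{WriteTimeout: 15 * time.Second})
	client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
		clickhousebuffer.WithFlushInterval(1000),
		clickhousebuffer.WithBatchSize(5),
	))
	defer client.Close()
	// the writer is used exactly as in the examples above
	_ = client.Writer(ctx, cx.NewView("default.example", []string{"id", "uuid", "insert_ts"}),
		cxmem.NewBuffer(client.Options().BatchSize()))
}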
/src/db/cxsql/impl.go:
--------------------------------------------------------------------------------
1 | package cxsql
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "errors"
7 | "fmt"
8 | "log"
9 | "strings"
10 | "time"
11 |
12 | "github.com/ClickHouse/clickhouse-go/v2"
13 |
14 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
15 | )
16 |
17 | type clickhouseSQL struct {
18 | conn *sql.DB
19 | insertTimeout time.Duration
20 | }
21 |
22 | func (c *clickhouseSQL) Close() error {
23 | return c.conn.Close()
24 | }
25 |
26 | // creates a template for preparing the query
27 | func insertQuery(table string, cols []string) string {
28 | prepared := fmt.Sprintf("INSERT INTO %s (%s)", table, strings.Join(cols, ", "))
29 | return prepared
30 | }
31 |
32 | // Insert writes a batch of rows. The client library currently supports only native byte blocks,
33 | // not the JSONEachRow format, and there is no simple way to execute an already prepared statement.
34 | // The whole batch is therefore submitted through so-called "transactions",
35 | // although Clickhouse itself does not support them: it is purely a client-side mechanism for preparing the request
36 | func (c *clickhouseSQL) Insert(ctx context.Context, view cx.View, rows []cx.Vector) (uint64, error) {
37 | tx, err := c.conn.Begin()
38 | if err != nil {
39 | return 0, err
40 | }
41 | stmt, err := tx.Prepare(insertQuery(view.Name, view.Columns))
42 | if err != nil {
43 | // if Rollback is not called here, the statement leaks memory and a goroutine;
44 | // such a leak can occur when the table is inaccessible or does not exist
45 | if rErr := tx.Rollback(); rErr != nil {
46 | return 0, fmt.Errorf("rollback failed: %w with previous error: %s", rErr, err.Error())
47 | }
48 | return 0, err
49 | }
50 | defer func() {
51 | if err = stmt.Close(); err != nil {
52 | log.Println(err)
53 | }
54 | }()
55 |
56 | timeoutContext, cancel := context.WithTimeout(ctx, c.insertTimeout)
57 | defer cancel()
58 |
59 | var affected uint64
60 | for _, row := range rows {
61 | // rows affected is not supported by the driver
62 | if _, err = stmt.ExecContext(timeoutContext, row...); err == nil {
63 | affected++
64 | } else {
65 | log.Println(err)
66 | }
67 | }
68 | if err = tx.Commit(); err != nil {
69 | return 0, err
70 | }
71 | return affected, nil
72 | }
73 |
74 | func NewClickhouse(
75 | ctx context.Context,
76 | options *clickhouse.Options,
77 | runtime *cx.RuntimeOptions,
78 | ) (
79 | cx.Clickhouse,
80 | *sql.DB,
81 | error,
82 | ) {
83 | conn := clickhouse.OpenDB(options)
84 | if err := conn.PingContext(clickhouse.Context(ctx,
85 | clickhouse.WithSettings(clickhouse.Settings{
86 | "max_block_size": 10,
87 | }),
88 | clickhouse.WithProgress(func(p *clickhouse.Progress) {
89 | fmt.Println("progress: ", p)
90 | }),
91 | )); err != nil {
92 | var e *clickhouse.Exception
93 | if errors.As(err, &e) {
94 | fmt.Printf("catch exception [%d] %s \n%s\n", e.Code, e.Message, e.StackTrace)
95 | }
96 | return nil, nil, err
97 | }
98 | return &clickhouseSQL{
99 | conn: conn,
100 | insertTimeout: runtime.GetWriteTimeout(),
101 | }, conn, nil
102 | }
103 |
104 | func NewClickhouseWithConn(conn *sql.DB, runtime *cx.RuntimeOptions) cx.Clickhouse {
105 | return &clickhouseSQL{
106 | conn: conn,
107 | insertTimeout: runtime.GetWriteTimeout(),
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/src/retry/retry.go:
--------------------------------------------------------------------------------
1 | package retry
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "github.com/Rican7/retry"
8 | "github.com/Rican7/retry/backoff"
9 | "github.com/Rican7/retry/strategy"
10 |
11 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
12 | )
13 |
14 | const (
15 | defaultRetryChanSize = 100
16 | defaultCycloCount = 2
17 | )
18 |
19 | const (
20 | defaultAttemptLimit = 3
21 | defaultFactor = 100 * time.Millisecond
22 | )
23 |
24 | const (
25 | successfully = "(DEBUG): SUCCESSFULLY handle retry"
26 | successfullyAttempt = "(DEBUG): SUCCESSFULLY handle records. Count attempts: %d, count affected: %d"
27 | attemptError = "(DEBUG WARNING): attempt error"
28 | limitOfRetries = "(WARNING): limit of retries has been reached"
29 | queueIsFull = "(ERROR): queue for repeating messages is full..."
30 | )
31 |
32 | // nolint:lll // it's not important here
33 | const (
34 | runListenerMsg = "(DEBUG): worker has been started listening for data resending operations"
35 | stopListenerMsg = "(DEBUG): worker has been stopped listening for data resending operations"
36 | handleRetryMsg = "(DEBUG): receive retry message, handle retry packet..."
37 | packetIsLost = "(ERROR): packet couldn't be processed within %d retry cycles, packet was removed from queue..."
38 | packetResend = "(DEBUG): packet will be sent for resend, cycles left %d"
39 | )
40 |
41 | type Retryable interface {
42 | Retry(packet *Packet)
43 | Metrics() (uint64, uint64, uint64)
44 | }
45 |
46 | type Queueable interface {
47 | Queue(packet *Packet)
48 | Retries() <-chan *Packet
49 | }
50 |
51 | type Closable interface {
52 | Close() error
53 | CloseMessage() string
54 | }
55 |
56 | type Packet struct {
57 | view cx.View
58 | batch *cx.Batch
59 | tryCount uint8
60 | }
61 |
62 | func NewPacket(view cx.View, batch *cx.Batch) *Packet {
63 | return &Packet{
64 | view: view, batch: batch,
65 | }
66 | }
67 |
68 | type retryImpl struct {
69 | logger cx.Logger
70 | writer Writeable
71 | engine Queueable
72 | isDebug bool
73 | limit strategy.Strategy
74 | backoff strategy.Strategy
75 | successfully Countable
76 | failed Countable
77 | progress Countable
78 | }
79 |
80 | func NewRetry(ctx context.Context, engine Queueable, writer Writeable, logger cx.Logger, isDebug bool) Retryable {
81 | r := &retryImpl{
82 | engine: engine,
83 | writer: writer,
84 | logger: logger,
85 | isDebug: isDebug,
86 | limit: strategy.Limit(defaultAttemptLimit),
87 | backoff: strategy.Backoff(backoff.Fibonacci(defaultFactor)),
88 | successfully: newUint64Counter(),
89 | failed: newUint64Counter(),
90 | progress: newUint64Counter(),
91 | }
92 | go r.backoffRetry(ctx)
93 | return r
94 | }
95 |
96 | func (r *retryImpl) Metrics() (successfully, failed, progress uint64) {
97 | return r.successfully.Val(), r.failed.Val(), r.progress.Val()
98 | }
99 |
100 | func (r *retryImpl) Retry(packet *Packet) {
101 | if value := r.progress.Inc(); value >= defaultRetryChanSize {
102 | r.logger.Log(queueIsFull)
103 | return
104 | }
105 | r.engine.Queue(packet)
106 | }
107 |
108 | func (r *retryImpl) backoffRetry(ctx context.Context) {
109 | if r.isDebug {
110 | r.logger.Log(runListenerMsg)
111 | }
112 | defer func() {
113 | if closable, ok := r.engine.(Closable); ok {
114 | r.logger.Log(closable.CloseMessage())
115 | if err := closable.Close(); err != nil {
116 | r.logger.Log(err)
117 | }
118 | }
119 | if r.isDebug {
120 | r.logger.Log(stopListenerMsg)
121 | }
122 | }()
123 | retries := r.engine.Retries()
124 | for {
125 | select {
126 | case <-ctx.Done():
127 | return
128 | case packet := <-retries:
129 | r.handlePacket(ctx, packet)
130 | }
131 | }
132 | }
133 |
134 | func (r *retryImpl) action(ctx context.Context, view cx.View, btc *cx.Batch) retry.Action {
135 | return func(attempt uint) error {
136 | affected, err := r.writer.Write(ctx, view, btc)
137 | if err != nil {
138 | if r.isDebug {
139 | r.logger.Logf("%s: %s", attemptError, err.Error())
140 | }
141 | return err
142 | }
143 | if r.isDebug {
144 | r.logger.Logf(successfullyAttempt, attempt, affected)
145 | }
146 | return nil
147 | }
148 | }
149 |
150 | // if the error is not in the no-retry list
151 | // and the number of retry cycles has not been exhausted,
152 | // re-queue the packet for processing
153 | func (r *retryImpl) resend(packet *Packet, err error) bool {
154 | if (packet.tryCount < defaultCycloCount) && cx.IsResendAvailable(err) {
155 | r.Retry(&Packet{
156 | view: packet.view,
157 | batch: packet.batch,
158 | tryCount: packet.tryCount + 1,
159 | })
160 | if r.isDebug {
161 | r.logger.Logf(packetResend, defaultCycloCount-packet.tryCount-1)
162 | }
163 | return true
164 | }
165 | return false
166 | }
167 |
168 | func (r *retryImpl) handlePacket(ctx context.Context, packet *Packet) {
169 | r.progress.Dec()
170 | if r.isDebug {
171 | r.logger.Log(handleRetryMsg)
172 | }
173 | if err := retry.Retry(r.action(ctx, packet.view, packet.batch), r.limit, r.backoff); err != nil {
174 | r.logger.Logf("%s: %v", limitOfRetries, err)
175 | if !r.resend(packet, err) {
176 | // otherwise, increase the failed counter and report in the logs that the packet is permanently lost
177 | r.failed.Inc()
178 | r.logger.Logf(packetIsLost, defaultCycloCount)
179 | }
180 | } else {
181 | // mark packet as successfully processed
182 | r.successfully.Inc()
183 | if r.isDebug {
184 | r.logger.Log(successfully)
185 | }
186 | }
187 | }
188 |
--------------------------------------------------------------------------------
/src/retry/retry_memory.go:
--------------------------------------------------------------------------------
1 | package retry
2 |
3 | type imMemoryQueueEngine struct {
4 | retries chan *Packet
5 | }
6 |
7 | func NewImMemoryQueueEngine() Queueable {
8 | r := &imMemoryQueueEngine{
9 | retries: make(chan *Packet, defaultRetryChanSize),
10 | }
11 | return r
12 | }
13 |
14 | func (r *imMemoryQueueEngine) Queue(packet *Packet) {
15 | r.retries <- packet
16 | }
17 |
18 | func (r *imMemoryQueueEngine) Retries() <-chan *Packet {
19 | return r.retries
20 | }
21 |
22 | func (r *imMemoryQueueEngine) Close() error {
23 | if r.retries != nil {
24 | close(r.retries)
25 | r.retries = nil
26 | }
27 | return nil
28 | }
29 |
30 | func (r *imMemoryQueueEngine) CloseMessage() string {
31 | return "close in-memory queue engine"
32 | }
33 |
--------------------------------------------------------------------------------
/src/retry/retry_stat.go:
--------------------------------------------------------------------------------
1 | package retry
2 |
3 | import "sync/atomic"
4 |
5 | type Countable interface {
6 | Inc() uint64
7 | Dec() uint64
8 | Val() uint64
9 | }
10 |
11 | type uint64Counter struct {
12 | val uint64
13 | }
14 |
15 | func newUint64Counter() Countable {
16 | return &uint64Counter{}
17 | }
18 |
19 | func (u *uint64Counter) Inc() uint64 {
20 | return atomic.AddUint64(&u.val, 1)
21 | }
22 |
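// Dec atomically decrements the counter: adding ^uint64(0) (all bits set) wraps around and subtracts one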
23 | func (u *uint64Counter) Dec() uint64 {
24 | return atomic.AddUint64(&u.val, ^uint64(0))
25 | }
26 |
27 | func (u *uint64Counter) Val() uint64 {
28 | return atomic.LoadUint64(&u.val)
29 | }
30 |
--------------------------------------------------------------------------------
/src/retry/retry_writer.go:
--------------------------------------------------------------------------------
1 | package retry
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
7 | )
8 |
9 | type Writeable interface {
10 | Write(ctx context.Context, view cx.View, batch *cx.Batch) (uint64, error)
11 | }
12 |
13 | type defaultWriter struct {
14 | conn cx.Clickhouse
15 | }
16 |
17 | func NewDefaultWriter(conn cx.Clickhouse) Writeable {
18 | w := &defaultWriter{
19 | conn: conn,
20 | }
21 | return w
22 | }
23 |
24 | func (w *defaultWriter) Write(ctx context.Context, view cx.View, batch *cx.Batch) (uint64, error) {
25 | affected, err := w.conn.Insert(ctx, view, batch.Rows())
26 | return affected, err
27 | }
28 |
--------------------------------------------------------------------------------
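An illustrative sketch (not a repository file) of how Queueable, Writeable and Retryable compose: a failed batch is wrapped into a Packet and handed to Retry, and the background worker started by NewRetry replays it through the writer with limited, backed-off attempts. The Client presumably wires these pieces up itself when WithRetry(true) is set; the stub connection below is hypothetical.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
	"github.com/zikwall/clickhouse-buffer/v4/src/retry"
)

// stubClickhouse is a hypothetical cx.Clickhouse that always succeeds
type stubClickhouse struct{}

func (s *stubClickhouse) Insert(_ context.Context, _ cx.View, rows []cx.Vector) (uint64, error) {
	return uint64(len(rows)), nil
}

func (s *stubClickhouse) Close() error { return nil }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	queue := retry.NewImMemoryQueueEngine()             // Queueable: in-memory channel of packets
	writer := retry.NewDefaultWriter(&stubClickhouse{}) // Writeable: delegates to cx.Clickhouse.Insert
	r := retry.NewRetry(ctx, queue, writer, cx.NewDefaultLogger(), true)

	view := cx.NewView("default.example", []string{"id", "uuid", "insert_ts"})
	batch := cx.NewBatch([]cx.Vector{{1, "uuid", time.Now().Format(time.RFC822)}})
	// queue the packet; the background worker replays it
	r.Retry(retry.NewPacket(view, batch))

	<-time.After(time.Second)
	ok, failed, progress := r.Metrics()
	fmt.Println("retried ok:", ok, "failed:", failed, "in progress:", progress)
}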
/tests/buffer_row_test.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | "time"
7 |
8 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
9 | )
10 |
11 | type RowTestMock struct {
12 | id int
13 | uuid string
14 | insertTS time.Time
15 | }
16 |
17 | func (vm RowTestMock) Row() cx.Vector {
18 | return cx.Vector{vm.id, vm.uuid, vm.insertTS.Format(time.RFC822)}
19 | }
20 |
21 | func TestRow(t *testing.T) {
22 | t.Run("it should be success encode to string", func(t *testing.T) {
23 | slice := RowTestMock{
24 | id: 1,
25 | uuid: "uuid_here",
26 | insertTS: time.Now(),
27 | }.Row()
28 | encoded, err := slice.Encode()
29 | if err != nil {
30 | t.Fatal(err)
31 | }
32 | value, err := cx.VectorDecoded(encoded).Decode()
33 | if err != nil {
34 | t.Fatal(err)
35 | }
36 | if len(value) != 3 {
37 | t.Fatal("Failed, expected to get three columns")
38 | }
39 | types := []reflect.Kind{reflect.Int, reflect.String, reflect.String}
40 | for i, col := range value {
41 | if t1 := reflect.TypeOf(col).Kind(); t1 != types[i] {
42 | t.Fatalf("Failed, expected to get int type, received %s", t1)
43 | }
44 | }
45 | if value[0] != 1 || value[1] != "uuid_here" {
46 | t.Fatal("Failed, expected to get [0] => '1' and [1] => 'uuid_here'")
47 | }
48 | })
49 | }
50 |
--------------------------------------------------------------------------------
/tests/client_impl_test.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 | "sync/atomic"
8 | "testing"
9 | "time"
10 |
11 | "github.com/ClickHouse/clickhouse-go/v2"
12 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
13 |
14 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
15 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxsyncmem"
16 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
17 | )
18 |
19 | var (
20 | errClickhouseUnknownException = &clickhouse.Exception{
21 | Code: 1002,
22 | Name: "UNKNOWN_EXCEPTION",
23 | Message: "UNKNOWN_EXCEPTION",
24 | StackTrace: "UNKNOWN_EXCEPTION == UNKNOWN_EXCEPTION",
25 | }
26 | errClickhouseUnknownTableException = &clickhouse.Exception{
27 | Code: 60,
28 | Name: "UNKNOWN_TABLE",
29 | Message: "UNKNOWN_TABLE",
30 | StackTrace: "UNKNOWN_TABLE == UNKNOWN_TABLE",
31 | }
32 | )
33 |
34 | type ClickhouseImplMock struct{}
35 |
36 | func (c *ClickhouseImplMock) Insert(_ context.Context, _ cx.View, _ []cx.Vector) (uint64, error) {
37 | return 0, nil
38 | }
39 |
40 | func (c *ClickhouseImplMock) Close() error {
41 | return nil
42 | }
43 |
44 | func (c *ClickhouseImplMock) Conn() driver.Conn {
45 | return nil
46 | }
47 |
48 | type ClickhouseImplErrMock struct{}
49 |
50 | func (ce *ClickhouseImplErrMock) Insert(_ context.Context, _ cx.View, _ []cx.Vector) (uint64, error) {
51 | return 0, errClickhouseUnknownException
52 | }
53 |
54 | func (ce *ClickhouseImplErrMock) Close() error {
55 | return nil
56 | }
57 |
58 | func (ce *ClickhouseImplErrMock) Conn() driver.Conn {
59 | return nil
60 | }
61 |
62 | type ClickhouseImplErrMockFailed struct{}
63 |
64 | func (ce *ClickhouseImplErrMockFailed) Insert(_ context.Context, _ cx.View, _ []cx.Vector) (uint64, error) {
65 | return 0, errClickhouseUnknownTableException
66 | }
67 |
68 | func (ce *ClickhouseImplErrMockFailed) Close() error {
69 | return nil
70 | }
71 |
72 | func (ce *ClickhouseImplErrMockFailed) Conn() driver.Conn {
73 | return nil
74 | }
75 |
76 | type ClickhouseImplRetryMock struct {
77 | hasErr int32
78 | }
79 |
80 | func (cr *ClickhouseImplRetryMock) Insert(_ context.Context, _ cx.View, _ []cx.Vector) (uint64, error) {
81 | if val := atomic.LoadInt32(&cr.hasErr); val == 0 {
82 | return 0, errClickhouseUnknownException
83 | }
84 | return 1, nil
85 | }
86 |
87 | func (cr *ClickhouseImplRetryMock) Close() error {
88 | return nil
89 | }
90 |
91 | func (cr *ClickhouseImplRetryMock) Conn() driver.Conn {
92 | return nil
93 | }
94 |
95 | type RowMock struct {
96 | id int
97 | uuid string
98 | insertTS time.Time
99 | }
100 |
101 | func (vm RowMock) Row() cx.Vector {
102 | return cx.Vector{vm.id, vm.uuid, vm.insertTS}
103 | }
104 |
105 | // nolint:funlen,gocyclo,cyclop,gocognit // cyclomatic,cognitive complexity not important here
106 | func TestClient(t *testing.T) {
107 | tableView := cx.NewView("test_db.test_table", []string{"id", "uuid", "insert_ts"})
108 | ctx, cancel := context.WithCancel(context.Background())
109 | defer cancel()
110 |
111 | t.Run("it should be correct send and flush data", func(t *testing.T) {
112 | client := clickhousebuffer.NewClientWithOptions(ctx, &ClickhouseImplMock{},
113 | clickhousebuffer.NewOptions(
114 | clickhousebuffer.WithFlushInterval(200),
115 | clickhousebuffer.WithBatchSize(3),
116 | clickhousebuffer.WithDebugMode(true),
117 | clickhousebuffer.WithRetry(true),
118 | ),
119 | )
120 | defer client.Close()
121 | memoryBuffer := cxsyncmem.NewBuffer(
122 | client.Options().BatchSize(),
123 | )
124 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
125 | writeAPI.WriteRow(RowMock{
126 | id: 1, uuid: "1", insertTS: time.Now(),
127 | })
128 | writeAPI.WriteRow(RowMock{
129 | id: 2, uuid: "2", insertTS: time.Now().Add(time.Second),
130 | })
131 | writeAPI.WriteRow(RowMock{
132 | id: 3, uuid: "3", insertTS: time.Now().Add(time.Second * 2),
133 | })
134 | simulateWait(time.Millisecond * 550)
135 | if memoryBuffer.Len() != 0 {
136 | t.Fatal("Failed, the buffer was expected to be cleared")
137 | }
138 | simulateWait(time.Millisecond * 500)
139 | ok, nook, progress := client.RetryClient().Metrics()
140 | fmt.Println("#1:", ok, nook, progress)
141 | if ok != 0 || nook != 0 || progress != 0 {
142 | t.Fatalf("failed, expected zero successful and zero failed retries, got %d successful and %d failed", ok, nook)
143 | }
144 | })
145 |
146 | // nolint:dupl // it's not important here
147 | t.Run("it should be successfully received three errors about writing", func(t *testing.T) {
148 | client := clickhousebuffer.NewClientWithOptions(ctx, &ClickhouseImplErrMock{},
149 | clickhousebuffer.NewOptions(
150 | clickhousebuffer.WithFlushInterval(10),
151 | clickhousebuffer.WithBatchSize(1),
152 | clickhousebuffer.WithDebugMode(true),
153 | clickhousebuffer.WithRetry(true),
154 | ),
155 | )
156 | defer client.Close()
157 | memoryBuffer := cxsyncmem.NewBuffer(
158 | client.Options().BatchSize(),
159 | )
160 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
161 | var errors []error
162 | mu := &sync.RWMutex{}
163 | errorsCh := writeAPI.Errors()
164 | // Create go proc for reading and storing errors
165 | go func() {
166 | for err := range errorsCh {
167 | mu.Lock()
168 | errors = append(errors, err)
169 | mu.Unlock()
170 | }
171 | }()
172 | writeAPI.WriteRow(RowMock{
173 | id: 1, uuid: "1", insertTS: time.Now(),
174 | })
175 | writeAPI.WriteRow(RowMock{
176 | id: 2, uuid: "2", insertTS: time.Now().Add(time.Second),
177 | })
178 | writeAPI.WriteRow(RowMock{
179 | id: 3, uuid: "3", insertTS: time.Now().Add(time.Second * 2),
180 | })
181 | simulateWait(time.Millisecond * 150)
182 | mu.RLock()
183 | defer mu.RUnlock()
184 | if len(errors) != 3 {
185 | t.Fatalf("failed, expected to get three errors, received %d", len(errors))
186 | }
187 | if memoryBuffer.Len() != 0 {
188 | t.Fatal("failed, the buffer was expected to be cleared")
189 | }
190 | simulateWait(time.Millisecond * 5000)
191 | ok, nook, progress := client.RetryClient().Metrics()
192 | fmt.Println("#2:", ok, nook, progress)
193 | if ok != 0 || nook != 3 || progress != 0 {
194 | t.Fatalf("failed, expected zero successful and three failed retries, got %d successful and %d failed", ok, nook)
195 | }
196 | })
197 |
198 | // nolint:dupl // it's OK
199 | t.Run("it should be successfully handle retry", func(t *testing.T) {
200 | mock := &ClickhouseImplRetryMock{}
201 | client := clickhousebuffer.NewClientWithOptions(ctx, mock,
202 | clickhousebuffer.NewOptions(
203 | clickhousebuffer.WithFlushInterval(10),
204 | clickhousebuffer.WithBatchSize(1),
205 | clickhousebuffer.WithDebugMode(true),
206 | clickhousebuffer.WithRetry(true),
207 | ),
208 | )
209 | defer client.Close()
210 | memoryBuffer := cxsyncmem.NewBuffer(
211 | client.Options().BatchSize(),
212 | )
213 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
214 | var errors []error
215 | mu := &sync.RWMutex{}
216 | errorsCh := writeAPI.Errors()
217 | // Create go proc for reading and storing errors
218 | go func() {
219 | for err := range errorsCh {
220 | mu.Lock()
221 | errors = append(errors, err)
222 | mu.Unlock()
223 | }
224 | }()
225 | writeAPI.WriteRow(RowMock{
226 | id: 1, uuid: "1", insertTS: time.Now(),
227 | })
228 | simulateWait(time.Millisecond * 10)
229 | atomic.StoreInt32(&mock.hasErr, 1)
230 | simulateWait(time.Millisecond * 2000)
231 | mu.RLock()
232 | defer mu.RUnlock()
233 | if len(errors) != 1 {
234 | t.Fatalf("failed, expected to get one error, received %d", len(errors))
235 | }
236 | if memoryBuffer.Len() != 0 {
237 | t.Fatal("failed, the buffer was expected to be cleared")
238 | }
239 | ok, nook, progress := client.RetryClient().Metrics()
240 | fmt.Println("#3:", ok, nook, progress)
241 | if ok != 1 || nook != 0 || progress != 0 {
242 | t.Fatalf("failed, expected one successful and zero failed retries, got %d successful and %d failed", ok, nook)
243 | }
244 | simulateWait(time.Millisecond * 350)
245 | })
246 |
247 | // nolint:dupl // it's OK
248 | t.Run("[safe] it should be successfully handle retry", func(t *testing.T) {
249 | mock := &ClickhouseImplRetryMock{}
250 | client := clickhousebuffer.NewClientWithOptions(ctx, mock,
251 | clickhousebuffer.NewOptions(
252 | clickhousebuffer.WithFlushInterval(10),
253 | clickhousebuffer.WithBatchSize(1),
254 | clickhousebuffer.WithDebugMode(true),
255 | clickhousebuffer.WithRetry(true),
256 | ),
257 | )
258 | defer client.Close()
259 | memoryBuffer := cxsyncmem.NewBuffer(
260 | client.Options().BatchSize(),
261 | )
262 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
263 | var errors []error
264 | mu := &sync.RWMutex{}
265 | errorsCh := writeAPI.Errors()
266 | // Create go proc for reading and storing errors
267 | go func() {
268 | for err := range errorsCh {
269 | mu.Lock()
270 | errors = append(errors, err)
271 | mu.Unlock()
272 | }
273 | }()
274 | writeAPI.TryWriteRow(RowMock{
275 | id: 1, uuid: "1", insertTS: time.Now(),
276 | })
277 | simulateWait(time.Millisecond * 10)
278 | atomic.StoreInt32(&mock.hasErr, 1)
279 | simulateWait(time.Millisecond * 2000)
280 | mu.RLock()
281 | defer mu.RUnlock()
282 | if len(errors) != 1 {
283 | t.Fatalf("failed, expected to get one error, received %d", len(errors))
284 | }
285 | if memoryBuffer.Len() != 0 {
286 | t.Fatal("failed, the buffer was expected to be cleared")
287 | }
288 | ok, nook, progress := client.RetryClient().Metrics()
289 | fmt.Println("#3:", ok, nook, progress)
290 | if ok != 1 || nook != 0 || progress != 0 {
291 | t.Fatalf("failed, expected one successful and zero failed retries, got %d successful and %d failed", ok, nook)
292 | }
293 | simulateWait(time.Millisecond * 350)
294 | })
295 |
296 | t.Run("it should be successfully handle retry without error channel", func(t *testing.T) {
297 | mock := &ClickhouseImplRetryMock{}
298 | client := clickhousebuffer.NewClientWithOptions(ctx, mock,
299 | clickhousebuffer.NewOptions(
300 | clickhousebuffer.WithFlushInterval(10),
301 | clickhousebuffer.WithBatchSize(1),
302 | clickhousebuffer.WithDebugMode(true),
303 | clickhousebuffer.WithRetry(true),
304 | ),
305 | )
306 | defer client.Close()
307 | memoryBuffer := cxsyncmem.NewBuffer(
308 | client.Options().BatchSize(),
309 | )
310 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
311 | writeAPI.WriteRow(RowMock{
312 | id: 1, uuid: "1", insertTS: time.Now(),
313 | })
314 | simulateWait(time.Millisecond * 10)
315 | atomic.StoreInt32(&mock.hasErr, 1)
316 | simulateWait(time.Millisecond * 2000)
317 | if memoryBuffer.Len() != 0 {
318 | t.Fatal("failed, the buffer was expected to be cleared")
319 | }
320 | ok, nook, progress := client.RetryClient().Metrics()
321 | fmt.Println("#3:", ok, nook, progress)
322 | if ok != 1 || nook != 0 || progress != 0 {
323 | t.Fatalf("failed, expected one successful and zero failed retries, got %d successful and %d failed", ok, nook)
324 | }
325 | simulateWait(time.Millisecond * 350)
326 | })
327 |
328 | // nolint:dupl // it's not important here
329 | t.Run("it should successfully handle broken retry", func(t *testing.T) {
330 | client := clickhousebuffer.NewClientWithOptions(ctx, &ClickhouseImplErrMockFailed{},
331 | clickhousebuffer.NewOptions(
332 | clickhousebuffer.WithFlushInterval(10),
333 | clickhousebuffer.WithBatchSize(1),
334 | clickhousebuffer.WithDebugMode(true),
335 | clickhousebuffer.WithRetry(true),
336 | ),
337 | )
338 | defer client.Close()
339 | memoryBuffer := cxsyncmem.NewBuffer(
340 | client.Options().BatchSize(),
341 | )
342 | writeAPI := client.Writer(ctx, tableView, memoryBuffer)
343 | var errors []error
344 | mu := &sync.RWMutex{}
345 | errorsCh := writeAPI.Errors()
346 | // Start a goroutine for reading and storing errors
347 | go func() {
348 | for err := range errorsCh {
349 | mu.Lock()
350 | errors = append(errors, err)
351 | mu.Unlock()
352 | }
353 | }()
354 | writeAPI.WriteRow(RowMock{
355 | id: 1, uuid: "1", insertTS: time.Now(),
356 | })
357 | writeAPI.WriteRow(RowMock{
358 | id: 2, uuid: "2", insertTS: time.Now().Add(time.Second),
359 | })
360 | writeAPI.WriteRow(RowMock{
361 | id: 3, uuid: "3", insertTS: time.Now().Add(time.Second * 2),
362 | })
363 | simulateWait(time.Millisecond * 150)
364 | mu.RLock()
365 | defer mu.RUnlock()
366 | if len(errors) != 3 {
367 | t.Fatalf("failed, expected to get three errors, received %d", len(errors))
368 | }
369 | if memoryBuffer.Len() != 0 {
370 | t.Fatal("failed, the buffer was expected to be cleared")
371 | }
372 | simulateWait(time.Millisecond * 2000)
373 | ok, nook, progress := client.RetryClient().Metrics()
374 | fmt.Println("#4:", ok, nook, progress)
375 | if ok != 0 || nook != 0 || progress != 0 {
376 | t.Fatalf("failed, expected zero successful and zero failed retries, got %d successful and %d failed", ok, nook)
377 | }
378 | })
379 | }
380 |
381 | func TestClientImplWriteBatch(t *testing.T) {
382 | tableView := cx.NewView("test_db.test_table", []string{"id", "uuid", "insert_ts"})
383 | ctx, cancel := context.WithCancel(context.Background())
384 | defer cancel()
385 |
386 | t.Run("it should send data correctly", func(t *testing.T) {
387 | client := clickhousebuffer.NewClientWithOptions(ctx, &ClickhouseImplMock{},
388 | clickhousebuffer.NewOptions(
389 | clickhousebuffer.WithFlushInterval(10),
390 | clickhousebuffer.WithBatchSize(1),
391 | clickhousebuffer.WithDebugMode(true),
392 | clickhousebuffer.WithRetry(true),
393 | ),
394 | )
395 | defer client.Close()
396 | writerBlocking := client.WriterBlocking(tableView)
397 | err := writerBlocking.WriteRow(ctx, []cx.Vectorable{
398 | RowMock{
399 | id: 1, uuid: "1", insertTS: time.Now(),
400 | },
401 | RowMock{
402 | id: 1, uuid: "1", insertTS: time.Now(),
403 | },
404 | RowMock{
405 | id: 1, uuid: "1", insertTS: time.Now(),
406 | },
407 | }...)
408 | if err != nil {
409 | t.Fatal(err)
410 | }
411 | })
412 |
413 | t.Run("it should successfully receive a write error", func(t *testing.T) {
414 | client := clickhousebuffer.NewClientWithOptions(ctx, &ClickhouseImplErrMock{},
415 | clickhousebuffer.NewOptions(
416 | clickhousebuffer.WithFlushInterval(10),
417 | clickhousebuffer.WithBatchSize(1),
418 | clickhousebuffer.WithDebugMode(true),
419 | clickhousebuffer.WithRetry(true),
420 | ),
421 | )
422 | defer client.Close()
423 | writerBlocking := client.WriterBlocking(tableView)
424 | err := writerBlocking.WriteRow(ctx, []cx.Vectorable{
425 | RowMock{
426 | id: 1, uuid: "1", insertTS: time.Now(),
427 | },
428 | RowMock{
429 | id: 1, uuid: "1", insertTS: time.Now(),
430 | },
431 | RowMock{
432 | id: 1, uuid: "1", insertTS: time.Now(),
433 | },
434 | }...)
435 | if err == nil {
436 | t.Fatal("failed, expected to get write error, got nil")
437 | }
438 | })
439 | simulateWait(time.Millisecond * 500)
440 | }
441 |
442 | func simulateWait(wait time.Duration) {
443 | <-time.After(wait)
444 | }
445 |
--------------------------------------------------------------------------------
/tests/integration_memory_test.go:
--------------------------------------------------------------------------------
1 | //go:build integration
2 | // +build integration
3 |
4 | package tests
5 |
6 | import (
7 | "context"
8 | "log"
9 | "testing"
10 |
11 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
12 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxsyncmem"
13 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
14 | )
15 |
16 | // nolint:dupl // it's OK
17 | func TestMemory(t *testing.T) {
18 | var err error
19 | log.Println("RUN INTEGRATION TEST WITH MEMORY AND NATIVE CLICKHOUSE")
20 | ctx, cancel := context.WithCancel(context.Background())
21 | defer cancel()
22 | // STEP 1: Create Clickhouse service
23 | ch, clickhouse, err := useClickhousePool(ctx)
24 | if err != nil {
25 | t.Fatal(err)
26 | }
27 | // STEP 2: Drop and create table under certain conditions
28 | if err = beforeCheckTables(ctx, ch); err != nil {
29 | t.Fatal(err)
30 | }
31 | // STEP 3: Create clickhouse client and buffer writer with in-memory buffer
32 | client, memBuffer := useClientAndMemoryBuffer(ctx, clickhouse)
33 | defer client.Close()
34 | // STEP 4: Write data to the in-memory buffer
35 | writeAPI := useWriteAPI(ctx, client, memBuffer)
36 | writeDataToBuffer(writeAPI)
37 | // STEP 5: Checks!
38 | if err = checksBuffer(memBuffer); err != nil {
39 | t.Fatal(err)
40 | }
41 | if err = checksClickhouse(ctx, ch); err != nil {
42 | t.Fatal(err)
43 | }
44 | }
45 |
46 | // nolint:dupl // it's OK
47 | func TestSQLMemory(t *testing.T) {
48 | var err error
49 | log.Println("RUN INTEGRATION TEST WITH MEMORY AND SQL CLICKHOUSE")
50 | ctx, cancel := context.WithCancel(context.Background())
51 | defer cancel()
52 | // STEP 1: Create Clickhouse service
53 | ch, clickhouse, err := useClickhouseSQLPool(ctx)
54 | if err != nil {
55 | t.Fatal(err)
56 | }
57 | // STEP 2: Drop and create table under certain conditions
58 | if err = beforeCheckTablesSQL(ctx, ch); err != nil {
59 | t.Fatal(err)
60 | }
61 | // STEP 3: Create clickhouse client and buffer writer with in-memory buffer
62 | client, memBuffer := useClientAndMemoryBuffer(ctx, clickhouse)
63 | defer client.Close()
64 | // STEP 4: Write data to the in-memory buffer
65 | writeAPI := useWriteAPI(ctx, client, memBuffer)
66 | writeDataToBuffer(writeAPI)
67 | // STEP 5: Checks!
68 | if err = checksBuffer(memBuffer); err != nil {
69 | t.Fatal(err)
70 | }
71 | if err = checksClickhouseSQL(ctx, ch); err != nil {
72 | t.Fatal(err)
73 | }
74 | }
75 |
76 | // nolint:dupl // it's OK
77 | func TestMemorySafe(t *testing.T) {
78 | var err error
79 | log.Println("RUN INTEGRATION TEST WITH MEMORY AND NATIVE CLICKHOUSE [SAFE]")
80 | ctx, cancel := context.WithCancel(context.Background())
81 | defer cancel()
82 | // STEP 1: Create Clickhouse service
83 | ch, clickhouse, err := useClickhousePool(ctx)
84 | if err != nil {
85 | t.Fatal(err)
86 | }
87 | // STEP 2: Drop and create table under certain conditions
88 | if err = beforeCheckTables(ctx, ch); err != nil {
89 | t.Fatal(err)
90 | }
91 | // STEP 3: Create clickhouse client and buffer writer with in-memory buffer
92 | client, memBuffer := useClientAndMemoryBuffer(ctx, clickhouse)
93 | defer client.Close()
94 | // STEP 4: Write data to the in-memory buffer
95 | writeAPI := useWriteAPI(ctx, client, memBuffer)
96 | writeDataToBufferSafe(writeAPI)
97 | // STEP 5: Checks!
98 | if err = checksBuffer(memBuffer); err != nil {
99 | t.Fatal(err)
100 | }
101 | if err = checksClickhouse(ctx, ch); err != nil {
102 | t.Fatal(err)
103 | }
104 | }
105 |
106 | func useClientAndMemoryBuffer(ctx context.Context, clickhouse cx.Clickhouse) (clickhousebuffer.Client, cx.Buffer) {
107 | client := useCommonClient(ctx, clickhouse)
108 | return client, cxsyncmem.NewBuffer(client.Options().BatchSize())
109 | }
110 |
--------------------------------------------------------------------------------
/tests/integration_test.go:
--------------------------------------------------------------------------------
1 | //go:build integration
2 | // +build integration
3 |
4 | package tests
5 |
6 | import (
7 | "context"
8 | "database/sql"
9 | "errors"
10 | "fmt"
11 | "log"
12 | "os"
13 | "sync"
14 | "testing"
15 | "time"
16 |
17 | "github.com/ClickHouse/clickhouse-go/v2"
18 | "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
19 | "github.com/go-redis/redis/v8"
20 |
21 | clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
22 | "github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxredis"
23 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
24 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxnative"
25 | "github.com/zikwall/clickhouse-buffer/v4/src/db/cxsql"
26 | )
27 |
28 | const integrationTableName = "default.test_integration_xxx_xxx"
29 |
30 | type integrationRow struct {
31 | id uint8
32 | uuid string
33 | insertTS time.Time
34 | }
35 |
36 | func (i integrationRow) Row() cx.Vector {
37 | return cx.Vector{i.id, i.uuid, i.insertTS.Format(time.RFC822)}
38 | }
39 |
40 | // This test is a complete simulation of how the buffer backend (Redis) and the Clickhouse data warehouse work together
41 | // nolint:dupl // it's OK
42 | func TestNative(t *testing.T) {
43 | var err error
44 | log.Println("RUN INTEGRATION TEST WITH REDIS AND NATIVE CLICKHOUSE")
45 | ctx, cancel := context.WithCancel(context.Background())
46 | defer cancel()
47 | // STEP 1: Create Redis service
48 | db, err := useRedisPool()
49 | if err != nil {
50 | t.Fatal(err)
51 | }
52 | // STEP 2: Create Clickhouse service
53 | conn, nativeClickhouse, err := useClickhousePool(ctx)
54 | if err != nil {
55 | t.Fatal(err)
56 | }
57 | // STEP 3: Drop and Create table under certain conditions
58 | if err = beforeCheckTables(ctx, conn); err != nil {
59 | t.Fatal(err)
60 | }
61 | // STEP 4: Create clickhouse client and buffer writer with redis buffer
62 | client, redisBuffer, err := useClientAndRedisBuffer(ctx, nativeClickhouse, db)
63 | if err != nil {
64 | t.Fatal(err)
65 | }
66 | defer client.Close()
67 | // STEP 5: Write data to the redis buffer
68 | writeAPI := useWriteAPI(ctx, client, redisBuffer)
69 | var errorsSlice []error
70 | mu := &sync.RWMutex{}
71 | errorsCh := writeAPI.Errors()
72 | go func() {
73 | for err := range errorsCh {
74 | mu.Lock()
75 | errorsSlice = append(errorsSlice, err)
76 | mu.Unlock()
77 | }
78 | }()
79 | writeDataToBuffer(writeAPI)
80 | // STEP 6: Checks!
81 | if err := checksBuffer(redisBuffer); err != nil {
82 | t.Fatal(err)
83 | }
84 | if err := checksClickhouse(ctx, conn); err != nil {
85 | t.Fatal(err)
86 | }
87 | // now force the next insert to fail by dropping the table
88 | dropTable(ctx, conn)
89 | // writing to the buffer itself should still succeed
90 | writeDataToBuffer(writeAPI)
91 | if err := checksBuffer(redisBuffer); err != nil {
92 | t.Fatal(err)
93 | }
94 | // we expect an exception from Clickhouse: code: 60, message: Table default.test_integration_xxx_xxx doesn't exist
95 | <-time.After(1000 * time.Millisecond)
96 | mu.RLock()
97 | defer mu.RUnlock()
98 | if len(errorsSlice) != 1 {
99 | t.Fatalf("failed, expected to receive one error from clickhouse, received: %d", len(errorsSlice))
100 | }
101 | log.Println("received errors from clickhouse insert:", errorsSlice)
102 | }
103 |
104 | // nolint:dupl // it's OK
105 | func TestSQL(t *testing.T) {
106 | var err error
107 | log.Println("RUN INTEGRATION TEST WITH REDIS AND SQL CLICKHOUSE")
108 | ctx, cancel := context.WithCancel(context.Background())
109 | defer cancel()
110 | // STEP 1: Create Redis service
111 | db, err := useRedisPool()
112 | if err != nil {
113 | t.Fatal(err)
114 | }
115 | // STEP 2: Create Clickhouse service
116 | conn, nativeClickhouse, err := useClickhouseSQLPool(ctx)
117 | if err != nil {
118 | t.Fatal(err)
119 | }
120 | // STEP 3: Drop and Create table under certain conditions
121 | if err = beforeCheckTablesSQL(ctx, conn); err != nil {
122 | t.Fatal(err)
123 | }
124 | // STEP 4: Create clickhouse client and buffer writer with redis buffer
125 | client, redisBuffer, err := useClientAndRedisBuffer(ctx, nativeClickhouse, db)
126 | if err != nil {
127 | t.Fatal(err)
128 | }
129 | defer client.Close()
130 | // STEP 5: Write data to the redis buffer
131 | writeAPI := useWriteAPI(ctx, client, redisBuffer)
132 | var errorsSlice []error
133 | mu := &sync.RWMutex{}
134 | errorsCh := writeAPI.Errors()
135 | go func() {
136 | for chErr := range errorsCh {
137 | mu.Lock()
138 | errorsSlice = append(errorsSlice, chErr)
139 | mu.Unlock()
140 | }
141 | }()
142 | writeDataToBuffer(writeAPI)
143 | // STEP 6: Checks!
144 | if err = checksBuffer(redisBuffer); err != nil {
145 | t.Fatal(err)
146 | }
147 | if err = checksClickhouseSQL(ctx, conn); err != nil {
148 | t.Fatal(err)
149 | }
150 | // now force the next insert to fail by dropping the table
151 | dropTableSQL(ctx, conn)
152 | // writing to the buffer itself should still succeed
153 | writeDataToBuffer(writeAPI)
154 | if err = checksBuffer(redisBuffer); err != nil {
155 | t.Fatal(err)
156 | }
157 | // we expect an exception from Clickhouse: code: 60, message: Table default.test_integration_xxx_xxx doesn't exist
158 | <-time.After(1000 * time.Millisecond)
159 | mu.RLock()
160 | defer mu.RUnlock()
161 | if len(errorsSlice) != 1 {
162 | t.Fatalf("failed, expected to receive one error from clickhouse, received: %d", len(errorsSlice))
163 | }
164 | log.Println("received errors from clickhouse insert:", errorsSlice)
165 | }
166 |
167 | type clickhouseRowData struct {
168 | id uint8
169 | uuid string
170 | createdAt string
171 | }
172 |
173 | // nolint:dupl // it's OK
174 | func fetchClickhouseRows(ctx context.Context, conn driver.Conn) ([]clickhouseRowData, error) {
175 | rws, err := conn.Query(ctx, fmt.Sprintf("SELECT id, uuid, insert_ts FROM %s", integrationTableName))
176 | if err != nil {
177 | return nil, err
178 | }
179 | defer func() {
180 | _ = rws.Close()
181 | }()
182 | var values []clickhouseRowData
183 | for rws.Next() {
184 | var (
185 | id uint8
186 | uuid string
187 | createdAt string
188 | )
189 | if err = rws.Scan(&id, &uuid, &createdAt); err != nil {
190 | return nil, err
191 | }
192 | values = append(values, clickhouseRowData{id, uuid, createdAt})
193 | }
194 | if err = rws.Err(); err != nil {
195 | return nil, err
196 | }
197 | return values, err
198 | }
199 |
200 | // nolint:dupl // it's OK
201 | func fetchClickhouseRowsSQL(ctx context.Context, conn *sql.DB) ([]clickhouseRowData, error) {
202 | rws, err := conn.QueryContext(ctx, fmt.Sprintf("SELECT id, uuid, insert_ts FROM %s", integrationTableName))
203 | if err != nil {
204 | return nil, err
205 | }
206 | defer func() {
207 | _ = rws.Close()
208 | }()
209 | var values []clickhouseRowData
210 | for rws.Next() {
211 | var (
212 | id uint8
213 | uuid string
214 | createdAt string
215 | )
216 | if err = rws.Scan(&id, &uuid, &createdAt); err != nil {
217 | return nil, err
218 | }
219 | values = append(values, clickhouseRowData{id, uuid, createdAt})
220 | }
221 | if err = rws.Err(); err != nil {
222 | return nil, err
223 | }
224 | return values, err
225 | }
226 |
227 | // nolint:dupl // it's OK
228 | func beforeCheckTables(ctx context.Context, conn driver.Conn) error {
229 | dropTable(ctx, conn)
230 | return createTable(ctx, conn)
231 | }
232 |
233 | // nolint:dupl // it's OK
234 | func dropTable(ctx context.Context, conn driver.Conn) {
235 | _ = conn.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", integrationTableName))
236 | }
237 |
238 | // nolint:dupl // it's OK
239 | func createTable(ctx context.Context, conn driver.Conn) error {
240 | err := conn.Exec(ctx, fmt.Sprintf(`
241 | CREATE TABLE IF NOT EXISTS %s (
242 | id UInt8,
243 | uuid String,
244 | insert_ts String
245 | ) engine=Memory
246 | `, integrationTableName))
247 | return err
248 | }
249 |
250 | // nolint:dupl // it's OK
251 | func beforeCheckTablesSQL(ctx context.Context, conn *sql.DB) error {
252 | dropTableSQL(ctx, conn)
253 | return createTableSQL(ctx, conn)
254 | }
255 |
256 | // nolint:dupl // it's OK
257 | func dropTableSQL(ctx context.Context, conn *sql.DB) {
258 | _, _ = conn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", integrationTableName))
259 | }
260 |
261 | // nolint:dupl // it's OK
262 | func createTableSQL(ctx context.Context, conn *sql.DB) error {
263 | _, err := conn.ExecContext(ctx, fmt.Sprintf(`
264 | CREATE TABLE IF NOT EXISTS %s (
265 | id UInt8,
266 | uuid String,
267 | insert_ts String
268 | ) engine=Memory
269 | `, integrationTableName))
270 | return err
271 | }
272 |
273 | func useRedisPool() (*redis.Client, error) {
274 | var (
275 | db *redis.Client
276 | host = os.Getenv("REDIS_HOST")
277 | user = os.Getenv("REDIS_USER")
278 | pass = os.Getenv("REDIS_PASS")
279 | )
280 | if host == "" {
281 | host = "localhost:6379"
282 | }
283 | db = redis.NewClient(&redis.Options{
284 | Addr: host,
285 | Username: user,
286 | Password: pass,
287 | DB: 12,
288 | })
289 | if err := db.Ping(db.Context()).Err(); err != nil {
290 | return nil, err
291 | }
292 | return db, nil
293 | }
294 |
295 | const (
296 | defaultUser = "default"
297 | defaultDb = "default"
298 | defaultHost = "localhost:9000"
299 | )
300 |
301 | func useCredentials() (host, db, user, password string) {
302 | host = os.Getenv("CLICKHOUSE_HOST")
303 | db = os.Getenv("CLICKHOUSE_DATABASE")
304 | user = os.Getenv("CLICKHOUSE_USER")
305 | password = os.Getenv("CLICKHOUSE_PASSWORD")
306 | if host == "" {
307 | host = defaultHost
308 | }
309 | if db == "" {
310 | db = defaultDb
311 | }
312 | if user == "" {
313 | user = defaultUser
314 | }
315 | return host, db, user, password
316 | }
317 |
318 | func useOptions() *clickhouse.Options {
319 | host, db, user, password := useCredentials()
320 | return &clickhouse.Options{
321 | Addr: []string{host},
322 | Auth: clickhouse.Auth{
323 | Database: db,
324 | Username: user,
325 | Password: password,
326 | },
327 | Settings: clickhouse.Settings{
328 | "max_execution_time": 60,
329 | },
330 | DialTimeout: 5 * time.Second,
331 | Compression: &clickhouse.Compression{
332 | Method: clickhouse.CompressionLZ4,
333 | },
334 | Debug: true,
335 | }
336 | }
337 |
338 | func useClickhousePool(ctx context.Context) (driver.Conn, cx.Clickhouse, error) {
339 | nativeClickhouse, conn, err := cxnative.NewClickhouse(ctx, useOptions(), &cx.RuntimeOptions{
340 | WriteTimeout: 15 * time.Second,
341 | })
342 | if err != nil {
343 | return nil, nil, err
344 | }
345 | return conn, nativeClickhouse, nil
346 | }
347 |
348 | func useClickhouseSQLPool(ctx context.Context) (*sql.DB, cx.Clickhouse, error) {
349 | sqlClickhouse, conn, err := cxsql.NewClickhouse(ctx, useOptions(), &cx.RuntimeOptions{})
350 | if err != nil {
351 | return nil, nil, err
352 | }
353 | return conn, sqlClickhouse, nil
354 | }
355 |
356 | func useCommonClient(ctx context.Context, ch cx.Clickhouse) clickhousebuffer.Client {
357 | return clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
358 | clickhousebuffer.WithBatchSize(6),
359 | clickhousebuffer.WithFlushInterval(500),
360 | ))
361 | }
362 |
363 | func useClientAndRedisBuffer(
364 | ctx context.Context,
365 | ch cx.Clickhouse,
366 | db *redis.Client,
367 | ) (
368 | clickhousebuffer.Client,
369 | cx.Buffer,
370 | error,
371 | ) {
372 | client := useCommonClient(ctx, ch)
373 | buf, err := cxredis.NewBuffer(ctx, db, "bucket", client.Options().BatchSize())
374 | if err != nil {
375 | return nil, nil, fmt.Errorf("couldn't create redis buffer: %s", err)
376 | }
377 | return client, buf, nil
378 | }
379 |
380 | func useWriteAPI(ctx context.Context, client clickhousebuffer.Client, buf cx.Buffer) clickhousebuffer.Writer {
381 | writeAPI := client.Writer(ctx, cx.NewView(integrationTableName, []string{"id", "uuid", "insert_ts"}), buf)
382 | return writeAPI
383 | }
384 |
385 | func writeDataToBuffer(writeAPI clickhousebuffer.Writer) {
386 | writeAPI.WriteRow(integrationRow{
387 | id: 1, uuid: "1", insertTS: time.Now(),
388 | })
389 | writeAPI.WriteRow(integrationRow{
390 | id: 2, uuid: "2", insertTS: time.Now(),
391 | })
392 | writeAPI.WriteRow(integrationRow{
393 | id: 3, uuid: "3", insertTS: time.Now(),
394 | })
395 | writeAPI.WriteRow(integrationRow{
396 | id: 4, uuid: "4", insertTS: time.Now(),
397 | })
398 | writeAPI.WriteRow(integrationRow{
399 | id: 5, uuid: "5", insertTS: time.Now(),
400 | })
401 | // wait a bit
402 | <-time.After(50 * time.Millisecond)
403 | }
404 |
405 | func writeDataToBufferSafe(writeAPI clickhousebuffer.Writer) {
406 | writeAPI.TryWriteRow(integrationRow{
407 | id: 1, uuid: "1", insertTS: time.Now(),
408 | })
409 | writeAPI.TryWriteRow(integrationRow{
410 | id: 2, uuid: "2", insertTS: time.Now(),
411 | })
412 | writeAPI.TryWriteRow(integrationRow{
413 | id: 3, uuid: "3", insertTS: time.Now(),
414 | })
415 | writeAPI.TryWriteRow(integrationRow{
416 | id: 4, uuid: "4", insertTS: time.Now(),
417 | })
418 | writeAPI.TryWriteRow(integrationRow{
419 | id: 5, uuid: "5", insertTS: time.Now(),
420 | })
421 | // wait a bit
422 | <-time.After(50 * time.Millisecond)
423 | }
424 |
425 | func checksBuffer(buf cx.Buffer) error {
426 | // try to read from the buffer before its data is flushed
427 | rows := buf.Read()
428 | if len(rows) != 5 {
429 | return fmt.Errorf("couldn't get correct values, received: %v", rows)
430 | }
431 | log.Printf("Received value from buffer: %v", rows)
432 | // wait until the buffer is flushed
433 | <-time.After(500 * time.Millisecond)
434 | // check buffer size
435 | if size := buf.Len(); size != 0 {
436 | return errors.New("failed, the buffer was expected to be cleared")
437 | }
438 | return nil
439 | }
440 |
441 | func checkData(values []clickhouseRowData) error {
442 | log.Printf("received values from clickhouse table: %v", values)
443 | if len(values) != 5 {
444 | return fmt.Errorf("failed, expected to get five values, received %d", len(values))
445 | }
446 | if v := values[2].id; v != 3 {
447 | return fmt.Errorf("failed, expected value 3, received %d", v)
448 | }
449 | if v := values[2].uuid; v != "3" {
450 | return fmt.Errorf("failed, expected value 3, received %s", v)
451 | }
452 | return nil
453 | }
454 |
455 | func checksClickhouse(ctx context.Context, conn driver.Conn) error {
456 | // check the data in clickhouse, written after flushing
457 | values, err := fetchClickhouseRows(ctx, conn)
458 | if err != nil {
459 | return fmt.Errorf("couldn't fetch data from clickhouse: %s", err)
460 | }
461 | return checkData(values)
462 | }
463 |
464 | func checksClickhouseSQL(ctx context.Context, conn *sql.DB) error {
465 | // check the data in clickhouse, written after flushing
466 | values, err := fetchClickhouseRowsSQL(ctx, conn)
467 | if err != nil {
468 | return fmt.Errorf("couldn't fetch data from clickhouse: %s", err)
469 | }
470 | return checkData(values)
471 | }
472 |
473 | func TestSomething(t *testing.T) {
474 | // db.Query()
475 | }
476 |
--------------------------------------------------------------------------------
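Editor's note: the two integration test files above are guarded by the integration build tag, so a plain go test ./... skips them. A likely invocation, assuming ClickHouse and Redis are reachable through the CLICKHOUSE_* and REDIS_* environment variables read in useCredentials and useRedisPool (hosts below are just the illustrative defaults from those helpers), is:

CLICKHOUSE_HOST=localhost:9000 REDIS_HOST=localhost:6379 go test -v -tags integration ./tests/...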
/write.go:
--------------------------------------------------------------------------------
1 | package clickhousebuffer
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "sync/atomic"
7 | "time"
8 |
9 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
10 | )
11 |
12 | // Writer is a client interface with non-blocking methods for writing rows asynchronously, in batches, to a Clickhouse server.
13 | // Writer can be used concurrently.
14 | // When using multiple goroutines for writing, use a single Writer instance in all goroutines.
15 | type Writer interface {
16 | // WriteRow asynchronously writes a single row into the buffer.
17 | WriteRow(vector cx.Vectorable)
18 | // TryWriteRow is the same as WriteRow, but follows the channel closing principle (gracefully closed channels): it returns without writing if the writer is shutting down.
19 | TryWriteRow(vec cx.Vectorable)
20 | // WriteVector asynchronously writes an already built vector into the buffer.
21 | WriteVector(vec cx.Vector)
22 | // TryWriteVector is the same as WriteVector, but returns without writing if the writer is shutting down.
23 | TryWriteVector(vec cx.Vector)
24 | // Errors returns a channel for reading errors which occur during async writes.
25 | Errors() <-chan error
26 | // Close writer
27 | Close()
28 | }
29 |
30 | // writer structure implements the Writer interface,
31 | // encapsulates all the necessary methods within itself and manages its own data flows
32 | type writer struct {
33 | context context.Context
34 | view cx.View
35 | client Client
36 | bufferEngine cx.Buffer
37 | writeOptions *Options
38 | errCh chan error
39 | clickhouseCh chan *cx.Batch
40 | bufferCh chan cx.Vector
41 | doneCh chan struct{}
42 | writeStop chan struct{}
43 | bufferStop chan struct{}
44 | mu *sync.RWMutex
45 | isOpenErr int32
46 | }
47 |
48 | // NewWriter returns a new non-blocking write client for writing rows to a Clickhouse table
49 | func NewWriter(ctx context.Context, client Client, view cx.View, engine cx.Buffer) Writer {
50 | w := &writer{
51 | mu: &sync.RWMutex{},
52 | context: ctx,
53 | view: view,
54 | client: client,
55 | bufferEngine: engine,
56 | writeOptions: client.Options(),
57 | // write buffers
58 | clickhouseCh: make(chan *cx.Batch),
59 | bufferCh: make(chan cx.Vector),
60 | // signals
61 | doneCh: make(chan struct{}),
62 | bufferStop: make(chan struct{}),
63 | writeStop: make(chan struct{}),
64 | }
65 | go w.runBufferBridge()
66 | go w.runClickhouseBridge()
67 | return w
68 | }
69 |
70 | // WriteRow asynchronously writes a single row into the buffer.
71 | // WriteRow adds the record to the buffer, which is flushed in the background when it reaches the batch size.
72 | func (w *writer) WriteRow(vec cx.Vectorable) {
73 | // maybe use atomic for check is closed
74 | // atomic.LoadInt32(&w.isClosed) == 1
75 | w.bufferCh <- vec.Row()
76 | }
77 |
78 | // TryWriteRow is the same as WriteRow, but follows the channel closing principle (gracefully closed channels): it returns without writing if the writer is shutting down.
79 | func (w *writer) TryWriteRow(vec cx.Vectorable) {
80 | // the try-receive operation is to try to exit the goroutine as early as
81 | // possible.
82 | select {
83 | case <-w.bufferStop:
84 | return
85 | default:
86 | }
87 | // even if bufferStop is closed, the first branch in the second select may be
88 | // still not selected for some loops if to send to bufferCh is also unblocked.
89 | select {
90 | case <-w.bufferStop:
91 | return
92 | case w.bufferCh <- vec.Row():
93 | }
94 | }
95 |
96 | // WriteVector is the same as WriteRow, but takes an already built vector.
97 | // WriteVector is a faster option for writing to the buffer than WriteRow and allocates less memory.
98 | func (w *writer) WriteVector(vec cx.Vector) {
99 | w.bufferCh <- vec
100 | }
101 |
102 | // TryWriteVector is the same as WriteVector, but returns without writing if the writer is shutting down.
103 | func (w *writer) TryWriteVector(vec cx.Vector) {
104 | select {
105 | case <-w.bufferStop:
106 | return
107 | default:
108 | }
109 | select {
110 | case <-w.bufferStop:
111 | return
112 | case w.bufferCh <- vec:
113 | }
114 | }
115 |
116 | // Errors returns a channel for reading errors which occur during async writes.
117 | // Errors must be called before performing any writes for errors to be collected.
118 | // Errors chan is unbuffered and must be drained or the writer will block.
119 | func (w *writer) Errors() <-chan error {
120 | w.mu.Lock()
121 | defer w.mu.Unlock()
122 | if w.errCh == nil {
123 | // mark that there is a reader of the error channel so that errors can be written to it
124 | atomic.StoreInt32(&w.isOpenErr, 1)
125 | w.errCh = make(chan error)
126 | }
127 | return w.errCh
128 | }
129 |
130 | // hasErrReader returns true if there is at least one reader of the error channel, otherwise false
131 | func (w *writer) hasErrReader() bool {
132 | return atomic.LoadInt32(&w.isOpenErr) > 0
133 | }
134 |
135 | // Close finishes outstanding write operations, stops background routines and closes all channels
136 | func (w *writer) Close() {
137 | if w.clickhouseCh != nil {
138 | // stop and wait for write buffer
139 | close(w.bufferStop)
140 | <-w.doneCh
141 |
142 | // stop and wait for write clickhouse
143 | close(w.writeStop)
144 | <-w.doneCh
145 |
146 | // stop ticker for flush to batch
147 | // close(w.tickerStop)
148 | // <-w.doneCh
149 | }
150 | if w.writeOptions.isDebug {
151 | w.writeOptions.logger.Logf("close writer %s", w.view.Name)
152 | }
153 | }
154 |
155 | // flush builds a new batch from the buffer and sends it to the queue channel for subsequent writing to the Clickhouse database
156 | func (w *writer) flush() {
157 | if w.writeOptions.isDebug {
158 | w.writeOptions.logger.Logf("flush buffer: %s", w.view.Name)
159 | }
160 | w.clickhouseCh <- cx.NewBatch(w.bufferEngine.Read())
161 | w.bufferEngine.Flush()
162 | }
163 |
164 | // func (w *writer) runTicker() {
165 | // ticker := time.NewTicker(time.Duration(w.writeOptions.FlushInterval()) * time.Millisecond)
166 | // w.writeOptions.logger.Logf("run ticker: %s", w.view.Name)
167 | // defer func() {
168 | // ticker.Stop()
169 | // w.doneCh <- struct{}{}
170 | // w.writeOptions.logger.Logf("stop ticker: %s", w.view.Name)
171 | // }()
172 | // for {
173 | // select {
174 | // case <-ticker.C:
175 | // if w.bufferEngine.Len() > 0 {
176 | // w.flush()
177 | // }
178 | // case <-w.tickerStop:
179 | // return
180 | // }
181 | // }
182 | // }
183 |
184 | // runBufferBridge writes incoming vectors to a temporary buffer, collecting them into batches
185 | func (w *writer) runBufferBridge() {
186 | ticker := time.NewTicker(time.Duration(w.writeOptions.FlushInterval()) * time.Millisecond)
187 | defer func() {
188 | ticker.Stop()
189 | // flush last data
190 | if w.bufferEngine.Len() > 0 {
191 | w.flush()
192 | }
193 | w.mu.Lock()
194 | // close buffer channel
195 | close(w.bufferCh)
196 | w.bufferCh = nil
197 | w.mu.Unlock()
198 | // send signal, buffer listener is done
199 | w.doneCh <- struct{}{}
200 | if w.writeOptions.isDebug {
201 | w.writeOptions.logger.Logf("stop buffer bridge: %s", w.view.Name)
202 | }
203 | }()
204 | if w.writeOptions.isDebug {
205 | w.writeOptions.logger.Logf("run buffer bridge: %s", w.view.Name)
206 | }
207 | for {
208 | select {
209 | case vector := <-w.bufferCh:
210 | w.bufferEngine.Write(vector)
211 | if w.bufferEngine.Len() == int(w.writeOptions.BatchSize()) {
212 | w.flush()
213 | }
214 | case <-w.bufferStop:
215 | return
216 | case <-ticker.C:
217 | if w.bufferEngine.Len() > 0 {
218 | w.flush()
219 | }
220 | }
221 | }
222 | }
223 |
224 | // runClickhouseBridge asynchronously writes batches to the Clickhouse database
225 | func (w *writer) runClickhouseBridge() {
226 | if w.writeOptions.isDebug {
227 | w.writeOptions.logger.Logf("run clickhouse bridge: %s", w.view.Name)
228 | }
229 | defer func() {
230 | w.mu.Lock()
231 | // close clickhouse channel
232 | close(w.clickhouseCh)
233 | w.clickhouseCh = nil
234 | // close errors channel if it created
235 | if w.errCh != nil {
236 | close(w.errCh)
237 | w.errCh = nil
238 | }
239 | w.mu.Unlock()
240 | // send signal, clickhouse listener is done
241 | w.doneCh <- struct{}{}
242 | if w.writeOptions.isDebug {
243 | w.writeOptions.logger.Logf("stop clickhouse bridge: %s", w.view.Name)
244 | }
245 | }()
246 | for {
247 | select {
248 | case batch := <-w.clickhouseCh:
249 | err := w.client.WriteBatch(w.context, w.view, batch)
250 | if err != nil && w.hasErrReader() {
251 | w.errCh <- err
252 | }
253 | case <-w.writeStop:
254 | return
255 | }
256 | }
257 | }
258 |
--------------------------------------------------------------------------------
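Editor's note: a minimal, hedged usage sketch of the non-blocking Writer from /write.go. It assumes a cx.Clickhouse connection (ch) obtained elsewhere, for example with cxnative.NewClickhouse as in /tests/integration_test.go; the row type, table name and option values are illustrative and not part of the library.

package example

import (
	"context"
	"log"
	"time"

	clickhousebuffer "github.com/zikwall/clickhouse-buffer/v4"
	"github.com/zikwall/clickhouse-buffer/v4/src/buffer/cxsyncmem"
	"github.com/zikwall/clickhouse-buffer/v4/src/cx"
)

// exampleRow is a hypothetical row type; any type implementing cx.Vectorable can be written.
type exampleRow struct {
	id   uint8
	uuid string
	ts   time.Time
}

func (r exampleRow) Row() cx.Vector {
	return cx.Vector{r.id, r.uuid, r.ts.Format(time.RFC822)}
}

func runWriter(ctx context.Context, ch cx.Clickhouse) {
	client := clickhousebuffer.NewClientWithOptions(ctx, ch, clickhousebuffer.NewOptions(
		clickhousebuffer.WithBatchSize(1000),
		clickhousebuffer.WithFlushInterval(1000),
	))
	defer client.Close()
	// in-memory buffer sized to the configured batch size
	buf := cxsyncmem.NewBuffer(client.Options().BatchSize())
	writeAPI := client.Writer(ctx, cx.NewView("default.example_table", []string{"id", "uuid", "insert_ts"}), buf)
	// Errors must be requested before writing; the channel is unbuffered and must be drained
	errorsCh := writeAPI.Errors()
	go func() {
		for err := range errorsCh {
			log.Println("async write error:", err)
		}
	}()
	// rows are collected into batches and flushed by size or by interval, whichever comes first
	writeAPI.WriteRow(exampleRow{id: 1, uuid: "1", ts: time.Now()})
}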
/write_blocking.go:
--------------------------------------------------------------------------------
1 | package clickhousebuffer
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
7 | )
8 |
9 | // WriterBlocking is similar to Writer, except that writes through this interface are blocking.
10 | // WriterBlocking does not handle errors or retries of undelivered messages;
11 | // all responsibility for error handling falls on the developer
12 | type WriterBlocking interface {
13 | // WriteRow writes row(s) directly to the database.
14 | // WriteRow writes without implicit batching; a batch is created from the given records.
15 | // A non-blocking alternative is available in the Writer interface
16 | WriteRow(ctx context.Context, row ...cx.Vectorable) error
17 | }
18 |
19 | // writerBlocking structure implements the WriterBlocking interface and encapsulates all necessary logic within itself
20 | type writerBlocking struct {
21 | view cx.View
22 | client Client
23 | }
24 |
25 | // NewWriterBlocking returns a new WriterBlocking object
26 | func NewWriterBlocking(client Client, view cx.View) WriterBlocking {
27 | w := &writerBlocking{
28 | view: view,
29 | client: client,
30 | }
31 | return w
32 | }
33 |
34 | // WriteRow is similar to Writer.WriteRow,
35 | // only it is blocking and can write a large batch of data directly to the database at once
36 | func (w *writerBlocking) WriteRow(ctx context.Context, row ...cx.Vectorable) error {
37 | if len(row) > 0 {
38 | rows := make([]cx.Vector, 0, len(row))
39 | for _, r := range row {
40 | rows = append(rows, r.Row())
41 | }
42 | return w.write(ctx, rows)
43 | }
44 | return nil
45 | }
46 |
47 | // write to Clickhouse database
48 | func (w *writerBlocking) write(ctx context.Context, rows []cx.Vector) error {
49 | err := w.client.WriteBatch(ctx, w.view, cx.NewBatch(rows))
50 | if err != nil {
51 | return err
52 | }
53 | return nil
54 | }
55 |
--------------------------------------------------------------------------------
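Editor's note: a short, hedged sketch of the blocking counterpart, reusing the hypothetical exampleRow type and the same imports as the previous sketch; here errors come back to the caller instead of an error channel.

func runBlockingWriter(ctx context.Context, client clickhousebuffer.Client) error {
	writerBlocking := client.WriterBlocking(cx.NewView("default.example_table", []string{"id", "uuid", "insert_ts"}))
	// the batch is built from the passed rows and written synchronously
	return writerBlocking.WriteRow(ctx,
		exampleRow{id: 1, uuid: "1", ts: time.Now()},
		exampleRow{id: 2, uuid: "2", ts: time.Now()},
	)
}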
/write_options.go:
--------------------------------------------------------------------------------
1 | package clickhousebuffer
2 |
3 | import (
4 | "github.com/zikwall/clickhouse-buffer/v4/src/cx"
5 | "github.com/zikwall/clickhouse-buffer/v4/src/retry"
6 | )
7 |
8 | // Options holds write configuration properties
9 | type Options struct {
10 | // Maximum number of rows sent to the server in a single request. Default is 2000 with NewOptions (5000 with the deprecated DefaultOptions)
11 | batchSize uint
12 | // Interval, in ms, at which the buffer is flushed if it has not already been written (by reaching the batch size).
13 | // Default is 2000ms with NewOptions (1000ms with the deprecated DefaultOptions)
14 | flushInterval uint
15 | // Debug mode
16 | isDebug bool
17 | // true if resending undelivered batches via retry.Retry is enabled
18 | isRetryEnabled bool
19 | // cx.Logger implementation used for logging
20 | logger cx.Logger
21 | // retry.Queueable implementation used as the retry queue
22 | queue retry.Queueable
23 | }
24 |
25 | // BatchSize returns size of batch
26 | func (o *Options) BatchSize() uint {
27 | return o.batchSize
28 | }
29 |
30 | // SetBatchSize sets the number of rows sent in a single request; it would be good practice to remove this function,
31 | // but it is convenient for testing. DO NOT USE in concurrent environments
32 | func (o *Options) SetBatchSize(batchSize uint) *Options {
33 | o.batchSize = batchSize
34 | return o
35 | }
36 |
37 | // FlushInterval returns flush interval in ms
38 | func (o *Options) FlushInterval() uint {
39 | return o.flushInterval
40 | }
41 |
42 | // SetFlushInterval sets the interval, in ms, at which the buffer is flushed if it has not already been written;
43 | // it would be good practice to remove this function,
44 | // but it is convenient for testing. DO NOT USE in concurrent environments
45 | func (o *Options) SetFlushInterval(flushIntervalMs uint) *Options {
46 | o.flushInterval = flushIntervalMs
47 | return o
48 | }
49 |
50 | // for multithreaded systems, you can implement something like this:
51 | //
52 | // func (o *Options) ConcurrentlySetFlushInterval(flushIntervalMs uint) *Options {
53 | // o.mx.Lock()
54 | // o.flushInterval = flushIntervalMs
55 | // o.mx.Unlock()
56 | // return o
57 | // }
58 |
59 | // SetDebugMode sets debug mode, for logs and errors
60 | //
61 | // Deprecated: use WithDebugMode function with NewOptions
62 | func (o *Options) SetDebugMode(isDebug bool) *Options {
63 | o.isDebug = isDebug
64 | return o
65 | }
66 |
67 | // SetRetryIsEnabled enables/disables resending undelivered messages
68 | //
69 | // Deprecated: use WithRetry function with NewOptions
70 | func (o *Options) SetRetryIsEnabled(enabled bool) *Options {
71 | o.isRetryEnabled = enabled
72 | return o
73 | }
74 |
75 | // SetLogger installs a custom implementation of the cx.Logger interface
76 | //
77 | // Deprecated: use WithLogger function with NewOptions
78 | func (o *Options) SetLogger(logger cx.Logger) *Options {
79 | o.logger = logger
80 | return o
81 | }
82 |
83 | // SetQueueEngine installs a custom implementation of the retry.Queueable interface
84 | //
85 | // Deprecated: use WithRetryQueueEngine function with NewOptions
86 | func (o *Options) SetQueueEngine(queue retry.Queueable) *Options {
87 | o.queue = queue
88 | return o
89 | }
90 |
91 | func WithBatchSize(size uint) Option {
92 | return func(o *Options) {
93 | o.batchSize = size
94 | }
95 | }
96 |
97 | func WithFlushInterval(interval uint) Option {
98 | return func(o *Options) {
99 | o.flushInterval = interval
100 | }
101 | }
102 |
103 | func WithDebugMode(isDebug bool) Option {
104 | return func(o *Options) {
105 | o.isDebug = isDebug
106 | }
107 | }
108 |
109 | func WithRetry(enabled bool) Option {
110 | return func(o *Options) {
111 | o.isRetryEnabled = enabled
112 | }
113 | }
114 |
115 | func WithLogger(logger cx.Logger) Option {
116 | return func(o *Options) {
117 | o.logger = logger
118 | }
119 | }
120 |
121 | func WithRetryQueueEngine(queue retry.Queueable) Option {
122 | return func(o *Options) {
123 | o.queue = queue
124 | }
125 | }
126 |
127 | type Option func(o *Options)
128 |
129 | // NewOptions returns an Options object with the ability to set your own parameters
130 | func NewOptions(options ...Option) *Options {
131 | o := &Options{
132 | batchSize: 2000,
133 | flushInterval: 2000,
134 | }
135 | for _, option := range options {
136 | option(o)
137 | }
138 | return o
139 | }
140 |
141 | // DefaultOptions returns an Options object with default values
142 | //
143 | // Deprecated: use NewOptions function with Option callbacks
144 | func DefaultOptions() *Options {
145 | return &Options{
146 | batchSize: 5000,
147 | flushInterval: 1000,
148 | }
149 | }
150 |
--------------------------------------------------------------------------------
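Editor's note: a hedged configuration sketch for the options above. Batches are flushed either when batchSize rows have accumulated or when flushInterval milliseconds pass, whichever comes first, and WithRetry enables re-queueing of undelivered batches. The values and the ctx/ch variables are illustrative and assumed to exist as in the earlier sketches.

opts := clickhousebuffer.NewOptions(
	clickhousebuffer.WithBatchSize(5000),     // flush once 5000 rows are buffered
	clickhousebuffer.WithFlushInterval(1000), // or at most every 1000 ms
	clickhousebuffer.WithRetry(true),         // re-queue batches that failed to be written
	clickhousebuffer.WithDebugMode(false),    // verbose logging off
)
client := clickhousebuffer.NewClientWithOptions(ctx, ch, opts)
defer client.Close()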