├── .gitattributes ├── .github └── workflows │ ├── codeql.yml │ ├── docker.yml │ ├── lint.yml │ ├── release.yml │ ├── tests-sd.yml │ └── tests.yml ├── .gitignore ├── .golangci.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── autocomplete ├── autocomplete.go └── autocomplete_test.go ├── cache └── cache.go ├── capabilities └── handler.go ├── cmd ├── e2e-test │ ├── carbon-clickhouse.go │ ├── checks.go │ ├── clickhouse.go │ ├── container.go │ ├── e2etesting.go │ ├── errors.go │ ├── graphite-clickhouse.go │ ├── main.go │ ├── rproxy.go │ └── utils.go └── graphite-clickhouse-client │ └── main.go ├── config ├── .gitignore ├── config.go ├── config_test.go ├── json.go └── json_test.go ├── deploy ├── doc │ ├── .gitignore │ └── config.md └── root │ ├── etc │ └── logrotate.d │ │ └── graphite-clickhouse │ └── usr │ └── lib │ └── systemd │ └── system │ └── graphite-clickhouse.service ├── doc ├── aggregation.md ├── config.md ├── debugging.md ├── graphite_clickhouse.gliffy ├── index-table.md ├── release.md └── stack.png ├── find ├── find.go ├── handler.go ├── handler_json_test.go └── handler_test.go ├── finder ├── base.go ├── blacklist.go ├── date.go ├── date_reverse.go ├── date_reverse_test.go ├── finder.go ├── index.go ├── index_test.go ├── mock.go ├── plain_from_tagged.go ├── plain_from_tagged_test.go ├── prefix.go ├── prefix_test.go ├── reverse.go ├── reverse_test.go ├── split.go ├── split_test.go ├── tag.go ├── tag_test.go ├── tagged.go ├── tagged_test.go └── unescape.go ├── go.mod ├── go.sum ├── graphite-clickhouse.go ├── healthcheck └── healthcheck.go ├── helper ├── RowBinary │ └── encode.go ├── clickhouse │ ├── clickhouse.go │ ├── clickhouse_test.go │ ├── external-data.go │ └── external-data_test.go ├── client │ ├── datetime.go │ ├── errros.go │ ├── find.go │ ├── render.go │ ├── requests.go │ ├── tags.go │ └── types.go ├── date │ ├── date.go │ └── date_test.go ├── datetime │ ├── datetime.go │ └── datetime_test.go ├── errs │ └── errors.go ├── headers │ └── 
headers.go ├── pickle │ └── pickle.go ├── point │ ├── func.go │ ├── func_test.go │ ├── point.go │ └── points.go ├── rollup │ ├── aggr.go │ ├── compact.go │ ├── compact_test.go │ ├── remote.go │ ├── remote_test.go │ ├── rollup.go │ ├── rules.go │ ├── rules_test.go │ ├── xml.go │ └── xml_test.go ├── tests │ ├── clickhouse │ │ └── server.go │ └── compare │ │ ├── compare.go │ │ └── expand │ │ └── expand.go └── utils │ ├── utils.go │ └── utils_test.go ├── index ├── handler.go ├── index.go └── index_test.go ├── issues └── daytime │ ├── carbon-clickhouse.conf.tpl │ ├── graphite-clickhouse-internal-aggr.conf.tpl │ ├── graphite-clickhouse.conf.tpl │ └── test.toml ├── limiter ├── alimiter.go ├── alimiter_test.go ├── interface.go ├── limiter.go ├── noop.go └── wlimiter.go ├── load_avg ├── load_avg.go ├── load_avg_default.go ├── load_avg_linux.go └── load_avg_test.go ├── logs └── logger.go ├── metrics ├── limiter_metrics.go ├── metrics.go ├── metrics_test.go ├── query_metrics.go └── statsd.go ├── nfpm.yaml ├── packages.sh ├── pkg ├── alias │ ├── map.go │ ├── map_tagged_test.go │ └── map_test.go ├── dry │ ├── math.go │ ├── math_test.go │ ├── strings.go │ ├── strings_test.go │ ├── unsafe.go │ └── unsafe_test.go ├── reverse │ ├── reverse.go │ └── reverse_test.go ├── scope │ ├── context.go │ ├── http_request.go │ ├── key.go │ ├── logger.go │ └── version.go └── where │ ├── match.go │ ├── match_test.go │ ├── where.go │ └── where_test.go ├── prometheus ├── .gitignore ├── empty_iterator.go ├── exemplar.go ├── gatherer.go ├── labels.go ├── labels_test.go ├── local_storage.go ├── logger.go ├── matcher.go ├── metrics_set.go ├── querier.go ├── querier_select.go ├── querier_select_test.go ├── run.go ├── run_dummy.go ├── series_set.go └── storage.go ├── render ├── data │ ├── carbonlink.go │ ├── carbonlink_test.go │ ├── ch_response.go │ ├── common_step.go │ ├── common_step_test.go │ ├── data.go │ ├── data_parse_test.go │ ├── multi_target.go │ ├── multi_target_test.go │ ├── query.go │ ├── 
query_test.go │ ├── targets.go │ └── targets_test.go ├── handler.go ├── handler_test.go └── reply │ ├── formatter.go │ ├── formatter_test.go │ ├── json.go │ ├── pickle.go │ ├── protobuf.go │ ├── protobuf_test.go │ ├── v2_pb.go │ ├── v2_pb_test.go │ ├── v3_pb.go │ └── v3_pb_test.go ├── sd ├── nginx │ ├── nginx.go │ ├── nginx_test.go │ └── tests │ │ └── nginx_cleanup_test.go ├── register.go └── utils │ └── utils.go ├── tagger ├── metric.go ├── rule.go ├── rule_test.go ├── set.go ├── tagger.go ├── tagger_test.go └── tree.go └── tests ├── agg_internal ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr.conf.tpl └── test.toml ├── agg_latest ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── agg_merge ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── agg_oneblock ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── clickhouse ├── rollup │ ├── config.xml │ ├── init.sql │ ├── rollup.xml │ └── users.xml └── rollup_tls │ ├── config.xml │ ├── init.sql │ ├── rollup.xml │ ├── rootCA.crt │ ├── server.crt │ ├── server.key │ └── users.xml ├── consolidateBy ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── consul.sh ├── emptyseries_append ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── emptyseries_noappend ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── error_handling ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── feature_flags_both_true ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── feature_flags_dont_match_missing_tags ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── feature_flags_false ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── 
feature_flags_use_carbon_behaviour ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── find_cache ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-cached.conf.tpl ├── graphite-clickhouse-internal-aggr-cached.conf.tpl └── test.toml ├── limitera ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr-cached.conf.tpl └── test.toml ├── limitermax ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr-cached.conf.tpl └── test.toml ├── limiterw ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr-cached.conf.tpl └── test.toml ├── limiterwn ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr-cached.conf.tpl └── test.toml ├── one_table ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse-internal-aggr.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── tags_min_in_query ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml ├── tls ├── ca.crt ├── carbon-clickhouse.conf.tpl ├── client.crt ├── client.key ├── graphite-clickhouse.conf.tpl └── test.toml └── wildcard_min_distance ├── carbon-clickhouse.conf.tpl ├── graphite-clickhouse.conf.tpl └── test.toml /.gitattributes: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/go-graphite/graphite-clickhouse/0b309280b9eff8a14de113ece4bafc9b3a7abeec/.gitattributes -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 
6 | name: "CodeQL" 7 | 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | # The branches below must be a subset of the branches above 13 | branches: [master] 14 | 15 | jobs: 16 | analyze: 17 | name: Analyze 18 | runs-on: ubuntu-latest 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | # Override automatic language detection by changing the below list 24 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 25 | language: ['go'] 26 | # Learn more... 27 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 28 | 29 | steps: 30 | - name: Checkout repository 31 | uses: actions/checkout@v4 32 | with: 33 | # We must fetch at least the immediate parents so that if this is 34 | # a pull request then we can checkout the head. 35 | fetch-depth: 2 36 | 37 | # Initializes the CodeQL tools for scanning. 38 | - name: Initialize CodeQL 39 | uses: github/codeql-action/init@v3 40 | with: 41 | languages: ${{ matrix.language }} 42 | # If you wish to specify custom queries, you can do so here or in a config file. 43 | # By default, queries listed here will override any specified in a config file. 44 | # Prefix the list here with "+" to use these queries and those in the config file. 45 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 46 | 47 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 48 | # If this step fails, then you should remove it and run the build manually (see below) 49 | - name: Autobuild 50 | uses: github/codeql-action/autobuild@v3 51 | 52 | # ℹ️ Command-line programs to run using the OS shell. 
53 | # 📚 https://git.io/JvXDl 54 | 55 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 56 | # and modify them (or add more) to build your code if your project 57 | # uses a compiled language 58 | 59 | #- run: | 60 | # make bootstrap 61 | # make release 62 | 63 | - name: Perform CodeQL Analysis 64 | uses: github/codeql-action/analyze@v3 65 | 66 | 67 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker images 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | tags: [ 'v*' ] 7 | pull_request: 8 | branches: [ master ] 9 | 10 | jobs: 11 | docker: 12 | name: Build image 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check out code 16 | uses: actions/checkout@v4 17 | - name: Docker meta 18 | id: meta 19 | uses: docker/metadata-action@v3 20 | with: 21 | images: ghcr.io/${{ github.repository }} 22 | # create latest tag for branch events 23 | flavor: | 24 | latest=${{ github.event_name == 'push' && github.ref_type == 'branch' }} 25 | tags: | 26 | type=ref,event=branch 27 | type=ref,event=pr 28 | type=semver,pattern={{version}} 29 | type=semver,pattern={{major}}.{{minor}} 30 | type=semver,pattern={{major}}.{{minor}}.{{patch}} 31 | - name: Login to DockerHub 32 | if: github.event_name != 'pull_request' 33 | uses: docker/login-action@v1 34 | with: 35 | registry: ghcr.io 36 | username: ${{ github.actor }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | - name: Build and push 39 | id: docker_build 40 | uses: docker/build-push-action@v2 41 | with: 42 | # push for non-pr events 43 | push: ${{ github.event_name != 'pull_request' }} 44 | context: . 
45 | tags: ${{ steps.meta.outputs.tags }} 46 | labels: ${{ steps.meta.outputs.labels }} 47 | 48 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | pull_request: 4 | 5 | jobs: 6 | golangci: 7 | name: lint 8 | runs-on: ubuntu-22.04 9 | steps: 10 | - uses: actions/checkout@v4 11 | 12 | - uses: actions/setup-go@v4 13 | with: 14 | go-version-file: go.mod 15 | 16 | - name: Run linter 17 | uses: golangci/golangci-lint-action@v7 18 | with: 19 | version: v2.0.2 20 | 21 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Upload Packages to new release 2 | 3 | on: 4 | release: 5 | types: 6 | - published 7 | 8 | jobs: 9 | build: 10 | name: Build 11 | runs-on: ubuntu-latest 12 | env: 13 | BINARY: ${{ github.event.repository.name }} 14 | CGO_ENABLED: 0 15 | 16 | outputs: 17 | matrix: ${{ steps.build.outputs.matrix }} 18 | steps: 19 | - name: Set up Go 20 | uses: actions/setup-go@v5 21 | with: 22 | go-version: ^1 23 | 24 | - uses: actions/checkout@v4 25 | name: Checkout 26 | 27 | - name: Test 28 | run: make test 29 | env: 30 | CGO_ENABLED: 1 31 | 32 | - name: Build packages 33 | id: build 34 | run: | 35 | go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.40.0 36 | make nfpm-deb nfpm-rpm 37 | make sum-files 38 | ARTIFACTS= 39 | # Upload all deb and rpm packages 40 | for package in *deb *rpm; do ARTIFACTS=${ARTIFACTS}\"$package\",\ ; done 41 | echo ::set-output name=matrix::{\"file\": [${ARTIFACTS} \"sha256sum\", \"md5sum\"]} 42 | 43 | - name: Check version 44 | id: check_version 45 | run: | 46 | ./out/${BINARY}-linux-amd64 -version 47 | [ v$(./out/${BINARY}-linux-amd64 -version) = ${{ github.event.release.tag_name }} ] 48 | 49 | - name: Artifact 50 | id: 
artifact 51 | uses: actions/upload-artifact@v4 52 | with: 53 | name: packages 54 | retention-days: 1 55 | path: | 56 | *.deb 57 | *.rpm 58 | sha256sum 59 | md5sum 60 | 61 | - name: Push packages to the stable repo 62 | run: make packagecloud-stable 63 | env: 64 | PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }} 65 | 66 | upload: 67 | needs: build 68 | runs-on: ubuntu-latest 69 | strategy: 70 | matrix: ${{fromJson(needs.build.outputs.matrix)}} 71 | steps: 72 | - name: Download artifact 73 | uses: actions/download-artifact@v4.1.7 74 | with: 75 | name: packages 76 | - name: Upload ${{ matrix.file }} 77 | id: upload 78 | uses: actions/upload-release-asset@v1 79 | env: 80 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 81 | with: 82 | upload_url: ${{ github.event.release.upload_url }} 83 | asset_path: ${{ matrix.file }} 84 | asset_name: ${{ matrix.file }} 85 | asset_content_type: application/octet-stream 86 | -------------------------------------------------------------------------------- /.github/workflows/tests-sd.yml: -------------------------------------------------------------------------------- 1 | name: Tests register in SD 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | 11 | tests: 12 | env: 13 | CGO_ENABLED: 0 14 | name: Test register in SD 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | go: 19 | - ^1 20 | steps: 21 | 22 | - name: Set up Go 23 | uses: actions/setup-go@v5 24 | with: 25 | go-version: ${{ matrix.go }} 26 | 27 | - name: Check out code 28 | uses: actions/checkout@v4 29 | 30 | - name: Start consul 31 | run: | 32 | ./tests/consul.sh 1.15.2 > /tmp/consul.log & 33 | sleep 30 34 | shell: bash 35 | 36 | - name: Test 37 | run: go test ./sd/nginx -tags=test_sd -v 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and 
Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | graphite-clickhouse 6 | .vscode 7 | /out/ 8 | 9 | # Folders 10 | _obj 11 | _test 12 | 13 | # Architecture specific extensions/prefixes 14 | *.[568vq] 15 | [568vq].out 16 | 17 | *.cgo1.go 18 | *.cgo2.c 19 | _cgo_defun.c 20 | _cgo_gotypes.go 21 | _cgo_export.* 22 | 23 | _testmain.go 24 | 25 | *.exe 26 | *.test 27 | *.prof 28 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: none 4 | enable: 5 | - asasalint 6 | - asciicheck 7 | - bidichk 8 | - bodyclose 9 | # - contextcheck 10 | - decorder 11 | # - dogsled 12 | - durationcheck 13 | # - errcheck 14 | # - errorlint 15 | # - fatcontext 16 | - ginkgolinter 17 | - gocheckcompilerdirectives 18 | - gochecksumtype 19 | # - goconst 20 | # - gocyclo 21 | # - godot 22 | - goheader 23 | - govet 24 | - grouper 25 | # - ineffassign 26 | - loggercheck 27 | # - makezero 28 | # - misspell 29 | # - mnd 30 | # - nilerr 31 | # - noctx 32 | # - nosprintfhostport 33 | - prealloc 34 | # - predeclared 35 | - promlinter 36 | - protogetter 37 | - reassign 38 | # - revive 39 | - rowserrcheck 40 | - sloglint 41 | - spancheck 42 | - sqlclosecheck 43 | # - staticcheck 44 | # - testifylint 45 | - tparallel 46 | - unconvert 47 | # - unparam 48 | # - unused 49 | - usestdlibvars 50 | # - wastedassign 51 | - whitespace 52 | - wsl 53 | settings: 54 | gocyclo: 55 | min-complexity: 15 56 | govet: 57 | settings: 58 | printf: 59 | funcs: 60 | - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof 61 | - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf 62 | - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf 63 | - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf 64 | unparam: 65 | check-exported: false 66 | exclusions: 67 | generated: lax 68 | presets: 69 | - comments 70 | - 
common-false-positives 71 | - legacy 72 | - std-error-handling 73 | rules: 74 | - linters: 75 | - errcheck 76 | - contextcheck 77 | - goconst 78 | - mnd 79 | path: _test\.go 80 | - linters: 81 | - godot 82 | path: notifier/registrator.go 83 | paths: 84 | - third_party$ 85 | - builtin$ 86 | - examples$ 87 | formatters: 88 | enable: 89 | - gofmt 90 | # - gofumpt 91 | - goimports 92 | exclusions: 93 | generated: lax 94 | paths: 95 | - third_party$ 96 | - builtin$ 97 | - examples$ 98 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine as builder 2 | 3 | WORKDIR /go/src/github.com/lomik/graphite-clickhouse 4 | COPY . . 5 | 6 | ENV GOPATH=/go 7 | 8 | RUN apk add git --no-cache 9 | 10 | RUN go build -ldflags '-extldflags "-static"' github.com/lomik/graphite-clickhouse 11 | 12 | FROM alpine:latest 13 | 14 | RUN apk --no-cache add ca-certificates 15 | WORKDIR / 16 | 17 | COPY --from=builder /go/src/github.com/lomik/graphite-clickhouse/graphite-clickhouse /usr/bin/graphite-clickhouse 18 | 19 | CMD ["graphite-clickhouse"] 20 | 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Roman Lomonosov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /cache/cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "errors" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/bradfitz/gomemcache/memcache" 11 | 12 | "github.com/msaf1980/go-expirecache" 13 | ) 14 | 15 | var ( 16 | ErrTimeout = errors.New("cache: timeout") 17 | ErrNotFound = errors.New("cache: not found") 18 | ) 19 | 20 | type BytesCache interface { 21 | Get(k string) ([]byte, error) 22 | Set(k string, v []byte, expire int32) 23 | } 24 | 25 | func NewExpireCache(maxsize uint64) BytesCache { 26 | ec := expirecache.New[string, []byte](maxsize) 27 | go ec.ApproximateCleaner(10 * time.Second) 28 | 29 | return &ExpireCache{ec: ec} 30 | } 31 | 32 | type ExpireCache struct { 33 | ec *expirecache.Cache[string, []byte] 34 | } 35 | 36 | func (ec ExpireCache) Get(k string) ([]byte, error) { 37 | v, ok := ec.ec.Get(k) 38 | 39 | if !ok { 40 | return nil, ErrNotFound 41 | } 42 | 43 | return v, nil 44 | } 45 | 46 | func (ec ExpireCache) Set(k string, v []byte, expire int32) { 47 | ec.ec.Set(k, v, uint64(len(v)), expire) 48 | } 49 | 50 | func NewMemcached(prefix string, servers ...string) BytesCache { 51 | return &MemcachedCache{prefix: prefix, client: memcache.New(servers...)} 52 | } 53 | 54 | type MemcachedCache struct { 55 | prefix string 56 | client 
*memcache.Client 57 | timeouts uint64 58 | } 59 | 60 | func (m *MemcachedCache) Get(k string) ([]byte, error) { 61 | key := sha256.Sum256([]byte(k)) 62 | hk := hex.EncodeToString(key[:]) 63 | done := make(chan bool, 1) 64 | 65 | var err error 66 | 67 | var item *memcache.Item 68 | 69 | go func() { 70 | item, err = m.client.Get(m.prefix + hk) 71 | done <- true 72 | }() 73 | 74 | timeout := time.After(50 * time.Millisecond) 75 | 76 | select { 77 | case <-timeout: 78 | atomic.AddUint64(&m.timeouts, 1) 79 | return nil, ErrTimeout 80 | case <-done: 81 | } 82 | 83 | if err != nil { 84 | // translate to internal cache miss error 85 | if errors.Is(err, memcache.ErrCacheMiss) { 86 | err = ErrNotFound 87 | } 88 | 89 | return nil, err 90 | } 91 | 92 | return item.Value, nil 93 | } 94 | 95 | func (m *MemcachedCache) Set(k string, v []byte, expire int32) { 96 | key := sha256.Sum256([]byte(k)) 97 | hk := hex.EncodeToString(key[:]) 98 | 99 | go func() { 100 | _ = m.client.Set(&memcache.Item{Key: m.prefix + hk, Value: v, Expiration: expire}) 101 | }() 102 | } 103 | 104 | func (m *MemcachedCache) Timeouts() uint64 { 105 | return atomic.LoadUint64(&m.timeouts) 106 | } 107 | -------------------------------------------------------------------------------- /cmd/e2e-test/container.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os/exec" 5 | "strings" 6 | ) 7 | 8 | var ( 9 | DockerBinary string 10 | DockerNetwork string = "graphite-ch-test" 11 | ) 12 | 13 | func imageDelete(image, version string) (bool, string) { 14 | if len(DockerBinary) == 0 { 15 | panic("docker not set") 16 | } 17 | 18 | chArgs := []string{"rmi", image + ":" + version} 19 | 20 | cmd := exec.Command(DockerBinary, chArgs...) 
21 | out, err := cmd.CombinedOutput() 22 | s := strings.Trim(string(out), "\n") 23 | 24 | if err == nil { 25 | return true, s 26 | } 27 | 28 | return false, err.Error() + ": " + s 29 | } 30 | 31 | func containerExist(name string) (bool, string) { 32 | if len(DockerBinary) == 0 { 33 | panic("docker not set") 34 | } 35 | 36 | chInspect := []string{"inspect", "--format", "'{{.Name}}'", name} 37 | 38 | cmd := exec.Command(DockerBinary, chInspect...) 39 | out, err := cmd.CombinedOutput() 40 | s := strings.Trim(string(out), "\n") 41 | 42 | if err == nil { 43 | return true, s 44 | } 45 | 46 | return false, err.Error() + ": " + s 47 | } 48 | 49 | func containerRemove(name string) (bool, string) { 50 | if len(DockerBinary) == 0 { 51 | panic("docker not set") 52 | } 53 | 54 | chInspect := []string{"rm", "-f", name} 55 | 56 | cmd := exec.Command(DockerBinary, chInspect...) 57 | out, err := cmd.CombinedOutput() 58 | s := strings.Trim(string(out), "\n") 59 | 60 | if err == nil { 61 | return true, s 62 | } 63 | 64 | return false, err.Error() + ": " + s 65 | } 66 | 67 | func containerExec(name string, args []string) (bool, string) { 68 | if len(DockerBinary) == 0 { 69 | panic("docker not set") 70 | } 71 | 72 | dCmd := []string{"exec", name} 73 | dCmd = append(dCmd, args...) 74 | 75 | cmd := exec.Command(DockerBinary, dCmd...) 
76 | out, err := cmd.CombinedOutput() 77 | s := strings.Trim(string(out), "\n") 78 | 79 | if err == nil { 80 | return true, s 81 | } 82 | 83 | return false, err.Error() + ": " + s 84 | } 85 | -------------------------------------------------------------------------------- /cmd/e2e-test/errors.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrTimestampInvalid = errors.New("invalid timestamp") 7 | ErrNoTest = errors.New("no test section") 8 | ErrNoSetDir = errors.New("dir not set") 9 | ) 10 | -------------------------------------------------------------------------------- /cmd/e2e-test/utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "os/exec" 4 | 5 | func cmdExec(programm string, args ...string) (string, error) { 6 | cmd := exec.Command(programm, args...) 7 | out, err := cmd.CombinedOutput() 8 | 9 | return string(out), err 10 | } 11 | -------------------------------------------------------------------------------- /config/.gitignore: -------------------------------------------------------------------------------- 1 | tests_tmp/ 2 | -------------------------------------------------------------------------------- /config/json.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "net/url" 6 | ) 7 | 8 | func (c *ClickHouse) MarshalJSON() ([]byte, error) { 9 | type ClickHouseRaw ClickHouse 10 | 11 | // make copy 12 | a := *c 13 | 14 | u, err := url.Parse(a.URL) 15 | if err != nil { 16 | a.URL = "" 17 | } else { 18 | if _, isSet := u.User.Password(); isSet { 19 | u.User = url.UserPassword(u.User.Username(), "xxxxxx") 20 | } 21 | 22 | a.URL = u.String() 23 | } 24 | 25 | return json.Marshal((*ClickHouseRaw)(&a)) 26 | } 27 | -------------------------------------------------------------------------------- 
/config/json_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestClickhouseUrlPassword(t *testing.T) { 11 | assert := assert.New(t) 12 | 13 | result := make(map[string]interface{}) 14 | 15 | c := &ClickHouse{URL: "http://user:qwerty@localhost:8123/?param=value"} 16 | b, err := json.Marshal(c) 17 | assert.NoError(err) 18 | 19 | assert.NoError(json.Unmarshal(b, &result)) 20 | assert.Equal("http://user:xxxxxx@localhost:8123/?param=value", result["url"].(string)) 21 | assert.Equal("http://user:qwerty@localhost:8123/?param=value", c.URL) 22 | } 23 | -------------------------------------------------------------------------------- /deploy/doc/.gitignore: -------------------------------------------------------------------------------- 1 | # autogenerated config for documentation 2 | graphite-clickhouse.conf 3 | -------------------------------------------------------------------------------- /deploy/root/etc/logrotate.d/graphite-clickhouse: -------------------------------------------------------------------------------- 1 | /var/log/graphite-clickhouse/graphite-clickhouse.log { 2 | compress 3 | delaycompress 4 | notifempty 5 | rotate 10 6 | maxsize 100M 7 | daily 8 | } 9 | -------------------------------------------------------------------------------- /deploy/root/usr/lib/systemd/system/graphite-clickhouse.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graphite cluster backend with ClickHouse support 3 | Documentation=https://github.com/lomik/graphite-clickhouse 4 | After=network.target 5 | 6 | [Service] 7 | Type=simple 8 | PermissionsStartOnly=true 9 | ExecStart=/usr/bin/graphite-clickhouse -config /etc/graphite-clickhouse/graphite-clickhouse.conf 10 | Restart=on-failure 11 | KillMode=control-group 12 | 13 | [Install] 14 | 
WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /doc/release.md: -------------------------------------------------------------------------------- 1 | # New release 2 | 3 | - Update `const Version` in graphite-clickhouse.go 4 | -------------------------------------------------------------------------------- /doc/stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/go-graphite/graphite-clickhouse/0b309280b9eff8a14de113ece4bafc9b3a7abeec/doc/stack.png -------------------------------------------------------------------------------- /find/handler_test.go: -------------------------------------------------------------------------------- 1 | package find 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/lomik/graphite-clickhouse/config" 10 | ) 11 | 12 | type clickhouseMock struct { 13 | requestLog chan []byte 14 | } 15 | 16 | func (m *clickhouseMock) ServeHTTP(w http.ResponseWriter, r *http.Request) { 17 | body, _ := io.ReadAll(r.Body) 18 | 19 | if m.requestLog != nil { 20 | m.requestLog <- body 21 | } 22 | } 23 | 24 | func TestFind(t *testing.T) { 25 | testCase := func(findQuery, expectedClickHouseQuery string) { 26 | requestLog := make(chan []byte, 1) 27 | m := &clickhouseMock{ 28 | requestLog: requestLog, 29 | } 30 | 31 | srv := httptest.NewServer(m) 32 | defer srv.Close() 33 | 34 | cfg := config.New() 35 | cfg.ClickHouse.URL = srv.URL 36 | 37 | handler := NewHandler(cfg) 38 | w := httptest.NewRecorder() 39 | r := httptest.NewRequest( 40 | http.MethodGet, 41 | "http://localhost/metrics/find/?local=1&format=pickle&query="+findQuery, 42 | nil, 43 | ) 44 | 45 | handler.ServeHTTP(w, r) 46 | 47 | chQuery := <-requestLog 48 | 49 | if string(chQuery) != expectedClickHouseQuery { 50 | t.Fatalf("%#v (actual) != %#v (expected)", string(chQuery), expectedClickHouseQuery) 51 | } 52 | } 53 | 54 | 
testCase( 55 | "host.top.cpu.cpu%2A", 56 | "SELECT Path FROM graphite_index WHERE ((Level=20004) AND (Path LIKE 'host.top.cpu.cpu%')) AND (Date='1970-02-12') GROUP BY Path FORMAT TabSeparatedRaw", 57 | ) 58 | 59 | testCase( 60 | "host.?cpu", 61 | "SELECT Path FROM graphite_index WHERE ((Level=20002) AND (Path LIKE 'host.%' AND match(Path, '^host[.][^.]cpu[.]?$'))) AND (Date='1970-02-12') GROUP BY Path FORMAT TabSeparatedRaw", 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /finder/base.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "strings" 9 | 10 | "github.com/lomik/graphite-clickhouse/config" 11 | "github.com/lomik/graphite-clickhouse/helper/clickhouse" 12 | "github.com/lomik/graphite-clickhouse/metrics" 13 | "github.com/lomik/graphite-clickhouse/pkg/scope" 14 | "github.com/lomik/graphite-clickhouse/pkg/where" 15 | ) 16 | 17 | var ErrNotImplemented = errors.New("not implemented") 18 | 19 | type BaseFinder struct { 20 | url string // clickhouse dsn 21 | table string // graphite_tree table 22 | opts clickhouse.Options // timeout, connectTimeout 23 | body []byte // clickhouse response body 24 | stats []metrics.FinderStat 25 | } 26 | 27 | func NewBase(url string, table string, opts clickhouse.Options) Finder { 28 | return &BaseFinder{ 29 | url: url, 30 | table: table, 31 | opts: opts, 32 | stats: make([]metrics.FinderStat, 0), 33 | } 34 | } 35 | 36 | func (b *BaseFinder) where(query string) *where.Where { 37 | level := strings.Count(query, ".") + 1 38 | 39 | w := where.New() 40 | w.And(where.Eq("Level", level)) 41 | w.And(where.TreeGlob("Path", query)) 42 | 43 | return w 44 | } 45 | 46 | func (b *BaseFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 47 | w := b.where(query) 48 | 49 | b.stats = append(b.stats, 
metrics.FinderStat{}) 50 | stat := &b.stats[len(b.stats)-1] 51 | 52 | b.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query( 53 | scope.WithTable(ctx, b.table), 54 | b.url, 55 | // TODO: consider consistent query generator 56 | fmt.Sprintf("SELECT Path FROM %s WHERE %s GROUP BY Path FORMAT TabSeparatedRaw", b.table, w), 57 | b.opts, 58 | nil, 59 | ) 60 | stat.Table = b.table 61 | stat.ReadBytes = int64(len(b.body)) 62 | 63 | return 64 | } 65 | 66 | func (b *BaseFinder) makeList(onlySeries bool) [][]byte { 67 | if b.body == nil { 68 | return [][]byte{} 69 | } 70 | 71 | rows := bytes.Split(b.body, []byte{'\n'}) 72 | 73 | skip := 0 74 | 75 | for i := 0; i < len(rows); i++ { 76 | if len(rows[i]) == 0 { 77 | skip++ 78 | continue 79 | } 80 | 81 | if onlySeries && rows[i][len(rows[i])-1] == '.' { 82 | skip++ 83 | continue 84 | } 85 | 86 | if skip > 0 { 87 | rows[i-skip] = rows[i] 88 | } 89 | } 90 | 91 | rows = rows[:len(rows)-skip] 92 | 93 | return rows 94 | } 95 | 96 | func (b *BaseFinder) List() [][]byte { 97 | return b.makeList(false) 98 | } 99 | 100 | func (b *BaseFinder) Series() [][]byte { 101 | return b.makeList(true) 102 | } 103 | 104 | func (b *BaseFinder) Abs(v []byte) []byte { 105 | return v 106 | } 107 | 108 | func (b *BaseFinder) Bytes() ([]byte, error) { 109 | return b.body, nil 110 | } 111 | 112 | func (b *BaseFinder) Stats() []metrics.FinderStat { 113 | return b.stats 114 | } 115 | -------------------------------------------------------------------------------- /finder/blacklist.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "context" 5 | "regexp" 6 | 7 | "github.com/lomik/graphite-clickhouse/config" 8 | "github.com/lomik/graphite-clickhouse/metrics" 9 | ) 10 | 11 | type BlacklistFinder struct { 12 | wrapped Finder 13 | blacklist []*regexp.Regexp // config 14 | matched bool 15 | } 16 | 17 | func WrapBlacklist(f Finder, blacklist []*regexp.Regexp) *BlacklistFinder { 18 
| return &BlacklistFinder{ 19 | wrapped: f, 20 | blacklist: blacklist, 21 | } 22 | } 23 | 24 | func (p *BlacklistFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 25 | for i := 0; i < len(p.blacklist); i++ { 26 | if p.blacklist[i].MatchString(query) { 27 | p.matched = true 28 | return 29 | } 30 | } 31 | 32 | return p.wrapped.Execute(ctx, config, query, from, until) 33 | } 34 | 35 | func (p *BlacklistFinder) List() [][]byte { 36 | if p.matched { 37 | return [][]byte{} 38 | } 39 | 40 | return p.wrapped.List() 41 | } 42 | 43 | // For Render 44 | func (p *BlacklistFinder) Series() [][]byte { 45 | if p.matched { 46 | return [][]byte{} 47 | } 48 | 49 | return p.wrapped.Series() 50 | } 51 | 52 | func (p *BlacklistFinder) Abs(v []byte) []byte { 53 | return p.wrapped.Abs(v) 54 | } 55 | 56 | func (p *BlacklistFinder) Bytes() ([]byte, error) { 57 | return nil, ErrNotImplemented 58 | } 59 | 60 | func (p *BlacklistFinder) Stats() []metrics.FinderStat { 61 | return p.wrapped.Stats() 62 | } 63 | -------------------------------------------------------------------------------- /finder/date.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/lomik/graphite-clickhouse/config" 9 | "github.com/lomik/graphite-clickhouse/helper/clickhouse" 10 | "github.com/lomik/graphite-clickhouse/metrics" 11 | "github.com/lomik/graphite-clickhouse/pkg/scope" 12 | "github.com/lomik/graphite-clickhouse/pkg/where" 13 | ) 14 | 15 | type DateFinder struct { 16 | *BaseFinder 17 | tableVersion int 18 | } 19 | 20 | func NewDateFinder(url string, table string, tableVersion int, opts clickhouse.Options) Finder { 21 | if tableVersion == 3 { 22 | return NewDateFinderV3(url, table, opts) 23 | } 24 | 25 | b := &BaseFinder{ 26 | url: url, 27 | table: table, 28 | opts: opts, 29 | } 30 | 31 | return &DateFinder{b, tableVersion} 32 | } 
33 | 34 | func (b *DateFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 35 | w := b.where(query) 36 | 37 | dateWhere := where.New() 38 | dateWhere.Andf( 39 | "Date >='%s' AND Date <= '%s'", 40 | time.Unix(from, 0).Format("2006-01-02"), 41 | time.Unix(until, 0).Format("2006-01-02"), 42 | ) 43 | 44 | b.stats = append(b.stats, metrics.FinderStat{}) 45 | stat := &b.stats[len(b.stats)-1] 46 | 47 | if b.tableVersion == 2 { 48 | b.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query( 49 | scope.WithTable(ctx, b.table), 50 | b.url, 51 | // TODO: consider consistent query generator 52 | fmt.Sprintf(`SELECT Path FROM %s PREWHERE (%s) WHERE %s GROUP BY Path FORMAT TabSeparatedRaw`, b.table, dateWhere, w), 53 | b.opts, 54 | nil, 55 | ) 56 | } else { 57 | b.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query( 58 | scope.WithTable(ctx, b.table), 59 | b.url, 60 | // TODO: consider consistent query generator 61 | fmt.Sprintf(`SELECT DISTINCT Path FROM %s PREWHERE (%s) WHERE (%s) FORMAT TabSeparatedRaw`, b.table, dateWhere, w), 62 | b.opts, 63 | nil, 64 | ) 65 | } 66 | 67 | stat.ReadBytes = int64(len(b.body)) 68 | stat.Table = b.table 69 | 70 | return 71 | } 72 | -------------------------------------------------------------------------------- /finder/date_reverse.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/lomik/graphite-clickhouse/config" 8 | "github.com/lomik/graphite-clickhouse/helper/clickhouse" 9 | "github.com/lomik/graphite-clickhouse/helper/date" 10 | "github.com/lomik/graphite-clickhouse/metrics" 11 | "github.com/lomik/graphite-clickhouse/pkg/scope" 12 | "github.com/lomik/graphite-clickhouse/pkg/where" 13 | ) 14 | 15 | type DateFinderV3 struct { 16 | *BaseFinder 17 | } 18 | 19 | // Same as v2, but reversed 20 | func NewDateFinderV3(url string, table string, opts 
clickhouse.Options) Finder { 21 | b := &BaseFinder{ 22 | url: url, 23 | table: table, 24 | opts: opts, 25 | } 26 | 27 | return &DateFinderV3{b} 28 | } 29 | 30 | func (f *DateFinderV3) whereFilter(query string, from int64, until int64) (*where.Where, *where.Where) { 31 | w := f.where(ReverseString(query)) 32 | 33 | dateWhere := where.New() 34 | dateWhere.Andf( 35 | "Date >='%s' AND Date <= '%s'", 36 | date.FromTimestampToDaysFormat(from), 37 | date.UntilTimestampToDaysFormat(until), 38 | ) 39 | 40 | return w, dateWhere 41 | } 42 | 43 | func (f *DateFinderV3) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 44 | w, dateWhere := f.whereFilter(query, from, until) 45 | 46 | f.stats = append(f.stats, metrics.FinderStat{}) 47 | stat := &f.stats[len(f.stats)-1] 48 | 49 | f.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query( 50 | scope.WithTable(ctx, f.table), 51 | f.url, 52 | // TODO: consider consistent query generator 53 | fmt.Sprintf(`SELECT Path FROM %s WHERE (%s) AND (%s) GROUP BY Path FORMAT TabSeparatedRaw`, f.table, dateWhere, w), 54 | f.opts, 55 | nil, 56 | ) 57 | stat.Table = f.table 58 | stat.ReadBytes = int64(len(f.body)) 59 | 60 | return 61 | } 62 | 63 | func (f *DateFinderV3) List() [][]byte { 64 | list := f.BaseFinder.List() 65 | for i := 0; i < len(list); i++ { 66 | list[i] = ReverseBytes(list[i]) 67 | } 68 | 69 | return list 70 | } 71 | 72 | func (f *DateFinderV3) Series() [][]byte { 73 | list := f.BaseFinder.Series() 74 | for i := 0; i < len(list); i++ { 75 | list[i] = ReverseBytes(list[i]) 76 | } 77 | 78 | return list 79 | } 80 | -------------------------------------------------------------------------------- /finder/date_reverse_test.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/lomik/graphite-clickhouse/helper/clickhouse" 8 | 
"github.com/lomik/graphite-clickhouse/helper/date" 9 | ) 10 | 11 | func TestDateFinderV3_whereFilter(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | query string 15 | from int64 16 | until int64 17 | want string 18 | wantDate string 19 | }{ 20 | { 21 | name: "midnight at utc (direct)", 22 | query: "test.metric*", 23 | from: 1668124800, // 2022-11-11 00:00:00 UTC 24 | until: 1668124810, // 2022-11-11 00:00:10 UTC 25 | want: "(Level=2) AND (Path LIKE 'metric%' AND match(Path, '^metric([^.]*?)[.]test[.]?$'))", 26 | wantDate: "Date >='" + date.FromTimestampToDaysFormat(1668124800) + "' AND Date <= '" + date.UntilTimestampToDaysFormat(1668124810) + "'", 27 | }, 28 | { 29 | name: "midnight at utc (reverse)", 30 | query: "*test.metric", 31 | from: 1668124800, // 2022-11-11 00:00:00 UTC 32 | until: 1668124810, // 2022-11-11 00:00:10 UTC 33 | want: "(Level=2) AND (Path LIKE 'metric.%' AND match(Path, '^metric[.]([^.]*?)test[.]?$'))", 34 | wantDate: "Date >='" + date.FromTimestampToDaysFormat(1668124800) + "' AND Date <= '" + date.UntilTimestampToDaysFormat(1668124810) + "'", 35 | }, 36 | } 37 | for _, tt := range tests { 38 | t.Run(tt.name+" "+time.Unix(tt.from, 0).Format(time.RFC3339), func(t *testing.T) { 39 | f := NewDateFinderV3("http://localhost:8123/", "graphite_index", clickhouse.Options{}).(*DateFinderV3) 40 | 41 | got, gotDate := f.whereFilter(tt.query, tt.from, tt.until) 42 | if got.String() != tt.want { 43 | t.Errorf("DateFinderV3.whereFilter()[0] = %v, want %v", got, tt.want) 44 | } 45 | 46 | if gotDate.String() != tt.wantDate { 47 | t.Errorf("DateFinderV3.whereFilter()[1] = %v, want %v", gotDate, tt.wantDate) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /finder/mock.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "strings" 7 | 8 | "github.com/lomik/graphite-clickhouse/config" 
9 | "github.com/lomik/graphite-clickhouse/metrics" 10 | ) 11 | 12 | // MockFinder is used for testing purposes 13 | type MockFinder struct { 14 | fnd Finder 15 | query string // logged from execute 16 | } 17 | 18 | // NewMockFinder returns new MockFinder object with given result 19 | func NewMockFinder(result [][]byte) *MockFinder { 20 | return &MockFinder{ 21 | fnd: NewCachedIndex(bytes.Join(result, []byte{'\n'})), 22 | } 23 | } 24 | 25 | // NewMockTagged returns new MockFinder object with given result 26 | func NewMockTagged(result [][]byte) *MockFinder { 27 | return &MockFinder{ 28 | fnd: NewCachedTags(bytes.Join(result, []byte{'\n'})), 29 | } 30 | } 31 | 32 | // Execute assigns given query to the query field 33 | func (m *MockFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 34 | m.query = query 35 | return 36 | } 37 | 38 | // List returns the result 39 | func (m *MockFinder) List() [][]byte { 40 | return m.fnd.List() 41 | } 42 | 43 | // Series returns the result 44 | func (m *MockFinder) Series() [][]byte { 45 | return m.fnd.Series() 46 | } 47 | 48 | // Abs returns the same given v 49 | func (m *MockFinder) Abs(v []byte) []byte { 50 | return m.fnd.Abs(v) 51 | } 52 | 53 | func (m *MockFinder) Bytes() ([]byte, error) { 54 | return m.fnd.Bytes() 55 | } 56 | 57 | // Strings returns the result converted to []string 58 | func (m *MockFinder) Strings() []string { 59 | body, _ := m.fnd.Bytes() 60 | return strings.Split(string(body), "\n") 61 | } 62 | 63 | func (m *MockFinder) Stats() []metrics.FinderStat { 64 | return m.fnd.Stats() 65 | } 66 | -------------------------------------------------------------------------------- /finder/plain_from_tagged_test.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestPlainFromTaggedFinderAbs(t *testing.T) { 10 | assert 
// ReverseString returns the metric path with its dot-separated segments
// in reverse order: "a.b.c" -> "c.b.a".
func ReverseString(target string) string {
	segs := strings.Split(target, ".")

	for i, j := 0, len(segs)-1; i < j; i, j = i+1, j-1 {
		segs[i], segs[j] = segs[j], segs[i]
	}

	return strings.Join(segs, ".")
}

// ReverseBytes is the []byte counterpart of ReverseString.
func ReverseBytes(target []byte) []byte {
	// @TODO: check performance
	segs := bytes.Split(target, []byte{'.'})

	for i, j := 0, len(segs)-1; i < j; i, j = i+1, j-1 {
		segs[i], segs[j] = segs[j], segs[i]
	}

	return bytes.Join(segs, []byte{'.'})
}
*ReverseFinder { 46 | return &ReverseFinder{ 47 | wrapped: f, 48 | baseFinder: NewBase(url, table, opts), 49 | url: url, 50 | table: table, 51 | } 52 | } 53 | 54 | func (r *ReverseFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64) (err error) { 55 | p := strings.LastIndexByte(query, '.') 56 | if p < 0 || p >= len(query)-1 { 57 | return r.wrapped.Execute(ctx, config, query, from, until) 58 | } 59 | 60 | if where.HasWildcard(query[p+1:]) { 61 | return r.wrapped.Execute(ctx, config, query, from, until) 62 | } 63 | 64 | r.isUsed = true 65 | 66 | return r.baseFinder.Execute(ctx, config, ReverseString(query), from, until) 67 | } 68 | 69 | func (r *ReverseFinder) List() [][]byte { 70 | if !r.isUsed { 71 | return r.wrapped.List() 72 | } 73 | 74 | list := r.baseFinder.List() 75 | for i := 0; i < len(list); i++ { 76 | list[i] = ReverseBytes(list[i]) 77 | } 78 | 79 | return list 80 | } 81 | 82 | func (r *ReverseFinder) Series() [][]byte { 83 | if !r.isUsed { 84 | return r.wrapped.Series() 85 | } 86 | 87 | list := r.baseFinder.Series() 88 | for i := 0; i < len(list); i++ { 89 | list[i] = ReverseBytes(list[i]) 90 | } 91 | 92 | return list 93 | } 94 | 95 | func (r *ReverseFinder) Abs(v []byte) []byte { 96 | return v 97 | } 98 | 99 | func (f *ReverseFinder) Bytes() ([]byte, error) { 100 | return f.wrapped.Bytes() 101 | } 102 | 103 | func (f *ReverseFinder) Stats() []metrics.FinderStat { 104 | return f.wrapped.Stats() 105 | } 106 | -------------------------------------------------------------------------------- /finder/reverse_test.go: -------------------------------------------------------------------------------- 1 | package finder 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestReverse(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | table := []string{ 13 | "hello.world", "world.hello", 14 | "hello.", ".hello", 15 | "hello", "hello", 16 | ".", ".", 17 | "a1.b2.c3", 
// ishex reports whether c is an ASCII hexadecimal digit.
func ishex(c byte) bool {
	return ('0' <= c && c <= '9') ||
		('a' <= c && c <= 'f') ||
		('A' <= c && c <= 'F')
}

// unhex returns the numeric value of the hex digit c, or 0 for a
// non-hex byte.
func unhex(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}

	return 0
}

// isPercentEscape reports whether s[i:] starts a valid %XX escape.
func isPercentEscape(s string, i int) bool {
	return i+2 < len(s) && ishex(s[i+1]) && ishex(s[i+2])
}

// unescape decodes %XX escapes in s; malformed or truncated escapes are
// kept verbatim instead of producing an error.
func unescape(s string) string {
	first := strings.IndexByte(s, '%')
	if first < 0 {
		return s
	}

	var out strings.Builder

	out.Grow(len(s))
	out.WriteString(s[:first])

	i := first
	for i < len(s) {
		if s[i] != '%' {
			out.WriteByte(s[i])
			i++

			continue
		}

		if i+3 > len(s) {
			// truncated escape at the end of the string: keep it as-is
			out.WriteString(s[i:])
			break
		}

		if isPercentEscape(s, i) {
			out.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
		} else {
			out.WriteString(s[i : i+3])
		}

		i += 3
	}

	return out.String()
}
// FormatType enumerates the response formats the client can request.
type FormatType int

const (
	FormatDefault FormatType = iota
	FormatJSON
	FormatProtobuf
	FormatPb_v2 // alias for FormatProtobuf
	FormatPb_v3
	FormatPickle
)

// formatStrings is indexed by FormatType; keep it in sync with the consts.
var formatStrings []string = []string{"default", "json", "protobuf", "carbonapi_v2_pb", "carbonapi_v3_pb", "pickle"}

// String returns the canonical name of the format.
func (a *FormatType) String() string {
	return formatStrings[*a]
}

// FormatTypes lists every accepted format name.
func FormatTypes() []string {
	return formatStrings
}

// Set parses value into a FormatType. It accepts exactly the names
// returned by FormatTypes; anything else is an error.
func (a *FormatType) Set(value string) error {
	switch value {
	// "default" is advertised by FormatTypes(), so it must round-trip here.
	case "default":
		*a = FormatDefault
	case "json":
		*a = FormatJSON
	case "protobuf":
		*a = FormatProtobuf
	case "carbonapi_v2_pb":
		*a = FormatPb_v2
	case "carbonapi_v3_pb":
		*a = FormatPb_v3
	case "pickle":
		*a = FormatPickle
	default:
		return fmt.Errorf("invalid format type %s", value)
	}

	return nil
}

// UnmarshalText implements encoding.TextUnmarshaler via Set.
func (a *FormatType) UnmarshalText(text []byte) error {
	return a.Set(string(text))
}
errors.New("invalid query") 55 | 56 | //ErrEmptyQuery = errors.New("missing query") 57 | ) 58 | -------------------------------------------------------------------------------- /helper/errs/errors.go: -------------------------------------------------------------------------------- 1 | package errs 2 | 3 | import "fmt" 4 | 5 | type ErrorWithCode struct { 6 | err string 7 | Code int // error code 8 | } 9 | 10 | func NewErrorWithCode(err string, code int) error { 11 | return ErrorWithCode{err, code} 12 | } 13 | 14 | func NewErrorfWithCode(code int, f string, args ...interface{}) error { 15 | return ErrorWithCode{fmt.Sprintf(f, args...), code} 16 | } 17 | 18 | func (e ErrorWithCode) Error() string { return e.err } 19 | -------------------------------------------------------------------------------- /helper/headers/headers.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import "net/http" 4 | 5 | func GetHeaders(header *http.Header, keys []string) map[string]string { 6 | if len(keys) > 0 { 7 | headers := make(map[string]string) 8 | 9 | for _, key := range keys { 10 | value := header.Get(key) 11 | if len(value) > 0 { 12 | headers[key] = value 13 | } 14 | } 15 | 16 | return headers 17 | } 18 | 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /helper/pickle/pickle.go: -------------------------------------------------------------------------------- 1 | package pickle 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | "math" 7 | ) 8 | 9 | var EmptyList = []byte{0x28, 0x6c, 0x70, 0x30, 0xa, 0x2e} 10 | 11 | // Pickle encoder 12 | type Writer struct { 13 | w io.Writer 14 | } 15 | 16 | func NewWriter(w io.Writer) *Writer { 17 | return &Writer{w: w} 18 | } 19 | 20 | func (p *Writer) Mark() { 21 | p.w.Write([]byte{'('}) 22 | } 23 | 24 | func (p *Writer) Stop() { 25 | p.w.Write([]byte{'.'}) 26 | } 27 | 28 | func (p *Writer) Append() { 29 | 
p.w.Write([]byte{'a'}) 30 | } 31 | 32 | func (p *Writer) SetItem() { 33 | p.w.Write([]byte{'s'}) 34 | } 35 | 36 | func (p *Writer) List() { 37 | p.w.Write([]byte{'(', 'l'}) 38 | } 39 | 40 | func (p *Writer) Dict() { 41 | p.w.Write([]byte{'(', 'd'}) 42 | } 43 | 44 | func (p *Writer) TupleEnd() { 45 | p.w.Write([]byte{'t'}) 46 | } 47 | 48 | func (p *Writer) Bytes(byt []byte) { 49 | l := len(byt) 50 | 51 | if l < 256 { 52 | p.w.Write([]byte{'U', byte(l)}) 53 | } else { 54 | var b [5]byte 55 | b[0] = 'T' 56 | binary.LittleEndian.PutUint32(b[1:5], uint32(l)) 57 | p.w.Write(b[:]) 58 | } 59 | 60 | p.w.Write(byt) 61 | } 62 | 63 | func (p *Writer) String(v string) { 64 | p.Bytes([]byte(v)) 65 | } 66 | 67 | func (p *Writer) Uint32(v uint32) { 68 | p.w.Write([]byte{'J'}) 69 | 70 | var b [4]byte 71 | 72 | binary.LittleEndian.PutUint32(b[:], v) 73 | p.w.Write(b[:]) 74 | } 75 | 76 | func (p *Writer) AppendFloat64(v float64) { 77 | u := math.Float64bits(v) 78 | 79 | var b [10]byte 80 | b[0] = 'G' 81 | b[9] = 'a' 82 | 83 | binary.BigEndian.PutUint64(b[1:10], u) 84 | 85 | p.w.Write(b[:]) 86 | } 87 | 88 | func (p *Writer) AppendNulls(count int) { 89 | for i := 0; i < count; i++ { 90 | p.w.Write([]byte{'N', 'a'}) 91 | } 92 | } 93 | 94 | func (p *Writer) Bool(b bool) { 95 | if b { 96 | p.w.Write([]byte{'I', '0', '1', '\n'}) 97 | } else { 98 | p.w.Write([]byte{'I', '0', '0', '\n'}) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /helper/point/point.go: -------------------------------------------------------------------------------- 1 | package point 2 | 3 | import "fmt" 4 | 5 | type Point struct { 6 | MetricID uint32 7 | Value float64 8 | Time uint32 9 | Timestamp uint32 // keep max if metric and time equal on two points 10 | } 11 | 12 | // GetValueOrNaN returns Value for the next point or NaN if the value is omited. ErrTimeGreaterStop shows the normal ending. 
Any else error is considered as real error 13 | type GetValueOrNaN func() (float64, error) 14 | 15 | // ErrTimeGreaterStop shows the correct over for GetValueOrNaN 16 | var ErrTimeGreaterStop = fmt.Errorf("the points for time interval are rover") 17 | 18 | // ErrWrongMetricID shows the Point.MetricID is wrong somehow 19 | var ErrWrongMetricID = fmt.Errorf("the point MetricID is wrong") 20 | 21 | // ErrPointsUnsorted returns for unsorted []Point or Points 22 | var ErrPointsUnsorted = fmt.Errorf("the points are unsorted") 23 | -------------------------------------------------------------------------------- /helper/rollup/aggr.go: -------------------------------------------------------------------------------- 1 | package rollup 2 | 3 | import ( 4 | "github.com/lomik/graphite-clickhouse/helper/point" 5 | ) 6 | 7 | var AggrMap = map[string]*Aggr{ 8 | "avg": {"avg", AggrAvg}, 9 | "max": {"max", AggrMax}, 10 | "min": {"min", AggrMin}, 11 | "sum": {"sum", AggrSum}, 12 | "any": {"any", AggrAny}, 13 | "anyLast": {"anyLast", AggrAnyLast}, 14 | } 15 | 16 | type Aggr struct { 17 | name string 18 | f func(points []point.Point) (r float64) 19 | } 20 | 21 | func (ag *Aggr) Name() string { 22 | if ag == nil { 23 | return "" 24 | } 25 | 26 | return ag.name 27 | } 28 | 29 | func (ag *Aggr) String() string { 30 | if ag == nil { 31 | return "" 32 | } 33 | 34 | return ag.name 35 | } 36 | 37 | func (ag *Aggr) Do(points []point.Point) (r float64) { 38 | if ag == nil || ag.f == nil { 39 | return 0 40 | } 41 | 42 | return ag.f(points) 43 | } 44 | 45 | func AggrSum(points []point.Point) (r float64) { 46 | for _, p := range points { 47 | r += p.Value 48 | } 49 | 50 | return 51 | } 52 | 53 | func AggrMax(points []point.Point) (r float64) { 54 | if len(points) > 0 { 55 | r = points[0].Value 56 | } 57 | 58 | for _, p := range points { 59 | if p.Value > r { 60 | r = p.Value 61 | } 62 | } 63 | 64 | return 65 | } 66 | 67 | func AggrMin(points []point.Point) (r float64) { 68 | if len(points) > 0 { 
69 | r = points[0].Value 70 | } 71 | 72 | for _, p := range points { 73 | if p.Value < r { 74 | r = p.Value 75 | } 76 | } 77 | 78 | return 79 | } 80 | 81 | func AggrAvg(points []point.Point) (r float64) { 82 | if len(points) == 0 { 83 | return 84 | } 85 | 86 | r = AggrSum(points) / float64(len(points)) 87 | 88 | return 89 | } 90 | 91 | func AggrAny(points []point.Point) (r float64) { 92 | if len(points) > 0 { 93 | r = points[0].Value 94 | } 95 | 96 | return 97 | } 98 | 99 | func AggrAnyLast(points []point.Point) (r float64) { 100 | if len(points) > 0 { 101 | r = points[len(points)-1].Value 102 | } 103 | 104 | return 105 | } 106 | -------------------------------------------------------------------------------- /helper/rollup/compact.go: -------------------------------------------------------------------------------- 1 | package rollup 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | ) 8 | 9 | /* 10 | compact form of rollup rules for tests 11 | 12 | regexp;function;age:precision,age:precision,... 
13 | */ 14 | 15 | func parseCompact(body string) (*Rules, error) { 16 | lines := strings.Split(body, "\n") 17 | patterns := make([]Pattern, 0) 18 | 19 | for _, line := range lines { 20 | if strings.TrimSpace(line) == "" { 21 | continue 22 | } 23 | 24 | p2 := strings.LastIndexByte(line, ';') 25 | if p2 < 0 { 26 | return nil, fmt.Errorf("can't parse line: %#v", line) 27 | } 28 | 29 | p1 := strings.LastIndexByte(line[:p2], ';') 30 | if p1 < 0 { 31 | return nil, fmt.Errorf("can't parse line: %#v", line) 32 | } 33 | 34 | regexp := strings.TrimSpace(line[:p1]) 35 | function := strings.TrimSpace(line[p1+1 : p2]) 36 | retention := make([]Retention, 0) 37 | 38 | if strings.TrimSpace(line[p2+1:]) != "" { 39 | arr := strings.Split(line[p2+1:], ",") 40 | 41 | for _, r := range arr { 42 | p := strings.Split(r, ":") 43 | if len(p) != 2 { 44 | return nil, fmt.Errorf("can't parse line: %#v", line) 45 | } 46 | 47 | age, err := strconv.ParseUint(strings.TrimSpace(p[0]), 10, 32) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | precision, err := strconv.ParseUint(strings.TrimSpace(p[1]), 10, 32) 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | retention = append(retention, Retention{Age: uint32(age), Precision: uint32(precision)}) 58 | } 59 | } 60 | 61 | patterns = append(patterns, Pattern{Regexp: regexp, Function: function, Retention: retention}) 62 | } 63 | 64 | return (&Rules{Pattern: patterns}).compile() 65 | } 66 | -------------------------------------------------------------------------------- /helper/rollup/compact_test.go: -------------------------------------------------------------------------------- 1 | package rollup 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseCompact(t *testing.T) { 10 | config := ` 11 | click_cost;any;0:3600,86400:60 12 | ;max;0:60,3600:300,86400:3600` 13 | 14 | expected, _ := (&Rules{ 15 | Pattern: []Pattern{ 16 | {Regexp: "click_cost", Function: "any", Retention: 
[]Retention{ 17 | {Age: 0, Precision: 3600}, 18 | {Age: 86400, Precision: 60}, 19 | }}, 20 | {Regexp: "", Function: "max", Retention: []Retention{ 21 | {Age: 0, Precision: 60}, 22 | {Age: 3600, Precision: 300}, 23 | {Age: 86400, Precision: 3600}, 24 | }}, 25 | }, 26 | }).compile() 27 | 28 | assert := assert.New(t) 29 | r, err := parseCompact(config) 30 | assert.NoError(err) 31 | assert.Equal(expected, r) 32 | } 33 | -------------------------------------------------------------------------------- /helper/rollup/xml.go: -------------------------------------------------------------------------------- 1 | package rollup 2 | 3 | import ( 4 | "encoding/xml" 5 | ) 6 | 7 | /* 8 | 9 | 10 | click_cost 11 | any 12 | 13 | 0 14 | 3600 15 | 16 | 17 | 86400 18 | 60 19 | 20 | 21 | 22 | max 23 | 24 | 0 25 | 60 26 | 27 | 28 | 3600 29 | 300 30 | 31 | 32 | 86400 33 | 3600 34 | 35 | 36 | 37 | */ 38 | 39 | type ClickhouseRollupXML struct { 40 | Rules RulesXML `xml:"graphite_rollup"` 41 | } 42 | 43 | type RetentionXML struct { 44 | Age uint32 `xml:"age"` 45 | Precision uint32 `xml:"precision"` 46 | } 47 | 48 | type PatternXML struct { 49 | RuleType RuleType `xml:"rule_type"` 50 | Regexp string `xml:"regexp"` 51 | Function string `xml:"function"` 52 | Retention []*RetentionXML `xml:"retention"` 53 | } 54 | 55 | type RulesXML struct { 56 | Pattern []*PatternXML `xml:"pattern"` 57 | Default *PatternXML `xml:"default"` 58 | } 59 | 60 | func (r *RetentionXML) retention() Retention { 61 | return Retention{Age: r.Age, Precision: r.Precision} 62 | } 63 | 64 | func (p *PatternXML) pattern() Pattern { 65 | result := Pattern{ 66 | RuleType: p.RuleType, 67 | Regexp: p.Regexp, 68 | Function: p.Function, 69 | Retention: make([]Retention, 0, len(p.Retention)), 70 | } 71 | 72 | for _, r := range p.Retention { 73 | result.Retention = append(result.Retention, r.retention()) 74 | } 75 | 76 | return result 77 | } 78 | 79 | func parseXML(body []byte) (*Rules, error) { 80 | r := &RulesXML{} 81 | 82 | err := 
xml.Unmarshal(body, r) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | // Maybe we've got Clickhouse's graphite.xml? 88 | if r.Default == nil && r.Pattern == nil { 89 | y := &ClickhouseRollupXML{} 90 | 91 | err = xml.Unmarshal(body, y) 92 | if err != nil { 93 | return nil, err 94 | } 95 | 96 | r = &y.Rules 97 | } 98 | 99 | patterns := make([]Pattern, 0, uint64(len(r.Pattern))+4) 100 | for _, p := range r.Pattern { 101 | patterns = append(patterns, p.pattern()) 102 | } 103 | 104 | if r.Default != nil { 105 | patterns = append(patterns, r.Default.pattern()) 106 | } 107 | 108 | return (&Rules{Pattern: patterns}).compile() 109 | } 110 | -------------------------------------------------------------------------------- /helper/tests/clickhouse/server.go: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "sync" 8 | "sync/atomic" 9 | ) 10 | 11 | type TestResponse struct { 12 | Headers map[string]string 13 | Body []byte 14 | Code int 15 | } 16 | 17 | type TestHandler struct { 18 | sync.RWMutex 19 | responceMap map[string]*TestResponse 20 | queries uint64 21 | } 22 | 23 | type TestServer struct { 24 | *httptest.Server 25 | handler *TestHandler 26 | } 27 | 28 | func (h *TestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 29 | body, _ := io.ReadAll(r.Body) 30 | 31 | req := string(body) 32 | 33 | h.RLock() 34 | resp, ok := h.responceMap[req] 35 | h.RUnlock() 36 | 37 | atomic.AddUint64(&h.queries, 1) 38 | 39 | if ok { 40 | for k, v := range resp.Headers { 41 | w.Header().Set(k, v) 42 | } 43 | 44 | if resp.Code == 0 || resp.Code == http.StatusOK { 45 | w.Write(resp.Body) 46 | } else { 47 | http.Error(w, string(resp.Body), http.StatusInternalServerError) 48 | } 49 | } else { 50 | http.Error(w, "Query not added: "+req, http.StatusInternalServerError) 51 | } 52 | } 53 | 54 | func NewTestServer() *TestServer { 55 | h := 
&TestHandler{responceMap: make(map[string]*TestResponse)} 56 | 57 | srv := httptest.NewServer(h) 58 | 59 | return &TestServer{Server: srv, handler: h} 60 | } 61 | 62 | func (s *TestServer) AddResponce(request string, response *TestResponse) { 63 | s.handler.Lock() 64 | s.handler.responceMap[request] = response 65 | s.handler.Unlock() 66 | } 67 | 68 | func (s *TestServer) Queries() uint64 { 69 | return s.handler.queries 70 | } 71 | -------------------------------------------------------------------------------- /helper/tests/compare/compare.go: -------------------------------------------------------------------------------- 1 | package compare 2 | 3 | import "math" 4 | 5 | const eps = 0.0000000001 6 | 7 | func NearlyEqualSlice(a, b []float64) bool { 8 | if len(a) != len(b) { 9 | return false 10 | } 11 | 12 | for i, v := range a { 13 | // "same" 14 | if math.IsNaN(a[i]) && math.IsNaN(b[i]) { 15 | continue 16 | } 17 | 18 | if math.IsNaN(a[i]) || math.IsNaN(b[i]) { 19 | // unexpected NaN 20 | return false 21 | } 22 | // "close enough" 23 | if math.Abs(v-b[i]) > eps { 24 | return false 25 | } 26 | } 27 | 28 | return true 29 | } 30 | 31 | func NearlyEqual(a, b float64) bool { 32 | if math.IsNaN(a) && math.IsNaN(b) { 33 | return true 34 | } 35 | 36 | if math.IsNaN(a) || math.IsNaN(b) { 37 | // unexpected NaN 38 | return false 39 | } 40 | 41 | if math.Abs(a-b) > eps { 42 | return false 43 | } 44 | 45 | return true 46 | } 47 | 48 | func Max(a, b int) int { 49 | if a >= b { 50 | return a 51 | } 52 | 53 | return b 54 | } 55 | -------------------------------------------------------------------------------- /helper/tests/compare/expand/expand.go: -------------------------------------------------------------------------------- 1 | package expand 2 | 3 | import ( 4 | "go/token" 5 | "go/types" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | func ExpandTimestamp(fs *token.FileSet, s string, replace map[string]string) (int64, error) { 11 | if s == "" { 12 | return 0, nil 13 | } 14 | 15 | 
for k, v := range replace { 16 | s = strings.ReplaceAll(s, k, v) 17 | } 18 | 19 | if tv, err := types.Eval(fs, nil, token.NoPos, s); err == nil { 20 | return strconv.ParseInt(tv.Value.String(), 10, 32) 21 | } else { 22 | return 0, err 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /helper/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "time" 4 | 5 | // TimestampTruncate truncate timestamp with duration 6 | func TimestampTruncate(ts int64, duration time.Duration) int64 { 7 | tm := time.Unix(ts, 0).UTC() 8 | return tm.Truncate(duration).UTC().Unix() 9 | } 10 | -------------------------------------------------------------------------------- /helper/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestTimestampTruncate(t *testing.T) { 10 | // reverse sorted 11 | tests := []struct { 12 | ts int64 13 | duration time.Duration 14 | want int64 15 | }{ 16 | { 17 | ts: 1628876563, 18 | duration: 2 * time.Second, 19 | want: 1628876562, 20 | }, 21 | { 22 | ts: 1628876563, 23 | duration: 10 * time.Second, 24 | want: 1628876560, 25 | }, 26 | { 27 | ts: 1628876563, 28 | duration: time.Minute, 29 | want: 1628876520, 30 | }, 31 | { 32 | ts: 1628876563, 33 | duration: time.Hour, 34 | want: 1628874000, 35 | }, 36 | { 37 | ts: 1628876563, 38 | duration: 24 * time.Hour, 39 | want: 1628812800, 40 | }, 41 | } 42 | for i, tt := range tests { 43 | t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { 44 | if got := TimestampTruncate(tt.ts, tt.duration); got != tt.want { 45 | t.Errorf("timestampTruncate(%d, %d) = %v, want %v", tt.ts, tt.duration, got, tt.want) 46 | } 47 | }) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /index/handler.go: 
// ServeHTTP serves the full metric index as a JSON array.
// It logs every request via the access logger on exit, regardless of outcome.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Two separately-named loggers: "http" for the access log line,
	// "index" attached to the request context for downstream code.
	accessLogger := scope.LoggerWithHeaders(r.Context(), r, h.config.Common.HeadersToLog).Named("http")
	logger := scope.LoggerWithHeaders(r.Context(), r, h.config.Common.HeadersToLog).Named("index")
	r = r.WithContext(scope.WithLogger(r.Context(), logger))

	status := http.StatusOK
	start := time.Now()

	// Deferred so the access log captures the final status and total duration.
	defer func() {
		d := time.Since(start)
		logs.AccessLog(accessLogger, h.config, r, status, d, time.Duration(0), false, false)
	}()

	i, err := New(h.config, r.Context())
	if err != nil {
		status = http.StatusBadRequest
		http.Error(w, err.Error(), status)

		return
	}

	// NOTE(review): errors from WriteJSON and Close are discarded, so a
	// mid-stream write failure is not reflected in the logged status.
	i.WriteJSON(w)
	i.Close()
}
io.ReadCloser 25 | 26 | var err error 27 | 28 | opts := clickhouse.Options{ 29 | TLSConfig: config.ClickHouse.TLSConfig, 30 | ConnectTimeout: config.ClickHouse.ConnectTimeout, 31 | } 32 | if config.ClickHouse.IndexTable != "" { 33 | opts.Timeout = config.ClickHouse.IndexTimeout 34 | reader, err = clickhouse.Reader( 35 | scope.WithTable(ctx, config.ClickHouse.IndexTable), 36 | config.ClickHouse.URL, 37 | fmt.Sprintf( 38 | "SELECT Path FROM %s WHERE Date = '%s' AND Level >= %d AND Level < %d GROUP BY Path", 39 | config.ClickHouse.IndexTable, finder.DefaultTreeDate, finder.TreeLevelOffset, finder.ReverseTreeLevelOffset, 40 | ), 41 | opts, 42 | nil, 43 | ) 44 | } else { 45 | opts.Timeout = config.ClickHouse.TreeTimeout 46 | reader, err = clickhouse.Reader( 47 | scope.WithTable(ctx, config.ClickHouse.TreeTable), 48 | config.ClickHouse.URL, 49 | fmt.Sprintf("SELECT Path FROM %s GROUP BY Path", config.ClickHouse.TreeTable), 50 | opts, 51 | nil, 52 | ) 53 | } 54 | 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | return &Index{ 60 | config: config, 61 | rowsReader: reader, 62 | }, nil 63 | } 64 | 65 | func (i *Index) Close() error { 66 | return i.rowsReader.Close() 67 | } 68 | 69 | func (i *Index) WriteJSON(w http.ResponseWriter) error { 70 | _, err := w.Write([]byte("[")) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | s := bufio.NewScanner(i.rowsReader) 76 | idx := 0 77 | 78 | for s.Scan() { 79 | b := s.Bytes() 80 | if len(b) == 0 { 81 | continue 82 | } 83 | 84 | if b[len(b)-1] == '.' 
{ 85 | continue 86 | } 87 | 88 | json_b, err := json.Marshal(string(b)) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | jsonParts := [][]byte{ 94 | nil, 95 | json_b, 96 | } 97 | if idx != 0 { 98 | jsonParts[0] = []byte{','} 99 | } 100 | 101 | jsonified := bytes.Join(jsonParts, []byte("")) 102 | 103 | _, err = w.Write(jsonified) 104 | if err != nil { 105 | return err 106 | } 107 | 108 | idx++ 109 | } 110 | 111 | if err := s.Err(); err != nil { 112 | return err 113 | } 114 | 115 | _, err = w.Write([]byte("]")) 116 | 117 | return err 118 | } 119 | -------------------------------------------------------------------------------- /index/index_test.go: -------------------------------------------------------------------------------- 1 | package index 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "io" 7 | "net/http/httptest" 8 | "strings" 9 | "testing" 10 | ) 11 | 12 | func TestWriteJSONEmptyRows(t *testing.T) { 13 | rows := []string{ 14 | "", 15 | "testing.leaf", 16 | "", 17 | "testing.leaf.node", 18 | "", 19 | } 20 | 21 | metrics, err := writeRows(rows) 22 | if err != nil { 23 | t.Fatalf("Error during transform or unmarshal: %s", err) 24 | } 25 | 26 | if len(metrics) != 2 { 27 | t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) 28 | } 29 | 30 | if metrics[0] != "testing.leaf" || metrics[1] != "testing.leaf.node" { 31 | t.Fatalf("Wrong metrics contents: %s", metrics) 32 | } 33 | } 34 | 35 | func TestWriteJSONNonleafRows(t *testing.T) { 36 | rows := []string{ 37 | "testing.leaf", 38 | "testing.nonleaf.", 39 | "testing.leaf.node", 40 | "testing.\"broken\".node", 41 | } 42 | 43 | metrics, err := writeRows(rows) 44 | if err != nil { 45 | t.Fatalf("Error during transform or unmarshal: %s", err) 46 | } 47 | 48 | if len(metrics) != 3 { 49 | t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) 50 | } 51 | 52 | if metrics[0] != "testing.leaf" || metrics[1] != "testing.leaf.node" || metrics[2] != "testing.\"broken\".node" { 53 | 
t.Fatalf("Wrong metrics contents: %s", metrics) 54 | } 55 | } 56 | 57 | func TestWriteJSONEmptyIndex(t *testing.T) { 58 | rows := []string{} 59 | 60 | metrics, err := writeRows(rows) 61 | if err != nil { 62 | t.Fatalf("Error during transform or unmarshal: %s", err) 63 | } 64 | 65 | if len(metrics) != 0 { 66 | t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) 67 | } 68 | } 69 | 70 | func indexForBytes(b []byte) *Index { 71 | buffer := bytes.NewBuffer(b) 72 | 73 | return &Index{ 74 | config: nil, 75 | rowsReader: io.NopCloser(buffer), 76 | } 77 | } 78 | 79 | func writeRows(rows []string) ([]string, error) { 80 | rowsBytes := []byte(strings.Join(rows, string('\n'))) 81 | index := indexForBytes(rowsBytes) 82 | mockResponse := httptest.NewRecorder() 83 | 84 | err := index.WriteJSON(mockResponse) 85 | if err != nil { 86 | return nil, err 87 | } 88 | 89 | var metrics []string 90 | 91 | err = json.Unmarshal(mockResponse.Body.Bytes(), &metrics) 92 | if err != nil { 93 | return nil, err 94 | } 95 | 96 | return metrics, nil 97 | } 98 | -------------------------------------------------------------------------------- /issues/daytime/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | 
url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /issues/daytime/graphite-clickhouse-internal-aggr.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | date-format = "both" 21 | 22 | [[data-table]] 23 | # # clickhouse table name 24 | table = "graphite" 25 | # # points in table are stored with reverse path 26 | reverse = false 27 | rollup-conf = "auto" 28 | 29 | [[logging]] 30 | logger = "" 31 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 32 | level = "info" 33 | encoding = "json" 34 | encoding-time = "iso8601" 35 | encoding-duration = "seconds" 36 | -------------------------------------------------------------------------------- /issues/daytime/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL 
}}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = false 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | date-format = "both" 21 | 22 | [[data-table]] 23 | # # clickhouse table name 24 | table = "graphite" 25 | # # points in table are stored with reverse path 26 | reverse = false 27 | rollup-conf = "auto" 28 | 29 | [[logging]] 30 | logger = "" 31 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 32 | level = "info" 33 | encoding = "json" 34 | encoding-time = "iso8601" 35 | encoding-duration = "seconds" 36 | -------------------------------------------------------------------------------- /limiter/interface.go: -------------------------------------------------------------------------------- 1 | package limiter 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | ) 7 | 8 | var ( 9 | ErrTimeout = errors.New("timeout exceeded") 10 | ErrOverflow = errors.New("storage maximum queries exceeded") 11 | ) 12 | 13 | type ServerLimiter interface { 14 | Capacity() int 15 | Enabled() bool 16 | TryEnter(ctx context.Context, s string) error 17 | Enter(ctx context.Context, s string) error 18 | Leave(ctx context.Context, s string) 19 | SendDuration(queueMs int64) 20 | Unregiter() 21 | } 22 | -------------------------------------------------------------------------------- /limiter/limiter.go: -------------------------------------------------------------------------------- 1 | package limiter 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/lomik/graphite-clickhouse/metrics" 7 | ) 8 | 9 | type limiter struct { 10 | ch chan struct{} 11 | cap int 12 | } 13 | 14 | // Limiter provides interface to limit amount of requests 15 | type Limiter struct { 16 | limiter limiter 17 | metrics metrics.WaitMetric 18 | } 19 | 20 | // NewServerLimiter creates a limiter for specific servers 
// NewLimiter creates a limiter that admits at most capacity concurrent
// requests. A non-positive capacity disables limiting and returns a
// NoopLimiter instead.
func NewLimiter(capacity int, enableMetrics bool, scope, sub string) ServerLimiter {
	if capacity <= 0 {
		return NoopLimiter{}
	}

	return &Limiter{
		limiter: limiter{
			ch: make(chan struct{}, capacity),
			cap: capacity,
		},
		metrics: metrics.NewWaitMetric(enableMetrics, scope, sub),
	}
}

// Capacity returns the configured maximum number of concurrent slots.
func (sl *Limiter) Capacity() int {
	return sl.limiter.capacity()
}

// Enter claims one of free slots or blocks until there is one.
// Every call counts toward Requests; a context timeout counts toward WaitErrors.
func (sl *Limiter) Enter(ctx context.Context, s string) (err error) {
	if err = sl.limiter.enter(ctx, s); err != nil {
		sl.metrics.WaitErrors.Add(1)
	}

	sl.metrics.Requests.Add(1)

	return
}

// TryEnter claims one of free slots without blocking; it returns
// ErrOverflow immediately when no slot is free.
func (sl *Limiter) TryEnter(ctx context.Context, s string) (err error) {
	if err = sl.limiter.tryEnter(ctx, s); err != nil {
		sl.metrics.WaitErrors.Add(1)
	}

	sl.metrics.Requests.Add(1)

	return
}

// Leave frees a slot in the limiter. Must be paired with a successful
// Enter/TryEnter.
func (sl *Limiter) Leave(ctx context.Context, s string) {
	sl.limiter.leave(ctx, s)
}

// SendDuration sends the queue wait time (ms) as a StatsD timing metric,
// if a timing metric name is configured.
func (sl *Limiter) SendDuration(queueMs int64) {
	if sl.metrics.WaitTimeName != "" {
		metrics.Gstatsd.Timing(sl.metrics.WaitTimeName, queueMs, 1.0)
	}
}

// Unregiter unregisters the graphite metric.
// NOTE(review): the name is a misspelling of "Unregister", but it matches
// the ServerLimiter interface contract and cannot be renamed here alone.
func (sl *Limiter) Unregiter() {
	sl.metrics.Unregister()
}

// Enabled returns true: this is a real limiter. A false value (NoopLimiter)
// means the limiter can be safely skipped.
func (sl *Limiter) Enabled() bool {
	return true
}

// capacity reports the configured slot count of the inner channel limiter.
func (sl *limiter) capacity() int {
	return sl.cap
}
88 | func (sl *limiter) enter(ctx context.Context, s string) error { 89 | select { 90 | case sl.ch <- struct{}{}: 91 | return nil 92 | case <-ctx.Done(): 93 | return ErrTimeout 94 | } 95 | } 96 | 97 | // TryEnter claims one of free slots without blocking. 98 | func (sl *limiter) tryEnter(ctx context.Context, s string) error { 99 | select { 100 | case sl.ch <- struct{}{}: 101 | return nil 102 | default: 103 | return ErrOverflow 104 | } 105 | } 106 | 107 | // Frees a slot in limiter 108 | func (sl *limiter) leave(ctx context.Context, s string) { 109 | <-sl.ch 110 | } 111 | -------------------------------------------------------------------------------- /limiter/noop.go: -------------------------------------------------------------------------------- 1 | package limiter 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | // ServerLimiter provides interface to limit amount of requests 8 | type NoopLimiter struct { 9 | } 10 | 11 | func (l NoopLimiter) Capacity() int { 12 | return 0 13 | } 14 | 15 | // Enter claims one of free slots or blocks until there is one. 
// Weight maps a normalized load average onto an effective server weight.
// Returns 1 (minimal weight) when weighting is disabled (weight <= 0 or
// degraged <= 1) or the host is overloaded (normalizedLoadAvg >= 2.0);
// returns 2*weight for an idle host; otherwise reduces weight
// logarithmically with load.
func Weight(weight int, degraged, degragedLoadAvg, normalizedLoadAvg float64) int64 {
	if weight <= 0 || degraged <= 1 || normalizedLoadAvg >= 2.0 {
		return 1
	}

	load := normalizedLoadAvg
	if load > degragedLoadAvg {
		// Past the degradation threshold the load is penalized further.
		load *= degraged
	}

	// Keep one decimal digit of precision.
	load = math.Round(load*10) / 10
	if load == 0 {
		// Idle host: double the configured weight.
		return int64(weight) * 2
	}

	reduced := int64(weight) - int64(float64(weight)*math.Log10(load))
	if reduced <= 0 {
		return 1
	}

	return reduced
}
-------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package load_avg 5 | 6 | func Normalized() (float64, error) { 7 | return 0, nil 8 | } 9 | 10 | func CpuCount() (uint64, error) { 11 | return 0, nil 12 | } 13 | -------------------------------------------------------------------------------- /load_avg/load_avg_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package load_avg 5 | 6 | import ( 7 | "os" 8 | "strings" 9 | "syscall" 10 | 11 | "github.com/msaf1980/go-stringutils" 12 | ) 13 | 14 | func Normalized() (float64, error) { 15 | var info syscall.Sysinfo_t 16 | 17 | err := syscall.Sysinfo(&info) 18 | if err != nil { 19 | return 0, err 20 | } 21 | 22 | cpus, err := CpuCount() 23 | if err != nil { 24 | return 0, err 25 | } 26 | 27 | const si_load_shift = 16 28 | load := float64(info.Loads[0]) / float64(1<" 10 | vendor: *m 11 | homepage: "https://github.com/go-graphite/${NAME}" 12 | license: "MIT" 13 | section: "admin" 14 | priority: "optional" 15 | 16 | contents: 17 | - src: deploy/root/usr/ 18 | dst: /usr 19 | expand: true 20 | - src: deploy/root/etc/logrotate.d/${NAME} 21 | dst: /etc/logrotate.d/${NAME} 22 | type: config|noreplace 23 | expand: true 24 | - src: out/root/etc/${NAME}/${NAME}.conf 25 | dst: /etc/${NAME}/${NAME}.conf 26 | type: config|noreplace 27 | expand: true 28 | - src: "out/${NAME}-linux-${ARCH}" 29 | dst: /usr/bin/${NAME} 30 | expand: true 31 | # docs 32 | - src: LICENSE 33 | dst: /usr/share/doc/${NAME}/LICENSE 34 | expand: true 35 | -------------------------------------------------------------------------------- /packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | cd "$( dirname "$0" )" 4 | ROOT=$PWD 5 | 6 | docker run -i -e "DEVEL=${DEVEL:-0}" --rm -v "$ROOT:/root/go/src/github.com/lomik/graphite-clickhouse" 
golang bash -e << 'EOF' 7 | cd /root/ 8 | export TZ=Europe/Moscow 9 | ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 10 | 11 | go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.40.0 12 | 13 | cd /root/go/src/github.com/lomik/graphite-clickhouse 14 | 15 | # go reads the VCS state 16 | git config --global --add safe.directory "$PWD" 17 | 18 | make nfpm-deb nfpm-rpm 19 | chmod -R a+w *.deb *.rpm out/ 20 | EOF 21 | -------------------------------------------------------------------------------- /pkg/alias/map.go: -------------------------------------------------------------------------------- 1 | package alias 2 | 3 | import ( 4 | "bytes" 5 | "sync" 6 | 7 | "github.com/lomik/graphite-clickhouse/finder" 8 | "github.com/lomik/graphite-clickhouse/pkg/reverse" 9 | ) 10 | 11 | // Value of Map 12 | type Value struct { 13 | Target string 14 | DisplayName string 15 | } 16 | 17 | // Map from real metric name to display name and target 18 | type Map struct { 19 | data map[string][]Value 20 | lock sync.RWMutex 21 | } 22 | 23 | // New returns new Map 24 | func New() *Map { 25 | return &Map{ 26 | data: make(map[string][]Value), 27 | lock: sync.RWMutex{}, 28 | } 29 | } 30 | 31 | // Merge data from finder.Result into aliases map 32 | func (m *Map) Merge(r finder.Result, useCache bool) { 33 | m.MergeTarget(r, "", useCache) 34 | } 35 | 36 | // MergeTarget data from finder.Result into aliases map 37 | func (m *Map) MergeTarget(r finder.Result, target string, saveCache bool) []byte { 38 | var buf bytes.Buffer 39 | 40 | series := r.Series() 41 | buf.Grow(len(series) * 24) 42 | 43 | for i := 0; i < len(series); i++ { 44 | if saveCache { 45 | buf.Write(series[i]) 46 | buf.WriteByte('\n') 47 | } 48 | 49 | key := string(series[i]) 50 | if len(key) == 0 { 51 | continue 52 | } 53 | 54 | abs := string(r.Abs(series[i])) 55 | 56 | m.lock.Lock() 57 | if x, ok := m.data[key]; ok { 58 | m.data[key] = append(x, Value{Target: target, DisplayName: abs}) 59 | } else { 60 | 
m.data[key] = []Value{{Target: target, DisplayName: abs}} 61 | } 62 | m.lock.Unlock() 63 | } 64 | 65 | if saveCache { 66 | return buf.Bytes() 67 | } else { 68 | return nil 69 | } 70 | } 71 | 72 | // Len returns count of keys 73 | func (m *Map) Len() int { 74 | m.lock.RLock() 75 | defer m.lock.RUnlock() 76 | 77 | return len(m.data) 78 | } 79 | 80 | // Size returns count of values 81 | func (m *Map) Size() int { 82 | s := 0 83 | 84 | m.lock.RLock() 85 | defer m.lock.RUnlock() 86 | 87 | for _, v := range m.data { 88 | s += len(v) 89 | } 90 | 91 | return s 92 | } 93 | 94 | // Series returns keys of aliases map 95 | func (m *Map) Series(isReverse bool) []string { 96 | series := make([]string, 0, m.Len()) 97 | 98 | for k := range m.data { 99 | if isReverse { 100 | series = append(series, reverse.String(k)) 101 | } else { 102 | series = append(series, k) 103 | } 104 | } 105 | 106 | return series 107 | } 108 | 109 | // DisplayNames returns DisplayName from all Values 110 | func (m *Map) DisplayNames() []string { 111 | dn := make([]string, 0, m.Size()) 112 | 113 | for _, v := range m.data { 114 | for _, a := range v { 115 | dn = append(dn, a.DisplayName) 116 | } 117 | } 118 | 119 | return dn 120 | } 121 | 122 | // Get returns aliases for metric 123 | func (m *Map) Get(metric string) []Value { 124 | return m.data[metric] 125 | } 126 | -------------------------------------------------------------------------------- /pkg/dry/math.go: -------------------------------------------------------------------------------- 1 | package dry 2 | 3 | // Max returns the larger of x or y. 4 | func Max(x, y int64) int64 { 5 | if x > y { 6 | return x 7 | } 8 | 9 | return y 10 | } 11 | 12 | // Min returns the lower of x or y. 13 | func Min(x, y int64) int64 { 14 | if x < y { 15 | return x 16 | } 17 | 18 | return y 19 | } 20 | 21 | // Ceil returns integer greater or equal to x and denominator d division. 22 | // Works only with x >= 0 and d > 0. It returns 0 with other values. 
// Ceil returns the smallest integer q such that q*d >= x.
// Defined only for x >= 0 and d > 0; any other input yields 0.
func Ceil(x, d int64) int64 {
	if x <= 0 || d <= 0 {
		return 0
	}

	return (x + d - 1) / d
}

// CeilToMultiplier returns the smallest multiple of m that is >= x.
// Defined only for x >= 0 and m > 0; any other input yields 0.
func CeilToMultiplier(x, m int64) int64 {
	return Ceil(x, m) * m
}

// FloorToMultiplier returns the largest multiple of m that is <= x.
// Defined only for x >= 0 and m > 0; any other input yields 0.
func FloorToMultiplier(x, m int64) int64 {
	if x <= 0 || m <= 0 {
		return 0
	}

	return x / m * m
}

// GCD returns the absolute greatest common divisor calculated via the
// Euclidean algorithm. GCD(0, 0) is 0.
func GCD(a, b int64) int64 {
	if b < 0 {
		b = -b
	}

	for b != 0 {
		a, b = b, a%b
	}

	// BUG FIX: Go's % keeps the dividend's sign, so the remainder chain can
	// end on a negative value when a < 0 (e.g. GCD(-6, -4) was -2 and
	// GCD(-4, 0) was -4). The result is documented as absolute.
	if a < 0 {
		a = -a
	}

	return a
}

// LCM returns the absolute least common multiple of two integers via GCD.
// LCM(0, 0) is 0.
func LCM(a, b int64) int64 {
	// Guard the only division-by-zero case (GCD(0, 0) == 0).
	if a == 0 && b == 0 {
		return 0
	}

	if a*b < 0 {
		return -a / GCD(a, b) * b
	}

	return a / GCD(a, b) * b
}
assert.Equal(int64(3), Ceil(5, 2)) 30 | assert.Equal(int64(1), Ceil(5, 5)) // if quotient is integer we should get quotient without +1 31 | assert.Equal(int64(2), Ceil(100001, 100000)) // if quotient is any fraction bigger than integer then we get +1 32 | } 33 | 34 | func TestCeilToMultiplier(t *testing.T) { 35 | assert := assert.New(t) 36 | 37 | assert.Equal(int64(0), CeilToMultiplier(0, -1)) 38 | assert.Equal(int64(0), CeilToMultiplier(1, 0)) 39 | assert.Equal(int64(0), CeilToMultiplier(1, -1)) 40 | assert.Equal(int64(2), CeilToMultiplier(1, 2)) 41 | assert.Equal(int64(6), CeilToMultiplier(4, 3)) 42 | assert.Equal(int64(6), CeilToMultiplier(6, 3)) 43 | } 44 | 45 | func TestFloorToMultiplier(t *testing.T) { 46 | assert := assert.New(t) 47 | 48 | assert.Equal(int64(0), FloorToMultiplier(0, -1)) 49 | assert.Equal(int64(0), FloorToMultiplier(1, 0)) 50 | assert.Equal(int64(0), FloorToMultiplier(1, -1)) 51 | assert.Equal(int64(0), FloorToMultiplier(1, 2)) 52 | assert.Equal(int64(3), FloorToMultiplier(4, 3)) 53 | assert.Equal(int64(6), FloorToMultiplier(6, 3)) 54 | } 55 | 56 | func TestGCD(t *testing.T) { 57 | assert := assert.New(t) 58 | 59 | assert.Equal(int64(1), GCD(1, -1)) 60 | assert.Equal(int64(1), GCD(-1, 1)) 61 | assert.Equal(int64(1), GCD(-1, -1)) 62 | assert.Equal(int64(1), GCD(1, 2)) 63 | assert.Equal(int64(1), GCD(4, 3)) 64 | assert.Equal(int64(3), GCD(6, 3)) 65 | } 66 | 67 | func TestLCM(t *testing.T) { 68 | assert := assert.New(t) 69 | 70 | assert.Equal(int64(1), LCM(1, -1)) 71 | assert.Equal(int64(1), LCM(-1, 1)) 72 | assert.Equal(int64(1), LCM(-1, -1)) 73 | assert.Equal(int64(2), LCM(1, 2)) 74 | assert.Equal(int64(6), LCM(6, 3)) 75 | assert.Equal(int64(12), LCM(4, 3)) 76 | } 77 | -------------------------------------------------------------------------------- /pkg/dry/strings.go: -------------------------------------------------------------------------------- 1 | package dry 2 | 3 | // RemoveEmptyStrings removes empty strings from list and returns 
// RemoveEmptyStrings filters "" entries out of stringList in place,
// preserving the order of the remaining elements, and returns the
// truncated slice (the backing array is reused).
func RemoveEmptyStrings(stringList []string) []string {
	kept := 0

	for _, s := range stringList {
		if s == "" {
			continue
		}

		stringList[kept] = s
		kept++
	}

	return stringList[:kept]
}
// String returns path with its dot-separated segments in reverse order:
// "a.b.c" -> "c.b.a". Tagged paths (containing '?') are returned
// unchanged, because a "name?tag=value" query must not be reversed.
func String(path string) string {
	// don't reverse tagged path
	if strings.IndexByte(path, '?') >= 0 {
		return path
	}

	a := strings.Split(path, ".")

	l := len(a)
	for i := 0; i < l/2; i++ {
		a[i], a[l-i-1] = a[l-i-1], a[i]
	}

	return strings.Join(a, ".")
}

// reverse flips the bytes of m in place.
func reverse(m []byte) {
	i := 0
	j := len(m) - 1

	for i < j {
		m[i], m[j] = m[j], m[i]
		i++
		j--
	}
}

// Inplace reverses the dot-separated segments of path in place, without
// allocating: the whole slice is reversed first, then every segment is
// reversed back to restore its characters. Tagged paths (containing '?')
// are left untouched.
func Inplace(path []byte) {
	if bytes.IndexByte(path, '?') >= 0 {
		return
	}

	reverse(path)

	var a, b int

	l := len(path)
	for b = 0; b < l; b++ {
		if path[b] == '.' {
			reverse(path[a:b])
			a = b + 1
		}
	}

	// Reverse the final segment (after the last dot).
	reverse(path[a:b])
}

// Bytes returns a reversed copy of path. The result never aliases the
// input: previously a tagged path was returned as the input slice itself,
// while other paths were copied — the copy is now made unconditionally so
// callers may safely mutate the result.
func Bytes(path []byte) []byte {
	r := make([]byte, len(path))
	copy(r, path)
	// Inplace performs the tagged-path ('?') check itself and leaves the
	// copy unchanged in that case.
	Inplace(r)

	return r
}
20 | func (c *Context) With(key string, value interface{}) *Context { 21 | return New(With(c.Context, key, value)) 22 | } 23 | 24 | // WithRequestID ... 25 | func (c *Context) WithRequestID(requestID string) *Context { 26 | return New(WithRequestID(c.Context, requestID)) 27 | } 28 | 29 | // WithLogger ... 30 | func (c *Context) WithLogger(logger *zap.Logger) *Context { 31 | return New(WithLogger(c.Context, logger)) 32 | } 33 | 34 | // WithTable ... 35 | func (c *Context) WithTable(table string) *Context { 36 | return New(WithTable(c.Context, table)) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/scope/http_request.go: -------------------------------------------------------------------------------- 1 | package scope 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "fmt" 7 | "math/rand" 8 | "net" 9 | "net/http" 10 | "regexp" 11 | "strings" 12 | ) 13 | 14 | var ( 15 | requestIdRegexp = regexp.MustCompile("^[a-zA-Z0-9_.-]+$") 16 | passHeaders = []string{ 17 | "X-Dashboard-Id", 18 | "X-Grafana-Org-Id", 19 | "X-Panel-Id", 20 | "X-Forwarded-For", 21 | } 22 | ) 23 | 24 | func HttpRequest(r *http.Request) *http.Request { 25 | requestID := r.Header.Get("X-Request-Id") 26 | if requestID == "" || !requestIdRegexp.MatchString(requestID) { 27 | var b [16]byte 28 | 29 | binary.LittleEndian.PutUint64(b[:], rand.Uint64()) 30 | binary.LittleEndian.PutUint64(b[8:], rand.Uint64()) 31 | requestID = fmt.Sprintf("%x", b) 32 | } 33 | 34 | ctx := r.Context() 35 | ctx = WithRequestID(ctx, requestID) 36 | 37 | // Process all X-Gch-Debug-* headers 38 | debugPrefix := "X-Gch-Debug-" 39 | for name, values := range r.Header { 40 | if strings.HasPrefix(name, debugPrefix) && len(values) != 0 && values[0] != "" { 41 | ctx = WithDebug(ctx, strings.TrimPrefix(name, debugPrefix)) 42 | } 43 | } 44 | 45 | // Append the server IP to X-Forwarded-For if exists, else ignore 46 | if xff := r.Header.Get("X-Forwarded-For"); xff != "" { 47 | clientIP, 
_, _ := net.SplitHostPort(r.RemoteAddr) 48 | r.Header.Set("X-Forwarded-For", fmt.Sprintf("%s, %s", xff, clientIP)) 49 | } 50 | 51 | for _, h := range passHeaders { 52 | hv := r.Header.Get(h) 53 | if hv != "" { 54 | ctx = With(ctx, h, hv) 55 | } 56 | } 57 | 58 | return r.WithContext(ctx) 59 | } 60 | 61 | func Grafana(ctx context.Context) string { 62 | o, d, p := String(ctx, "X-Grafana-Org-Id"), String(ctx, "X-Dashboard-Id"), String(ctx, "X-Panel-Id") 63 | if o != "" || d != "" || p != "" { 64 | return fmt.Sprintf("Org:%s; Dashboard:%s; Panel:%s", o, d, p) 65 | } 66 | 67 | return "" 68 | } 69 | -------------------------------------------------------------------------------- /pkg/scope/key.go: -------------------------------------------------------------------------------- 1 | package scope 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | ) 7 | 8 | // key is type for context.Value keys 9 | type scopeKey string 10 | 11 | // With returns a copy of parent in which the value associated with key is val. 12 | func With(ctx context.Context, key string, value interface{}) context.Context { 13 | return context.WithValue(ctx, scopeKey(key), value) 14 | } 15 | 16 | // String returns the string value associated with this context for key 17 | func String(ctx context.Context, key string) string { 18 | if value, ok := ctx.Value(scopeKey(key)).(string); ok { 19 | return value 20 | } 21 | 22 | return "" 23 | } 24 | 25 | // Bool returns the true if particular key of the context is set 26 | func Bool(ctx context.Context, key string) bool { 27 | if _, ok := ctx.Value(scopeKey(key)).(bool); ok { 28 | return true 29 | } 30 | 31 | return false 32 | } 33 | 34 | // WithRequestID ... 35 | func WithRequestID(ctx context.Context, requestID string) context.Context { 36 | return With(ctx, "requestID", requestID) 37 | } 38 | 39 | // RequestID ... 40 | func RequestID(ctx context.Context) string { 41 | return String(ctx, "requestID") 42 | } 43 | 44 | // WithTable ... 
45 | func WithTable(ctx context.Context, table string) context.Context { 46 | return With(ctx, "table", table) 47 | } 48 | 49 | // Table ... 50 | func Table(ctx context.Context) string { 51 | return String(ctx, "table") 52 | } 53 | 54 | // WithDebug returns the context with debug-name 55 | func WithDebug(ctx context.Context, name string) context.Context { 56 | return With(ctx, "debug-"+name, true) 57 | } 58 | 59 | // Debug returns true if debug-name should be enabled 60 | func Debug(ctx context.Context, name string) bool { 61 | return Bool(ctx, "debug-"+name) 62 | } 63 | 64 | // ClickhouseUserAgent ... 65 | func ClickhouseUserAgent(ctx context.Context) string { 66 | grafana := Grafana(ctx) 67 | if grafana != "" { 68 | return fmt.Sprintf("Graphite-Clickhouse/%s (table:%s) Grafana(%s)", Version, Table(ctx), grafana) 69 | } 70 | 71 | return fmt.Sprintf("Graphite-Clickhouse/%s (table:%s)", Version, Table(ctx)) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/scope/logger.go: -------------------------------------------------------------------------------- 1 | package scope 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/lomik/graphite-clickhouse/helper/headers" 8 | "github.com/lomik/zapwriter" 9 | "go.uber.org/zap" 10 | ) 11 | 12 | var ( 13 | CarbonapiUUIDName = "carbonapi_uuid" 14 | RequestHeadersName = "request_headers" 15 | ) 16 | 17 | // Logger returns zap.Logger instance 18 | func Logger(ctx context.Context) *zap.Logger { 19 | logger := ctx.Value(scopeKey("logger")) 20 | 21 | var zapLogger *zap.Logger 22 | 23 | if logger != nil { 24 | if zl, ok := logger.(*zap.Logger); ok { 25 | zapLogger = zl 26 | return zapLogger 27 | } 28 | } 29 | 30 | if zapLogger == nil { 31 | zapLogger = zapwriter.Default() 32 | } 33 | 34 | requestId := RequestID(ctx) 35 | if requestId != "" { 36 | zapLogger = zapLogger.With(zap.String("request_id", requestId)) 37 | } 38 | 39 | return zapLogger 40 | } 41 | 42 | // Logger 
returns zap.Logger instance 43 | func LoggerWithHeaders(ctx context.Context, r *http.Request, headersToLog []string) *zap.Logger { 44 | logger := ctx.Value(scopeKey("logger")) 45 | 46 | var zapLogger *zap.Logger 47 | 48 | if logger != nil { 49 | if zl, ok := logger.(*zap.Logger); ok { 50 | zapLogger = zl 51 | return zapLogger 52 | } 53 | } 54 | 55 | if zapLogger == nil { 56 | zapLogger = zapwriter.Default() 57 | } 58 | 59 | requestId := RequestID(ctx) 60 | if requestId != "" { 61 | zapLogger = zapLogger.With(zap.String("request_id", requestId)) 62 | } 63 | 64 | carbonapiUUID := r.Header.Get("X-Ctx-Carbonapi-Uuid") 65 | if carbonapiUUID != "" { 66 | zapLogger = zapLogger.With(zap.String("carbonapi_uuid", carbonapiUUID)) 67 | } 68 | 69 | requestHeaders := headers.GetHeaders(&r.Header, headersToLog) 70 | if len(requestHeaders) > 0 { 71 | zapLogger = zapLogger.With(zap.Any("request_headers", requestHeaders)) 72 | } 73 | 74 | return zapLogger 75 | } 76 | 77 | // WithLogger ... 78 | func WithLogger(ctx context.Context, logger *zap.Logger) context.Context { 79 | return With(ctx, "logger", logger) 80 | } 81 | -------------------------------------------------------------------------------- /pkg/scope/version.go: -------------------------------------------------------------------------------- 1 | package scope 2 | 3 | var Version string 4 | -------------------------------------------------------------------------------- /pkg/where/match_test.go: -------------------------------------------------------------------------------- 1 | package where 2 | 3 | import "testing" 4 | 5 | func Test_ClearGlob(t *testing.T) { 6 | tests := []struct { 7 | query string 8 | want string 9 | }{ 10 | {"a.{a,b}.te{s}t.b", "a.{a,b}.test.b"}, 11 | {"a.{a,b}.te{s,t}*.b", "a.{a,b}.te{s,t}*.b"}, 12 | {"a.{a,b}.test*.b", "a.{a,b}.test*.b"}, 13 | {"a.[b].te{s}t.b", "a.b.test.b"}, 14 | {"a.[ab].te{s,t}*.b", "a.[ab].te{s,t}*.b"}, 15 | {"a.{a,b.}.te{s,t}*.b", "a.{a,b.}.te{s,t}*.b"}, // some broken 16 | 
{"О.[б].те{s}t.b", "О.б.теst.b"}, // utf-8 string 17 | {"О.[].те{}t.b", "О..теt.b"}, // utf-8 string with empthy blocks 18 | } 19 | for _, tt := range tests { 20 | t.Run(tt.query, func(t *testing.T) { 21 | if got := ClearGlob(tt.query); got != tt.want { 22 | t.Errorf("ClearGlob() = %v, want %v", got, tt.want) 23 | } 24 | }) 25 | } 26 | } 27 | 28 | func Test_HasUnmatchedBrackets(t *testing.T) { 29 | tests := []struct { 30 | query string 31 | want bool 32 | }{ 33 | {"a.{a,b.te{s}t.b", true}, 34 | {"a.{a,b}.te{s}t.b", false}, 35 | {"a.{a,b}.te{s,t}}*.b", true}, 36 | {"a.{a,b}.test*.b", false}, 37 | {"a.a,b}.test*.b", true}, 38 | {"a.{a,b.test*.b}", true}, 39 | {"a.[a,b.test*.b]", true}, 40 | {"a.[a,b].test*.b", false}, 41 | {"a.[b].te{s}t.b", false}, 42 | {"a.{[cd],[ef]}.b", false}, 43 | {"a.[ab].te{s,t}*.b", false}, 44 | {"a.{a,b.}.te{s,t}*.b", true}, // dots are not escaped inside curly brackets 45 | {"О.[б].те{s}t.b", false}, // utf-8 string 46 | {"О.[б.теs}t.b", true}, 47 | {"О.[].те{}t.b", false}, // utf-8 string with empthy blocks 48 | } 49 | for _, tt := range tests { 50 | t.Run(tt.query, func(t *testing.T) { 51 | if got := HasUnmatchedBrackets(tt.query); got != tt.want { 52 | t.Errorf("HasUnmatchedBrackets() = %v, want %v", got, tt.want) 53 | } 54 | }) 55 | } 56 | } 57 | 58 | func TestGlob(t *testing.T) { 59 | field := "test" 60 | 61 | tests := []struct { 62 | query string 63 | want string 64 | }{ 65 | {"a.{a,b}.te{s}t.b", "test LIKE 'a.%' AND match(test, '^a[.](a|b)[.]test[.]b$')"}, 66 | {"a.{a,b}.te{s,t}*.b", "test LIKE 'a.%' AND match(test, '^a[.](a|b)[.]te(s|t)([^.]*?)[.]b$')"}, 67 | {"a.{a,b}.test*.b", "test LIKE 'a.%' AND match(test, '^a[.](a|b)[.]test([^.]*?)[.]b$')"}, 68 | {"a.[b].te{s}t.b", "test='a.b.test.b'"}, 69 | {"a.[ab].te{s,t}*.b", "test LIKE 'a.%' AND match(test, '^a[.][ab][.]te(s|t)([^.]*?)[.]b$')"}, 70 | } 71 | for _, tt := range tests { 72 | t.Run(tt.query, func(t *testing.T) { 73 | if got := Glob(field, tt.query); got != tt.want { 74 | 
t.Errorf("Glob() = %v, want %v", got, tt.want) 75 | } 76 | }) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /pkg/where/where_test.go: -------------------------------------------------------------------------------- 1 | package where 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestGlobExpandSimple(t *testing.T) { 11 | tests := []struct { 12 | value string 13 | want []string 14 | wantErr bool 15 | }{ 16 | {"{a,bc,d}", []string{"a", "bc", "d"}, false}, 17 | {"S{a,bc,d}", []string{"Sa", "Sbc", "Sd"}, false}, 18 | {"{a,bc,d}E", []string{"aE", "bcE", "dE"}, false}, 19 | {"S{a,bc,d}E", []string{"SaE", "SbcE", "SdE"}, false}, 20 | {"S{a,bc,d}E{f,h}", []string{"SaEf", "SaEh", "SbcEf", "SbcEh", "SdEf", "SdEh"}, false}, 21 | {"test{a,b}", []string{"testa", "testb"}, false}, 22 | {"S{a,bc,d}}E{f,h}", nil, true}, //error 23 | {"S{{a,bc,d}E{f,h}", nil, true}, //error 24 | } 25 | for _, tt := range tests { 26 | t.Run(tt.value, func(t *testing.T) { 27 | var got []string 28 | 29 | err := GlobExpandSimple(tt.value, "", &got) 30 | if tt.wantErr { 31 | assert.Error(t, err, "Expand() not returns error for %v", tt.value) 32 | } else { 33 | assert.NoErrorf(t, err, "Expand() returns error %v for %v", err, tt.value) 34 | } 35 | 36 | assert.Equal(t, tt.want, got, "Expand() result") 37 | }) 38 | } 39 | } 40 | 41 | func TestGlobToRegexp(t *testing.T) { 42 | table := []struct { 43 | glob string 44 | regexp string 45 | }{ 46 | {`test.*.foo`, `test[.]([^.]*?)[.]foo`}, 47 | {`test.{foo,bar}`, `test[.](foo|bar)`}, 48 | {`test?.foo`, `test[^.][.]foo`}, 49 | {`test?.$foo`, `test[^.][.][$]foo`}, 50 | } 51 | 52 | for _, test := range table { 53 | testName := fmt.Sprintf("glob: %#v", test.glob) 54 | regexp := GlobToRegexp(test.glob) 55 | assert.Equal(t, test.regexp, regexp, testName) 56 | } 57 | } 58 | 59 | func TestNonRegexpPrefix(t *testing.T) { 60 | table := []struct { 61 | expr string 
62 | prefix string 63 | }{ 64 | {`test[.]([^.]*?)[.]foo`, `test`}, 65 | {`__name__=cpu.load`, `__name__=cpu`}, 66 | {`__name__=~(cpu|mem)`, `__name__=~`}, 67 | {`__name__=~cpu|mem`, `__name__=~`}, 68 | {`__name__=~^host`, `__name__=~`}, 69 | } 70 | 71 | for _, test := range table { 72 | testName := fmt.Sprintf("expr: %#v", test.expr) 73 | prefix := NonRegexpPrefix(test.expr) 74 | assert.Equal(t, test.prefix, prefix, testName) 75 | } 76 | } 77 | 78 | func TestMaxWildcardDistance(t *testing.T) { 79 | table := []struct { 80 | glob string 81 | dist int 82 | }{ 83 | {`a.b.c.d.e`, -1}, 84 | {`test.*.foo.bar`, 2}, 85 | {`test.foo.*.*.bar.count`, 2}, 86 | {`test.foo.bar.*.bar.foo.test`, 3}, 87 | {`test.foo.bar.foobar.*.middle.*.foobar.bar.foo.test`, 4}, 88 | {`*.test.foo.bar.*`, 0}, 89 | } 90 | 91 | for _, test := range table { 92 | testName := fmt.Sprintf("glob: %#v", test.glob) 93 | dist := MaxWildcardDistance(test.glob) 94 | assert.Equal(t, test.dist, dist, testName) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /prometheus/.gitignore: -------------------------------------------------------------------------------- 1 | tmp 2 | -------------------------------------------------------------------------------- /prometheus/empty_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "github.com/prometheus/prometheus/model/histogram" 8 | "github.com/prometheus/prometheus/tsdb/chunkenc" 9 | ) 10 | 11 | // Iterator is a simple iterator that can only get the next value. 12 | // Iterator iterates over the samples of a time series, in timestamp-increasing order. 
13 | type emptyIterator struct{} 14 | 15 | var emptyIteratorValue chunkenc.Iterator = &emptyIterator{} 16 | 17 | // Next advances the iterator by one and returns the type of the value 18 | // at the new position (or ValNone if the iterator is exhausted). 19 | func (it *emptyIterator) Next() chunkenc.ValueType { return chunkenc.ValNone } 20 | 21 | // Seek advances the iterator forward to the first sample with a 22 | // timestamp equal or greater than t. If the current sample found by a 23 | // previous `Next` or `Seek` operation already has this property, Seek 24 | // has no effect. If a sample has been found, Seek returns the type of 25 | // its value. Otherwise, it returns ValNone, after with the iterator is 26 | // exhausted. 27 | func (it *emptyIterator) Seek(t int64) chunkenc.ValueType { return chunkenc.ValNone } 28 | 29 | // At returns the current timestamp/value pair if the value is a float. 30 | // Before the iterator has advanced, the behaviour is unspecified. 31 | func (it *emptyIterator) At() (int64, float64) { return 0, 0 } 32 | 33 | // AtHistogram returns the current timestamp/value pair if the value is 34 | // a histogram with integer counts. Before the iterator has advanced, 35 | // the behaviour is unspecified. 36 | func (it *emptyIterator) AtHistogram(histogram *histogram.Histogram) (int64, *histogram.Histogram) { 37 | return 0, nil 38 | } 39 | 40 | // AtFloatHistogram returns the current timestamp/value pair if the 41 | // value is a histogram with floating-point counts. It also works if the 42 | // value is a histogram with integer counts, in which case a 43 | // FloatHistogram copy of the histogram is returned. Before the iterator 44 | // has advanced, the behaviour is unspecified. 45 | func (it *emptyIterator) AtFloatHistogram(histogram *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { 46 | return 0, nil 47 | } 48 | 49 | // AtT returns the current timestamp. 50 | // Before the iterator has advanced, the behaviour is unspecified. 
51 | func (it *emptyIterator) AtT() int64 { return 0 } 52 | 53 | // Err returns the current error. It should be used only after the 54 | // iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone. 55 | func (it *emptyIterator) Err() error { return nil } 56 | -------------------------------------------------------------------------------- /prometheus/exemplar.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/prometheus/prometheus/model/exemplar" 10 | "github.com/prometheus/prometheus/model/labels" 11 | "github.com/prometheus/prometheus/storage" 12 | ) 13 | 14 | type nopExemplarQueryable struct { 15 | } 16 | 17 | type nopExemplarQuerier struct { 18 | } 19 | 20 | var _ storage.ExemplarQueryable = &nopExemplarQueryable{} 21 | var _ storage.ExemplarQuerier = &nopExemplarQuerier{} 22 | 23 | func (e *nopExemplarQueryable) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { 24 | return &nopExemplarQuerier{}, nil 25 | } 26 | 27 | func (e *nopExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { 28 | return []exemplar.QueryResult{}, nil 29 | } 30 | -------------------------------------------------------------------------------- /prometheus/gatherer.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "github.com/prometheus/client_golang/prometheus" 8 | dto "github.com/prometheus/client_model/go" 9 | ) 10 | 11 | type nopGatherer struct{} 12 | 13 | var _ prometheus.Gatherer = &nopGatherer{} 14 | 15 | func (*nopGatherer) Gather() ([]*dto.MetricFamily, error) { 16 | return []*dto.MetricFamily{}, nil 17 | } 18 | -------------------------------------------------------------------------------- /prometheus/labels.go: 
-------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "net/url" 8 | "sort" 9 | "strings" 10 | 11 | "github.com/prometheus/prometheus/model/labels" 12 | ) 13 | 14 | func urlParse(rawurl string) (*url.URL, error) { 15 | p := strings.IndexByte(rawurl, '?') 16 | if p < 0 { 17 | return url.Parse(rawurl) 18 | } 19 | 20 | m, err := url.Parse(rawurl[p:]) 21 | if m != nil { 22 | m.Path = rawurl[:p] 23 | } 24 | 25 | return m, err 26 | } 27 | 28 | func Labels(path string) labels.Labels { 29 | u, err := urlParse(path) 30 | if err != nil { 31 | return labels.Labels{labels.Label{Name: "__name__", Value: path}} 32 | } 33 | 34 | q := u.Query() 35 | lb := make(labels.Labels, len(q)+1) 36 | lb[0].Name = "__name__" 37 | lb[0].Value = u.Path 38 | 39 | i := 0 40 | for k, v := range q { 41 | i++ 42 | lb[i].Name = k 43 | lb[i].Value = v[0] 44 | } 45 | 46 | if len(lb) > 1 { 47 | sort.Slice(lb, func(i, j int) bool { return lb[i].Name < lb[j].Name }) 48 | } 49 | 50 | return lb 51 | } 52 | -------------------------------------------------------------------------------- /prometheus/labels_test.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestLabels(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | table := [][2]string{ 13 | { 14 | "cpu_usage_system?cpu=cpu5&host=telegraf-b9468c8b5-g47xt&instance=telegraf.default%3A9273&job=telegraf", 15 | `{__name__="cpu_usage_system", cpu="cpu5", host="telegraf-b9468c8b5-g47xt", instance="telegraf.default:9273", job="telegraf"}`, 16 | }, 17 | { 18 | "cpu_usage_system", 19 | `{__name__="cpu_usage_system"}`, 20 | }, 21 | { 22 | ":metric:?instance=localhost", 23 | `{__name__=":metric:", instance="localhost"}`, 24 | }, 25 | } 26 | 27 | for _, c := range table { 28 | assert.Equal(c[1], 
Labels(c[0]).String()) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /prometheus/local_storage.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/prometheus/prometheus/model/labels" 10 | "github.com/prometheus/prometheus/tsdb" 11 | "github.com/prometheus/prometheus/tsdb/index" 12 | "github.com/prometheus/prometheus/web" 13 | ) 14 | 15 | var _ web.LocalStorage = &storageImpl{} 16 | 17 | func (s *storageImpl) CleanTombstones() error { 18 | return nil 19 | } 20 | 21 | func (s *storageImpl) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error { 22 | return nil 23 | } 24 | 25 | func (s *storageImpl) Snapshot(dir string, withHead bool) error { 26 | return nil 27 | } 28 | 29 | func (s *storageImpl) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) { 30 | return &tsdb.Stats{ 31 | IndexPostingStats: &index.PostingsStats{}, 32 | }, nil 33 | } 34 | 35 | func (s *storageImpl) WALReplayStatus() (tsdb.WALReplayStatus, error) { 36 | return tsdb.WALReplayStatus{}, nil 37 | } 38 | -------------------------------------------------------------------------------- /prometheus/logger.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "go.uber.org/zap" 8 | ) 9 | 10 | type errorLevel interface { 11 | String() string 12 | } 13 | 14 | type logger struct { 15 | z *zap.Logger 16 | } 17 | 18 | func (l *logger) Log(keyvals ...interface{}) error { 19 | lg := l.z 20 | 21 | var msg string 22 | 23 | var level errorLevel 24 | 25 | for i := 1; i < len(keyvals); i += 2 { 26 | keyObj := keyvals[i-1] 27 | 28 | keyStr, ok := keyObj.(string) 29 | if !ok { 30 | l.z.Error("can't handle log, wrong key", zap.Any("keyvals", keyvals)) 31 | 
return nil 32 | } 33 | 34 | if keyStr == "level" { 35 | level, ok = keyvals[i].(errorLevel) 36 | if !ok { 37 | l.z.Error("can't handle log, wrong level", zap.Any("keyvals", keyvals)) 38 | return nil 39 | } 40 | 41 | continue 42 | } 43 | 44 | if keyStr == "msg" { 45 | msg, ok = keyvals[i].(string) 46 | if !ok { 47 | l.z.Error("can't handle log, wrong msg", zap.Any("keyvals", keyvals)) 48 | return nil 49 | } 50 | 51 | continue 52 | } 53 | 54 | lg = lg.With(zap.Any(keyStr, keyvals[i])) 55 | } 56 | 57 | switch level.String() { 58 | case "debug": 59 | lg.Debug(msg) 60 | case "info": 61 | lg.Info(msg) 62 | case "warn": 63 | lg.Warn(msg) 64 | case "error": 65 | lg.Error(msg) 66 | default: 67 | l.z.Error("can't handle log, unknown level", zap.Any("keyvals", keyvals)) 68 | return nil 69 | } 70 | 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /prometheus/metrics_set.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "github.com/prometheus/prometheus/model/labels" 8 | "github.com/prometheus/prometheus/storage" 9 | "github.com/prometheus/prometheus/tsdb/chunkenc" 10 | "github.com/prometheus/prometheus/util/annotations" 11 | ) 12 | 13 | // SeriesSet contains a set of series. 14 | type metricsSet struct { 15 | metrics []string 16 | current int 17 | } 18 | 19 | type metric struct { 20 | name string 21 | } 22 | 23 | var _ storage.SeriesSet = &metricsSet{} 24 | 25 | func (ms *metricsSet) At() storage.Series { 26 | return &metric{name: ms.metrics[ms.current]} 27 | } 28 | 29 | // Iterator returns a new iterator of the data of the series. 30 | func (s *metric) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { 31 | return emptyIteratorValue 32 | } 33 | 34 | func (s *metric) Labels() labels.Labels { 35 | return Labels(s.name) 36 | } 37 | 38 | // Err returns the current error. 
39 | func (ms *metricsSet) Err() error { return nil } 40 | 41 | func (ms *metricsSet) Next() bool { 42 | if ms.current < 0 { 43 | ms.current = 0 44 | } else { 45 | ms.current++ 46 | } 47 | 48 | return ms.current < len(ms.metrics) 49 | } 50 | 51 | func newMetricsSet(metrics []string) storage.SeriesSet { 52 | return &metricsSet{metrics: metrics, current: -1} 53 | } 54 | 55 | // Warnings ... 56 | func (s *metricsSet) Warnings() annotations.Annotations { return nil } 57 | -------------------------------------------------------------------------------- /prometheus/querier_select_test.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "testing" 8 | "time" 9 | 10 | "github.com/lomik/graphite-clickhouse/config" 11 | "github.com/prometheus/prometheus/storage" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestQuerier_timeRange(t *testing.T) { 16 | timeNow = func() time.Time { 17 | // 2022-11-29 09:30:47 UTC 18 | return time.Unix(1669714247, 0) 19 | } 20 | cfg := &config.Config{ 21 | ClickHouse: config.ClickHouse{ 22 | TaggedAutocompleDays: 4, 23 | }, 24 | } 25 | 26 | tests := []struct { 27 | name string 28 | 29 | mint int64 30 | maxt int64 31 | hints *storage.SelectHints 32 | 33 | wantFrom int64 34 | wantUntil int64 35 | }{ 36 | { 37 | name: "default from/until", 38 | wantFrom: 1669368647, // timeNow() - config.Clickhouse.TaggedAutocompleDays 39 | wantUntil: 1669714247, // timeNow() result 40 | }, 41 | { 42 | name: "start/end in SelectHints", 43 | hints: &storage.SelectHints{ 44 | Start: 1669453200000, 45 | End: 1669626000000, 46 | }, 47 | wantFrom: 1669453200, 48 | wantUntil: 1669626000, 49 | }, 50 | { 51 | name: "start/end in SelectHints overflow", 52 | hints: &storage.SelectHints{ 53 | // ClickHouse supported range of values by the Date type: [1970-01-01, 2149-06-06] 54 | Start: 5662310400001, 55 | End: 5662310400100, 56 | }, 57 
| wantFrom: 1669368647, // timeNow() - config.Clickhouse.TaggedAutocompleDays 58 | wantUntil: 1669714247, // timeNow() result 59 | }, 60 | { 61 | name: "no start/end in SelectHints", 62 | hints: &storage.SelectHints{}, 63 | mint: 1669194000000, 64 | maxt: 1669280400000, 65 | wantFrom: 1669194000, 66 | wantUntil: 1669280400, 67 | }, 68 | { 69 | name: "no start/end in SelectHints, mint/maxt overflow", 70 | hints: &storage.SelectHints{}, 71 | // ClickHouse supported range of values by the Date type: [1970-01-01, 2149-06-06] 72 | mint: 5662310400001, 73 | maxt: 5662310400100, 74 | wantFrom: 1669368647, // timeNow() - config.Clickhouse.TaggedAutocompleDays 75 | wantUntil: 1669714247, // timeNow() result 76 | }, 77 | } 78 | for _, tt := range tests { 79 | t.Run(tt.name, func(t *testing.T) { 80 | s := newStorage(cfg) 81 | 82 | // Querier returns a new Querier on the storage. 83 | sq, err := s.Querier(tt.mint, tt.maxt) 84 | require.NoError(t, err) 85 | 86 | q := sq.(*Querier) 87 | 88 | gotFrom, gotUntil := q.timeRange(tt.hints) 89 | if gotFrom != tt.wantFrom { 90 | t.Errorf("Querier.timeRange().from got = %v, want %v", gotFrom, tt.wantFrom) 91 | } 92 | 93 | if gotUntil != tt.wantUntil { 94 | t.Errorf("Querier.timeRange().until got = %v, want %v", gotUntil, tt.wantUntil) 95 | } 96 | }) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /prometheus/run.go: -------------------------------------------------------------------------------- 1 | //go:build !noprom 2 | // +build !noprom 3 | 4 | package prometheus 5 | 6 | import ( 7 | "context" 8 | "log" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/grafana/regexp" 13 | "github.com/lomik/graphite-clickhouse/config" 14 | "github.com/lomik/zapwriter" 15 | "github.com/prometheus/client_golang/prometheus" 16 | promConfig "github.com/prometheus/prometheus/config" 17 | "github.com/prometheus/prometheus/notifier" 18 | "github.com/prometheus/prometheus/promql" 19 | 
// Run wires up and starts the embedded Prometheus web handler backed by
// the graphite-clickhouse storage adapter. The web handler is launched in
// a background goroutine, so Run returns immediately; a non-nil error is
// returned only for setup failures.
func Run(config *config.Config) error {
	// use precompiled static from github.com/lomik/prometheus-ui-static
	ui.Assets = http.FS(assets.New(uiStatic.EmbedFS))

	zapLogger := &logger{
		z: zapwriter.Logger("prometheus"),
	}

	storage := newStorage(config)

	// "^$" matches only the empty string, so cross-origin requests are
	// effectively rejected by the CORS check.
	corsOrigin, err := regexp.Compile("^$")
	if err != nil {
		return err
	}

	queryEngine := promql.NewEngine(promql.EngineOpts{
		Logger:        zapLogger,
		Timeout:       time.Minute,
		MaxSamples:    50000000,
		LookbackDelta: config.Prometheus.LookbackDelta,
	})

	// Scrape, rules and notifier managers are created with empty options;
	// the web handler requires them even though this process serves
	// queries only.
	scrapeManager, err := scrape.NewManager(&scrape.Options{}, zapLogger, storage, prometheus.DefaultRegisterer)
	if err != nil {
		return err
	}

	rulesManager := rules.NewManager(&rules.ManagerOptions{
		Logger:     zapLogger,
		Appendable: storage,
		Queryable:  storage,
	})

	notifierManager := notifier.NewManager(&notifier.Options{}, zapLogger)

	promHandler := web.New(zapLogger, &web.Options{
		ListenAddress:              config.Prometheus.Listen,
		MaxConnections:             500,
		Storage:                    storage,
		ExemplarStorage:            &nopExemplarQueryable{},
		ExternalURL:                config.Prometheus.ExternalURL,
		RoutePrefix:                "/",
		QueryEngine:                queryEngine,
		ScrapeManager:              scrapeManager,
		RuleManager:                rulesManager,
		Flags:                      make(map[string]string),
		LocalStorage:               storage,
		Gatherer:                   &nopGatherer{},
		Notifier:                   notifierManager,
		CORSOrigin:                 corsOrigin,
		PageTitle:                  config.Prometheus.PageTitle,
		LookbackDelta:              config.Prometheus.LookbackDelta,
		RemoteReadConcurrencyLimit: config.Prometheus.RemoteReadConcurrencyLimit,
	})

	// NOTE(review): ApplyConfig's return value is discarded here — confirm
	// an empty config cannot fail to apply.
	promHandler.ApplyConfig(&promConfig.Config{})
	promHandler.SetReady(true)

	go func() {
		// log.Fatal terminates the whole process if the web handler stops
		// with an error.
		log.Fatal(promHandler.Run(context.Background(), nil, ""))
	}()

	return nil
}
48 | func (s *storageImpl) Close() error { 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /render/data/carbonlink.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/lomik/graphite-clickhouse/config" 8 | "github.com/lomik/graphite-clickhouse/helper/point" 9 | "github.com/lomik/graphite-clickhouse/pkg/scope" 10 | "go.uber.org/zap" 11 | 12 | graphitePickle "github.com/lomik/graphite-pickle" 13 | ) 14 | 15 | type carbonlinkFetcher interface { 16 | CacheQueryMulti(context.Context, []string) (map[string][]graphitePickle.DataPoint, error) 17 | } 18 | 19 | // carbonlink to get data from carbonlink server globally 20 | type carbonlinkClient struct { 21 | carbonlinkFetcher 22 | totalTimeout time.Duration 23 | } 24 | 25 | var carbonlink *carbonlinkClient = nil 26 | 27 | // setCarbonlinkClient setup the client once. Does nothing if Config.Carbonlink.Server is not set 28 | func setCarbonlinkClient(config *config.Carbonlink) { 29 | if carbonlink != nil { 30 | return 31 | } 32 | 33 | if config.Server == "" { 34 | return 35 | } 36 | 37 | carbonlink = &carbonlinkClient{ 38 | graphitePickle.NewCarbonlinkClient( 39 | config.Server, 40 | config.Retries, 41 | config.Threads, 42 | config.ConnectTimeout, 43 | config.QueryTimeout, 44 | ), 45 | config.TotalTimeout, 46 | } 47 | 48 | return 49 | } 50 | 51 | // queryCarbonlink returns callable result fetcher 52 | func queryCarbonlink(parentCtx context.Context, carbonlink *carbonlinkClient, metrics []string) func() *point.Points { 53 | logger := scope.Logger(parentCtx) 54 | 55 | if carbonlink == nil { 56 | return func() *point.Points { return nil } 57 | } 58 | 59 | carbonlinkResponseChan := make(chan *point.Points, 1) 60 | 61 | fetchResult := func() *point.Points { 62 | result := <-carbonlinkResponseChan 63 | return result 64 | } 65 | 66 | go func() { 67 | ctx, cancel := 
context.WithTimeout(parentCtx, carbonlink.totalTimeout) 68 | defer cancel() 69 | 70 | res, err := carbonlink.CacheQueryMulti(ctx, metrics) 71 | 72 | if err != nil { 73 | logger.Info("carbonlink failed", zap.Error(err)) 74 | } 75 | 76 | result := point.NewPoints() 77 | 78 | if res != nil && len(res) > 0 { 79 | tm := uint32(time.Now().Unix()) 80 | 81 | for metric, points := range res { 82 | metricID := result.MetricID(metric) 83 | for _, p := range points { 84 | result.AppendPoint(metricID, p.Value, uint32(p.Timestamp), tm) 85 | } 86 | } 87 | } 88 | 89 | carbonlinkResponseChan <- result 90 | }() 91 | 92 | return fetchResult 93 | } 94 | -------------------------------------------------------------------------------- /render/data/common_step.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "github.com/lomik/graphite-clickhouse/pkg/dry" 9 | ) 10 | 11 | // This is used to calculate lowest common multiplier of metrics for ClickHouse internal aggregation 12 | // Collect amount of targets; 13 | // Wait until all targets will send the step, and calculate the LCM on the fly 14 | // Return the calculated LCM() 15 | type commonStep struct { 16 | result int64 17 | wg sync.WaitGroup 18 | lock sync.RWMutex 19 | } 20 | 21 | func (c *commonStep) addTargets(delta int) { 22 | c.wg.Add(delta) 23 | } 24 | 25 | func (c *commonStep) doneTarget() { 26 | c.wg.Done() 27 | } 28 | 29 | func (c *commonStep) calculateUnsafe(a, b int64) int64 { 30 | if a == 0 || b == 0 { 31 | return dry.Max(a, b) 32 | } 33 | 34 | return dry.LCM(a, b) 35 | } 36 | 37 | func (c *commonStep) calculate(value int64) { 38 | c.lock.Lock() 39 | c.result = c.calculateUnsafe(c.result, value) 40 | c.lock.Unlock() 41 | c.doneTarget() 42 | } 43 | 44 | func (c *commonStep) getResult() int64 { 45 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) 46 | defer cancel() 47 | 48 | ch := make(chan 
int64) 49 | go func(ch chan int64) { 50 | c.wg.Wait() 51 | c.lock.RLock() 52 | defer c.lock.RUnlock() 53 | ch <- c.result 54 | }(ch) 55 | select { 56 | case r := <-ch: 57 | return r 58 | case <-ctx.Done(): 59 | // -1 is a definitely wrong value, it will break following ClickHouse query 60 | // This possible, when one of the queries in request already returned error 61 | return -1 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /render/data/common_step_test.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | type wrapper struct { 11 | *commonStep 12 | calcCounter int 13 | cLock sync.RWMutex 14 | } 15 | 16 | func (w *wrapper) calc(step int64) { 17 | w.cLock.Lock() 18 | w.calcCounter++ 19 | w.calculate(step) 20 | w.cLock.Unlock() 21 | } 22 | 23 | func newWrapper() *wrapper { 24 | c := &commonStep{ 25 | result: 0, 26 | wg: sync.WaitGroup{}, 27 | lock: sync.RWMutex{}, 28 | } 29 | 30 | return &wrapper{ 31 | commonStep: c, 32 | cLock: sync.RWMutex{}, 33 | } 34 | } 35 | 36 | func TestCommonStepWorker(t *testing.T) { 37 | w := newWrapper() 38 | w.addTargets(4) 39 | 40 | go func() { 41 | lastStep := int64(0) 42 | for i := 0; i < 20000; i++ { 43 | w.calculateUnsafe(lastStep, 0) 44 | } 45 | 46 | w.calc(0) 47 | assert.Equal(t, int64(120), w.commonStep.getResult()) 48 | }() 49 | go func() { 50 | lastStep := int64(0) 51 | for i := 0; i < 30000; i++ { 52 | w.calculateUnsafe(lastStep, 6) 53 | } 54 | 55 | w.calc(6) 56 | assert.Equal(t, int64(120), w.commonStep.getResult()) 57 | }() 58 | go func() { 59 | lastStep := int64(0) 60 | for i := 0; i < 40000; i++ { 61 | w.calculateUnsafe(lastStep, 8) 62 | } 63 | 64 | w.calc(8) 65 | assert.Equal(t, int64(120), w.commonStep.getResult()) 66 | }() 67 | go func() { 68 | lastStep := int64(0) 69 | for i := 0; i < 50000; i++ { 70 | 
w.calculateUnsafe(lastStep, 10) 71 | } 72 | 73 | w.calc(10) 74 | assert.Equal(t, int64(120), w.commonStep.getResult()) 75 | }() 76 | assert.Equal(t, int64(120), w.commonStep.getResult()) 77 | assert.Equal(t, 4, w.calcCounter) 78 | } 79 | -------------------------------------------------------------------------------- /render/handler_test.go: -------------------------------------------------------------------------------- 1 | package render 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/lomik/graphite-clickhouse/config" 9 | ) 10 | 11 | func Test_getCacheTimeout(t *testing.T) { 12 | cacheConfig := config.CacheConfig{ 13 | ShortTimeoutSec: 60, 14 | ShortTimeoutStr: "60", 15 | DefaultTimeoutSec: 300, 16 | DefaultTimeoutStr: "300", 17 | ShortDuration: 3 * time.Hour, 18 | ShortUntilOffsetSec: 120, 19 | } 20 | 21 | now := int64(1636985018) 22 | 23 | tests := []struct { 24 | name string 25 | now time.Time 26 | from int64 27 | until int64 28 | want int32 29 | wantStr string 30 | }{ 31 | { 32 | name: "short: from = now - 600, until = now - 120", 33 | now: time.Unix(now, 0), 34 | from: now - 600, 35 | until: now - 120, 36 | want: 60, 37 | wantStr: "60", 38 | }, 39 | { 40 | name: "short: from = now - 10800", 41 | now: time.Unix(now, 0), 42 | from: now - 10800, 43 | until: now, 44 | want: 60, 45 | wantStr: "60", 46 | }, 47 | { 48 | name: "short: from = now - 10810, until = now - 120", 49 | now: time.Unix(now, 0), 50 | from: now - 10800, 51 | until: now - 120, 52 | want: 60, 53 | wantStr: "60", 54 | }, 55 | { 56 | name: "short: from = now - 10800, until now - 121", 57 | now: time.Unix(now, 0), 58 | from: now - 10800, 59 | until: now - 121, 60 | want: 300, 61 | wantStr: "300", 62 | }, 63 | { 64 | name: "default: from = now - 10801", 65 | now: time.Unix(now, 0), 66 | from: now - 10801, 67 | until: now, 68 | want: 300, 69 | wantStr: "300", 70 | }, 71 | { 72 | name: "short: from = now - 122, until = now - 121", 73 | now: time.Unix(now, 0), 74 | from: now - 
122, 75 | until: now - 121, 76 | want: 300, 77 | wantStr: "300", 78 | }, 79 | } 80 | for i, tt := range tests { 81 | t.Run(fmt.Sprintf("[%d] %s", i, tt.name), func(t *testing.T) { 82 | got, gotStr, _ := getCacheTimeout(tt.now, tt.from, tt.until, &cacheConfig) 83 | if got != tt.want { 84 | t.Errorf("getCacheTimeout() = %v, want %v", got, tt.want) 85 | } 86 | 87 | if gotStr != tt.wantStr { 88 | t.Errorf("getCacheTimeout() = %q, want %q", gotStr, tt.wantStr) 89 | } 90 | }) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /render/reply/formatter.go: -------------------------------------------------------------------------------- 1 | package reply 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "net/http" 7 | "strconv" 8 | 9 | "github.com/lomik/graphite-clickhouse/pkg/alias" 10 | "github.com/lomik/graphite-clickhouse/pkg/dry" 11 | "github.com/lomik/graphite-clickhouse/pkg/scope" 12 | "github.com/lomik/graphite-clickhouse/render/data" 13 | "go.uber.org/zap" 14 | ) 15 | 16 | // Formatter implements request parser and response generator 17 | type Formatter interface { 18 | // Parse request 19 | ParseRequest(r *http.Request) (data.MultiTarget, error) 20 | // Generate reply payload 21 | Reply(http.ResponseWriter, *http.Request, data.CHResponses) 22 | } 23 | 24 | // GetFormatter returns a proper interface for render format 25 | func GetFormatter(r *http.Request) (Formatter, error) { 26 | format := r.FormValue("format") 27 | switch format { 28 | case "carbonapi_v3_pb": 29 | return &V3PB{}, nil 30 | case "pickle": 31 | return &Pickle{}, nil 32 | case "protobuf": 33 | return &V2PB{}, nil 34 | case "carbonapi_v2_pb": 35 | return &V2PB{}, nil 36 | } 37 | 38 | err := fmt.Errorf("format %v is not supported, supported formats: carbonapi_v3_pb, pickle, protobuf (aka carbonapi_v2_pb)", format) 39 | if !scope.Debug(r.Context(), "Output") { 40 | return nil, err 41 | } 42 | 43 | switch format { 44 | case "json": 45 | return &JSON{}, nil 46 | } 
47 | 48 | err = fmt.Errorf("%w\n(formats available for output debug: json)", err) 49 | 50 | return nil, err 51 | } 52 | 53 | func parseRequestForms(r *http.Request) (data.MultiTarget, error) { 54 | fromTimestamp, err := strconv.ParseInt(r.FormValue("from"), 10, 32) 55 | if err != nil { 56 | return nil, fmt.Errorf("cannot parse from") 57 | } 58 | 59 | untilTimestamp, err := strconv.ParseInt(r.FormValue("until"), 10, 32) 60 | if err != nil { 61 | return nil, fmt.Errorf("cannot parse until") 62 | } 63 | 64 | maxDataPoints, err := strconv.ParseInt(r.FormValue("maxDataPoints"), 10, 32) 65 | if err != nil { 66 | maxDataPoints = int64(math.MaxInt64) 67 | } 68 | 69 | targets := dry.RemoveEmptyStrings(r.Form["target"]) 70 | tf := data.TimeFrame{ 71 | From: fromTimestamp, 72 | Until: untilTimestamp, 73 | MaxDataPoints: maxDataPoints, 74 | } 75 | multiTarget := make(data.MultiTarget) 76 | multiTarget[tf] = data.NewTargets(targets, alias.New()) 77 | 78 | if len(targets) > 0 { 79 | logger := scope.Logger(r.Context()).Named("form_parser") 80 | for _, t := range targets { 81 | logger.Info( 82 | "target", 83 | zap.Int64("from", tf.From), 84 | zap.Int64("until", tf.Until), 85 | zap.Int64("maxDataPoints", tf.MaxDataPoints), 86 | zap.String("target", t), 87 | ) 88 | } 89 | } 90 | 91 | return multiTarget, nil 92 | } 93 | -------------------------------------------------------------------------------- /render/reply/protobuf_test.go: -------------------------------------------------------------------------------- 1 | package reply 2 | 3 | import ( 4 | "encoding/binary" 5 | "testing" 6 | ) 7 | 8 | func TestVarintLen(t *testing.T) { 9 | buf := make([]byte, binary.MaxVarintLen64) 10 | 11 | for i := uint64(0); i < 1000000; i++ { 12 | n := binary.PutUvarint(buf, i) 13 | if VarintLen(i) != uint64(n) { 14 | t.FailNow() 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /sd/utils/utils.go: 
-------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "net" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/lomik/graphite-clickhouse/helper/errs" 12 | ) 13 | 14 | var ErrNotFound = errors.New("entry not found") 15 | 16 | type KV struct { 17 | Key string 18 | Value string 19 | Flags int64 20 | } 21 | 22 | func HttpGet(url string) ([]byte, error) { 23 | client := &http.Client{Timeout: 2 * time.Second} 24 | 25 | resp, err := client.Get(url) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | data, err := io.ReadAll(resp.Body) 31 | resp.Body.Close() 32 | 33 | if resp.StatusCode == http.StatusNotFound { 34 | return nil, ErrNotFound 35 | } 36 | 37 | if resp.StatusCode != http.StatusOK { 38 | return nil, errs.NewErrorWithCode(string(data), resp.StatusCode) 39 | } 40 | 41 | return data, err 42 | } 43 | 44 | func HttpPut(url string, body []byte) error { 45 | req, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(body)) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | req.Header.Set("Content-Type", "application/json") 51 | 52 | client := &http.Client{Timeout: 2 * time.Second} 53 | 54 | resp, err := client.Do(req) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | defer resp.Body.Close() 60 | 61 | if resp.StatusCode == http.StatusNotFound { 62 | return ErrNotFound 63 | } 64 | 65 | if resp.StatusCode != http.StatusOK { 66 | data, _ := io.ReadAll(resp.Body) 67 | return errs.NewErrorWithCode(string(data), resp.StatusCode) 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func HttpDelete(url string) error { 74 | req, err := http.NewRequest(http.MethodDelete, url, nil) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | client := &http.Client{Timeout: 2 * time.Second} 80 | 81 | resp, err := client.Do(req) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | defer resp.Body.Close() 87 | 88 | if resp.StatusCode == http.StatusNotFound { 89 | return ErrNotFound 
90 | } 91 | 92 | if resp.StatusCode != http.StatusOK { 93 | data, _ := io.ReadAll(resp.Body) 94 | return errs.NewErrorWithCode(string(data), resp.StatusCode) 95 | } 96 | 97 | return nil 98 | } 99 | 100 | // GetLocalIP returns the non loopback local IP of the host 101 | func GetLocalIP() string { 102 | addrs, err := net.InterfaceAddrs() 103 | if err != nil { 104 | return "" 105 | } 106 | 107 | for _, address := range addrs { 108 | // check the address type and if it is not a loopback the display it 109 | if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { 110 | if ipnet.IP.To4() != nil { 111 | return ipnet.IP.String() 112 | } 113 | } 114 | } 115 | 116 | return "" 117 | } 118 | -------------------------------------------------------------------------------- /tagger/metric.go: -------------------------------------------------------------------------------- 1 | package tagger 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | 7 | "github.com/lomik/graphite-clickhouse/pkg/dry" 8 | ) 9 | 10 | type Metric struct { 11 | Path []byte 12 | Level int 13 | ParentIndex int 14 | Tags *Set 15 | } 16 | 17 | func (m *Metric) ParentPath() []byte { 18 | if len(m.Path) == 0 { 19 | return nil 20 | } 21 | 22 | index := bytes.LastIndexByte(m.Path[:len(m.Path)-1], '.') 23 | if index < 0 { 24 | return nil 25 | } 26 | 27 | return m.Path[:index+1] 28 | } 29 | 30 | func (m *Metric) IsLeaf() uint8 { 31 | if len(m.Path) > 0 && m.Path[len(m.Path)-1] == '.' 
{ 32 | return 0 33 | } 34 | 35 | return 1 36 | } 37 | 38 | func (m *Metric) MarshalJSON() ([]byte, error) { 39 | return json.Marshal(map[string]interface{}{ 40 | "Path": dry.UnsafeString(m.Path), 41 | "Level": m.Level, 42 | "Tags": m.Tags, 43 | "IsLeaf": m.IsLeaf(), 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /tagger/rule_test.go: -------------------------------------------------------------------------------- 1 | package tagger 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | var RulesConf = ` 12 | [[rule]] 13 | tag = "prefix" 14 | has-prefix = "prefix" 15 | 16 | [[rule]] 17 | tag = "suffix" 18 | has-suffix = "suffix" 19 | 20 | [[rule]] 21 | tag = "contains" 22 | contains = "contains" 23 | 24 | [[rule]] 25 | tag = "equal" 26 | equal = "equal" 27 | 28 | [[rule]] 29 | tag = "regexp" 30 | regexp = "reg[e]xp" 31 | ` 32 | 33 | func TestRules(t *testing.T) { 34 | assert := assert.New(t) 35 | rules, err := Parse(RulesConf) 36 | 37 | assert.NoError(err) 38 | 39 | table := []struct { 40 | path string 41 | method string // "" for all, "prefix", "suffix", "contains" for use only specified tree 42 | expectedTags []string 43 | }{ 44 | {"prefix.metric", "", []string{"prefix"}}, 45 | {"prefix.metric", "prefix", []string{"prefix"}}, 46 | {"prefix.metric", "suffix", nil}, 47 | {"prefix.metric", "contains", nil}, 48 | {"prefix.metric", "other", nil}, 49 | 50 | {"metric.suffix", "", []string{"suffix"}}, 51 | {"metric.suffix", "prefix", nil}, 52 | {"metric.suffix", "suffix", []string{"suffix"}}, 53 | {"metric.suffix", "contains", nil}, 54 | {"metric.suffix", "other", nil}, 55 | 56 | {"hello.contains.world", "", []string{"contains"}}, 57 | {"hello.contains.world", "prefix", nil}, 58 | {"hello.contains.world", "suffix", nil}, 59 | {"hello.contains.world", "contains", []string{"contains"}}, 60 | {"hello.contains.world", "other", nil}, 61 | 62 | {"hello.regexp.world", 
"", []string{"regexp"}}, 63 | {"hello.regexp.world", "prefix", nil}, 64 | {"hello.regexp.world", "suffix", nil}, 65 | {"hello.regexp.world", "contains", nil}, 66 | {"hello.regexp.world", "other", []string{"regexp"}}, 67 | 68 | {"prefix.suffix", "", []string{"prefix", "suffix"}}, 69 | } 70 | 71 | for i := 0; i < len(table); i++ { 72 | t := table[i] 73 | 74 | m := Metric{Path: []byte(t.path), Tags: EmptySet} 75 | 76 | switch t.method { 77 | case "": 78 | rules.Match(&m) 79 | case "prefix": 80 | rules.matchPrefix(&m) 81 | case "suffix": 82 | rules.matchSuffix(&m) 83 | case "contains": 84 | rules.matchContains(&m) 85 | case "other": 86 | rules.matchOther(&m) 87 | } 88 | 89 | expected := t.expectedTags 90 | if expected == nil { 91 | expected = []string{} 92 | } 93 | 94 | sort.Strings(expected) 95 | 96 | tags := m.Tags.List() 97 | sort.Strings(tags) 98 | 99 | assert.Equal(expected, tags, fmt.Sprintf("path: %s, method: %s", t.path, t.method)) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /tagger/set.go: -------------------------------------------------------------------------------- 1 | package tagger 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | // set with copy-on-write 8 | type Set struct { 9 | data map[string]bool 10 | list []string 11 | json []byte 12 | } 13 | 14 | var EmptySet = &Set{ 15 | data: make(map[string]bool), 16 | list: make([]string, 0), 17 | } 18 | 19 | func (s *Set) Add(tag ...string) *Set { 20 | var newList []string 21 | 22 | for _, t := range tag { 23 | if !s.data[t] { 24 | if newList == nil { 25 | newList = make([]string, len(s.list)+1) 26 | copy(newList, s.list) 27 | newList[len(newList)-1] = t 28 | } else { 29 | newList = append(newList, t) 30 | } 31 | } 32 | } 33 | 34 | // no new tags 35 | if newList == nil { 36 | return s 37 | } 38 | 39 | // new tag 40 | n := &Set{ 41 | data: make(map[string]bool), 42 | list: newList, 43 | } 44 | 45 | for _, t := range n.list { 46 | n.data[t] = true 47 
| } 48 | 49 | return n 50 | } 51 | 52 | func (s *Set) Merge(other *Set) *Set { 53 | return s.Add(other.list...) 54 | } 55 | 56 | func (s *Set) Len() int { 57 | return len(s.list) 58 | } 59 | 60 | func (s *Set) List() []string { 61 | return s.list 62 | } 63 | 64 | func (s *Set) MarshalJSON() ([]byte, error) { 65 | if s.json != nil { 66 | return s.json, nil 67 | } 68 | 69 | var err error 70 | 71 | s.json, err = json.Marshal(s.list) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | return s.json, nil 77 | } 78 | -------------------------------------------------------------------------------- /tagger/tree.go: -------------------------------------------------------------------------------- 1 | package tagger 2 | 3 | type Tree struct { 4 | Next [256]*Tree 5 | Rules []*Rule 6 | } 7 | 8 | func (t *Tree) Add(prefix []byte, rule *Rule) { 9 | x := t 10 | 11 | for i := 0; i < len(prefix); i++ { 12 | if x.Next[prefix[i]] == nil { 13 | x.Next[prefix[i]] = &Tree{} 14 | } 15 | 16 | x = x.Next[prefix[i]] 17 | } 18 | 19 | if x.Rules == nil { 20 | x.Rules = make([]*Rule, 0) 21 | } 22 | 23 | x.Rules = append(x.Rules, rule) 24 | } 25 | 26 | func (t *Tree) AddSuffix(suffix []byte, rule *Rule) { 27 | x := t 28 | 29 | for i := len(suffix) - 1; i >= 0; i-- { 30 | if x.Next[suffix[i]] == nil { 31 | x.Next[suffix[i]] = &Tree{} 32 | } 33 | 34 | x = x.Next[suffix[i]] 35 | } 36 | 37 | if x.Rules == nil { 38 | x.Rules = make([]*Rule, 0) 39 | } 40 | 41 | x.Rules = append(x.Rules, rule) 42 | } 43 | -------------------------------------------------------------------------------- /tests/agg_internal/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 
| 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/agg_internal/graphite-clickhouse-internal-aggr.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- 
/tests/agg_latest/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/agg_latest/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = false 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # 
points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/agg_merge/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/agg_merge/graphite-clickhouse-internal-aggr.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL 
}}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/agg_merge/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = false 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/agg_oneblock/carbon-clickhouse.conf.tpl: 
-------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/agg_oneblock/graphite-clickhouse-internal-aggr.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with 
reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/agg_oneblock/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = false 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | debug 5 | /var/log/clickhouse-server/clickhouse-server.log 6 | /var/log/clickhouse-server/clickhouse-server.err.log 7 | 2000M 8 | 20 9 | 10 | 11 | 8123 12 | 9000 13 | 14 | 15 | 9009 16 | 17 | test-clickhouse-s1 18 | 19 | 20 | 21 | 22 | 23 | 0.0.0.0 24 | 25 | 26 | 1073741824 27 | 28 | 32 | 1073741824 33 | 34 | 35 | /var/lib/clickhouse/ 36 | 37 | 38 | 
/var/lib/clickhouse/tmp/ 39 | 40 | 41 | users.xml 42 | 43 | 44 | default 45 | 46 | 47 | default 48 | 49 | 50 | 51 | 55 | system 56 | query_log
57 | 58 | 59 | 7500 60 |
61 | 62 | 63 | 64 | 65 | system 66 | part_log
67 | 68 | 7500 69 |
70 | 71 | 72 |
73 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup/init.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS default.graphite_reverse ( 2 | Path String, 3 | Value Float64, 4 | Time UInt32, 5 | Date Date, 6 | Timestamp UInt32 7 | ) ENGINE = GraphiteMergeTree('graphite_rollup') 8 | PARTITION BY Date 9 | ORDER BY (Path, Time); 10 | 11 | CREATE TABLE IF NOT EXISTS default.graphite ( 12 | Path String, 13 | Value Float64, 14 | Time UInt32, 15 | Date Date, 16 | Timestamp UInt32 17 | ) ENGINE = GraphiteMergeTree('graphite_rollup') 18 | PARTITION BY Date 19 | ORDER BY (Path, Time); 20 | 21 | CREATE TABLE IF NOT EXISTS default.graphite_index ( 22 | Date Date, 23 | Level UInt32, 24 | Path String, 25 | Version UInt32 26 | ) ENGINE = ReplacingMergeTree(Version) 27 | PARTITION BY Date 28 | ORDER BY (Level, Path, Date); 29 | 30 | CREATE TABLE IF NOT EXISTS default.graphite_tags ( 31 | Date Date, 32 | Tag1 String, 33 | Path String, 34 | Tags Array(String), 35 | Version UInt32 36 | ) ENGINE = ReplacingMergeTree(Version) 37 | PARTITION BY Date 38 | ORDER BY (Tag1, Path, Date); 39 | 40 | CREATE TABLE IF NOT EXISTS default.tag1_count_per_day 41 | ( 42 | Date Date, 43 | Tag1 String, 44 | Count UInt64 45 | ) 46 | ENGINE = SummingMergeTree 47 | ORDER BY (Date, Tag1); 48 | 49 | CREATE MATERIALIZED VIEW IF NOT EXISTS default.tag1_count_per_day_mv TO default.tag1_count_per_day AS 50 | SELECT Date AS Date, 51 | Tag1 AS Tag1, 52 | count(*) AS Count 53 | FROM default.graphite_tags 54 | GROUP BY (Date, Tag1); -------------------------------------------------------------------------------- /tests/clickhouse/rollup/rollup.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | avg 5 | 6 | 0 7 | 10 8 | 9 | 10 | 11 | \.sum$ 12 | sum 13 | 14 | 15 | \.sum\? 16 | sum 17 | 18 | 19 | \.min$ 20 | min 21 | 22 | 23 | \.min\? 
24 | min 25 | 26 | 27 | \.max$ 28 | max 29 | 30 | 31 | \.max\? 32 | max 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup_tls/init.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS default.graphite_reverse ( 2 | Path String, 3 | Value Float64, 4 | Time UInt32, 5 | Date Date, 6 | Timestamp UInt32 7 | ) ENGINE = GraphiteMergeTree('graphite_rollup') 8 | PARTITION BY toYYYYMM(Date) 9 | ORDER BY (Path, Time); 10 | 11 | CREATE TABLE IF NOT EXISTS default.graphite ( 12 | Path String, 13 | Value Float64, 14 | Time UInt32, 15 | Date Date, 16 | Timestamp UInt32 17 | ) ENGINE = GraphiteMergeTree('graphite_rollup') 18 | PARTITION BY toYYYYMM(Date) 19 | ORDER BY (Path, Time); 20 | 21 | CREATE TABLE IF NOT EXISTS default.graphite_index ( 22 | Date Date, 23 | Level UInt32, 24 | Path String, 25 | Version UInt32 26 | ) ENGINE = ReplacingMergeTree(Version) 27 | PARTITION BY toYYYYMM(Date) 28 | ORDER BY (Level, Path, Date); 29 | 30 | CREATE TABLE IF NOT EXISTS default.graphite_tags ( 31 | Date Date, 32 | Tag1 String, 33 | Path String, 34 | Tags Array(String), 35 | Version UInt32 36 | ) ENGINE = ReplacingMergeTree(Version) 37 | PARTITION BY toYYYYMM(Date) 38 | ORDER BY (Tag1, Path, Date); 39 | 40 | CREATE TABLE IF NOT EXISTS default.tag1_count_per_day 41 | ( 42 | Date Date, 43 | Tag1 String, 44 | Count UInt64 45 | ) 46 | ENGINE = SummingMergeTree 47 | ORDER BY (Date, Tag1); 48 | 49 | CREATE MATERIALIZED VIEW IF NOT EXISTS default.tag1_count_per_day_mv TO default.tag1_count_per_day AS 50 | SELECT Date AS Date, 51 | Tag1 AS Tag1, 52 | count(*) AS Count 53 | FROM default.graphite_tags 54 | GROUP BY (Date, Tag1); -------------------------------------------------------------------------------- /tests/clickhouse/rollup_tls/rollup.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | avg 5 | 6 | 0 7 | 10 
8 | 9 | 10 | 11 | \.sum$ 12 | sum 13 | 14 | 15 | \.sum\? 16 | sum 17 | 18 | 19 | \.min$ 20 | min 21 | 22 | 23 | \.min\? 24 | min 25 | 26 | 27 | \.max$ 28 | max 29 | 30 | 31 | \.max\? 32 | max 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup_tls/rootCA.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDHTCCAgWgAwIBAgIURx5itXwLHeiQES1LzCHF7F8RNEkwDQYJKoZIhvcNAQEL 3 | BQAwHjEcMBoGA1UEAwwTbG9yZHZpcmRleC5sb2NhbCBDQTAeFw0yNDA4MDkxMjMy 4 | MzJaFw0zNDA4MDcxMjMyMzJaMB4xHDAaBgNVBAMME2xvcmR2aXJkZXgubG9jYWwg 5 | Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDuiK4tBYzNtROmhuXD 6 | 80HsVVk2/+/TXV85Aey7oo2gxxJJ09iARnjJadNrbBUdoL42XtmBCkYY+pXYUWPD 7 | hvals2AbXiAePg7DlAHJfpaQTzHlsPvAUMjqbD6cFaQ7DfNQHcz2emmFhcRYzlQM 8 | h0Ob3v2yhogG7PuKaiTLTKYcHnRKfEIobQEIq16ABaaCFKzR6tpvrUJFYtkJ8EUz 9 | jhrSg67qy7yiHiMmGQVq526X2oZYhMbSGjiPkaMZHdFkxZgJF5iQhANG9djvcopO 10 | jdFfsJYM9rVxAjwO/P3fq5dpuQxWLLo6ZmholsixPZs1s8paEnonSDtyoNLsykwD 11 | 2mFdAgMBAAGjUzBRMB0GA1UdDgQWBBS6BlL90Mo/+aHonqIqaewM8CyxnTAfBgNV 12 | HSMEGDAWgBS6BlL90Mo/+aHonqIqaewM8CyxnTAPBgNVHRMBAf8EBTADAQH/MA0G 13 | CSqGSIb3DQEBCwUAA4IBAQAIwTN3II6HdPfMsLvYoOmzcvUE9Y6QndI20eLqp3p8 14 | 6KnU+lgLdSkLjc9BKwLh/Jhuy4H3u1nHpW8Jkgy/8irG2uaUvgKlutfApFQshAo7 15 | /k9xdH36ER0LF/bW5hQ535H76OaE+eaexx2zU50kPVuntal577d8HBfrKVI41KU8 16 | CVdqYTwEqHwjSyRhmmRqLi7Yo+i0o0hRwH39LxYXY2rup/V6uRyLXSIDUZ9VeqVt 17 | K8XDAbLV1s4kzR/OdpYcJuTWX9gFUlNHpGDkOSy9ggc5zxKaHlwGGZsvVSb4f+VF 18 | C89ABPZs+26EvExIih+civiC1XWIghP8RsiNyBOK3TOf 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup_tls/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDYDCCAkigAwIBAgIUElpBaTpcXsRXXP5YNhdKwu+lmE0wDQYJKoZIhvcNAQEL 3 | 
BQAwHjEcMBoGA1UEAwwTbG9yZHZpcmRleC5sb2NhbCBDQTAeFw0yNDA4MDkxMjM2 4 | NTJaFw0zNDA4MDcxMjM2NTJaMFYxCzAJBgNVBAYTAlJVMRIwEAYDVQQIDAlUYXRh 5 | cnN0YW4xDjAMBgNVBAcMBUthemFuMQ8wDQYDVQQKDAZLb250dXIxEjAQBgNVBAMM 6 | CWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKdkGsVL 7 | lK0ecS7+pEzEFpKmKOrSMKGCfOkqIVNO4f21njvg3rOx+j/1G8+D1eFHJJkotsx/ 8 | HfJg2sgMosltIlR19f1CzV6ewQLYN7fw4d8aMq1B1lnzzvfUjjygdxB353RiaCHI 9 | eQ1xkTPPmdZMEgaYwto2nrNrCOTb/kZig6pQeQ1YLV4c1daiI9L7OJhwKIb9yqT6 10 | gT+jXrrRZWE5o0sSKBw16h+iFXy/niO+2+VLuAHXturTg8m0U+NagexJZzkM4wt1 11 | 9pXAlODxu6y0en3lU2ngfGVV22HGSYsyKjBWAzA4HM3wQ6D6DrEa2W2ezV1MQfrS 12 | FYGVO8DBziaV41cCAwEAAaNeMFwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAAB 13 | MB0GA1UdDgQWBBRTabpN+c8bOE2B5o214IliRqUIqjAfBgNVHSMEGDAWgBS6BlL9 14 | 0Mo/+aHonqIqaewM8CyxnTANBgkqhkiG9w0BAQsFAAOCAQEAJal0TGS91yRK4ATZ 15 | sifjon3w7Q47WAbhXNFasuFdEdEexcWmc+gzhYW+snnVUlHT9y1J675i/Le6ry7y 16 | /pkzzdSoyx7CVHlU81gZLCts1lzufDl/cE5vDG4Sjnx1SepumUy9IrXhCaAaH19s 17 | EiywBsZ1uPC3XqAlaXLUYQglmtzXzeOMDXVRz4n3+SujkZ+DD2UMmTvWe9P1D8Ss 18 | gMkg8iMvNtm90MjVFgddLf9QjHYEJjNvzaRdQXvsnCOBwR/kyKimaZxV34QCC7cl 19 | QMhc6SzEmGqz+NflrzJyAIOMkDxotyqZX6v5kYFwEMhvBa6tXi+mmxx4lQ6VElT9 20 | pB9GKA== 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/clickhouse/rollup_tls/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCnZBrFS5StHnEu 3 | /qRMxBaSpijq0jChgnzpKiFTTuH9tZ474N6zsfo/9RvPg9XhRySZKLbMfx3yYNrI 4 | DKLJbSJUdfX9Qs1ensEC2De38OHfGjKtQdZZ88731I48oHcQd+d0YmghyHkNcZEz 5 | z5nWTBIGmMLaNp6zawjk2/5GYoOqUHkNWC1eHNXWoiPS+ziYcCiG/cqk+oE/o166 6 | 0WVhOaNLEigcNeofohV8v54jvtvlS7gB17bq04PJtFPjWoHsSWc5DOMLdfaVwJTg 7 | 8bustHp95VNp4HxlVdthxkmLMiowVgMwOBzN8EOg+g6xGtltns1dTEH60hWBlTvA 8 | wc4mleNXAgMBAAECggEAMrhgbDvUlwhMX2MFQcWA2XrDlzONTMMPOk9rvaR/UbMA 9 | 
eUBP+r8JBuwsOxrFafd2nXn6ucgiuNikMk2x3brV1iXQHadqNyt/bG87ot64cjOr 10 | +1ehrav0oJ+lYbV1nmXWmitfRi1KkMpCpyJWiNqP87PCBwDZ4Z+jGEWYrJcZMjem 11 | gAFoaUw0hrLc7FJe8sogC9j3gyIfjVq7k+epPlW6VsRW7h+aZowq2Bbik2VYz1Py 12 | aIdpaZwf8Jrhn7Qo0V39OVEr/VVLKFzNlyLpp+XeXmXT8sinmWNtTWKUB2rzL9bR 13 | oa+OeRTJIyzJXwpIBKte4TIKhtnmWANEWjuGf16m0QKBgQDFjMFwIYt6+8LEmL3Y 14 | xfpI9Hgy/PGLA/Y49ZR52OmEpXvQO12SQsDxMLIMXECSnjIIew+aJgly9HR3i+uK 15 | eFjLjKBABx5xmdfzLT05YcTASRWoj4tZrLrLM4Vytcs4xCSbiWbYFsgbbls38Abs 16 | PbmcD7oU/n/F+GrGJCHRKEtNRQKBgQDY6v8nvMDS46WLou2VzAA25mGOUlY5jzW1 17 | WR0WxU2cLZwl+2upLj5UYRHXQtCOrVIGaMXEUdQgf+w3rvGnet14LeBByFQBs3wP 18 | TnluBEwG/ByZfjOwqAOULfIHJq75HyCZ5XR5H8tIm4hf8rb3BiJ1fe2bJSgJbst4 19 | TLmOPljx6wKBgQCHOP3//zY2jLaZU+Q/yeS0o4LThAjim2ejPZbQgQX3Yj8KHljC 20 | kSb48dguVcdtlROycmoPnhHBuksuuXwVYKOHUU8wBK92G1SShFjwOlgvNte4delx 21 | DKcgCLhD+OSOitR0Eu1u5Mk83aFa/NYAR5ARn0JEtKBJpu2Pi5QKU4aX8QKBgQCo 22 | ufnozfBq2bouKHiHmVvdWEwv6Sm6sgOD4SI4URZyUiPwg2WV/itrdOnst8MECBsS 23 | czLJ5yCKexahpYnAzVgxn/WdFZcKj7MDMPZRNjRxBm+0kS7hzX6jJy3olBVsH+M6 24 | 8fksMifsfVaR03iwIuxw2ZgVosxGshDArWV0GFkVKwKBgFnn1YGbKQM/9EnmO79d 25 | WrNsj3P9uzngeoi9eYQbzU8ow0qBkFch36iY9tVwxwpG4k8Job9q3LBBRtbdu0eE 26 | /snY1cMd9PVuh3HaBg/hFgA2cMnk3CG+7+wLmkonRSpdIhuQjQXbB7xMmCFImypK 27 | RO4iqN/2YDPT1z5UGJEDTkBH 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/consolidateBy/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 
22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/consolidateBy/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [feature-flags] 9 | use-carbon-behaviour = false 10 | dont-match-missing-tags = true 11 | 12 | [clickhouse] 13 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 14 | data-timeout = "30s" 15 | 16 | index-table = "graphite_index" 17 | index-use-daily = true 18 | index-timeout = "1m" 19 | internal-aggregation = true 20 | 21 | tagged-table = "graphite_tags" 22 | tagged-autocomplete-days = 1 23 | 24 | [[data-table]] 25 | # # clickhouse table name 26 | table = "graphite" 27 | # # points in table are stored with reverse path 28 | reverse = false 29 | rollup-conf = "auto" 30 | 31 | [[logging]] 32 | logger = "" 33 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 34 | level = "info" 35 | encoding = "json" 36 | encoding-time = "iso8601" 37 | encoding-duration = "seconds" 38 | -------------------------------------------------------------------------------- /tests/consul.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
bash 2 | 3 | if [ "$1" != "" ]; then 4 | wget -q https://releases.hashicorp.com/consul/${1}/consul_${1}_linux_amd64.zip || exit 1 5 | unzip consul_${1}_linux_amd64.zip || exit 1 6 | fi 7 | 8 | ./consul agent -server -bootstrap -data-dir=/tmp/consul -bind=127.0.0.1 9 | -------------------------------------------------------------------------------- /tests/emptyseries_append/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/emptyseries_append/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | append-empty-series = true 8 | 9 | [clickhouse] 10 | url = "{{ .CLICKHOUSE_URL 
}}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 11 | data-timeout = "30s" 12 | 13 | index-table = "graphite_index" 14 | index-use-daily = true 15 | index-timeout = "1m" 16 | internal-aggregation = true 17 | 18 | tagged-table = "graphite_tags" 19 | tagged-autocomplete-days = 1 20 | 21 | [[data-table]] 22 | # # clickhouse table name 23 | table = "graphite" 24 | # # points in table are stored with reverse path 25 | reverse = false 26 | rollup-conf = "auto" 27 | 28 | [[logging]] 29 | logger = "" 30 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 31 | level = "info" 32 | encoding = "json" 33 | encoding-time = "iso8601" 34 | encoding-duration = "seconds" 35 | -------------------------------------------------------------------------------- /tests/emptyseries_noappend/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | 
-------------------------------------------------------------------------------- /tests/emptyseries_noappend/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | append-empty-series = false 8 | 9 | [clickhouse] 10 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 11 | data-timeout = "30s" 12 | 13 | index-table = "graphite_index" 14 | index-use-daily = true 15 | index-timeout = "1m" 16 | internal-aggregation = true 17 | 18 | tagged-table = "graphite_tags" 19 | tagged-autocomplete-days = 1 20 | 21 | [[data-table]] 22 | # # clickhouse table name 23 | table = "graphite" 24 | # # points in table are stored with reverse path 25 | reverse = false 26 | rollup-conf = "auto" 27 | 28 | [[logging]] 29 | logger = "" 30 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 31 | level = "info" 32 | encoding = "json" 33 | encoding-time = "iso8601" 34 | encoding-duration = "seconds" 35 | -------------------------------------------------------------------------------- /tests/error_handling/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL 
}}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/error_handling/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .PROXY_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "1s" 11 | 12 | query-params = [ 13 | { 14 | duration = "1h", 15 | url = "{{ .PROXY_URL }}/?max_rows_to_read=1&max_result_bytes=1&readonly=2&log_queries=1", 16 | data-timeout = "5s" 17 | }, 18 | { 19 | duration = "7h", 20 | url = "{{ .PROXY_URL }}/?max_memory_usage=1&max_memory_usage_for_user=1&readonly=2&log_queries=1", 21 | data-timeout = "5s" 22 | } 23 | ] 24 | 25 | index-table = "graphite_index" 26 | index-use-daily = true 27 | index-timeout = "1s" 28 | internal-aggregation = false 29 | 30 | tagged-table = "graphite_tags" 31 | tagged-autocomplete-days = 1 32 | 33 | [[data-table]] 34 | # # clickhouse table name 35 | table = "graphite" 36 | # # points in table are stored with reverse path 37 | reverse = false 38 | rollup-conf = "auto" 39 | 40 | [[logging]] 41 | logger = "" 42 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 43 | level = "info" 44 | encoding = "json" 45 | encoding-time = "iso8601" 46 | encoding-duration = "seconds" 47 | 
-------------------------------------------------------------------------------- /tests/feature_flags_both_true/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/feature_flags_both_true/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [feature-flags] 9 | use-carbon-behaviour = true 10 | dont-match-missing-tags = true 11 | 12 | [clickhouse] 13 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 14 | data-timeout = "30s" 15 | 16 | index-table = "graphite_index" 17 | index-use-daily = true 18 | index-timeout = "1m" 
19 | internal-aggregation = true 20 | 21 | tagged-table = "graphite_tags" 22 | tagged-autocomplete-days = 1 23 | 24 | [[data-table]] 25 | # # clickhouse table name 26 | table = "graphite" 27 | # # points in table are stored with reverse path 28 | reverse = false 29 | rollup-conf = "auto" 30 | 31 | [[logging]] 32 | logger = "" 33 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 34 | level = "info" 35 | encoding = "json" 36 | encoding-time = "iso8601" 37 | encoding-duration = "seconds" 38 | -------------------------------------------------------------------------------- /tests/feature_flags_dont_match_missing_tags/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/feature_flags_dont_match_missing_tags/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 
2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [feature-flags] 9 | dont-match-missing-tags = true 10 | 11 | [clickhouse] 12 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 13 | data-timeout = "30s" 14 | 15 | index-table = "graphite_index" 16 | index-use-daily = true 17 | index-timeout = "1m" 18 | internal-aggregation = true 19 | 20 | tagged-table = "graphite_tags" 21 | tagged-autocomplete-days = 1 22 | 23 | [[data-table]] 24 | # # clickhouse table name 25 | table = "graphite" 26 | # # points in table are stored with reverse path 27 | reverse = false 28 | rollup-conf = "auto" 29 | 30 | [[logging]] 31 | logger = "" 32 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 33 | level = "info" 34 | encoding = "json" 35 | encoding-time = "iso8601" 36 | encoding-duration = "seconds" 37 | -------------------------------------------------------------------------------- /tests/feature_flags_false/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | 
zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/feature_flags_false/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/feature_flags_use_carbon_behaviour/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | 
threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/feature_flags_use_carbon_behaviour/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [feature-flags] 9 | use-carbon-behaviour = true 10 | 11 | [clickhouse] 12 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 13 | data-timeout = "30s" 14 | 15 | index-table = "graphite_index" 16 | index-use-daily = true 17 | index-timeout = "1m" 18 | internal-aggregation = true 19 | 20 | tagged-table = "graphite_tags" 21 | tagged-autocomplete-days = 1 22 | 23 | [[data-table]] 24 | # # clickhouse table name 25 | table = "graphite" 26 | # # points in table are stored with reverse path 27 | reverse = false 28 | rollup-conf = "auto" 29 | 30 | [[logging]] 31 | logger = "" 32 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 33 | level = "info" 34 | encoding = "json" 35 | encoding-time = "iso8601" 36 | encoding-duration = "seconds" 37 | -------------------------------------------------------------------------------- 
/tests/find_cache/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/find_cache/graphite-clickhouse-cached.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [common.find-cache] 9 | type = "mem" 10 | size-mb = 1 11 | default-timeout = 300 12 | short-timeout = 60 13 | short-duration = "240s" 14 | find-timeout = 120 15 | 16 | [clickhouse] 17 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 18 | data-timeout = "30s" 19 | 20 | index-table = "graphite_index" 21 | index-use-daily = true 22 | index-timeout = "1m" 23 | internal-aggregation 
= false 24 | 25 | tagged-table = "graphite_tags" 26 | tagged-autocomplete-days = 1 27 | 28 | [[data-table]] 29 | # # clickhouse table name 30 | table = "graphite" 31 | # # points in table are stored with reverse path 32 | reverse = false 33 | rollup-conf = "auto" 34 | 35 | [[logging]] 36 | logger = "" 37 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 38 | level = "info" 39 | encoding = "json" 40 | encoding-time = "iso8601" 41 | encoding-duration = "seconds" 42 | -------------------------------------------------------------------------------- /tests/find_cache/graphite-clickhouse-internal-aggr-cached.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [common.find-cache] 9 | type = "mem" 10 | size-mb = 1 11 | default-timeout = 300 12 | short-timeout = 60 13 | short-duration = "240s" 14 | find-timeout = 120 15 | 16 | [clickhouse] 17 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 18 | data-timeout = "30s" 19 | 20 | index-table = "graphite_index" 21 | index-use-daily = true 22 | index-timeout = "1m" 23 | internal-aggregation = true 24 | 25 | tagged-table = "graphite_tags" 26 | tagged-autocomplete-days = 1 27 | 28 | [[data-table]] 29 | # # clickhouse table name 30 | table = "graphite" 31 | # # points in table are stored with reverse path 32 | reverse = false 33 | rollup-conf = "auto" 34 | 35 | [[logging]] 36 | logger = "" 37 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 38 | level = "info" 39 | encoding = "json" 40 | encoding-time = "iso8601" 41 | encoding-duration = "seconds" 42 | -------------------------------------------------------------------------------- /tests/limitera/carbon-clickhouse.conf.tpl: 
-------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/limitera/graphite-clickhouse-internal-aggr-cached.conf.tpl: -------------------------------------------------------------------------------- 1 | # Adaptive limiter with throttle queries and limit max queries 2 | 3 | [common] 4 | listen = "{{ .GCH_ADDR }}" 5 | max-cpu = 0 6 | max-metrics-in-render-answer = 10000 7 | max-metrics-per-target = 10000 8 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 9 | 10 | [common.find-cache] 11 | type = "mem" 12 | size-mb = 1 13 | default-timeout = 300 14 | short-timeout = 60 15 | short-duration = "240s" 16 | find-timeout = 120 17 | 18 | [clickhouse] 19 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 20 | data-timeout = "30s" 21 | 22 | index-table = "graphite_index" 23 | index-use-daily = true 24 | 
index-timeout = "1m" 25 | internal-aggregation = true 26 | 27 | tagged-table = "graphite_tags" 28 | tagged-autocomplete-days = 1 29 | 30 | render-max-concurrent = 6 31 | render-adaptive-queries = 2 32 | find-max-concurrent = 4 33 | find-adaptive-queries = 2 34 | tags-max-concurrent = 4 35 | tags-adaptive-queries = 2 36 | 37 | [[data-table]] 38 | # # clickhouse table name 39 | table = "graphite" 40 | # # points in table are stored with reverse path 41 | reverse = false 42 | rollup-conf = "auto" 43 | 44 | [[logging]] 45 | logger = "" 46 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 47 | level = "info" 48 | encoding = "json" 49 | encoding-time = "iso8601" 50 | encoding-duration = "seconds" 51 | -------------------------------------------------------------------------------- /tests/limitera/test.toml: -------------------------------------------------------------------------------- 1 | [test] 2 | precision = "10s" 3 | 4 | [[test.clickhouse]] 5 | version = "24.2" 6 | dir = "tests/clickhouse/rollup" 7 | delay = "10s" 8 | 9 | [test.carbon_clickhouse] 10 | template = "carbon-clickhouse.conf.tpl" 11 | 12 | [[test.graphite_clickhouse]] 13 | template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" 14 | 15 | ########################################################################## 16 | [[test.input]] 17 | name = "test.cache" 18 | points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] 19 | 20 | [[test.input]] 21 | name = "cache;scope=test" 22 | points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] 23 | 24 | ########################################################################## 25 | [[test.find_checks]] 26 | query = "test" 27 | result = [{ path = "test", is_leaf = false }] 28 | 29 | [[test.find_checks]] 30 | query = "test.cache" 31 | result = [{ path = "test.cache", is_leaf = true }] 32 | 33 | ########################################################################## 34 | 35 | [[test.tags_checks]] 36 | query = 
"name;scope=test" 37 | result = [ 38 | "cache", 39 | ] 40 | 41 | ########################################################################## 42 | 43 | [[test.render_checks]] 44 | from = "rnow" 45 | until = "rnow+10" 46 | targets = [ "test.cache" ] 47 | 48 | [[test.render_checks.result]] 49 | name = "test.cache" 50 | path = "test.cache" 51 | consolidation = "avg" 52 | start = "rnow" 53 | stop = "rnow+20" 54 | step = 10 55 | req_start = "rnow" 56 | req_stop = "rnow+20" 57 | values = [3.0, nan] 58 | 59 | ########################################################################## 60 | -------------------------------------------------------------------------------- /tests/limitermax/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/limitermax/graphite-clickhouse-internal-aggr-cached.conf.tpl: 
-------------------------------------------------------------------------------- 1 | # Limiter with limit max connections 2 | 3 | [common] 4 | listen = "{{ .GCH_ADDR }}" 5 | max-cpu = 0 6 | max-metrics-in-render-answer = 10000 7 | max-metrics-per-target = 10000 8 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 9 | 10 | [common.find-cache] 11 | type = "mem" 12 | size-mb = 1 13 | default-timeout = 300 14 | short-timeout = 60 15 | short-duration = "240s" 16 | find-timeout = 120 17 | 18 | [clickhouse] 19 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 20 | data-timeout = "30s" 21 | 22 | index-table = "graphite_index" 23 | index-use-daily = true 24 | index-timeout = "1m" 25 | internal-aggregation = true 26 | 27 | tagged-table = "graphite_tags" 28 | tagged-autocomplete-days = 1 29 | 30 | render-max-queries = 100 31 | find-max-queries = 50 32 | tags-max-queries = 50 33 | 34 | [[data-table]] 35 | # # clickhouse table name 36 | table = "graphite" 37 | # # points in table are stored with reverse path 38 | reverse = false 39 | rollup-conf = "auto" 40 | 41 | [[logging]] 42 | logger = "" 43 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 44 | level = "info" 45 | encoding = "json" 46 | encoding-time = "iso8601" 47 | encoding-duration = "seconds" 48 | -------------------------------------------------------------------------------- /tests/limitermax/test.toml: -------------------------------------------------------------------------------- 1 | [test] 2 | precision = "10s" 3 | 4 | [[test.clickhouse]] 5 | version = "24.2" 6 | dir = "tests/clickhouse/rollup" 7 | delay = "10s" 8 | 9 | [test.carbon_clickhouse] 10 | template = "carbon-clickhouse.conf.tpl" 11 | 12 | [[test.graphite_clickhouse]] 13 | template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" 14 | 15 | ########################################################################## 16 | [[test.input]] 17 | name = "test.cache" 18 | points = [{value = 1.0, time = 
"midnight-270s"}, {value = 3.0, time = "now"}] 19 | 20 | [[test.input]] 21 | name = "cache;scope=test" 22 | points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] 23 | 24 | ########################################################################## 25 | [[test.find_checks]] 26 | query = "test" 27 | result = [{ path = "test", is_leaf = false }] 28 | 29 | [[test.find_checks]] 30 | query = "test.cache" 31 | result = [{ path = "test.cache", is_leaf = true }] 32 | 33 | ########################################################################## 34 | 35 | [[test.tags_checks]] 36 | query = "name;scope=test" 37 | result = [ 38 | "cache", 39 | ] 40 | 41 | ########################################################################## 42 | 43 | [[test.render_checks]] 44 | from = "rnow" 45 | until = "rnow+10" 46 | targets = [ "test.cache" ] 47 | 48 | [[test.render_checks.result]] 49 | name = "test.cache" 50 | path = "test.cache" 51 | consolidation = "avg" 52 | start = "rnow" 53 | stop = "rnow+20" 54 | step = 10 55 | req_start = "rnow" 56 | req_stop = "rnow+20" 57 | values = [3.0, nan] 58 | 59 | ########################################################################## 60 | -------------------------------------------------------------------------------- /tests/limiterw/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL 
}}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/limiterw/graphite-clickhouse-internal-aggr-cached.conf.tpl: -------------------------------------------------------------------------------- 1 | # Limiter with throttle queries 2 | 3 | [common] 4 | listen = "{{ .GCH_ADDR }}" 5 | max-cpu = 0 6 | max-metrics-in-render-answer = 10000 7 | max-metrics-per-target = 10000 8 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 9 | 10 | [common.find-cache] 11 | type = "mem" 12 | size-mb = 1 13 | default-timeout = 300 14 | short-timeout = 60 15 | short-duration = "240s" 16 | find-timeout = 120 17 | 18 | [clickhouse] 19 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 20 | data-timeout = "30s" 21 | 22 | index-table = "graphite_index" 23 | index-use-daily = true 24 | index-timeout = "1m" 25 | internal-aggregation = true 26 | 27 | tagged-table = "graphite_tags" 28 | tagged-autocomplete-days = 1 29 | 30 | render-max-concurrent = 6 31 | find-max-concurrent = 4 32 | tags-max-concurrent = 4 33 | 34 | [[data-table]] 35 | # # clickhouse table name 36 | table = "graphite" 37 | # # points in table are stored with reverse path 38 | reverse = false 39 | rollup-conf = "auto" 40 | 41 | [[logging]] 42 | logger = "" 43 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 44 | level = "info" 45 | encoding = "json" 46 | encoding-time = "iso8601" 47 | encoding-duration = "seconds" 48 | -------------------------------------------------------------------------------- 
/tests/limiterw/test.toml: -------------------------------------------------------------------------------- 1 | [test] 2 | precision = "10s" 3 | 4 | [[test.clickhouse]] 5 | version = "24.2" 6 | dir = "tests/clickhouse/rollup" 7 | delay = "10s" 8 | 9 | [test.carbon_clickhouse] 10 | template = "carbon-clickhouse.conf.tpl" 11 | 12 | [[test.graphite_clickhouse]] 13 | template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" 14 | 15 | ########################################################################## 16 | [[test.input]] 17 | name = "test.cache" 18 | points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] 19 | 20 | [[test.input]] 21 | name = "cache;scope=test" 22 | points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] 23 | 24 | ########################################################################## 25 | [[test.find_checks]] 26 | query = "test" 27 | result = [{ path = "test", is_leaf = false }] 28 | 29 | [[test.find_checks]] 30 | query = "test.cache" 31 | result = [{ path = "test.cache", is_leaf = true }] 32 | 33 | ########################################################################## 34 | 35 | [[test.tags_checks]] 36 | query = "name;scope=test" 37 | result = [ 38 | "cache", 39 | ] 40 | 41 | ########################################################################## 42 | 43 | [[test.render_checks]] 44 | from = "rnow" 45 | until = "rnow+10" 46 | targets = [ "test.cache" ] 47 | 48 | [[test.render_checks.result]] 49 | name = "test.cache" 50 | path = "test.cache" 51 | consolidation = "avg" 52 | start = "rnow" 53 | stop = "rnow+20" 54 | step = 10 55 | req_start = "rnow" 56 | req_stop = "rnow+20" 57 | values = [3.0, nan] 58 | 59 | ########################################################################## 60 | -------------------------------------------------------------------------------- /tests/limiterwn/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 
| [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/limiterwn/graphite-clickhouse-internal-aggr-cached.conf.tpl: -------------------------------------------------------------------------------- 1 | # Limiter with throttle queries and limit max queries 2 | 3 | [common] 4 | listen = "{{ .GCH_ADDR }}" 5 | max-cpu = 0 6 | max-metrics-in-render-answer = 10000 7 | max-metrics-per-target = 10000 8 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 9 | 10 | [common.find-cache] 11 | type = "mem" 12 | size-mb = 1 13 | default-timeout = 300 14 | short-timeout = 60 15 | short-duration = "240s" 16 | find-timeout = 120 17 | 18 | [clickhouse] 19 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 20 | data-timeout = "30s" 21 | 22 | index-table = "graphite_index" 23 | index-use-daily = true 24 | index-timeout = "1m" 25 | internal-aggregation = true 26 | 27 | tagged-table = "graphite_tags" 28 
| tagged-autocomplete-days = 1 29 | 30 | render-max-queries = 100 31 | render-max-concurrent = 6 32 | find-max-queries = 50 33 | find-max-concurrent = 4 34 | tags-max-queries = 50 35 | tags-max-concurrent = 4 36 | 37 | [[data-table]] 38 | # # clickhouse table name 39 | table = "graphite" 40 | # # points in table are stored with reverse path 41 | reverse = false 42 | rollup-conf = "auto" 43 | 44 | [[logging]] 45 | logger = "" 46 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 47 | level = "info" 48 | encoding = "json" 49 | encoding-time = "iso8601" 50 | encoding-duration = "seconds" 51 | -------------------------------------------------------------------------------- /tests/limiterwn/test.toml: -------------------------------------------------------------------------------- 1 | [test] 2 | precision = "10s" 3 | 4 | [[test.clickhouse]] 5 | version = "24.2" 6 | dir = "tests/clickhouse/rollup" 7 | delay = "10s" 8 | 9 | [test.carbon_clickhouse] 10 | template = "carbon-clickhouse.conf.tpl" 11 | 12 | [[test.graphite_clickhouse]] 13 | template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" 14 | 15 | ########################################################################## 16 | [[test.input]] 17 | name = "test.cache" 18 | points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] 19 | 20 | [[test.input]] 21 | name = "cache;scope=test" 22 | points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] 23 | 24 | ########################################################################## 25 | [[test.find_checks]] 26 | query = "test" 27 | result = [{ path = "test", is_leaf = false }] 28 | 29 | [[test.find_checks]] 30 | query = "test.cache" 31 | result = [{ path = "test.cache", is_leaf = true }] 32 | 33 | ########################################################################## 34 | 35 | [[test.tags_checks]] 36 | query = "name;scope=test" 37 | result = [ 38 | "cache", 39 | ] 40 | 41 | 
########################################################################## 42 | 43 | [[test.render_checks]] 44 | from = "rnow" 45 | until = "rnow+10" 46 | targets = [ "test.cache" ] 47 | 48 | [[test.render_checks.result]] 49 | name = "test.cache" 50 | path = "test.cache" 51 | consolidation = "avg" 52 | start = "rnow" 53 | stop = "rnow+20" 54 | step = 10 55 | req_start = "rnow" 56 | req_stop = "rnow+20" 57 | values = [3.0, nan] 58 | 59 | ########################################################################## 60 | -------------------------------------------------------------------------------- /tests/one_table/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/one_table/graphite-clickhouse-internal-aggr.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = 
"{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/one_table/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = false 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | 20 | [[data-table]] 21 | # # clickhouse table name 22 | table = "graphite" 23 | # # points in table are stored with reverse path 24 | reverse = false 25 | rollup-conf = "auto" 26 | 27 | [[logging]] 28 | logger = "" 29 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 30 | level = "info" 31 | encoding = "json" 32 | 
encoding-time = "iso8601" 33 | encoding-duration = "seconds" 34 | -------------------------------------------------------------------------------- /tests/tags_min_in_query/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/tags_min_in_query/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [feature-flags] 9 | use-carbon-behaviour = true 10 | 11 | [clickhouse] 12 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 13 | data-timeout = "30s" 14 | 15 | index-table = "graphite_index" 16 | index-use-daily = true 17 | 
index-timeout = "1m" 18 | internal-aggregation = true 19 | 20 | tagged-table = "graphite_tags" 21 | tagged-autocomplete-days = 1 22 | 23 | tags-min-in-query = 1 24 | 25 | [[data-table]] 26 | # # clickhouse table name 27 | table = "graphite" 28 | # # points in table are stored with reverse path 29 | reverse = false 30 | rollup-conf = "auto" 31 | 32 | [[logging]] 33 | logger = "" 34 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 35 | level = "info" 36 | encoding = "json" 37 | encoding-time = "iso8601" 38 | encoding-duration = "seconds" 39 | -------------------------------------------------------------------------------- /tests/tls/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDHTCCAgWgAwIBAgIURx5itXwLHeiQES1LzCHF7F8RNEkwDQYJKoZIhvcNAQEL 3 | BQAwHjEcMBoGA1UEAwwTbG9yZHZpcmRleC5sb2NhbCBDQTAeFw0yNDA4MDkxMjMy 4 | MzJaFw0zNDA4MDcxMjMyMzJaMB4xHDAaBgNVBAMME2xvcmR2aXJkZXgubG9jYWwg 5 | Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDuiK4tBYzNtROmhuXD 6 | 80HsVVk2/+/TXV85Aey7oo2gxxJJ09iARnjJadNrbBUdoL42XtmBCkYY+pXYUWPD 7 | hvals2AbXiAePg7DlAHJfpaQTzHlsPvAUMjqbD6cFaQ7DfNQHcz2emmFhcRYzlQM 8 | h0Ob3v2yhogG7PuKaiTLTKYcHnRKfEIobQEIq16ABaaCFKzR6tpvrUJFYtkJ8EUz 9 | jhrSg67qy7yiHiMmGQVq526X2oZYhMbSGjiPkaMZHdFkxZgJF5iQhANG9djvcopO 10 | jdFfsJYM9rVxAjwO/P3fq5dpuQxWLLo6ZmholsixPZs1s8paEnonSDtyoNLsykwD 11 | 2mFdAgMBAAGjUzBRMB0GA1UdDgQWBBS6BlL90Mo/+aHonqIqaewM8CyxnTAfBgNV 12 | HSMEGDAWgBS6BlL90Mo/+aHonqIqaewM8CyxnTAPBgNVHRMBAf8EBTADAQH/MA0G 13 | CSqGSIb3DQEBCwUAA4IBAQAIwTN3II6HdPfMsLvYoOmzcvUE9Y6QndI20eLqp3p8 14 | 6KnU+lgLdSkLjc9BKwLh/Jhuy4H3u1nHpW8Jkgy/8irG2uaUvgKlutfApFQshAo7 15 | /k9xdH36ER0LF/bW5hQ535H76OaE+eaexx2zU50kPVuntal577d8HBfrKVI41KU8 16 | CVdqYTwEqHwjSyRhmmRqLi7Yo+i0o0hRwH39LxYXY2rup/V6uRyLXSIDUZ9VeqVt 17 | K8XDAbLV1s4kzR/OdpYcJuTWX9gFUlNHpGDkOSy9ggc5zxKaHlwGGZsvVSb4f+VF 18 | C89ABPZs+26EvExIih+civiC1XWIghP8RsiNyBOK3TOf 19 | -----END CERTIFICATE----- 20 | 
-------------------------------------------------------------------------------- /tests/tls/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/tls/client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDXTCCAkWgAwIBAgIUH+CPx0invXJZGZk7WQ0TOl2duV4wDQYJKoZIhvcNAQEL 3 | BQAwHjEcMBoGA1UEAwwTbG9yZHZpcmRleC5sb2NhbCBDQTAeFw0yNDA4MDkxMjM4 4 | NTdaFw0zNDA4MDcxMjM4NTdaMFMxCzAJBgNVBAYTAlJVMRIwEAYDVQQIDAlUYXRh 5 | cnN0YW4xDjAMBgNVBAcMBUthemFuMQ8wDQYDVQQKDAZLb250dXIxDzANBgNVBAMM 6 | BmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN4ksMPzkoon 7 | SBbAIutgLjpEJOoEVb5iHbBzkAn9c9EDwkHVrUGFlx69QwkBncoomV09WW3dCMlf 8 | FX8ClHZ5/vEpJAxQVHYTyeNpzRE+gtDuun0NN+TPgTv3Q/wBrBds/4xl1UxtuwQW 9 | QkrZtREi71SYcdkuWnMv4OiA6EZnhJUBuPW6oV0Sa12PeEcmQJuliHGGDd72l50d 10 | 
ZsYJi/WhVgmJ5FlUkED8cVxKDbXhk3rGkXpkU/eyfEh12sNr0nX6BpPNCts3puM9 11 | 8lkzJ2luSkfwtp46s/pQwgs0aADVd37WaV1DbNGT2iqSnnPlVHR+DoGfb8+c3AGP 12 | zKzuvMNH2xMCAwEAAaNeMFwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMB0G 13 | A1UdDgQWBBRK5htFqO/QAQp6O2yvQOjwIFaXDDAfBgNVHSMEGDAWgBS6BlL90Mo/ 14 | +aHonqIqaewM8CyxnTANBgkqhkiG9w0BAQsFAAOCAQEABl6sVpN1O/fRF1RFKfvc 15 | pYpzFdqQpH2lva26Ove3PMMn3gYD3fgH3JKt1JHJ8mejJ/fJDReM1hD5MtR8buuF 16 | P/UHEg0cJ47ljLFHjnjJX4IobuxAVRkkt+1mx7/HLQoJjPEyzDwuKazz1XcXQd4c 17 | 4F3oa/nmo7/Nzf7NnnSEvNkwv3Anc18qAnwxCaONR0mkEWfJ0sZlcnxS1FlVEVtG 18 | kSymZJa6VsRMqgRDsrTyaOF0WcYuL7+onlywc2+A7fjbPzlFhTL83/yiZA+IDcrV 19 | OC81JN67uh4aK3nXlCHBDU1jFdr9u0jCGwo+wmWfKea7r6KG/J0a2IEokfIdSnmh 20 | wQ== 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/tls/client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDeJLDD85KKJ0gW 3 | wCLrYC46RCTqBFW+Yh2wc5AJ/XPRA8JB1a1BhZcevUMJAZ3KKJldPVlt3QjJXxV/ 4 | ApR2ef7xKSQMUFR2E8njac0RPoLQ7rp9DTfkz4E790P8AawXbP+MZdVMbbsEFkJK 5 | 2bURIu9UmHHZLlpzL+DogOhGZ4SVAbj1uqFdEmtdj3hHJkCbpYhxhg3e9pedHWbG 6 | CYv1oVYJieRZVJBA/HFcSg214ZN6xpF6ZFP3snxIddrDa9J1+gaTzQrbN6bjPfJZ 7 | MydpbkpH8LaeOrP6UMILNGgA1Xd+1mldQ2zRk9oqkp5z5VR0fg6Bn2/PnNwBj8ys 8 | 7rzDR9sTAgMBAAECggEACA7kqqGx9c5UUHRKeqdT20vlhZJVWev35RK2wuYCxtjl 9 | ZGX6kZ869XkqxATe/cUDQIfyhTMOF9/vHlsFmlaf54z1KyKELdRXRSdCsmwbarYo 10 | 5ahjwqpppwyNLB2+FGDL1Ff3/icXhZ/Dv9tYih/uS+9LvJ5wgYUsb21dqlAkVb4C 11 | SK03xmOQ/osaDUZpVj1E6uhiNcs3hc1z5nTLZXFeGjVdtJUePXoAzO6saJOcbcsy 12 | 4iSzmCcT/WJ1T3crlXU6v+v7gc1L1/7uAq5yDTVHtwlxoU+SqcmbKKlUECH86cTs 13 | xT16UtU71SlmQtsnwDYavdb017vhB4+6JKOAFcyTwQKBgQDq8rje8qnacw1Nz1xD 14 | JS+p0R4dkE8uD8usQaTtajpd0lGci28zcl8pr/fPpVwlF8VSePDrMjaf8tqs5Dq/ 15 | KRri3NEnXLhoKsGOn9PiGDwchDG57Lya4OnJ9dDa/FWUzCvxyttBpQZwocaMTEBU 16 | C0nD2G+SxVMdEjzhXKfQFVn7eQKBgQDyDD/hU8tFT28Bge7lRYG73JGZfr4ZeBu0 
17 | EOGeu/402fOECngVK1b39VDOTD9me3QJKbKKRtjiUJq+0oFLXyl9nUbBIV8xhBF+ 18 | r9jNd3W0aClzR4u9oxCTnyvodpElWChBWnTZu1EcCASz8KUm0IY+dnbu12I3K4uX 19 | ti8n+xpb6wKBgC2zgRp9AWUotBHKoBu/hAH4V29QvtYq5GdhbX9xBmFxo8ZbqQnM 20 | 2Y32WLHfbIkakpt0Qwi8/7slNjwjOPouOLigU17gvk4k4vmnRUPZivfRDwsnbZiC 21 | 33cVhcbTBqKnBHVIDFY8j4AhN8namzi96V9bHnjiQUSKY6VCrLHhNVuhAoGBAO1Q 22 | I0WKAW8oLV7eBNrXZhZJcJt9D2crQoYuUvdtvBQXaNEZ7pha0L71z08kpLiW67Kc 23 | Jke6pKRngQD8pPXADI7zJ87tKEcFBJ4gTMFOkaHaymETUagRe4ww8DzQGwjxQS6q 24 | QIzFQgXouqutkk7W/fe58GvF0q7iy89oOR3K7RIXAoGAVPk+MFC5cjyYc+srSoGg 25 | K66BhwVyhsjF+7n2qptSFa8OXTtIVV/TBnpeW0l2lD1EGs4RLNz7wmgqa7eU6co3 26 | tzFJqhGQPm0785QfSk3aOSS3OGzR3TqkDUt8LLK8rXIoUuFgNyRKnsp4ncSN75zu 27 | FL/drPzHeFzuHozRy8DxxBE= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/tls/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | append-empty-series = false 8 | 9 | [clickhouse] 10 | url = "{{ .CLICKHOUSE_TLS_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 11 | data-timeout = "30s" 12 | index-table = "graphite_index" 13 | index-use-daily = true 14 | index-timeout = "1m" 15 | internal-aggregation = true 16 | 17 | tagged-table = "graphite_tags" 18 | tagged-autocomplete-days = 1 19 | [clickhouse.tls] 20 | ca-cert = ["{{- .TEST_DIR -}}/ca.crt"] 21 | server-name = "localhost" 22 | [[clickhouse.tls.certificates]] 23 | key = "{{- .TEST_DIR -}}/client.key" 24 | cert = "{{- .TEST_DIR -}}/client.crt" 25 | 26 | [[data-table]] 27 | # # clickhouse table name 28 | table = "graphite" 29 | # # points in table are stored with reverse path 30 | reverse = false 31 | rollup-conf = "auto" 32 | 33 | [[logging]] 34 | logger = "" 35 | file 
= "{{ .GCH_DIR }}/graphite-clickhouse.log" 36 | level = "info" 37 | encoding = "json" 38 | encoding-time = "iso8601" 39 | encoding-duration = "seconds" 40 | -------------------------------------------------------------------------------- /tests/wildcard_min_distance/carbon-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | 3 | [data] 4 | path = "/etc/carbon-clickhouse/data" 5 | chunk-interval = "1s" 6 | chunk-auto-interval = "" 7 | 8 | [upload.graphite_index] 9 | type = "index" 10 | table = "graphite_index" 11 | url = "{{ .CLICKHOUSE_URL }}/" 12 | timeout = "2m30s" 13 | cache-ttl = "1h" 14 | 15 | [upload.graphite_tags] 16 | type = "tagged" 17 | table = "graphite_tags" 18 | threads = 3 19 | url = "{{ .CLICKHOUSE_URL }}/" 20 | timeout = "2m30s" 21 | cache-ttl = "1h" 22 | 23 | [upload.graphite_reverse] 24 | type = "points-reverse" 25 | table = "graphite_reverse" 26 | url = "{{ .CLICKHOUSE_URL }}/" 27 | timeout = "2m30s" 28 | zero-timestamp = false 29 | 30 | [upload.graphite] 31 | type = "points" 32 | table = "graphite" 33 | url = "{{ .CLICKHOUSE_URL }}/" 34 | timeout = "2m30s" 35 | zero-timestamp = false 36 | 37 | [tcp] 38 | listen = ":2003" 39 | enabled = true 40 | drop-future = "0s" 41 | drop-past = "0s" 42 | 43 | [logging] 44 | file = "/etc/carbon-clickhouse/carbon-clickhouse.log" 45 | level = "debug" 46 | -------------------------------------------------------------------------------- /tests/wildcard_min_distance/graphite-clickhouse.conf.tpl: -------------------------------------------------------------------------------- 1 | [common] 2 | listen = "{{ .GCH_ADDR }}" 3 | max-cpu = 0 4 | max-metrics-in-render-answer = 10000 5 | max-metrics-per-target = 10000 6 | headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] 7 | 8 | [clickhouse] 9 | url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" 10 | data-timeout = "30s" 11 | 12 | wildcard-min-distance = 1 
13 | 14 | index-table = "graphite_index" 15 | index-use-daily = true 16 | index-timeout = "1m" 17 | internal-aggregation = true 18 | 19 | tagged-table = "graphite_tags" 20 | tagged-autocomplete-days = 1 21 | 22 | [[data-table]] 23 | # # clickhouse table name 24 | table = "graphite" 25 | # # points in table are stored with reverse path 26 | reverse = false 27 | rollup-conf = "auto" 28 | 29 | [[logging]] 30 | logger = "" 31 | file = "{{ .GCH_DIR }}/graphite-clickhouse.log" 32 | level = "info" 33 | encoding = "json" 34 | encoding-time = "iso8601" 35 | encoding-duration = "seconds" 36 | --------------------------------------------------------------------------------