├── .github └── workflows │ └── go.yml ├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── cmd ├── tsbs_generate_data │ └── main.go ├── tsbs_generate_queries │ ├── databases │ │ ├── akumuli │ │ │ ├── common.go │ │ │ └── devops.go │ │ ├── cassandra │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ ├── clickhouse │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ ├── common.go │ │ ├── cratedb │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ ├── influx │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ ├── devops_test.go │ │ │ ├── iot.go │ │ │ └── iot_test.go │ │ ├── mongo │ │ │ ├── common.go │ │ │ ├── devops-naive.go │ │ │ └── devops.go │ │ ├── questdb │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ ├── siridb │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ ├── timescaledb │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ ├── devops_test.go │ │ │ ├── iot.go │ │ │ └── iot_test.go │ │ ├── timestream │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ │ └── victoriametrics │ │ │ ├── common.go │ │ │ ├── devops.go │ │ │ └── devops_test.go │ ├── main.go │ ├── uses │ │ ├── common │ │ │ ├── common.go │ │ │ └── common_test.go │ │ ├── devops │ │ │ ├── common.go │ │ │ ├── common_test.go │ │ │ ├── groupby.go │ │ │ ├── groupby_orderby_limit.go │ │ │ ├── high_cpu.go │ │ │ ├── lastpoint.go │ │ │ ├── max_all_cpu.go │ │ │ └── single_groupby.go │ │ └── iot │ │ │ ├── avg_daily_driving_duration.go │ │ │ ├── avg_daily_driving_session.go │ │ │ ├── avg_load.go │ │ │ ├── avg_vs_projected_fuel.go │ │ │ ├── common.go │ │ │ ├── daily_truck_activity.go │ │ │ ├── high_load.go │ │ │ ├── lastloc.go │ │ │ ├── lastloc_single_truck.go │ │ │ ├── long_daily_session.go │ │ │ ├── long_driving_session.go │ │ │ ├── low_fuel.go │ │ │ ├── stationary.go │ │ │ └── truck_breakdown_activity.go │ └── utils │ │ └── query_generator.go ├── tsbs_load │ ├── README.md │ ├── config.go │ ├── config_cmd.go │ ├── load_cmd.go │ ├── load_cmd_flags.go │ ├── main.go │ ├── parse_config.go │ └── root.go ├── tsbs_load_akumuli │ └── main.go ├── tsbs_load_cassandra │ └── main.go ├── tsbs_load_clickhouse │ └── main.go ├── tsbs_load_cratedb │ ├── creator.go │ ├── creator_test.go │ ├── main.go │ ├── processor.go │ ├── scan.go │ └── scan_test.go ├── tsbs_load_influx │ ├── creator.go │ ├── http_writer.go │ ├── http_writer_test.go │ ├── main.go │ ├── process.go │ ├── process_test.go │ ├── scan.go │ └── scan_test.go ├── tsbs_load_mongo │ ├── aggregate_loader.go │ ├── common_loader.go │ ├── creator.go │ ├── document_per_loader.go │ └── main.go ├── tsbs_load_prometheus │ ├── adapter │ │ ├── main.go │ │ └── noop │ │ │ └── noop_adapter.go │ └── main.go ├── tsbs_load_questdb │ ├── creator.go │ ├── main.go │ ├── process.go │ ├── process_test.go │ ├── scan.go │ └── scan_test.go ├── tsbs_load_siridb │ ├── creator.go │ ├── main.go │ ├── process.go │ ├── scan.go │ └── scan_test.go ├── tsbs_load_timescaledb │ ├── database_stats.go │ ├── main.go │ └── profile.go ├── tsbs_load_victoriametrics │ └── main.go ├── tsbs_run_queries_akumuli │ ├── http_client.go │ └── main.go ├── tsbs_run_queries_cassandra │ ├── client_side_index.go │ ├── conn.go │ ├── main.go │ ├── query.go │ ├── query_executor.go │ ├── query_plan.go │ ├── query_plan_aggregators.go │ └── time_util.go ├── tsbs_run_queries_clickhouse │ └── main.go ├── tsbs_run_queries_cratedb │ └── main.go ├── tsbs_run_queries_influx │ ├── http_client.go │ └── main.go ├── tsbs_run_queries_mongo 
│ └── main.go ├── tsbs_run_queries_questdb │ ├── http_client.go │ └── main.go ├── tsbs_run_queries_siridb │ └── main.go ├── tsbs_run_queries_timescaledb │ └── main.go ├── tsbs_run_queries_timestream │ └── main.go └── tsbs_run_queries_victoriametrics │ └── main.go ├── docs ├── akumuli.md ├── cassandra.md ├── clickhouse.md ├── cratedb.md ├── influx.md ├── mongo.md ├── questdb.md ├── sample-configs │ ├── timescale-multi-node-cpu-only-file.yaml │ ├── timescale-single-node-cpu-only-file.yaml │ └── timescale-single-node-cpu-only-simulator.yaml ├── siridb.md ├── timescaledb.md ├── timestream.md ├── tsbs_load.md └── victoriametrics.md ├── go.mod ├── go.sum ├── helm ├── .helmignore ├── Chart.yaml ├── templates │ ├── _helpers.tpl │ ├── deployment.yaml │ └── pvc.yaml └── values.yaml ├── internal ├── inputs │ ├── generator.go │ ├── generator_data.go │ ├── generator_data_test.go │ ├── generator_queries.go │ ├── generator_queries_test.go │ ├── generator_test.go │ ├── utils.go │ └── utils_test.go └── utils │ ├── config.go │ ├── funcs.go │ ├── funcs_test.go │ ├── time_interval.go │ └── time_interval_test.go ├── load ├── buffered_reader.go ├── duplex_channel.go ├── duplex_channel_test.go ├── insertstrategy │ ├── sleep_regulator.go │ ├── sleep_regulator_config.go │ ├── sleep_regulator_config_test.go │ └── sleep_regulator_test.go ├── loader-no-flow-control.go ├── loader.go ├── loader_test.go ├── loader_test_result.go ├── scan_no_flow_control.go ├── scan_no_flow_control_test.go ├── scan_with_flow_control.go └── scan_with_flow_control_test.go ├── pkg ├── data │ ├── point.go │ ├── point_test.go │ ├── serialize │ │ ├── point_serializer.go │ │ ├── point_serializer_utils.go │ │ ├── util.go │ │ └── util_test.go │ ├── source │ │ ├── config.go │ │ ├── config_test.go │ │ └── file_data_source_config.go │ └── usecases │ │ ├── common │ │ ├── common.go │ │ ├── common_test.go │ │ ├── distribution.go │ │ ├── distribution_test.go │ │ ├── generator.go │ │ ├── measurement.go │ │ ├── measurement_test.go │ │ ├── simulator.go │ │ └── simulator_test.go │ │ ├── devops │ │ ├── common_generate_data.go │ │ ├── common_generate_data_test.go │ │ ├── cpu.go │ │ ├── cpu_only_generate_data.go │ │ ├── cpu_only_generate_data_test.go │ │ ├── cpu_test.go │ │ ├── disk.go │ │ ├── disk_test.go │ │ ├── diskio.go │ │ ├── diskio_test.go │ │ ├── generate_data.go │ │ ├── generate_data_test.go │ │ ├── generic_metrics.go │ │ ├── generic_metrics_generate_data.go │ │ ├── generic_metrics_generate_data_test.go │ │ ├── host.go │ │ ├── host_test.go │ │ ├── kernel.go │ │ ├── kernel_test.go │ │ ├── mem.go │ │ ├── mem_test.go │ │ ├── net.go │ │ ├── net_test.go │ │ ├── nginx.go │ │ ├── nginx_test.go │ │ ├── postgresql.go │ │ ├── postgresql_test.go │ │ ├── redis.go │ │ └── redis_test.go │ │ ├── iot │ │ ├── batch_config.go │ │ ├── batch_config_test.go │ │ ├── diagnostics.go │ │ ├── diagnostics_test.go │ │ ├── readings.go │ │ ├── readings_test.go │ │ ├── simulator.go │ │ ├── simulator_test.go │ │ ├── truck.go │ │ └── truck_test.go │ │ ├── usecases.go │ │ └── usecases_test.go ├── query │ ├── benchmark_result.go │ ├── benchmarker.go │ ├── benchmarker_test.go │ ├── cassandra.go │ ├── cassandra_test.go │ ├── clickhouse.go │ ├── config │ │ └── config.go │ ├── cratedb.go │ ├── cratedb_test.go │ ├── factories │ │ └── init_factories.go │ ├── http.go │ ├── http_test.go │ ├── mongo.go │ ├── mongo_test.go │ ├── query.go │ ├── query_test.go │ ├── scanner.go │ ├── scanner_test.go │ ├── siridb.go │ ├── siridb_test.go │ ├── stat_processor.go │ ├── stat_processor_test.go │ ├── 
stats.go │ ├── stats_test.go │ ├── timescaledb.go │ ├── timescaledb_test.go │ └── timestream.go └── targets │ ├── akumuli │ ├── benchmark.go │ ├── creator.go │ ├── implemented_target.go │ ├── process.go │ ├── scan.go │ ├── serializer.go │ └── serializer_test.go │ ├── cassandra │ ├── benchmark.go │ ├── creator.go │ ├── db_specific_config.go │ ├── implemented_target.go │ ├── scan.go │ ├── scan_test.go │ ├── serializer.go │ └── serializer_test.go │ ├── clickhouse │ ├── benchmark.go │ ├── clickhouse_test.go │ ├── creator.go │ ├── creator_test.go │ ├── file_data_source.go │ ├── implemented_target.go │ ├── indexer.go │ └── processor.go │ ├── common │ └── generic_point_indexer.go │ ├── constants │ └── constants.go │ ├── crate │ ├── implemented_target.go │ ├── serializer.go │ └── serializer_test.go │ ├── creator.go │ ├── influx │ ├── implemented_target.go │ ├── serializer.go │ └── serializer_test.go │ ├── initializers │ └── target_initializers.go │ ├── mongo │ ├── MongoPoint.go │ ├── MongoReading.go │ ├── MongoTag.go │ ├── implemented_target.go │ ├── mongo.fbs │ ├── serializer.go │ └── serializer_test.go │ ├── processor.go │ ├── prometheus │ ├── benchmark.go │ ├── benchmark_test.go │ ├── client.go │ ├── db_specific_config.go │ ├── implemented_target.go │ ├── point_indexer.go │ ├── point_indexer_test.go │ ├── serializer.go │ ├── serializer_test.go │ ├── simulation_data_source.go │ └── simulation_data_source_test.go │ ├── questdb │ ├── implemented_target.go │ ├── serializer.go │ └── serializer_test.go │ ├── siridb │ ├── implemented_target.go │ ├── serializer.go │ └── serializer_test.go │ ├── targets.go │ ├── timescaledb │ ├── benchmark.go │ ├── creator.go │ ├── creator_test.go │ ├── file_data_source.go │ ├── implemented_target.go │ ├── process.go │ ├── process_test.go │ ├── program_options.go │ ├── program_options_test.go │ ├── scan.go │ ├── scan_test.go │ ├── serializer.go │ ├── serializer_test.go │ └── simulation_data_source.go │ ├── timestream │ ├── aws_session.go │ ├── batch.go │ ├── benchmark.go │ ├── common_dimensions_processor.go │ ├── config.go │ ├── db_creator.go │ ├── deserialized_point.go │ ├── each_point_a_record_processor.go │ ├── file_data_source.go │ ├── implemented_target.go │ ├── serializer.go │ └── simulation_data_source.go │ └── victoriametrics │ ├── batch.go │ ├── benchmark.go │ ├── creator.go │ ├── data_source.go │ ├── data_source_test.go │ ├── implemented_target.go │ ├── processor.go │ └── processor_test.go └── scripts ├── full_cycle_minitest ├── full_cycle_minitest_clickhouse.sh ├── full_cycle_minitest_cratedb.sh ├── full_cycle_minitest_influx.sh ├── full_cycle_minitest_questdb.sh └── full_cycle_minitest_timescaledb.sh ├── generate_data.sh ├── generate_queries.sh ├── generate_run_script.py ├── load ├── load_akumuli.sh ├── load_cassandra.sh ├── load_clickhouse.sh ├── load_common.sh ├── load_cratedb.sh ├── load_influx.sh ├── load_mongo.sh ├── load_questdb.sh ├── load_siridb.sh ├── load_timescaledb.sh └── load_victoriametrics.sh ├── release_siridb.sh ├── run_queries ├── run_queries_akumuli.sh ├── run_queries_clickhouse.sh ├── run_queries_influx.sh ├── run_queries_mongo.sh ├── run_queries_questdb.sh ├── run_queries_siridb.sh ├── run_queries_timescaledb.sh └── run_queries_timestream.sh ├── start_timescaledb.sh └── stop_timescaledb.sh /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | runs-on: 
ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v4 17 | with: 18 | go-version: '1.20' 19 | 20 | - name: Test 21 | run: go test -v -race ./... 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.out 2 | *.log 3 | .DS_Store 4 | .idea 5 | .vscode 6 | *~ 7 | /bin 8 | 9 | # High Dynamic Range (HDR) Histogram files 10 | *.hdr 11 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | dist: focal 3 | jobs: 4 | include: 5 | - stage: test 6 | name: "Go 1.14" 7 | go: 8 | - 1.14.x 9 | install: skip 10 | script: 11 | - GO111MODULE=on go test -v -race -coverprofile=coverage.txt -covermode=atomic ./... 12 | - stage: test 13 | name: "Go 1.15" 14 | go: 15 | - 1.15.x 16 | install: skip 17 | script: 18 | - GO111MODULE=on go test -v -race -coverprofile=coverage.txt -covermode=atomic ./... 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build stage 2 | FROM golang:1.20.8-alpine AS builder 3 | WORKDIR /tsbs 4 | COPY ./ ./ 5 | RUN apk update && apk add --no-cache git 6 | RUN go mod download && go install ./... 7 | 8 | # Final stage 9 | FROM alpine:3.18 10 | RUN apk update && apk add --no-cache bash 11 | COPY --from=builder /go/bin / 12 | COPY --from=builder /tsbs/scripts / 13 | # We need to keep the container running since there is no background process 14 | ENTRYPOINT ["/bin/bash", "-c", "trap : TERM INT; (while true; do sleep 1000; done) & wait"] 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017-2021 Timescale, Inc. 4 | Copyright (c) 2016 InfluxData 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Go parameters 2 | GOCMD=GO111MODULE=on go 3 | GOBUILD=$(GOCMD) build 4 | GOINSTALL=$(GOCMD) install 5 | GOCLEAN=$(GOCMD) clean 6 | GOTEST=$(GOCMD) test 7 | GOGET=$(GOCMD) get 8 | GOMOD=$(GOCMD) mod 9 | GOFMT=$(GOCMD) fmt 10 | 11 | .PHONY: all generators loaders runners test coverage lint fmt checkfmt 12 | 13 | all: generators loaders runners 14 | 15 | generators: tsbs_generate_data \ 16 | tsbs_generate_queries 17 | 18 | loaders: tsbs_load \ 19 | tsbs_load_akumuli \ 20 | tsbs_load_cassandra \ 21 | tsbs_load_clickhouse \ 22 | tsbs_load_cratedb \ 23 | tsbs_load_influx \ 24 | tsbs_load_mongo \ 25 | tsbs_load_prometheus \ 26 | tsbs_load_siridb \ 27 | tsbs_load_timescaledb \ 28 | tsbs_load_victoriametrics \ 29 | tsbs_load_questdb 30 | 31 | runners: tsbs_run_queries_akumuli \ 32 | tsbs_run_queries_cassandra \ 33 | tsbs_run_queries_clickhouse \ 34 | tsbs_run_queries_cratedb \ 35 | tsbs_run_queries_influx \ 36 | tsbs_run_queries_mongo \ 37 | tsbs_run_queries_siridb \ 38 | tsbs_run_queries_timescaledb \ 39 | tsbs_run_queries_timestream \ 40 | tsbs_run_queries_victoriametrics \ 41 | tsbs_run_queries_questdb 42 | 43 | test: 44 | $(GOTEST) -v ./... 45 | 46 | coverage: 47 | $(GOTEST) -race -coverprofile=coverage.txt -covermode=atomic ./... 48 | 49 | tsbs_%: $(wildcard ./cmd/$@/*.go) 50 | $(GOGET) ./cmd/$@ 51 | $(GOBUILD) -o bin/$@ ./cmd/$@ 52 | $(GOINSTALL) ./cmd/$@ 53 | 54 | checkfmt: 55 | @echo 'Checking gofmt';\ 56 | bash -c "diff -u <(echo -n) <(gofmt -d .)";\ 57 | EXIT_CODE=$$?;\ 58 | if [ "$$EXIT_CODE" -ne 0 ]; then \ 59 | echo '$@: Go files must be formatted with gofmt'; \ 60 | fi && \ 61 | exit $$EXIT_CODE 62 | 63 | lint: 64 | $(GOGET) github.com/golangci/golangci-lint/cmd/golangci-lint 65 | golangci-lint run 66 | 67 | fmt: 68 | $(GOFMT) ./... 69 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/akumuli/common.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // BaseGenerator contains settings specific for Akumuli database. 12 | type BaseGenerator struct { 13 | } 14 | 15 | // GenerateEmptyQuery returns an empty query.HTTP. 16 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 17 | return query.NewHTTP() 18 | } 19 | 20 | // fillInQuery fills the query struct with data. 21 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, body string, begin, end int64) { 22 | q := qi.(*query.HTTP) 23 | q.HumanLabel = []byte(humanLabel) 24 | q.HumanDescription = []byte(humanDesc) 25 | q.Method = []byte("POST") 26 | q.Path = []byte("/api/query") 27 | q.Body = []byte(body) 28 | q.StartTimestamp = begin 29 | q.EndTimestamp = end 30 | } 31 | 32 | // NewDevops makes a Devops object ready to generate queries.
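// Like the other database generators in this tree, the returned Devops pairs this BaseGenerator with a devops.Core holding the benchmark's scale and start/end time range; use-case QueryFillers (see cmd/tsbs_generate_queries/utils/query_generator.go) are then applied to it.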
33 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 34 | core, err := devops.NewCore(start, end, scale) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | devops := &Devops{ 40 | BaseGenerator: g, 41 | Core: core, 42 | } 43 | 44 | return devops, nil 45 | } 46 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/cassandra/common.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 8 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 9 | internalutils "github.com/timescale/tsbs/internal/utils" 10 | "github.com/timescale/tsbs/pkg/query" 11 | ) 12 | 13 | // BaseGenerator contains settings specific for Cassandra database. 14 | type BaseGenerator struct { 15 | } 16 | 17 | // GenerateEmptyQuery returns an empty query.Cassandra. 18 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 19 | return query.NewCassandra() 20 | } 21 | 22 | // fillInQuery fills the query struct with data. 23 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, aggType string, fields []string, interval *internalutils.TimeInterval, tagSets [][]string) { 24 | q := qi.(*query.Cassandra) 25 | q.HumanLabel = []byte(humanLabel) 26 | q.HumanDescription = []byte(humanDesc) 27 | 28 | q.AggregationType = []byte(aggType) 29 | q.MeasurementName = []byte("cpu") 30 | q.FieldName = []byte(strings.Join(fields, ",")) 31 | 32 | q.TimeStart = interval.Start() 33 | q.TimeEnd = interval.End() 34 | 35 | q.TagSets = tagSets 36 | } 37 | 38 | // NewDevops creates a new devops use case query generator. 39 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 40 | core, err := devops.NewCore(start, end, scale) 41 | 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | devops := &Devops{ 47 | BaseGenerator: g, 48 | Core: core, 49 | } 50 | 51 | return devops, nil 52 | } 53 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/clickhouse/common.go: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // BaseGenerator contains settings specific for ClickHouse. 12 | type BaseGenerator struct { 13 | UseTags bool 14 | } 15 | 16 | // GenerateEmptyQuery returns an empty query.ClickHouse. 17 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 18 | return query.NewClickHouse() 19 | } 20 | 21 | // fillInQuery fills the query struct with data. 22 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, table, sql string) { 23 | q := qi.(*query.ClickHouse) 24 | q.HumanLabel = []byte(humanLabel) 25 | q.HumanDescription = []byte(humanDesc) 26 | q.Table = []byte(table) 27 | q.SqlQuery = []byte(sql) 28 | } 29 | 30 | // NewDevops creates a new devops use case query generator.
31 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 32 | core, err := devops.NewCore(start, end, scale) 33 | 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | devops := &Devops{ 39 | BaseGenerator: g, 40 | Core: core, 41 | } 42 | 43 | return devops, nil 44 | } 45 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/common.go: -------------------------------------------------------------------------------- 1 | package databases 2 | 3 | // PanicIfErr panics when passed a non-nil error 4 | // TODO: Remove the need for this by continuing to bubble up errors 5 | func PanicIfErr(err error) { 6 | if err != nil { 7 | panic(err.Error()) 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/cratedb/common.go: -------------------------------------------------------------------------------- 1 | package cratedb 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // BaseGenerator contains settings specific for CrateDB 12 | type BaseGenerator struct { 13 | } 14 | 15 | // GenerateEmptyQuery returns an empty query.CrateDB. 16 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 17 | return query.NewCrateDB() 18 | } 19 | 20 | // fillInQuery fills the query struct with data. 21 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, sql string) { 22 | q := qi.(*query.CrateDB) 23 | q.HumanLabel = []byte(humanLabel) 24 | q.HumanDescription = []byte(humanDesc) 25 | q.Table = []byte("cpu") 26 | q.SqlQuery = []byte(sql) 27 | } 28 | 29 | // NewDevops creates a new devops use case query generator. 30 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 31 | core, err := devops.NewCore(start, end, scale) 32 | 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | devops := &Devops{ 38 | BaseGenerator: g, 39 | Core: core, 40 | } 41 | 42 | return devops, nil 43 | } 44 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/influx/common.go: -------------------------------------------------------------------------------- 1 | package influx 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "time" 7 | 8 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 9 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/iot" 10 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 11 | "github.com/timescale/tsbs/pkg/query" 12 | ) 13 | 14 | // BaseGenerator contains settings specific for Influx database. 15 | type BaseGenerator struct { 16 | } 17 | 18 | // GenerateEmptyQuery returns an empty query.HTTP. 19 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 20 | return query.NewHTTP() 21 | } 22 | 23 | // fillInQuery fills the query struct with data. 
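// The InfluxQL text is URL-encoded into the request path; e.g. (illustrative) a query such as SELECT max(usage_user) FROM cpu is sent as POST /query?q=SELECT+max%28usage_user%29+FROM+cpu with an empty body.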
24 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, influxql string) { 25 | v := url.Values{} 26 | v.Set("q", influxql) 27 | q := qi.(*query.HTTP) 28 | q.HumanLabel = []byte(humanLabel) 29 | q.RawQuery = []byte(influxql) 30 | q.HumanDescription = []byte(humanDesc) 31 | q.Method = []byte("POST") 32 | q.Path = []byte(fmt.Sprintf("/query?%s", v.Encode())) 33 | q.Body = nil 34 | } 35 | 36 | // NewDevops creates a new devops use case query generator. 37 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 38 | core, err := devops.NewCore(start, end, scale) 39 | 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | devops := &Devops{ 45 | BaseGenerator: g, 46 | Core: core, 47 | } 48 | 49 | return devops, nil 50 | } 51 | 52 | // NewIoT creates a new iot use case query generator. 53 | func (g *BaseGenerator) NewIoT(start, end time.Time, scale int) (utils.QueryGenerator, error) { 54 | core, err := iot.NewCore(start, end, scale) 55 | 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | iotGenerator := &IoT{ 61 | BaseGenerator: g, 62 | Core: core, 63 | } 64 | 65 | return iotGenerator, nil 66 | } 67 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/mongo/common.go: -------------------------------------------------------------------------------- 1 | package mongo 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // BaseGenerator contains settings specific for Mongo database. 12 | type BaseGenerator struct { 13 | UseNaive bool 14 | } 15 | 16 | // GenerateEmptyQuery returns an empty query.Mongo. 17 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 18 | return query.NewMongo() 19 | } 20 | 21 | // NewDevops creates a new devops use case query generator. 22 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 23 | core, err := devops.NewCore(start, end, scale) 24 | 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | var devops utils.QueryGenerator = &Devops{ 30 | BaseGenerator: g, 31 | Core: core, 32 | } 33 | 34 | if g.UseNaive { 35 | devops = &NaiveDevops{ 36 | BaseGenerator: g, 37 | Core: core, 38 | } 39 | 40 | } 41 | 42 | return devops, nil 43 | } 44 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/questdb/common.go: -------------------------------------------------------------------------------- 1 | package questdb 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "time" 7 | 8 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 9 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 10 | "github.com/timescale/tsbs/pkg/query" 11 | ) 12 | 13 | // BaseGenerator contains settings specific for QuestDB 14 | type BaseGenerator struct { 15 | } 16 | 17 | // GenerateEmptyQuery returns an empty query.HTTP. 18 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 19 | return query.NewHTTP() 20 | } 21 | 22 | // fillInQuery fills the query struct with data.
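// QuestDB answers SQL over its HTTP /exec endpoint; e.g. (illustrative) SELECT count() FROM cpu becomes GET /exec?count=false&query=SELECT+count%28%29+FROM+cpu.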
23 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, sql string) { 24 | v := url.Values{} 25 | v.Set("count", "false") 26 | v.Set("query", sql) 27 | q := qi.(*query.HTTP) 28 | q.HumanLabel = []byte(humanLabel) 29 | q.RawQuery = []byte(sql) 30 | q.HumanDescription = []byte(humanDesc) 31 | q.Method = []byte("GET") 32 | q.Path = []byte(fmt.Sprintf("/exec?%s", v.Encode())) 33 | q.Body = nil 34 | } 35 | 36 | // NewDevops creates a new devops use case query generator. 37 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 38 | core, err := devops.NewCore(start, end, scale) 39 | 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | devops := &Devops{ 45 | BaseGenerator: g, 46 | Core: core, 47 | } 48 | 49 | return devops, nil 50 | } 51 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/siridb/common.go: -------------------------------------------------------------------------------- 1 | package siridb 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // BaseGenerator contains settings specific for SiriDB 12 | type BaseGenerator struct { 13 | } 14 | 15 | // GenerateEmptyQuery returns an empty query.SiriDB. 16 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 17 | return query.NewSiriDB() 18 | } 19 | 20 | // fillInQuery fills the query struct with data. 21 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, sql string) { 22 | q := qi.(*query.SiriDB) 23 | q.HumanLabel = []byte(humanLabel) 24 | q.HumanDescription = []byte(humanDesc) 25 | q.SqlQuery = []byte(sql) 26 | } 27 | 28 | // NewDevops creates a new devops use case query generator. 29 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 30 | core, err := devops.NewCore(start, end, scale) 31 | 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | devops := &Devops{ 37 | BaseGenerator: g, 38 | Core: core, 39 | } 40 | 41 | return devops, nil 42 | } 43 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/timescaledb/common.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/iot" 8 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 9 | "github.com/timescale/tsbs/pkg/query" 10 | ) 11 | 12 | const goTimeFmt = "2006-01-02 15:04:05.999999 -0700" 13 | 14 | // BaseGenerator contains settings specific for TimescaleDB 15 | type BaseGenerator struct { 16 | UseJSON bool 17 | UseTags bool 18 | UseTimeBucket bool 19 | } 20 | 21 | // GenerateEmptyQuery returns an empty query.TimescaleDB. 22 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 23 | return query.NewTimescaleDB() 24 | } 25 | 26 | // fillInQuery fills the query struct with data. 
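// Alongside the SQL text, each TimescaleDB query records the hypertable it targets; for the devops use case this is typically the cpu hypertable.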
27 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, table, sql string) { 28 | q := qi.(*query.TimescaleDB) 29 | q.HumanLabel = []byte(humanLabel) 30 | q.HumanDescription = []byte(humanDesc) 31 | q.Hypertable = []byte(table) 32 | q.SqlQuery = []byte(sql) 33 | } 34 | 35 | // NewDevops creates a new devops use case query generator. 36 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 37 | core, err := devops.NewCore(start, end, scale) 38 | 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | devops := &Devops{ 44 | BaseGenerator: g, 45 | Core: core, 46 | } 47 | 48 | return devops, nil 49 | } 50 | 51 | // NewIoT creates a new iot use case query generator. 52 | func (g *BaseGenerator) NewIoT(start, end time.Time, scale int) (utils.QueryGenerator, error) { 53 | core, err := iot.NewCore(start, end, scale) 54 | 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | iotGenerator := &IoT{ 60 | BaseGenerator: g, 61 | Core: core, 62 | } 63 | 64 | return iotGenerator, nil 65 | } 66 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/timestream/common.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | const goTimeFmt = "2006-01-02 15:04:05.999999 -0700" 12 | 13 | // BaseGenerator contains settings specific for Timestream 14 | type BaseGenerator struct { 15 | DBName string 16 | } 17 | 18 | // GenerateEmptyQuery returns an empty query.Timestream. 19 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 20 | return query.NewTimestream() 21 | } 22 | 23 | // fillInQuery fills the query struct with data. 24 | func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, table, sql string) { 25 | q := qi.(*query.Timestream) 26 | q.HumanLabel = []byte(humanLabel) 27 | q.HumanDescription = []byte(humanDesc) 28 | q.SqlQuery = []byte(sql) 29 | q.Table = []byte(table) 30 | } 31 | 32 | // NewDevops creates a new devops use case query generator. 33 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 34 | core, err := devops.NewCore(start, end, scale) 35 | 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | dOps := &Devops{ 41 | BaseGenerator: g, 42 | Core: core, 43 | } 44 | 45 | return dOps, nil 46 | } 47 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/databases/victoriametrics/common.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/devops" 10 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 11 | iutils "github.com/timescale/tsbs/internal/utils" 12 | "github.com/timescale/tsbs/pkg/query" 13 | ) 14 | 15 | type BaseGenerator struct{} 16 | 17 | // GenerateEmptyQuery returns an empty query.HTTP. 18 | func (g *BaseGenerator) GenerateEmptyQuery() query.Query { 19 | return query.NewHTTP() 20 | } 21 | 22 | // NewDevops creates a new devops use case query generator.
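// VictoriaMetrics is queried through its Prometheus-compatible HTTP API: fillInQuery below turns each queryInfo into a GET request against /api/v1/query_range.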
23 | func (g *BaseGenerator) NewDevops(start, end time.Time, scale int) (utils.QueryGenerator, error) { 24 | core, err := devops.NewCore(start, end, scale) 25 | if err != nil { 26 | return nil, err 27 | } 28 | return &Devops{ 29 | BaseGenerator: g, 30 | Core: core, 31 | }, nil 32 | } 33 | 34 | type queryInfo struct { 35 | // prometheus query 36 | query string 37 | // label to describe type of query 38 | label string 39 | // desc to describe the query in more detail 40 | desc string 41 | // time range for query executing 42 | interval *iutils.TimeInterval 43 | // time period to group by in seconds 44 | step string 45 | } 46 | 47 | // fillInQuery fills the query struct with data. 48 | func (g *BaseGenerator) fillInQuery(qq query.Query, qi *queryInfo) { 49 | q := qq.(*query.HTTP) 50 | q.HumanLabel = []byte(qi.label) 51 | if qi.interval != nil { 52 | q.HumanDescription = []byte(fmt.Sprintf("%s: %s", qi.label, qi.interval.StartString())) 53 | } 54 | q.Method = []byte("GET") 55 | 56 | v := url.Values{} 57 | v.Set("query", qi.query) 58 | v.Set("start", strconv.FormatInt(qi.interval.StartUnixNano()/1e9, 10)) 59 | v.Set("end", strconv.FormatInt(qi.interval.EndUnixNano()/1e9, 10)) 60 | v.Set("step", qi.step) 61 | q.Path = []byte(fmt.Sprintf("/api/v1/query_range?%s", v.Encode())) 62 | q.Body = nil 63 | } 64 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/groupby.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // Groupby produces a QueryFiller for the devops groupby case. 10 | type Groupby struct { 11 | core utils.QueryGenerator 12 | numMetrics int 13 | } 14 | 15 | // NewGroupBy produces a function that produces a new Groupby for the given parameters 16 | func NewGroupBy(numMetrics int) utils.QueryFillerMaker { 17 | return func(core utils.QueryGenerator) utils.QueryFiller { 18 | return &Groupby{ 19 | core: core, 20 | numMetrics: numMetrics, 21 | } 22 | } 23 | } 24 | 25 | // Fill fills in the query.Query with query details 26 | func (d *Groupby) Fill(q query.Query) query.Query { 27 | fc, ok := d.core.(DoubleGroupbyFiller) 28 | if !ok { 29 | common.PanicUnimplementedQuery(d.core) 30 | } 31 | fc.GroupByTimeAndPrimaryTag(q, d.numMetrics) 32 | return q 33 | } 34 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/groupby_orderby_limit.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // GroupByOrderByLimit produces a filler for queries in the devops groupby-orderby-limit case.
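// Roughly: fetch the last 5 aggregated time buckets before a randomly chosen end time, ordered by time descending; see the per-database devops.go files for the concrete query text.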
10 | type GroupByOrderByLimit struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewGroupByOrderByLimit returns a new GroupByOrderByLimit for given parameters 15 | func NewGroupByOrderByLimit(core utils.QueryGenerator) utils.QueryFiller { 16 | return &GroupByOrderByLimit{core} 17 | } 18 | 19 | // Fill fills in the query.Query with query details 20 | func (d *GroupByOrderByLimit) Fill(q query.Query) query.Query { 21 | fc, ok := d.core.(GroupbyOrderbyLimitFiller) 22 | if !ok { 23 | common.PanicUnimplementedQuery(d.core) 24 | } 25 | fc.GroupByOrderByLimit(q) 26 | return q 27 | } 28 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/high_cpu.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // HighCPU produces a QueryFiller for the devops high-cpu cases 10 | type HighCPU struct { 11 | core utils.QueryGenerator 12 | hosts int 13 | } 14 | 15 | // NewHighCPU produces a new function that produces a new HighCPU 16 | func NewHighCPU(hosts int) utils.QueryFillerMaker { 17 | return func(core utils.QueryGenerator) utils.QueryFiller { 18 | return &HighCPU{ 19 | core: core, 20 | hosts: hosts, 21 | } 22 | } 23 | } 24 | 25 | // Fill fills in the query.Query with query details 26 | func (d *HighCPU) Fill(q query.Query) query.Query { 27 | fc, ok := d.core.(HighCPUFiller) 28 | if !ok { 29 | common.PanicUnimplementedQuery(d.core) 30 | } 31 | fc.HighCPUForHosts(q, d.hosts) 32 | return q 33 | } 34 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/lastpoint.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // LastPointPerHost returns a QueryFiller for the devops lastpoint case 10 | type LastPointPerHost struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewLastPointPerHost returns a new LastPointPerHost for given parameters 15 | func NewLastPointPerHost(core utils.QueryGenerator) utils.QueryFiller { 16 | return &LastPointPerHost{core} 17 | } 18 | 19 | // Fill fills in the query.Query with query details 20 | func (d *LastPointPerHost) Fill(q query.Query) query.Query { 21 | fc, ok := d.core.(LastPointFiller) 22 | if !ok { 23 | common.PanicUnimplementedQuery(d.core) 24 | } 25 | fc.LastPointPerHost(q) 26 | return q 27 | } 28 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/max_all_cpu.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // MaxAllCPU contains info for filling in a query.Query for "max all" queries 12 | type MaxAllCPU struct { 13 | core utils.QueryGenerator 14 | hosts int 15 | duration time.Duration 16 | } 17 | 18 | // NewMaxAllCPU produces a new function that produces a new MaxAllCPU 19 |
func NewMaxAllCPU(hosts int, duration time.Duration) utils.QueryFillerMaker { 20 | return func(core utils.QueryGenerator) utils.QueryFiller { 21 | return &MaxAllCPU{ 22 | core: core, 23 | hosts: hosts, 24 | duration: duration, 25 | } 26 | } 27 | } 28 | 29 | // Fill fills in the query.Query with query details 30 | func (d *MaxAllCPU) Fill(q query.Query) query.Query { 31 | fc, ok := d.core.(MaxAllFiller) 32 | if !ok { 33 | common.PanicUnimplementedQuery(d.core) 34 | } 35 | fc.MaxAllCPU(q, d.hosts, d.duration) 36 | return q 37 | } 38 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/devops/single_groupby.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 7 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 8 | "github.com/timescale/tsbs/pkg/query" 9 | ) 10 | 11 | // SingleGroupby contains info for filling in single groupby queries 12 | type SingleGroupby struct { 13 | core utils.QueryGenerator 14 | metrics int 15 | hosts int 16 | hours int 17 | } 18 | 19 | // NewSingleGroupby produces a new function that produces a new SingleGroupby 20 | func NewSingleGroupby(metrics, hosts, hours int) utils.QueryFillerMaker { 21 | return func(core utils.QueryGenerator) utils.QueryFiller { 22 | return &SingleGroupby{ 23 | core: core, 24 | metrics: metrics, 25 | hosts: hosts, 26 | hours: hours, 27 | } 28 | } 29 | } 30 | 31 | // Fill fills in the query.Query with query details 32 | func (d *SingleGroupby) Fill(q query.Query) query.Query { 33 | fc, ok := d.core.(SingleGroupbyFiller) 34 | if !ok { 35 | common.PanicUnimplementedQuery(d.core) 36 | } 37 | fc.GroupByTime(q, d.hosts, d.metrics, time.Duration(int64(d.hours)*int64(time.Hour))) 38 | return q 39 | } 40 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/avg_daily_driving_duration.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // AvgDailyDrivingDuration contains info for filling in avg daily driving duration per driver queries. 10 | type AvgDailyDrivingDuration struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewAvgDailyDrivingDuration creates a new avg daily driving duration per driver query filler. 15 | func NewAvgDailyDrivingDuration(core utils.QueryGenerator) utils.QueryFiller { 16 | return &AvgDailyDrivingDuration{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 
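// As with the devops fillers above, the type assertion inside Fill verifies that the target database's generator implements this query type, panicking via common.PanicUnimplementedQuery otherwise; the iot fillers that follow all repeat this shape.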
22 | func (i *AvgDailyDrivingDuration) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(AvgDailyDrivingDurationFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.AvgDailyDrivingDuration(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/avg_daily_driving_session.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // AvgDailyDrivingSession contains info for filling in avg daily driving session queries. 10 | type AvgDailyDrivingSession struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewAvgDailyDrivingSession creates a new avg daily driving session query filler. 15 | func NewAvgDailyDrivingSession(core utils.QueryGenerator) utils.QueryFiller { 16 | return &AvgDailyDrivingSession{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *AvgDailyDrivingSession) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(AvgDailyDrivingSessionFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.AvgDailyDrivingSession(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/avg_load.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // AvgLoad contains info for filling in avg load queries. 10 | type AvgLoad struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewAvgLoad creates a new avg load query filler. 15 | func NewAvgLoad(core utils.QueryGenerator) utils.QueryFiller { 16 | return &AvgLoad{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *AvgLoad) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(AvgLoadFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.AvgLoad(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/avg_vs_projected_fuel.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // AvgVsProjectedFuelConsumption contains info for filling in avg vs projected fuel consumption queries. 10 | type AvgVsProjectedFuelConsumption struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewAvgVsProjectedFuelConsumption creates a new avg vs projected fuel consumption query filler. 15 | func NewAvgVsProjectedFuelConsumption(core utils.QueryGenerator) utils.QueryFiller { 16 | return &AvgVsProjectedFuelConsumption{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 
22 | func (i *AvgVsProjectedFuelConsumption) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(AvgVsProjectedFuelConsumptionFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.AvgVsProjectedFuelConsumption(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/daily_truck_activity.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // DailyTruckActivity contains info for filling in daily truck activity queries. 10 | type DailyTruckActivity struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewDailyTruckActivity creates a new daily truck activity query filler. 15 | func NewDailyTruckActivity(core utils.QueryGenerator) utils.QueryFiller { 16 | return &DailyTruckActivity{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *DailyTruckActivity) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(DailyTruckActivityFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.DailyTruckActivity(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/high_load.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // TrucksWithHighLoad contains info for filling in trucks with high load queries. 10 | type TrucksWithHighLoad struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewTruckWithHighLoad creates a new trucks with high load query filler. 15 | func NewTruckWithHighLoad(core utils.QueryGenerator) utils.QueryFiller { 16 | return &TrucksWithHighLoad{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *TrucksWithHighLoad) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(TruckHighLoadFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.TrucksWithHighLoad(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/lastloc.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // LastLocPerTruck contains info for filling in last location queries. 10 | type LastLocPerTruck struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewLastLocPerTruck creates a new last location query filler. 15 | func NewLastLocPerTruck(core utils.QueryGenerator) utils.QueryFiller { 16 | return &LastLocPerTruck{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 
22 | func (i *LastLocPerTruck) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(LastLocFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.LastLocPerTruck(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/lastloc_single_truck.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // LastLocSingleTruck contains info for filling in last location query for a single truck. 10 | type LastLocSingleTruck struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewLastLocSingleTruck creates a new last location query filler. 15 | func NewLastLocSingleTruck(core utils.QueryGenerator) utils.QueryFiller { 16 | return &LastLocSingleTruck{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *LastLocSingleTruck) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(LastLocByTruckFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.LastLocByTruck(q, 1) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/long_daily_session.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // TrucksWithLongDailySession contains info for filling in trucks with longer driving session queries. 10 | type TrucksWithLongDailySession struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewTruckWithLongDailySession creates a new trucks with longer driving session query filler. 15 | func NewTruckWithLongDailySession(core utils.QueryGenerator) utils.QueryFiller { 16 | return &TrucksWithLongDailySession{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *TrucksWithLongDailySession) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(TruckLongDailySessionFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.TrucksWithLongDailySessions(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/long_driving_session.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // TrucksWithLongDrivingSession contains info for filling in trucks with longer driving sessions queries. 10 | type TrucksWithLongDrivingSession struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewTrucksWithLongDrivingSession creates a new trucks with longer driving sessions query filler. 15 | func NewTrucksWithLongDrivingSession(core utils.QueryGenerator) utils.QueryFiller { 16 | return &TrucksWithLongDrivingSession{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 
22 | func (i *TrucksWithLongDrivingSession) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(TruckLongDrivingSessionFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.TrucksWithLongDrivingSessions(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/low_fuel.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // TrucksWithLowFuel contains info for filling in trucks with low fuel queries. 10 | type TrucksWithLowFuel struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewTruckWithLowFuel creates a new trucks with low fuel query filler. 15 | func NewTruckWithLowFuel(core utils.QueryGenerator) utils.QueryFiller { 16 | return &TrucksWithLowFuel{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *TrucksWithLowFuel) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(TruckLowFuelFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.TrucksWithLowFuel(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/stationary.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // StationaryTrucks contains info for filling in stationary trucks queries. 10 | type StationaryTrucks struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewStationaryTrucks creates a new stationary trucks query filler. 15 | func NewStationaryTrucks(core utils.QueryGenerator) utils.QueryFiller { 16 | return &StationaryTrucks{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 22 | func (i *StationaryTrucks) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(StationaryTrucksFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.StationaryTrucks(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/uses/iot/truck_breakdown_activity.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/uses/common" 5 | "github.com/timescale/tsbs/cmd/tsbs_generate_queries/utils" 6 | "github.com/timescale/tsbs/pkg/query" 7 | ) 8 | 9 | // TruckBreakdownFrequency contains info for filling in truck breakdown frequency queries. 10 | type TruckBreakdownFrequency struct { 11 | core utils.QueryGenerator 12 | } 13 | 14 | // NewTruckBreakdownFrequency creates a new truck breakdown frequency query filler. 15 | func NewTruckBreakdownFrequency(core utils.QueryGenerator) utils.QueryFiller { 16 | return &TruckBreakdownFrequency{ 17 | core: core, 18 | } 19 | } 20 | 21 | // Fill fills in the query.Query with query details. 
22 | func (i *TruckBreakdownFrequency) Fill(q query.Query) query.Query { 23 | fc, ok := i.core.(TruckBreakdownFrequencyFiller) 24 | if !ok { 25 | common.PanicUnimplementedQuery(i.core) 26 | } 27 | fc.TruckBreakdownFrequency(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /cmd/tsbs_generate_queries/utils/query_generator.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "github.com/timescale/tsbs/pkg/query" 4 | 5 | // QueryGenerator is an interface that a database-specific implementation of a 6 | // use case implements to set basic configuration that can then be used by 7 | // a specific QueryFiller, ultimately yielding a query.Query with information 8 | // to be run. 9 | type QueryGenerator interface { 10 | GenerateEmptyQuery() query.Query 11 | } 12 | 13 | // QueryFiller describes a type that can fill in a query and return it 14 | type QueryFiller interface { 15 | // Fill fills in the query.Query with query details 16 | Fill(query.Query) query.Query 17 | } 18 | 19 | // QueryFillerMaker is a function that takes a QueryGenerator and returns a QueryFiller 20 | type QueryFillerMaker func(QueryGenerator) QueryFiller 21 | -------------------------------------------------------------------------------- /cmd/tsbs_load/README.md: -------------------------------------------------------------------------------- 1 | # How to use tsbs_load 2 | 3 | * `$ tsbs_load` 4 | * see available commands and global flags 5 | * available commands: help, config, load 6 | * `$ tsbs_load config` 7 | * generates an example config file with default values for each specific target 8 | * see available flags with `$ tsbs_load config --help`: 9 | * `--data-source` where to load the data from 10 | * `--target` where to load data into 11 | * for valid values execute the command 12 | * `$ tsbs_load load [target]` e.g. 
`$ tsbs_load load prometheus` 13 | * loads the data into the target database 14 | * default config is loaded from `./config.yaml` 15 | * each property can be overridden by the flags available 16 | * execute `$ tsbs_load load [target] --help` to see target-specific flags 17 | and their description and default values 18 | * execute `$ tsbs_load load` or `$ tsbs_load load --help` to see available targets 19 | and description of flags that are common for all target databases (batch size, 20 | target db name, number of workers, etc.) 21 | * e.g.: `--loader.db-specific.adapter-write-url` overrides the property 22 | in the config file that sets where the Prometheus adapter is listening 23 | * **flags override values in the config.yaml file** -------------------------------------------------------------------------------- /cmd/tsbs_load/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | rootCmd.Execute() 5 | } 6 | -------------------------------------------------------------------------------- /cmd/tsbs_load/root.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | var ( 8 | cfgFile string 9 | rootCmd = &cobra.Command{ 10 | Use: "tsbs_load", 11 | Short: "Load data into a database", 12 | } 13 | ) 14 | 15 | func init() { 16 | loadCmd, err := initLoadCMD() 17 | if err != nil { 18 | panic(err) 19 | } 20 | rootCmd.AddCommand(loadCmd) 21 | configCmd := initConfigCMD() 22 | rootCmd.AddCommand(configCmd) 23 | } 24 | -------------------------------------------------------------------------------- /cmd/tsbs_load_akumuli/main.go: -------------------------------------------------------------------------------- 1 | // tsbs_load_akumuli loads an akumulid daemon with data from stdin. 2 | // 3 | // The caller is responsible for ensuring that the database is empty before 4 | // bulk load.
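// A minimal invocation sketch for this loader. Only --endpoint is certain from
// the code below (it is read via viper); the remaining flag names are
// assumptions based on the shared BenchmarkRunnerConfig, so treat this as a
// hypothetical example and check --help for the exact set:
//
//	tsbs_load_akumuli --endpoint=localhost:8282 --file=./akumuli-data --workers=4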
5 | package main 6 | 7 | import ( 8 | "bytes" 9 | "fmt" 10 | "log" 11 | "sync" 12 | 13 | "github.com/blagojts/viper" 14 | "github.com/spf13/pflag" 15 | "github.com/timescale/tsbs/internal/utils" 16 | "github.com/timescale/tsbs/load" 17 | "github.com/timescale/tsbs/pkg/targets" 18 | "github.com/timescale/tsbs/pkg/targets/akumuli" 19 | "github.com/timescale/tsbs/pkg/targets/constants" 20 | "github.com/timescale/tsbs/pkg/targets/initializers" 21 | ) 22 | 23 | // Program option vars: 24 | var ( 25 | endpoint string 26 | ) 27 | 28 | // Global vars 29 | var ( 30 | loader load.BenchmarkRunner 31 | loaderConf *load.BenchmarkRunnerConfig 32 | ) 33 | 34 | // allows for testing 35 | var fatal = log.Fatalf 36 | var target targets.ImplementedTarget 37 | 38 | // Parse args: 39 | func init() { 40 | target = initializers.GetTarget(constants.FormatAkumuli) 41 | loaderConf = &load.BenchmarkRunnerConfig{} 42 | loaderConf.AddToFlagSet(pflag.CommandLine) 43 | target.TargetSpecificFlags("", pflag.CommandLine) 44 | 45 | pflag.Parse() 46 | err := utils.SetupConfigFile() 47 | 48 | if err != nil { 49 | panic(fmt.Errorf("fatal error config file: %s", err)) 50 | } 51 | 52 | if err := viper.Unmarshal(loaderConf); err != nil { 53 | panic(fmt.Errorf("unable to decode config: %s", err)) 54 | } 55 | 56 | endpoint = viper.GetString("endpoint") 57 | loaderConf.HashWorkers = true 58 | loader = load.GetBenchmarkRunner(*loaderConf) 59 | } 60 | 61 | func main() { 62 | bufPool := sync.Pool{ 63 | New: func() interface{} { 64 | return bytes.NewBuffer(make([]byte, 0, 4*1024*1024)) 65 | }, 66 | } 67 | benchmark := akumuli.NewBenchmark(loaderConf.FileName, endpoint, &bufPool) 68 | loader.RunBenchmark(benchmark) 69 | } 70 | -------------------------------------------------------------------------------- /cmd/tsbs_load_cassandra/main.go: -------------------------------------------------------------------------------- 1 | // tsbs_load_cassandra loads a Cassandra daemon with data from stdin. 2 | // 3 | // The caller is responsible for ensuring that the database is empty before 4 | // bulk load.
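// A minimal invocation sketch for this loader. The flag names mirror the viper
// keys read in initProgramOptions below (hosts, replication-factor,
// consistency, write-timeout), but they are assumptions, not verified flags;
// check --help for the exact set:
//
//	tsbs_load_cassandra --hosts=localhost:9042 --replication-factor=1 --consistency=ONE --file=./cassandra-data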
5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "github.com/blagojts/viper" 10 | "github.com/spf13/pflag" 11 | "github.com/timescale/tsbs/internal/utils" 12 | "github.com/timescale/tsbs/load" 13 | "github.com/timescale/tsbs/pkg/data/source" 14 | "github.com/timescale/tsbs/pkg/targets/cassandra" 15 | "github.com/timescale/tsbs/pkg/targets/constants" 16 | "github.com/timescale/tsbs/pkg/targets/initializers" 17 | ) 18 | 19 | // Parse args: 20 | func initProgramOptions() (*cassandra.SpecificConfig, *load.BenchmarkRunnerConfig, load.BenchmarkRunner) { 21 | config := load.BenchmarkRunnerConfig{} 22 | target := initializers.GetTarget(constants.FormatCassandra) 23 | config.AddToFlagSet(pflag.CommandLine) 24 | target.TargetSpecificFlags("", pflag.CommandLine) 25 | pflag.Parse() 26 | 27 | err := utils.SetupConfigFile() 28 | 29 | if err != nil { 30 | panic(fmt.Errorf("fatal error config file: %s", err)) 31 | } 32 | 33 | if err := viper.Unmarshal(&config); err != nil { 34 | panic(fmt.Errorf("unable to decode config: %s", err)) 35 | } 36 | 37 | dbConfig := &cassandra.SpecificConfig{ 38 | Hosts: viper.GetString("hosts"), 39 | ReplicationFactor: viper.GetInt("replication-factor"), 40 | ConsistencyLevel: viper.GetString("consistency"), 41 | WriteTimeout: viper.GetDuration("write-timeout"), 42 | } 43 | 44 | config.HashWorkers = false 45 | config.BatchSize = 100 46 | loader := load.GetBenchmarkRunner(config) 47 | return dbConfig, &config, loader 48 | } 49 | 50 | func main() { 51 | dbConfig, loaderConf, loader := initProgramOptions() 52 | benchmark, err := cassandra.NewBenchmark(dbConfig, &source.DataSourceConfig{ 53 | Type: source.FileDataSourceType, 54 | File: &source.FileDataSourceConfig{Location: loaderConf.FileName}, 55 | }) 56 | if err != nil { 57 | panic(err) 58 | } 59 | loader.RunBenchmark(benchmark) 60 | } 61 | -------------------------------------------------------------------------------- /cmd/tsbs_load_clickhouse/main.go: -------------------------------------------------------------------------------- 1 | // tsbs_load_clickhouse loads a ClickHouse instance with data from stdin. 2 | // 3 | // If the database exists beforehand, it will be *DROPPED*. 
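// A minimal invocation sketch for this loader. The flag names mirror the viper
// keys read in init below (host, user, password, log-batches, debug), but they
// are assumptions, not verified flags; check --help for the exact set:
//
//	tsbs_load_clickhouse --host=localhost --user=default --db-name=benchmark --file=./clickhouse-data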
4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "github.com/blagojts/viper" 9 | "github.com/spf13/pflag" 10 | "github.com/timescale/tsbs/internal/utils" 11 | "github.com/timescale/tsbs/load" 12 | "github.com/timescale/tsbs/pkg/targets" 13 | "github.com/timescale/tsbs/pkg/targets/clickhouse" 14 | ) 15 | 16 | // Global vars 17 | var ( 18 | target targets.ImplementedTarget 19 | ) 20 | 21 | var loader load.BenchmarkRunner 22 | var loaderConf load.BenchmarkRunnerConfig 23 | var conf *clickhouse.ClickhouseConfig 24 | 25 | // Parse args: 26 | func init() { 27 | loaderConf = load.BenchmarkRunnerConfig{} 28 | target = clickhouse.NewTarget() 29 | loaderConf.AddToFlagSet(pflag.CommandLine) 30 | target.TargetSpecificFlags("", pflag.CommandLine) 31 | pflag.Parse() 32 | 33 | err := utils.SetupConfigFile() 34 | 35 | if err != nil { 36 | panic(fmt.Errorf("fatal error config file: %s", err)) 37 | } 38 | 39 | if err := viper.Unmarshal(&loaderConf); err != nil { 40 | panic(fmt.Errorf("unable to decode config: %s", err)) 41 | } 42 | conf = &clickhouse.ClickhouseConfig{ 43 | Host: viper.GetString("host"), 44 | User: viper.GetString("user"), 45 | Password: viper.GetString("password"), 46 | LogBatches: viper.GetBool("log-batches"), 47 | Debug: viper.GetInt("debug"), 48 | DbName: loaderConf.DBName, 49 | } 50 | 51 | loader = load.GetBenchmarkRunner(loaderConf) 52 | } 53 | 54 | func main() { 55 | loader.RunBenchmark(clickhouse.NewBenchmark(loaderConf.FileName, loaderConf.HashWorkers, conf)) 56 | } 57 | -------------------------------------------------------------------------------- /cmd/tsbs_load_influx/scan.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | 7 | "github.com/timescale/tsbs/pkg/data" 8 | "github.com/timescale/tsbs/pkg/data/usecases/common" 9 | "github.com/timescale/tsbs/pkg/targets" 10 | ) 11 | 12 | const errNotThreeTuplesFmt = "parse error: line does not have 3 tuples, has %d" 13 | 14 | var newLine = []byte("\n") 15 | 16 | type fileDataSource struct { 17 | scanner *bufio.Scanner 18 | } 19 | 20 | func (d *fileDataSource) NextItem() data.LoadedPoint { 21 | ok := d.scanner.Scan() 22 | if !ok && d.scanner.Err() == nil { // nothing scanned & no error = EOF 23 | return data.LoadedPoint{} 24 | } else if !ok { 25 | fatal("scan error: %v", d.scanner.Err()) 26 | return data.LoadedPoint{} 27 | } 28 | return data.NewLoadedPoint(d.scanner.Bytes()) 29 | } 30 | 31 | func (d *fileDataSource) Headers() *common.GeneratedDataHeaders { return nil } 32 | 33 | type batch struct { 34 | buf *bytes.Buffer 35 | rows uint 36 | metrics uint64 37 | metricsPerRow uint64 38 | } 39 | 40 | func (b *batch) Len() uint { 41 | return b.rows 42 | } 43 | 44 | func (b *batch) Append(item data.LoadedPoint) { 45 | that := item.Data.([]byte) 46 | b.rows++ 47 | 48 | // We only validate the very first row per batch since it's an expensive operation. 49 | // As a part of the validation we also calculate the number of metrics per row. 50 | if b.metricsPerRow == 0 { 51 | // Each influx line is in the format "csv-tags csv-fields timestamp", so we split by space. 52 | var tuples, metrics uint64 = 1, 1 53 | for i := 0; i < len(that); i++ { 54 | if that[i] == byte(' ') { 55 | tuples++ 56 | } 57 | // On the middle element, we split by comma to count the number of fields added.
58 | if tuples == 2 && that[i] == byte(',') { 59 | metrics++ 60 | } 61 | } 62 | if tuples != 3 { 63 | fatal(errNotThreeTuplesFmt, tuples) 64 | return 65 | } 66 | b.metricsPerRow = metrics 67 | } 68 | b.metrics += b.metricsPerRow 69 | 70 | b.buf.Write(that) 71 | b.buf.Write(newLine) 72 | } 73 | 74 | type factory struct{} 75 | 76 | func (f *factory) New() targets.Batch { 77 | return &batch{buf: bufPool.Get().(*bytes.Buffer)} 78 | } 79 | -------------------------------------------------------------------------------- /cmd/tsbs_load_prometheus/adapter/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | 6 | "github.com/timescale/tsbs/cmd/tsbs_load_prometheus/adapter/noop" 7 | ) 8 | 9 | var port int 10 | 11 | func init() { 12 | flag.IntVar(&port, "port", 9876, "a port for the adapter to listen on") 13 | } 14 | 15 | // Start a no-op Prometheus adapter. Useful for testing purposes. 16 | func main() { 17 | adapter := noop.NewAdapter(port) 18 | err := adapter.Start() 19 | if err != nil { 20 | panic(err) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /cmd/tsbs_load_prometheus/adapter/noop/noop_adapter.go: -------------------------------------------------------------------------------- 1 | package noop 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "sync/atomic" 8 | 9 | "github.com/golang/protobuf/proto" 10 | "github.com/golang/snappy" 11 | "github.com/prometheus/common/log" 12 | "github.com/timescale/promscale/pkg/prompb" 13 | ) 14 | 15 | type Adapter struct { 16 | port int 17 | ReqCounter uint64 18 | SampleCounter uint64 19 | } 20 | 21 | func NewAdapter(port int) *Adapter { 22 | return &Adapter{port: port} 23 | } 24 | 25 | // Start starts the no-op Prometheus adapter. This call blocks the calling goroutine. 26 | func (adapter *Adapter) Start() error { 27 | http.HandleFunc("/", adapter.Handler) 28 | log.Info("msg", fmt.Sprintf("Starting noop adapter listening on: %d\n", adapter.port)) 29 | return http.ListenAndServe(fmt.Sprintf(":%d", adapter.port), nil) 30 | } 31 | 32 | // Handler counts the number of requests and samples received. 33 | func (adapter *Adapter) Handler(rw http.ResponseWriter, req *http.Request) { 34 | compressed, err := ioutil.ReadAll(req.Body) 35 | if err != nil { 36 | log.Error("msg", "error while reading request", "error", err) 37 | http.Error(rw, err.Error(), http.StatusInternalServerError) 38 | return 39 | } 40 | decompressed, err := snappy.Decode(nil, compressed) 41 | if err != nil { 42 | log.Error("msg", "error while decompressing request", "error", err) 43 | http.Error(rw, err.Error(), http.StatusInternalServerError) 44 | return 45 | } 46 | var protoReq prompb.WriteRequest 47 | if err := proto.Unmarshal(decompressed, &protoReq); err != nil { 48 | log.Error("msg", "error while unmarshalling protobuf request", "error", err) 49 | http.Error(rw, err.Error(), http.StatusBadRequest) 50 | return 51 | } 52 | // Handlers can run concurrently, so update the counters atomically. 53 | atomic.AddUint64(&adapter.ReqCounter, 1) 54 | atomic.AddUint64(&adapter.SampleCounter, uint64(len(protoReq.Timeseries))) 55 | } 56 | -------------------------------------------------------------------------------- /cmd/tsbs_load_prometheus/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/blagojts/viper" 7 | "github.com/spf13/pflag" 8 | "github.com/timescale/tsbs/internal/utils" 9 | "github.com/timescale/tsbs/load" 10 | "github.com/timescale/tsbs/pkg/data/source" 11 | "github.com/timescale/tsbs/pkg/targets" 12 | "github.com/timescale/tsbs/pkg/targets/prometheus" 13 | ) 14 | 15 | // Global vars: 16 | var ( 17 | target targets.ImplementedTarget 18 | loader load.BenchmarkRunner 19 | config load.BenchmarkRunnerConfig 20 | ) 21 | var adapterWriteUrl string 22 | 23 | func init() { 24 | target = prometheus.NewTarget() 25 | config = load.BenchmarkRunnerConfig{} 26 | config.AddToFlagSet(pflag.CommandLine) 27 | target.TargetSpecificFlags("", pflag.CommandLine) 28 | pflag.Parse() 29 | err := utils.SetupConfigFile() 30 | if err != nil { 31 | panic(fmt.Errorf("error setting up a config file: %s", err)) 32 | } 33 | 34 | if err := viper.Unmarshal(&config); err != nil { 35 | panic(fmt.Errorf("unable to decode config: %s", err)) 36 | } 37 | adapterWriteUrl = viper.GetString("adapter-write-url") 38 | loader = load.GetBenchmarkRunner(config) 39 | } 40 | 41 | func main() { 42 | benchmark, err := prometheus.NewBenchmark( 43 | &prometheus.SpecificConfig{AdapterWriteURL: adapterWriteUrl}, 44 | &source.DataSourceConfig{ 45 | Type: source.FileDataSourceType, 46 | File: &source.FileDataSourceConfig{Location: config.FileName}, 47 | }, 48 | ) 49 | if err != nil { 50 | panic(err) 51 | } 52 | loader.RunBenchmark(benchmark) 53 | } 54 | -------------------------------------------------------------------------------- /cmd/tsbs_load_questdb/creator.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type dbCreator struct { 8 | } 9 | 10 | func (d *dbCreator) Init() { 11 | // no-op 12 | } 13 | 14 | func (d *dbCreator) DBExists(dbName string) bool { 15 | // We don't really care if the table already exists, 16 | // especially when dedup is configured.
17 | return false 18 | } 19 | 20 | func (d *dbCreator) RemoveOldDB(dbName string) error { 21 | return nil 22 | } 23 | 24 | func (d *dbCreator) CreateDB(dbName string) error { 25 | time.Sleep(time.Second) // nothing to create up front: QuestDB creates tables on first write; this pause just gives the server a moment 26 | return nil 27 | } 28 | -------------------------------------------------------------------------------- /cmd/tsbs_load_questdb/scan.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | 7 | "github.com/timescale/tsbs/pkg/data" 8 | "github.com/timescale/tsbs/pkg/data/usecases/common" 9 | "github.com/timescale/tsbs/pkg/targets" 10 | ) 11 | 12 | const errNotThreeTuplesFmt = "parse error: line does not have 3 tuples, has %d" 13 | 14 | var newLine = []byte("\n") 15 | 16 | type fileDataSource struct { 17 | scanner *bufio.Scanner 18 | } 19 | 20 | func (d *fileDataSource) NextItem() data.LoadedPoint { 21 | ok := d.scanner.Scan() 22 | if !ok && d.scanner.Err() == nil { // nothing scanned & no error = EOF 23 | return data.LoadedPoint{} 24 | } else if !ok { 25 | fatal("scan error: %v", d.scanner.Err()) 26 | return data.LoadedPoint{} 27 | } 28 | return data.NewLoadedPoint(d.scanner.Bytes()) 29 | } 30 | 31 | func (d *fileDataSource) Headers() *common.GeneratedDataHeaders { return nil } 32 | 33 | type batch struct { 34 | buf *bytes.Buffer 35 | rows uint 36 | metrics uint64 37 | metricsPerRow uint64 38 | } 39 | 40 | func (b *batch) Len() uint { 41 | return b.rows 42 | } 43 | 44 | func (b *batch) Append(item data.LoadedPoint) { 45 | that := item.Data.([]byte) 46 | b.rows++ 47 | 48 | // We only validate the very first row per batch since it's an expensive operation. 49 | // As a part of the validation we also calculate the number of metrics per row. 50 | if b.metricsPerRow == 0 { 51 | // Each influx line is in the format "csv-tags csv-fields timestamp", so we split by space. 52 | var tuples, metrics uint64 = 1, 1 53 | for i := 0; i < len(that); i++ { 54 | if that[i] == byte(' ') { 55 | tuples++ 56 | } 57 | // On the middle element, we split by comma to count the number of fields added.
58 | if tuples == 2 && that[i] == byte(',') { 59 | metrics++ 60 | } 61 | } 62 | if tuples != 3 { 63 | fatal(errNotThreeTuplesFmt, tuples) 64 | return 65 | } 66 | b.metricsPerRow = metrics 67 | } 68 | b.metrics += b.metricsPerRow 69 | 70 | b.buf.Write(that) 71 | b.buf.Write(newLine) 72 | } 73 | 74 | type factory struct{} 75 | 76 | func (f *factory) New() targets.Batch { 77 | return &batch{buf: bufPool.Get().(*bytes.Buffer)} 78 | } 79 | -------------------------------------------------------------------------------- /cmd/tsbs_load_siridb/process.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/SiriDB/go-siridb-connector" 11 | "github.com/timescale/tsbs/pkg/targets" 12 | "github.com/transceptor-technology/go-qpack" 13 | ) 14 | 15 | type processor struct { 16 | connection *siridb.Connection 17 | } 18 | 19 | func (p *processor) Init(numWorker int, _, _ bool) { 20 | hostlist := strings.Split(hosts, ",") 21 | h := hostlist[numWorker%len(hostlist)] 22 | x := strings.Split(h, ":") 23 | host := x[0] 24 | port, err := strconv.ParseUint(x[1], 10, 16) 25 | if err != nil { 26 | fatal(err) 27 | } 28 | p.connection = siridb.NewConnection(host, uint16(port)) 29 | } 30 | 31 | func (p *processor) Close(doLoad bool) { 32 | if doLoad { 33 | p.connection.Close() 34 | } 35 | } 36 | 37 | func (p *processor) ProcessBatch(b targets.Batch, doLoad bool) (metricCount, rows uint64) { 38 | batch := b.(*batch) 39 | if doLoad { 40 | if err := p.connection.Connect(dbUser, dbPass, loader.DatabaseName()); err != nil { 41 | fatal(err) 42 | } 43 | series := make([]byte, 0) 44 | series = append(series, byte(253)) // qpack: "open map" 45 | for k, v := range batch.series { 46 | key, err := qpack.Pack(k) // packs a string in the right format for SiriDB 47 | if err != nil { 48 | log.Fatal(err) 49 | } 50 | series = append(series, key...) 51 | series = append(series, v...) 
52 | } 53 | start := time.Now() 54 | if _, err := p.connection.InsertBin(series, uint16(writeTimeout)); err != nil { 55 | fatal(err) 56 | } 57 | if logBatches { 58 | now := time.Now() 59 | took := now.Sub(start) 60 | batchSize := batch.batchCnt 61 | fmt.Printf("BATCH: batchsize %d insert rate %f/sec (took %v)\n", batchSize, float64(batchSize)/took.Seconds(), took) 62 | } 63 | } 64 | metricCount = batch.metricCnt 65 | batch.series = map[string][]byte{} 66 | batch.batchCnt = 0 67 | batch.metricCnt = 0 68 | return metricCount, 0 69 | } 70 | -------------------------------------------------------------------------------- /cmd/tsbs_load_siridb/scan_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/timescale/tsbs/pkg/data" 7 | ) 8 | 9 | func TestBatch(t *testing.T) { 10 | f := &factory{} 11 | b := f.New().(*batch) 12 | if b.Len() != 0 { 13 | t.Errorf("batch not initialized with count 0") 14 | } 15 | p := data.LoadedPoint{ 16 | Data: &point{ 17 | data: map[string][]byte{ 18 | "measurementName|tag1=val1,tag2=val2|fieldKey1": []byte{1, 2}, 19 | "measurementName|tag1=val1,tag2=val2|fieldKey2": []byte{2, 3}, 20 | }, 21 | dataCnt: 2, 22 | }, 23 | } 24 | b.Append(p) 25 | if b.Len() != 1 { 26 | t.Errorf("batch count is not 1 after first append") 27 | } 28 | if b.metricCnt != 2 { 29 | t.Errorf("batch metric count is not 2 after first append") 30 | } 31 | 32 | p = data.LoadedPoint{ 33 | Data: &point{ 34 | data: map[string][]byte{ 35 | "measurementName|tag1=val1,tag2=val2|fieldKey3": []byte{3, 4}, 36 | "measurementName|tag1=val1,tag2=val2|fieldKey4": []byte{4, 5}, 37 | }, 38 | dataCnt: 2, 39 | }, 40 | } 41 | b.Append(p) 42 | if b.Len() != 2 { 43 | t.Errorf("batch count is not 2 after second append") 44 | } 45 | if b.metricCnt != 4 { 46 | t.Errorf("batch metric count is not 4 after second append") 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /cmd/tsbs_load_timescaledb/profile.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | "github.com/shirou/gopsutil/process" 11 | ) 12 | 13 | func profileCPUAndMem(file string) { 14 | f, err := os.Create(file) 15 | if err != nil { 16 | log.Fatal(err) 17 | } 18 | defer f.Close() 19 | 20 | var proc *process.Process 21 | for range time.NewTicker(1 * time.Second).C { 22 | if proc == nil { 23 | procs, err := process.Processes() 24 | if err != nil { 25 | panic(err) 26 | } 27 | for _, p := range procs { 28 | cmd, _ := p.Cmdline() 29 | if strings.Contains(cmd, "postgres") && strings.Contains(cmd, "INSERT") { 30 | proc = p 31 | break 32 | } 33 | } 34 | } else { 35 | cpu, err := proc.CPUPercent() 36 | if err != nil { 37 | proc = nil 38 | continue 39 | } 40 | mem, err := proc.MemoryInfo() 41 | if err != nil { 42 | proc = nil 43 | continue 44 | } 45 | 46 | fmt.Fprintf(f, "%f,%d,%d,%d\n", cpu, mem.RSS, mem.VMS, mem.Swap) 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /cmd/tsbs_load_victoriametrics/main.go: -------------------------------------------------------------------------------- 1 | // tsbs_load_victoriametrics loads a VictoriaMetrics instance with data from stdin or a file.
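// A minimal invocation sketch for this loader. Only --urls is certain from the
// code below (it is read via viper and is required); the remaining flag names
// are assumptions from the shared loader config, so check --help for the exact
// set:
//
//	tsbs_load_victoriametrics --urls=http://localhost:8428/write --file=./vm-data --workers=4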
2 | package main 3 | 4 | import ( 5 | "fmt" 6 | "github.com/blagojts/viper" 7 | "github.com/spf13/pflag" 8 | "github.com/timescale/tsbs/internal/utils" 9 | "github.com/timescale/tsbs/load" 10 | "github.com/timescale/tsbs/pkg/data/source" 11 | "github.com/timescale/tsbs/pkg/targets/victoriametrics" 12 | "log" 13 | "strings" 14 | ) 15 | 16 | // Parse args: 17 | func initProgramOptions() (*victoriametrics.SpecificConfig, load.BenchmarkRunner, *load.BenchmarkRunnerConfig) { 18 | target := victoriametrics.NewTarget() 19 | 20 | loaderConf := load.BenchmarkRunnerConfig{} 21 | loaderConf.AddToFlagSet(pflag.CommandLine) 22 | target.TargetSpecificFlags("", pflag.CommandLine) 23 | pflag.Parse() 24 | 25 | if err := utils.SetupConfigFile(); err != nil { 26 | panic(fmt.Errorf("fatal error config file: %s", err)) 27 | } 28 | if err := viper.Unmarshal(&loaderConf); err != nil { 29 | panic(fmt.Errorf("unable to decode config: %s", err)) 30 | } 31 | 32 | urls := viper.GetString("urls") 33 | if len(urls) == 0 { 34 | log.Fatalf("missing `urls` flag") 35 | } 36 | vmURLs := strings.Split(urls, ",") 37 | 38 | loader := load.GetBenchmarkRunner(loaderConf) 39 | return &victoriametrics.SpecificConfig{ServerURLs: vmURLs}, loader, &loaderConf 40 | } 41 | 42 | func main() { 43 | vmConf, loader, loaderConf := initProgramOptions() 44 | 45 | benchmark, err := victoriametrics.NewBenchmark(vmConf, &source.DataSourceConfig{ 46 | Type: source.FileDataSourceType, 47 | File: &source.FileDataSourceConfig{Location: loaderConf.FileName}, 48 | }) 49 | if err != nil { 50 | panic(err) 51 | } 52 | loader.RunBenchmark(benchmark) 53 | } 54 | -------------------------------------------------------------------------------- /cmd/tsbs_run_queries_akumuli/main.go: -------------------------------------------------------------------------------- 1 | // tsbs_run_queries_akumuli speed tests Akumuli using requests from stdin. 2 | // 3 | // It reads encoded Query objects from stdin, and makes concurrent requests 4 | // to the provided HTTP endpoint. This program has no knowledge of the 5 | // internals of the endpoint. 
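// A minimal invocation sketch for this runner. Only --endpoint is defined in
// this file; --workers is an assumption from the shared query-runner config,
// and queries.dat stands in for a file produced by tsbs_generate_queries:
//
//	tsbs_run_queries_akumuli --endpoint=http://localhost:8181 --workers=2 < queries.dat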
6 | package main 7 | 8 | import ( 9 | "fmt" 10 | 11 | "github.com/blagojts/viper" 12 | "github.com/spf13/pflag" 13 | "github.com/timescale/tsbs/internal/utils" 14 | "github.com/timescale/tsbs/pkg/query" 15 | ) 16 | 17 | // Program option vars: 18 | var ( 19 | endpoint string 20 | ) 21 | 22 | // Global vars: 23 | var ( 24 | runner *query.BenchmarkRunner 25 | ) 26 | 27 | // Parse args: 28 | func init() { 29 | var config query.BenchmarkRunnerConfig 30 | config.AddToFlagSet(pflag.CommandLine) 31 | 32 | pflag.StringVar(&endpoint, "endpoint", "http://localhost:8181", "Akumuli API endpoint URL.") 33 | 34 | pflag.Parse() 35 | 36 | err := utils.SetupConfigFile() 37 | 38 | if err != nil { 39 | panic(fmt.Errorf("fatal error config file: %s", err)) 40 | } 41 | 42 | if err := viper.Unmarshal(&config); err != nil { 43 | panic(fmt.Errorf("unable to decode config: %s", err)) 44 | } 45 | 46 | endpoint = viper.GetString("endpoint") 47 | 48 | runner = query.NewBenchmarkRunner(config) 49 | } 50 | 51 | func main() { 52 | runner.Run(&query.HTTPPool, newProcessor) 53 | } 54 | 55 | type processor struct { 56 | w *HTTPClient 57 | opts *HTTPClientDoOptions 58 | } 59 | 60 | func newProcessor() query.Processor { return &processor{} } 61 | 62 | func (p *processor) Init(workerNumber int) { 63 | p.opts = &HTTPClientDoOptions{ 64 | Debug: runner.DebugLevel(), 65 | PrintResponses: runner.DoPrintResponses(), 66 | } 67 | url := endpoint 68 | p.w = NewHTTPClient(url) 69 | } 70 | 71 | func (p *processor) ProcessQuery(q query.Query, _ bool) ([]*query.Stat, error) { 72 | hq := q.(*query.HTTP) 73 | lag, err := p.w.Do(hq, p.opts) 74 | if err != nil { 75 | return nil, err 76 | } 77 | stat := query.GetStat() 78 | stat.Init(q.HumanLabelName(), lag) 79 | return []*query.Stat{stat}, nil 80 | } 81 | -------------------------------------------------------------------------------- /cmd/tsbs_run_queries_cassandra/conn.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "time" 6 | 7 | "github.com/gocql/gocql" 8 | ) 9 | 10 | // NewCassandraSession creates a new Cassandra session. It is goroutine-safe 11 | // by default, and uses a connection pool. 12 | func NewCassandraSession(daemonURL, keyspace string, timeout time.Duration) *gocql.Session { 13 | cluster := gocql.NewCluster(daemonURL) 14 | cluster.Keyspace = keyspace 15 | cluster.Consistency = gocql.One 16 | cluster.ProtoVersion = 4 17 | cluster.Timeout = timeout 18 | session, err := cluster.CreateSession() 19 | if err != nil { 20 | log.Fatal(err) 21 | } 22 | return session 23 | } 24 | -------------------------------------------------------------------------------- /cmd/tsbs_run_queries_cassandra/time_util.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/timescale/tsbs/internal/utils" 8 | ) 9 | 10 | type TimeIntervals []*utils.TimeInterval 11 | 12 | // implement sort.Interface 13 | func (x TimeIntervals) Len() int { return len(x) } 14 | func (x TimeIntervals) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 15 | func (x TimeIntervals) Less(i, j int) bool { 16 | return x[i].Start().Before(x[j].Start()) 17 | } 18 | 19 | // bucketTimeIntervals is a helper that creates a slice of TimeInterval 20 | // over the given span of time, in chunks of duration `window`.
21 | func bucketTimeIntervals(start, end time.Time, window time.Duration) []*utils.TimeInterval { 22 | if end.Before(start) { 23 | panic("logic error in bucketTimeIntervals: bad input times") 24 | } 25 | ret := []*utils.TimeInterval{} 26 | 27 | start = start.Truncate(window) 28 | for start.Before(end) { 29 | ti, err := utils.NewTimeInterval(start, start.Add(window)) 30 | if err != nil { 31 | panic(fmt.Sprintf("unexpected error: %v", err)) 32 | } 33 | ret = append(ret, ti) 34 | start = start.Add(window) 35 | } 36 | 37 | // sanity check 38 | tis := TimeIntervals(ret) 39 | for i := 0; i < len(tis)-1; i++ { 40 | if !tis.Less(i, i+1) { 41 | panic("logic error: unsorted buckets in bucketTimeIntervals") 42 | } 43 | } 44 | 45 | return ret 46 | } 47 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/timescale/tsbs 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/HdrHistogram/hdrhistogram-go v1.0.0 7 | github.com/SiriDB/go-siridb-connector v0.0.0-20190110105621-86b34c44c921 8 | github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect 9 | github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 10 | github.com/aws/aws-sdk-go v1.35.13 11 | github.com/blagojts/viper v1.6.3-0.20200313094124-068f44cf5e69 12 | github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 13 | github.com/go-ole/go-ole v1.2.4 // indirect 14 | github.com/gocql/gocql v0.0.0-20190810123941-df4b9cc33030 15 | github.com/golang/protobuf v1.4.2 16 | github.com/golang/snappy v0.0.1 17 | github.com/google/flatbuffers v1.11.0 18 | github.com/google/go-cmp v0.5.2 19 | github.com/jackc/pgx/v4 v4.8.0 20 | github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 21 | github.com/kshvakov/clickhouse v1.3.11 22 | github.com/lib/pq v1.3.0 23 | github.com/pkg/errors v0.9.1 24 | github.com/pkg/profile v1.2.1 25 | github.com/prometheus/common v0.13.0 26 | github.com/shirou/gopsutil v3.21.3+incompatible 27 | github.com/spf13/cobra v1.0.0 28 | github.com/spf13/pflag v1.0.5 29 | github.com/timescale/promscale v0.0.0-20201006153045-6a66a36f5c84 30 | github.com/tklauser/go-sysconf v0.3.5 // indirect 31 | github.com/transceptor-technology/go-qpack v0.0.0-20190116123619-49a14b216a45 32 | github.com/valyala/fasthttp v1.15.1 33 | go.uber.org/atomic v1.6.0 34 | golang.org/x/net v0.0.0-20200904194848-62affa334b73 35 | golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e 36 | gopkg.in/yaml.v2 v2.3.0 37 | ) 38 | -------------------------------------------------------------------------------- /helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: tsbs 3 | description: A Helm chart for TSBS 4 | type: application 5 | # This is the chart version. 
This version number should be incremented each time you make changes 6 | # to the chart and its templates, including the app version. 7 | version: 0.1.0 8 | appVersion: 0.1 9 | -------------------------------------------------------------------------------- /helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "tsbs.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "tsbs.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "tsbs.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "tsbs.labels" -}} 38 | helm.sh/chart: {{ include "tsbs.chart" . }} 39 | {{ include "tsbs.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end -}} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "tsbs.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "tsbs.name" . }} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end -}} 53 | -------------------------------------------------------------------------------- /helm/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "tsbs.fullname" . }} 5 | labels: 6 | {{- include "tsbs.labels" . | nindent 4 }} 7 | spec: 8 | selector: 9 | matchLabels: 10 | {{- include "tsbs.selectorLabels" . | nindent 6 }} 11 | template: 12 | metadata: 13 | labels: 14 | {{- include "tsbs.selectorLabels" . | nindent 8 }} 15 | spec: 16 | containers: 17 | - name: {{ .Chart.Name }} 18 | image: "{{ .Values.tsbs.image }}:{{ .Chart.AppVersion }}" 19 | volumeMounts: 20 | - name: data 21 | mountPath: /data 22 | resources: 23 | {{ toYaml .Values.resources | indent 12 }} 24 | volumes: 25 | - name: data 26 | persistentVolumeClaim: 27 | claimName: {{ include "tsbs.fullname" . }} 28 | 29 | -------------------------------------------------------------------------------- /helm/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: {{ include "tsbs.fullname" . }} 5 | labels: 6 | app: {{ include "tsbs.fullname" . 
}} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | release: "{{ .Release.Name }}" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: {{ .Values.persistentVolume.size | quote }} -------------------------------------------------------------------------------- /helm/values.yaml: -------------------------------------------------------------------------------- 1 | tsbs: 2 | image: timescaledev/tsbs 3 | 4 | persistentVolume: 5 | size: 100G 6 | 7 | resources: 8 | requests: 9 | memory: 24Gi 10 | cpu: 7 11 | -------------------------------------------------------------------------------- /internal/inputs/generator.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/usecases/common" 5 | ) 6 | 7 | // Generator is an interface that defines a type that generates inputs to other 8 | // TSBS tools. Examples include DataGenerator, which creates database data that 9 | // gets inserted and stored, or QueryGenerator, which creates queries that are 10 | // used for testing. 11 | type Generator interface { 12 | Generate(common.GeneratorConfig) error 13 | } 14 | -------------------------------------------------------------------------------- /internal/inputs/utils.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os" 8 | ) 9 | 10 | const ( 11 | errUnknownFormatFmt = "unknown format: '%s'" 12 | ) 13 | 14 | const defaultWriteSize = 4 << 20 // 4 MB 15 | 16 | func getBufferedWriter(filename string, fallback io.Writer) (*bufio.Writer, error) { 17 | // If filename is given, output should go to a file 18 | if len(filename) > 0 { 19 | file, err := os.Create(filename) 20 | if err != nil { 21 | return nil, fmt.Errorf("cannot open file for write %s: %v", filename, err) 22 | } 23 | return bufio.NewWriterSize(file, defaultWriteSize), nil 24 | } 25 | 26 | return bufio.NewWriterSize(fallback, defaultWriteSize), nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/inputs/utils_test.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "github.com/timescale/tsbs/internal/utils" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestIsIn(t *testing.T) { 10 | arr := []string{"foo", "bar", "baz"} 11 | arr2 := []string{"oof", "foo ", "nada", "123"} 12 | 13 | // Test positive cases 14 | for _, s := range arr { 15 | if !utils.IsIn(s, arr) { 16 | t.Errorf("%s not found in %v incorrectly", s, arr) 17 | } 18 | } 19 | for _, s := range arr2 { 20 | if !utils.IsIn(s, arr2) { 21 | t.Errorf("%s not found in %v incorrectly", s, arr2) 22 | } 23 | } 24 | 25 | // Test negative cases 26 | for _, s := range arr { 27 | if utils.IsIn(s, arr2) { 28 | t.Errorf("%s found in %v incorrectly", s, arr2) 29 | } 30 | } 31 | for _, s := range arr2 { 32 | if utils.IsIn(s, arr) { 33 | t.Errorf("%s found in %v incorrectly", s, arr) 34 | } 35 | } 36 | 37 | } 38 | 39 | const ( 40 | correctTimeStr = "2016-01-01T00:00:00Z" 41 | incorrectTimeStr = "2017-01-01" 42 | ) 43 | 44 | var correctTime = time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC) 45 | 46 | func TestParseUTCTime(t *testing.T) { 47 | parsedTime, err := utils.ParseUTCTime(correctTimeStr) 48 | if err != nil { 49 | t.Errorf("unexpected error: got %v", err) 50 | } else if parsedTime != correctTime { 51 | t.Errorf("did not get
correct time back: got %v want %v", parsedTime, correctTime) 52 | } 53 | 54 | _, err = utils.ParseUTCTime(incorrectTimeStr) 55 | if err == nil { 56 | t.Errorf("unexpected lack of error") 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /internal/utils/config.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | ) 7 | 8 | // SetupConfigFile defines the settings for the configuration file support. 9 | func SetupConfigFile() error { 10 | viper.SetConfigName("config") 11 | viper.AddConfigPath(".") 12 | 13 | if err := viper.BindPFlags(pflag.CommandLine); err != nil { 14 | return err 15 | } 16 | 17 | if err := viper.ReadInConfig(); err != nil { 18 | // Ignore error if config file not found. 19 | if _, ok := err.(viper.ConfigFileNotFoundError); !ok { 20 | return err 21 | } 22 | } 23 | 24 | return nil 25 | } 26 | -------------------------------------------------------------------------------- /internal/utils/funcs.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | func IsIn(s string, arr []string) bool { 9 | for _, x := range arr { 10 | if s == x { 11 | return true 12 | } 13 | } 14 | return false 15 | } 16 | 17 | // ParseUTCTime parses a string-represented time of the format 2006-01-02T15:04:05Z07:00 18 | func ParseUTCTime(s string) (time.Time, error) { 19 | t, err := time.Parse(time.RFC3339, s) 20 | if err != nil { 21 | return time.Time{}, err 22 | } 23 | return t.UTC(), nil 24 | } 25 | 26 | const ( 27 | errInvalidGroupsFmt = "incorrect interleaved groups configuration: id %d >= total groups %d" 28 | errTotalGroupsZero = "incorrect interleaved groups configuration: total groups = 0" 29 | ) 30 | 31 | // ValidateGroups checks the validity of the groupID and totalGroups combination 32 | func ValidateGroups(groupID, totalGroupsNum uint) error { 33 | if totalGroupsNum == 0 { 34 | // Need at least one group 35 | return fmt.Errorf(errTotalGroupsZero) 36 | } 37 | if groupID >= totalGroupsNum { 38 | // Need reasonable groupID 39 | return fmt.Errorf(errInvalidGroupsFmt, groupID, totalGroupsNum) 40 | } 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /internal/utils/funcs_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestValidateGroups(t *testing.T) { 9 | cases := []struct { 10 | desc string 11 | groupID uint 12 | totalGroups uint 13 | errMsg string 14 | }{ 15 | { 16 | desc: "id < total, no err", 17 | groupID: 0, 18 | totalGroups: 1, 19 | }, 20 | { 21 | desc: "id = total, should err", 22 | groupID: 1, 23 | totalGroups: 1, 24 | errMsg: fmt.Sprintf(errInvalidGroupsFmt, 1, 1), 25 | }, 26 | { 27 | desc: "id > total, should err", 28 | groupID: 2, 29 | totalGroups: 1, 30 | errMsg: fmt.Sprintf(errInvalidGroupsFmt, 2, 1), 31 | }, 32 | { 33 | desc: "total = 0, should err", 34 | groupID: 0, 35 | totalGroups: 0, 36 | errMsg: errTotalGroupsZero, 37 | }, 38 | } 39 | for _, c := range cases { 40 | err := ValidateGroups(c.groupID, c.totalGroups) 41 | if c.errMsg == "" && err != nil { 42 | t.Errorf("%s: unexpected error: %v", c.desc, err) 43 | } else if c.errMsg != "" && err == nil { 44 | t.Errorf("%s: unexpected lack of error", c.desc) 45 | } else if err != nil && err.Error() != c.errMsg { 46 | t.Errorf("%s:
incorrect error: got %s want %s", c.desc, err.Error(), c.errMsg) 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /load/buffered_reader.go: -------------------------------------------------------------------------------- 1 | package load 2 | 3 | import ( 4 | "bufio" 5 | "os" 6 | ) 7 | 8 | const ( 9 | defaultReadSize = 4 << 20 // 4 MB 10 | ) 11 | 12 | // GetBufferedReader returns the buffered Reader that should be used by the file loader. 13 | // If no file name is specified, a buffered reader for STDIN is returned. 14 | func GetBufferedReader(fileName string) *bufio.Reader { 15 | if len(fileName) == 0 { 16 | // Read from STDIN 17 | return bufio.NewReaderSize(os.Stdin, defaultReadSize) 18 | } 19 | // Read from specified file 20 | file, err := os.Open(fileName) 21 | if err != nil { 22 | fatal("cannot open file for read %s: %v", fileName, err) 23 | return nil 24 | } 25 | return bufio.NewReaderSize(file, defaultReadSize) 26 | } 27 | -------------------------------------------------------------------------------- /load/duplex_channel.go: -------------------------------------------------------------------------------- 1 | package load 2 | 3 | import "github.com/timescale/tsbs/pkg/targets" 4 | 5 | // duplexChannel acts as a two-way channel for communicating from a scan routine 6 | // to a worker goroutine. The toWorker channel sends data to the worker for it 7 | // to process and the toScanner channel allows the worker to acknowledge completion. 8 | // Using this we can accomplish better flow control between the scanner and workers. 9 | type duplexChannel struct { 10 | toWorker chan targets.Batch 11 | toScanner chan bool 12 | } 13 | 14 | // newDuplexChannel returns a duplexChannel with specified buffer sizes 15 | func newDuplexChannel(queueLen int) *duplexChannel { 16 | return &duplexChannel{ 17 | toWorker: make(chan targets.Batch, queueLen), 18 | toScanner: make(chan bool, queueLen), 19 | } 20 | } 21 | 22 | // sendToWorker passes a batch of work on to the worker from the scanner 23 | func (dc *duplexChannel) sendToWorker(b targets.Batch) { 24 | dc.toWorker <- b 25 | } 26 | 27 | // sendToScanner passes an acknowledge to the scanner from the worker 28 | func (dc *duplexChannel) sendToScanner() { 29 | dc.toScanner <- true 30 | } 31 | 32 | // close closes down the duplexChannel 33 | func (dc *duplexChannel) close() { 34 | close(dc.toWorker) 35 | close(dc.toScanner) 36 | } 37 | -------------------------------------------------------------------------------- /load/duplex_channel_test.go: -------------------------------------------------------------------------------- 1 | package load 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestNewDuplexChannel(t *testing.T) { 8 | cases := []struct { 9 | desc string 10 | queueSize int 11 | }{ 12 | { 13 | desc: "queue size 0", 14 | queueSize: 0, 15 | }, 16 | { 17 | desc: "queue size 1", 18 | queueSize: 1, 19 | }, 20 | { 21 | desc: "queue size 5", 22 | queueSize: 5, 23 | }, 24 | } 25 | 26 | for _, c := range cases { 27 | ch := newDuplexChannel(c.queueSize) 28 | if cap(ch.toScanner) != c.queueSize { 29 | t.Errorf("%s: toScanner channel cap incorrect: got %d want %d", c.desc, cap(ch.toScanner), c.queueSize) 30 | } 31 | if cap(ch.toWorker) != c.queueSize { 32 | t.Errorf("%s: toWorker channel cap incorrect: got %d want %d", c.desc, cap(ch.toWorker), c.queueSize) 33 | } 34 | } 35 | } 36 | 37 | func TestSendToWorker(t *testing.T) { 38 | ch := newDuplexChannel(1) 39 | ch.sendToWorker(&testBatch{}) 40 | if res, ok :=
<-ch.toWorker; !ok || res == nil { 41 | t.Errorf("sendToWorker did not send item or sent nil") 42 | } 43 | } 44 | 45 | func TestSendToScanner(t *testing.T) { 46 | ch := newDuplexChannel(1) 47 | ch.sendToScanner() 48 | if res, ok := <-ch.toScanner; !res || !ok { 49 | t.Errorf("sendToScanner did not send 'true', sent %v", res) 50 | } 51 | } 52 | 53 | func TestClose(t *testing.T) { 54 | ch := newDuplexChannel(1) 55 | ch.close() 56 | _, ok := <-ch.toWorker 57 | if ok { 58 | t.Errorf("close did not close toWorker") 59 | } 60 | _, ok = <-ch.toScanner 61 | if ok { 62 | t.Errorf("close did not close toScanner") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /load/loader_test_result.go: -------------------------------------------------------------------------------- 1 | package load 2 | 3 | const LoaderTestResultVersion = "0.1" 4 | 5 | // LoaderTestResult aggregates the results of an insert or load benchmark in a common format across targets. 6 | type LoaderTestResult struct { 7 | // Format Configs 8 | ResultFormatVersion string `json:"ResultFormatVersion"` 9 | 10 | // RunnerConfig Configs 11 | RunnerConfig BenchmarkRunnerConfig `json:"RunnerConfig"` 12 | 13 | // Run info 14 | StartTime int64 `json:"StartTime"` 15 | EndTime int64 `json:"EndTime"` 16 | DurationMillis int64 `json:"DurationMillis"` 17 | 18 | // Totals 19 | Totals map[string]interface{} `json:"Totals"` 20 | } 21 | -------------------------------------------------------------------------------- /load/scan_no_flow_control.go: -------------------------------------------------------------------------------- 1 | package load 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/targets" 5 | ) 6 | 7 | // scanWithoutFlowControl reads data from the DataSource ds until a limit is reached (if 0, all items are read). 8 | // Data is then placed into appropriate batches, using the supplied PointIndexer, 9 | // which are then dispatched to workers (channel idx chosen by PointIndexer). 10 | // scanWithoutFlowControl does no flow control; if the capacity of a channel is reached, scanning stops for all 11 | // workers (this should only happen if channel capacity is low and one worker is unreasonably slower than the rest); 12 | // in that case just set hash-workers to false and use 1 channel for all workers.
13 | func scanWithoutFlowControl( 14 | ds targets.DataSource, indexer targets.PointIndexer, factory targets.BatchFactory, channels []chan targets.Batch, 15 | batchSize uint, limit uint64, 16 | ) uint64 { 17 | if batchSize == 0 { 18 | panic("batch size can't be 0") 19 | } 20 | numChannels := len(channels) 21 | batches := make([]targets.Batch, numChannels) 22 | for i := 0; i < numChannels; i++ { 23 | batches[i] = factory.New() 24 | } 25 | var itemsRead uint64 26 | for { 27 | if limit > 0 && itemsRead >= limit { 28 | break 29 | } 30 | item := ds.NextItem() 31 | if item.Data == nil { 32 | // Nothing to scan any more - input is empty or failed 33 | // Time to exit 34 | break 35 | } 36 | itemsRead++ 37 | 38 | idx := indexer.GetIndex(item) 39 | batches[idx].Append(item) 40 | 41 | if batches[idx].Len() >= batchSize { 42 | channels[idx] <- batches[idx] 43 | batches[idx] = factory.New() 44 | } 45 | } 46 | 47 | for idx, unfilledBatch := range batches { 48 | if unfilledBatch.Len() > 0 { 49 | channels[idx] <- unfilledBatch 50 | } 51 | } 52 | return itemsRead 53 | } 54 | -------------------------------------------------------------------------------- /pkg/data/serialize/point_serializer.go: -------------------------------------------------------------------------------- 1 | package serialize 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "io" 6 | ) 7 | 8 | // PointSerializer serializes a Point for writing 9 | type PointSerializer interface { 10 | Serialize(p *data.Point, w io.Writer) error 11 | } 12 | -------------------------------------------------------------------------------- /pkg/data/serialize/util.go: -------------------------------------------------------------------------------- 1 | package serialize 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | ) 7 | 8 | // Utility function for appending various data types to a byte string 9 | func FastFormatAppend(v interface{}, buf []byte) []byte { 10 | switch v.(type) { 11 | case int: 12 | return strconv.AppendInt(buf, int64(v.(int)), 10) 13 | case int64: 14 | return strconv.AppendInt(buf, v.(int64), 10) 15 | case float64: 16 | // Why -1 ? 17 | // From Golang source on genericFtoa (called by AppendFloat): 'Negative precision means "only as much as needed to be exact."' 18 | // Using this instead of an exact number for precision ensures we preserve the precision passed in to the function, allowing us 19 | // to use different precision for different use cases. 20 | return strconv.AppendFloat(buf, v.(float64), 'f', -1, 64) 21 | case float32: 22 | return strconv.AppendFloat(buf, float64(v.(float32)), 'f', -1, 32) 23 | case bool: 24 | return strconv.AppendBool(buf, v.(bool)) 25 | case []byte: 26 | buf = append(buf, v.([]byte)...) 27 | return buf 28 | case string: 29 | buf = append(buf, v.(string)...) 
30 | return buf 31 | case nil: 32 | return buf 33 | default: 34 | panic(fmt.Sprintf("unknown field type for %#v", v)) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/data/source/config.go: -------------------------------------------------------------------------------- 1 | package source 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/usecases/common" 5 | ) 6 | 7 | const ( 8 | FileDataSourceType = "FILE" 9 | SimulatorDataSourceType = "SIMULATOR" 10 | ) 11 | 12 | var ( 13 | ValidDataSourceTypes = []string{FileDataSourceType, SimulatorDataSourceType} 14 | ) 15 | 16 | type DataSourceConfig struct { 17 | Type string `yaml:"type"` 18 | File *FileDataSourceConfig `yaml:"file,omitempty"` 19 | Simulator *common.DataGeneratorConfig `yaml:"simulator,omitempty"` 20 | } 21 | -------------------------------------------------------------------------------- /pkg/data/source/file_data_source_config.go: -------------------------------------------------------------------------------- 1 | package source 2 | 3 | type FileDataSourceConfig struct { 4 | Location string `yaml:"location"` 5 | } 6 | -------------------------------------------------------------------------------- /pkg/data/usecases/common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "math/rand" 4 | 5 | // RandomStringSliceChoice returns a random string from the provided slice of string slices. 6 | func RandomStringSliceChoice(s []string) string { 7 | return s[rand.Intn(len(s))] 8 | } 9 | 10 | // RandomByteStringSliceChoice returns a random byte string slice from the provided slice of byte string slices. 11 | func RandomByteStringSliceChoice(s [][]byte) []byte { 12 | return s[rand.Intn(len(s))] 13 | } 14 | 15 | // RandomInt64SliceChoice returns a random int64 from an int64 slice. 16 | func RandomInt64SliceChoice(s []int64) int64 { 17 | return s[rand.Intn(len(s))] 18 | } 19 | 20 | const ( 21 | // Use case choices (make sure to update TestGetConfig if adding a new one) 22 | UseCaseCPUOnly = "cpu-only" 23 | UseCaseCPUSingle = "cpu-single" 24 | UseCaseDevops = "devops" 25 | UseCaseIoT = "iot" 26 | UseCaseDevopsGeneric = "devops-generic" 27 | ) 28 | 29 | var UseCaseChoices = []string{ 30 | UseCaseCPUOnly, 31 | UseCaseCPUSingle, 32 | UseCaseDevops, 33 | UseCaseIoT, 34 | UseCaseDevopsGeneric, 35 | } 36 | -------------------------------------------------------------------------------- /pkg/data/usecases/common/common_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func testIfInByteStringSlice(t *testing.T, arr [][]byte, choice []byte) { 9 | for _, x := range arr { 10 | if bytes.Equal(x, choice) { 11 | return 12 | } 13 | } 14 | t.Errorf("could not find choice in array: %s", choice) 15 | } 16 | 17 | func TestRandomByteSliceChoice(t *testing.T) { 18 | arr := [][]byte{ 19 | []byte("foo"), 20 | []byte("bar"), 21 | []byte("baz"), 22 | } 23 | // One million attempts ought to catch it? 
24 | for i := 0; i < 1000000; i++ { 25 | choice := RandomByteStringSliceChoice(arr) 26 | testIfInByteStringSlice(t, arr, choice) 27 | } 28 | } 29 | 30 | func testIfInInt64Slice(t *testing.T, arr []int64, choice int64) { 31 | for _, x := range arr { 32 | if x == choice { 33 | return 34 | } 35 | } 36 | t.Errorf("could not find choice in array: %d", choice) 37 | } 38 | 39 | func TestRandomInt64Choice(t *testing.T) { 40 | arr := []int64{0, 10000, 9999} 41 | // One million attempts ought to catch it? 42 | for i := 0; i < 1000000; i++ { 43 | choice := RandomInt64SliceChoice(arr) 44 | testIfInInt64Slice(t, arr, choice) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/data/usecases/devops/diskio_test.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestDiskIOMeasurementTick(t *testing.T) { 11 | now := time.Now() 12 | m := NewDiskIOMeasurement(now) 13 | origSerial := string(m.serial) 14 | duration := time.Second 15 | oldVals := map[string]float64{} 16 | fields := ldmToFieldLabels(diskIOFields) 17 | for i, ldm := range diskIOFields { 18 | oldVals[string(ldm.Label)] = m.Distributions[i].Get() 19 | } 20 | 21 | rand.Seed(123) 22 | m.Tick(duration) 23 | err := testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 24 | if err != nil { 25 | t.Errorf(err.Error()) 26 | } 27 | if got := string(m.serial); got != origSerial { 28 | t.Errorf("serial changed unexpectedly: got %s want %s", got, origSerial) 29 | } 30 | m.Tick(duration) 31 | err = testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 32 | if err != nil { 33 | t.Errorf(err.Error()) 34 | } 35 | if got := string(m.serial); got != origSerial { 36 | t.Errorf("serial changed unexpectedly: got %s want %s", got, origSerial) 37 | } 38 | } 39 | 40 | func TestDiskIOMeasurementToPoint(t *testing.T) { 41 | now := time.Now() 42 | m := NewDiskIOMeasurement(now) 43 | origSerial := string(m.serial) 44 | duration := time.Second 45 | m.Tick(duration) 46 | 47 | p := data.NewPoint() 48 | m.ToPoint(p) 49 | if got := string(p.MeasurementName()); got != string(labelDiskIO) { 50 | t.Errorf("incorrect measurement name: got %s want %s", got, labelDiskIO) 51 | } 52 | 53 | if got := p.GetTagValue(labelDiskIOSerial).(string); got != origSerial { 54 | t.Errorf("incorrect tag value for serial: got %s want %s", got, origSerial) 55 | } 56 | 57 | for _, ldm := range diskIOFields { 58 | if got := p.GetFieldValue(ldm.Label); got == nil { 59 | t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /pkg/data/usecases/devops/kernel.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "github.com/timescale/tsbs/pkg/data/usecases/common" 6 | "math/rand" 7 | "time" 8 | ) 9 | 10 | var ( 11 | labelKernel = []byte("kernel") // heap optimization 12 | labelKernelBootTime = []byte("boot_time") 13 | 14 | // Reuse NormalDistributions as arguments to other distributions.
This is 15 | // safe to do because the higher-level distribution advances the ND and 16 | // immediately uses its value and saves the state 17 | kernelND = common.ND(5, 1) 18 | 19 | kernelFields = []common.LabeledDistributionMaker{ 20 | {Label: []byte("interrupts"), DistributionMaker: func() common.Distribution { return common.MWD(kernelND, 0) }}, 21 | {Label: []byte("context_switches"), DistributionMaker: func() common.Distribution { return common.MWD(kernelND, 0) }}, 22 | {Label: []byte("processes_forked"), DistributionMaker: func() common.Distribution { return common.MWD(kernelND, 0) }}, 23 | {Label: []byte("disk_pages_in"), DistributionMaker: func() common.Distribution { return common.MWD(kernelND, 0) }}, 24 | {Label: []byte("disk_pages_out"), DistributionMaker: func() common.Distribution { return common.MWD(kernelND, 0) }}, 25 | } 26 | ) 27 | 28 | type KernelMeasurement struct { 29 | *common.SubsystemMeasurement 30 | bootTime int64 31 | } 32 | 33 | func NewKernelMeasurement(start time.Time) *KernelMeasurement { 34 | sub := common.NewSubsystemMeasurementWithDistributionMakers(start, kernelFields) 35 | bootTime := rand.Int63n(240) 36 | return &KernelMeasurement{ 37 | SubsystemMeasurement: sub, 38 | bootTime: bootTime, 39 | } 40 | } 41 | 42 | func (m *KernelMeasurement) ToPoint(p *data.Point) { 43 | p.AppendField(labelKernelBootTime, m.bootTime) 44 | m.ToPointAllInt64(p, labelKernel, kernelFields) 45 | } 46 | -------------------------------------------------------------------------------- /pkg/data/usecases/devops/kernel_test.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestKernelMeasurementTick(t *testing.T) { 11 | now := time.Now() 12 | m := NewKernelMeasurement(now) 13 | duration := time.Second 14 | bootTime := m.bootTime 15 | oldVals := map[string]float64{} 16 | fields := ldmToFieldLabels(kernelFields) 17 | for i, ldm := range kernelFields { 18 | oldVals[string(ldm.Label)] = m.Distributions[i].Get() 19 | } 20 | 21 | rand.Seed(123) 22 | m.Tick(duration) 23 | err := testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 24 | if err != nil { 25 | t.Errorf(err.Error()) 26 | } 27 | if got := m.bootTime; got != bootTime { 28 | t.Errorf("boot time changed unexpectedly: got %d", got) 29 | } 30 | m.Tick(duration) 31 | err = testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 32 | if err != nil { 33 | t.Errorf(err.Error()) 34 | } 35 | if got := m.bootTime; got != bootTime { 36 | t.Errorf("boot time changed unexpectedly: got %d", got) 37 | } 38 | } 39 | 40 | func TestKernelMeasurementToPoint(t *testing.T) { 41 | now := time.Now() 42 | m := NewKernelMeasurement(now) 43 | duration := time.Second 44 | bootTime := m.bootTime 45 | m.Tick(duration) 46 | 47 | p := data.NewPoint() 48 | m.ToPoint(p) 49 | if got := string(p.MeasurementName()); got != string(labelKernel) { 50 | t.Errorf("incorrect measurement name: got %s want %s", got, labelKernel) 51 | } 52 | 53 | if got := p.GetFieldValue(labelKernelBootTime).(int64); got != bootTime { 54 | t.Errorf("boot time changed unexpectedly: got %d want %d", got, bootTime) 55 | } 56 | 57 | for _, ldm := range kernelFields { 58 | if got := p.GetFieldValue(ldm.Label); got == nil { 59 | t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- 
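kernel.go and its test above show the simulator pattern used throughout pkg/data/usecases: construct a measurement, advance its distributions with Tick, then emit a data.Point. A minimal sketch of driving it by hand (the 10-second interval and the printed field are illustrative choices, not anything the package prescribes):

package main

import (
	"fmt"
	"time"

	"github.com/timescale/tsbs/pkg/data"
	"github.com/timescale/tsbs/pkg/data/usecases/devops"
)

func main() {
	m := devops.NewKernelMeasurement(time.Now())
	for i := 0; i < 3; i++ {
		m.Tick(10 * time.Second) // advance every field distribution one step
		p := data.NewPoint()
		m.ToPoint(p)
		// boot_time stays fixed per host; the five kernel counters move each tick
		fmt.Printf("%s boot_time=%v\n", p.MeasurementName(), p.GetFieldValue([]byte("boot_time")))
	}
}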
/pkg/data/usecases/devops/net_test.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestNetMeasurementTick(t *testing.T) { 11 | now := time.Now() 12 | m := NewNetMeasurement(now) 13 | origName := string(m.interfaceName) 14 | duration := time.Second 15 | oldVals := map[string]float64{} 16 | fields := ldmToFieldLabels(netFields) 17 | for i, ldm := range netFields { 18 | oldVals[string(ldm.Label)] = m.Distributions[i].Get() 19 | } 20 | 21 | rand.Seed(123) 22 | m.Tick(duration) 23 | err := testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 24 | if err != nil { 25 | t.Errorf(err.Error()) 26 | } 27 | if got := string(m.interfaceName); got != origName { 28 | t.Errorf("interface name updated unexpectedly: got %s want %s", got, origName) 29 | } 30 | m.Tick(duration) 31 | err = testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 32 | if err != nil { 33 | t.Errorf(err.Error()) 34 | } 35 | if got := string(m.interfaceName); got != origName { 36 | t.Errorf("interface name updated unexpectedly: got %s want %s", got, origName) 37 | } 38 | } 39 | 40 | func TestNetMeasurementToPoint(t *testing.T) { 41 | now := time.Now() 42 | m := NewNetMeasurement(now) 43 | origName := string(m.interfaceName) 44 | duration := time.Second 45 | m.Tick(duration) 46 | 47 | p := data.NewPoint() 48 | m.ToPoint(p) 49 | if got := string(p.MeasurementName()); got != string(labelNet) { 50 | t.Errorf("incorrect measurement name: got %s want %s", got, labelNet) 51 | } 52 | 53 | if got := p.GetTagValue(labelNetTagInterface).(string); got != origName { 54 | t.Errorf("incorrect tag value for interface name: got %s want %s", got, origName) 55 | } 56 | 57 | for _, ldm := range netFields { 58 | if got := p.GetFieldValue(ldm.Label); got == nil { 59 | t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /pkg/data/usecases/devops/postgresql_test.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestPostgresqlMeasurementTick(t *testing.T) { 11 | now := time.Now() 12 | m := NewPostgresqlMeasurement(now) 13 | duration := time.Second 14 | oldVals := map[string]float64{} 15 | fields := ldmToFieldLabels(postgresqlFields) 16 | for i, ldm := range postgresqlFields { 17 | oldVals[string(ldm.Label)] = m.Distributions[i].Get() 18 | } 19 | 20 | rand.Seed(123) 21 | m.Tick(duration) 22 | err := testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 23 | if err != nil { 24 | t.Errorf(err.Error()) 25 | } 26 | m.Tick(duration) 27 | err = testDistributionsAreDifferent(oldVals, m.SubsystemMeasurement, fields) 28 | if err != nil { 29 | t.Errorf(err.Error()) 30 | } 31 | } 32 | 33 | func TestPostgresqlMeasurementToPoint(t *testing.T) { 34 | now := time.Now() 35 | m := NewPostgresqlMeasurement(now) 36 | duration := time.Second 37 | m.Tick(duration) 38 | 39 | p := data.NewPoint() 40 | m.ToPoint(p) 41 | if got := string(p.MeasurementName()); got != string(labelPostgresql) { 42 | t.Errorf("incorrect measurement name: got %s want %s", got, labelPostgresql) 43 | } 44 | 45 | for _, ldm := range postgresqlFields { 46 | if got := p.GetFieldValue(ldm.Label); got == nil { 47 |
t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /pkg/data/usecases/iot/batch_config_test.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/google/go-cmp/cmp" 8 | ) 9 | 10 | var ( 11 | numberOfRuns = 5 12 | numberOfBatches = 150 13 | ) 14 | 15 | func TestNewBatchConfig(t *testing.T) { 16 | 17 | batchRuns := make([][]*batchConfig, numberOfRuns) 18 | 19 | for i := 0; i < numberOfRuns; i++ { 20 | rand.Seed(123) 21 | batchRuns[i] = make([]*batchConfig, numberOfBatches) 22 | 23 | for j := 0; j < numberOfBatches; j++ { 24 | batchRuns[i][j] = newBatchConfig(j, j, j+5, j+5) 25 | } 26 | } 27 | 28 | var firstBatchRun []*batchConfig 29 | 30 | for i := range batchRuns { 31 | if firstBatchRun == nil { 32 | firstBatchRun = batchRuns[i] 33 | continue 34 | } 35 | 36 | for j := range batchRuns[i] { 37 | if !cmp.Equal(firstBatchRun[j], batchRuns[i][j]) { 38 | t.Errorf("batch configs don't match for index %d:\ngot\n%+v\nwant\n%+v", j, batchRuns[i][j], firstBatchRun[j]) 39 | } 40 | } 41 | 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /pkg/data/usecases/iot/diagnostics_test.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "github.com/timescale/tsbs/pkg/data/usecases/common" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestDiagnosticsMeasurementToPoint(t *testing.T) { 11 | now := time.Now() 12 | m := NewDiagnosticsMeasurement(now) 13 | duration := time.Second 14 | m.Tick(duration) 15 | 16 | p := data.NewPoint() 17 | m.ToPoint(p) 18 | if got := string(p.MeasurementName()); got != string(labelDiagnostics) { 19 | t.Errorf("incorrect measurement name: got %s want %s", got, labelReadings) 20 | } 21 | 22 | for _, ldm := range diagnosticsFields { 23 | if got := p.GetFieldValue(ldm.Label); got == nil { 24 | t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 25 | } 26 | } 27 | } 28 | 29 | func TestCustomFuelDistribution(t *testing.T) { 30 | testCount := 5 31 | fuelMin, fuelMax := 10.0, 100.0 32 | fuelStep := &common.ConstantDistribution{State: -1} 33 | 34 | clampedDist := common.CWD(fuelStep, fuelMin, fuelMax, fuelMax) 35 | clampedCopy := *clampedDist 36 | 37 | fuelDist := &customFuelDistribution{&clampedCopy} 38 | 39 | for i := 0; i < testCount; i++ { 40 | for clampedDist.Get() > fuelMin { 41 | clampedDist.Advance() 42 | fuelDist.Advance() 43 | 44 | if clampedDist.Get() != fuelDist.Get() { 45 | 46 | if clampedDist.Get() == fuelMin { 47 | if fuelDist.Get() != fuelMax { 48 | t.Fatalf("expected fuel to be refilled when state hits minimum") 49 | } 50 | break 51 | } 52 | 53 | t.Fatalf("distributions don't match when they are supposed to") 54 | } 55 | } 56 | 57 | // Resetting the distribution and running another test. 
58 | clampedDist = common.CWD(fuelStep, fuelMin, fuelMax, fuelMax) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /pkg/data/usecases/iot/readings_test.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestReadingsMeasurementToPoint(t *testing.T) { 10 | now := time.Now() 11 | m := NewReadingsMeasurement(now) 12 | duration := time.Second 13 | m.Tick(duration) 14 | 15 | p := data.NewPoint() 16 | m.ToPoint(p) 17 | if got := string(p.MeasurementName()); got != string(labelReadings) { 18 | t.Errorf("incorrect measurement name: got %s want %s", got, labelReadings) 19 | } 20 | 21 | for _, ldm := range readingsFields { 22 | if got := p.GetFieldValue(ldm.Label); got == nil { 23 | t.Errorf("field %s returned a nil value unexpectedly", ldm.Label) 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pkg/data/usecases/usecases_test.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/usecases/common" 5 | "github.com/timescale/tsbs/pkg/data/usecases/devops" 6 | "github.com/timescale/tsbs/pkg/data/usecases/iot" 7 | "reflect" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | const defaultLogInterval = 10 * time.Second 13 | 14 | func TestGetSimulatorConfig(t *testing.T) { 15 | dgc := &common.DataGeneratorConfig{ 16 | BaseConfig: common.BaseConfig{ 17 | Scale: 1, 18 | TimeStart: "2020-01-01T00:00:00Z", 19 | TimeEnd: "2020-01-01T00:00:01Z", 20 | }, 21 | InitialScale: 1, 22 | LogInterval: defaultLogInterval, 23 | } 24 | 25 | checkType := func(use string, want common.SimulatorConfig) { 26 | wantType := reflect.TypeOf(want) 27 | dgc.Use = use 28 | scfg, err := GetSimulatorConfig(dgc) 29 | if err != nil { 30 | t.Errorf("unexpected error with use case %s: %v", use, err) 31 | } 32 | if got := reflect.TypeOf(scfg); got != wantType { 33 | t.Errorf("use '%s' does not give right scfg: got %v want %v", use, got, wantType) 34 | } 35 | } 36 | 37 | checkType(common.UseCaseDevops, &devops.DevopsSimulatorConfig{}) 38 | checkType(common.UseCaseIoT, &iot.SimulatorConfig{}) 39 | checkType(common.UseCaseCPUOnly, &devops.CPUOnlySimulatorConfig{}) 40 | checkType(common.UseCaseCPUSingle, &devops.CPUOnlySimulatorConfig{}) 41 | 42 | dgc.Use = "bogus use case" 43 | _, err := GetSimulatorConfig(dgc) 44 | if err == nil { 45 | t.Errorf("unexpected lack of error for bogus use case") 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /pkg/query/benchmark_result.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | const BenchmarkTestResultVersion = "0.1" 4 | 5 | // LoaderTestResult aggregates the results of a query benchmark in a common format across targets 6 | type LoaderTestResult struct { 7 | // Format Configs 8 | ResultFormatVersion string `json:"ResultFormatVersion"` 9 | 10 | // RunnerConfig Configs 11 | RunnerConfig BenchmarkRunnerConfig `json:"RunnerConfig"` 12 | 13 | // Run info 14 | StartTime int64 `json:"StartTime"` 15 | EndTime int64 `json:"EndTime"` 16 | DurationMillis int64 `json:"DurationMillis"` 17 | 18 | // Totals 19 | Totals map[string]interface{} `json:"Totals"` 20 | } 21 | --------------------------------------------------------------------------------
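A populated LoaderTestResult is plain JSON once marshaled. A minimal sketch of writing one out; treating StartTime/EndTime as epoch milliseconds is an assumption made here for symmetry with DurationMillis, and the zero-valued RunnerConfig merely stands in for a real BenchmarkRunnerConfig:

package main

import (
	"encoding/json"
	"os"
	"time"

	"github.com/timescale/tsbs/pkg/query"
)

func main() {
	start := time.Now()
	end := start.Add(90 * time.Second)
	res := query.LoaderTestResult{
		ResultFormatVersion: query.BenchmarkTestResultVersion,
		StartTime:           start.UnixMilli(), // epoch millis assumed
		EndTime:             end.UnixMilli(),
		DurationMillis:      end.Sub(start).Milliseconds(),
		Totals:              map[string]interface{}{"all queries": 1000},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(res) // error handling elided in this sketch
}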
/pkg/query/clickhouse.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // ClickHouse encodes a ClickHouse query. 9 | // This will be serialized for use by the tsbs_run_queries_clickhouse program. 10 | type ClickHouse struct { 11 | HumanLabel []byte 12 | HumanDescription []byte 13 | 14 | Table []byte // e.g. "cpu" 15 | SqlQuery []byte 16 | id uint64 17 | } 18 | 19 | // ClickHousePool is a sync.Pool of ClickHouse Query types 20 | var ClickHousePool = sync.Pool{ 21 | New: func() interface{} { 22 | return &ClickHouse{ 23 | HumanLabel: make([]byte, 0, 1024), 24 | HumanDescription: make([]byte, 0, 1024), 25 | Table: make([]byte, 0, 1024), 26 | SqlQuery: make([]byte, 0, 1024), 27 | } 28 | }, 29 | } 30 | 31 | // NewClickHouse returns a new ClickHouse Query instance 32 | func NewClickHouse() *ClickHouse { 33 | return ClickHousePool.Get().(*ClickHouse) 34 | } 35 | 36 | // GetID returns the ID of this Query 37 | func (ch *ClickHouse) GetID() uint64 { 38 | return ch.id 39 | } 40 | 41 | // SetID sets the ID for this Query 42 | func (ch *ClickHouse) SetID(n uint64) { 43 | ch.id = n 44 | } 45 | 46 | // String produces a debug-ready description of a Query. 47 | func (ch *ClickHouse) String() string { 48 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s, Table: %s, Query: %s", ch.HumanLabel, ch.HumanDescription, ch.Table, ch.SqlQuery) 49 | } 50 | 51 | // HumanLabelName returns the human readable name of this Query 52 | func (ch *ClickHouse) HumanLabelName() []byte { 53 | return ch.HumanLabel 54 | } 55 | 56 | // HumanDescriptionName returns the human readable description of this Query 57 | func (ch *ClickHouse) HumanDescriptionName() []byte { 58 | return ch.HumanDescription 59 | } 60 | 61 | // Release resets and returns this Query to its pool 62 | func (ch *ClickHouse) Release() { 63 | ch.HumanLabel = ch.HumanLabel[:0] 64 | ch.HumanDescription = ch.HumanDescription[:0] 65 | ch.id = 0 66 | ch.Table = ch.Table[:0] 67 | ch.SqlQuery = ch.SqlQuery[:0] 68 | 69 | ClickHousePool.Put(ch) 70 | } 71 | -------------------------------------------------------------------------------- /pkg/query/cratedb.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // CrateDB encodes a CrateDB request. This will be serialized for use 9 | // by the tsbs_run_queries_cratedb program. 10 | type CrateDB struct { 11 | HumanLabel []byte 12 | HumanDescription []byte 13 | 14 | Table []byte // e.g. "cpu" 15 | SqlQuery []byte 16 | id uint64 17 | } 18 | 19 | var CrateDBPool = sync.Pool{ 20 | New: func() interface{} { 21 | return &CrateDB{ 22 | HumanLabel: make([]byte, 0, 1024), 23 | HumanDescription: make([]byte, 0, 1024), 24 | Table: make([]byte, 0, 1024), 25 | SqlQuery: make([]byte, 0, 1024), 26 | } 27 | }, 28 | } 29 | 30 | func NewCrateDB() *CrateDB { 31 | return CrateDBPool.Get().(*CrateDB) 32 | } 33 | 34 | func (q *CrateDB) GetID() uint64 { 35 | return q.id 36 | } 37 | 38 | func (q *CrateDB) SetID(n uint64) { 39 | q.id = n 40 | } 41 | 42 | // String produces a debug-ready description of a Query.
43 | func (q *CrateDB) String() string { 44 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s, Table: %s, Query: %s", 45 | q.HumanLabel, q.HumanDescription, q.Table, q.SqlQuery) 46 | } 47 | 48 | func (q *CrateDB) HumanLabelName() []byte { 49 | return q.HumanLabel 50 | } 51 | 52 | func (q *CrateDB) HumanDescriptionName() []byte { 53 | return q.HumanDescription 54 | } 55 | 56 | // Release resets and returns this Query to its pool 57 | func (q *CrateDB) Release() { 58 | q.HumanLabel = q.HumanLabel[:0] 59 | q.HumanDescription = q.HumanDescription[:0] 60 | q.id = 0 61 | 62 | q.Table = q.Table[:0] 63 | q.SqlQuery = q.SqlQuery[:0] 64 | 65 | CrateDBPool.Put(q) 66 | } 67 | -------------------------------------------------------------------------------- /pkg/query/cratedb_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import "testing" 4 | 5 | func TestNewCrateDB(t *testing.T) { 6 | check := func(tq *CrateDB) { 7 | testValidNewQuery(t, tq) 8 | if got := len(tq.Table); got != 0 { 9 | t.Errorf("new query has non-0 table label: got %d", got) 10 | } 11 | if got := len(tq.SqlQuery); got != 0 { 12 | t.Errorf("new query has non-0 sql query: got %d", got) 13 | } 14 | } 15 | tq := NewCrateDB() 16 | check(tq) 17 | tq.HumanLabel = []byte("foo") 18 | tq.HumanDescription = []byte("bar") 19 | tq.Table = []byte("table") 20 | tq.SqlQuery = []byte("SELECT * FROM *") 21 | tq.SetID(1) 22 | if got := string(tq.HumanLabelName()); got != "foo" { 23 | t.Errorf("incorrect label name: got %s", got) 24 | } 25 | if got := string(tq.HumanDescriptionName()); got != "bar" { 26 | t.Errorf("incorrect desc: got %s", got) 27 | } 28 | tq.Release() 29 | 30 | // Since we use a pool, check that the next one is reset 31 | tq = NewCrateDB() 32 | check(tq) 33 | tq.Release() 34 | } 35 | 36 | func TestCrateDBSetAndGetID(t *testing.T) { 37 | for i := 0; i < 2; i++ { 38 | q := NewCrateDB() 39 | testSetAndGetID(t, q) 40 | q.Release() 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /pkg/query/http_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import "testing" 4 | 5 | func TestNewHTTP(t *testing.T) { 6 | check := func(q *HTTP) { 7 | testValidNewQuery(t, q) 8 | if got := len(q.Method); got != 0 { 9 | t.Errorf("new query has non-0 method: got %d", got) 10 | } 11 | if got := len(q.Path); got != 0 { 12 | t.Errorf("new query has non-0 path: got %d", got) 13 | } 14 | if got := len(q.Body); got != 0 { 15 | t.Errorf("new query has non-0 body: got %d", got) 16 | } 17 | if got := q.StartTimestamp; got != 0 { 18 | t.Errorf("new query has non-0 start time: got %d", got) 19 | } 20 | if got := q.EndTimestamp; got != 0 { 21 | t.Errorf("new query has non-0 end time: got %d", got) 22 | } 23 | } 24 | q := NewHTTP() 25 | check(q) 26 | q.HumanLabel = []byte("foo") 27 | q.HumanDescription = []byte("bar") 28 | q.Method = []byte("POST") 29 | q.Path = []byte("/home") 30 | q.Body = []byte("bazbazbaz") 31 | q.StartTimestamp = 1 32 | q.EndTimestamp = 5 33 | q.SetID(1) 34 | if got := string(q.HumanLabelName()); got != "foo" { 35 | t.Errorf("incorrect label name: got %s", got) 36 | } 37 | if got := string(q.HumanDescriptionName()); got != "bar" { 38 | t.Errorf("incorrect desc: got %s", got) 39 | } 40 | q.Release() 41 | 42 | // Since we use a pool, check that the next one is reset 43 | q = NewHTTP() 44 | check(q) 45 | q.Release() 46 | } 47 | 48 | func TestHTTPSetAndGetID(t 
*testing.T) { 49 | for i := 0; i < 2; i++ { 50 | q := NewHTTP() 51 | testSetAndGetID(t, q) 52 | q.Release() 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /pkg/query/mongo.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/globalsign/mgo/bson" 8 | ) 9 | 10 | // Mongo encodes a Mongo request. This will be serialized for use 11 | // by the tsbs_run_queries_mongo program. 12 | type Mongo struct { 13 | HumanLabel []byte 14 | HumanDescription []byte 15 | CollectionName []byte 16 | BsonDoc []bson.M 17 | id uint64 18 | } 19 | 20 | // MongoPool is a sync.Pool of Mongo Query types 21 | var MongoPool = sync.Pool{ 22 | New: func() interface{} { 23 | return &Mongo{ 24 | HumanLabel: []byte{}, 25 | HumanDescription: []byte{}, 26 | CollectionName: []byte{}, 27 | BsonDoc: []bson.M{}, 28 | } 29 | }, 30 | } 31 | 32 | // NewMongo returns a new Mongo Query instance 33 | func NewMongo() *Mongo { 34 | return MongoPool.Get().(*Mongo) 35 | } 36 | 37 | // GetID returns the ID of this Query 38 | func (q *Mongo) GetID() uint64 { 39 | return q.id 40 | } 41 | 42 | // SetID sets the ID for this Query 43 | func (q *Mongo) SetID(id uint64) { 44 | q.id = id 45 | } 46 | 47 | // String produces a debug-ready description of a Query. 48 | func (q *Mongo) String() string { 49 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s", q.HumanLabel, q.HumanDescription) 50 | } 51 | 52 | // HumanLabelName returns the human readable name of this Query 53 | func (q *Mongo) HumanLabelName() []byte { 54 | return q.HumanLabel 55 | } 56 | 57 | // HumanDescriptionName returns the human readable description of this Query 58 | func (q *Mongo) HumanDescriptionName() []byte { 59 | return q.HumanDescription 60 | } 61 | 62 | // Release resets and returns this Query to its pool 63 | func (q *Mongo) Release() { 64 | q.HumanLabel = q.HumanLabel[:0] 65 | q.HumanDescription = q.HumanDescription[:0] 66 | q.id = 0 67 | q.CollectionName = q.CollectionName[:0] 68 | q.BsonDoc = nil 69 | 70 | MongoPool.Put(q) 71 | } 72 | -------------------------------------------------------------------------------- /pkg/query/mongo_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/globalsign/mgo/bson" 7 | ) 8 | 9 | func TestNewMongo(t *testing.T) { 10 | check := func(q *Mongo) { 11 | testValidNewQuery(t, q) 12 | if got := len(q.CollectionName); got != 0 { 13 | t.Errorf("new query has non-0 collection name: got %d", got) 14 | } 15 | if got := len(q.BsonDoc); got != 0 { 16 | t.Errorf("new query has non-0 bson doc: got %d", got) 17 | } 18 | } 19 | q := NewMongo() 20 | check(q) 21 | q.HumanLabel = []byte("foo") 22 | q.HumanDescription = []byte("bar") 23 | q.BsonDoc = append(q.BsonDoc, bson.M{}) 24 | q.CollectionName = []byte("baz") 25 | q.SetID(1) 26 | if got := string(q.HumanLabelName()); got != "foo" { 27 | t.Errorf("incorrect label name: got %s", got) 28 | } 29 | if got := string(q.HumanDescriptionName()); got != "bar" { 30 | t.Errorf("incorrect desc: got %s", got) 31 | } 32 | q.Release() 33 | 34 | // Since we use a pool, check that the next one is reset 35 | q = NewMongo() 36 | check(q) 37 | q.Release() 38 | } 39 | 40 | func TestMongoSetAndGetID(t *testing.T) { 41 | for i := 0; i < 2; i++ { 42 | q := NewMongo() 43 | testSetAndGetID(t, q) 44 | q.Release() 45 | } 46 | } 47 | 
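The runners pull queries off a gob stream (see scanner.go below), so a generator-side counterpart looks roughly like the sketch here; the label, collection name, and pipeline stage are invented for illustration:

package main

import (
	"bytes"
	"encoding/gob"
	"log"

	"github.com/globalsign/mgo/bson"
	"github.com/timescale/tsbs/pkg/query"
)

func main() {
	// Interface-typed values nested inside bson.M must be registered for gob.
	gob.Register(bson.M{})

	q := query.NewMongo() // pooled; Release returns it zeroed
	q.HumanLabel = []byte("example-label")
	q.HumanDescription = []byte("example aggregation pipeline")
	q.CollectionName = []byte("point_data")
	q.BsonDoc = []bson.M{{"$match": bson.M{"measurement": "cpu"}}}

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(q); err != nil {
		log.Fatal(err)
	}
	q.Release() // hand the struct back to MongoPool

	log.Printf("encoded %d bytes", buf.Len())
}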
-------------------------------------------------------------------------------- /pkg/query/query.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // Query is an interface used for encoding a benchmark query for different databases 8 | type Query interface { 9 | Release() 10 | HumanLabelName() []byte 11 | HumanDescriptionName() []byte 12 | GetID() uint64 13 | SetID(uint64) 14 | fmt.Stringer 15 | } 16 | -------------------------------------------------------------------------------- /pkg/query/query_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import "testing" 4 | 5 | func testValidNewQuery(t *testing.T, q Query) { 6 | if got := len(q.HumanLabelName()); got != 0 { 7 | t.Errorf("new query has non-0 human label: got %d", got) 8 | } 9 | if got := len(q.HumanDescriptionName()); got != 0 { 10 | t.Errorf("new query has non-0 human desc: got %d", got) 11 | } 12 | if got := q.GetID(); got != 0 { 13 | t.Errorf("new query has non-0 id: got %d", got) 14 | } 15 | } 16 | 17 | func testSetAndGetID(t *testing.T, q Query) { 18 | if got := q.GetID(); got != 0 { 19 | t.Errorf("new query does not have 0 id: got %d", got) 20 | } 21 | q.SetID(100) 22 | if got := q.GetID(); got != 100 { 23 | t.Errorf("GetID returned incorrect id: got %d", got) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pkg/query/scanner.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "encoding/gob" 5 | "io" 6 | "log" 7 | "sync" 8 | ) 9 | 10 | // scanner is used to read in Queries from a Reader where they are 11 | // gob-encoded and then distribute them to workers 12 | type scanner struct { 13 | r io.Reader 14 | limit *uint64 15 | } 16 | 17 | // newScanner returns a new scanner with the given query limit; the source Reader is supplied later via setReader 18 | func newScanner(limit *uint64) *scanner { 19 | return &scanner{limit: limit} 20 | } 21 | 22 | // setReader sets the source, an io.Reader, that the scanner reads/decodes from 23 | func (s *scanner) setReader(r io.Reader) *scanner { 24 | s.r = r 25 | return s 26 | } 27 | 28 | // scan reads encoded Queries and places them into a channel 29 | func (s *scanner) scan(pool *sync.Pool, c chan Query) { 30 | decoder := gob.NewDecoder(s.r) 31 | 32 | n := uint64(0) 33 | for { 34 | if *s.limit > 0 && n >= *s.limit { 35 | // request queries limit reached, time to quit 36 | break 37 | } 38 | 39 | q := pool.Get().(Query) 40 | err := decoder.Decode(q) 41 | if err == io.EOF { 42 | // EOF, all done 43 | break 44 | } 45 | if err != nil { 46 | // Can't read, time to quit 47 | log.Fatal(err) 48 | } 49 | 50 | // We have a query, send it to the runner 51 | q.SetID(n) 52 | c <- q 53 | 54 | // Queries counter 55 | n++ 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /pkg/query/siridb.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | type SiriDB struct { 9 | HumanLabel []byte 10 | HumanDescription []byte 11 | SqlQuery []byte 12 | id uint64 13 | } 14 | 15 | var SiriDBPool = sync.Pool{ 16 | New: func() interface{} { 17 | return &SiriDB{ 18 | HumanLabel: make([]byte, 0, 1024), 19 | HumanDescription: make([]byte, 0, 1024), 20 | SqlQuery: make([]byte, 0, 1024), 21 | } 22 | }, 23 | } 24 | 25 | func NewSiriDB() *SiriDB {
26 | return SiriDBPool.Get().(*SiriDB) 27 | } 28 | 29 | // GetID returns the ID of this Query 30 | func (q *SiriDB) GetID() uint64 { 31 | return q.id 32 | } 33 | 34 | // SetID sets the ID for this Query 35 | func (q *SiriDB) SetID(id uint64) { 36 | q.id = id 37 | } 38 | 39 | // String produces a debug-ready description of a Query. 40 | func (q *SiriDB) String() string { 41 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s", q.HumanLabel, q.HumanDescription) 42 | } 43 | 44 | // HumanLabelName returns the human readable name of this Query 45 | func (q *SiriDB) HumanLabelName() []byte { 46 | return q.HumanLabel 47 | } 48 | 49 | // HumanDescriptionName returns the human readable description of this Query 50 | func (q *SiriDB) HumanDescriptionName() []byte { 51 | return q.HumanDescription 52 | } 53 | 54 | // Release resets and returns this Query to its pool 55 | func (q *SiriDB) Release() { 56 | q.HumanLabel = q.HumanLabel[:0] 57 | q.HumanDescription = q.HumanDescription[:0] 58 | q.id = 0 59 | q.SqlQuery = q.SqlQuery[:0] 60 | 61 | SiriDBPool.Put(q) 62 | } 63 | -------------------------------------------------------------------------------- /pkg/query/siridb_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import "testing" 4 | 5 | func TestNewSiriDB(t *testing.T) { 6 | check := func(sq *SiriDB) { 7 | testValidNewQuery(t, sq) 8 | if got := len(sq.SqlQuery); got != 0 { 9 | t.Errorf("new query has non-0 sql query: got %d", got) 10 | } 11 | } 12 | sq := NewSiriDB() 13 | check(sq) 14 | sq.HumanLabel = []byte("foo") 15 | sq.HumanDescription = []byte("bar") 16 | sq.SqlQuery = []byte("SELECT * FROM *") 17 | sq.SetID(1) 18 | if got := string(sq.HumanLabelName()); got != "foo" { 19 | t.Errorf("incorrect label name: got %s", got) 20 | } 21 | if got := string(sq.HumanDescriptionName()); got != "bar" { 22 | t.Errorf("incorrect desc: got %s", got) 23 | } 24 | sq.Release() 25 | 26 | // Since we use a pool, check that the next one is reset 27 | sq = NewSiriDB() 28 | check(sq) 29 | sq.Release() 30 | } 31 | 32 | func TestSiriDBSetAndGetID(t *testing.T) { 33 | for i := 0; i < 2; i++ { 34 | q := NewSiriDB() 35 | testSetAndGetID(t, q) 36 | q.Release() 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /pkg/query/stat_processor_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestStatProcessorSend(t *testing.T) { 9 | s := GetStat() 10 | s.isWarm = true 11 | statPool.Put(s) 12 | s = GetStat() 13 | if s.isWarm { 14 | t.Errorf("initial stat came back warm unexpectedly") 15 | } 16 | s.value = 10.1 17 | sp := &defaultStatProcessor{} 18 | sp.c = make(chan *Stat, 2) 19 | sp.send([]*Stat{s, s}) 20 | r := <-sp.c 21 | if r.value != s.value { 22 | t.Errorf("sent a stat and got a different one back") 23 | } 24 | if r.isWarm { 25 | t.Errorf("received stat is warm unexpectedly") 26 | } 27 | 28 | // 2nd value too 29 | r = <-sp.c 30 | if r.value != s.value { 31 | t.Errorf("sent a stat and got a different one back (2)") 32 | } 33 | if r.isWarm { 34 | t.Errorf("received stat is warm unexpectedly (2)") 35 | } 36 | 37 | // should not send anything 38 | wantLen := len(sp.c) 39 | sp.send(nil) 40 | time.Sleep(25 * time.Millisecond) 41 | if got := len(sp.c); got != wantLen { 42 | t.Errorf("empty stat array changed channel length: got %d want %d", got, wantLen) 43 | } 44 | } 45 | 46 | func 
TestStatProcessorSendWarm(t *testing.T) { 47 | s := GetStat() 48 | if s.isWarm { 49 | t.Errorf("initial stat came back warm unexpectedly") 50 | } 51 | s.value = 10.1 52 | sp := &defaultStatProcessor{} 53 | sp.c = make(chan *Stat, 2) 54 | sp.sendWarm([]*Stat{s, s}) 55 | r := <-sp.c 56 | if r.value != s.value { 57 | t.Errorf("sent a stat and got a different one back") 58 | } 59 | if !r.isWarm { 60 | t.Errorf("received stat is NOT warm unexpectedly") 61 | } 62 | 63 | // 2nd value too 64 | r = <-sp.c 65 | if r.value != s.value { 66 | t.Errorf("sent a stat and got a different one back (2)") 67 | } 68 | if !r.isWarm { 69 | t.Errorf("received stat is NOT warm unexpectedly (2)") 70 | } 71 | 72 | // should not send anything 73 | wantLen := len(sp.c) 74 | sp.sendWarm(nil) 75 | time.Sleep(25 * time.Millisecond) 76 | if got := len(sp.c); got != wantLen { 77 | t.Errorf("empty stat array changed channel length: got %d want %d", got, wantLen) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /pkg/query/timescaledb.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // TimescaleDB encodes a TimescaleDB request. This will be serialized for use 9 | // by the tsbs_run_queries_timescaledb program. 10 | type TimescaleDB struct { 11 | HumanLabel []byte 12 | HumanDescription []byte 13 | 14 | Hypertable []byte // e.g. "cpu" 15 | SqlQuery []byte 16 | id uint64 17 | } 18 | 19 | // TimescaleDBPool is a sync.Pool of TimescaleDB Query types 20 | var TimescaleDBPool = sync.Pool{ 21 | New: func() interface{} { 22 | return &TimescaleDB{ 23 | HumanLabel: make([]byte, 0, 1024), 24 | HumanDescription: make([]byte, 0, 1024), 25 | Hypertable: make([]byte, 0, 1024), 26 | SqlQuery: make([]byte, 0, 1024), 27 | } 28 | }, 29 | } 30 | 31 | // NewTimescaleDB returns a new TimescaleDB Query instance 32 | func NewTimescaleDB() *TimescaleDB { 33 | return TimescaleDBPool.Get().(*TimescaleDB) 34 | } 35 | 36 | // GetID returns the ID of this Query 37 | func (q *TimescaleDB) GetID() uint64 { 38 | return q.id 39 | } 40 | 41 | // SetID sets the ID for this Query 42 | func (q *TimescaleDB) SetID(n uint64) { 43 | q.id = n 44 | } 45 | 46 | // String produces a debug-ready description of a Query. 
47 | func (q *TimescaleDB) String() string { 48 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s, Hypertable: %s, Query: %s", q.HumanLabel, q.HumanDescription, q.Hypertable, q.SqlQuery) 49 | } 50 | 51 | // HumanLabelName returns the human readable name of this Query 52 | func (q *TimescaleDB) HumanLabelName() []byte { 53 | return q.HumanLabel 54 | } 55 | 56 | // HumanDescriptionName returns the human readable description of this Query 57 | func (q *TimescaleDB) HumanDescriptionName() []byte { 58 | return q.HumanDescription 59 | } 60 | 61 | // Release resets and returns this Query to its pool 62 | func (q *TimescaleDB) Release() { 63 | q.HumanLabel = q.HumanLabel[:0] 64 | q.HumanDescription = q.HumanDescription[:0] 65 | q.id = 0 66 | 67 | q.Hypertable = q.Hypertable[:0] 68 | q.SqlQuery = q.SqlQuery[:0] 69 | 70 | TimescaleDBPool.Put(q) 71 | } 72 | -------------------------------------------------------------------------------- /pkg/query/timescaledb_test.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import "testing" 4 | 5 | func TestNewTimescaleDB(t *testing.T) { 6 | check := func(tq *TimescaleDB) { 7 | testValidNewQuery(t, tq) 8 | if got := len(tq.Hypertable); got != 0 { 9 | t.Errorf("new query has non-0 hypertable label: got %d", got) 10 | } 11 | if got := len(tq.SqlQuery); got != 0 { 12 | t.Errorf("new query has non-0 sql query: got %d", got) 13 | } 14 | } 15 | tq := NewTimescaleDB() 16 | check(tq) 17 | tq.HumanLabel = []byte("foo") 18 | tq.HumanDescription = []byte("bar") 19 | tq.Hypertable = []byte("table") 20 | tq.SqlQuery = []byte("SELECT * FROM *") 21 | tq.SetID(1) 22 | if got := string(tq.HumanLabelName()); got != "foo" { 23 | t.Errorf("incorrect label name: got %s", got) 24 | } 25 | if got := string(tq.HumanDescriptionName()); got != "bar" { 26 | t.Errorf("incorrect desc: got %s", got) 27 | } 28 | tq.Release() 29 | 30 | // Since we use a pool, check that the next one is reset 31 | tq = NewTimescaleDB() 32 | check(tq) 33 | tq.Release() 34 | } 35 | 36 | func TestTimescaleDBSetAndGetID(t *testing.T) { 37 | for i := 0; i < 2; i++ { 38 | q := NewTimescaleDB() 39 | testSetAndGetID(t, q) 40 | q.Release() 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /pkg/query/timestream.go: -------------------------------------------------------------------------------- 1 | package query 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // Timestream encodes a Timestream request. This will be serialized for use 9 | // by the tsbs_run_queries_timestream program. 10 | type Timestream struct { 11 | HumanLabel []byte 12 | HumanDescription []byte 13 | 14 | Table []byte // e.g. 
"cpu" 15 | SqlQuery []byte 16 | id uint64 17 | } 18 | 19 | // TimestreamPool is a sync.Pool of Timestream Query types 20 | var TimestreamPool = sync.Pool{ 21 | New: func() interface{} { 22 | return &Timestream{ 23 | HumanLabel: make([]byte, 0, 1024), 24 | HumanDescription: make([]byte, 0, 1024), 25 | Table: make([]byte, 0, 50), 26 | SqlQuery: make([]byte, 0, 1024), 27 | } 28 | }, 29 | } 30 | 31 | // NewTimestream returns a new Timestream Query instance 32 | func NewTimestream() *Timestream { 33 | return TimestreamPool.Get().(*Timestream) 34 | } 35 | 36 | // GetID returns the ID of this Query 37 | func (q *Timestream) GetID() uint64 { 38 | return q.id 39 | } 40 | 41 | // SetID sets the ID for this Query 42 | func (q *Timestream) SetID(n uint64) { 43 | q.id = n 44 | } 45 | 46 | // String produces a debug-ready description of a Query. 47 | func (q *Timestream) String() string { 48 | return fmt.Sprintf( 49 | "HumanLabel: %s, HumanDescription: %s, Table: %s, Query: %s", 50 | q.HumanLabel, q.HumanDescription, q.Table, q.SqlQuery, 51 | ) 52 | } 53 | 54 | // HumanLabelName returns the human readable name of this Query 55 | func (q *Timestream) HumanLabelName() []byte { 56 | return q.HumanLabel 57 | } 58 | 59 | // HumanDescriptionName returns the human readable description of this Query 60 | func (q *Timestream) HumanDescriptionName() []byte { 61 | return q.HumanDescription 62 | } 63 | 64 | // Release resets and returns this Query to its pool 65 | func (q *Timestream) Release() { 66 | q.HumanLabel = q.HumanLabel[:0] 67 | q.HumanDescription = q.HumanDescription[:0] 68 | q.id = 0 69 | q.Table = q.Table[:0] 70 | q.SqlQuery = q.SqlQuery[:0] 71 | 72 | TimestreamPool.Put(q) 73 | } 74 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/benchmark.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "github.com/timescale/tsbs/load" 5 | "github.com/timescale/tsbs/pkg/targets" 6 | "sync" 7 | ) 8 | 9 | func NewBenchmark(loadFileName, endpoint string, bufPool *sync.Pool) targets.Benchmark { 10 | return &benchmark{ 11 | loadFileName: loadFileName, 12 | endpoint: endpoint, 13 | bufPool: bufPool, 14 | } 15 | } 16 | 17 | type benchmark struct { 18 | loadFileName string 19 | endpoint string 20 | bufPool *sync.Pool 21 | } 22 | 23 | func (b *benchmark) GetDataSource() targets.DataSource { 24 | return &fileDataSource{reader: load.GetBufferedReader(b.loadFileName)} 25 | } 26 | 27 | func (b *benchmark) GetBatchFactory() targets.BatchFactory { 28 | return &factory{bufPool: b.bufPool} 29 | } 30 | 31 | func (b *benchmark) GetPointIndexer(n uint) targets.PointIndexer { 32 | return &pointIndexer{nchan: n} 33 | } 34 | 35 | func (b *benchmark) GetProcessor() targets.Processor { 36 | return &processor{endpoint: b.endpoint, bufPool: b.bufPool} 37 | } 38 | 39 | func (b *benchmark) GetDBCreator() targets.DBCreator { 40 | return &dbCreator{} 41 | } 42 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/creator.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "bufio" 5 | ) 6 | 7 | // loader.DBCreator interface implementation 8 | type dbCreator struct { 9 | } 10 | 11 | // loader.DBCreator interface implementation 12 | func (d *dbCreator) Init() { 13 | } 14 | 15 | // loader.DBCreator interface implementation 16 | func (d *dbCreator) readDataHeader(br *bufio.Reader) { 17 | } 18 | 19 | // 
loader.DBCreator interface implementation 20 | func (d *dbCreator) DBExists(dbName string) bool { 21 | return false 22 | } 23 | 24 | // loader.DBCreator interface implementation 25 | func (d *dbCreator) RemoveOldDB(dbName string) error { 26 | return nil 27 | } 28 | 29 | // loader.DBCreator interface implementation 30 | func (d *dbCreator) CreateDB(dbName string) error { 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/implemented_target.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | ) 11 | 12 | func NewTarget() targets.ImplementedTarget { 13 | return &akumuliTarget{} 14 | } 15 | 16 | type akumuliTarget struct { 17 | } 18 | 19 | func (t *akumuliTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 20 | flagSet.String(flagPrefix+"endpoint", "http://localhost:8282", "Akumuli RESP endpoint IP address.") 21 | } 22 | 23 | func (t *akumuliTarget) TargetName() string { 24 | return constants.FormatAkumuli 25 | } 26 | 27 | func (t *akumuliTarget) Serializer() serialize.PointSerializer { 28 | return &Serializer{} 29 | } 30 | 31 | func (t *akumuliTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 32 | panic("not implemented") 33 | } 34 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/process.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "encoding/binary" 5 | "github.com/timescale/tsbs/pkg/targets" 6 | "log" 7 | "net" 8 | "sync" 9 | ) 10 | 11 | type processor struct { 12 | bufPool *sync.Pool 13 | endpoint string 14 | conn net.Conn 15 | worker int 16 | } 17 | 18 | func (p *processor) Init(numWorker int, _, _ bool) { 19 | p.worker = numWorker 20 | c, err := net.Dial("tcp", p.endpoint) 21 | if err == nil { 22 | p.conn = c 23 | log.Println("Connection with", p.endpoint, "successful") 24 | } else { 25 | log.Println("Can't establish connection with", p.endpoint) 26 | panic("Connection error") 27 | } 28 | } 29 | 30 | func (p *processor) Close(doLoad bool) { 31 | if doLoad { 32 | p.conn.Close() 33 | } 34 | } 35 | 36 | func (p *processor) ProcessBatch(b targets.Batch, doLoad bool) (uint64, uint64) { 37 | batch := b.(*batch) 38 | var nmetrics uint64 39 | if doLoad { 40 | head := batch.buf.Bytes() 41 | for len(head) != 0 { 42 | nbytes := binary.LittleEndian.Uint16(head[4:6]) 43 | nfields := binary.LittleEndian.Uint16(head[6:8]) 44 | payload := head[8:nbytes] 45 | p.conn.Write(payload) 46 | nmetrics += uint64(nfields) 47 | head = head[nbytes:] 48 | } 49 | } 50 | batch.buf.Reset() 51 | p.bufPool.Put(batch.buf) 52 | return nmetrics, uint64(batch.rows) 53 | } 54 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/scan.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/binary" 7 | "io" 8 | "sync" 9 | 10 | "github.com/timescale/tsbs/pkg/data" 11 | "github.com/timescale/tsbs/pkg/data/usecases/common" 12 | "github.com/timescale/tsbs/pkg/targets" 13 | ) 14 | 
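// The pre-generated load file is a stream of length-prefixed frames: bytes
// 0-3 carry a little-endian uint32 series id (used below only to route points
// to workers), bytes 4-5 the total frame length including this 8-byte header,
// bytes 6-7 the field count, and the RESP payload fills the rest (see
// ProcessBatch in process.go for the consuming side).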
15 | type fileDataSource struct { 16 | reader *bufio.Reader 17 | } 18 | 19 | func (d *fileDataSource) NextItem() data.LoadedPoint { 20 | hdr, err := d.reader.Peek(6) 21 | if err == io.EOF { 22 | return data.LoadedPoint{} 23 | } 24 | nbytes := binary.LittleEndian.Uint16(hdr[4:6]) 25 | body := make([]byte, nbytes) 26 | _, err = io.ReadFull(d.reader, body) 27 | if err == io.EOF { 28 | return data.LoadedPoint{} 29 | } 30 | return data.NewLoadedPoint(body) 31 | } 32 | 33 | // Akumuli doesn't serialize headers, so there are none to read 34 | func (d *fileDataSource) Headers() *common.GeneratedDataHeaders { return nil } 35 | 36 | type pointIndexer struct { 37 | nchan uint 38 | } 39 | 40 | func (i *pointIndexer) GetIndex(p data.LoadedPoint) uint { 41 | hdr := p.Data.([]byte) 42 | id := binary.LittleEndian.Uint32(hdr[0:4]) 43 | return uint(id) % i.nchan 44 | } 45 | 46 | type batch struct { 47 | buf *bytes.Buffer 48 | rows uint 49 | } 50 | 51 | func (b *batch) Len() uint { 52 | return b.rows 53 | } 54 | 55 | func (b *batch) Append(item data.LoadedPoint) { 56 | payload := item.Data.([]byte) 57 | b.buf.Write(payload) 58 | b.rows++ 59 | } 60 | 61 | type factory struct { 62 | bufPool *sync.Pool 63 | } 64 | 65 | func (f *factory) New() targets.Batch { 66 | return &batch{buf: f.bufPool.Get().(*bytes.Buffer)} 67 | } 68 | -------------------------------------------------------------------------------- /pkg/targets/akumuli/serializer_test.go: -------------------------------------------------------------------------------- 1 | package akumuli 2 | 3 | import ( 4 | "bytes" 5 | "github.com/timescale/tsbs/pkg/data" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func TestAkumuliSerializerSerialize(t *testing.T) { 12 | 13 | serializer := NewAkumuliSerializer() 14 | 15 | points := []*data.Point{ 16 | serialize.TestPointDefault(), 17 | serialize.TestPointInt(), 18 | serialize.TestPointMultiField(), 19 | serialize.TestPointDefault(), 20 | serialize.TestPointInt(), 21 | serialize.TestPointMultiField(), 22 | } 23 | 24 | type testCase struct { 25 | expCount int 26 | expValue string 27 | name string 28 | } 29 | 30 | cases := []testCase{ 31 | { 32 | expCount: 1, 33 | expValue: "+cpu.usage_guest_nice hostname=host_0 region=eu-west-1 datacenter=eu-west-1b", 34 | name: "series name default", 35 | }, 36 | { 37 | expCount: 1, 38 | expValue: "+cpu.usage_guest hostname=host_0 region=eu-west-1 datacenter=eu-west-1b", 39 | name: "series name int", 40 | }, 41 | { 42 | expCount: 1, 43 | expValue: "+cpu.big_usage_guest|cpu.usage_guest|cpu.usage_guest_nice hostname=host_0 region=eu-west-1 datacenter=eu-west-1b", 44 | name: "series name multi-field", 45 | }, 46 | { 47 | expCount: 2, 48 | expValue: "*1\n+38.24311829", 49 | name: "value default", 50 | }, 51 | { 52 | expCount: 2, 53 | expValue: "*1\n:38", 54 | name: "value int", 55 | }, 56 | { 57 | expCount: 2, 58 | expValue: "*3\n:5000000000\n:38\n+38.24311829", 59 | name: "value multi-field", 60 | }, 61 | { 62 | expCount: 6, 63 | expValue: ":1451606400000000000", 64 | name: "timestamp", 65 | }, 66 | } 67 | buf := new(bytes.Buffer) 68 | for _, point := range points { 69 | serializer.Serialize(point, buf) 70 | } 71 | 72 | got := buf.String() 73 | 74 | for _, c := range cases { 75 | actualCnt := strings.Count(got, c.expValue) 76 | if actualCnt != c.expCount { 77 | t.Errorf("Output incorrect: %s expected %d times got %d times", c.name, c.expCount, actualCnt) 78 | } 79 | } 80 | } 81 |
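To make the framing concrete, a small sketch that builds one frame by hand the way fileDataSource and processor expect it; the series id, field count, and RESP payload bytes are invented for illustration (real payloads come from the serializer):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("+cpu.usage_user hostname=host_0\r\n:1451606400000000000\r\n+38.24\r\n")
	frame := make([]byte, 8, 8+len(payload))
	binary.LittleEndian.PutUint32(frame[0:4], 7)                      // series id: worker routing only
	binary.LittleEndian.PutUint16(frame[4:6], uint16(8+len(payload))) // total length, header included
	binary.LittleEndian.PutUint16(frame[6:8], 1)                      // fields carried in the payload
	frame = append(frame, payload...)

	// Mirrors what processor.ProcessBatch extracts from each frame:
	nbytes := binary.LittleEndian.Uint16(frame[4:6])
	nfields := binary.LittleEndian.Uint16(frame[6:8])
	fmt.Printf("send %d payload bytes, %d field(s): %q\n", nbytes-8, nfields, frame[8:nbytes])
}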
-------------------------------------------------------------------------------- /pkg/targets/cassandra/db_specific_config.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "time" 6 | ) 7 | 8 | type SpecificConfig struct { 9 | Hosts string `yaml:"hosts" mapstructure:"hosts"` 10 | ReplicationFactor int `yaml:"replication-factor" mapstructure:"replication-factor"` 11 | ConsistencyLevel string `yaml:"consistency" mapstructure:"consistency"` 12 | WriteTimeout time.Duration `yaml:"write-timeout" mapstructure:"write-timeout"` 13 | } 14 | 15 | func parseSpecificConfig(v *viper.Viper) (*SpecificConfig, error) { 16 | var conf SpecificConfig 17 | if err := v.Unmarshal(&conf); err != nil { 18 | return nil, err 19 | } 20 | return &conf, nil 21 | } 22 | -------------------------------------------------------------------------------- /pkg/targets/cassandra/implemented_target.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "time" 11 | ) 12 | 13 | func NewTarget() targets.ImplementedTarget { 14 | return &cassandraTarget{} 15 | } 16 | 17 | type cassandraTarget struct { 18 | } 19 | 20 | func (t *cassandraTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 21 | flagSet.String(flagPrefix+"hosts", "localhost:9042", "Comma separated list of Cassandra hosts in a cluster.") 22 | flagSet.Int(flagPrefix+"replication-factor", 1, "Number of nodes that must have a copy of each key.") 23 | flagSet.String(flagPrefix+"consistency", "ALL", "Desired write consistency level. See Cassandra consistency documentation.
Default: ALL") 24 | flagSet.Duration(flagPrefix+"write-timeout", 10*time.Second, "Write timeout.") 25 | } 26 | 27 | func (t *cassandraTarget) TargetName() string { 28 | return constants.FormatCassandra 29 | } 30 | 31 | func (t *cassandraTarget) Serializer() serialize.PointSerializer { 32 | return &Serializer{} 33 | } 34 | 35 | func (t *cassandraTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 36 | panic("not implemented") 37 | } 38 | -------------------------------------------------------------------------------- /pkg/targets/cassandra/scan_test.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestSingleMetricToInsertStatement(t *testing.T) { 8 | cases := []struct { 9 | desc string 10 | inputCSV string 11 | outputInsertStatement string 12 | }{ 13 | { 14 | desc: "A properly formatted CSV line should result in a properly formatted CQL INSERT statement", 15 | inputCSV: "series_double,cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b,rack=67,os=Ubuntu16.10,arch=x86,team=NYC,service=7,service_version=0,service_environment=production,usage_guest_nice,2016-01-01,1451606400000000000,38.2431182911542820", 16 | outputInsertStatement: "INSERT INTO series_double(series_id, timestamp_ns, value) VALUES('cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b,rack=67,os=Ubuntu16.10,arch=x86,team=NYC,service=7,service_version=0,service_environment=production#usage_guest_nice#2016-01-01', 1451606400000000000, 38.2431182911542820)", 17 | }, 18 | { 19 | desc: "A properly formatted CSV line with an arbitrary number of tags should result in a properly formatted CQL INSERT statement", 20 | inputCSV: "series_bigint,redis,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b,rack=67,os=Ubuntu16.10,arch=x86,team=NYC,service=7,service_version=0,service_environment=production,port=6379,server=redis_1,used_cpu_user,2016-01-01,1451606400000000000,388", 21 | outputInsertStatement: "INSERT INTO series_bigint(series_id, timestamp_ns, value) VALUES('redis,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b,rack=67,os=Ubuntu16.10,arch=x86,team=NYC,service=7,service_version=0,service_environment=production,port=6379,server=redis_1#used_cpu_user#2016-01-01', 1451606400000000000, 388)", 22 | }, 23 | } 24 | 25 | for _, c := range cases { 26 | output := singleMetricToInsertStatement(c.inputCSV) 27 | if output != c.outputInsertStatement { 28 | t.Errorf("%s \nOutput incorrect: \nWant: %s \nGot: %s", c.desc, c.outputInsertStatement, output) 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /pkg/targets/clickhouse/creator_test.go: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestGenerateTagsTableQuery(t *testing.T) { 9 | testCases := []struct { 10 | inTagNames []string 11 | inTagTypes []string 12 | out string 13 | }{{ 14 | inTagNames: []string{"tag1"}, 15 | inTagTypes: []string{"string"}, 16 | out: "CREATE TABLE tags(\n" + 17 | "created_date Date DEFAULT today(),\n" + 18 | "created_at DateTime DEFAULT now(),\n" + 19 | "id UInt32,\n" + 20 | "tag1 Nullable(String)" + 21 | ") ENGINE = MergeTree(created_date, (id), 8192)"}, { 22 | inTagNames: []string{"tag1", "tag2", "tag3", "tag4"}, 23 | inTagTypes: []string{"int32", "int64", "float32", "float64"}, 24 | out: "CREATE TABLE tags(\n" + 25 | 
"created_date Date DEFAULT today(),\n" + 26 | "created_at DateTime DEFAULT now(),\n" + 27 | "id UInt32,\n" + 28 | "tag1 Nullable(Int32),\n" + 29 | "tag2 Nullable(Int64),\n" + 30 | "tag3 Nullable(Float32),\n" + 31 | "tag4 Nullable(Float64)" + 32 | ") ENGINE = MergeTree(created_date, (id), 8192)"}, 33 | } 34 | for _, tc := range testCases { 35 | t.Run(fmt.Sprintf("tags table for %v", tc.inTagNames), func(t *testing.T) { 36 | res := generateTagsTableQuery(tc.inTagNames, tc.inTagTypes) 37 | if res != tc.out { 38 | t.Errorf("unexpected result.\nexpected: %s\ngot: %s", tc.out, res) 39 | } 40 | }) 41 | } 42 | } 43 | 44 | func TestGenerateTagsTableQueryPanicOnWrongFormat(t *testing.T) { 45 | defer func() { 46 | r := recover() 47 | if r == nil { 48 | t.Errorf("did not panic when should") 49 | } 50 | }() 51 | 52 | generateTagsTableQuery([]string{"tag"}, []string{}) 53 | 54 | t.Fatalf("test should have stopped at this point") 55 | } 56 | 57 | func TestGenerateTagsTableQueryPanicOnWrongType(t *testing.T) { 58 | defer func() { 59 | r := recover() 60 | if r == nil { 61 | t.Errorf("did not panic when should") 62 | } 63 | }() 64 | 65 | generateTagsTableQuery([]string{"unknownType"}, []string{"uint32"}) 66 | 67 | t.Fatalf("test should have stopped at this point") 68 | } 69 | -------------------------------------------------------------------------------- /pkg/targets/clickhouse/implemented_target.go: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "github.com/timescale/tsbs/pkg/targets/timescaledb" 11 | ) 12 | 13 | func NewTarget() targets.ImplementedTarget { 14 | return &clickhouseTarget{} 15 | } 16 | 17 | type clickhouseTarget struct{} 18 | 19 | func (c clickhouseTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 20 | panic("implement me") 21 | } 22 | 23 | func (c clickhouseTarget) Serializer() serialize.PointSerializer { 24 | return ×caledb.Serializer{} 25 | } 26 | 27 | func (c clickhouseTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 28 | flagSet.String(flagPrefix+"host", "localhost", "Hostname of ClickHouse instance") 29 | flagSet.String(flagPrefix+"user", "default", "User to connect to ClickHouse as") 30 | flagSet.String(flagPrefix+"password", "", "Password to connect to ClickHouse") 31 | flagSet.Bool(flagPrefix+"log-batches", false, "Whether to time individual batches.") 32 | flagSet.Int(flagPrefix+"debug", 0, "Debug printing (choices: 0, 1, 2). 
(default 0)") 33 | } 34 | 35 | func (c clickhouseTarget) TargetName() string { 36 | return constants.FormatClickhouse 37 | } 38 | -------------------------------------------------------------------------------- /pkg/targets/clickhouse/indexer.go: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "hash/fnv" 5 | "strings" 6 | 7 | "github.com/timescale/tsbs/pkg/data" 8 | ) 9 | 10 | // hostnameIndexer is used to consistently send the same hostnames to the same queue 11 | type hostnameIndexer struct { 12 | partitions uint 13 | } 14 | 15 | // scan.PointIndexer interface implementation 16 | func (i *hostnameIndexer) GetIndex(item data.LoadedPoint) uint { 17 | p := item.Data.(*point) 18 | hostname := strings.SplitN(p.row.tags, ",", 2)[0] 19 | h := fnv.New32a() 20 | h.Write([]byte(hostname)) 21 | return uint(h.Sum32()) % i.partitions 22 | } 23 | -------------------------------------------------------------------------------- /pkg/targets/common/generic_point_indexer.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "hash" 6 | "hash/fnv" 7 | ) 8 | 9 | // hashPropertySelectFn defines a function that 10 | // for a data.LoadedPoint return a byte array generated 11 | // from the point properties that will be 12 | // used to calculate the hash 13 | type hashPropertySelectFn func(point *data.LoadedPoint) []byte 14 | 15 | // GenericPointIndexer implements the targets.PointIndexer 16 | // where the input for the hash function is provided 17 | // as an input function 18 | type GenericPointIndexer struct { 19 | propertySelector hashPropertySelectFn 20 | hasher hash.Hash32 21 | maxPartitions uint 22 | } 23 | 24 | func NewGenericPointIndexer(maxPartitions uint, propertySelector hashPropertySelectFn) *GenericPointIndexer { 25 | return &GenericPointIndexer{ 26 | hasher: fnv.New32a(), 27 | propertySelector: propertySelector, 28 | maxPartitions: maxPartitions, 29 | } 30 | } 31 | 32 | func (g *GenericPointIndexer) GetIndex(point data.LoadedPoint) uint { 33 | g.hasher.Reset() 34 | g.hasher.Write(g.propertySelector(&point)) 35 | return uint(g.hasher.Sum32()) % g.maxPartitions 36 | } 37 | -------------------------------------------------------------------------------- /pkg/targets/constants/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | // Formats supported for generation 4 | const ( 5 | FormatCassandra = "cassandra" 6 | FormatClickhouse = "clickhouse" 7 | FormatInflux = "influx" 8 | FormatMongo = "mongo" 9 | FormatSiriDB = "siridb" 10 | FormatTimescaleDB = "timescaledb" 11 | FormatAkumuli = "akumuli" 12 | FormatCrateDB = "cratedb" 13 | FormatPrometheus = "prometheus" 14 | FormatVictoriaMetrics = "victoriametrics" 15 | FormatTimestream = "timestream" 16 | FormatQuestDB = "questdb" 17 | ) 18 | 19 | func SupportedFormats() []string { 20 | return []string{ 21 | FormatCassandra, 22 | FormatClickhouse, 23 | FormatInflux, 24 | FormatMongo, 25 | FormatSiriDB, 26 | FormatTimescaleDB, 27 | FormatAkumuli, 28 | FormatCrateDB, 29 | FormatPrometheus, 30 | FormatVictoriaMetrics, 31 | FormatTimestream, 32 | FormatQuestDB, 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pkg/targets/crate/implemented_target.go: -------------------------------------------------------------------------------- 1 | package crate 2 | 3 | 
import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | ) 11 | 12 | func NewTarget() targets.ImplementedTarget { 13 | return &crateTarget{} 14 | } 15 | 16 | type crateTarget struct { 17 | } 18 | 19 | func (t *crateTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 20 | flagSet.String(flagPrefix+"hosts", "localhost", "CrateDB hostnames") 21 | flagSet.Uint(flagPrefix+"port", 5432, "Port to connect to on the database instances") 22 | flagSet.String(flagPrefix+"user", "crate", "User to connect to CrateDB as") 23 | flagSet.String(flagPrefix+"pass", "", "Password for the user connecting to CrateDB") 24 | flagSet.Int(flagPrefix+"replicas", 0, "Number of replicas per metric table") 25 | flagSet.Int(flagPrefix+"shards", 5, "Number of shards per metric table") 26 | } 27 | 28 | func (t *crateTarget) TargetName() string { 29 | return constants.FormatCrateDB 30 | } 31 | 32 | func (t *crateTarget) Serializer() serialize.PointSerializer { 33 | return &Serializer{} 34 | } 35 | 36 | func (t *crateTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 37 | panic("not implemented") 38 | } 39 | -------------------------------------------------------------------------------- /pkg/targets/crate/serializer.go: -------------------------------------------------------------------------------- 1 | package crate 2 | 3 | import ( 4 | "fmt" 5 | "github.com/timescale/tsbs/pkg/data" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "io" 8 | ) 9 | 10 | const TAB = '\t' 11 | 12 | // Serializer writes a Point in a serialized form for CrateDB 13 | type Serializer struct{} 14 | 15 | // Serialize writes Point p to the given Writer w, so it can be loaded by the CrateDB 16 | // loader. The format is TSV with one line per point; each line contains the 17 | // measurement type, the tags with keys and values as a JSON object, the timestamp, 18 | // and the metric values. 19 | // 20 | // An example of a serialized point: 21 | // cpu\t{"hostname":"host_0","rack":"1"}\t1451606400000000000\t38\t0\t50\t41234 22 | func (s *Serializer) Serialize(p *data.Point, w io.Writer) error { 23 | buf := make([]byte, 0, 256) 24 | 25 | // measurement type 26 | buf = append(buf, p.MeasurementName()...) 27 | buf = append(buf, TAB) 28 | 29 | // tags 30 | tagKeys := p.TagKeys() 31 | tagValues := p.TagValues() 32 | if len(tagKeys) > 0 { 33 | buf = append(buf, '{') 34 | for i, key := range tagKeys { 35 | buf = append(buf, '"') 36 | buf = append(buf, key...) 37 | buf = append(buf, []byte("\":\"")...) 38 | buf = serialize.FastFormatAppend(tagValues[i], buf) 39 | buf = append(buf, []byte("\",")...) 40 | } 41 | buf = buf[:len(buf)-1] 42 | buf = append(buf, '}') 43 | } else { 44 | buf = append(buf, []byte("null")...) 45 | } 46 | 47 | // timestamp 48 | buf = append(buf, TAB) 49 | ts := fmt.Sprintf("%d", p.Timestamp().UTC().UnixNano()) 50 | buf = append(buf, ts...) 
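// Note: fmt.Sprintf above allocates an intermediate string; an equivalent
// strconv.AppendInt(buf, p.Timestamp().UTC().UnixNano(), 10) would append
// the digits directly into buf and avoid that allocation. This is only a
// possible micro-optimization, not something the loader requires.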
51 | 52 | // metrics 53 | fieldValues := p.FieldValues() 54 | for _, v := range fieldValues { 55 | buf = append(buf, TAB) 56 | buf = serialize.FastFormatAppend(v, buf) 57 | } 58 | buf = append(buf, '\n') 59 | _, err := w.Write(buf) 60 | return err 61 | } 62 | -------------------------------------------------------------------------------- /pkg/targets/crate/serializer_test.go: -------------------------------------------------------------------------------- 1 | package crate 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/serialize" 5 | "testing" 6 | ) 7 | 8 | func TestCrateDBSerializerSerialize(t *testing.T) { 9 | cases := []serialize.SerializeCase{ 10 | { 11 | Desc: "a regular Point", 12 | InputPoint: serialize.TestPointDefault(), 13 | Output: "cpu\t{\"hostname\":\"host_0\",\"region\":\"eu-west-1\",\"datacenter\":\"eu-west-1b\"}\t1451606400000000000\t38.24311829\n", 14 | }, 15 | { 16 | Desc: "a regular Point using int as value", 17 | InputPoint: serialize.TestPointInt(), 18 | Output: "cpu\t{\"hostname\":\"host_0\",\"region\":\"eu-west-1\",\"datacenter\":\"eu-west-1b\"}\t1451606400000000000\t38\n", 19 | }, 20 | { 21 | Desc: "a regular Point with multiple fields", 22 | InputPoint: serialize.TestPointMultiField(), 23 | Output: "cpu\t{\"hostname\":\"host_0\",\"region\":\"eu-west-1\",\"datacenter\":\"eu-west-1b\"}\t1451606400000000000\t5000000000\t38\t38.24311829\n", 24 | }, 25 | { 26 | Desc: "a Point with no tags", 27 | InputPoint: serialize.TestPointNoTags(), 28 | Output: "cpu\tnull\t1451606400000000000\t38.24311829\n", 29 | }, 30 | } 31 | 32 | serialize.SerializerTest(t, cases, &Serializer{}) 33 | } 34 | 35 | func TestCrateDBSerializerSerializeErr(t *testing.T) { 36 | p := serialize.TestPointMultiField() 37 | s := &Serializer{} 38 | err := s.Serialize(p, &serialize.ErrWriter{}) 39 | if err == nil { 40 | t.Errorf("no error returned when expected") 41 | } else if err.Error() != serialize.ErrWriterAlwaysErr { 42 | t.Errorf("unexpected writer error: %v", err) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /pkg/targets/creator.go: -------------------------------------------------------------------------------- 1 | package targets 2 | 3 | // DBCreator is an interface for a benchmark to do the initial setup of a database 4 | // in preparation for running a benchmark against it. 5 | type DBCreator interface { 6 | // Init should set up any connection or other setup for talking to the DB, but should NOT create any databases 7 | Init() 8 | 9 | // DBExists checks if a database with the given name currently exists. 10 | DBExists(dbName string) bool 11 | 12 | // CreateDB creates a database with the given name. 13 | CreateDB(dbName string) error 14 | 15 | // RemoveOldDB removes an existing database with the given name. 16 | RemoveOldDB(dbName string) error 17 | } 18 | 19 | // DBCreatorCloser is a DBCreator that also needs a Close method to cleanup any connections 20 | // after the benchmark is finished. 
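// A minimal conforming implementation could look like the following sketch
// (illustrative only; closingCreator and its fields are hypothetical and not
// part of this package):
//
//	type closingCreator struct {
//	    dbCreator          // embeds an existing DBCreator implementation
//	    conn io.Closer     // e.g., a database connection held open for the run
//	}
//
//	func (c *closingCreator) Close() { c.conn.Close() }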
21 | type DBCreatorCloser interface { 22 | DBCreator 23 | 24 | // Close cleans up any database connections 25 | Close() 26 | } 27 | 28 | // DBCreatorPost is a DBCreator that also needs to do some initialization after the 29 | // database is created (e.g., only one client should actually create the DB, so 30 | // non-creator clients should still set themselves up for writing) 31 | type DBCreatorPost interface { 32 | DBCreator 33 | 34 | // PostCreateDB does further initialization after the database is created 35 | PostCreateDB(dbName string) error 36 | } 37 | -------------------------------------------------------------------------------- /pkg/targets/influx/implemented_target.go: -------------------------------------------------------------------------------- 1 | package influx 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "time" 11 | ) 12 | 13 | func NewTarget() targets.ImplementedTarget { 14 | return &influxTarget{} 15 | } 16 | 17 | type influxTarget struct { 18 | } 19 | 20 | func (t *influxTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 21 | flagSet.String(flagPrefix+"urls", "http://localhost:8086", "InfluxDB URLs, comma-separated. Will be used in a round-robin fashion.") 22 | flagSet.Int(flagPrefix+"replication-factor", 1, "Cluster replication factor (only applies to clustered databases).") 23 | flagSet.String(flagPrefix+"consistency", "all", "Write consistency. Must be one of: any, one, quorum, all.") 24 | flagSet.String(flagPrefix+"auth-token", "", "Use the Authorization header with the Token scheme to provide your token to InfluxDB. 
If empty will not send the Authorization header.") 25 | flagSet.String(flagPrefix+"organization", "", "Organization name (InfluxDB v2).") 26 | flagSet.Duration(flagPrefix+"backoff", time.Second, "Time to sleep between requests when server indicates backpressure is needed.") 27 | flagSet.Bool(flagPrefix+"gzip", true, "Whether to gzip encode requests (default true).") 28 | } 29 | 30 | func (t *influxTarget) TargetName() string { 31 | return constants.FormatInflux 32 | } 33 | 34 | func (t *influxTarget) Serializer() serialize.PointSerializer { 35 | return &Serializer{} 36 | } 37 | 38 | func (t *influxTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 39 | panic("not implemented") 40 | } 41 | -------------------------------------------------------------------------------- /pkg/targets/influx/serializer_test.go: -------------------------------------------------------------------------------- 1 | package influx 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/serialize" 5 | "testing" 6 | ) 7 | 8 | func TestInfluxSerializerSerialize(t *testing.T) { 9 | cases := []serialize.SerializeCase{ 10 | { 11 | Desc: "a regular Point", 12 | InputPoint: serialize.TestPointDefault(), 13 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b usage_guest_nice=38.24311829 1451606400000000000\n", 14 | }, 15 | { 16 | Desc: "a regular Point using int as value", 17 | InputPoint: serialize.TestPointInt(), 18 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b usage_guest=38i 1451606400000000000\n", 19 | }, 20 | { 21 | Desc: "a regular Point with multiple fields", 22 | InputPoint: serialize.TestPointMultiField(), 23 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b big_usage_guest=5000000000i,usage_guest=38i,usage_guest_nice=38.24311829 1451606400000000000\n", 24 | }, 25 | { 26 | Desc: "a Point with no tags", 27 | InputPoint: serialize.TestPointNoTags(), 28 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 29 | }, { 30 | Desc: "a Point with a nil tag", 31 | InputPoint: serialize.TestPointWithNilTag(), 32 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 33 | }, { 34 | Desc: "a Point with a nil field", 35 | InputPoint: serialize.TestPointWithNilField(), 36 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 37 | }, 38 | } 39 | 40 | serialize.SerializerTest(t, cases, &Serializer{}) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/targets/initializers/target_initializers.go: -------------------------------------------------------------------------------- 1 | package initializers 2 | 3 | import ( 4 | "fmt" 5 | "github.com/timescale/tsbs/pkg/targets" 6 | "github.com/timescale/tsbs/pkg/targets/akumuli" 7 | "github.com/timescale/tsbs/pkg/targets/cassandra" 8 | "github.com/timescale/tsbs/pkg/targets/clickhouse" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "github.com/timescale/tsbs/pkg/targets/crate" 11 | "github.com/timescale/tsbs/pkg/targets/influx" 12 | "github.com/timescale/tsbs/pkg/targets/mongo" 13 | "github.com/timescale/tsbs/pkg/targets/prometheus" 14 | "github.com/timescale/tsbs/pkg/targets/questdb" 15 | "github.com/timescale/tsbs/pkg/targets/siridb" 16 | "github.com/timescale/tsbs/pkg/targets/timescaledb" 17 | "github.com/timescale/tsbs/pkg/targets/timestream" 18 | "github.com/timescale/tsbs/pkg/targets/victoriametrics" 19 | "strings" 20 | ) 21 | 22 | func GetTarget(format string) 
targets.ImplementedTarget { 23 | switch format { 24 | case constants.FormatTimescaleDB: 25 | return timescaledb.NewTarget() 26 | case constants.FormatAkumuli: 27 | return akumuli.NewTarget() 28 | case constants.FormatCassandra: 29 | return cassandra.NewTarget() 30 | case constants.FormatClickhouse: 31 | return clickhouse.NewTarget() 32 | case constants.FormatCrateDB: 33 | return crate.NewTarget() 34 | case constants.FormatInflux: 35 | return influx.NewTarget() 36 | case constants.FormatMongo: 37 | return mongo.NewTarget() 38 | case constants.FormatPrometheus: 39 | return prometheus.NewTarget() 40 | case constants.FormatSiriDB: 41 | return siridb.NewTarget() 42 | case constants.FormatVictoriaMetrics: 43 | return victoriametrics.NewTarget() 44 | case constants.FormatTimestream: 45 | return timestream.NewTarget() 46 | case constants.FormatQuestDB: 47 | return questdb.NewTarget() 48 | } 49 | 50 | supportedFormatsStr := strings.Join(constants.SupportedFormats(), ",") 51 | panic(fmt.Sprintf("Unrecognized format %s, supported: %s", format, supportedFormatsStr)) 52 | } 53 | -------------------------------------------------------------------------------- /pkg/targets/mongo/MongoReading.go: -------------------------------------------------------------------------------- 1 | // automatically generated by the FlatBuffers compiler, do not modify 2 | 3 | package mongo 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type MongoReading struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsMongoReading(buf []byte, offset flatbuffers.UOffsetT) *MongoReading { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &MongoReading{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *MongoReading) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *MongoReading) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *MongoReading) Key() []byte { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 33 | } 34 | return nil 35 | } 36 | 37 | func (rcv *MongoReading) Value() float64 { 38 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 39 | if o != 0 { 40 | return rcv._tab.GetFloat64(o + rcv._tab.Pos) 41 | } 42 | return 0.0 43 | } 44 | 45 | func (rcv *MongoReading) MutateValue(n float64) bool { 46 | return rcv._tab.MutateFloat64Slot(6, n) 47 | } 48 | 49 | func MongoReadingStart(builder *flatbuffers.Builder) { 50 | builder.StartObject(2) 51 | } 52 | func MongoReadingAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { 53 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) 54 | } 55 | func MongoReadingAddValue(builder *flatbuffers.Builder, value float64) { 56 | builder.PrependFloat64Slot(1, value, 0.0) 57 | } 58 | func MongoReadingEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 59 | return builder.EndObject() 60 | } 61 | -------------------------------------------------------------------------------- /pkg/targets/mongo/MongoTag.go: -------------------------------------------------------------------------------- 1 | // automatically generated by the FlatBuffers compiler, do not modify 2 | 3 | package mongo 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type MongoTag struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsMongoTag(buf []byte, offset flatbuffers.UOffsetT) *MongoTag { 14 | n := 
flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &MongoTag{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *MongoTag) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *MongoTag) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *MongoTag) Key() []byte { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 33 | } 34 | return nil 35 | } 36 | 37 | func (rcv *MongoTag) Value() []byte { 38 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 39 | if o != 0 { 40 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 41 | } 42 | return nil 43 | } 44 | 45 | func MongoTagStart(builder *flatbuffers.Builder) { 46 | builder.StartObject(2) 47 | } 48 | func MongoTagAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { 49 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) 50 | } 51 | func MongoTagAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { 52 | builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) 53 | } 54 | func MongoTagEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 55 | return builder.EndObject() 56 | } 57 | -------------------------------------------------------------------------------- /pkg/targets/mongo/implemented_target.go: -------------------------------------------------------------------------------- 1 | package mongo 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "time" 11 | ) 12 | 13 | func NewTarget() targets.ImplementedTarget { 14 | return &mongoTarget{} 15 | } 16 | 17 | type mongoTarget struct { 18 | } 19 | 20 | func (t *mongoTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 21 | flagSet.String(flagPrefix+"url", "localhost:27017", "Mongo URL.") 22 | flagSet.Duration(flagPrefix+"write-timeout", 10*time.Second, "Write timeout.") 23 | flagSet.Bool(flagPrefix+"document-per-event", false, "Whether to use one document per event or aggregate by hour") 24 | } 25 | 26 | func (t *mongoTarget) TargetName() string { 27 | return constants.FormatMongo 28 | } 29 | 30 | func (t *mongoTarget) Serializer() serialize.PointSerializer { 31 | return &Serializer{} 32 | } 33 | 34 | func (t *mongoTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 35 | panic("not implemented") 36 | } 37 | -------------------------------------------------------------------------------- /pkg/targets/mongo/mongo.fbs: -------------------------------------------------------------------------------- 1 | // mongo.fbs 2 | namespace serialize; 3 | table MongoTag { 4 | key:string; 5 | value:string; 6 | } 7 | 8 | table MongoReading { 9 | key:string; 10 | value:double; 11 | } 12 | 13 | table MongoPoint { 14 | measurementName:string; 15 | timestamp:long; 16 | tags:[MongoTag]; 17 | fields:[MongoReading]; 18 | } 19 | 20 | root_type MongoPoint; 21 | -------------------------------------------------------------------------------- /pkg/targets/processor.go: -------------------------------------------------------------------------------- 1 | package targets 2 | 3 | // Processor is a type that processes the work for a loading worker 4 | type Processor interface { 5 | // Init does per-worker setup needed before receiving data 
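// (workerNum identifies the calling worker; doLoad is false for dry runs in
// which nothing should actually be written to the database; hashWorkers reports
// whether points are partitioned to workers by hash. These semantics are
// inferred from how the loaders use the parameters.)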
6 | Init(workerNum int, doLoad, hashWorkers bool) 7 | // ProcessBatch handles a single batch of data 8 | ProcessBatch(b Batch, doLoad bool) (metricCount, rowCount uint64) 9 | } 10 | 11 | // ProcessorCloser is a Processor that also needs to close or cleanup afterwards 12 | type ProcessorCloser interface { 13 | Processor 14 | // Close cleans up after a Processor 15 | Close(doLoad bool) 16 | } 17 | -------------------------------------------------------------------------------- /pkg/targets/prometheus/benchmark_test.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/timescale/promscale/pkg/prompb" 5 | "github.com/timescale/tsbs/cmd/tsbs_load_prometheus/adapter/noop" 6 | "net/http" 7 | "net/http/httptest" 8 | "net/url" 9 | "sync" 10 | "testing" 11 | ) 12 | 13 | func TestPrometheusLoader(t *testing.T) { 14 | adapter := noop.Adapter{} 15 | server := httptest.NewServer(http.HandlerFunc(adapter.Handler)) 16 | serverURL, err := url.Parse(server.URL) 17 | if err != nil { 18 | t.Fatal(err) 19 | } 20 | pb := Benchmark{ 21 | adapterWriteUrl: serverURL.String(), 22 | batchPool: &sync.Pool{}, 23 | } 24 | pp := pb.GetProcessor().(*Processor) 25 | batch := &Batch{series: []prompb.TimeSeries{{}}} 26 | samples, _ := pp.ProcessBatch(batch, true) 27 | if samples != 1 { 28 | t.Error("wrong number of samples") 29 | } 30 | if adapter.SampleCounter != samples { 31 | t.Error("wrong number of samples processed") 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /pkg/targets/prometheus/db_specific_config.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import "github.com/blagojts/viper" 4 | 5 | type SpecificConfig struct { 6 | AdapterWriteURL string `yaml:"adapter-write-url" mapstructure:"adapter-write-url"` 7 | UseCurrentTime bool `yaml:"use-current-time" mapstructure:"use-current-time"` 8 | } 9 | 10 | func parseSpecificConfig(v *viper.Viper) (*SpecificConfig, error) { 11 | var conf SpecificConfig 12 | if err := v.Unmarshal(&conf); err != nil { 13 | return nil, err 14 | } 15 | return &conf, nil 16 | } 17 | -------------------------------------------------------------------------------- /pkg/targets/prometheus/implemented_target.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | ) 11 | 12 | func NewTarget() targets.ImplementedTarget { 13 | return &prometheusTarget{} 14 | } 15 | 16 | type prometheusTarget struct { 17 | } 18 | 19 | func (t *prometheusTarget) TargetName() string { 20 | return constants.FormatPrometheus 21 | } 22 | 23 | func (t *prometheusTarget) Serializer() serialize.PointSerializer { 24 | return &Serializer{} 25 | } 26 | 27 | func (t *prometheusTarget) Benchmark(_ string, dataSourceConfig *source.DataSourceConfig, v *viper.Viper) (targets.Benchmark, error) { 28 | promSpecificConfig, err := parseSpecificConfig(v) 29 | if err != nil { 30 | return nil, err 31 | } 32 | return NewBenchmark(promSpecificConfig, dataSourceConfig) 33 | } 34 | 35 | func (t *prometheusTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 36 | 
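// The flagPrefix argument namespaces these flags (for example a prefix such as
// "prometheus-"; the exact value is supplied by the caller) so that several
// targets can register their flags on one shared FlagSet without collisions.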
flagSet.String(flagPrefix+"adapter-write-url", "http://localhost:9201/write", "Prometheus adapter url to send data to") 37 | flagSet.Bool(flagPrefix+"use-current-time", false, "Whether to replace the simulated timestamp with the current timestamp") 38 | } 39 | -------------------------------------------------------------------------------- /pkg/targets/questdb/implemented_target.go: -------------------------------------------------------------------------------- 1 | package questdb 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | ) 11 | 12 | func NewTarget() targets.ImplementedTarget { 13 | return &influxTarget{} 14 | } 15 | 16 | type influxTarget struct { 17 | } 18 | 19 | func (t *influxTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 20 | flagSet.String(flagPrefix+"url", "http://localhost:9000/", "QuestDB REST end point") 21 | flagSet.String(flagPrefix+"ilp-bind-to", "127.0.0.1:9009", "QuestDB influx line protocol TCP ip:port") 22 | } 23 | 24 | func (t *influxTarget) TargetName() string { 25 | return constants.FormatQuestDB 26 | } 27 | 28 | func (t *influxTarget) Serializer() serialize.PointSerializer { 29 | return &Serializer{} 30 | } 31 | 32 | func (t *influxTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 33 | panic("not implemented") 34 | } 35 | -------------------------------------------------------------------------------- /pkg/targets/questdb/serializer_test.go: -------------------------------------------------------------------------------- 1 | package questdb 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/serialize" 5 | "testing" 6 | ) 7 | 8 | func TestInfluxSerializerSerialize(t *testing.T) { 9 | cases := []serialize.SerializeCase{ 10 | { 11 | Desc: "a regular Point", 12 | InputPoint: serialize.TestPointDefault(), 13 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b usage_guest_nice=38.24311829 1451606400000000000\n", 14 | }, 15 | { 16 | Desc: "a regular Point using int as value", 17 | InputPoint: serialize.TestPointInt(), 18 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b usage_guest=38i 1451606400000000000\n", 19 | }, 20 | { 21 | Desc: "a regular Point with multiple fields", 22 | InputPoint: serialize.TestPointMultiField(), 23 | Output: "cpu,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b big_usage_guest=5000000000i,usage_guest=38i,usage_guest_nice=38.24311829 1451606400000000000\n", 24 | }, 25 | { 26 | Desc: "a Point with no tags", 27 | InputPoint: serialize.TestPointNoTags(), 28 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 29 | }, { 30 | Desc: "a Point with a nil tag", 31 | InputPoint: serialize.TestPointWithNilTag(), 32 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 33 | }, { 34 | Desc: "a Point with a nil field", 35 | InputPoint: serialize.TestPointWithNilField(), 36 | Output: "cpu usage_guest_nice=38.24311829 1451606400000000000\n", 37 | }, 38 | } 39 | 40 | serialize.SerializerTest(t, cases, &Serializer{}) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/targets/siridb/implemented_target.go: -------------------------------------------------------------------------------- 1 | package siridb 2 | 3 | import ( 4 | 
"github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | ) 11 | 12 | func NewTarget() targets.ImplementedTarget { 13 | return &siriTarget{} 14 | } 15 | 16 | type siriTarget struct { 17 | } 18 | 19 | func (t *siriTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 20 | flagSet.String(flagPrefix+"dbuser", "iris", "Username to enter SiriDB") 21 | flagSet.String(flagPrefix+"dbpass", "siri", "Password to enter SiriDB") 22 | 23 | flagSet.String(flagPrefix+"hosts", "localhost:9000", "Provide 1 or 2 (comma seperated) SiriDB hosts. If 2 hosts are provided, 2 pools are created.") 24 | flagSet.Bool(flagPrefix+"replica", false, "Whether to create a replica instead of a second pool, when two hosts are provided.") 25 | 26 | flagSet.Bool(flagPrefix+"log-batches", false, "Whether to time individual batches.") 27 | flagSet.Int(flagPrefix+"write-timeout", 10, "Write timeout.") 28 | } 29 | 30 | func (t *siriTarget) TargetName() string { 31 | return constants.FormatSiriDB 32 | } 33 | 34 | func (t *siriTarget) Serializer() serialize.PointSerializer { 35 | return &Serializer{} 36 | } 37 | 38 | func (t *siriTarget) Benchmark(string, *source.DataSourceConfig, *viper.Viper) (targets.Benchmark, error) { 39 | panic("not implemented") 40 | } 41 | -------------------------------------------------------------------------------- /pkg/targets/timescaledb/benchmark.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "github.com/timescale/tsbs/internal/inputs" 5 | "github.com/timescale/tsbs/pkg/data/source" 6 | "github.com/timescale/tsbs/pkg/targets" 7 | ) 8 | 9 | const pgxDriver = "pgx" 10 | const pqDriver = "postgres" 11 | 12 | func NewBenchmark(dbName string, opts *LoadingOptions, dataSourceConfig *source.DataSourceConfig) (targets.Benchmark, error) { 13 | var ds targets.DataSource 14 | if dataSourceConfig.Type == source.FileDataSourceType { 15 | ds = newFileDataSource(dataSourceConfig.File.Location) 16 | } else { 17 | dataGenerator := &inputs.DataGenerator{} 18 | simulator, err := dataGenerator.CreateSimulator(dataSourceConfig.Simulator) 19 | if err != nil { 20 | return nil, err 21 | } 22 | ds = newSimulationDataSource(simulator) 23 | } 24 | 25 | return &benchmark{ 26 | opts: opts, 27 | ds: ds, 28 | dbName: dbName, 29 | }, nil 30 | } 31 | 32 | type benchmark struct { 33 | opts *LoadingOptions 34 | ds targets.DataSource 35 | dbName string 36 | } 37 | 38 | func (b *benchmark) GetDataSource() targets.DataSource { 39 | return b.ds 40 | } 41 | 42 | func (b *benchmark) GetBatchFactory() targets.BatchFactory { 43 | return &factory{} 44 | } 45 | 46 | func (b *benchmark) GetPointIndexer(maxPartitions uint) targets.PointIndexer { 47 | if maxPartitions > 1 { 48 | return &hostnameIndexer{partitions: maxPartitions} 49 | } 50 | return &targets.ConstantIndexer{} 51 | } 52 | 53 | func (b *benchmark) GetProcessor() targets.Processor { 54 | return newProcessor(b.opts, getDriver(b.opts.ForceTextFormat), b.dbName) 55 | } 56 | 57 | func (b *benchmark) GetDBCreator() targets.DBCreator { 58 | return &dbCreator{ 59 | opts: b.opts, 60 | connDB: b.opts.ConnDB, 61 | ds: b.ds, 62 | driver: getDriver(b.opts.ForceTextFormat), 63 | connStr: b.opts.GetConnectString(b.dbName), 64 | } 65 | } 66 | 67 | func getDriver(forceTextFormat bool) string { 68 | if 
forceTextFormat { 69 | return pqDriver 70 | } else { 71 | return pgxDriver 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /pkg/targets/timescaledb/program_options_test.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestGetConnectString(t *testing.T) { 9 | wantHost := "localhost" 10 | wantDB := "benchmark" 11 | wantUser := "postgres" 12 | wantPort := "5432" 13 | want := fmt.Sprintf("host=%s dbname=%s user=%s ssl=disable port=5432", wantHost, wantDB, wantUser) 14 | cases := []struct { 15 | desc string 16 | pgConnect string 17 | }{ 18 | { 19 | desc: "replace host, dbname, user", 20 | pgConnect: "host=foo dbname=bar user=joe ssl=disable", 21 | }, 22 | { 23 | desc: "replace just some", 24 | pgConnect: "host=foo dbname=bar ssl=disable", 25 | }, 26 | { 27 | desc: "no replace", 28 | pgConnect: "ssl=disable", 29 | }, 30 | } 31 | 32 | for _, c := range cases { 33 | opts := LoadingOptions{Port: wantPort, Host: wantHost, User: wantUser, PostgresConnect: c.pgConnect} 34 | cstr := opts.GetConnectString(wantDB) 35 | if cstr != want { 36 | t.Errorf("%s: incorrect connect string: got %s want %s", c.desc, cstr, want) 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /pkg/targets/timescaledb/scan.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "hash/fnv" 5 | "strings" 6 | 7 | "github.com/timescale/tsbs/pkg/data" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | ) 10 | 11 | const ( 12 | defaultReadSize = 4 << 20 // 4 MB 13 | ) 14 | 15 | // hostnameIndexer is used to consistently send the same hostnames to the same worker 16 | type hostnameIndexer struct { 17 | partitions uint 18 | } 19 | 20 | func (i *hostnameIndexer) GetIndex(item data.LoadedPoint) uint { 21 | p := item.Data.(*point) 22 | hostname := strings.SplitN(p.row.tags, ",", 2)[0] 23 | h := fnv.New32a() 24 | h.Write([]byte(hostname)) 25 | return uint(h.Sum32()) % i.partitions 26 | } 27 | 28 | // point is a single row of data keyed by which hypertable it belongs to 29 | type point struct { 30 | hypertable string 31 | row *insertData 32 | } 33 | 34 | type hypertableArr struct { 35 | m map[string][]*insertData 36 | cnt uint 37 | } 38 | 39 | func (ha *hypertableArr) Len() uint { 40 | return ha.cnt 41 | } 42 | 43 | func (ha *hypertableArr) Append(item data.LoadedPoint) { 44 | that := item.Data.(*point) 45 | k := that.hypertable 46 | ha.m[k] = append(ha.m[k], that.row) 47 | ha.cnt++ 48 | } 49 | 50 | type factory struct{} 51 | 52 | func (f *factory) New() targets.Batch { 53 | return &hypertableArr{ 54 | m: map[string][]*insertData{}, 55 | cnt: 0, 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /pkg/targets/timescaledb/serializer.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "fmt" 5 | "github.com/timescale/tsbs/pkg/data" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "io" 8 | ) 9 | 10 | // Serializer writes a Point in a serialized form for TimescaleDB 11 | type Serializer struct{} 12 | 13 | // Serialize writes Point p to the given Writer w, so it can be 14 | // loaded by the TimescaleDB loader. 
The format is CSV with two lines per Point, 15 | // with the first row being the tags and the second row being the field values. 16 | // 17 | // e.g., 18 | // tags,<tag1 name>=<tag1 value>,<tag2 name>=<tag2 value>,... 19 | // <measurement name>,<timestamp in ns>,<field1 value>,<field2 value>,... 20 | func (s *Serializer) Serialize(p *data.Point, w io.Writer) error { 21 | // Tag row first, prefixed with name 'tags' 22 | buf := make([]byte, 0, 256) 23 | buf = append(buf, []byte("tags")...) 24 | tagKeys := p.TagKeys() 25 | tagValues := p.TagValues() 26 | for i, v := range tagValues { 27 | buf = append(buf, ',') 28 | buf = append(buf, tagKeys[i]...) 29 | buf = append(buf, '=') 30 | buf = serialize.FastFormatAppend(v, buf) 31 | } 32 | buf = append(buf, '\n') 33 | _, err := w.Write(buf) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | // Field row second 39 | buf = make([]byte, 0, 256) 40 | buf = append(buf, p.MeasurementName()...) 41 | buf = append(buf, ',') 42 | buf = append(buf, []byte(fmt.Sprintf("%d", p.Timestamp().UTC().UnixNano()))...) 43 | fieldValues := p.FieldValues() 44 | for _, v := range fieldValues { 45 | buf = append(buf, ',') 46 | buf = serialize.FastFormatAppend(v, buf) 47 | } 48 | buf = append(buf, '\n') 49 | _, err = w.Write(buf) 50 | return err 51 | } 52 | -------------------------------------------------------------------------------- /pkg/targets/timescaledb/serializer_test.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data/serialize" 5 | "testing" 6 | ) 7 | 8 | func TestTimescaleDBSerializerSerialize(t *testing.T) { 9 | cases := []serialize.SerializeCase{ 10 | { 11 | Desc: "a regular Point", 12 | InputPoint: serialize.TestPointDefault(), 13 | Output: "tags,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b\ncpu,1451606400000000000,38.24311829\n", 14 | }, 15 | { 16 | Desc: "a regular Point using int as value", 17 | InputPoint: serialize.TestPointInt(), 18 | Output: "tags,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b\ncpu,1451606400000000000,38\n", 19 | }, 20 | { 21 | Desc: "a regular Point with multiple fields", 22 | InputPoint: serialize.TestPointMultiField(), 23 | Output: "tags,hostname=host_0,region=eu-west-1,datacenter=eu-west-1b\ncpu,1451606400000000000,5000000000,38,38.24311829\n", 24 | }, 25 | { 26 | Desc: "a Point with no tags", 27 | InputPoint: serialize.TestPointNoTags(), 28 | Output: "tags\ncpu,1451606400000000000,38.24311829\n", 29 | }, 30 | } 31 | 32 | serialize.SerializerTest(t, cases, &Serializer{}) 33 | } 34 | 35 | func TestTimescaleDBSerializerSerializeErr(t *testing.T) { 36 | p := serialize.TestPointMultiField() 37 | s := &Serializer{} 38 | err := s.Serialize(p, &serialize.ErrWriter{}) 39 | if err == nil { 40 | t.Errorf("no error returned when expected") 41 | } else if err.Error() != serialize.ErrWriterAlwaysErr { 42 | t.Errorf("unexpected writer error: %v", err) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /pkg/targets/timestream/aws_session.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | "github.com/aws/aws-sdk-go/aws/session" 6 | "golang.org/x/net/http2" 7 | "net" 8 | "net/http" 9 | "time" 10 | ) 11 | 12 | func OpenAWSSession(awsRegion *string, timeout time.Duration) (*session.Session, error) { 13 | tr := &http.Transport{ 14 | ResponseHeaderTimeout: 20 * time.Second, 15 | // Using DefaultTransport values for other parameters: 
https://golang.org/pkg/net/http/#RoundTripper 16 | Proxy: http.ProxyFromEnvironment, 17 | DialContext: (&net.Dialer{ 18 | KeepAlive: 30 * time.Second, 19 | Timeout: timeout, 20 | }).DialContext, 21 | MaxIdleConns: 100, 22 | IdleConnTimeout: 90 * time.Second, 23 | TLSHandshakeTimeout: 10 * time.Second, 24 | ExpectContinueTimeout: 1 * time.Second, 25 | } 26 | if err := http2.ConfigureTransport(tr); err != nil { 27 | panic("could not configure http transport: " + err.Error()) 28 | 29 | } 30 | return session.NewSession(&aws.Config{ 31 | Region: awsRegion, 32 | MaxRetries: aws.Int(10), 33 | HTTPClient: &http.Client{Transport: tr}}) 34 | } 35 | -------------------------------------------------------------------------------- /pkg/targets/timestream/batch.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import ( 4 | "github.com/timescale/tsbs/pkg/data" 5 | "github.com/timescale/tsbs/pkg/targets" 6 | "sync" 7 | ) 8 | 9 | // NewBatchFactory returns a object pool backed 10 | // batch factory that produces batches that hold 11 | // timestream deserialized points 12 | func NewBatchFactory() *batchFactory { 13 | // TODO modify targets.BatchFactory to have 14 | // a Return method so the pool is not passed around 15 | // different objects 16 | pool := &sync.Pool{New: func() interface{} { 17 | return &batch{rows: make(map[string][]deserializedPoint)} 18 | }} 19 | return &batchFactory{pool: pool} 20 | } 21 | 22 | // batch implements targets.Batch interface 23 | type batch struct { 24 | // keep the rows per table 25 | rows map[string][]deserializedPoint 26 | // total number of points 27 | cnt uint 28 | } 29 | 30 | func (b *batch) Len() uint { 31 | return b.cnt 32 | } 33 | 34 | func (b *batch) Append(item data.LoadedPoint) { 35 | var point deserializedPoint 36 | point = *item.Data.(*deserializedPoint) 37 | table := point.table 38 | b.rows[table] = append(b.rows[table], point) 39 | b.cnt++ 40 | } 41 | 42 | func (b *batch) reset() { 43 | b.rows = map[string][]deserializedPoint{} 44 | b.cnt = 0 45 | } 46 | 47 | // batchFactory implements the targets.BatchFactory interface 48 | type batchFactory struct { 49 | pool *sync.Pool 50 | } 51 | 52 | func (b *batchFactory) New() targets.Batch { 53 | return b.pool.Get().(*batch) 54 | } 55 | -------------------------------------------------------------------------------- /pkg/targets/timestream/config.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | ) 7 | 8 | type SpecificConfig struct { 9 | UseCommonAttributes bool `yaml:"use-common-attributes" mapstructure:"use-common-attributes"` 10 | AwsRegion string `yaml:"aws-region" mapstructure:"aws-region"` 11 | HashProperty string `yaml:"hash-property" mapstructure:"hash-property"` 12 | UseCurrentTime bool `yaml:"use-current-time" mapstructure:"use-current-time"` 13 | MagStoreRetentionInDays int64 `yaml:"mag-store-retention-in-days" mapstructure:"mag-store-retention-in-days"` 14 | MemStoreRetentionInHours int64 `yaml:"mem-store-retention-in-hours" mapstructure:"mem-store-retention-in-hours"` 15 | } 16 | 17 | func parseSpecificConfig(v *viper.Viper) (*SpecificConfig, error) { 18 | var conf SpecificConfig 19 | if err := v.Unmarshal(&conf); err != nil { 20 | return nil, err 21 | } 22 | return &conf, nil 23 | } 24 | 25 | func targetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 26 | flagSet.Bool( 27 | 
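// Common attributes let a single WriteRecords request share dimensions and
// other fields across all records in the batch (per the AWS Timestream write
// API), which keeps request payloads noticeably smaller than sending every
// record fully expanded.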
flagPrefix+"use-common-attributes", 28 | true, 29 | "Timestream client makes write requests with common attributes. "+ 30 | "If false, each value is written as a separate Record and a request of 100 records at once is sent") 31 | flagSet.String(flagPrefix+"aws-region", "us-east-1", "AWS region where the db is located") 32 | flagSet.String( 33 | flagPrefix+"hash-property", 34 | "hostname", 35 | "Dimension to use when hashing points to different workers", 36 | ) 37 | flagSet.Bool( 38 | flagPrefix+"use-current-time", 39 | false, 40 | "Use the local current timestamp when generating the records to load") 41 | flagSet.Int64( 42 | "mag-store-retention-in-days", 43 | 180, 44 | "The duration for which data must be stored in the magnetic store", 45 | ) 46 | flagSet.Int64( 47 | flagPrefix+"mem-store-retention-in-hours", 48 | 12, 49 | "The duration for which data must be stored in the memory store") 50 | } 51 | -------------------------------------------------------------------------------- /pkg/targets/timestream/deserialized_point.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | // deserializedPoint is a struct used by the Timestream 4 | // loader to send data to the db. All the fields are strings 5 | // because the Timestream SDK accepts only string values 6 | // with the types specified separately via enums 7 | type deserializedPoint struct { 8 | timeUnixNano string 9 | table string 10 | tags []string 11 | tagKeys []string 12 | fields []*string 13 | } 14 | -------------------------------------------------------------------------------- /pkg/targets/timestream/implemented_target.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/pkg/errors" 6 | "github.com/spf13/pflag" 7 | "github.com/timescale/tsbs/pkg/data/serialize" 8 | "github.com/timescale/tsbs/pkg/data/source" 9 | "github.com/timescale/tsbs/pkg/targets" 10 | "github.com/timescale/tsbs/pkg/targets/constants" 11 | ) 12 | 13 | type implementedTarget struct{} 14 | 15 | func NewTarget() targets.ImplementedTarget { 16 | return implementedTarget{} 17 | } 18 | 19 | func (i implementedTarget) Benchmark(targetDb string, dataSourceConfig *source.DataSourceConfig, v *viper.Viper) (targets.Benchmark, error) { 20 | specificConfig, err := parseSpecificConfig(v) 21 | if err != nil { 22 | return nil, errors.Wrap(err, "could not create benchmark") 23 | } 24 | return newBenchmark(targetDb, specificConfig, dataSourceConfig) 25 | } 26 | 27 | func (i implementedTarget) Serializer() serialize.PointSerializer { 28 | return &serializer{} 29 | } 30 | 31 | func (i implementedTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 32 | targetSpecificFlags(flagPrefix, flagSet) 33 | } 34 | 35 | func (i implementedTarget) TargetName() string { 36 | return constants.FormatTimestream 37 | } 38 | -------------------------------------------------------------------------------- /pkg/targets/timestream/serializer.go: -------------------------------------------------------------------------------- 1 | package timestream 2 | 3 | import "github.com/timescale/tsbs/pkg/targets/timescaledb" 4 | 5 | type serializer = timescaledb.Serializer 6 | -------------------------------------------------------------------------------- /pkg/targets/victoriametrics/batch.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import 
( 4 | "bytes" 5 | "github.com/timescale/tsbs/pkg/data" 6 | "log" 7 | ) 8 | 9 | const errNotThreeTuplesFmt = "parse error: line does not have 3 tuples, has %d" 10 | 11 | var ( 12 | spaceSep = []byte(" ") 13 | commaSep = []byte(",") 14 | newLine = []byte("\n") 15 | ) 16 | 17 | type batch struct { 18 | buf *bytes.Buffer 19 | rows uint64 20 | metrics uint64 21 | } 22 | 23 | func (b *batch) Len() uint { 24 | return uint(b.rows) 25 | } 26 | 27 | func (b *batch) Append(item data.LoadedPoint) { 28 | that := item.Data.([]byte) 29 | b.rows++ 30 | 31 | // Each influx line is format "csv-tags csv-fields timestamp" 32 | if args := bytes.Count(that, spaceSep); args != 2 { 33 | log.Fatalf(errNotThreeTuplesFmt, args+1) 34 | return 35 | } 36 | 37 | // seek for fields position in slice 38 | fieldsPos := bytes.Index(that, spaceSep) 39 | // seek for timestamps position in slice 40 | timestampPos := bytes.Index(that[fieldsPos+1:], spaceSep) + fieldsPos 41 | fields := that[fieldsPos+1 : timestampPos] 42 | b.metrics += uint64(bytes.Count(fields, commaSep) + 1) 43 | 44 | b.buf.Write(that) 45 | b.buf.Write(newLine) 46 | } 47 | -------------------------------------------------------------------------------- /pkg/targets/victoriametrics/benchmark.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "github.com/blagojts/viper" 8 | "github.com/timescale/tsbs/load" 9 | "github.com/timescale/tsbs/pkg/data/source" 10 | "github.com/timescale/tsbs/pkg/targets" 11 | "sync" 12 | ) 13 | 14 | type SpecificConfig struct { 15 | ServerURLs []string `yaml:"urls" mapstructure:"urls"` 16 | } 17 | 18 | func parseSpecificConfig(v *viper.Viper) (*SpecificConfig, error) { 19 | var conf SpecificConfig 20 | if err := v.Unmarshal(&conf); err != nil { 21 | return nil, err 22 | } 23 | return &conf, nil 24 | } 25 | 26 | // loader.Benchmark interface implementation 27 | type benchmark struct { 28 | serverURLs []string 29 | dataSource targets.DataSource 30 | } 31 | 32 | func NewBenchmark(vmSpecificConfig *SpecificConfig, dataSourceConfig *source.DataSourceConfig) (targets.Benchmark, error) { 33 | if dataSourceConfig.Type != source.FileDataSourceType { 34 | return nil, errors.New("only FILE data source type is supported for VictoriaMetrics") 35 | } 36 | 37 | br := load.GetBufferedReader(dataSourceConfig.File.Location) 38 | return &benchmark{ 39 | dataSource: &fileDataSource{ 40 | scanner: bufio.NewScanner(br), 41 | }, 42 | serverURLs: vmSpecificConfig.ServerURLs, 43 | }, nil 44 | } 45 | 46 | func (b *benchmark) GetDataSource() targets.DataSource { 47 | return b.dataSource 48 | } 49 | 50 | func (b *benchmark) GetBatchFactory() targets.BatchFactory { 51 | bufPool := sync.Pool{ 52 | New: func() interface{} { 53 | return bytes.NewBuffer(make([]byte, 0, 16*1024*1024)) 54 | }, 55 | } 56 | return &factory{bufPool: &bufPool} 57 | } 58 | 59 | func (b *benchmark) GetPointIndexer(maxPartitions uint) targets.PointIndexer { 60 | return &targets.ConstantIndexer{} 61 | } 62 | 63 | func (b *benchmark) GetProcessor() targets.Processor { 64 | return &processor{vmURLs: b.serverURLs} 65 | } 66 | 67 | func (b *benchmark) GetDBCreator() targets.DBCreator { 68 | return &dbCreator{} 69 | } 70 | 71 | type factory struct { 72 | bufPool *sync.Pool 73 | } 74 | 75 | func (f *factory) New() targets.Batch { 76 | return &batch{buf: f.bufPool.Get().(*bytes.Buffer)} 77 | } 78 | -------------------------------------------------------------------------------- 
/pkg/targets/victoriametrics/creator.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | // VictoriaMetrics doesn't have a database abstraction 4 | type dbCreator struct{} 5 | 6 | func (d *dbCreator) Init() {} 7 | 8 | func (d *dbCreator) DBExists(dbName string) bool { return true } 9 | 10 | func (d *dbCreator) CreateDB(dbName string) error { return nil } 11 | 12 | func (d *dbCreator) RemoveOldDB(dbName string) error { return nil } 13 | -------------------------------------------------------------------------------- /pkg/targets/victoriametrics/data_source.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import ( 4 | "bufio" 5 | "github.com/timescale/tsbs/pkg/data" 6 | "github.com/timescale/tsbs/pkg/data/usecases/common" 7 | "log" 8 | ) 9 | 10 | type fileDataSource struct { 11 | scanner *bufio.Scanner 12 | } 13 | 14 | func (f fileDataSource) NextItem() data.LoadedPoint { 15 | ok := f.scanner.Scan() 16 | if !ok && f.scanner.Err() == nil { // nothing scanned & no error = EOF 17 | return data.LoadedPoint{} 18 | } else if !ok { 19 | log.Fatalf("scan error: %v", f.scanner.Err()) 20 | } 21 | return data.NewLoadedPoint(f.scanner.Bytes()) 22 | } 23 | 24 | func (f fileDataSource) Headers() *common.GeneratedDataHeaders { 25 | return nil 26 | } 27 | 28 | type decoder struct { 29 | scanner *bufio.Scanner 30 | } 31 | -------------------------------------------------------------------------------- /pkg/targets/victoriametrics/implemented_target.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import ( 4 | "github.com/blagojts/viper" 5 | "github.com/spf13/pflag" 6 | "github.com/timescale/tsbs/pkg/data/serialize" 7 | "github.com/timescale/tsbs/pkg/data/source" 8 | "github.com/timescale/tsbs/pkg/targets" 9 | "github.com/timescale/tsbs/pkg/targets/constants" 10 | "github.com/timescale/tsbs/pkg/targets/influx" 11 | ) 12 | 13 | func NewTarget() targets.ImplementedTarget { 14 | return &vmTarget{} 15 | } 16 | 17 | type vmTarget struct { 18 | } 19 | 20 | func (vm vmTarget) Benchmark(_ string, dataSourceConfig *source.DataSourceConfig, v *viper.Viper) (targets.Benchmark, error) { 21 | vmSpecificConfig, err := parseSpecificConfig(v) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | return NewBenchmark(vmSpecificConfig, dataSourceConfig) 27 | } 28 | 29 | func (vm vmTarget) Serializer() serialize.PointSerializer { 30 | return &influx.Serializer{} 31 | } 32 | 33 | func (vm vmTarget) TargetSpecificFlags(flagPrefix string, flagSet *pflag.FlagSet) { 34 | flagSet.String( 35 | flagPrefix+"urls", 36 | "http://localhost:8428/write", 37 | "Comma-separated list of VictoriaMetrics ingestion URLs (single-node or VMInsert)", 38 | ) 39 | } 40 | 41 | func (vm vmTarget) TargetName() string { 42 | return constants.FormatVictoriaMetrics 43 | } 44 | -------------------------------------------------------------------------------- /pkg/targets/victoriametrics/processor.go: -------------------------------------------------------------------------------- 1 | package victoriametrics 2 | 3 | import ( 4 | "bytes" 5 | "github.com/timescale/tsbs/pkg/targets" 6 | "log" 7 | "net/http" 8 | "time" 9 | ) 10 | 11 | type processor struct { 12 | url string 13 | vmURLs []string 14 | } 15 | 16 | func (p *processor) Init(workerNum int, doLoad, hashWorkers bool) { 17 | p.url = p.vmURLs[workerNum%len(p.vmURLs)] 18 | } 19 | 20 | func (p 
*processor) ProcessBatch(b targets.Batch, doLoad bool) (metricCount, rowCount uint64) { 21 | batch := b.(*batch) 22 | if !doLoad { 23 | return batch.metrics, batch.rows 24 | } 25 | mc, rc := p.do(batch) 26 | return mc, rc 27 | } 28 | 29 | func (p *processor) do(b *batch) (uint64, uint64) { 30 | for { 31 | r := bytes.NewReader(b.buf.Bytes()) 32 | req, err := http.NewRequest("POST", p.url, r) 33 | if err != nil { 34 | log.Fatalf("error while creating new request: %s", err) 35 | } 36 | resp, err := http.DefaultClient.Do(req) 37 | if err != nil { 38 | log.Fatalf("error while executing request: %s", err) 39 | } 40 | resp.Body.Close() 41 | if resp.StatusCode == http.StatusNoContent { 42 | b.buf.Reset() 43 | return b.metrics, b.rows 44 | } 45 | log.Printf("server returned HTTP status %d. Retrying", resp.StatusCode) 46 | time.Sleep(time.Millisecond * 10) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /scripts/full_cycle_minitest/full_cycle_minitest_clickhouse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # showcases the 3 tsbs phases for clickhouse 3 | # - 1) data and query generation 4 | # - 2) data loading/insertion 5 | # - 3) query execution 6 | 7 | MAX_RPS=${MAX_RPS:-"0"} 8 | MAX_QUERIES=${MAX_QUERIES:-"1000"} 9 | PASSWORD=${PASSWORD:-""} 10 | 11 | mkdir -p /tmp/bulk_data 12 | 13 | # generate data 14 | $GOPATH/bin/tsbs_generate_data --format clickhouse --use-case cpu-only --scale 10 --seed 123 --file /tmp/bulk_data/clickhouse_data 15 | 16 | # generate queries 17 | $GOPATH/bin/tsbs_generate_queries --queries=${MAX_QUERIES} --format clickhouse --use-case cpu-only --scale 10 --seed 123 --query-type lastpoint --file /tmp/bulk_data/clickhouse_query_lastpoint 18 | $GOPATH/bin/tsbs_generate_queries --queries=${MAX_QUERIES} --format clickhouse --use-case cpu-only --scale 10 --seed 123 --query-type cpu-max-all-1 --file /tmp/bulk_data/clickhouse_query_cpu-max-all-1 19 | $GOPATH/bin/tsbs_generate_queries --queries=${MAX_QUERIES} --format clickhouse --use-case cpu-only --scale 10 --seed 123 --query-type high-cpu-1 --file /tmp/bulk_data/clickhouse_query_high-cpu-1 20 | 21 | # insert benchmark 22 | $GOPATH/bin/tsbs_load_clickhouse --db-name=benchmark --host=127.0.0.1 --workers=1 --file=/tmp/bulk_data/clickhouse_data --results-file="clickhouse_load_results.json" 23 | 24 | # queries benchmark 25 | # the lastpoint query is broken 26 | #$GOPATH/bin/tsbs_run_queries_clickhouse --max-rps=${MAX_RPS} --hdr-latencies="${MAX_RPS}rps_clickhouse_query_lastpoint.hdr" --db-name=benchmark --hosts=127.0.0.1 --workers=1 --max-queries=${MAX_QUERIES} --file=/tmp/bulk_data/clickhouse_query_lastpoint 27 | $GOPATH/bin/tsbs_run_queries_clickhouse --max-rps=${MAX_RPS} --hdr-latencies="${MAX_RPS}rps_clickhouse_query_cpu-max-all-1.hdr" --db-name=benchmark --hosts=127.0.0.1 --workers=1 --max-queries=${MAX_QUERIES} --file=/tmp/bulk_data/clickhouse_query_cpu-max-all-1 28 | $GOPATH/bin/tsbs_run_queries_clickhouse --max-rps=${MAX_RPS} --hdr-latencies="${MAX_RPS}rps_clickhouse_query_high-cpu-1.hdr" --db-name=benchmark --hosts=127.0.0.1 --workers=1 --max-queries=${MAX_QUERIES} --file=/tmp/bulk_data/clickhouse_query_high-cpu-1 29 | -------------------------------------------------------------------------------- /scripts/full_cycle_minitest/full_cycle_minitest_questdb.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # showcases the 3 tsbs phases for questdb 3 | # - 1) data 
generation 4 | # - 2) data loading/insertion 5 | # - 3) query execution 6 | 7 | # generate data 8 | mkdir -p /tmp/bulk_data 9 | $GOPATH/bin/tsbs_generate_data --format questdb --use-case cpu-only --scale 10 --seed 123 --file /tmp/bulk_data/questdb_data 10 | 11 | # generate queries 12 | $GOPATH/bin/tsbs_generate_queries --format questdb --use-case cpu-only --scale 10 --seed 123 --query-type lastpoint --file /tmp/bulk_data/questdb_query_lastpoint 13 | $GOPATH/bin/tsbs_generate_queries --format questdb --use-case cpu-only --scale 10 --seed 123 --query-type cpu-max-all-1 --file /tmp/bulk_data/questdb_query_cpu-max-all-1 14 | $GOPATH/bin/tsbs_generate_queries --format questdb --use-case cpu-only --scale 10 --seed 123 --query-type high-cpu-1 --file /tmp/bulk_data/questdb_query_high-cpu-1 15 | $GOPATH/bin/tsbs_generate_queries --format questdb --use-case cpu-only --scale 10 --seed 123 --query-type single-groupby-5-1-1 --file /tmp/bulk_data/questdb_query_single-groupby-5-1-1 16 | $GOPATH/bin/tsbs_generate_queries --format questdb --use-case cpu-only --scale 10 --seed 123 --query-type groupby-orderby-limit --file /tmp/bulk_data/questdb_query_groupby-orderby-limit 17 | 18 | # insert benchmark 19 | $GOPATH/bin/tsbs_load_questdb --file=/tmp/bulk_data/questdb_data 20 | 21 | # queries benchmark 22 | $GOPATH/bin/tsbs_run_queries_questdb --max-queries=10 --file=/tmp/bulk_data/questdb_query_lastpoint 23 | $GOPATH/bin/tsbs_run_queries_questdb --max-queries=10 --file=/tmp/bulk_data/questdb_query_cpu-max-all-1 24 | $GOPATH/bin/tsbs_run_queries_questdb --max-queries=10 --file=/tmp/bulk_data/questdb_query_high-cpu-1 25 | $GOPATH/bin/tsbs_run_queries_questdb --max-queries=10 --file=/tmp/bulk_data/questdb_query_single-groupby-5-1-1 26 | $GOPATH/bin/tsbs_run_queries_questdb --max-queries=10 --file=/tmp/bulk_data/questdb_query_groupby-orderby-limit 27 | -------------------------------------------------------------------------------- /scripts/load/load_akumuli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Ensure loader is available 4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_akumuli)} 5 | if [[ -z "$EXE_FILE_NAME" ]]; then 6 | echo "tsbs_load_akumuli not available. It is not specified explicitly and not found in \$PATH" 7 | exit 1 8 | fi 9 | 10 | # Load parameters - common 11 | DATA_FILE_NAME=${DATA_FILE_NAME:-akumuli-data.gz} 12 | INGESTION_PORT=${INGESTION_PORT:-8282} 13 | QUERY_PORT=${QUERY_PORT:-8181} 14 | 15 | EXE_DIR=${EXE_DIR:-$(dirname $0)} 16 | source ${EXE_DIR}/load_common.sh 17 | 18 | until curl http://${DATABASE_HOST}:${QUERY_PORT}/api/stats 2>/dev/null; do 19 | echo "Waiting for akumulid" 20 | sleep 1 21 | done 22 | 23 | # Load new data 24 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \ 25 | --workers=${NUM_WORKERS} \ 26 | --batch-size=${BATCH_SIZE} \ 27 | --endpoint=${DATABASE_HOST}:${INGESTION_PORT} 28 | -------------------------------------------------------------------------------- /scripts/load/load_cassandra.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Ensure loader is available 4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_cassandra)} 5 | if [[ -z "$EXE_FILE_NAME" ]]; then 6 | echo "tsbs_load_cassandra not available. 
--------------------------------------------------------------------------------
/scripts/load/load_cassandra.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_cassandra)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_cassandra not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-cassandra-data.gz}
12 | DATABASE_PORT=${DATABASE_PORT:-9042}
13 | 
14 | # Load parameters - personal
15 | CASSANDRA_TIMEOUT=${CASSANDRA_TIMEOUT:-1000s}
16 | REPLICATION_FACTOR=${REPLICATION_FACTOR:-1}
17 | BATCH_SIZE=${BATCH_SIZE:-100}
18 | 
19 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
20 | source ${EXE_DIR}/load_common.sh
21 | 
22 | while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
23 |     echo "Waiting for cassandra"
24 |     sleep 1
25 | done
26 | 
27 | cqlsh -e 'drop keyspace measurements;'
28 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
29 |     --workers=${NUM_WORKERS} \
30 |     --batch-size=${BATCH_SIZE} \
31 |     --reporting-period=${REPORTING_PERIOD} \
32 |     --write-timeout=${CASSANDRA_TIMEOUT} \
33 |     --hosts=${DATABASE_HOST}:${DATABASE_PORT} \
34 |     --replication-factor=${REPLICATION_FACTOR}
35 | 
--------------------------------------------------------------------------------
/scripts/load/load_clickhouse.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_clickhouse)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_clickhouse not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-clickhouse-data.gz}
12 | DATABASE_USER=${DATABASE_USER:-default}
13 | DATABASE_PASSWORD=${DATABASE_PASSWORD:-""}
14 | 
15 | # Load parameters - personal
16 | PROGRESS_INTERVAL=${PROGRESS_INTERVAL:-10s}
17 | HASH_WORKERS=${HASH_WORKERS:-false}
18 | 
19 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
20 | source ${EXE_DIR}/load_common.sh
21 | 
22 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
23 |     --host=${DATABASE_HOST} \
24 |     --port=${DATABASE_PORT} \
25 |     --user=${DATABASE_USER} \
26 |     --password=${DATABASE_PASSWORD} \
27 |     --db-name=${DATABASE_NAME} \
28 |     --batch-size=${BATCH_SIZE} \
29 |     --workers=${NUM_WORKERS} \
30 |     --reporting-period=${PROGRESS_INTERVAL} \
31 |     --hash-workers=${HASH_WORKERS}
32 | 
--------------------------------------------------------------------------------
/scripts/load/load_common.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Database credentials
4 | DATABASE_HOST=${DATABASE_HOST:-"localhost"}
5 | DATABASE_NAME=${DATABASE_NAME:-"benchmark"}
6 | PORT=${PORT:-5432}
7 | 
8 | # Data folder
9 | BULK_DATA_DIR=${BULK_DATA_DIR:-"/tmp/bulk_data"}
10 | # Full path to data file
11 | DATA_FILE=${DATA_FILE:-${BULK_DATA_DIR}/${DATA_FILE_NAME}}
12 | 
13 | # Load parameters
14 | BATCH_SIZE=${BATCH_SIZE:-10000}
15 | # How many concurrent workers load data - match the number of cores, or default to 4
16 | NUM_WORKERS=${NUM_WORKERS:-$(grep -c ^processor /proc/cpuinfo 2> /dev/null || echo 4)}
17 | BACKOFF_SECS=${BACKOFF_SECS:-1s}
18 | REPORTING_PERIOD=${REPORTING_PERIOD:-10s}
19 | 
20 | DO_CREATE_DB=${DO_CREATE_DB:-true}
21 | 
22 | # Ensure data file is in place
23 | if [ ! -f ${DATA_FILE} ]; then
24 |     echo "Cannot find data file ${DATA_FILE}"
25 |     exit 1
26 | fi
27 | 
28 | echo "Bulk loading file ${DATA_FILE}"
29 | 
30 | set -x
31 | 
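The NUM_WORKERS default in load_common.sh counts processors by parsing /proc/cpuinfo, which exists only on Linux. A more portable fallback chain is sketched below, assuming nproc (coreutils) or getconf is installed; it is a drop-in alternative, not what the script currently does:

    # hedged sketch: portable core-count detection with the same default of 4
    NUM_WORKERS=${NUM_WORKERS:-$(nproc 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null || echo 4)}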
--------------------------------------------------------------------------------
/scripts/load/load_cratedb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_cratedb)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_cratedb not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-cratedb-data.gz}
12 | DATABASE_HOST=${DATABASE_HOST:-"localhost"}
13 | DATABASE_PORT=${DATABASE_PORT:-5432}
14 | 
15 | # Load parameters - database specific
16 | REPLICATION_FACTOR=${REPLICATION_FACTOR:-0}
17 | NUMBER_OF_SHARDS=${NUMBER_OF_SHARDS:-5}
18 | USER=${USER:-crate}
19 | PASSWORD=${PASSWORD}
20 | 
21 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
22 | source ${EXE_DIR}/load_common.sh
23 | 
24 | while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
25 |     echo "Waiting for CrateDB..."
26 |     sleep 1
27 | done
28 | 
29 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
30 |     --hosts=${DATABASE_HOST} \
31 |     --port=${DATABASE_PORT} \
32 |     --user=${USER} \
33 |     --pass=${PASSWORD} \
34 |     --replicas=${REPLICATION_FACTOR} \
35 |     --shards=${NUMBER_OF_SHARDS}
36 | 
--------------------------------------------------------------------------------
/scripts/load/load_influx.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_influx)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_influx not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-influx-data.gz}
12 | DATABASE_PORT=${DATABASE_PORT:-8086}
13 | INFLUX_AUTH_TOKEN=${INFLUX_AUTH_TOKEN:-""}
14 | 
15 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
16 | source ${EXE_DIR}/load_common.sh
17 | 
18 | until curl http://${DATABASE_HOST}:${DATABASE_PORT}/ping 2>/dev/null; do
19 |     echo "Waiting for InfluxDB"
20 |     sleep 1
21 | done
22 | 
23 | # Remove previous database
24 | curl --header "Authorization: Token $INFLUX_AUTH_TOKEN" \
25 |     -X POST http://${DATABASE_HOST}:${DATABASE_PORT}/query?q=drop%20database%20${DATABASE_NAME}
26 | 
27 | 
28 | # Load new data
29 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
30 |     --db-name=${DATABASE_NAME} \
31 |     --backoff=${BACKOFF_SECS} \
32 |     --workers=${NUM_WORKERS} \
33 |     --batch-size=${BATCH_SIZE} \
34 |     --reporting-period=${REPORTING_PERIOD} \
35 |     --auth-token $INFLUX_AUTH_TOKEN \
36 |     --urls=http://${DATABASE_HOST}:${DATABASE_PORT}
37 | 
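The drop-database call in load_influx.sh above is fire-and-forget; nothing checks whether it succeeded before the reload starts. A minimal sketch of a follow-up check against the InfluxDB 1.x /query endpoint (SHOW DATABASES is standard InfluxQL; inspecting its output is left to the operator):

    # hedged sketch: list databases after the drop to confirm the old one is gone
    curl --header "Authorization: Token $INFLUX_AUTH_TOKEN" \
        -G http://${DATABASE_HOST}:${DATABASE_PORT}/query \
        --data-urlencode "q=SHOW DATABASES"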
--------------------------------------------------------------------------------
/scripts/load/load_mongo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_mongo)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_mongo not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-mongo-data.gz}
12 | 
13 | # Load parameters - personal
14 | PROGRESS_INTERVAL=${PROGRESS_INTERVAL:-10s}
15 | 
16 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
17 | source ${EXE_DIR}/load_common.sh
18 | 
19 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
20 |     --db-name=${DATABASE_NAME} \
21 |     --batch-size=${BATCH_SIZE} \
22 |     --workers=${NUM_WORKERS} \
23 |     --reporting-period=${PROGRESS_INTERVAL}
24 | 
--------------------------------------------------------------------------------
/scripts/load/load_questdb.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # This script assumes questdb is up and running; you can start it with docker:
4 | # docker run -p 9000:9000 -p 8812:8812 -p 9009:9009 -p 9003:9003 questdb/questdb
5 | 
6 | 
7 | # Ensure loader is available
8 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_questdb)}
9 | if [[ -z "$EXE_FILE_NAME" ]]; then
10 |     echo "tsbs_load_questdb not available. It is not specified explicitly and not found in \$PATH"
11 |     exit 1
12 | fi
13 | 
14 | # Load parameters - common
15 | DATA_FILE_NAME=${DATA_FILE_NAME:-influx-data.gz}
16 | DATABASE_PORT=${DATABASE_PORT:-9000}
17 | DATABASE_HEALTH_PORT=${DATABASE_HEALTH_PORT:-9003}
18 | ILP_PORT=${ILP_PORT:-9009}
19 | 
20 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
21 | source ${EXE_DIR}/load_common.sh
22 | 
23 | until curl http://${DATABASE_HOST}:${DATABASE_HEALTH_PORT}/ping 2>/dev/null; do
24 |     echo "Waiting for QuestDB"
25 |     sleep 1
26 | done
27 | 
28 | # Remove previous table
29 | curl -X GET http://${DATABASE_HOST}:${DATABASE_PORT}/exec?query=drop%20table%20cpu
30 | # Load new data
31 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
32 |     --workers=${NUM_WORKERS} \
33 |     --batch-size=${BATCH_SIZE} \
34 |     --reporting-period=${REPORTING_PERIOD} \
35 |     --url=http://${DATABASE_HOST}:${DATABASE_PORT} \
36 |     --ilp-bind-to ${DATABASE_HOST}:${ILP_PORT}
37 | 
--------------------------------------------------------------------------------
/scripts/load/load_victoriametrics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Ensure loader is available
4 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_load_victoriametrics)}
5 | if [[ -z "$EXE_FILE_NAME" ]]; then
6 |     echo "tsbs_load_victoriametrics not available. It is not specified explicitly and not found in \$PATH"
7 |     exit 1
8 | fi
9 | 
10 | # Load parameters - common
11 | DATA_FILE_NAME=${DATA_FILE_NAME:-victoriametrics-data.gz}
12 | DATABASE_PORT=${DATABASE_PORT:-8428}
13 | DATABASE_PATH=${DATABASE_PATH:-write}
14 | 
15 | EXE_DIR=${EXE_DIR:-$(dirname $0)}
16 | source ${EXE_DIR}/load_common.sh
17 | 
18 | # Load data
19 | cat ${DATA_FILE} | gunzip | $EXE_FILE_NAME \
20 |     --urls=http://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_PATH}
21 | 
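DATABASE_PATH defaults to write, so the loader above posts to VictoriaMetrics' Influx-compatible /write endpoint. One way to spot-check ingestion afterwards is sketched below; /api/v1/series/count is a VictoriaMetrics stats endpoint cited from memory here, so verify it against the version in use:

    # hedged sketch: rough ingestion check after the load completes
    curl http://${DATABASE_HOST}:${DATABASE_PORT}/api/v1/series/count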
--------------------------------------------------------------------------------
/scripts/release_siridb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | NUMBER_OF_SERVERS=${NUMBER_OF_SERVERS:-2}
4 | END=`expr $NUMBER_OF_SERVERS - 1`
5 | 
6 | # Create the directories for config file(s).
7 | mkdir -p /tmp/siridb/
8 | 
9 | # Configuration of SiriDB
10 | for i in $(seq 0 $END); do
11 |     # Create a directory for every server to store the database.
12 |     # And remove the old database if present.
13 |     rm -rf /tmp/siridb/dbpath$i/
14 |     mkdir /tmp/siridb/dbpath$i/
15 |     cat <<EOT > /tmp/siridb/tsbs-siridb$i.conf
16 | [siridb]
17 | listen_client_port = 900$i
18 | server_name = %HOSTNAME:901$i
19 | ip_support = ALL
20 | optimize_interval = 900
21 | heartbeat_interval = 30
22 | default_db_path = /tmp/siridb/dbpath$i
23 | max_open_files = 512
24 | enable_shard_compression = 1
25 | enable_pipe_support = 0
26 | buffer_sync_interval = 500
27 | EOT
28 | 
29 |     SIRIDB_SERVER_DIR="siridb-server -l debug"
30 |     DB_DIR="/tmp/siridb/tsbs-siridb$i.conf"
31 | 
32 |     xterm -e ${SIRIDB_SERVER_DIR} -c ${DB_DIR} &
33 | done
34 | 
35 | 
36 | 
--------------------------------------------------------------------------------
/scripts/run_queries/run_queries_timescaledb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Exit immediately if a command exits with a non-zero status.
4 | set -e
5 | 
6 | # Ensure runner is available
7 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_run_queries_timescaledb)}
8 | if [[ -z "$EXE_FILE_NAME" ]]; then
9 |     echo "tsbs_run_queries_timescaledb not available. It is not specified explicitly and not found in \$PATH"
10 |     exit 1
11 | fi
12 | 
13 | # Queries folder
14 | BULK_DATA_DIR=${BULK_DATA_DIR:-"/tmp/bulk_queries"}
15 | 
16 | # How many queries will be run
17 | MAX_QUERIES=${MAX_QUERIES:-"0"}
18 | 
19 | # How many concurrent workers run queries - match the number of cores, or default to 4
20 | NUM_WORKERS=${NUM_WORKERS:-$(grep -c ^processor /proc/cpuinfo 2> /dev/null || echo 4)}
21 | 
22 | for FULL_DATA_FILE_NAME in ${BULK_DATA_DIR}/queries_timescaledb*; do
23 |     # $FULL_DATA_FILE_NAME: /full/path/to/file_with.ext
24 |     # $DATA_FILE_NAME: file_with.ext
25 |     # $DIR: /full/path/to
26 |     # $EXTENSION: ext
27 |     # NO_EXT_DATA_FILE_NAME: file_with
28 | 
29 |     DATA_FILE_NAME=$(basename -- "${FULL_DATA_FILE_NAME}")
30 |     DIR=$(dirname "${FULL_DATA_FILE_NAME}")
31 |     EXTENSION="${DATA_FILE_NAME##*.}"
32 |     NO_EXT_DATA_FILE_NAME="${DATA_FILE_NAME%.*}"
33 | 
34 |     # Several options on how to name results file
35 |     #OUT_FULL_FILE_NAME="${DIR}/result_${DATA_FILE_NAME}"
36 |     OUT_FULL_FILE_NAME="${DIR}/result_${NO_EXT_DATA_FILE_NAME}.out"
37 |     #OUT_FULL_FILE_NAME="${DIR}/${NO_EXT_DATA_FILE_NAME}.out"
38 | 
39 |     if [ "${EXTENSION}" == "gz" ]; then
40 |         GUNZIP="gunzip"
41 |     else
42 |         GUNZIP="cat"
43 |     fi
44 | 
45 |     echo "Running ${DATA_FILE_NAME}"
46 |     cat $FULL_DATA_FILE_NAME \
47 |         | $GUNZIP \
48 |         | $EXE_FILE_NAME \
49 |             --max-queries $MAX_QUERIES \
50 |             --workers $NUM_WORKERS \
51 |         | tee $OUT_FULL_FILE_NAME
52 | done
53 | 
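The comment block inside the loop above documents plain bash parameter expansion: ## strips the longest matching prefix and % strips the shortest matching suffix. A worked example with a hypothetical file name:

    # hedged sketch: how EXTENSION and NO_EXT_DATA_FILE_NAME are derived
    DATA_FILE_NAME="queries_timescaledb_high-cpu-1.gz"
    echo "${DATA_FILE_NAME##*.}"   # prints: gz
    echo "${DATA_FILE_NAME%.*}"    # prints: queries_timescaledb_high-cpu-1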
--------------------------------------------------------------------------------
/scripts/run_queries/run_queries_timestream.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Exit immediately if a command exits with a non-zero status.
4 | set -e
5 | 
6 | # Ensure runner is available
7 | EXE_FILE_NAME=${EXE_FILE_NAME:-$(which tsbs_run_queries_timestream)}
8 | if [[ -z "$EXE_FILE_NAME" ]]; then
9 |     echo "tsbs_run_queries_timestream not available. It is not specified explicitly and not found in \$PATH"
10 |     exit 1
11 | fi
12 | 
13 | # AWS region of database
14 | AWS_REGION=${AWS_REGION:-"us-east-1"}
15 | 
16 | # Queries folder
17 | BULK_DATA_DIR=${BULK_DATA_DIR:-"/tmp/bulk_queries"}
18 | 
19 | # How many queries will be run
20 | MAX_QUERIES=${MAX_QUERIES:-"0"}
21 | 
22 | # How many concurrent workers run queries - match the number of cores, or default to 4
23 | NUM_WORKERS=${NUM_WORKERS:-$(grep -c ^processor /proc/cpuinfo 2> /dev/null || echo 4)}
24 | 
25 | 
26 | for FULL_DATA_FILE_NAME in ${BULK_DATA_DIR}/queries_timestream*; do
27 |     # $FULL_DATA_FILE_NAME: /full/path/to/file_with.ext
28 |     # $DATA_FILE_NAME: file_with.ext
29 |     # $DIR: /full/path/to
30 |     # $EXTENSION: ext
31 |     # NO_EXT_DATA_FILE_NAME: file_with
32 | 
33 |     DATA_FILE_NAME=$(basename -- "${FULL_DATA_FILE_NAME}")
34 |     DIR=$(dirname "${FULL_DATA_FILE_NAME}")
35 |     EXTENSION="${DATA_FILE_NAME##*.}"
36 |     NO_EXT_DATA_FILE_NAME="${DATA_FILE_NAME%.*}"
37 | 
38 |     # Several options on how to name results file
39 |     #OUT_FULL_FILE_NAME="${DIR}/result_${DATA_FILE_NAME}"
40 |     OUT_FULL_FILE_NAME="${DIR}/result_${NO_EXT_DATA_FILE_NAME}.out"
41 |     #OUT_FULL_FILE_NAME="${DIR}/${NO_EXT_DATA_FILE_NAME}.out"
42 | 
43 |     if [ "${EXTENSION}" == "gz" ]; then
44 |         GUNZIP="gunzip"
45 |     else
46 |         GUNZIP="cat"
47 |     fi
48 | 
49 |     echo "Running ${DATA_FILE_NAME}"
50 |     cat $FULL_DATA_FILE_NAME \
51 |         | $GUNZIP \
52 |         | $EXE_FILE_NAME \
53 |             --max-queries $MAX_QUERIES \
54 |             --workers $NUM_WORKERS \
55 |             --aws-region $AWS_REGION \
56 |         | tee $OUT_FULL_FILE_NAME
57 | done
58 | 
--------------------------------------------------------------------------------
/scripts/start_timescaledb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | PG_VER=${PG_VER:-10}
4 | MEM=${MEM:-`free -m | grep "Mem" | awk '{print $7}'`}
5 | 
6 | let "SHARED=$MEM/4"
7 | let "CACHE=2*$MEM/3"
8 | let "WORK=($MEM-$SHARED)/30"
9 | let "MAINT=$MEM/16"
10 | 
11 | sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -c -U postgres -D /etc/postgresql/${PG_VER}/main -l /tmp/postgres.log -o "-cshared_preload_libraries=timescaledb \
12 | -clog_line_prefix=\"%m [%p]: [%x] %u@%d\" \
13 | -clogging_collector=off \
14 | -csynchronous_commit=off \
15 | -cmax_wal_size=10GB \
16 | -cshared_buffers=${SHARED}MB \
17 | -ceffective_cache_size=${CACHE}MB \
18 | -cwork_mem=${WORK}MB \
19 | -cmaintenance_work_mem=${MAINT}MB \
20 | -cmax_files_per_process=100 \
21 | -cautovacuum=on" start
22 | 
--------------------------------------------------------------------------------
/scripts/stop_timescaledb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | PG_VER=${PG_VER:-10}
4 | sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -U postgres -D /etc/postgresql/${PG_VER}/main stop
5 | 
--------------------------------------------------------------------------------
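The let arithmetic in start_timescaledb.sh above sizes PostgreSQL memory settings from the available memory reported by free -m. A worked example, assuming a machine reporting 16000 MB available:

    # hedged sketch: the same integer arithmetic with MEM fixed at 16000
    MEM=16000
    let "SHARED=$MEM/4"            # shared_buffers        -> 4000 MB
    let "CACHE=2*$MEM/3"           # effective_cache_size  -> 10666 MB
    let "WORK=($MEM-$SHARED)/30"   # work_mem              -> 400 MB
    let "MAINT=$MEM/16"            # maintenance_work_mem  -> 1000 MB
    echo "shared_buffers=${SHARED}MB effective_cache_size=${CACHE}MB work_mem=${WORK}MB maintenance_work_mem=${MAINT}MB"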