├── DEMO.txt ├── LICENSE ├── README.md ├── bonitoo.toml ├── bulk_data_gen ├── common │ ├── config.go │ ├── distribution.go │ ├── point.go │ ├── pools.go │ ├── serializer.go │ ├── serializer_cassandra.go │ ├── serializer_elasticsearch.go │ ├── serializer_graphite.go │ ├── serializer_influxdb.go │ ├── serializer_mongodb.go │ ├── serializer_opentsdb.go │ ├── serializer_splunk.go │ ├── serializer_timescale.go │ └── simulation.go ├── dashboard │ ├── generate_data.go │ ├── host.go │ ├── status.go │ └── system.go ├── devops │ ├── devops_cpu.go │ ├── devops_disk.go │ ├── devops_diskio.go │ ├── devops_generate_data.go │ ├── devops_host.go │ ├── devops_kernel.go │ ├── devops_mem.go │ ├── devops_net.go │ ├── devops_nginx.go │ ├── devops_postgresql.go │ └── devops_redis.go ├── iot │ ├── air_quality_room.go │ ├── air_temp_hum_indoor.go │ ├── air_temp_hum_outdoor.go │ ├── camera_detection.go │ ├── door.go │ ├── generate_data.go │ ├── home_config.go │ ├── home_state.go │ ├── light_level.go │ ├── radiator_valve.go │ ├── smartHome.go │ ├── water_leakage_room.go │ ├── water_level.go │ ├── weather_outdoor.go │ └── window_room.go ├── metaqueries │ └── generate_data.go └── multi_measurement │ └── generate_data.go ├── bulk_load ├── load.go ├── process.go ├── scan.go ├── stats.go └── sync.go ├── bulk_query ├── http │ ├── common.go │ ├── fasthttp_client.go │ ├── http_client.go │ └── query.go ├── process.go ├── query.go ├── scan.go └── stats.go ├── bulk_query_gen ├── cassandra │ ├── cassandra_devops_8_hosts.go │ ├── cassandra_devops_common.go │ ├── cassandra_devops_groupby.go │ ├── cassandra_devops_singlehost.go │ ├── cassandra_devops_singlehost_12hr.go │ ├── cassandra_iot_common.go │ ├── cassandra_iot_singlehost.go │ └── query.go ├── common_params.go ├── database_config.go ├── devops.go ├── elasticsearch │ ├── es_devops_8_hosts.go │ ├── es_devops_common.go │ ├── es_devops_groupby.go │ ├── es_devops_singlehost.go │ └── es_devops_singlehost_12hr.go ├── graphite │ ├── graphite_common.go │ 
├── graphite_devops_8_hosts.go │ ├── graphite_devops_common.go │ ├── graphite_devops_groupby.go │ ├── graphite_devops_singlehost.go │ └── graphite_devops_singlehost_12hr.go ├── influxdb │ ├── influx_bareagg_common.go │ ├── influx_bareagg_count.go │ ├── influx_bareagg_first.go │ ├── influx_bareagg_last.go │ ├── influx_bareagg_max.go │ ├── influx_bareagg_mean.go │ ├── influx_bareagg_min.go │ ├── influx_bareagg_sum.go │ ├── influx_common.go │ ├── influx_dashboard_all.go │ ├── influx_dashboard_availability.go │ ├── influx_dashboard_common.go │ ├── influx_dashboard_cpu_num.go │ ├── influx_dashboard_cpu_utilization.go │ ├── influx_dashboard_disk_allocated.go │ ├── influx_dashboard_disk_usage.go │ ├── influx_dashboard_disk_utilization.go │ ├── influx_dashboard_http_request_duration.go │ ├── influx_dashboard_http_requests.go │ ├── influx_dashboard_kapa_cpu.go │ ├── influx_dashboard_kapa_load.go │ ├── influx_dashboard_kapa_ram.go │ ├── influx_dashboard_memory_total.go │ ├── influx_dashboard_memory_utilization.go │ ├── influx_dashboard_nginx_requests.go │ ├── influx_dashboard_queue_bytes.go │ ├── influx_dashboard_redis_memory_utilization.go │ ├── influx_dashboard_system_load.go │ ├── influx_dashboard_throughput.go │ ├── influx_devops_8_hosts.go │ ├── influx_devops_common.go │ ├── influx_devops_groupby.go │ ├── influx_devops_singlehost.go │ ├── influx_devops_singlehost_12hr.go │ ├── influx_group_window_transpose_common.go │ ├── influx_group_window_transpose_count.go │ ├── influx_group_window_transpose_first.go │ ├── influx_group_window_transpose_last.go │ ├── influx_group_window_transpose_max.go │ ├── influx_group_window_transpose_mean.go │ ├── influx_group_window_transpose_min.go │ ├── influx_group_window_transpose_sum.go │ ├── influx_groupagg_common.go │ ├── influx_groupagg_count.go │ ├── influx_groupagg_first.go │ ├── influx_groupagg_last.go │ ├── influx_groupagg_max.go │ ├── influx_groupagg_mean.go │ ├── influx_groupagg_min.go │ ├── influx_groupagg_sum.go │ ├── 
influx_iot_aggregate_drop.go │ ├── influx_iot_aggregate_keep.go │ ├── influx_iot_common.go │ ├── influx_iot_singlehost.go │ ├── influx_iot_sorted_pivot.go │ ├── influx_iot_stand_alone_filter.go │ ├── influx_metaquery_cardinality.go │ ├── influx_metaquery_common.go │ ├── influx_metaquery_field_keys.go │ ├── influx_metaquery_tag_values.go │ ├── influx_multi_measurement_common.go │ ├── influx_ungroupedagg_common.go │ ├── influx_ungroupedagg_count.go │ ├── influx_ungroupedagg_first.go │ ├── influx_ungroupedagg_last.go │ ├── influx_ungroupedagg_max.go │ ├── influx_ungroupedagg_mean.go │ ├── influx_ungroupedagg_min.go │ ├── influx_ungroupedagg_sum.go │ ├── influx_windowagg_common.go │ ├── influx_windowagg_count.go │ ├── influx_windowagg_first.go │ ├── influx_windowagg_last.go │ ├── influx_windowagg_max.go │ ├── influx_windowagg_mean.go │ ├── influx_windowagg_min.go │ └── influx_windowagg_sum.go ├── iot.go ├── metaquery.go ├── mongodb │ ├── mongo_devops_8_hosts_1hr.go │ ├── mongo_devops_common.go │ ├── mongo_devops_singlehost.go │ ├── mongo_devops_singlehost_12hr.go │ ├── mongo_iot_common.go │ ├── mongo_iot_singlehost.go │ ├── options.go │ └── query.go ├── opentsdb │ ├── opentsdb_devops_8_hosts.go │ ├── opentsdb_devops_common.go │ ├── opentsdb_devops_singlehost.go │ └── opentsdb_devops_singlehost_12hr.go ├── query.go ├── query_generator.go ├── splunk │ ├── splunk_common.go │ ├── splunk_devops_8_hosts.go │ ├── splunk_devops_common.go │ ├── splunk_devops_groupby.go │ ├── splunk_devops_singlehost.go │ └── splunk_devops_singlehost_12hr.go ├── time_interval.go ├── time_window.go └── timescaledb │ ├── query.go │ ├── timescale_devops_8_hosts_1hr.go │ ├── timescale_devops_common.go │ ├── timescale_devops_groupby.go │ ├── timescale_devops_singlehost.go │ ├── timescale_devops_singlehost_12hr.go │ ├── timescale_iot_common.go │ └── timescale_iot_singlehost.go ├── cmd ├── bulk_data_gen │ └── main.go ├── bulk_load_cassandra │ └── main.go ├── bulk_load_es │ ├── http_writer.go │ └── 
main.go ├── bulk_load_graphite │ └── main.go ├── bulk_load_influx │ ├── http_writer.go │ └── main.go ├── bulk_load_mongo │ ├── main.go │ └── unsafe.go ├── bulk_load_opentsdb │ ├── http_writer.go │ └── main.go ├── bulk_load_splunk │ ├── http_writer.go │ └── main.go ├── bulk_load_timescale │ └── main.go ├── bulk_query_gen │ └── main.go ├── query_benchmarker_cassandra │ ├── conn.go │ ├── main.go │ ├── query.go │ ├── query_executor.go │ ├── query_plan.go │ ├── query_plan_aggregators.go │ └── time_util.go ├── query_benchmarker_es │ ├── main.go │ └── query.go ├── query_benchmarker_graphite │ └── main.go ├── query_benchmarker_influxdb │ └── main.go ├── query_benchmarker_mongo │ ├── main.go │ ├── query.go │ └── unsafe.go ├── query_benchmarker_opentsdb │ ├── http_client.go │ ├── main.go │ └── query.go ├── query_benchmarker_splunk │ └── main.go └── query_benchmarker_timescale │ ├── main.go │ └── query.go ├── go.mod ├── go.sum ├── mongo.flatbuffers.fbs ├── mongo_serialization ├── Field.go ├── Item.go ├── Tag.go └── ValueType.go ├── timescale.proto ├── timescale_serializaition └── timescale.pb.go ├── util └── report │ ├── core.go │ ├── result.go │ ├── result_test.go │ └── telemetry.go └── void_server └── main.go /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 InfluxData 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /bulk_data_gen/common/point.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/gob" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | // Point wraps a single data point. It stores database-agnostic data 10 | // representing one point in time of one measurement. 11 | // 12 | // Internally, Point uses byte slices instead of strings to try to minimize 13 | // overhead. 14 | type Point struct { 15 | MeasurementName []byte 16 | TagKeys [][]byte 17 | TagValues [][]byte 18 | FieldKeys [][]byte 19 | FieldValues []interface{} 20 | Timestamp *time.Time 21 | 22 | encoder *gob.Encoder 23 | } 24 | 25 | // Using these literals prevents the slices from escaping to the heap, saving 26 | // a few micros per call: 27 | var () 28 | 29 | // scratchBufPool helps reuse serialization scratch buffers. 
30 | var scratchBufPool = &sync.Pool{ 31 | New: func() interface{} { 32 | return make([]byte, 0, 1024) 33 | }, 34 | } 35 | 36 | func (p *Point) Reset() { 37 | p.MeasurementName = nil 38 | p.TagKeys = p.TagKeys[:0] 39 | p.TagValues = p.TagValues[:0] 40 | p.FieldKeys = p.FieldKeys[:0] 41 | p.FieldValues = p.FieldValues[:0] 42 | p.Timestamp = nil 43 | } 44 | 45 | func (p *Point) SetTimestamp(t *time.Time) { 46 | p.Timestamp = t 47 | } 48 | 49 | func (p *Point) SetMeasurementName(s []byte) { 50 | p.MeasurementName = s 51 | } 52 | 53 | func (p *Point) AppendTag(key, value []byte) { 54 | p.TagKeys = append(p.TagKeys, key) 55 | p.TagValues = append(p.TagValues, value) 56 | } 57 | 58 | func (p *Point) AppendField(key []byte, value interface{}) { 59 | p.FieldKeys = append(p.FieldKeys, key) 60 | p.FieldValues = append(p.FieldValues, value) 61 | } 62 | -------------------------------------------------------------------------------- /bulk_data_gen/common/pools.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "sync" 5 | 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | var fbBuilderPool = &sync.Pool{ 10 | New: func() interface{} { 11 | return flatbuffers.NewBuilder(0) 12 | }, 13 | } 14 | var bufPool = &sync.Pool{ 15 | New: func() interface{} { 16 | return []byte{} 17 | }, 18 | } 19 | var bufPool8 = &sync.Pool{ 20 | New: func() interface{} { 21 | return make([]byte, 8) 22 | }, 23 | } 24 | -------------------------------------------------------------------------------- /bulk_data_gen/common/serializer.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "regexp" 7 | "strconv" 8 | "strings" 9 | ) 10 | 11 | type Serializer interface { 12 | SerializePoint(w io.Writer, p *Point) error 13 | SerializeSize(w io.Writer, points int64, values int64) error 14 | } 15 | 16 | const DatasetSizeMarker = 
"dataset-size:" 17 | 18 | var DatasetSizeMarkerRE = regexp.MustCompile(DatasetSizeMarker + `(\d+),(\d+)`) 19 | 20 | func serializeSizeInText(w io.Writer, points int64, values int64) error { 21 | buf := scratchBufPool.Get().([]byte) 22 | buf = append(buf, fmt.Sprintf("%s%d,%d\n", DatasetSizeMarker, points, values)...) 23 | _, err := w.Write(buf) 24 | if err != nil { 25 | return err 26 | } 27 | return nil 28 | } 29 | 30 | func fastFormatAppend(v interface{}, buf []byte, singleQuotesForString bool) []byte { 31 | var quotationChar = "\"" 32 | if singleQuotesForString { 33 | quotationChar = "'" 34 | } 35 | switch v.(type) { 36 | case int: 37 | return strconv.AppendInt(buf, int64(v.(int)), 10) 38 | case int64: 39 | return strconv.AppendInt(buf, v.(int64), 10) 40 | case float64: 41 | return strconv.AppendFloat(buf, v.(float64), 'f', 16, 64) 42 | case float32: 43 | return strconv.AppendFloat(buf, float64(v.(float32)), 'f', 16, 32) 44 | case bool: 45 | return strconv.AppendBool(buf, v.(bool)) 46 | case []byte: 47 | buf = append(buf, quotationChar...) 48 | buf = append(buf, v.([]byte)...) 49 | buf = append(buf, quotationChar...) 50 | return buf 51 | case string: 52 | buf = append(buf, quotationChar...) 53 | buf = append(buf, v.(string)...) 54 | buf = append(buf, quotationChar...) 
55 | return buf 56 | default: 57 | panic(fmt.Sprintf("unknown field type for %#v", v)) 58 | } 59 | } 60 | 61 | func CheckTotalValues(line string) (totalPoints, totalValues int64, err error) { 62 | if strings.HasPrefix(line, DatasetSizeMarker) { 63 | parts := DatasetSizeMarkerRE.FindAllStringSubmatch(line, -1) 64 | if parts == nil || len(parts[0]) != 3 { 65 | err = fmt.Errorf("incorrect number of matched groups: %#v", parts) 66 | return 67 | } 68 | if i, e := strconv.Atoi(parts[0][1]); e == nil { 69 | totalPoints = int64(i) 70 | } else { 71 | err = e 72 | return 73 | } 74 | if i, e := strconv.Atoi(parts[0][2]); e == nil { 75 | totalValues = int64(i) 76 | } else { 77 | err = e 78 | } 79 | } 80 | return 81 | } 82 | -------------------------------------------------------------------------------- /bulk_data_gen/common/serializer_graphite.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | ) 7 | 8 | type SerializerGraphiteLine struct { 9 | buf []byte 10 | } 11 | 12 | func NewSerializerGraphiteLine() *SerializerGraphiteLine { 13 | return &SerializerGraphiteLine{ 14 | buf: make([]byte, 0, 4096), 15 | } 16 | } 17 | 18 | // SerializePoint writes Point data to the given writer, conforming to the 19 | // Graphite plain text line protocol. 20 | func (s *SerializerGraphiteLine) SerializePoint(w io.Writer, p *Point) (err error) { 21 | timestamp := p.Timestamp.UTC().Unix() 22 | buf := s.buf[:0] 23 | for i := 0; i < len(p.FieldKeys); i++ { 24 | buf = append(buf, []byte(p.MeasurementName)...) 25 | buf = append(buf, "."...) 26 | buf = append(buf, p.FieldKeys[i]...) 27 | for j := 0; j < len(p.TagKeys); j++ { 28 | buf = append(buf, ";"...) 29 | buf = append(buf, p.TagKeys[j]...) 30 | buf = append(buf, "="...) 31 | buf = append(buf, p.TagValues[j]...) 32 | } 33 | buf = append(buf, " "...) 34 | v := p.FieldValues[i] 35 | buf = fastFormatAppend(v, buf, true) 36 | buf = append(buf, " "...)
37 | buf = append(buf, []byte(fmt.Sprintf("%d", timestamp))...) 38 | buf = append(buf, "\n"...) 39 | } 40 | _, err = w.Write(buf) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | return nil 46 | } 47 | 48 | func (s *SerializerGraphiteLine) SerializeSize(w io.Writer, points int64, values int64) error { 49 | return serializeSizeInText(w, points, values) 50 | } 51 | -------------------------------------------------------------------------------- /bulk_data_gen/common/serializer_influxdb.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | type serializerInflux struct { 8 | } 9 | 10 | func NewSerializerInflux() *serializerInflux { 11 | return &serializerInflux{} 12 | } 13 | 14 | // SerializeInfluxBulk writes Point data to the given writer, conforming to the 15 | // InfluxDB wire protocol. 16 | // 17 | // This function writes output that looks like: 18 | // ,= = \n 19 | // 20 | // For example: 21 | // foo,tag0=bar baz=-1.0 100\n 22 | // 23 | // TODO(rw): Speed up this function. The bulk of time is spent in strconv. 24 | func (s *serializerInflux) SerializePoint(w io.Writer, p *Point) (err error) { 25 | buf := scratchBufPool.Get().([]byte) 26 | buf = append(buf, p.MeasurementName...) 27 | 28 | for i := 0; i < len(p.TagKeys); i++ { 29 | buf = append(buf, ',') 30 | buf = append(buf, p.TagKeys[i]...) 31 | buf = append(buf, '=') 32 | buf = append(buf, p.TagValues[i]...) 33 | } 34 | 35 | if len(p.FieldKeys) > 0 { 36 | buf = append(buf, ' ') 37 | } 38 | 39 | for i := 0; i < len(p.FieldKeys); i++ { 40 | buf = append(buf, p.FieldKeys[i]...) 
41 | buf = append(buf, '=') 42 | 43 | v := p.FieldValues[i] 44 | buf = fastFormatAppend(v, buf, false) 45 | 46 | // Influx uses 'i' to indicate integers: 47 | switch v.(type) { 48 | case int, int64: 49 | buf = append(buf, 'i') 50 | } 51 | 52 | if i+1 < len(p.FieldKeys) { 53 | buf = append(buf, ',') 54 | } 55 | } 56 | 57 | buf = append(buf, ' ') 58 | buf = fastFormatAppend(p.Timestamp.UTC().UnixNano(), buf, true) 59 | buf = append(buf, '\n') 60 | _, err = w.Write(buf) 61 | 62 | buf = buf[:0] 63 | scratchBufPool.Put(buf) 64 | 65 | return err 66 | } 67 | 68 | func (s *serializerInflux) SerializeSize(w io.Writer, points int64, values int64) error { 69 | return serializeSizeInText(w, points, values) 70 | } 71 | -------------------------------------------------------------------------------- /bulk_data_gen/common/serializer_splunk.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | ) 8 | 9 | type SerializerSplunkJson struct { 10 | buf []byte 11 | } 12 | 13 | func NewSerializerSplunkJson() *SerializerSplunkJson { 14 | return &SerializerSplunkJson{ 15 | buf: make([]byte, 0, 4096), 16 | } 17 | } 18 | 19 | // SerializePoint writes Point data to the given writer, conforming to the 20 | // Splunk JSON format. 21 | // 22 | // This function writes output that looks like: 23 | // ... 24 | func (s *SerializerSplunkJson) SerializePoint(w io.Writer, p *Point) (err error) { 25 | timestamp := p.Timestamp.UTC().Unix() 26 | buf := s.buf[:0] 27 | var host []byte 28 | for i := 0; i < len(p.TagKeys); i++ { 29 | if bytes.Equal(p.TagKeys[i], []byte("hostname")) { 30 | host = p.TagValues[i] 31 | break 32 | } 33 | } 34 | timestampPart := fmt.Sprintf("\"time\":%d,", timestamp) 35 | sourcePart := fmt.Sprintf("\"source\":\"%s\",", p.MeasurementName) 36 | hostPart := fmt.Sprintf("\"host\":\"%s\",", string(host)) 37 | for i := 0; i < len(p.FieldKeys); i++ { 38 | buf = append(buf, "{"...)
39 | buf = append(buf, []byte(timestampPart)...) 40 | buf = append(buf, []byte("\"event\":\"metric\",")...) 41 | buf = append(buf, []byte(sourcePart)...) 42 | buf = append(buf, []byte(hostPart)...) 43 | buf = append(buf, []byte("\"fields\":{")...) 44 | for j := 0; j < len(p.TagKeys); j++ { 45 | buf = append(buf, "\""...) 46 | buf = append(buf, p.TagKeys[j]...) 47 | buf = append(buf, "\":\""...) 48 | buf = append(buf, p.TagValues[j]...) 49 | buf = append(buf, "\","...) 50 | } 51 | buf = append(buf, "\"_value\":"...) 52 | v := p.FieldValues[i] 53 | buf = fastFormatAppend(v, buf, false) 54 | buf = append(buf, ",\"metric_name\":\""...) 55 | buf = append(buf, p.MeasurementName...) 56 | buf = append(buf, "."...) 57 | buf = append(buf, p.FieldKeys[i]...) 58 | buf = append(buf, "\""...) 59 | buf = append(buf, "}}\n"...) 60 | } 61 | _, err = w.Write(buf) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | return nil 67 | } 68 | 69 | func (s *SerializerSplunkJson) SerializeSize(w io.Writer, points int64, values int64) error { 70 | return serializeSizeInText(w, points, values) 71 | } 72 | -------------------------------------------------------------------------------- /bulk_data_gen/common/simulation.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "time" 4 | 5 | const ( 6 | DefaultDateTimeStart = "2018-01-01T00:00:00Z" 7 | DefaultDateTimeEnd = "2018-01-02T00:00:00Z" 8 | UseCaseDevOps = "devops" 9 | UseCaseIot = "iot" 10 | UseCaseDashboard = "dashboard" 11 | UseCaseMetaquery = "metaquery" 12 | UseCaseWindowAggregate = "window-agg" 13 | UseCaseGroupAggregate = "group-agg" 14 | UseCaseBareAggregate = "bare-agg" 15 | UseCaseUngroupedAggregate = "ungrouped-agg" 16 | UseCaseGroupWindowTransposeHighCard = "group-window-transpose-high-card" 17 | UseCaseGroupWindowTransposeLowCard = "group-window-transpose-low-card" 18 | UseCaseMultiMeasurement = "multi-measurement" 19 | ) 20 | 21 | // Use case choices: 22 | var
UseCaseChoices = []string{ 23 | UseCaseDevOps, 24 | UseCaseIot, 25 | UseCaseDashboard, 26 | UseCaseMetaquery, 27 | UseCaseWindowAggregate, 28 | UseCaseGroupAggregate, 29 | UseCaseBareAggregate, 30 | UseCaseUngroupedAggregate, 31 | UseCaseMultiMeasurement, 32 | } 33 | 34 | // Simulator simulates a use case. 35 | type Simulator interface { 36 | Total() int64 37 | SeenPoints() int64 38 | SeenValues() int64 39 | Finished() bool 40 | Next(*Point) 41 | } 42 | 43 | // SimulatedMeasurement simulates one measurement (e.g. Redis for DevOps). 44 | type SimulatedMeasurement interface { 45 | Tick(time.Duration) 46 | ToPoint(*Point) bool // returns true if the point is properly filled; false means the point should be skipped 47 | } 48 | 49 | // MakeUsablePoint allocates a new Point ready for use by a Simulator. 50 | func MakeUsablePoint() *Point { 51 | return &Point{ 52 | MeasurementName: nil, 53 | TagKeys: make([][]byte, 0), 54 | TagValues: make([][]byte, 0), 55 | FieldKeys: make([][]byte, 0), 56 | FieldValues: make([]interface{}, 0), 57 | Timestamp: &time.Time{}, 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /bulk_data_gen/dashboard/status.go: -------------------------------------------------------------------------------- 1 | package dashboard 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | StatusByteString = []byte("status") // heap optimization 10 | // Field keys for 'status' points.
11 | ServiceUpFieldKey = []byte("service_up") 12 | ) 13 | 14 | type StatusMeasurement struct { 15 | timestamp time.Time 16 | serviceUp Distribution 17 | } 18 | 19 | func NewStatusMeasurement(start time.Time) *StatusMeasurement { 20 | //state 21 | serviceUp := TSD(0, 1, 0) 22 | 23 | return &StatusMeasurement{ 24 | timestamp: start, 25 | serviceUp: serviceUp, 26 | } 27 | } 28 | 29 | func (m *StatusMeasurement) Tick(d time.Duration) { 30 | m.timestamp = m.timestamp.Add(d) 31 | m.serviceUp.Advance() 32 | } 33 | 34 | func (m *StatusMeasurement) ToPoint(p *Point) bool { 35 | p.SetMeasurementName(StatusByteString) 36 | p.SetTimestamp(&m.timestamp) 37 | p.AppendField(ServiceUpFieldKey, int(m.serviceUp.Get())) 38 | return true 39 | } 40 | -------------------------------------------------------------------------------- /bulk_data_gen/dashboard/system.go: -------------------------------------------------------------------------------- 1 | package dashboard 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | var ( 10 | SystemByteString = []byte("system") // heap optimization 11 | ) 12 | 13 | var ( 14 | NCPUsFieldKey = []byte("n_cpus") 15 | // Field keys for 'load' points. 
16 | LoadFieldKeys = [][]byte{ 17 | []byte("load1"), 18 | []byte("load5"), 19 | []byte("load15"), 20 | } 21 | 22 | CPUsCount = []int{2, 4, 8, 16, 32, 64} 23 | ) 24 | 25 | type SystemMeasurement struct { 26 | timestamp time.Time 27 | ncpus int 28 | distributions []Distribution 29 | } 30 | 31 | func NewSystemMeasurement(start time.Time) *SystemMeasurement { 32 | distributions := make([]Distribution, len(LoadFieldKeys)) 33 | ncpus := CPUsCount[rand.Intn(len(CPUsCount))] 34 | for i := range distributions { 35 | distributions[i] = &ClampedRandomWalkDistribution{ 36 | State: rand.Float64() * 100.0 * float64(ncpus), 37 | Min: 0.0, 38 | Max: float64(ncpus) * (1 + rand.Float64()), 39 | Step: &NormalDistribution{ 40 | Mean: 0.0, 41 | StdDev: 10.0, 42 | }, 43 | } 44 | } 45 | return &SystemMeasurement{ 46 | timestamp: start, 47 | ncpus: ncpus, 48 | distributions: distributions, 49 | } 50 | } 51 | 52 | func (m *SystemMeasurement) Tick(d time.Duration) { 53 | m.timestamp = m.timestamp.Add(d) 54 | for i := range m.distributions { 55 | m.distributions[i].Advance() 56 | } 57 | } 58 | 59 | func (m *SystemMeasurement) ToPoint(p *Point) bool { 60 | p.SetMeasurementName(SystemByteString) 61 | p.SetTimestamp(&m.timestamp) 62 | 63 | p.AppendField(NCPUsFieldKey, m.ncpus) 64 | for i := range m.distributions { 65 | p.AppendField(LoadFieldKeys[i], m.distributions[i].Get()) 66 | } 67 | return true 68 | } 69 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_cpu.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | var ( 10 | CPUByteString = []byte("cpu") // heap optimization 11 | CPUTotalByteString = []byte("cpu-total") // heap optimization 12 | ) 13 | 14 | var ( 15 | // Field keys for 'cpu' points. 
16 | CPUFieldKeys = [][]byte{ 17 | []byte("usage_user"), 18 | []byte("usage_system"), 19 | []byte("usage_idle"), 20 | []byte("usage_nice"), 21 | []byte("usage_iowait"), 22 | []byte("usage_irq"), 23 | []byte("usage_softirq"), 24 | []byte("usage_steal"), 25 | []byte("usage_guest"), 26 | []byte("usage_guest_nice"), 27 | } 28 | ) 29 | 30 | type CPUMeasurement struct { 31 | timestamp time.Time 32 | distributions []Distribution 33 | } 34 | 35 | func NewCPUMeasurement(start time.Time) *CPUMeasurement { 36 | distributions := make([]Distribution, len(CPUFieldKeys)) 37 | for i := range distributions { 38 | distributions[i] = &ClampedRandomWalkDistribution{ 39 | State: rand.Float64() * 100.0, 40 | Min: 0.0, 41 | Max: 100.0, 42 | Step: &NormalDistribution{ 43 | Mean: 0.0, 44 | StdDev: 1.0, 45 | }, 46 | } 47 | } 48 | return &CPUMeasurement{ 49 | timestamp: start, 50 | distributions: distributions, 51 | } 52 | } 53 | 54 | func (m *CPUMeasurement) Tick(d time.Duration) { 55 | m.timestamp = m.timestamp.Add(d) 56 | for i := range m.distributions { 57 | m.distributions[i].Advance() 58 | } 59 | } 60 | 61 | func (m *CPUMeasurement) ToPoint(p *Point) bool { 62 | p.SetMeasurementName(CPUByteString) 63 | p.SetTimestamp(&m.timestamp) 64 | 65 | for i := range m.distributions { 66 | p.AppendField(CPUFieldKeys[i], m.distributions[i].Get()) 67 | } 68 | return true 69 | } 70 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_diskio.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "fmt" 5 | . 
"github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 6 | "math/rand" 7 | "time" 8 | ) 9 | 10 | var ( 11 | DiskIOByteString = []byte("diskio") // heap optimization 12 | SerialByteString = []byte("serial") 13 | 14 | DiskIOFields = []LabeledDistributionMaker{ 15 | {[]byte("reads"), func() Distribution { return MWD(ND(50, 1), 0) }}, 16 | {[]byte("writes"), func() Distribution { return MWD(ND(50, 1), 0) }}, 17 | {[]byte("read_bytes"), func() Distribution { return MWD(ND(100, 1), 0) }}, 18 | {[]byte("write_bytes"), func() Distribution { return MWD(ND(100, 1), 0) }}, 19 | {[]byte("read_time"), func() Distribution { return MWD(ND(5, 1), 0) }}, 20 | {[]byte("write_time"), func() Distribution { return MWD(ND(5, 1), 0) }}, 21 | {[]byte("io_time"), func() Distribution { return MWD(ND(5, 1), 0) }}, 22 | } 23 | ) 24 | 25 | type DiskIOMeasurement struct { 26 | timestamp time.Time 27 | 28 | serial []byte 29 | distributions []Distribution 30 | } 31 | 32 | func NewDiskIOMeasurement(start time.Time) *DiskIOMeasurement { 33 | distributions := make([]Distribution, len(DiskIOFields)) 34 | for i := range DiskIOFields { 35 | distributions[i] = DiskIOFields[i].DistributionMaker() 36 | } 37 | 38 | serial := []byte(fmt.Sprintf("%03d-%03d-%03d", rand.Intn(1000), rand.Intn(1000), rand.Intn(1000))) 39 | if Config != nil { // partial override from external config 40 | serial = Config.GetTagBytesValue(DiskIOByteString, SerialByteString, true, serial) 41 | } 42 | return &DiskIOMeasurement{ 43 | serial: serial, 44 | 45 | timestamp: start, 46 | distributions: distributions, 47 | } 48 | } 49 | 50 | func (m *DiskIOMeasurement) Tick(d time.Duration) { 51 | m.timestamp = m.timestamp.Add(d) 52 | 53 | for i := range m.distributions { 54 | m.distributions[i].Advance() 55 | } 56 | } 57 | 58 | func (m *DiskIOMeasurement) ToPoint(p *Point) bool { 59 | p.SetMeasurementName(DiskIOByteString) 60 | p.SetTimestamp(&m.timestamp) 61 | 62 | p.AppendTag(SerialByteString, m.serial) 63 | 64 | for i := range 
m.distributions { 65 | p.AppendField(DiskIOFields[i].Label, int64(m.distributions[i].Get())) 66 | } 67 | return true 68 | } 69 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_kernel.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | var ( 10 | KernelByteString = []byte("kernel") // heap optimization 11 | BootTimeByteString = []byte("boot_time") 12 | KernelFields = []LabeledDistributionMaker{ 13 | {[]byte("interrupts"), func() Distribution { return MWD(ND(5, 1), 0) }}, 14 | {[]byte("context_switches"), func() Distribution { return MWD(ND(5, 1), 0) }}, 15 | {[]byte("processes_forked"), func() Distribution { return MWD(ND(5, 1), 0) }}, 16 | {[]byte("disk_pages_in"), func() Distribution { return MWD(ND(5, 1), 0) }}, 17 | {[]byte("disk_pages_out"), func() Distribution { return MWD(ND(5, 1), 0) }}, 18 | } 19 | ) 20 | 21 | type KernelMeasurement struct { 22 | timestamp time.Time 23 | 24 | bootTime int64 25 | uptime time.Duration 26 | distributions []Distribution 27 | } 28 | 29 | func NewKernelMeasurement(start time.Time) *KernelMeasurement { 30 | distributions := make([]Distribution, len(KernelFields)) 31 | for i := range KernelFields { 32 | distributions[i] = KernelFields[i].DistributionMaker() 33 | } 34 | 35 | bootTime := rand.Int63n(240) 36 | return &KernelMeasurement{ 37 | bootTime: bootTime, 38 | 39 | timestamp: start, 40 | distributions: distributions, 41 | } 42 | } 43 | 44 | func (m *KernelMeasurement) Tick(d time.Duration) { 45 | m.timestamp = m.timestamp.Add(d) 46 | 47 | for i := range m.distributions { 48 | m.distributions[i].Advance() 49 | } 50 | } 51 | 52 | func (m *KernelMeasurement) ToPoint(p *Point) bool { 53 | p.SetMeasurementName(KernelByteString) 54 | p.SetTimestamp(&m.timestamp) 55 | 56 | 
p.AppendField(BootTimeByteString, m.bootTime) 57 | for i := range m.distributions { 58 | p.AppendField(KernelFields[i].Label, int64(m.distributions[i].Get())) 59 | } 60 | return true 61 | } 62 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_net.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "fmt" 5 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 6 | "math/rand" 7 | "time" 8 | ) 9 | 10 | var ( 11 | NetByteString = []byte("net") // heap optimization 12 | NetTags = [][]byte{ 13 | []byte("interface"), 14 | } 15 | 16 | NetFields = []LabeledDistributionMaker{ 17 | {[]byte("bytes_sent"), func() Distribution { return MWD(ND(50, 1), 0) }}, 18 | {[]byte("bytes_recv"), func() Distribution { return MWD(ND(50, 1), 0) }}, 19 | {[]byte("packets_sent"), func() Distribution { return MWD(ND(50, 1), 0) }}, 20 | {[]byte("packets_recv"), func() Distribution { return MWD(ND(50, 1), 0) }}, 21 | {[]byte("err_in"), func() Distribution { return MWD(ND(5, 1), 0) }}, 22 | {[]byte("err_out"), func() Distribution { return MWD(ND(5, 1), 0) }}, 23 | {[]byte("drop_in"), func() Distribution { return MWD(ND(5, 1), 0) }}, 24 | {[]byte("drop_out"), func() Distribution { return MWD(ND(5, 1), 0) }}, 25 | } 26 | ) 27 | 28 | type NetMeasurement struct { 29 | timestamp time.Time 30 | 31 | interfaceName []byte 32 | uptime time.Duration 33 | distributions []Distribution 34 | } 35 | 36 | func NewNetMeasurement(start time.Time) *NetMeasurement { 37 | distributions := make([]Distribution, len(NetFields)) 38 | for i := range NetFields { 39 | distributions[i] = NetFields[i].DistributionMaker() 40 | } 41 | 42 | interfaceName := []byte(fmt.Sprintf("eth%d", rand.Intn(4))) 43 | if Config != nil { // partial override from external config 44 | interfaceName = Config.GetTagBytesValue(NetByteString, NetTags[0], true, interfaceName) 45 | } 46 | return 
&NetMeasurement{ 47 | interfaceName: interfaceName, 48 | 49 | timestamp: start, 50 | distributions: distributions, 51 | } 52 | } 53 | 54 | func (m *NetMeasurement) Tick(d time.Duration) { 55 | m.timestamp = m.timestamp.Add(d) 56 | 57 | for i := range m.distributions { 58 | m.distributions[i].Advance() 59 | } 60 | } 61 | 62 | func (m *NetMeasurement) ToPoint(p *Point) bool { 63 | p.SetMeasurementName(NetByteString) 64 | p.SetTimestamp(&m.timestamp) 65 | 66 | p.AppendTag(NetTags[0], m.interfaceName) 67 | 68 | for i := range m.distributions { 69 | p.AppendField(NetFields[i].Label, int64(m.distributions[i].Get())) 70 | } 71 | return true 72 | } 73 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_nginx.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | "fmt" 5 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 6 | "math/rand" 7 | "time" 8 | ) 9 | 10 | var ( 11 | NginxByteString = []byte("nginx") // heap optimization 12 | 13 | NginxTags = [][]byte{ 14 | []byte("port"), 15 | []byte("server"), 16 | } 17 | 18 | NginxFields = []LabeledDistributionMaker{ 19 | {[]byte("accepts"), func() Distribution { return MWD(ND(5, 1), 0) }}, 20 | {[]byte("active"), func() Distribution { return CWD(ND(5, 1), 0, 100, 0) }}, 21 | {[]byte("handled"), func() Distribution { return MWD(ND(5, 1), 0) }}, 22 | {[]byte("reading"), func() Distribution { return CWD(ND(5, 1), 0, 100, 0) }}, 23 | {[]byte("requests"), func() Distribution { return MWD(ND(5, 1), 0) }}, 24 | {[]byte("waiting"), func() Distribution { return CWD(ND(5, 1), 0, 100, 0) }}, 25 | {[]byte("writing"), func() Distribution { return CWD(ND(5, 1), 0, 100, 0) }}, 26 | } 27 | ) 28 | 29 | type NginxMeasurement struct { 30 | timestamp time.Time 31 | 32 | port, serverName []byte 33 | distributions []Distribution 34 | } 35 | 36 | func NewNginxMeasurement(start time.Time) *NginxMeasurement { 
37 | distributions := make([]Distribution, len(NginxFields)) 38 | for i := range NginxFields { 39 | distributions[i] = NginxFields[i].DistributionMaker() 40 | } 41 | 42 | serverName := []byte(fmt.Sprintf("nginx_%d", rand.Intn(100000))) 43 | port := []byte(fmt.Sprintf("%d", rand.Intn(20000)+1024)) 44 | if Config != nil { // partial override from external config 45 | serverName = Config.GetTagBytesValue(NginxByteString, NginxTags[1], true, serverName) 46 | port = Config.GetTagBytesValue(NginxByteString, NginxTags[0], true, port) 47 | } 48 | return &NginxMeasurement{ 49 | port: port, 50 | serverName: serverName, 51 | 52 | timestamp: start, 53 | distributions: distributions, 54 | } 55 | } 56 | 57 | func (m *NginxMeasurement) Tick(d time.Duration) { 58 | m.timestamp = m.timestamp.Add(d) 59 | 60 | for i := range m.distributions { 61 | m.distributions[i].Advance() 62 | } 63 | } 64 | 65 | func (m *NginxMeasurement) ToPoint(p *Point) bool { 66 | p.SetMeasurementName(NginxByteString) 67 | p.SetTimestamp(&m.timestamp) 68 | 69 | p.AppendTag(NginxTags[0], m.port) 70 | p.AppendTag(NginxTags[1], m.serverName) 71 | 72 | for i := range m.distributions { 73 | p.AppendField(NginxFields[i].Label, int64(m.distributions[i].Get())) 74 | } 75 | return true 76 | } 77 | -------------------------------------------------------------------------------- /bulk_data_gen/devops/devops_postgresql.go: -------------------------------------------------------------------------------- 1 | package devops 2 | 3 | import ( 4 | . 
"github.com/influxdata/influxdb-comparisons/bulk_data_gen/common"
	"time"
)

var (
	// NOTE(review): "postgresl" is missing the trailing "q" — presumably meant
	// "postgresql". Kept as-is because this is the emitted measurement name:
	// fixing it would change all generated line protocol and break any stored
	// series or queries that already use "postgresl". Confirm downstream
	// consumers before correcting.
	PostgresqlByteString = []byte("postgresl") // heap optimization
	PostgresqlFields = []LabeledDistributionMaker{
		{[]byte("numbackends"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("xact_commit"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("xact_rollback"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("blks_read"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("blks_hit"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("tup_returned"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("tup_fetched"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("tup_inserted"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("tup_updated"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("tup_deleted"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("conflicts"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("temp_files"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		// temp_bytes walks on a larger scale (bytes up to 1 GiB) than the
		// other counters, which are clamped to [0, 1000].
		{[]byte("temp_bytes"), func() Distribution { return CWD(ND(1024, 1), 0, 1024*1024*1024, 0) }},
		{[]byte("deadlocks"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("blk_read_time"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
		{[]byte("blk_write_time"), func() Distribution { return CWD(ND(5, 1), 0, 1000, 0) }},
	}
)

// PostgresqlMeasurement simulates pg_stat-style counters for one host: one
// clamped random-walk distribution per field in PostgresqlFields.
type PostgresqlMeasurement struct {
	timestamp time.Time
	distributions []Distribution
}

// NewPostgresqlMeasurement creates a measurement starting at `start` with one
// freshly built distribution per field.
func NewPostgresqlMeasurement(start time.Time) *PostgresqlMeasurement {
	distributions := make([]Distribution, len(PostgresqlFields))
	for i := range PostgresqlFields {
		distributions[i] = PostgresqlFields[i].DistributionMaker()
	}

	return &PostgresqlMeasurement{
		timestamp: start,
		distributions: distributions,
	}
}

// Tick advances the simulated clock by d and steps every field distribution.
func (m *PostgresqlMeasurement) Tick(d time.Duration) {
	m.timestamp = m.timestamp.Add(d)

	for i := range m.distributions {
		m.distributions[i].Advance()
	}
}

// ToPoint fills p with the current field values; always emits a point.
func (m *PostgresqlMeasurement) ToPoint(p *Point) bool {
	p.SetMeasurementName(PostgresqlByteString)
	p.SetTimestamp(&m.timestamp)

	for i := range m.distributions {
		p.AppendField(PostgresqlFields[i].Label, int64(m.distributions[i].Get()))
	}
	return true
}
-------------------------------------------------------------------------------- /bulk_data_gen/iot/air_quality_room.go: --------------------------------------------------------------------------------
package iot

import (
	. "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common"
	"time"
)

var (
	AirQualityRoomByteString = []byte("air_quality_room") // heap optimization
)

var (
	// Field keys for 'air quality indoor' points.
14 | AirQualityRoomFieldKeys = [][]byte{ 15 | []byte("co2_level"), 16 | []byte("co_level"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type AirQualityRoomMeasurement struct { 22 | sensorId []byte 23 | timestamp time.Time 24 | distributions []Distribution 25 | } 26 | 27 | func NewAirQualityRoomMeasurement(start time.Time, id []byte) *AirQualityRoomMeasurement { 28 | distributions := make([]Distribution, len(AirQualityRoomFieldKeys)) 29 | //co2_level 30 | distributions[0] = MUDWD(ND(0, 1), 200, 3000, 300) 31 | //co_level 32 | distributions[1] = MUDWD(ND(0.001, 0.0001), 0, 10, 0) 33 | //battery_voltage 34 | distributions[2] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 35 | 36 | return &AirQualityRoomMeasurement{ 37 | timestamp: start, 38 | distributions: distributions, 39 | sensorId: id, 40 | } 41 | } 42 | 43 | func (m *AirQualityRoomMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *AirQualityRoomMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(AirQualityRoomByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 54 | for i := range m.distributions { 55 | p.AppendField(AirQualityRoomFieldKeys[i], m.distributions[i].Get()) 56 | } 57 | return true 58 | } 59 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/air_temp_hum_indoor.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | AirConditionRoomByteString = []byte("air_condition_room") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air condition indoor' points. 
14 | AirConditionRoomFieldKeys = [][]byte{ 15 | []byte("temperature"), 16 | []byte("humidity"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type AirConditionRoomMeasurement struct { 22 | sensorId []byte 23 | timestamp time.Time 24 | distributions []Distribution 25 | } 26 | 27 | func NewAirConditionRoomMeasurement(start time.Time, id []byte) *AirConditionRoomMeasurement { 28 | distributions := make([]Distribution, len(AirConditionRoomFieldKeys)) 29 | //temperature 30 | distributions[0] = MUDWD(ND(0, 1), 15, 28, 15) 31 | //humidity 32 | distributions[1] = MUDWD(ND(0, 1), 25, 60, 40) 33 | //battery_voltage 34 | distributions[2] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 35 | 36 | return &AirConditionRoomMeasurement{ 37 | timestamp: start, 38 | distributions: distributions, 39 | sensorId: id, 40 | } 41 | } 42 | 43 | func (m *AirConditionRoomMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *AirConditionRoomMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(AirConditionRoomByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 54 | for i := range m.distributions { 55 | p.AppendField(AirConditionRoomFieldKeys[i], m.distributions[i].Get()) 56 | } 57 | return true 58 | } 59 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/air_temp_hum_outdoor.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | AirConditionOutdoorByteString = []byte("air_condition_outdoor") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air condition indoor' points. 
14 | AirConditionOutdoorFieldKeys = [][]byte{ 15 | []byte("temperature"), 16 | []byte("humidity"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type AirConditionOutdoorMeasurement struct { 22 | sensorId []byte 23 | timestamp time.Time 24 | distributions []Distribution 25 | } 26 | 27 | func NewAirConditionOutdoorMeasurement(start time.Time, id []byte) *AirConditionOutdoorMeasurement { 28 | distributions := make([]Distribution, len(AirConditionOutdoorFieldKeys)) 29 | //temperature 30 | distributions[0] = MUDWD(ND(0, 1), -20, 28, 0) 31 | //humidity 32 | distributions[1] = MUDWD(ND(0, 1), 5, 95, 80) 33 | //battery_voltage 34 | distributions[2] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 35 | 36 | return &AirConditionOutdoorMeasurement{ 37 | timestamp: start, 38 | distributions: distributions, 39 | sensorId: id, 40 | } 41 | } 42 | 43 | func (m *AirConditionOutdoorMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *AirConditionOutdoorMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(AirConditionOutdoorByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 54 | for i := range m.distributions { 55 | p.AppendField(AirConditionOutdoorFieldKeys[i], m.distributions[i].Get()) 56 | } 57 | return true 58 | } 59 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/door.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | DoorByteString = []byte("door_state") // heap optimization 10 | DoorTagKey = []byte("door_id") 11 | ) 12 | 13 | var ( 14 | // Field keys for 'air condition indoor' points. 
15 | DoorFieldKeys = [][]byte{ 16 | []byte("state"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type DoorMeasurement struct { 22 | sensorId []byte 23 | doorId []byte 24 | timestamp time.Time 25 | distributions []Distribution 26 | } 27 | 28 | func NewDoorMeasurement(start time.Time, doorId []byte, sendorId []byte) *DoorMeasurement { 29 | distributions := make([]Distribution, len(DoorFieldKeys)) 30 | //state 31 | distributions[0] = TSD(0, 1, 0) 32 | //battery_voltage 33 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 34 | 35 | return &DoorMeasurement{ 36 | timestamp: start, 37 | distributions: distributions, 38 | sensorId: sendorId, 39 | doorId: doorId, 40 | } 41 | } 42 | 43 | func (m *DoorMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *DoorMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(DoorByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(DoorTagKey, m.doorId) 54 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 55 | for i := range m.distributions { 56 | p.AppendField(DoorFieldKeys[i], m.distributions[i].Get()) 57 | } 58 | return true 59 | } 60 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/home_config.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | var ( 10 | HomeConfigByteString = []byte("home_config") // heap optimization 11 | ) 12 | 13 | var ( 14 | // Field keys for 'air condition indoor' points. 
15 | HomeConfigFieldKeys = [][]byte{ 16 | []byte("config_string"), 17 | } 18 | ) 19 | 20 | type HomeConfigMeasurement struct { 21 | lastChange time.Time 22 | changeInterval time.Duration 23 | sensorId []byte 24 | timestamp time.Time 25 | config []byte 26 | updateValue bool 27 | } 28 | 29 | func NewHomeConfigMeasurement(start time.Time, id []byte) *HomeConfigMeasurement { 30 | 31 | return &HomeConfigMeasurement{ 32 | timestamp: start, 33 | lastChange: start, 34 | sensorId: id, 35 | config: genRandomString(), 36 | changeInterval: time.Hour * time.Duration(rand.Int63n(12)+1), 37 | } 38 | } 39 | 40 | func (m *HomeConfigMeasurement) Tick(d time.Duration) { 41 | m.timestamp = m.timestamp.Add(d) 42 | //change config only in random 12 hours interval 43 | if m.timestamp.Sub(m.lastChange) > m.changeInterval { 44 | m.config = genRandomString() 45 | m.changeInterval = time.Hour * time.Duration(rand.Int63n(12)+1) 46 | m.updateValue = true 47 | m.lastChange = m.timestamp 48 | } else { 49 | m.updateValue = false 50 | } 51 | } 52 | 53 | func (m *HomeConfigMeasurement) ToPoint(p *Point) bool { 54 | if m.updateValue { 55 | p.SetMeasurementName(HomeConfigByteString) 56 | p.SetTimestamp(&m.timestamp) 57 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 58 | p.AppendField(HomeConfigFieldKeys[0], m.config) 59 | } 60 | return m.updateValue 61 | } 62 | 63 | func genRandomString() []byte { 64 | //len 10-20k 65 | len := int((rand.Int63n(10) + 10) * 1024) 66 | buff := make([]byte, len) 67 | for i := 0; i < len; i++ { 68 | buff[i] = byte(rand.Int63n(87) + 40) 69 | for buff[i] == 92 { 70 | buff[i] = byte(rand.Int63n(87) + 40) 71 | } 72 | } 73 | return buff 74 | } 75 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/home_state.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . 
"github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | var HomeStates = [][]byte{ 10 | []byte("Empty"), 11 | []byte("Half"), 12 | []byte("Full"), 13 | } 14 | 15 | var ( 16 | HomeStateByteString = []byte("home_state") // heap optimization 17 | ) 18 | 19 | var ( 20 | // Field keys for 'air condition indoor' points. 21 | HomeStateFieldKeys = [][]byte{ 22 | []byte("state"), 23 | []byte("state_string"), 24 | } 25 | ) 26 | 27 | type HomeStateMeasurement struct { 28 | sensorId []byte 29 | timestamp time.Time 30 | state int64 31 | } 32 | 33 | func NewHomeStateMeasurement(start time.Time, id []byte) *HomeStateMeasurement { 34 | 35 | return &HomeStateMeasurement{ 36 | timestamp: start, 37 | sensorId: id, 38 | } 39 | } 40 | 41 | func (m *HomeStateMeasurement) Tick(d time.Duration) { 42 | m.timestamp = m.timestamp.Add(d) 43 | m.state = rand.Int63n(int64(len(HomeStates))) 44 | } 45 | 46 | func (m *HomeStateMeasurement) ToPoint(p *Point) bool { 47 | p.SetMeasurementName(HomeStateByteString) 48 | p.SetTimestamp(&m.timestamp) 49 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 50 | p.AppendField(HomeStateFieldKeys[0], m.state) 51 | p.AppendField(HomeStateFieldKeys[1], HomeStates[m.state]) 52 | return true 53 | } 54 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/light_level.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | LightLevelRoomByteString = []byte("light_level_room") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air quality indoor' points. 
14 | LightLevelRoomFieldKeys = [][]byte{ 15 | []byte("level"), 16 | []byte("battery_voltage"), 17 | } 18 | ) 19 | 20 | type LightLevelRoomMeasurement struct { 21 | sensorId []byte 22 | timestamp time.Time 23 | distributions []Distribution 24 | } 25 | 26 | func NewLightLevelRoomMeasurement(start time.Time, id []byte) *LightLevelRoomMeasurement { 27 | distributions := make([]Distribution, len(LightLevelRoomFieldKeys)) 28 | //level 29 | distributions[0] = MUDWD(ND(0, 1), 0.00001, 1e5, 10000) 30 | //battery_voltage 31 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 32 | 33 | return &LightLevelRoomMeasurement{ 34 | timestamp: start, 35 | distributions: distributions, 36 | sensorId: id, 37 | } 38 | } 39 | 40 | func (m *LightLevelRoomMeasurement) Tick(d time.Duration) { 41 | m.timestamp = m.timestamp.Add(d) 42 | for i := range m.distributions { 43 | m.distributions[i].Advance() 44 | } 45 | } 46 | 47 | func (m *LightLevelRoomMeasurement) ToPoint(p *Point) bool { 48 | p.SetMeasurementName(LightLevelRoomByteString) 49 | p.SetTimestamp(&m.timestamp) 50 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 51 | for i := range m.distributions { 52 | p.AppendField(LightLevelRoomFieldKeys[i], m.distributions[i].Get()) 53 | } 54 | return true 55 | } 56 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/radiator_valve.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | RadiatorValveRoomByteString = []byte("radiator_valve_room") // heap optimization 10 | RadiatorTagKey = []byte("radiator") 11 | ) 12 | 13 | var ( 14 | // Field keys for 'air quality indoor' points. 
15 | RadiatorValveRoomFieldKeys = [][]byte{ 16 | []byte("opening_level"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type RadiatorValveRoomMeasurement struct { 22 | sensorId []byte 23 | randiatorId []byte 24 | timestamp time.Time 25 | distributions []Distribution 26 | } 27 | 28 | func NewRadiatorValveRoomMeasurement(start time.Time, randiatorId []byte, sensorId []byte) *RadiatorValveRoomMeasurement { 29 | distributions := make([]Distribution, len(RadiatorValveRoomFieldKeys)) 30 | //opening_level 31 | distributions[0] = CWD(ND(0, 1), 0.0, 100, 0) 32 | //battery_voltage 33 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 34 | 35 | return &RadiatorValveRoomMeasurement{ 36 | timestamp: start, 37 | distributions: distributions, 38 | sensorId: sensorId, 39 | randiatorId: randiatorId, 40 | } 41 | } 42 | 43 | func (m *RadiatorValveRoomMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *RadiatorValveRoomMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(RadiatorValveRoomByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(RadiatorTagKey, m.randiatorId) 54 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 55 | for i := range m.distributions { 56 | p.AppendField(RadiatorValveRoomFieldKeys[i], m.distributions[i].Get()) 57 | } 58 | return true 59 | } 60 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/water_leakage_room.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | WaterLeakageRoomByteString = []byte("water_leakage_room") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air condition indoor' points. 
14 | WaterLeakageRoomFieldKeys = [][]byte{ 15 | []byte("leakage"), 16 | []byte("battery_voltage"), 17 | } 18 | ) 19 | 20 | type WaterLeakageRoomMeasurement struct { 21 | sensorId []byte 22 | roomId []byte 23 | timestamp time.Time 24 | distributions []Distribution 25 | } 26 | 27 | func NewWaterLeakageRoomMeasurement(start time.Time, roomId []byte, sensorId []byte) *WaterLeakageRoomMeasurement { 28 | distributions := make([]Distribution, len(WaterLeakageRoomFieldKeys)) 29 | //state 30 | distributions[0] = TSD(0, 1, 0) 31 | //battery_voltage 32 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 33 | 34 | return &WaterLeakageRoomMeasurement{ 35 | timestamp: start, 36 | distributions: distributions, 37 | sensorId: sensorId, 38 | roomId: roomId, 39 | } 40 | } 41 | 42 | func (m *WaterLeakageRoomMeasurement) Tick(d time.Duration) { 43 | m.timestamp = m.timestamp.Add(d) 44 | for i := range m.distributions { 45 | m.distributions[i].Advance() 46 | } 47 | } 48 | 49 | func (m *WaterLeakageRoomMeasurement) ToPoint(p *Point) bool { 50 | p.SetMeasurementName(WaterLeakageRoomByteString) 51 | p.SetTimestamp(&m.timestamp) 52 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 53 | p.AppendTag(RoomTagKey, m.roomId) 54 | for i := range m.distributions { 55 | p.AppendField(WaterLeakageRoomFieldKeys[i], m.distributions[i].Get()) 56 | } 57 | return true 58 | } 59 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/water_level.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | WaterLevelByteString = []byte("water_level") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air quality indoor' points. 
14 | WaterLevelFieldKeys = [][]byte{ 15 | []byte("level"), 16 | []byte("battery_voltage"), 17 | } 18 | ) 19 | 20 | type WaterLevelMeasurement struct { 21 | sensorId []byte 22 | timestamp time.Time 23 | distributions []Distribution 24 | } 25 | 26 | func NewWaterLevelMeasurement(start time.Time, id []byte) *WaterLevelMeasurement { 27 | distributions := make([]Distribution, len(WaterLevelFieldKeys)) 28 | //level 29 | distributions[0] = MUDWD(ND(0, 1), 0.0, 8000, 5000) 30 | //battery_voltage 31 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 32 | 33 | return &WaterLevelMeasurement{ 34 | timestamp: start, 35 | distributions: distributions, 36 | sensorId: id, 37 | } 38 | } 39 | 40 | func (m *WaterLevelMeasurement) Tick(d time.Duration) { 41 | m.timestamp = m.timestamp.Add(d) 42 | for i := range m.distributions { 43 | m.distributions[i].Advance() 44 | } 45 | } 46 | 47 | func (m *WaterLevelMeasurement) ToPoint(p *Point) bool { 48 | p.SetMeasurementName(WaterLevelByteString) 49 | p.SetTimestamp(&m.timestamp) 50 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 51 | for i := range m.distributions { 52 | p.AppendField(WaterLevelFieldKeys[i], m.distributions[i].Get()) 53 | } 54 | return true 55 | } 56 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/weather_outdoor.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | WeatherOutdoorByteString = []byte("weather_outdoor") // heap optimization 10 | ) 11 | 12 | var ( 13 | // Field keys for 'air condition indoor' points. 
14 | WeatherOutdoorFieldKeys = [][]byte{ 15 | []byte("pressure"), 16 | []byte("wind_speed"), 17 | []byte("wind_direction"), 18 | []byte("precipitation"), 19 | []byte("battery_voltage"), 20 | } 21 | ) 22 | 23 | type WeatherOutdoorMeasurement struct { 24 | sensorId []byte 25 | timestamp time.Time 26 | distributions []Distribution 27 | } 28 | 29 | func NewWeatherOutdoorMeasurement(start time.Time, id []byte) *WeatherOutdoorMeasurement { 30 | distributions := make([]Distribution, len(WeatherOutdoorFieldKeys)) 31 | //pressure 32 | distributions[0] = CWD(ND(0, 10), 900, 1200, 1000) 33 | //wind_speed 34 | distributions[1] = CWD(ND(0, 1), 0, 60, 0) 35 | //wind_direction 36 | distributions[2] = CWD(ND(0, 1), 0, 359, 90) 37 | //precipitation 38 | distributions[3] = MUDWD(ND(0, 1), 5, 95, 80) 39 | //battery_voltage 40 | distributions[4] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 41 | 42 | return &WeatherOutdoorMeasurement{ 43 | timestamp: start, 44 | distributions: distributions, 45 | sensorId: id, 46 | } 47 | } 48 | 49 | func (m *WeatherOutdoorMeasurement) Tick(d time.Duration) { 50 | m.timestamp = m.timestamp.Add(d) 51 | for i := range m.distributions { 52 | m.distributions[i].Advance() 53 | } 54 | } 55 | 56 | func (m *WeatherOutdoorMeasurement) ToPoint(p *Point) bool { 57 | p.SetMeasurementName(WeatherOutdoorByteString) 58 | p.SetTimestamp(&m.timestamp) 59 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 60 | for i := range m.distributions { 61 | p.AppendField(WeatherOutdoorFieldKeys[i], m.distributions[i].Get()) 62 | } 63 | return true 64 | } 65 | -------------------------------------------------------------------------------- /bulk_data_gen/iot/window_room.go: -------------------------------------------------------------------------------- 1 | package iot 2 | 3 | import ( 4 | . 
"github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 5 | "time" 6 | ) 7 | 8 | var ( 9 | WindowByteString = []byte("window_state_room") // heap optimization 10 | WindowTagKey = []byte("window_id") 11 | ) 12 | 13 | var ( 14 | // Field keys for 'air condition indoor' points. 15 | WindowFieldKeys = [][]byte{ 16 | []byte("state"), 17 | []byte("battery_voltage"), 18 | } 19 | ) 20 | 21 | type WindowMeasurement struct { 22 | sensorId []byte 23 | windowId []byte 24 | timestamp time.Time 25 | distributions []Distribution 26 | } 27 | 28 | func NewWindowMeasurement(start time.Time, windowId []byte, sensorId []byte) *WindowMeasurement { 29 | distributions := make([]Distribution, len(WindowFieldKeys)) 30 | //state 31 | distributions[0] = TSD(0, 1, 0) 32 | //battery_voltage 33 | distributions[1] = MUDWD(ND(0.01, 0.005), 1, 3.2, 3.2) 34 | 35 | return &WindowMeasurement{ 36 | timestamp: start, 37 | distributions: distributions, 38 | sensorId: sensorId, 39 | windowId: windowId, 40 | } 41 | } 42 | 43 | func (m *WindowMeasurement) Tick(d time.Duration) { 44 | m.timestamp = m.timestamp.Add(d) 45 | for i := range m.distributions { 46 | m.distributions[i].Advance() 47 | } 48 | } 49 | 50 | func (m *WindowMeasurement) ToPoint(p *Point) bool { 51 | p.SetMeasurementName(WindowByteString) 52 | p.SetTimestamp(&m.timestamp) 53 | p.AppendTag(SensorHomeTagKeys[0], m.sensorId) 54 | p.AppendTag(WindowTagKey, m.windowId) 55 | for i := range m.distributions { 56 | p.AppendField(WindowFieldKeys[i], m.distributions[i].Get()) 57 | } 58 | return true 59 | } 60 | -------------------------------------------------------------------------------- /bulk_data_gen/multi_measurement/generate_data.go: -------------------------------------------------------------------------------- 1 | package multiMeasurement 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "math/rand" 8 | 9 | "github.com/influxdata/influxdb-comparisons/bulk_data_gen/common" 10 | ) 11 | 12 | const MeasSig = "Measurement-%d" 13 | const 
FieldSig = "Field-%d" 14 | const NumFields = 1 // number of fields for each measurement 15 | const MeasMultiplier = 50 // scaleVar * measMultiplier = number of unique measurements 16 | 17 | type MeasurementSimulatorConfig struct { 18 | Start time.Time 19 | End time.Time 20 | 21 | ScaleFactor int 22 | } 23 | 24 | func (d *MeasurementSimulatorConfig) ToSimulator() *MeasurementSimulator { 25 | s := d.ScaleFactor * MeasMultiplier // number of measurements to create 26 | 27 | dg := &MeasurementSimulator{ 28 | madePoints: 0, 29 | madeValues: 0, 30 | maxPoints: int64(s * 50), // 50 points per measurement, or approx. 1 per shard for a year of data 31 | 32 | fieldList: make([][]byte, NumFields), 33 | measList: make([][]byte, s), 34 | 35 | timestampNow: d.Start, 36 | timestampStart: d.Start, 37 | timestampEnd: d.End, 38 | } 39 | 40 | for i := 0; i < s; i++ { 41 | dg.measList[i] = []byte(fmt.Sprintf(MeasSig, i)) 42 | } 43 | 44 | for i := 0; i < NumFields; i++ { 45 | dg.fieldList[i] = []byte(fmt.Sprintf(FieldSig, i)) 46 | } 47 | 48 | dg.stepTime = time.Duration(int64(dg.timestampEnd.Sub(dg.timestampStart)) / dg.maxPoints) 49 | 50 | return dg 51 | } 52 | 53 | // MeasurementSimulator fullfills the Simulator interface. 54 | type MeasurementSimulator struct { 55 | madePoints int64 56 | maxPoints int64 57 | madeValues int64 58 | 59 | fieldList [][]byte 60 | measList [][]byte 61 | 62 | timestampNow time.Time 63 | timestampStart time.Time 64 | timestampEnd time.Time 65 | stepTime time.Duration 66 | } 67 | 68 | func (g *MeasurementSimulator) SeenPoints() int64 { 69 | return g.madePoints 70 | } 71 | 72 | func (g *MeasurementSimulator) SeenValues() int64 { 73 | return g.madeValues 74 | } 75 | 76 | func (g *MeasurementSimulator) Total() int64 { 77 | return g.maxPoints 78 | } 79 | 80 | func (g *MeasurementSimulator) Finished() bool { 81 | return g.madePoints >= g.maxPoints 82 | } 83 | 84 | // Next advances a Point to the next state in the generator. 
85 | func (g *MeasurementSimulator) Next(p *common.Point) { 86 | p.SetMeasurementName(g.measList[rand.Intn(len(g.measList))]) 87 | p.SetTimestamp(&g.timestampNow) 88 | 89 | for _, f := range g.fieldList { 90 | p.AppendField(f, rand.Float64()) 91 | } 92 | 93 | g.madePoints++ 94 | g.madeValues += int64(len(g.fieldList)) 95 | g.timestampNow = g.timestampNow.Add(g.stepTime) 96 | } 97 | -------------------------------------------------------------------------------- /bulk_load/process.go: -------------------------------------------------------------------------------- 1 | package bulk_load 2 | 3 | import ( 4 | "github.com/influxdata/influxdb-comparisons/util/report" 5 | "sync" 6 | ) 7 | 8 | type BatchProcessor interface { 9 | PrepareProcess(i int) 10 | RunProcess(i int, waitGroup *sync.WaitGroup, telemetryPoints chan *report.Point, reportTags [][2]string) error 11 | AfterRunProcess(i int) 12 | EmptyBatchChanel() 13 | } 14 | -------------------------------------------------------------------------------- /bulk_load/scan.go: -------------------------------------------------------------------------------- 1 | package bulk_load 2 | 3 | import "io" 4 | 5 | type Scanner interface { 6 | RunScanner(r io.Reader, syncChanDone chan int) 7 | IsScanFinished() bool 8 | GetReadStatistics() (itemsRead, bytesRead, valuesRead int64) 9 | } 10 | -------------------------------------------------------------------------------- /bulk_load/sync.go: -------------------------------------------------------------------------------- 1 | package bulk_load 2 | 3 | import "errors" 4 | 5 | type NotifyHandler func(arg int) (int, error) 6 | 7 | var handler NotifyHandler 8 | 9 | func RegisterHandler(notifHandler NotifyHandler) { 10 | handler = notifHandler 11 | } 12 | 13 | type NotifyReceiver struct { 14 | } 15 | 16 | func (t *NotifyReceiver) Notify(args *int, reply *int) error { 17 | var e error 18 | if handler != nil { 19 | var r int 20 | r, e = handler(*args) 21 | *reply = r 22 | } else { 23 | e = 
errors.New("no handler registered") 24 | } 25 | return e 26 | } 27 | -------------------------------------------------------------------------------- /bulk_query/http/common.go: -------------------------------------------------------------------------------- 1 | package http 2 | 3 | import "time" 4 | 5 | const DefaultIdleConnectionTimeout = 90 * time.Second 6 | 7 | var UseFastHttp = true 8 | var idleConnectionTimeout = DefaultIdleConnectionTimeout 9 | 10 | // HTTPClient is a reusable HTTP Client. 11 | type HTTPClientCommon struct { 12 | Host []byte 13 | HostString string 14 | debug int 15 | } 16 | 17 | // HTTPClientDoOptions wraps options uses when calling `Do`. 18 | type HTTPClientDoOptions struct { 19 | ContentType string 20 | Authorization string 21 | Debug int 22 | PrettyPrintResponses bool 23 | Path []byte 24 | AuthToken string 25 | Accept string 26 | } 27 | 28 | // HTTPClient interface. 29 | type HTTPClient interface { 30 | HostString() string 31 | Do(q *Query, opts *HTTPClientDoOptions) (lag float64, err error) 32 | } 33 | 34 | func NewHTTPClient(host string, debug int, dialTimeout time.Duration, readTimeout time.Duration, writeTimeout time.Duration) HTTPClient { 35 | if UseFastHttp { 36 | return NewFastHTTPClient(host, debug, dialTimeout, readTimeout, writeTimeout) 37 | } else { 38 | return NewDefaultHTTPClient(host, debug, dialTimeout, readTimeout, writeTimeout) 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /bulk_query/http/query.go: -------------------------------------------------------------------------------- 1 | package http 2 | 3 | import "fmt" 4 | 5 | // Query holds HTTP request data, typically decoded from the program's input. 6 | type Query struct { 7 | HumanLabel []byte 8 | HumanDescription []byte 9 | Method []byte 10 | Path []byte 11 | Body []byte 12 | ID int64 13 | } 14 | 15 | // String produces a debug-ready description of a Query. 
16 | func (q *Query) String() string { 17 | return fmt.Sprintf("ID: %d, HumanLabel: %s, HumanDescription: %s, Method: %s, Path: %s, Body:%s", q.ID, q.HumanLabel, q.HumanDescription, q.Method, q.Path, q.Body) 18 | } 19 | -------------------------------------------------------------------------------- /bulk_query/process.go: -------------------------------------------------------------------------------- 1 | package bulk_query 2 | 3 | import "sync" 4 | 5 | type Processor interface { 6 | PrepareProcess(i int) 7 | RunProcess(i int, workersGroup *sync.WaitGroup, statPool sync.Pool, statChan chan *Stat) // NOTE(review): statPool is a sync.Pool passed by value; Pool must not be copied after first use — consider *sync.Pool (signature left unchanged to avoid breaking implementers) 8 | } 9 | -------------------------------------------------------------------------------- /bulk_query/scan.go: -------------------------------------------------------------------------------- 1 | package bulk_query 2 | 3 | import "io" 4 | 5 | type Scanner interface { 6 | RunScan(r io.Reader, closeChan chan int) 7 | IsScanFinished() bool 8 | } 9 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/cassandra_devops_8_hosts.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | "time" 6 | ) 7 | 8 | // CassandraDevops8Hosts produces Cassandra-specific queries for the devops groupby case. 
9 | type CassandraDevops8Hosts struct { 10 | CassandraDevops 11 | } 12 | 13 | func NewCassandraDevops8Hosts(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | underlying := newCassandraDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*CassandraDevops) 15 | return &CassandraDevops8Hosts{ 16 | CassandraDevops: *underlying, 17 | } 18 | } 19 | 20 | func (d *CassandraDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 21 | q := NewCassandraQuery() // from pool 22 | d.MaxCPUUsageHourByMinuteEightHosts(q) 23 | return q 24 | } 25 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/cassandra_devops_groupby.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // CassandraDevopsGroupby produces Cassandra-specific queries for the devops groupby case. 
7 | type CassandraDevopsGroupby struct { 8 | CassandraDevops 9 | } 10 | 11 | func NewCassandraDevopsGroupBy(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newCassandraDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*CassandraDevops) 13 | return &CassandraDevopsGroupby{ 14 | CassandraDevops: *underlying, 15 | } 16 | 17 | } 18 | 19 | func (d *CassandraDevopsGroupby) Dispatch(i int) bulkQuerygen.Query { 20 | q := NewCassandraQuery() // from pool 21 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 22 | return q 23 | } 24 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/cassandra_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // CassandraDevopsSingleHost produces Cassandra-specific queries for the devops single-host case. 
7 | type CassandraDevopsSingleHost struct { 8 | CassandraDevops 9 | } 10 | 11 | func NewCassandraDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newCassandraDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*CassandraDevops) 13 | return &CassandraDevopsSingleHost{ 14 | CassandraDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *CassandraDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewCassandraQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/cassandra_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // CassandraDevopsSingleHost12hr produces Cassandra-specific queries for the devops single-host case. 
7 | type CassandraDevopsSingleHost12hr struct { 8 | CassandraDevops 9 | } 10 | 11 | func NewCassandraDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newCassandraDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*CassandraDevops) 13 | return &CassandraDevopsSingleHost12hr{ 14 | CassandraDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *CassandraDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewCassandraQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/cassandra_iot_singlehost.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // CassandraIotSingleHost produces Cassandra-specific queries for the iot single-host case. 
7 | type CassandraIotSingleHost struct { 8 | CassandraIot 9 | } 10 | 11 | func NewCassandraIotSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newCassandraIotCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*CassandraIot) 13 | return &CassandraIotSingleHost{ 14 | CassandraIot: *underlying, 15 | } 16 | } 17 | 18 | func (d *CassandraIotSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewCassandraQuery() // from pool 20 | d.AverageTemperatureDayByHourOneHome(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/cassandra/query.go: -------------------------------------------------------------------------------- 1 | package cassandra 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | // CassandraQuery encodes a Cassandra request. This will be serialized for use 10 | // by the query_benchmarker program. 11 | type CassandraQuery struct { 12 | HumanLabel []byte 13 | HumanDescription []byte 14 | 15 | MeasurementName []byte // e.g. "cpu" 16 | FieldName []byte // e.g. "usage_user" 17 | AggregationType []byte // e.g. "avg" or "sum". used literally in the cassandra query. 18 | TimeStart time.Time 19 | TimeEnd time.Time 20 | GroupByDuration time.Duration 21 | TagsCondition []byte 22 | } 23 | 24 | var CassandraQueryPool sync.Pool = sync.Pool{ 25 | New: func() interface{} { 26 | return &CassandraQuery{ 27 | HumanLabel: []byte{}, 28 | HumanDescription: []byte{}, 29 | MeasurementName: []byte{}, 30 | FieldName: []byte{}, 31 | AggregationType: []byte{}, 32 | TagsCondition: []byte{}, 33 | } 34 | }, 35 | } 36 | 37 | func NewCassandraQuery() *CassandraQuery { 38 | return CassandraQueryPool.Get().(*CassandraQuery) 39 | } 40 | 41 | // String produces a debug-ready description of a Query. 
42 | func (q *CassandraQuery) String() string { 43 | return fmt.Sprintf("HumanLabel: %s, HumanDescription: %s, MeasurementName: %s, AggregationType: %s, TimeStart: %s, TimeEnd: %s, GroupByDuration: %s, TagSets: %s", q.HumanLabel, q.HumanDescription, q.MeasurementName, q.AggregationType, q.TimeStart, q.TimeEnd, q.GroupByDuration, q.TagsCondition) 44 | } 45 | 46 | func (q *CassandraQuery) HumanLabelName() []byte { 47 | return q.HumanLabel 48 | } 49 | func (q *CassandraQuery) HumanDescriptionName() []byte { 50 | return q.HumanDescription 51 | } 52 | 53 | func (q *CassandraQuery) Release() { 54 | q.HumanLabel = q.HumanLabel[:0] 55 | q.HumanDescription = q.HumanDescription[:0] 56 | 57 | q.MeasurementName = q.MeasurementName[:0] 58 | q.FieldName = q.FieldName[:0] 59 | q.AggregationType = q.AggregationType[:0] 60 | q.GroupByDuration = 0 61 | q.TimeStart = time.Time{} 62 | q.TimeEnd = time.Time{} 63 | q.TagsCondition = q.TagsCondition[:0] 64 | 65 | CassandraQueryPool.Put(q) 66 | } 67 | -------------------------------------------------------------------------------- /bulk_query_gen/common_params.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | type CommonParams struct { 4 | AllInterval TimeInterval 5 | ScaleVar int 6 | } 7 | 8 | func NewCommonParams(interval TimeInterval, scaleVar int) *CommonParams { 9 | return &CommonParams{ 10 | AllInterval: interval, 11 | ScaleVar: scaleVar, 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /bulk_query_gen/database_config.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | type DatabaseConfig map[string]string 4 | 5 | const ( 6 | DatabaseName = "database-name" 7 | ) 8 | -------------------------------------------------------------------------------- /bulk_query_gen/devops.go: 
-------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | // Devops describes a devops query generator. 4 | type Devops interface { 5 | MaxCPUUsageHourByMinuteOneHost(Query) 6 | MaxCPUUsageHourByMinuteTwoHosts(Query) 7 | MaxCPUUsageHourByMinuteFourHosts(Query) 8 | MaxCPUUsageHourByMinuteEightHosts(Query) 9 | MaxCPUUsageHourByMinuteSixteenHosts(Query) 10 | MaxCPUUsageHourByMinuteThirtyTwoHosts(Query) 11 | 12 | MaxCPUUsage12HoursByMinuteOneHost(Query) 13 | 14 | MeanCPUUsageDayByHourAllHostsGroupbyHost(Query) 15 | 16 | //CountCPUUsageDayByHourAllHostsGroupbyHost(Query) 17 | 18 | Dispatch(int) Query 19 | } 20 | 21 | // devopsDispatchAll round-robins through the different devops queries. 22 | func DevopsDispatchAll(d Devops, iteration int, q Query, scaleVar int) { 23 | if scaleVar <= 0 { 24 | panic("logic error: bad scalevar") 25 | } 26 | mod := 1 27 | if scaleVar >= 2 { 28 | mod++ 29 | } 30 | if scaleVar >= 4 { 31 | mod++ 32 | } 33 | if scaleVar >= 8 { 34 | mod++ 35 | } 36 | if scaleVar >= 16 { 37 | mod++ 38 | } 39 | if scaleVar >= 32 { 40 | mod++ 41 | } 42 | 43 | switch iteration % mod { 44 | case 0: 45 | d.MaxCPUUsageHourByMinuteOneHost(q) 46 | case 1: 47 | d.MaxCPUUsageHourByMinuteTwoHosts(q) 48 | case 2: 49 | d.MaxCPUUsageHourByMinuteFourHosts(q) 50 | case 3: 51 | d.MaxCPUUsageHourByMinuteEightHosts(q) 52 | case 4: 53 | d.MaxCPUUsageHourByMinuteSixteenHosts(q) 54 | case 5: 55 | d.MaxCPUUsageHourByMinuteThirtyTwoHosts(q) 56 | default: 57 | panic("logic error in switch statement") 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /bulk_query_gen/elasticsearch/es_devops_8_hosts.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // ElasticSearchDevops8Hosts produces ElasticSearch-specific 
queries for the devops groupby case. 7 | type ElasticSearchDevops8Hosts struct { 8 | ElasticSearchDevops 9 | } 10 | 11 | func NewElasticSearchDevops8Hosts(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewElasticSearchDevops(queriesFullRange, scaleVar).(*ElasticSearchDevops) 13 | return &ElasticSearchDevops8Hosts{ 14 | ElasticSearchDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *ElasticSearchDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/elasticsearch/es_devops_groupby.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // ElasticSearchDevopsGroupBy produces ES-specific queries for the devops groupby case. 
7 | type ElasticSearchDevopsGroupBy struct { 8 | ElasticSearchDevops 9 | } 10 | 11 | func NewElasticSearchDevopsGroupBy(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewElasticSearchDevops(queriesFullRange, scaleVar).(*ElasticSearchDevops) 13 | return &ElasticSearchDevopsGroupBy{ 14 | ElasticSearchDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *ElasticSearchDevopsGroupBy) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/elasticsearch/es_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // ElasticSearchDevopsSingleHost produces ES-specific queries for the devops single-host case. 
7 | type ElasticSearchDevopsSingleHost struct { 8 | ElasticSearchDevops 9 | } 10 | 11 | func NewElasticSearchDevopsSingleHost(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewElasticSearchDevops(queriesFullRange, scaleVar).(*ElasticSearchDevops) 13 | return &ElasticSearchDevopsSingleHost{ 14 | ElasticSearchDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *ElasticSearchDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/elasticsearch/es_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // ElasticSearchDevopsSingleHost12hr produces ES-specific queries for the devops single-host case. 
7 | type ElasticSearchDevopsSingleHost12hr struct { 8 | ElasticSearchDevops 9 | } 10 | 11 | func NewElasticSearchDevopsSingleHost12hr(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewElasticSearchDevops(queriesFullRange, scaleVar).(*ElasticSearchDevops) 13 | return &ElasticSearchDevopsSingleHost12hr{ 14 | ElasticSearchDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *ElasticSearchDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/graphite/graphite_common.go: -------------------------------------------------------------------------------- 1 | package graphite 2 | 3 | import ( 4 | "fmt" 5 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 6 | "net/url" 7 | "time" 8 | ) 9 | 10 | type GraphiteCommon struct { 11 | bulkQuerygen.CommonParams 12 | } 13 | 14 | func newGraphiteCommon(interval bulkQuerygen.TimeInterval, scaleVar int) *GraphiteCommon { 15 | return &GraphiteCommon{ 16 | CommonParams: *bulkQuerygen.NewCommonParams(interval, scaleVar), 17 | } 18 | } 19 | 20 | func (d *GraphiteCommon) getHttpQuery(humanLabel, from, until, query string, q *bulkQuerygen.HTTPQuery) { 21 | q.HumanLabel = []byte(humanLabel) 22 | q.HumanDescription = []byte(fmt.Sprintf("%s: %s - %s", humanLabel, from, until)) 23 | 24 | getValues := url.Values{} 25 | getValues.Set("target", query) 26 | q.Method = []byte("GET") 27 | q.Path = []byte(fmt.Sprintf("/render?%s&format=json&noNullPoints=true&from=%s&until=%s", getValues.Encode(), from, until)) 28 | q.Body = nil 29 | } 30 | 31 | func getTimestamp(t time.Time) string { 32 | return fmt.Sprintf("%02d:%02d_%04d%02d%02d", t.Hour(), t.Minute(), t.Year(), t.Month(), t.Day()) 33 | } 34 
| -------------------------------------------------------------------------------- /bulk_query_gen/graphite/graphite_devops_8_hosts.go: -------------------------------------------------------------------------------- 1 | package graphite 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // GraphiteDevops8Hosts produces Graphite-specific queries for the devops groupby case. 7 | type GraphiteDevops8Hosts struct { 8 | GraphiteDevops 9 | } 10 | 11 | func NewGraphiteDevops8Hosts(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newGraphiteDevopsCommon(queriesFullRange, queryInterval, scaleVar).(*GraphiteDevops) 13 | return &GraphiteDevops8Hosts{ 14 | GraphiteDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *GraphiteDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/graphite/graphite_devops_groupby.go: -------------------------------------------------------------------------------- 1 | package graphite 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // GraphiteDevopsGroupby produces Graphite-specific queries for the devops groupby case. 
7 | type GraphiteDevopsGroupby struct { 8 | GraphiteDevops 9 | } 10 | 11 | func NewGraphiteDevopsGroupBy(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newGraphiteDevopsCommon(interval, duration, scaleVar).(*GraphiteDevops) 13 | return &GraphiteDevopsGroupby{ 14 | GraphiteDevops: *underlying, 15 | } 16 | 17 | } 18 | 19 | func (d *GraphiteDevopsGroupby) Dispatch(i int) bulkQuerygen.Query { 20 | q := bulkQuerygen.NewHTTPQuery() // from pool 21 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 22 | return q 23 | } 24 | -------------------------------------------------------------------------------- /bulk_query_gen/graphite/graphite_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package graphite 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // GraphiteDevopsSingleHost produces Graphite-specific queries for the devops single-host case. 
7 | type GraphiteDevopsSingleHost struct { 8 | GraphiteDevops 9 | } 10 | 11 | func NewGraphiteDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newGraphiteDevopsCommon(interval, duration, scaleVar).(*GraphiteDevops) 13 | return &GraphiteDevopsSingleHost{ 14 | GraphiteDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *GraphiteDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/graphite/graphite_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package graphite 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // GraphiteDevopsSingleHost12hr produces Graphite-specific queries for the devops single-host case over a 12hr period. 
7 | type GraphiteDevopsSingleHost12hr struct { 8 | GraphiteDevops 9 | } 10 | 11 | func NewGraphiteDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newGraphiteDevopsCommon(queriesFullRange, queryInterval, scaleVar).(*GraphiteDevops) 13 | return &GraphiteDevopsSingleHost12hr{ 14 | GraphiteDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *GraphiteDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 8 | ) 9 | 10 | type InfluxBareAggregateQuery struct { 11 | InfluxCommon 12 | aggregate Aggregate 13 | } 14 | 15 | func NewInfluxBareAggregateQuery(agg Aggregate, lang Language, dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, scaleVar int) bulkQuerygen.QueryGenerator { 16 | if _, ok := dbConfig[bulkQuerygen.DatabaseName]; !ok { 17 | panic("need influx database name") 18 | } 19 | 20 | return &InfluxBareAggregateQuery{ 21 | InfluxCommon: *newInfluxCommon(lang, dbConfig[bulkQuerygen.DatabaseName], queriesFullRange, scaleVar), 22 | aggregate: agg, 23 | } 24 | } 25 | 26 | func (d *InfluxBareAggregateQuery) Dispatch(i int) bulkQuerygen.Query { 27 | q := bulkQuerygen.NewHTTPQuery() 28 | d.BareAggregateQuery(q) 29 | return q 30 | } 31 | 32 | func (d *InfluxBareAggregateQuery) BareAggregateQuery(qi bulkQuerygen.Query) { 33 | interval := d.AllInterval.RandWindow(time.Hour * 6) 34 | 35 | var query string 36 | if d.language == InfluxQL 
{ 37 | query = fmt.Sprintf("SELECT %s(temperature) FROM air_condition_room WHERE time >= '%s' AND time < '%s' GROUP BY home_id,room_id,sensor_id", 38 | d.aggregate, interval.StartString(), interval.EndString()) 39 | } else { 40 | query = fmt.Sprintf(`from(bucket:"%s") 41 | |> range(start:%s, stop:%s) 42 | |> filter(fn:(r) => r._measurement == "air_condition_room" and r._field == "temperature") 43 | |> %s() 44 | |> yield()`, 45 | d.DatabaseName, 46 | interval.StartString(), interval.EndString(), 47 | d.aggregate) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) %s temperature, rand %s", d.language.String(), d.aggregate, interval.StartString()) 51 | q := qi.(*bulkQuerygen.HTTPQuery) 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | } 54 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_count.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Count, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Count, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_first.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen 
"github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(First, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(First, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_last.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Last, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Last, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_max.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, 
queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Max, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Max, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_mean.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Mean, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Mean, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_min.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Min, InfluxQL, 
dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Min, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_bareagg_sum.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLBareAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxBareAggregateQuery(Sum, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxBareAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxBareAggregateQuery(Sum, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | 7 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 8 | ) 9 | 10 | type Language bool 11 | 12 | const ( 13 | InfluxQL Language = false 14 | Flux Language = true 15 | ) 16 | 17 | func (lang Language) String() string { 18 | if lang == InfluxQL { 19 | return "InfluxQL" 20 | } else { 21 | return "Flux" 22 | } 23 | } 24 | 25 | type Aggregate string 26 | 27 | const ( 28 | Count Aggregate = "count" 29 | Sum Aggregate = "sum" 30 | Mean Aggregate = "mean" 31 | Min 
Aggregate = "min" 32 | Max Aggregate = "max" 33 | First Aggregate = "first" 34 | Last Aggregate = "last" 35 | ) 36 | 37 | type Cardinality string 38 | 39 | const ( 40 | HighCardinality Cardinality = "high-card" 41 | LowCardinality Cardinality = "low-card" 42 | ) 43 | 44 | type InfluxCommon struct { 45 | bulkQuerygen.CommonParams 46 | language Language 47 | DatabaseName string 48 | } 49 | 50 | func newInfluxCommon(lang Language, dbName string, interval bulkQuerygen.TimeInterval, scaleVar int) *InfluxCommon { 51 | return &InfluxCommon{ 52 | CommonParams: *bulkQuerygen.NewCommonParams(interval, scaleVar), 53 | language: lang, 54 | DatabaseName: dbName} 55 | } 56 | 57 | // getHttpQuery gets the right kind of http request based on the language being used 58 | func (d *InfluxCommon) getHttpQuery(humanLabel, intervalStart, query string, q *bulkQuerygen.HTTPQuery) { 59 | q.HumanLabel = []byte(humanLabel) 60 | q.HumanDescription = []byte(fmt.Sprintf("%s: %s", humanLabel, intervalStart)) 61 | q.Language = d.language.String() 62 | 63 | if d.language == InfluxQL { 64 | getValues := url.Values{} 65 | getValues.Set("db", d.DatabaseName) 66 | getValues.Set("q", query) 67 | q.Method = []byte("GET") 68 | q.Path = []byte(fmt.Sprintf("/query?%s", getValues.Encode())) 69 | q.Body = nil 70 | } else { 71 | q.Method = []byte("POST") 72 | //q.Path will be set in query_benchmarker_influxdb 73 | q.Body = []byte(query) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_availability.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardAvailability produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardAvailability struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardAvailability(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardAvailability{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardAvailability(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardAvailability{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardAvailability) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT (sum("service_up") / count("service_up"))*100 AS "up_time" FROM "watcher"."autogen"."ping" WHERE cluster_id = :Cluster_Id: and time > :dashboardTime: FILL(linear) 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT (sum(\"service_up\") / count(\"service_up\"))*100 AS \"up_time\" FROM status WHERE cluster_id = '%s' and %s FILL(linear)", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { // TODO fill(linear) how?? 
36 | query = fmt.Sprintf(`data = from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "status" and r._field == "service_up" and r._cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value"])`+"\n"+ 40 | `sum = data |> sum()`+"\n"+ 41 | `count = data |> count()`+"\n"+ 42 | `join(tables:{sum:sum,count:count},on:["_time"]) `+ 43 | `|> map(fn: (r) => ({_time:r._start_sum,_value:(float(v:r._value_sum) / float(v:r._value_count) * 100.0))) `+ 44 | `|> keep(columns:["_time", "_value"]) `+ 45 | `|> yield()`, 46 | d.DatabaseName, 47 | interval.StartString(), interval.EndString(), 48 | d.GetRandomClusterId()) 49 | } 50 | 51 | humanLabel := fmt.Sprintf("InfluxDB (%s) Availability (Percent), rand cluster in %s", d.language.String(), interval.Duration()) 52 | 53 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 54 | return q 55 | } 56 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "github.com/influxdata/influxdb-comparisons/bulk_data_gen/dashboard" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | "math/rand" 8 | "time" 9 | ) 10 | 11 | // InfluxDashboard produces Influx-specific queries for all the devops query types. 12 | type InfluxDashboard struct { 13 | InfluxCommon 14 | ClustersCount int 15 | bulkQuerygen.TimeWindow 16 | } 17 | 18 | // NewInfluxDashboard makes an InfluxDashboard object ready to generate Queries. 
19 | func newInfluxDashboard(lang Language, dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 20 | if _, ok := dbConfig[bulkQuerygen.DatabaseName]; !ok { 21 | panic("need influx database name") 22 | } 23 | clustersCount := scaleVar / dashboard.ClusterSize //ClusterSizes[len(dashboard.ClusterSizes)/2] 24 | if clustersCount == 0 { 25 | clustersCount = 1 26 | } 27 | return &InfluxDashboard{ 28 | InfluxCommon: *newInfluxCommon(lang, dbConfig[bulkQuerygen.DatabaseName], interval, scaleVar), 29 | ClustersCount: clustersCount, 30 | TimeWindow: bulkQuerygen.TimeWindow{interval.Start, duration}, 31 | } 32 | } 33 | 34 | // Dispatch fulfills the QueryGenerator interface. 35 | func (d *InfluxDashboard) Dispatch(i int) bulkQuerygen.Query { 36 | q := bulkQuerygen.NewHTTPQuery() // from pool 37 | //bulkQuerygen.DevopsDispatchAll(d, i, q, d.ScaleVar) 38 | return q 39 | } 40 | 41 | func (d *InfluxDashboard) DispatchCommon(i int) (*bulkQuerygen.HTTPQuery, *bulkQuerygen.TimeInterval) { 42 | q := bulkQuerygen.NewHTTPQuery() // from pool 43 | var interval bulkQuerygen.TimeInterval 44 | if bulkQuerygen.TimeWindowShift > 0 { 45 | interval = d.TimeWindow.SlidingWindow(&d.AllInterval) 46 | } else { 47 | interval = d.AllInterval.RandWindow(d.Duration) 48 | } 49 | return q, &interval 50 | } 51 | 52 | func (d *InfluxDashboard) GetTimeConstraint(interval *bulkQuerygen.TimeInterval) string { 53 | var s string 54 | switch bulkQuerygen.QueryIntervalType { 55 | case "window": 56 | s = fmt.Sprintf("time >= '%s' and time < '%s'", interval.StartString(), interval.EndString()) 57 | case "last": 58 | s = fmt.Sprintf("time >= now() - %dh and time < now() - %dh", int64(2*interval.Duration().Hours()), int64(interval.Duration().Hours())) 59 | case "recent": 60 | s = fmt.Sprintf("time >= now() - %dh and time < now() - %dh", int64(interval.Duration().Hours() + 24), int64(24)) 61 | } 62 | return s 63 | } 64 | 65 | func 
(d *InfluxDashboard) GetRandomClusterId() string { 66 | return fmt.Sprintf("%d", rand.Intn(d.ClustersCount-1)+1) 67 | } 68 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_cpu_num.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardCpuNum produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardCpuNum struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardCpuNum(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardCpuNum{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardCpuNum(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardCpuNum{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardCpuNum) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT last("max") from (SELECT max("n_cpus") FROM "telegraf"."default"."system" WHERE time > :dashboardTime: and cluster_id = :Cluster_Id: GROUP BY time(1m)) 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT last(\"max\") from (SELECT max(\"n_cpus\") FROM system WHERE cluster_id = '%s' and %s group by time(1m))", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = 
fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "system" and r._field == "n_cpus" and r._cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value"]) `+ 40 | `|> aggregateWindow(every: 1m, fn: max, createEmpty: false) `+ 41 | `|> last() `+ 42 | `|> keep(columns:["_time", "_value"]) `+ 43 | `|> yield()`, 44 | d.DatabaseName, 45 | interval.StartString(), interval.EndString(), 46 | d.GetRandomClusterId()) 47 | } 48 | 49 | humanLabel := fmt.Sprintf("InfluxDB (%s) CPU (Number), rand cluster, %s by 1m", d.language.String(), interval.Duration()) 50 | 51 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 52 | return q 53 | } 54 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_cpu_utilization.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardCpuUtilization produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardCpuUtilization struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardCpuUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardCpuUtilization{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardCpuUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardCpuUtilization{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardCpuUtilization) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT mean("usage_user") FROM "telegraf"."default"."cpu" WHERE time > :dashboardTime: and cluster_id = :Cluster_Id: GROUP BY host, time(1m) 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT mean(\"usage_user\") FROM cpu WHERE cluster_id = '%s' and %s group by hostname,time(1m)", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "cpu" and r._field == "usage_user" and r._cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns:["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: mean, createEmpty: false) `+ 42 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 43 | `|> yield()`, 44 | d.DatabaseName, 45 | interval.StartString(), interval.EndString(), 46 | d.GetRandomClusterId()) 47 | } 48 | 49 | humanLabel := 
fmt.Sprintf("InfluxDB (%s) CPU Utilization (Percent), rand cluster, %s by host, 1m", d.language.String(), interval.Duration()) 50 | 51 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 52 | return q 53 | } 54 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_disk_allocated.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardDiskAllocated produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardDiskAllocated struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardDiskAllocated(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardDiskAllocated{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardDiskAllocated(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardDiskAllocated{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardDiskAllocated) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT last("max") from (SELECT max("total")/1073741824 FROM "telegraf"."default"."disk" WHERE time > :dashboardTime: and cluster_id = :Cluster_Id: and host =~ /.data./ GROUP BY time(120s)) 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT last(\"max\") 
from (SELECT max(\"total\")/1073741824 FROM disk WHERE cluster_id = '%s' and %s and hostname =~ /data/ group by time(120s))", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "disk" and r._field == "total" and r._cluster_id == "%s" and r.hostname =~ /data/) `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value"]) `+ 40 | `|> aggregateWindow(every: 120s, fn: max, createEmpty: false) `+ 41 | `|> map(fn: (r) => ({_time:r._time, _value:r._value / 1073741824})) `+ 42 | `|> last() `+ 43 | `|> keep(columns:["_time", "_value"]) `+ 44 | `|> yield()`, 45 | d.DatabaseName, 46 | interval.StartString(), interval.EndString(), 47 | d.GetRandomClusterId()) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) Disk Allocated (GB), rand cluster, %s by 120s", d.language.String(), interval.Duration()) 51 | 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | return q 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_disk_usage.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardDiskUsage produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardDiskUsage struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardDiskUsage(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardDiskUsage{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardDiskUsage(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardDiskUsage{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardDiskUsage) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT last("used_percent") AS "mean_used_percent" FROM "telegraf"."default"."disk" WHERE time > :dashboardTime: and cluster_id = :Cluster_Id: and host =~ /.data./ 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT last(\"used_percent\") AS \"mean_used_percent\" FROM disk WHERE cluster_id = '%s' and %s and hostname =~ /data/", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "disk" and r._field == "used_percent" and r._cluster_id == "%s" and r.hostname =~ /data/) `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value"]) `+ 40 | `|> last() `+ 41 | `|> keep(columns:["_time", "_value"]) `+ 42 | `|> yield()`, 43 | d.DatabaseName, 44 | interval.StartString(), interval.EndString(), 45 | d.GetRandomClusterId()) 46 | } 47 | 48 | humanLabel := fmt.Sprintf("InfluxDB (%s) Disk Usage (GB), rand cluster, %s", d.language.String(), 
interval.Duration()) 49 | 50 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 51 | return q 52 | } 53 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_disk_utilization.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardDiskUtilization produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardDiskUtilization struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardDiskUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardDiskUtilization{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardDiskUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardDiskUtilization{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardDiskUtilization) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT max("used_percent") FROM "telegraf"."default"."disk" WHERE "cluster_id" = :Cluster_Id: AND "path" = '/influxdb/conf' AND time > :dashboardTime: AND host =~ /.data./ GROUP BY time(1m), "host" 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT max(\"used_percent\") FROM disk WHERE cluster_id = '%s' and \"path\" = '/dev/sda1' and %s AND 
hostname =~ /data/ group by time(1m), \"hostname\"", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "disk" and r._field == "used_percent" and r._cluster_id == "%s" and r.path == "/dev/sda1" and r.hostname =~ /data/) `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> aggregateWindow(every: 1m, fn: max, createEmpty: false) `+ 41 | `|> keep(columns: ["_time","_value", "hostname"]) `+ 42 | `|> group(columns: ["hostname"]) `+ 43 | `|> yield()`, 44 | d.DatabaseName, 45 | interval.StartString(), interval.EndString(), 46 | d.GetRandomClusterId()) 47 | } 48 | 49 | humanLabel := fmt.Sprintf("InfluxDB (%s) Disk Utilization (Percent), rand cluster, %s by 1m", d.language.String(), interval.Duration()) 50 | 51 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 52 | return q 53 | } 54 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_http_requests.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardHttpRequests produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardHttpRequests struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardHttpRequests(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardHttpRequests{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardHttpRequests(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardHttpRequests{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardHttpRequests) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT non_negative_derivative(mean("queryReq"), 10s) FROM "telegraf"."default"."influxdb_httpd" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT non_negative_derivative(mean(\"requests\"), 10s) FROM nginx WHERE cluster_id = '%s' and %s group by time(1m), \"hostname\"", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "nginx" and r._field == "requests" and r.cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns: ["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: mean, createEmpty: false) `+ 42 | `|> derivative(unit: 10s, nonNegative: true) `+ 43 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 44 | `|> yield()`, 45 | d.DatabaseName, 46 | 
interval.StartString(), interval.EndString(), 47 | d.GetRandomClusterId()) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) HTTP Requests/Min (Number), rand cluster, %s by 1m, host", d.language.String(), interval.Duration()) 51 | 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | return q 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_kapa_cpu.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardKapaCpu produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardKapaCpu struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardKapaCpu(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardKapaCpu{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardKapaCpu(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardKapaCpu{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardKapaCpu) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT 100 - "usage_idle" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m AND "cpu"='cpu-total' AND "host"='kapacitor' 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT 100 - 
\"usage_idle\" FROM cpu WHERE hostname='kapacitor_1' and %s", d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "cpu" and r._field == "usage_idle" and r.hostname == "kapacitor_1") `+ 39 | `|> keep(columns:["_time", "_value"]) `+ 40 | `|> map(fn: (r) => ({_time:r._time,_value:100.0 - r._value)) `+ 41 | `|> yield()`, 42 | d.DatabaseName, 43 | interval.StartString(), interval.EndString()) 44 | } 45 | 46 | humanLabel := fmt.Sprintf("InfluxDB (%s) kapa cpu in %s", d.language.String(), interval.Duration()) 47 | 48 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 49 | return q 50 | } 51 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_kapa_ram.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardKapaRam produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardKapaRam struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardKapaRam(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardKapaRam{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardKapaRam(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardKapaRam{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardKapaRam) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT "used_percent" FROM "telegraf"."autogen"."mem" WHERE time > :dashboardTime: AND "host"='kapacitor' 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT \"used_percent\" FROM mem WHERE hostname='kapacitor_1' and %s", d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "mem" and r._field == "used_percent" and r.hostname == "kapacitor_1") `+ 39 | `|> keep(columns:["_time", "_value"]) `+ 40 | `|> yield()`, 41 | d.DatabaseName, 42 | interval.StartString(), interval.EndString()) 43 | } 44 | 45 | humanLabel := fmt.Sprintf("InfluxDB (%s) kapa mem used in %s", d.language.String(), interval.Duration()) 46 | 47 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 48 | return q 49 | } 50 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_memory_utilization.go: 
-------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardMemoryUtilization produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardMemoryUtilization struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardMemoryUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardMemoryUtilization{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardMemoryUtilization(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardMemoryUtilization{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardMemoryUtilization) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT mean("used_percent") FROM "telegraf"."default"."mem" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT mean(\"used_percent\") FROM mem WHERE cluster_id = '%s' and %s group by time(1m), hostname", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "mem" and r._field == "used_percent" and r._cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", 
"_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns:["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: mean, createEmpty: false) ` + 42 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 43 | `|> yield()`, 44 | d.DatabaseName, 45 | interval.StartString(), interval.EndString(), 46 | d.GetRandomClusterId()) 47 | } 48 | 49 | humanLabel := fmt.Sprintf("InfluxDB (%s) Memory Utilization (Percent), rand cluster, %s by 1m", d.language.String(), interval.Duration()) 50 | 51 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 52 | return q 53 | } 54 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_nginx_requests.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardNginxRequests produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardNginxRequests struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardNginxRequests(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardNginxRequests{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardNginxRequests(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardNginxRequests{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardNginxRequests) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT non_negative_derivative(mean("queriesExecuted"), 1s) FROM "telegraf"."default"."influxdb_queryExecutor" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT non_negative_derivative(mean(\"accepts\"), 1s) FROM nginx WHERE cluster_id = '%s' and %s group by time(1m), \"hostname\"", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "nginx" and r._field == "accepts" and r.cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns: ["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: mean, createEmpty: false) `+ 42 | `|> derivative(unit: 1s, nonNegative: true) `+ 43 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 44 | `|> yield()`, 45 | 
d.DatabaseName, 46 | interval.StartString(), interval.EndString(), 47 | d.GetRandomClusterId()) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) Queries Executed (Number) , rand cluster, %s by 1m, host", d.language.String(), interval.Duration()) 51 | 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | return q 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_queue_bytes.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardQueueBytes produces Influx-specific queries for the dashboard single-host case. 10 | type InfluxDashboardQueueBytes struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardQueueBytes(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardQueueBytes{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardQueueBytes(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardQueueBytes{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardQueueBytes) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT mean("queueBytes") FROM "telegraf"."default"."influxdb_hh_processor" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" 
fill(0) 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT mean(\"temp_files\") FROM postgresl WHERE cluster_id = '%s' and %s group by time(1m), hostname, fill(0)", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "postgresl" and r._field == "temp_files" and r.cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns: ["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: mean, createEmpty: true) `+ 42 | `|> fill(value: 0.0) `+ 43 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 44 | `|> yield()`, 45 | d.DatabaseName, 46 | interval.StartString(), interval.EndString(), 47 | d.GetRandomClusterId()) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) Hinted HandOff Queue Size (MB), rand cluster, %s by 1m", d.language.String(), interval.Duration()) 51 | 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | return q 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_dashboard_throughput.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import ( 5 | "fmt" 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxDashboardThroughput produces Influx-specific queries for the dashboard single-host case. 
10 | type InfluxDashboardThroughput struct { 11 | InfluxDashboard 12 | } 13 | 14 | func NewInfluxQLDashboardThroughput(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 15 | underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 16 | return &InfluxDashboardThroughput{ 17 | InfluxDashboard: *underlying, 18 | } 19 | } 20 | 21 | func NewFluxDashboardThroughput(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 22 | underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) 23 | return &InfluxDashboardThroughput{ 24 | InfluxDashboard: *underlying, 25 | } 26 | } 27 | 28 | func (d *InfluxDashboardThroughput) Dispatch(i int) bulkQuerygen.Query { 29 | q, interval := d.InfluxDashboard.DispatchCommon(i) 30 | 31 | var query string 32 | //SELECT non_negative_derivative(max("pointReqLocal"), 10s) FROM "telegraf"."default"."influxdb_write" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" 33 | if d.language == InfluxQL { 34 | query = fmt.Sprintf("SELECT non_negative_derivative(max(\"keyspace_hits\"), 10s) FROM redis WHERE cluster_id = '%s' and %s group by time(1m), \"hostname\"", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) 35 | } else { 36 | query = fmt.Sprintf(`from(bucket:"%s") `+ 37 | `|> range(start:%s, stop:%s) `+ 38 | `|> filter(fn:(r) => r._measurement == "redis" and r._field == "keyspace_hits" and r.cluster_id == "%s") `+ 39 | `|> keep(columns:["_start", "_stop", "_time", "_value", "hostname"]) `+ 40 | `|> group(columns: ["hostname"]) `+ 41 | `|> aggregateWindow(every: 1m, fn: max, createEmpty: false) `+ 42 | `|> derivative(unit: 10s, nonNegative: true) `+ 43 | `|> keep(columns: ["_time", "_value", "hostname"]) `+ 44 | `|> yield()`, 45 | d.DatabaseName, 46 | 
interval.StartString(), interval.EndString(), 47 | d.GetRandomClusterId()) 48 | } 49 | 50 | humanLabel := fmt.Sprintf("InfluxDB (%s) Per-Host Point Throughput (Number), %s by 1m", d.language.String(), interval.Duration()) 51 | 52 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 53 | return q 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_devops_8_hosts.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // InfluxDevops8Hosts produces Influx-specific queries for the devops groupby case. 7 | type InfluxDevops8Hosts struct { 8 | InfluxDevops 9 | } 10 | 11 | func NewInfluxQLDevops8Hosts(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newInfluxDevopsCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxDevops) 13 | return &InfluxDevops8Hosts{ 14 | InfluxDevops: *underlying, 15 | } 16 | } 17 | 18 | func NewFluxDevops8Hosts(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 19 | underlying := newInfluxDevopsCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxDevops) 20 | return &InfluxDevops8Hosts{ 21 | InfluxDevops: *underlying, 22 | } 23 | } 24 | 25 | func (d *InfluxDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 26 | q := bulkQuerygen.NewHTTPQuery() // from pool 27 | d.MaxCPUUsageHourByMinuteEightHosts(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_devops_groupby.go: -------------------------------------------------------------------------------- 
1 | package influxdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // InfluxDevopsGroupby produces Influx-specific queries for the devops groupby case. 7 | type InfluxDevopsGroupby struct { 8 | InfluxDevops 9 | } 10 | 11 | func NewInfluxQLDevopsGroupBy(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newInfluxDevopsCommon(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDevops) 13 | return &InfluxDevopsGroupby{ 14 | InfluxDevops: *underlying, 15 | } 16 | 17 | } 18 | 19 | func NewFluxDevopsGroupBy(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 20 | underlying := newInfluxDevopsCommon(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDevops) 21 | return &InfluxDevopsGroupby{ 22 | InfluxDevops: *underlying, 23 | } 24 | 25 | } 26 | 27 | func (d *InfluxDevopsGroupby) Dispatch(i int) bulkQuerygen.Query { 28 | q := bulkQuerygen.NewHTTPQuery() // from pool 29 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 30 | return q 31 | } 32 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // InfluxDevopsSingleHost produces Influx-specific queries for the devops single-host case. 
7 | type InfluxDevopsSingleHost struct { 8 | InfluxDevops 9 | } 10 | 11 | func NewInfluxQLDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newInfluxDevopsCommon(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDevops) 13 | return &InfluxDevopsSingleHost{ 14 | InfluxDevops: *underlying, 15 | } 16 | } 17 | 18 | func NewFluxDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 19 | underlying := newInfluxDevopsCommon(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDevops) 20 | return &InfluxDevopsSingleHost{ 21 | InfluxDevops: *underlying, 22 | } 23 | } 24 | 25 | func (d *InfluxDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 26 | q := bulkQuerygen.NewHTTPQuery() // from pool 27 | d.MaxCPUUsageHourByMinuteOneHost(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // InfluxDevopsSingleHost12hr produces Influx-specific queries for the devops single-host case over a 12hr period. 
7 | type InfluxDevopsSingleHost12hr struct { 8 | InfluxDevops 9 | } 10 | 11 | func NewInfluxQLDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newInfluxDevopsCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxDevops) 13 | return &InfluxDevopsSingleHost12hr{ 14 | InfluxDevops: *underlying, 15 | } 16 | } 17 | 18 | func NewFluxDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 19 | underlying := newInfluxDevopsCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxDevops) 20 | return &InfluxDevopsSingleHost12hr{ 21 | InfluxDevops: *underlying, 22 | } 23 | } 24 | 25 | func (d *InfluxDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 26 | q := bulkQuerygen.NewHTTPQuery() // from pool 27 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_count.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Count, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func 
NewFluxGroupWindowTransposeCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Count, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeCountCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Count, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeCountCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Count, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_first.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(First, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 
14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(First, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeFirstCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(First, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeFirstCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(First, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_last.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Last, 
LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Last, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeLastCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Last, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeLastCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Last, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_max.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) 
bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Max, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Max, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeMaxCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Max, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeMaxCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Max, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_mean.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeMean(dbConfig bulkQuerygen.DatabaseConfig, 
queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Mean, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Mean, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeMeanCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Mean, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeMeanCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Mean, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_min.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxQL query for "Group Window" on the standard cardinality IoT dataset 
10 | func NewInfluxQLGroupWindowTransposeMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Min, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Min, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeMinCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Min, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeMinCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Min, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_group_window_transpose_sum.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // 
InfluxQL query for "Group Window" on the standard cardinality IoT dataset 10 | func NewInfluxQLGroupWindowTransposeSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 11 | return NewInfluxGroupWindowTransposeQuery(Sum, LowCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 12 | } 13 | 14 | // Flux Query query for "Group Window" on the standard cardinality IoT dataset 15 | func NewFluxGroupWindowTransposeSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | return NewInfluxGroupWindowTransposeQuery(Sum, LowCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 17 | } 18 | 19 | // InfluxQL query for "Group Window" on the high cardinality Metaquery dataset 20 | func NewInfluxQLGroupWindowTransposeSumCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | return NewInfluxGroupWindowTransposeQuery(Sum, HighCardinality, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 22 | } 23 | 24 | // Flux query for "Group Window" on the high cardinality Metaquery dataset 25 | func NewFluxGroupWindowTransposeSumCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 26 | return NewInfluxGroupWindowTransposeQuery(Sum, HighCardinality, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 27 | } 28 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | bulkQuerygen 
"github.com/influxdata/influxdb-comparisons/bulk_query_gen" 8 | ) 9 | 10 | type InfluxGroupAggregateQuery struct { 11 | InfluxCommon 12 | aggregate Aggregate 13 | } 14 | 15 | func NewInfluxGroupAggregateQuery(agg Aggregate, lang Language, dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, scaleVar int) bulkQuerygen.QueryGenerator { 16 | if _, ok := dbConfig[bulkQuerygen.DatabaseName]; !ok { 17 | panic("need influx database name") 18 | } 19 | 20 | return &InfluxGroupAggregateQuery{ 21 | InfluxCommon: *newInfluxCommon(lang, dbConfig[bulkQuerygen.DatabaseName], queriesFullRange, scaleVar), 22 | aggregate: agg, 23 | } 24 | } 25 | 26 | func (d *InfluxGroupAggregateQuery) Dispatch(i int) bulkQuerygen.Query { 27 | q := bulkQuerygen.NewHTTPQuery() 28 | d.GroupAggregateQuery(q) 29 | return q 30 | } 31 | 32 | func (d *InfluxGroupAggregateQuery) GroupAggregateQuery(qi bulkQuerygen.Query) { 33 | interval := d.AllInterval.RandWindow(time.Hour * 6) 34 | 35 | var query string 36 | if d.language == InfluxQL { 37 | query = fmt.Sprintf("SELECT %s(temperature) FROM air_condition_room WHERE time > '%s' AND time < '%s' GROUP BY home_id", 38 | d.aggregate, interval.StartString(), interval.EndString()) 39 | } else { 40 | query = fmt.Sprintf(`from(bucket:"%s") 41 | |> range(start:%s, stop:%s) 42 | |> filter(fn:(r) => r._measurement == "air_condition_room" and r._field == "temperature") 43 | |> group(columns:["home_id"]) 44 | |> %s() 45 | |> yield()`, 46 | d.DatabaseName, 47 | interval.StartString(), interval.EndString(), 48 | d.aggregate) 49 | } 50 | 51 | humanLabel := fmt.Sprintf("InfluxDB (%s) %s temperature, rand %s by home_id", d.language.String(), d.aggregate, interval.StartString()) 52 | q := qi.(*bulkQuerygen.HTTPQuery) 53 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_count.go: 
-------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(Count, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Count, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_first.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(First, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(First, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_last.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | 
bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(Last, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Last, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_max.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(Max, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Max, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_mean.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateMean(dbConfig 
bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(Mean, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Mean, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_min.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxGroupAggregateQuery(Min, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Min, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_groupagg_sum.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLGroupAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return 
NewInfluxGroupAggregateQuery(Sum, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxGroupAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxGroupAggregateQuery(Sum, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_iot_aggregate_drop.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxIotAggregateDrop produces queries that will test performance 10 | // on Flux statements with drop() |> aggregateWindow() 11 | type InfluxIotAggregateDrop struct { 12 | InfluxIot 13 | interval time.Duration 14 | } 15 | 16 | func NewInfluxQLIotAggregateDrop(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 17 | underlying := NewInfluxIotCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 18 | return &InfluxIotAggregateDrop{ 19 | InfluxIot: *underlying, 20 | interval: queryInterval, 21 | } 22 | } 23 | 24 | func NewFluxIotAggregateDrop(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 25 | underlying := NewInfluxIotCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 26 | return &InfluxIotAggregateDrop{ 27 | InfluxIot: *underlying, 28 | interval: queryInterval, 29 | } 30 | } 31 | 32 | func (d *InfluxIotAggregateDrop) Dispatch(i int) bulkQuerygen.Query { 33 | q := bulkQuerygen.NewHTTPQuery() // from pool 34 | d.IotAggregateDrop(q, d.interval) 35 | return q 36 | } 37 | 
-------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_iot_aggregate_keep.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxIotAggregateKeep produces queries that will test performance 10 | // on Flux statements with keep() |> aggregateWindow() 11 | type InfluxIotAggregateKeep struct { 12 | InfluxIot 13 | interval time.Duration 14 | } 15 | 16 | func NewInfluxQLIotAggregateKeep(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 17 | underlying := NewInfluxIotCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 18 | return &InfluxIotAggregateKeep{ 19 | InfluxIot: *underlying, 20 | interval: queryInterval, 21 | } 22 | } 23 | 24 | func NewFluxIotAggregateKeep(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 25 | underlying := NewInfluxIotCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 26 | return &InfluxIotAggregateKeep{ 27 | InfluxIot: *underlying, 28 | interval: queryInterval, 29 | } 30 | } 31 | 32 | func (d *InfluxIotAggregateKeep) Dispatch(i int) bulkQuerygen.Query { 33 | q := bulkQuerygen.NewHTTPQuery() // from pool 34 | d.IotAggregateKeep(q, d.interval) 35 | return q 36 | } 37 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_iot_singlehost.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // InfluxIotSingleHost produces 
Influx-specific queries for the devops single-host case. 7 | type InfluxIotSingleHost struct { 8 | InfluxIot 9 | } 10 | 11 | func NewInfluxQLIotSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewInfluxIotCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 13 | return &InfluxIotSingleHost{ 14 | InfluxIot: *underlying, 15 | } 16 | } 17 | 18 | func NewFluxIotSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 19 | underlying := NewInfluxIotCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 20 | return &InfluxIotSingleHost{ 21 | InfluxIot: *underlying, 22 | } 23 | } 24 | 25 | func (d *InfluxIotSingleHost) Dispatch(i int) bulkQuerygen.Query { 26 | q := bulkQuerygen.NewHTTPQuery() // from pool 27 | d.AverageTemperatureDayByHourOneHome(q) 28 | return q 29 | } 30 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_iot_sorted_pivot.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | "time" 6 | ) 7 | 8 | // InfluxIotSortedPivot produces queries that will test performance 9 | // on Flux pivot function 10 | type InfluxIotSortedPivot struct { 11 | InfluxIot 12 | interval time.Duration 13 | } 14 | 15 | func NewInfluxQLIotSortedPivot(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | underlying := NewInfluxIotCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 17 | return &InfluxIotSortedPivot{ 18 | InfluxIot: *underlying, 19 | 
interval: queryInterval, 20 | } 21 | } 22 | 23 | func NewFluxIotSortedPivot(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 24 | underlying := NewInfluxIotCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 25 | return &InfluxIotSortedPivot{ 26 | InfluxIot: *underlying, 27 | interval: queryInterval, 28 | } 29 | } 30 | 31 | func (d *InfluxIotSortedPivot) Dispatch(i int) bulkQuerygen.Query { 32 | q := bulkQuerygen.NewHTTPQuery() 33 | d.IotSortedPivot(q, d.interval) 34 | return q 35 | } 36 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_iot_stand_alone_filter.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | type InfluxIotStandAloneFilter struct { 10 | InfluxIot 11 | } 12 | 13 | func NewInfluxQLIotStandAloneFilter(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | underlying := NewInfluxIotCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 15 | return &InfluxIotStandAloneFilter{ 16 | InfluxIot: *underlying, 17 | } 18 | } 19 | 20 | func NewFluxIotStandAloneFilter(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | underlying := NewInfluxIotCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxIot) 22 | return &InfluxIotStandAloneFilter{ 23 | InfluxIot: *underlying, 24 | } 25 | } 26 | 27 | func (d *InfluxIotStandAloneFilter) Dispatch(i int) bulkQuerygen.Query { 28 | q := bulkQuerygen.NewHTTPQuery() // from pool 29 | 
d.StandAloneFilter(q) 30 | return q 31 | } 32 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_metaquery_cardinality.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | type InfluxMetaqueryCardinality struct { 10 | InfluxMetaquery 11 | } 12 | 13 | func NewInfluxQLMetaqueryCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | underlying := NewInfluxMetaqueryCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 15 | return &InfluxMetaqueryCardinality{ 16 | InfluxMetaquery: *underlying, 17 | } 18 | } 19 | 20 | func NewFluxMetaqueryCardinality(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 21 | underlying := NewInfluxMetaqueryCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 22 | return &InfluxMetaqueryCardinality{ 23 | InfluxMetaquery: *underlying, 24 | } 25 | } 26 | 27 | func (d *InfluxMetaqueryCardinality) Dispatch(i int) bulkQuerygen.Query { 28 | q := bulkQuerygen.NewHTTPQuery() // from pool 29 | d.MetaqueryCardinality(q) 30 | return q 31 | } 32 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_metaquery_field_keys.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxMetaqueryFieldKeys produces metaqueries that will return a list of all 10 | // field keys associated with a measurement. 
11 | type InfluxMetaqueryFieldKeys struct { 12 | InfluxMetaquery 13 | } 14 | 15 | func NewInfluxQLMetaqueryFieldKeys(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | underlying := NewInfluxMetaqueryCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 17 | return &InfluxMetaqueryFieldKeys{ 18 | InfluxMetaquery: *underlying, 19 | } 20 | } 21 | 22 | func NewFluxMetaqueryFieldKeys(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 23 | underlying := NewInfluxMetaqueryCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 24 | return &InfluxMetaqueryFieldKeys{ 25 | InfluxMetaquery: *underlying, 26 | } 27 | } 28 | 29 | func (d *InfluxMetaqueryFieldKeys) Dispatch(i int) bulkQuerygen.Query { 30 | q := bulkQuerygen.NewHTTPQuery() // from pool 31 | d.MetaqueryFieldKeys(q) 32 | return q 33 | } 34 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_metaquery_tag_values.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | // InfluxMetaqueryTagValues produces metaqueries that will return a list of all 10 | // tag values for a specific tag key name. 
11 | type InfluxMetaqueryTagValues struct { 12 | InfluxMetaquery 13 | } 14 | 15 | func NewInfluxQLMetaqueryTagValues(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 16 | underlying := NewInfluxMetaqueryCommon(InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 17 | return &InfluxMetaqueryTagValues{ 18 | InfluxMetaquery: *underlying, 19 | } 20 | } 21 | 22 | func NewFluxMetaqueryTagValues(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 23 | underlying := NewInfluxMetaqueryCommon(Flux, dbConfig, queriesFullRange, queryInterval, scaleVar).(*InfluxMetaquery) 24 | return &InfluxMetaqueryTagValues{ 25 | InfluxMetaquery: *underlying, 26 | } 27 | } 28 | 29 | func (d *InfluxMetaqueryTagValues) Dispatch(i int) bulkQuerygen.Query { 30 | q := bulkQuerygen.NewHTTPQuery() // from pool 31 | d.MetaqueryTagValues(q) 32 | return q 33 | } 34 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 8 | ) 9 | 10 | type InfluxUngroupedAggregateQuery struct { 11 | InfluxCommon 12 | aggregate Aggregate 13 | } 14 | 15 | func NewInfluxUngroupedAggregateQuery(agg Aggregate, lang Language, dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, scaleVar int) bulkQuerygen.QueryGenerator { 16 | if _, ok := dbConfig[bulkQuerygen.DatabaseName]; !ok { 17 | panic("need influx database name") 18 | } 19 | 20 | return &InfluxUngroupedAggregateQuery{ 21 | InfluxCommon: *newInfluxCommon(lang, dbConfig[bulkQuerygen.DatabaseName], 
queriesFullRange, scaleVar), 22 | aggregate: agg, 23 | } 24 | } 25 | 26 | func (d *InfluxUngroupedAggregateQuery) Dispatch(i int) bulkQuerygen.Query { 27 | q := bulkQuerygen.NewHTTPQuery() 28 | d.UngroupedAggregateQuery(q) 29 | return q 30 | } 31 | 32 | func (d *InfluxUngroupedAggregateQuery) UngroupedAggregateQuery(qi bulkQuerygen.Query) { 33 | interval := d.AllInterval.RandWindow(time.Hour * 6) 34 | 35 | var query string 36 | if d.language == InfluxQL { 37 | query = fmt.Sprintf("SELECT %s(temperature) FROM air_condition_room WHERE time >= '%s' AND time < '%s'", 38 | d.aggregate, interval.StartString(), interval.EndString()) 39 | } else { 40 | query = fmt.Sprintf(`from(bucket:"%s") 41 | |> range(start:%s, stop:%s) 42 | |> filter(fn:(r) => r._measurement == "air_condition_room" and r._field == "temperature") 43 | |> group() 44 | |> %s() 45 | |> yield()`, 46 | d.DatabaseName, 47 | interval.StartString(), interval.EndString(), 48 | d.aggregate) 49 | } 50 | 51 | humanLabel := fmt.Sprintf("InfluxDB (%s) %s temperature, rand %s", d.language.String(), d.aggregate, interval.StartString()) 52 | q := qi.(*bulkQuerygen.HTTPQuery) 53 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 54 | } 55 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_count.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Count, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ 
time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(Count, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_first.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(First, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(First, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_last.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Last, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return 
NewInfluxUngroupedAggregateQuery(Last, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_max.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Max, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(Max, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_mean.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Mean, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(Mean, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | 
-------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_min.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Min, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(Min, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_ungroupedagg_sum.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLUngroupedAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxUngroupedAggregateQuery(Sum, InfluxQL, dbConfig, queriesFullRange, scaleVar) 11 | } 12 | 13 | func NewFluxUngroupedAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, _ time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxUngroupedAggregateQuery(Sum, Flux, dbConfig, queriesFullRange, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- 
/bulk_query_gen/influxdb/influx_windowagg_common.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 8 | ) 9 | 10 | type InfluxWindowAggregateQuery struct { 11 | InfluxCommon 12 | aggregate Aggregate 13 | interval time.Duration 14 | } 15 | 16 | func NewInfluxWindowAggregateQuery(agg Aggregate, lang Language, dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 17 | if _, ok := dbConfig[bulkQuerygen.DatabaseName]; !ok { 18 | panic("need influx database name") 19 | } 20 | 21 | return &InfluxWindowAggregateQuery{ 22 | InfluxCommon: *newInfluxCommon(lang, dbConfig[bulkQuerygen.DatabaseName], queriesFullRange, scaleVar), 23 | aggregate: agg, 24 | interval: queryInterval, 25 | } 26 | } 27 | 28 | func (d *InfluxWindowAggregateQuery) Dispatch(i int) bulkQuerygen.Query { 29 | q := bulkQuerygen.NewHTTPQuery() 30 | d.WindowAggregateQuery(q) 31 | return q 32 | } 33 | 34 | func (d *InfluxWindowAggregateQuery) WindowAggregateQuery(qi bulkQuerygen.Query) { 35 | interval := d.AllInterval.RandWindow(time.Hour * 6) 36 | 37 | var query string 38 | if d.language == InfluxQL { 39 | query = fmt.Sprintf("SELECT %s(temperature) FROM air_condition_room WHERE time > '%s' AND time < '%s' GROUP BY time(%s)", 40 | d.aggregate, interval.StartString(), interval.EndString(), d.interval) 41 | } else { 42 | query = fmt.Sprintf(`from(bucket:"%s") 43 | |> range(start:%s, stop:%s) 44 | |> filter(fn:(r) => r._measurement == "air_condition_room" and r._field == "temperature") 45 | |> aggregateWindow(every:%s, fn:%s) 46 | |> yield()`, 47 | d.DatabaseName, 48 | interval.StartString(), interval.EndString(), 49 | d.interval, d.aggregate) 50 | } 51 | 52 | humanLabel := fmt.Sprintf("InfluxDB (%s) %s temperature, rand %s by %s", 
d.language.String(), d.aggregate, interval.StartString(), d.interval) 53 | q := qi.(*bulkQuerygen.HTTPQuery) 54 | d.getHttpQuery(humanLabel, interval.StartString(), query, q) 55 | } 56 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_count.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Count, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateCount(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(Count, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_first.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(First, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateFirst(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, 
scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(First, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_last.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Last, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateLast(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(Last, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_max.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Max, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateMax(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) 
bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(Max, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_mean.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Mean, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateMean(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(Mean, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_min.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Min, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateMin(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | 
return NewInfluxWindowAggregateQuery(Min, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/influxdb/influx_windowagg_sum.go: -------------------------------------------------------------------------------- 1 | package influxdb 2 | 3 | import ( 4 | "time" 5 | 6 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 7 | ) 8 | 9 | func NewInfluxQLWindowAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 10 | return NewInfluxWindowAggregateQuery(Sum, InfluxQL, dbConfig, queriesFullRange, queryInterval, scaleVar) 11 | } 12 | 13 | func NewFluxWindowAggregateSum(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 14 | return NewInfluxWindowAggregateQuery(Sum, Flux, dbConfig, queriesFullRange, queryInterval, scaleVar) 15 | } 16 | -------------------------------------------------------------------------------- /bulk_query_gen/iot.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | // Devops describes a devops query generator. 4 | type Iot interface { 5 | AverageTemperatureDayByHourOneHome(Query) 6 | 7 | Dispatch(int) Query 8 | } 9 | 10 | // devopsDispatchAll round-robins through the different devops queries. 
11 | func IotDispatchAll(d Iot, iteration int, q Query, scaleVar int) { 12 | if scaleVar <= 0 { 13 | panic("logic error: bad scalevar") 14 | } 15 | // Only one IoT query is implemented so far (the remaining cases in the 16 | // switch below are commented out), so mod must stay 1. The scaleVar-based 17 | // increments copied from devopsDispatchAll made iteration%mod reach the 18 | // panicking default branch whenever scaleVar >= 2. 19 | mod := 1 31 | 32 | switch iteration % mod { 33 | case 0: 34 | d.AverageTemperatureDayByHourOneHome(q) 35 | //case 1: 36 | // d.MaxCPUUsageHourByMinuteTwoHosts(q, scaleVar) 37 | //case 2: 38 | // d.MaxCPUUsageHourByMinuteFourHosts(q, scaleVar) 39 | //case 3: 40 | // d.MaxCPUUsageHourByMinuteEightHosts(q, scaleVar) 41 | //case 4: 42 | // d.MaxCPUUsageHourByMinuteSixteenHosts(q, scaleVar) 43 | //case 5: 44 | // d.MaxCPUUsageHourByMinuteThirtyTwoHosts(q, scaleVar) 45 | default: 46 | panic("logic error in switch statement") 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /bulk_query_gen/metaquery.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | type Metaquery interface { 4 | StandardMetaquery(Query) 5 | Dispatch(int) Query 6 | } 7 | 8 | func MetaqueryDispatchAll(d Metaquery, q Query) { 9 | d.StandardMetaquery(q) 10 | } 11 | -------------------------------------------------------------------------------- /bulk_query_gen/mongodb/mongo_devops_8_hosts_1hr.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // MongoDevops8Hosts1Hr produces Mongo-specific queries for the devops eight-hosts, one-hour case.
7 | type MongoDevops8Hosts1Hr struct { 8 | MongoDevops 9 | } 10 | 11 | func NewMongoDevops8Hosts1Hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewMongoDevops(dbConfig, queriesFullRange, queryInterval, scaleVar).(*MongoDevops) 13 | return &MongoDevops8Hosts1Hr{ 14 | MongoDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *MongoDevops8Hosts1Hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewMongoQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/mongodb/mongo_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // MongoDevopsSingleHost produces Mongo-specific queries for the devops single-host case. 
7 | type MongoDevopsSingleHost struct { 8 | MongoDevops 9 | } 10 | 11 | func NewMongoDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewMongoDevops(dbConfig, queriesFullRange, queryInterval, scaleVar).(*MongoDevops) 13 | return &MongoDevopsSingleHost{ 14 | MongoDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *MongoDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewMongoQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/mongodb/mongo_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // MongoDevopsSingleHost produces Mongo-specific queries for the devops single-host case. 
7 | type MongoDevopsSingleHost12hr struct { 8 | MongoDevops 9 | } 10 | 11 | func NewMongoDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewMongoDevops(dbConfig, queriesFullRange, queryInterval, scaleVar).(*MongoDevops) 13 | return &MongoDevopsSingleHost12hr{ 14 | MongoDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *MongoDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewMongoQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/mongodb/mongo_iot_singlehost.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // MongoIotSingleHost produces Mongo-specific queries for the devops single-host case. 
7 | type MongoIotSingleHost struct { 8 | MongoIot 9 | } 10 | 11 | func NewMongoIotSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewMongoIot(dbConfig, queriesFullRange, queryInterval, scaleVar).(*MongoIot) 13 | return &MongoIotSingleHost{ 14 | MongoIot: *underlying, 15 | } 16 | } 17 | 18 | func (d *MongoIotSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewMongoQuery() // from pool 20 | d.AverageTemperatureDayByHourOneHome(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/mongodb/options.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import ( 4 | "log" 5 | "strings" 6 | ) 7 | 8 | const ( 9 | FlatFormat = "flat" 10 | KeyPairFormat = "key-pair" 11 | TimeseriesFormat = "timeseries" 12 | ) 13 | 14 | var DocumentFormat = FlatFormat 15 | var UseTimeseries = false 16 | var UseSingleCollection = false 17 | 18 | func ParseOptions(documentFormat string, oneCollection bool) { 19 | switch documentFormat { 20 | case FlatFormat, KeyPairFormat, TimeseriesFormat: 21 | DocumentFormat = documentFormat 22 | default: 23 | log.Fatalf("unsupported document format: '%s'", documentFormat) 24 | } 25 | UseTimeseries = strings.Contains(documentFormat, TimeseriesFormat) 26 | if UseTimeseries { 27 | log.Print("Using MongoDB 5+ time series collection") 28 | DocumentFormat = FlatFormat 29 | } 30 | log.Printf("Using %s point serialization", DocumentFormat) 31 | UseSingleCollection = oneCollection 32 | if UseSingleCollection { 33 | log.Println("Using single collection for all measurements") 34 | } else { 35 | log.Println("Using collections per measurement type") 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /bulk_query_gen/opentsdb/opentsdb_devops_8_hosts.go: 
-------------------------------------------------------------------------------- 1 | package opentsdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // OpenTSDBDevops8Hosts produces OpenTSDB-specific queries for the devops groupby case. 7 | type OpenTSDBDevops8Hosts struct { 8 | OpenTSDBDevops 9 | } 10 | 11 | func NewOpenTSDBDevops8Hosts(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newOpenTSDBDevopsCommon(queriesFullRange, queryInterval, scaleVar).(*OpenTSDBDevops) 13 | return &OpenTSDBDevops8Hosts{ 14 | OpenTSDBDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *OpenTSDBDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/opentsdb/opentsdb_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package opentsdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // OpenTSDBDevopsSingleHost produces OpenTSDB-specific queries for the devops single-host case. 
7 | type OpenTSDBDevopsSingleHost struct { 8 | OpenTSDBDevops 9 | } 10 | 11 | func NewOpenTSDBDevopsSingleHost(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newOpenTSDBDevopsCommon(queriesFullRange, queryInterval, scaleVar).(*OpenTSDBDevops) 13 | return &OpenTSDBDevopsSingleHost{ 14 | OpenTSDBDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *OpenTSDBDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/opentsdb/opentsdb_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package opentsdb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // OpenTSDBDevopsSingleHost12hr produces OpenTSDB-specific queries for the devops single-host case over a 12hr period. 
7 | type OpenTSDBDevopsSingleHost12hr struct { 8 | OpenTSDBDevops 9 | } 10 | 11 | func NewOpenTSDBDevopsSingleHost12hr(_ bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newOpenTSDBDevopsCommon(queriesFullRange, queryInterval, scaleVar).(*OpenTSDBDevops) 13 | return &OpenTSDBDevopsSingleHost12hr{ 14 | OpenTSDBDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *OpenTSDBDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/query.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | const DefaultQueryInterval = time.Hour 10 | 11 | type Query interface { 12 | Release() 13 | HumanLabelName() []byte 14 | HumanDescriptionName() []byte 15 | fmt.Stringer 16 | } 17 | 18 | var HTTPQueryPool sync.Pool = sync.Pool{ 19 | New: func() interface{} { 20 | return &HTTPQuery{ 21 | HumanLabel: []byte{}, 22 | HumanDescription: []byte{}, 23 | Method: []byte{}, 24 | Path: []byte{}, 25 | Body: []byte{}, 26 | Language: "", 27 | StartTimestamp: 0, 28 | EndTimestamp: 0, 29 | } 30 | }, 31 | } 32 | 33 | // HTTPQuery encodes an HTTP request. This will typically by serialized for use 34 | // by the query_benchmarker program. 35 | type HTTPQuery struct { 36 | HumanLabel []byte 37 | HumanDescription []byte 38 | Method []byte 39 | Path []byte 40 | Body []byte 41 | Language string 42 | StartTimestamp int64 43 | EndTimestamp int64 44 | } 45 | 46 | func NewHTTPQuery() *HTTPQuery { 47 | return HTTPQueryPool.Get().(*HTTPQuery) 48 | } 49 | 50 | // String produces a debug-ready description of a Query. 
51 | func (q *HTTPQuery) String() string { 52 | return fmt.Sprintf("HumanLabel: \"%s\", HumanDescription: \"%s\", Method: \"%s\", Path: \"%s\", Body: \"%s\"", q.HumanLabel, q.HumanDescription, q.Method, q.Path, q.Body) 53 | } 54 | 55 | func (q *HTTPQuery) HumanLabelName() []byte { 56 | return q.HumanLabel 57 | } 58 | func (q *HTTPQuery) HumanDescriptionName() []byte { 59 | return q.HumanDescription 60 | } 61 | 62 | func (q *HTTPQuery) Release() { 63 | q.HumanLabel = q.HumanLabel[:0] 64 | q.HumanDescription = q.HumanDescription[:0] 65 | q.Method = q.Method[:0] 66 | q.Path = q.Path[:0] 67 | q.Body = q.Body[:0] 68 | q.Language = "" 69 | q.StartTimestamp = 0 70 | q.EndTimestamp = 0 71 | 72 | HTTPQueryPool.Put(q) 73 | } 74 | -------------------------------------------------------------------------------- /bulk_query_gen/query_generator.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | import "time" 4 | 5 | // QueryGenerator describes a generator of queries, typically according to a 6 | // use case. 
7 | type QueryGenerator interface { 8 | Dispatch(int) Query 9 | } 10 | 11 | type QueryGeneratorMaker func(dbConfig DatabaseConfig, queriesFullRange TimeInterval, queryInterval time.Duration, scaleVar int) QueryGenerator 12 | -------------------------------------------------------------------------------- /bulk_query_gen/splunk/splunk_common.go: -------------------------------------------------------------------------------- 1 | package splunk 2 | 3 | import ( 4 | "fmt" 5 | bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 6 | "net/url" 7 | "time" 8 | ) 9 | 10 | type SplunkCommon struct { 11 | bulkQuerygen.CommonParams 12 | } 13 | 14 | func newSplunkCommon(interval bulkQuerygen.TimeInterval, scaleVar int) *SplunkCommon { 15 | return &SplunkCommon{ 16 | CommonParams: *bulkQuerygen.NewCommonParams(interval, scaleVar), 17 | } 18 | } 19 | 20 | func (d *SplunkCommon) getHttpQuery(humanLabel, from, until, query string, q *bulkQuerygen.HTTPQuery) { 21 | q.HumanLabel = []byte(humanLabel) 22 | q.HumanDescription = []byte(fmt.Sprintf("%s: %s - %s", humanLabel, from, until)) 23 | 24 | getValues := url.Values{} 25 | getValues.Set("search", query) 26 | q.Method = []byte("GET") 27 | q.Path = []byte(fmt.Sprintf("/services/search/jobs/export?%s&output_mode=json", getValues.Encode())) 28 | q.Body = nil 29 | } 30 | 31 | // TODO copy&pasted from Graphite - what good is this for??? 32 | func getTimestamp(t time.Time) string { 33 | return fmt.Sprintf("%02d:%02d_%04d%02d%02d", t.Hour(), t.Minute(), t.Year(), t.Month(), t.Day()) 34 | } 35 | -------------------------------------------------------------------------------- /bulk_query_gen/splunk/splunk_devops_8_hosts.go: -------------------------------------------------------------------------------- 1 | package splunk 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // SplunkDevops8Hosts produces Influx-specific queries for the devops groupby case. 
7 | type SplunkDevops8Hosts struct { 8 | SplunkDevops 9 | } 10 | 11 | func NewSplunkDevops8Hosts(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newSplunkDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*SplunkDevops) 13 | return &SplunkDevops8Hosts{ 14 | SplunkDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *SplunkDevops8Hosts) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/splunk/splunk_devops_groupby.go: -------------------------------------------------------------------------------- 1 | package splunk 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // SplunkDevopsGroupby produces Influx-specific queries for the devops groupby case. 
7 | type SplunkDevopsGroupby struct { 8 | SplunkDevops 9 | } 10 | 11 | func NewSplunkDevopsGroupBy(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newSplunkDevopsCommon(dbConfig, interval, duration, scaleVar).(*SplunkDevops) 13 | return &SplunkDevopsGroupby{ 14 | SplunkDevops: *underlying, 15 | } 16 | 17 | } 18 | 19 | func (d *SplunkDevopsGroupby) Dispatch(i int) bulkQuerygen.Query { 20 | q := bulkQuerygen.NewHTTPQuery() // from pool 21 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 22 | return q 23 | } 24 | -------------------------------------------------------------------------------- /bulk_query_gen/splunk/splunk_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package splunk 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // SplunkDevopsSingleHost produces Influx-specific queries for the devops single-host case. 
7 | type SplunkDevopsSingleHost struct { 8 | SplunkDevops 9 | } 10 | 11 | func NewSplunkDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newSplunkDevopsCommon(dbConfig, interval, duration, scaleVar).(*SplunkDevops) 13 | return &SplunkDevopsSingleHost{ 14 | SplunkDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *SplunkDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/splunk/splunk_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package splunk 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // SplunkDevopsSingleHost12hr produces Influx-specific queries for the devops single-host case over a 12hr period. 
7 | type SplunkDevopsSingleHost12hr struct { 8 | SplunkDevops 9 | } 10 | 11 | func NewSplunkDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newSplunkDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*SplunkDevops) 13 | return &SplunkDevopsSingleHost12hr{ 14 | SplunkDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *SplunkDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := bulkQuerygen.NewHTTPQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/time_interval.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | ) 7 | 8 | // TimeInterval represents an interval of time. 9 | type TimeInterval struct { 10 | Start, End time.Time 11 | } 12 | 13 | // NewTimeInterval constructs a TimeInterval. 14 | func NewTimeInterval(start, end time.Time) TimeInterval { 15 | return TimeInterval{ 16 | Start: start, 17 | End: end, 18 | } 19 | } 20 | 21 | // Duration converts a TimeInterval to a time.Duration. 22 | func (ti *TimeInterval) Duration() time.Duration { 23 | return ti.End.Sub(ti.Start) 24 | } 25 | 26 | // RandWindow creates a TimeInterval of duration `window` at a uniformly-random 27 | // start time within this time interval. 
28 | // RandWindow creates a TimeInterval of duration `window` at a uniformly-random 29 | // start time within this time interval. It panics only when `window` is longer 30 | // than the interval itself; a window exactly equal to the interval is valid and 31 | // yields the whole interval. 32 | func (ti *TimeInterval) RandWindow(window time.Duration) TimeInterval { 33 | lower := ti.Start.UnixNano() 34 | upper := ti.End.Add(-window).UnixNano() 35 | 36 | if upper < lower { 37 | panic("logic error: bad time bounds") 38 | } 39 | 40 | // upper == lower means the window spans the whole interval; the only valid 41 | // start is lower itself, and rand.Int63n would panic on a zero bound. 42 | start := lower 43 | if upper > lower { 44 | start += rand.Int63n(upper - lower) 45 | } 46 | end := start + window.Nanoseconds() 47 | 48 | x := NewTimeInterval(time.Unix(0, start), time.Unix(0, end)) 49 | if x.Duration() != window { 50 | panic("logic error: generated interval does not equal window") 51 | } 52 | 53 | return x 54 | } 55 | 56 | // StartString formats the start of the time interval. 57 | func (ti *TimeInterval) StartString() string { 58 | return ti.Start.UTC().Format(time.RFC3339) 59 | } 60 | 61 | // EndString formats the end of the time interval. 62 | func (ti *TimeInterval) EndString() string { 63 | return ti.End.UTC().Format(time.RFC3339) 64 | } 65 | 66 | // StartUnixNano returns the start time as nanoseconds. 67 | func (ti *TimeInterval) StartUnixNano() int64 { 68 | return ti.Start.UTC().UnixNano() 69 | } 70 | 71 | // EndUnixNano returns the end time as nanoseconds.
63 | func (ti *TimeInterval) EndUnixNano() int64 { 64 | return ti.End.UTC().UnixNano() 65 | } 66 | -------------------------------------------------------------------------------- /bulk_query_gen/time_window.go: -------------------------------------------------------------------------------- 1 | package bulk_query_gen 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | var ( 8 | TimeWindowShift time.Duration 9 | QueryIntervalType string 10 | ) 11 | 12 | type TimeWindow struct { 13 | Start time.Time 14 | Duration time.Duration 15 | } 16 | 17 | // Inspired by TimeInterval.RandWindow 18 | func (tw *TimeWindow) SlidingWindow(AllInterval *TimeInterval) TimeInterval { 19 | start := tw.Start.UnixNano() 20 | end := tw.Start.Add(tw.Duration).UnixNano() 21 | 22 | if TimeWindowShift > 0 { // shift by user-specified amount 23 | tw.Start = tw.Start.Add(TimeWindowShift) 24 | } else { // shift by query duration (default) 25 | tw.Start = tw.Start.Add(tw.Duration) 26 | } 27 | 28 | if tw.Start.UnixNano() >= AllInterval.End.UnixNano() { 29 | tw.Start = AllInterval.Start 30 | } 31 | 32 | x := NewTimeInterval(time.Unix(0, start), time.Unix(0, end)) 33 | 34 | return x 35 | } -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/query.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | var SQLQueryPool sync.Pool = sync.Pool{ 9 | New: func() interface{} { 10 | return &SQLQuery{ 11 | HumanLabel: []byte{}, 12 | HumanDescription: []byte{}, 13 | QuerySQL: []byte{}, 14 | } 15 | }, 16 | } 17 | 18 | // SQLQuery encodes an full constructed SQL query. This will typically by serialized for use 19 | // by the query_benchmarker program. 
20 | type SQLQuery struct { 21 | HumanLabel []byte 22 | HumanDescription []byte 23 | QuerySQL []byte 24 | } 25 | 26 | func NewSQLQuery() *SQLQuery { 27 | return SQLQueryPool.Get().(*SQLQuery) 28 | } 29 | 30 | // String produces a debug-ready description of a Query. 31 | func (q *SQLQuery) String() string { 32 | return fmt.Sprintf("HumanLabel: \"%s\", HumanDescription: \"%s\", Query: \"%s\"", q.HumanLabel, q.HumanDescription, q.QuerySQL) 33 | } 34 | 35 | func (q *SQLQuery) HumanLabelName() []byte { 36 | return q.HumanLabel 37 | } 38 | func (q *SQLQuery) HumanDescriptionName() []byte { 39 | return q.HumanDescription 40 | } 41 | 42 | func (q *SQLQuery) Release() { 43 | q.HumanLabel = q.HumanLabel[:0] 44 | q.HumanDescription = q.HumanDescription[:0] 45 | q.QuerySQL = q.QuerySQL[:0] 46 | 47 | SQLQueryPool.Put(q) 48 | } 49 | -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/timescale_devops_8_hosts_1hr.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // TimescaleDevops8Hosts1Hr produces Timescale-specific queries for the devops single-host case. 
7 | type TimescaleDevops8Hosts1Hr struct { 8 | TimescaleDevops 9 | } 10 | 11 | func NewTimescaleDevops8Hosts1Hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newTimescaleDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*TimescaleDevops) 13 | return &TimescaleDevops8Hosts1Hr{ 14 | TimescaleDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *TimescaleDevops8Hosts1Hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewSQLQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteEightHosts(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/timescale_devops_groupby.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // TimescaleDevopsGroupby produces Timescale-specific queries for the devops groupby case. 
7 | type TimescaleDevopsGroupby struct { 8 | TimescaleDevops 9 | } 10 | 11 | func NewTimescaleDevopsGroupby(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newTimescaleDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*TimescaleDevops) 13 | return &TimescaleDevopsGroupby{ 14 | TimescaleDevops: *underlying, 15 | } 16 | 17 | } 18 | 19 | func (d *TimescaleDevopsGroupby) Dispatch(i int) bulkQuerygen.Query { 20 | q := NewSQLQuery() // from pool 21 | d.MeanCPUUsageDayByHourAllHostsGroupbyHost(q) 22 | return q 23 | } 24 | -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/timescale_devops_singlehost.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // TimescaleDevopsSingleHost produces Timescale-specific queries for the devops single-host case. 
7 | type TimescaleDevopsSingleHost struct { 8 | TimescaleDevops 9 | } 10 | 11 | func NewTimescaleDevopsSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newTimescaleDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*TimescaleDevops) 13 | return &TimescaleDevopsSingleHost{ 14 | TimescaleDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *TimescaleDevopsSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewSQLQuery() // from pool 20 | d.MaxCPUUsageHourByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/timescale_devops_singlehost_12hr.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // TimescaleDevopsSingleHost12hr produces Timescale-specific queries for the devops single-host case. 
7 | type TimescaleDevopsSingleHost12hr struct { 8 | TimescaleDevops 9 | } 10 | 11 | func NewTimescaleDevopsSingleHost12hr(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := newTimescaleDevopsCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*TimescaleDevops) 13 | return &TimescaleDevopsSingleHost12hr{ 14 | TimescaleDevops: *underlying, 15 | } 16 | } 17 | 18 | func (d *TimescaleDevopsSingleHost12hr) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewSQLQuery() // from pool 20 | d.MaxCPUUsage12HoursByMinuteOneHost(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /bulk_query_gen/timescaledb/timescale_iot_singlehost.go: -------------------------------------------------------------------------------- 1 | package timescaledb 2 | 3 | import "time" 4 | import bulkQuerygen "github.com/influxdata/influxdb-comparisons/bulk_query_gen" 5 | 6 | // TimescaleIotSingleHost produces Timescale-specific queries for the devops single-host case. 
7 | type TimescaleIotSingleHost struct { 8 | TimescaleIot 9 | } 10 | 11 | func NewTimescaleIotSingleHost(dbConfig bulkQuerygen.DatabaseConfig, queriesFullRange bulkQuerygen.TimeInterval, queryInterval time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { 12 | underlying := NewTimescaleIotCommon(dbConfig, queriesFullRange, queryInterval, scaleVar).(*TimescaleIot) 13 | return &TimescaleIotSingleHost{ 14 | TimescaleIot: *underlying, 15 | } 16 | } 17 | 18 | func (d *TimescaleIotSingleHost) Dispatch(i int) bulkQuerygen.Query { 19 | q := NewSQLQuery() // from pool 20 | d.AverageTemperatureDayByHourOneHome(q) 21 | return q 22 | } 23 | -------------------------------------------------------------------------------- /cmd/bulk_load_mongo/unsafe.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "reflect" 5 | "unsafe" 6 | ) 7 | 8 | // unsafeBytesToString converts a []byte to a string without a heap allocation. 9 | // 10 | // It is unsafe, and is intended to prepare input to short-lived functions 11 | // that require strings. 12 | func unsafeBytesToString(in []byte) string { 13 | src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) 14 | dst := reflect.StringHeader{ 15 | Data: src.Data, 16 | Len: src.Len, 17 | } 18 | s := *(*string)(unsafe.Pointer(&dst)) 19 | return s 20 | } 21 | 22 | // unsafeStringToBytes converts a string to a []byte without a heap allocation. 23 | // 24 | // It is unsafe, and is intended to prepare input to short-lived functions 25 | // that require byte slices. 
26 | func unsafeStringToBytes(in string) []byte { 27 | src := *(*reflect.StringHeader)(unsafe.Pointer(&in)) 28 | dst := reflect.SliceHeader{ 29 | Data: src.Data, 30 | Len: src.Len, 31 | Cap: src.Len, 32 | } 33 | s := *(*[]byte)(unsafe.Pointer(&dst)) 34 | return s 35 | } 36 | -------------------------------------------------------------------------------- /cmd/bulk_load_splunk/http_writer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/pkg/errors" 6 | "time" 7 | 8 | "github.com/valyala/fasthttp" 9 | ) 10 | 11 | const DefaultIdleConnectionTimeout = 90 * time.Second 12 | 13 | // HTTPWriterConfig is the configuration used to create an HTTPWriter. 14 | type HTTPWriterConfig struct { 15 | // URL of the host, in form "http://example.com:8086" 16 | Host string 17 | 18 | // Authorization token 19 | Token string 20 | 21 | // Debug label for more informative errors. 22 | DebugInfo string 23 | } 24 | 25 | // HTTPWriter is a Writer that writes to an InfluxDB HTTP server. 26 | type HTTPWriter struct { 27 | client fasthttp.Client 28 | 29 | c HTTPWriterConfig 30 | url []byte 31 | auth string 32 | } 33 | 34 | // NewHTTPWriter returns a new HTTPWriter from the supplied HTTPWriterConfig. 35 | func NewHTTPWriter(c HTTPWriterConfig) *HTTPWriter { 36 | return &HTTPWriter{ 37 | client: fasthttp.Client{ 38 | Name: "bulk_load_splunk", 39 | MaxIdleConnDuration: DefaultIdleConnectionTimeout, 40 | }, 41 | 42 | c: c, 43 | url: []byte(c.Host + "/services/collector"), 44 | auth: fmt.Sprintf("Splunk %s", c.Token), 45 | } 46 | } 47 | 48 | var ( 49 | post = []byte("POST") 50 | applicationJson = []byte("application/json") 51 | ) 52 | 53 | // WriteJsonProtocol writes the given byte slice to the HTTP server described in the Writer's HTTPWriterConfig. 
54 | // It returns the latency in nanoseconds and any error received while sending the data over HTTP, 55 | // or it returns a new error if the HTTP response isn't as expected. 56 | func (w *HTTPWriter) WriteJsonProtocol(body []byte, isGzip bool) (int64, error) { 57 | req := fasthttp.AcquireRequest() 58 | req.Header.SetContentTypeBytes(applicationJson) 59 | req.Header.SetMethodBytes(post) 60 | req.Header.SetRequestURIBytes(w.url) 61 | req.Header.Add("Authorization", w.auth) 62 | if isGzip { 63 | req.Header.Add("Content-Encoding", "gzip") 64 | } 65 | req.SetBody(body) 66 | 67 | resp := fasthttp.AcquireResponse() 68 | start := time.Now() 69 | err := w.client.Do(req, resp) 70 | lat := time.Since(start).Nanoseconds() 71 | if err == nil { 72 | sc := resp.StatusCode() 73 | if sc != fasthttp.StatusOK { 74 | err = fmt.Errorf("%s - unexpected POST response (status %d): %s", w.c.DebugInfo, sc, resp.Body()) 75 | } 76 | } else { 77 | err = errors.Wrap(err, "POST failed") 78 | } 79 | 80 | fasthttp.ReleaseResponse(resp) 81 | fasthttp.ReleaseRequest(req) 82 | 83 | return lat, err 84 | } 85 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_cassandra/conn.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "time" 6 | 7 | "github.com/gocql/gocql" 8 | ) 9 | 10 | // NewCassandraSession creates a new Cassandra session. It is goroutine-safe 11 | // by default, and uses a connection pool. 
12 | func NewCassandraSession(daemonUrl string, timeout time.Duration) *gocql.Session { 13 | cluster := gocql.NewCluster(daemonUrl) 14 | cluster.Keyspace = BlessedKeyspace 15 | cluster.Consistency = gocql.One 16 | cluster.ProtoVersion = 4 17 | cluster.Timeout = timeout 18 | session, err := cluster.CreateSession() 19 | if err != nil { 20 | log.Fatal(err) 21 | } 22 | return session 23 | } 24 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_cassandra/query_plan_aggregators.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | // Type Aggregator merges QueryPlan results on the client in constant time. 6 | // This is intended to match the aggregation that a CQLQuery performs on a 7 | // Cassandra server. 8 | // 9 | // Note that the underlying functions should be commutative and associative. 10 | type Aggregator interface { 11 | Put(float64) 12 | Get() float64 13 | } 14 | 15 | // AggregatorMax aggregates the maximum of a stream of values. 16 | type AggregatorMax struct { 17 | value float64 18 | count int64 19 | } 20 | 21 | // Put puts a value for finding the maximum. 22 | func (a *AggregatorMax) Put(n float64) { 23 | if n > a.value || a.count == 0 { 24 | a.value = n 25 | } 26 | a.count++ 27 | } 28 | 29 | // Get computes the aggregated maximum. 30 | func (a *AggregatorMax) Get() float64 { 31 | if a.count == 0 { 32 | return 0 33 | } 34 | return a.value 35 | } 36 | 37 | // AggregatorMax aggregates the minimum of a stream of values. 38 | type AggregatorMin struct { 39 | value float64 40 | count int64 41 | } 42 | 43 | // Put puts a value for finding the minimum. 44 | func (a *AggregatorMin) Put(n float64) { 45 | if n < a.value || a.count == 0 { 46 | a.value = n 47 | } 48 | a.count++ 49 | } 50 | 51 | // Get computes the aggregated minimum. 
52 | func (a *AggregatorMin) Get() float64 { 53 | return a.value 54 | } 55 | 56 | // AggregatorMax aggregates the average of a stream of values. 57 | type AggregatorAvg struct { 58 | value float64 59 | count int64 60 | } 61 | 62 | // Put puts a value for averaging. 63 | func (a *AggregatorAvg) Put(n float64) { 64 | a.value += n 65 | a.count++ 66 | } 67 | 68 | // Get computes the aggregated average. 69 | func (a *AggregatorAvg) Get() float64 { 70 | if a.count == 0 { 71 | return 0 72 | } 73 | return a.value / float64(a.count) 74 | } 75 | 76 | // GetConstantSpaceAggr translates a label into a new ConstantSpaceAggr. 77 | func GetAggregator(label string) (Aggregator, error) { 78 | // TODO(rw): fewer heap allocations here. 79 | switch label { 80 | case "min": 81 | return &AggregatorMin{}, nil 82 | case "max": 83 | return &AggregatorMax{}, nil 84 | case "avg": 85 | return &AggregatorAvg{}, nil 86 | default: 87 | return nil, fmt.Errorf("invalid aggregation specifier") 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_es/query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | // Query holds HTTP request data, typically decoded from the program's input. 6 | type Query struct { 7 | HumanLabel []byte 8 | HumanDescription []byte 9 | Method []byte 10 | Path []byte 11 | Body []byte 12 | ID int64 13 | } 14 | 15 | // String produces a debug-ready description of a Query. 
16 | func (q *Query) String() string { 17 | return fmt.Sprintf("ID: %d, HumanLabel: %s, HumanDescription: %s, Method: %s, Path: %s, Body:%s", q.ID, q.HumanLabel, q.HumanDescription, q.Method, q.Path, q.Body) 18 | } 19 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_mongo/query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/influxdata/influxdb-comparisons/bulk_query_gen/mongodb" 6 | ) 7 | 8 | // Query holds Mongo BSON request data, typically decoded from the program's 9 | // input. 10 | type Query struct { 11 | HumanLabel []byte 12 | HumanDescription []byte 13 | DatabaseName []byte 14 | CollectionName []byte 15 | BsonDoc []mongodb.M 16 | ID int64 17 | } 18 | 19 | // String produces a debug-ready description of a Query. 20 | func (q *Query) String() string { 21 | return fmt.Sprintf("ID: %d, HumanLabel: %s, HumanDescription: %s, Database: %s, Collection: %s", q.ID, q.HumanLabel, q.HumanDescription, q.DatabaseName, q.CollectionName) 22 | } 23 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_mongo/unsafe.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "reflect" 5 | "unsafe" 6 | ) 7 | 8 | // unsafeBytesToString converts a []byte to a string without a heap allocation. 9 | // 10 | // It is unsafe, and is intended to prepare input to short-lived functions 11 | // that require strings. 12 | func unsafeBytesToString(in []byte) string { 13 | src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) 14 | dst := reflect.StringHeader{ 15 | Data: src.Data, 16 | Len: src.Len, 17 | } 18 | s := *(*string)(unsafe.Pointer(&dst)) 19 | return s 20 | } 21 | 22 | // unsafeStringToBytes converts a string to a []byte without a heap allocation. 
23 | // 24 | // It is unsafe, and is intended to prepare input to short-lived functions 25 | // that require byte slices. 26 | func unsafeStringToBytes(in string) []byte { 27 | src := *(*reflect.StringHeader)(unsafe.Pointer(&in)) 28 | dst := reflect.SliceHeader{ 29 | Data: src.Data, 30 | Len: src.Len, 31 | Cap: src.Len, 32 | } 33 | s := *(*[]byte)(unsafe.Pointer(&dst)) 34 | return s 35 | } 36 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_opentsdb/query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | // Query holds HTTP request data, typically decoded from the program's input. 6 | type Query struct { 7 | HumanLabel []byte 8 | HumanDescription []byte 9 | Method []byte 10 | Path []byte 11 | Body []byte 12 | ID int64 13 | StartTimestamp int64 14 | EndTimestamp int64 15 | } 16 | 17 | // String produces a debug-ready description of a Query. 18 | func (q *Query) String() string { 19 | return fmt.Sprintf("ID: %d, HumanLabel: %s, HumanDescription: %s, Method: %s, Path: %s, Body:%s", q.ID, q.HumanLabel, q.HumanDescription, q.Method, q.Path, q.Body) 20 | } 21 | -------------------------------------------------------------------------------- /cmd/query_benchmarker_timescale/query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | // Query holds Timescale SQL query, typically decoded from the program's 6 | // input. 7 | type Query struct { 8 | HumanLabel []byte 9 | HumanDescription []byte 10 | QuerySQL []byte 11 | ID int64 12 | } 13 | 14 | // String produces a debug-ready description of a Query. 
15 | func (q *Query) String() string { 16 | return fmt.Sprintf("ID: %d, HumanLabel: %s, HumanDescription: %s, Query: %s", q.ID, q.HumanLabel, q.HumanDescription, q.QuerySQL) 17 | } 18 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxdb-comparisons 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/gocql/gocql v1.0.0 7 | github.com/gofrs/uuid v4.2.0+incompatible // indirect 8 | github.com/golang/protobuf v1.5.2 9 | github.com/google/flatbuffers v1.12.0 10 | github.com/jackc/pgx/v4 v4.18.2 11 | github.com/kisielk/og-rek v1.2.0 12 | github.com/klauspost/compress v1.15.0 13 | github.com/lib/pq v1.10.4 // indirect 14 | github.com/pelletier/go-toml v1.9.0 15 | github.com/pkg/errors v0.9.1 16 | github.com/pkg/profile v1.6.0 17 | github.com/shopspring/decimal v1.3.1 // indirect 18 | github.com/stretchr/testify v1.8.1 19 | github.com/valyala/fasthttp v1.34.0 20 | go.mongodb.org/mongo-driver v1.11.0 21 | ) 22 | -------------------------------------------------------------------------------- /mongo.flatbuffers.fbs: -------------------------------------------------------------------------------- 1 | namespace mongo_serialization; 2 | 3 | enum ValueType:byte { Int = 0, Long = 1, Float = 2, Double = 3, String = 4 } 4 | 5 | table Tag { 6 | key:[ubyte]; 7 | val:[ubyte]; 8 | } 9 | 10 | table Field { 11 | key:[ubyte]; 12 | value_type:ValueType; 13 | int_value:int; 14 | long_value:long; 15 | float_value:float; 16 | double_value:double; 17 | string_value:[ubyte]; 18 | } 19 | 20 | table Item { 21 | seriesId:[ubyte]; 22 | measurement_name:[ubyte]; 23 | tags:[Tag]; 24 | fields:[Field]; 25 | timestamp_nanos:long; 26 | } 27 | 28 | root_type Item; 29 | -------------------------------------------------------------------------------- /mongo_serialization/Tag.go: 
-------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 2 | 3 | package mongo_serialization 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type Tag struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &Tag{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *Tag) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *Tag) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *Tag) Key(j int) byte { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | a := rcv._tab.Vector(o) 33 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 34 | } 35 | return 0 36 | } 37 | 38 | func (rcv *Tag) KeyLength() int { 39 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 40 | if o != 0 { 41 | return rcv._tab.VectorLen(o) 42 | } 43 | return 0 44 | } 45 | 46 | func (rcv *Tag) KeyBytes() []byte { 47 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 48 | if o != 0 { 49 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 50 | } 51 | return nil 52 | } 53 | 54 | func (rcv *Tag) Val(j int) byte { 55 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 56 | if o != 0 { 57 | a := rcv._tab.Vector(o) 58 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 59 | } 60 | return 0 61 | } 62 | 63 | func (rcv *Tag) ValLength() int { 64 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 65 | if o != 0 { 66 | return rcv._tab.VectorLen(o) 67 | } 68 | return 0 69 | } 70 | 71 | func (rcv *Tag) ValBytes() []byte { 72 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 73 | if o != 0 { 74 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 75 | } 76 | return nil 77 | } 78 | 79 | func TagStart(builder *flatbuffers.Builder) { 80 | builder.StartObject(2) 81 | } 82 
| func TagAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { 83 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) 84 | } 85 | func TagStartKeyVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 86 | return builder.StartVector(1, numElems, 1) 87 | } 88 | func TagAddVal(builder *flatbuffers.Builder, val flatbuffers.UOffsetT) { 89 | builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(val), 0) 90 | } 91 | func TagStartValVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 92 | return builder.StartVector(1, numElems, 1) 93 | } 94 | func TagEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 95 | return builder.EndObject() 96 | } 97 | -------------------------------------------------------------------------------- /mongo_serialization/ValueType.go: -------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 2 | 3 | package mongo_serialization 4 | 5 | type ValueType = int8 6 | const ( 7 | ValueTypeInt ValueType = 0 8 | ValueTypeLong ValueType = 1 9 | ValueTypeFloat ValueType = 2 10 | ValueTypeDouble ValueType = 3 11 | ValueTypeString ValueType = 4 12 | ) 13 | 14 | var EnumNamesValueType = map[ValueType]string{ 15 | ValueTypeInt:"Int", 16 | ValueTypeLong:"Long", 17 | ValueTypeFloat:"Float", 18 | ValueTypeDouble:"Double", 19 | ValueTypeString:"String", 20 | } 21 | 22 | -------------------------------------------------------------------------------- /timescale.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package timescale_serialization; 3 | 4 | message FlatPoint { 5 | string measurementName = 1; 6 | repeated string columns = 2; 7 | 8 | enum ValueType { 9 | INTEGER = 0; 10 | FLOAT = 1; 11 | STRING = 2; 12 | } 13 | 14 | message FlatPointValue { 15 | ValueType type = 1; 16 | int64 intVal = 2; 17 | double doubleVal = 3; 18 | string stringVal = 4; 19 | 
} 20 | 21 | repeated FlatPointValue values = 3; 22 | } 23 | //protoc --gofast_out=timescale_serializaition timescale.proto -------------------------------------------------------------------------------- /util/report/result_test.go: -------------------------------------------------------------------------------- 1 | package report 2 | 3 | import ( 4 | "github.com/stretchr/testify/require" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestResultsInfluxDbV1(t *testing.T) { 10 | var reportTags [][2]string 11 | reportTags = append(reportTags, [2]string{"hours", "2"}) 12 | reportTags = append(reportTags, [2]string{"hosts", "100"}) 13 | reportParams := &LoadReportParams{ 14 | ReportParams: ReportParams{ 15 | ReportDatabaseName: "test_benchmarks", 16 | ReportHost: "http://localhost:8086", 17 | ReportUser: "", 18 | ReportPassword: "", 19 | Hostname: "mypc", 20 | DBType: "InfluxDB", 21 | DestinationUrl: "http://localhost:8086", 22 | Workers: 10, 23 | ItemLimit: -1, 24 | }, 25 | IsGzip: false, 26 | BatchSize: 5000, 27 | } 28 | err := ReportLoadResult(reportParams, 300, 30001000, 23001000, time.Minute*5) 29 | require.NoError(t, err) 30 | } 31 | 32 | func TestResultsInfluxDbV2(t *testing.T) { 33 | var reportTags [][2]string 34 | reportTags = append(reportTags, [2]string{"hours", "2"}) 35 | reportTags = append(reportTags, [2]string{"hosts", "100"}) 36 | reportParams := &LoadReportParams{ 37 | ReportParams: ReportParams{ 38 | ReportDatabaseName: "0418a0edc9573000", 39 | ReportHost: "http://localhost:9999", 40 | ReportOrgId: "03d32366bb107000", 41 | ReportAuthToken: "2sRnZBpWjDzF009nGnGHSsmbNMvV36F4GXIvEkNPIH1dTgMiw6G_NmCnfn136w3flrqZ34zv52nb1fB4hiJGCA==", 42 | Hostname: "mypc", 43 | DBType: "InfluxDB", 44 | DestinationUrl: "http://localhost:8086", 45 | Workers: 10, 46 | ItemLimit: -1, 47 | }, 48 | IsGzip: false, 49 | BatchSize: 5000, 50 | } 51 | err := ReportLoadResult(reportParams, 300, 30001000, 23001000, time.Minute*5) 52 | require.NoError(t, err) 53 | } 54 | 
-------------------------------------------------------------------------------- /util/report/telemetry.go: -------------------------------------------------------------------------------- 1 | package report 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | // TelemetryRunAsync runs a collection loop with many defaults already set. It will 9 | // abort the program if an error occurs. Assumes points are owned by the 10 | // GlobalPointPool. 11 | func TelemetryRunAsync(c *Collector, batchSize uint64, writeToStderr bool, skipN uint64) (src chan *Point, done chan struct{}) { 12 | src = make(chan *Point, 100) 13 | done = make(chan struct{}) 14 | 15 | send := func() { 16 | c.PrepBatch() 17 | if writeToStderr { 18 | _, err := os.Stderr.Write(c.buf.Bytes()) 19 | if err != nil { 20 | log.Fatalf("collector error (stderr): %v", err.Error()) 21 | } 22 | } 23 | 24 | err := c.SendBatch() 25 | if err != nil { 26 | log.Fatalf("collector error (http): %v", err.Error()) 27 | } 28 | 29 | for _, p := range c.Points { 30 | PutPointIntoGlobalPool(p) 31 | } 32 | } 33 | 34 | go func() { 35 | var i uint64 36 | for p := range src { 37 | i++ 38 | 39 | if i <= skipN { 40 | continue 41 | } 42 | 43 | c.Put(p) 44 | 45 | if i%batchSize == 0 { 46 | send() 47 | c.Reset() 48 | } 49 | } 50 | if len(c.Points) > 0 { 51 | send() 52 | c.Reset() 53 | } 54 | done <- struct{}{} 55 | }() 56 | 57 | return 58 | } 59 | -------------------------------------------------------------------------------- /void_server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | 7 | "github.com/valyala/fasthttp" 8 | "github.com/valyala/fasthttp/reuseport" 9 | ) 10 | 11 | var ( 12 | addr = flag.String("addr", ":8080", "TCP address to listen to") 13 | ) 14 | 15 | var body = []byte("\n") 16 | 17 | func main() { 18 | flag.Parse() 19 | 20 | ln, err := reuseport.Listen("tcp4", *addr) 21 | if err != nil { 22 | log.Fatal(err) 23 | } 24 | 25 
| if err := fasthttp.Serve(ln, defaultRequestHandler); err != nil { 26 | log.Fatalf("Error in ListenAndServe: %s", err) 27 | } 28 | } 29 | 30 | func defaultRequestHandler(ctx *fasthttp.RequestCtx) { 31 | ctx.SetStatusCode(fasthttp.StatusNoContent) 32 | } 33 | --------------------------------------------------------------------------------