├── .codeclimate.yml ├── .coverage_tests.sh ├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── release.md ├── PULL_REQUEST_TEMPLATE.md ├── renovate.json └── workflows │ ├── benchmark.yaml │ ├── docker-compose-tests.yml │ ├── go-e2e.yml │ ├── go-releaser.yml │ ├── go-report-card.yml │ ├── go-scheduled.yml │ ├── go.yml │ ├── gosec.yml │ ├── integration.yml │ └── mixin.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── .mdox.validate.yaml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── EXTENSION_VERSION ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── bors.toml ├── build ├── Dockerfile ├── conf │ └── promscale.conf ├── deepcopy-gen-header.txt ├── env │ ├── deb.env │ └── rpm.env ├── prom-migrator │ └── Dockerfile ├── scripts │ ├── postinstall.sh │ ├── postremove.sh │ └── preinstall.sh └── systemd │ └── promscale.service ├── cmd └── promscale │ └── main.go ├── config.yml ├── deploy └── helm-chart │ └── README.md ├── docker-compose ├── alerts.yml ├── docker-compose.yaml ├── high-availability │ ├── docker-compose.yaml │ ├── prometheus1.yml │ ├── prometheus2.yml │ └── test.sh ├── jaeger-promscale-demo │ ├── README.md │ └── docker-compose.yaml ├── otel-collector-config.yml ├── prometheus.yml ├── promscale-demo │ └── docker-compose.yaml ├── promscale_prometheus.yml ├── rules.yml └── test.sh ├── docs ├── alerting.md ├── assets │ ├── promscale-HA-arch.png │ ├── promscale-arch.png │ └── promscale-logo.png ├── bare-metal-promscale-stack.md ├── binary.md ├── compression-job-hotfix.md ├── configuration.md ├── configuring_prometheus.md ├── data-model.md ├── dataset.md ├── docker.md ├── downsampling.md ├── gsoc │ └── ideas_2021.md ├── high-availability │ ├── new_ha_system.png │ └── prometheus-HA.md ├── metric_deletion_and_retention.md ├── metrics.md ├── mixin │ ├── .gitignore │ ├── .lint │ ├── Makefile │ ├── README.md │ ├── alerts │ │ ├── alerts.libsonnet │ │ └── alerts.yaml │ ├── config.libsonnet │ ├── dashboards │ │ ├── apm-dependencies.json │ │ ├── apm-home.json │ │ ├── apm-service-dependencies-downstream.json │ │ ├── apm-service-dependencies-upstream.json │ │ ├── apm-service-overview.json │ │ ├── dashboards.libsonnet │ │ └── promscale.json │ └── mixin.libsonnet ├── multi_tenancy.md ├── multinode.md ├── prometheus_api.md ├── runbooks │ ├── PromscaleCacheHighNumberOfEvictions.md │ ├── PromscaleCompressionLow.md │ ├── PromscaleDBHighErrorRate.md │ ├── PromscaleDown.md │ ├── PromscaleIngestHighDataDuplication.md │ ├── PromscaleIngestHighErrorRate.md │ ├── PromscaleIngestHighLatency.md │ ├── PromscaleMaintenanceJobFailures.md │ ├── PromscaleMaintenanceJobRunningTooLong.md │ ├── PromscalePostgreSQLSharedBuffersLow.md │ ├── PromscaleQueryHighErrorRate.md │ ├── PromscaleQueryHighLatency.md │ ├── PromscaleStorageHighLatency.md │ ├── PromscaleStorageUnhealthy.md │ └── troubleshooting-guide.md ├── scripts │ ├── hotfix-compression-job-performance.sql │ ├── install-crontab.sh │ ├── prom-execute-maintenance.sh │ └── rollback-hotfix-compression-job-performance.sql ├── sql_api.md ├── sql_permissions.md ├── sql_schema.md ├── telemetry.md ├── tracing.md ├── vacuum.md └── writing_to_promscale.md ├── go.mod ├── go.sum ├── migration-tool ├── cmd │ └── prom-migrator │ │ ├── README.md │ │ ├── main.go │ │ └── main_test.go ├── go.mod ├── go.sum └── pkg │ ├── integration_tests │ ├── datasets_test.go │ ├── integration_test.go │ ├── remote_reader_test.go │ └── remote_writer_test.go │ ├── log │ └── log.go │ ├── planner │ ├── planner.go │ ├── planner_test.go 
│ ├── slab.go │ └── slab_test.go │ ├── reader │ └── reader.go │ ├── utils │ ├── auth.go │ ├── client.go │ ├── flags.go │ └── utils.go │ └── writer │ ├── shards.go │ └── writer.go ├── pkg ├── api │ ├── alerts.go │ ├── common.go │ ├── common_test.go │ ├── delete.go │ ├── delete_test.go │ ├── health.go │ ├── health_test.go │ ├── label_values.go │ ├── labels.go │ ├── labels_test.go │ ├── marshal.go │ ├── marshal_test.go │ ├── metadata.go │ ├── metrics.go │ ├── otlp.go │ ├── parser │ │ ├── json │ │ │ ├── json.go │ │ │ └── json_test.go │ │ ├── parser.go │ │ ├── parser_test.go │ │ ├── protobuf │ │ │ └── protobuf.go │ │ └── text │ │ │ ├── text.go │ │ │ └── text_test.go │ ├── query.go │ ├── query_exemplar.go │ ├── query_exemplar_test.go │ ├── query_range.go │ ├── query_range_test.go │ ├── query_test.go │ ├── read.go │ ├── read_test.go │ ├── reload.go │ ├── router.go │ ├── router_test.go │ ├── rules.go │ ├── series.go │ ├── series_test.go │ ├── write.go │ └── write_test.go ├── auth │ ├── auth.go │ └── auth_test.go ├── clockcache │ ├── Readme.md │ ├── cache.go │ ├── cache_bench_test.go │ ├── cache_test.go │ └── metrics.go ├── dataset │ ├── config.go │ ├── config_test.go │ ├── deepcopy_generated.go │ ├── doc.go │ └── duration.go ├── ewma │ └── ewma.go ├── ha │ ├── client │ │ └── client.go │ ├── filter.go │ ├── filter_test.go │ ├── mock_ha_lock_client.go │ ├── mock_ha_service.go │ ├── service.go │ └── state │ │ └── lease.go ├── internal │ └── testhelpers │ │ ├── containers.go │ │ ├── containers_test.go │ │ ├── jaeger_container.go │ │ ├── postgres_container.go │ │ └── prometheus_container.go ├── jaeger │ ├── api.go │ ├── proxy │ │ └── proxy.go │ └── store │ │ ├── binary_tags.go │ │ ├── binary_tags_test.go │ │ ├── config.go │ │ ├── find_trace_ids.go │ │ ├── find_traces.go │ │ ├── get_dependencies.go │ │ ├── get_operations.go │ │ ├── get_services.go │ │ ├── get_trace.go │ │ ├── metrics.go │ │ ├── store.go │ │ ├── tag_info.go │ │ ├── trace_query.go │ │ ├── trace_scan.go │ │ ├── translation.go │ │ └── translation_test.go ├── limits │ ├── flags.go │ ├── flags_test.go │ └── mem │ │ ├── mem.go │ │ └── mem_linux.go ├── log │ └── log.go ├── migrations │ ├── Readme.md │ ├── generate.go │ ├── migration_files_generated.go │ ├── migrations.go │ ├── mod_time_fs.go │ └── sql │ │ ├── idempotent │ │ ├── apply_permissions.sql │ │ ├── base.sql │ │ ├── exemplar.sql │ │ ├── ha.sql │ │ ├── maintenance.sql │ │ ├── matcher-functions.sql │ │ ├── metric-metadata.sql │ │ ├── remote-commands.sql │ │ ├── tag-operators.sql │ │ ├── telemetry.sql │ │ ├── tracing-functions.sql │ │ ├── tracing-tags.sql │ │ └── tracing-views.sql │ │ ├── preinstall │ │ ├── 000-utils.sql │ │ ├── 001-users.sql │ │ ├── 002-schemas.sql │ │ ├── 003-tag-operators.sql │ │ ├── 004-tables.sql │ │ ├── 005-matcher_operators.sql │ │ ├── 006-install_uda.sql │ │ ├── 007-tables_ha.sql │ │ ├── 008-tables_metadata.sql │ │ ├── 009-tables_exemplar.sql │ │ ├── 010-tracing.sql │ │ ├── 011-tracing-well-known-tags.sql │ │ └── 012-telemetry.sql │ │ └── versions │ │ └── dev │ │ ├── .gitignore │ │ ├── 0.1.0-beta.2.dev │ │ └── 1-drop_procedure_named_drop_chunks.sql │ │ ├── 0.1.0-beta.4.dev │ │ └── 1-drop_timescale_prometheus_extra.sql │ │ ├── 0.1.1-dev │ │ ├── 1-add_default_compression_setting.sql │ │ └── 2-add_id_deletion_epochs.sql │ │ ├── 0.1.3-dev │ │ └── 1-enable_multinode.sql │ │ ├── 0.1.4-dev │ │ └── 1-change_compression_job.sql │ │ ├── 0.1.5-dev │ │ ├── 1-update_execute_everywhere.sql │ │ ├── 2-update_autovac_settings.sql │ │ └── 3-drop_function_metric_view.sql │ │ ├── 
0.10.0-dev │ │ ├── 1-alter_promscale_instance_information_column.sql │ │ └── 2-add_spans_total_column_telemetry.sql │ │ ├── 0.2.2-dev │ │ └── 1-set_up_ha.sql │ │ ├── 0.3.1-dev │ │ ├── 1-fix_permissions.sql │ │ └── 2-fix_more_permissions.sql │ │ ├── 0.4.2-dev │ │ ├── 1-drop_some_func.sql │ │ ├── 2-metric_metadata.sql │ │ ├── 3-drop_old_delete_expired_series.sql │ │ └── 4-drop_old_main_funcs.sql │ │ ├── 0.5.2-dev │ │ ├── 1-downsampling.sql │ │ └── 2-exemplar_objects.sql │ │ ├── 0.6.99-dev │ │ ├── 1-schema_types.sql │ │ ├── 2-tag_operators.sql │ │ ├── 3-matcher_operators.sql │ │ ├── 4-tracing.sql │ │ ├── 5-tracing_operators.sql │ │ └── 6-tracing_well_known_tags.sql │ │ ├── 0.7.0-beta.1.dev │ │ ├── 1-tracing.sql │ │ ├── 2-drop_get_tag_id.sql │ │ ├── 3-drop_info_function.sql │ │ ├── 4-drop_get_operation.sql │ │ └── 5-drop_trace_tree_funcs.sql │ │ ├── 0.7.2-dev │ │ ├── 1-telemetry.sql │ │ ├── 2-telemetry_housekeeper.sql │ │ ├── 3-event_name.sql │ │ └── 4-data_retention.sql │ │ └── 0.8.1-dev │ │ └── 1-drop_old_series_funcs.sql ├── pgclient │ ├── client.go │ ├── client_test.go │ ├── config.go │ ├── config_test.go │ └── metrics.go ├── pgmodel │ ├── Readme.md │ ├── cache │ │ ├── cache.go │ │ ├── cache_test.go │ │ ├── exemplar_key_cache.go │ │ ├── flags.go │ │ ├── flags_test.go │ │ ├── inverted_labels_cache.go │ │ ├── series_cache.go │ │ └── series_cache_test.go │ ├── common │ │ ├── errors │ │ │ └── errors.go │ │ ├── extension │ │ │ └── extension.go │ │ └── schema │ │ │ └── schema.go │ ├── delete │ │ └── delete.go │ ├── epochs.readme.md │ ├── exemplar │ │ └── exemplar.go │ ├── health │ │ └── health_checker.go │ ├── ingestor │ │ ├── buffer.go │ │ ├── copier.go │ │ ├── dispatcher.go │ │ ├── exemplar_label_formatter.go │ │ ├── handler_test.go │ │ ├── ingestor.go │ │ ├── ingestor_interface.go │ │ ├── ingestor_sql_test.go │ │ ├── ingestor_test.go │ │ ├── metric_batcher.go │ │ ├── metric_batcher_test.go │ │ ├── series_writer.go │ │ ├── trace │ │ │ ├── batch.go │ │ │ ├── cache.go │ │ │ ├── instrumentation_lib_batch.go │ │ │ ├── instrumentation_lib_batch_test.go │ │ │ ├── operation_batch.go │ │ │ ├── operation_batch_test.go │ │ │ ├── schema_url_batch.go │ │ │ ├── schema_url_batch_test.go │ │ │ ├── tag_batch.go │ │ │ ├── tag_batch_test.go │ │ │ ├── trace_batcher.go │ │ │ ├── trace_batcher_test.go │ │ │ ├── trace_dispatcher.go │ │ │ └── writer.go │ │ ├── watcher.go │ │ └── write_request_pool.go │ ├── lreader │ │ ├── labels_reader.go │ │ └── labels_reader_test.go │ ├── metadata │ │ └── metadata.go │ ├── metrics │ │ ├── database │ │ │ ├── database.go │ │ │ ├── database_test.go │ │ │ └── metrics.go │ │ ├── ha.go │ │ ├── ingest.go │ │ └── query.go │ ├── migrate.go │ ├── model │ │ ├── batch.go │ │ ├── batch_visitor.go │ │ ├── custom_types.go │ │ ├── exemplars.go │ │ ├── exemplars_test.go │ │ ├── insertables.go │ │ ├── interface.go │ │ ├── label_list.go │ │ ├── metric.go │ │ ├── pgutf8str │ │ │ ├── text_types.go │ │ │ └── text_types_test.go │ │ ├── samples.go │ │ ├── series.go │ │ └── sql_test_utils.go │ ├── new_migrate.go │ └── querier │ │ ├── clauses.go │ │ ├── common.go │ │ ├── interface.go │ │ ├── metadata.go │ │ ├── querier.go │ │ ├── querier_sql_test.go │ │ ├── query_builder.go │ │ ├── query_builder_exemplar.go │ │ ├── query_builder_samples.go │ │ ├── query_exemplar.go │ │ ├── query_remote_read.go │ │ ├── query_sample.go │ │ ├── query_tools.go │ │ ├── row.go │ │ ├── series_exemplar.go │ │ ├── series_exemplar_test.go │ │ ├── series_set.go │ │ ├── series_set_test.go │ │ └── timestamp_series.go ├── pgxconn │ ├── 
copy_from.go │ ├── implement.go │ └── pgx_conn.go ├── prompb │ ├── README.md │ ├── custom.go │ ├── custom.ts.go │ ├── remote.pb.go │ └── types.pb.go ├── promql │ ├── bench_test.go │ ├── engine.go │ ├── engine_test.go │ ├── functions.go │ ├── functions_test.go │ ├── fuzz.go │ ├── fuzz_test.go │ ├── promql_test.go │ ├── quantile.go │ ├── query_logger.go │ ├── query_logger_test.go │ ├── test.go │ ├── test_test.go │ ├── testdata │ │ ├── aggregators.test │ │ ├── at_modifier.test │ │ ├── collision.test │ │ ├── functions.test │ │ ├── histograms.test │ │ ├── literals.test │ │ ├── operators.test │ │ ├── selectors.test │ │ ├── staleness.test │ │ ├── subquery.test │ │ └── trig_functions.test │ ├── value.go │ └── value_test.go ├── query │ ├── config.go │ ├── query_engine.go │ └── queryable.go ├── rules │ ├── adapters │ │ ├── ingest.go │ │ └── query.go │ ├── config.go │ ├── config_test.go │ ├── rules.go │ ├── rules_test.go │ ├── testdata │ │ ├── alert_config.good.config.yaml │ │ ├── no_rules.bad.config.yaml │ │ ├── no_rules.good.config.yaml │ │ ├── non_existent_rules.good.config.yaml │ │ ├── rules.glob.config.yaml │ │ ├── rules.good.config.yaml │ │ ├── rules.yaml │ │ └── rules_dir │ │ │ ├── rule.one.yaml │ │ │ └── rule.two.yaml │ ├── upstream.go │ └── upstream_test.go ├── runner │ ├── args.go │ ├── client.go │ ├── codec.go │ ├── config_parser.go │ ├── flags.go │ ├── flags_test.go │ └── runner.go ├── telemetry │ ├── metadata.go │ ├── promql.go │ ├── telemetry.go │ └── telemetry_test.go ├── tenancy │ ├── authorizer.go │ ├── config.go │ ├── config_test.go │ ├── flags.go │ ├── flags_test.go │ ├── interface.go │ ├── read.go │ ├── read_test.go │ ├── write.go │ └── write_test.go ├── tests │ ├── constants.go │ ├── end_to_end_tests │ │ ├── README.md │ │ ├── alerts_test.go │ │ ├── concurrent_sql_test.go │ │ ├── config_dataset_test.go │ │ ├── continuous_agg_test.go │ │ ├── create_test.go │ │ ├── database_metrics_test.go │ │ ├── datasets_metrics_test.go │ │ ├── db_connections_test.go │ │ ├── delete_test.go │ │ ├── drop_test.go │ │ ├── exemplar_query_endpoint_test.go │ │ ├── exemplar_test.go │ │ ├── functions_test.go │ │ ├── generate_jaeger_response_test.go │ │ ├── golden_files_test.go │ │ ├── ha_check_insert_sql_test.go │ │ ├── ha_multiple_promscales_test.go │ │ ├── ha_single_promscale_test.go │ │ ├── ha_try_change_leader_sql_test.go │ │ ├── ingest_trace_test.go │ │ ├── insert_compressed_chunks_test.go │ │ ├── jaeger_store_integration_test.go │ │ ├── jaeger_store_test.go │ │ ├── main_test.go │ │ ├── metadata_test.go │ │ ├── metric_ingest_bench_test.go │ │ ├── metrics_duplicate_insert_test.go │ │ ├── migrate_test.go │ │ ├── multi_tenancy_test.go │ │ ├── nan_test.go │ │ ├── new_migrate_test.go │ │ ├── no_timescaledb_test.go │ │ ├── null_chars_test.go │ │ ├── prometheus_wal_test.go │ │ ├── promql_endpoint_integration_test.go │ │ ├── promql_label_endpoint_test.go │ │ ├── promql_query_endpoint_test.go │ │ ├── promql_series_endpoint_test.go │ │ ├── promql_write_endpoint_test.go │ │ ├── query_integration_test.go │ │ ├── router_test.go │ │ ├── rules_test.go │ │ ├── sql_bench_test.go │ │ ├── sync_commit_test.go │ │ ├── tag_op_test.go │ │ ├── telemetry_test.go │ │ ├── trace_ingest_bench_test.go │ │ ├── trace_operation_calls_test.go │ │ ├── trace_put_test.go │ │ ├── trace_query_integration_test.go │ │ ├── trace_response_test.go │ │ ├── trace_retention_test.go │ │ ├── trace_tree_test.go │ │ ├── vacuum_test.go │ │ ├── view_test.go │ │ └── zlast_test.go │ ├── test_migrations │ │ ├── generate.go │ │ ├── 
migration_files_generated.go │ │ ├── migrations.go │ │ └── sql │ │ │ ├── idempotent │ │ │ ├── 1-toc-run_second.sql │ │ │ └── 2-toc-run_first.sql │ │ │ ├── preinstall │ │ │ └── 001-setup.sql │ │ │ └── versions │ │ │ └── dev │ │ │ ├── 0.1.0-dev │ │ │ └── 1-migration.sql │ │ │ ├── 0.10.0-dev │ │ │ ├── 1-migr_98_at.sql │ │ │ └── 2-1_mig.sql │ │ │ ├── 0.10.1-dev │ │ │ ├── 1-migr_98_at.sql │ │ │ └── 2-1_mig.sql │ │ │ ├── 0.10.2-beta.dev │ │ │ └── 1-migr_98_at.sql │ │ │ ├── 0.11.0-dev │ │ │ ├── 1-migr_98_at.sql │ │ │ └── 2-1_mig.sql │ │ │ ├── 0.2.0-dev │ │ │ └── 1-migration.sql │ │ │ └── 0.9.0-dev │ │ │ └── 1-migration.sql │ ├── testdata │ │ ├── expected │ │ │ ├── info_view-postgres.out │ │ │ ├── info_view-timescaledb-multinode.out │ │ │ ├── info_view-timescaledb.out │ │ │ ├── support-timescaledb-multinode.out │ │ │ ├── support.out │ │ │ ├── views-timescaledb-multinode.out │ │ │ └── views.out │ │ ├── import.json │ │ ├── jaeger_query_responses.sz │ │ ├── prometheus-data.tar.gz │ │ ├── real-dataset.sz │ │ ├── rules │ │ │ ├── alerts.yaml │ │ │ ├── config.alertmanager.yaml │ │ │ ├── config.alerts.yaml │ │ │ ├── config.empty_rules.yaml │ │ │ ├── config.recording_rules_eval.yaml │ │ │ └── rules.yaml │ │ ├── sql │ │ │ ├── support.sql │ │ │ └── views.sql │ │ └── trace_data.go │ ├── testsupport │ │ ├── metric_loader.go │ │ ├── mock_pgx_conn.go │ │ └── series_gen.go │ └── upgrade_tests │ │ ├── shapshot.go │ │ └── upgrade_test.go ├── thanos │ └── store.go ├── tracer │ ├── codec.go │ └── trace.go ├── util │ ├── lock.go │ ├── metrics.go │ ├── throughput │ │ ├── throughput.go │ │ └── throughput_test.go │ ├── ticker.go │ ├── util.go │ └── util_test.go ├── vacuum │ ├── vacuum.go │ └── vacuum_test.go └── version │ └── version.go └── scripts ├── end_to_end_tests.sh ├── fallback-docker.sh └── wait-for.sh /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | exclude_patterns: 2 | - "**/*_test.go" 3 | - "pkg/prompb/" # autogenerated 4 | - "pkg/promql" # from prometheus upstream 5 | checks: 6 | file-lines: 7 | config: 8 | threshold: 350 # ideal number is 300 plus a little leeway 9 | -------------------------------------------------------------------------------- /.coverage_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | echo "" > coverage.txt 5 | 6 | for d in $(go list ./... 
| grep -v vendor); do 7 | go test -race -coverprofile=profile.out -covermode=atomic $d 8 | if [ -f profile.out ]; then 9 | cat profile.out >> coverage.txt 10 | rm profile.out 11 | fi 12 | done 13 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | pkg/migrations/migration_files_generated.go -diff 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @timescale/o11y-applications 2 | 3 | # sql related stuff 4 | *.sql @timescale/o11y-data-platform 5 | 6 | # Go deps 7 | go.mod @timescale/o11y-applications 8 | go.sum @timescale/o11y-applications 9 | 10 | # CI system and repository configuration 11 | .github/ @timescale/o11y-applications 12 | .github/workflows/mixin.yml @timescale/o11y-services 13 | 14 | # Go packages 15 | /pkg/migrations/ @timescale/o11y-data-platform 16 | /pkg/migration-tool @timescale/o11y-applications 17 | /pkg/pgmodel/querier @timescale/o11y-data-platform 18 | /pkg/tests/test_migrations/ @timescale/o11y-data-platform 19 | /pkg/tests/testdata/ @timescale/o11y-data-platform 20 | /pkg/tests/upgrade_tests/ @timescale/o11y-data-platform 21 | 22 | /docs/mixin @timescale/o11y-services 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | 13 | **To Reproduce** 14 | 15 | 16 | **Expected behavior** 17 | 18 | 19 | **Screenshots** 20 | If applicable, add screenshots to help explain your problem. 21 | 22 | **Configuration (as applicable)** 23 | - Promscale Connector: 24 | - TimescaleDB: 25 | - Prometheus: 26 | - Jaeger: 27 | - OpenTelemetry: 28 | 29 | **Version** 30 | - Distribution/OS: 31 | - Promscale: 32 | - TimescaleDB: 33 | 34 | **Additional context** 35 | 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? 
Please describe.** 11 | 12 | **Describe the solution you'd like.** 13 | 14 | 15 | **Describe alternatives you've considered** 16 | 17 | 18 | **How would this feature help you?** 19 | 20 | 21 | **Additional context** 22 | 23 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 8 | 9 | ## Merge requirements 10 | 11 | Please take into account the following non-code changes that you may need to make with your PR: 12 | 13 | - [ ] CHANGELOG entry for user-facing changes 14 | - [ ] Updated the relevant documentation 15 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "timezone": "Etc/UTC", 3 | "extends": [ 4 | "config:base", 5 | "schedule:daily" 6 | ], 7 | "ignoreDeps": ["github.com/prometheus/prometheus"], 8 | "dependencyDashboardLabels": ["dependencies"], 9 | "labels": ["dependencies"], 10 | "regexManagers": [ 11 | { 12 | "fileMatch": ["^EXTENSION_VERSION$"], 13 | "matchStrings": ["(?<currentValue>.*?)\\n"], 14 | "datasourceTemplate": "github-tags", 15 | "depNameTemplate": "timescale/promscale_extension" 16 | }, 17 | { 18 | "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], 19 | "matchStrings": ["golangci-lint-version:\\s(?<currentValue>.*?)\\n"], 20 | "datasourceTemplate": "github-tags", 21 | "depNameTemplate": "golangci/golangci-lint" 22 | }, 23 | { 24 | "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], 25 | "matchStrings": ["goreleaser-version:\\s(?<currentValue>.*?)\\n"], 26 | "datasourceTemplate": "github-tags", 27 | "depNameTemplate": "goreleaser/goreleaser" 28 | }, 29 | { 30 | "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], 31 | "matchStrings": ["golang-version:\\s(?<currentValue>.*?)\\n"], 32 | "datasourceTemplate": "golang-version", 33 | "depNameTemplate": "golang" 34 | }, 35 | { 36 | "fileMatch": ["Dockerfile"], 37 | "matchStrings": ["FROM golang:(?<currentValue>.*?)-alpine"], 38 | "datasourceTemplate": "golang-version", 39 | "depNameTemplate": "golang" 40 | } 41 | ], 42 | "packageRules": [ 43 | { 44 | "addLabels": ["go"], 45 | "groupName": "golang dependencies", 46 | "matchManagers": ["gomod"], 47 | "commitMessageExtra": "", 48 | "enabled": false 49 | }, 50 | { 51 | "addLabels": ["github_actions"], 52 | "groupName": "github actions", 53 | "matchPaths": [".github/**"] 54 | }, 55 | { 56 | "groupName": "docker-compose", 57 | "matchManagers": ["docker-compose"] 58 | } 59 | ] 60 | } 61 | -------------------------------------------------------------------------------- /.github/workflows/docker-compose-tests.yml: -------------------------------------------------------------------------------- 1 | name: Docker Compose tests 2 | 3 | defaults: 4 | run: 5 | shell: bash --noprofile --norc -eo pipefail {0} 6 | 7 | on: 8 | push: 9 | branches: [master, main, force_test, release-*] 10 | pull_request: 11 | branches: [master, main, force_test, release-*] 12 | 13 | env: 14 | golang-version: '1.15' 15 | 16 | jobs: 17 | 18 | build: 19 | name: Run 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Check out code into the Go module directory 23 | uses: actions/checkout@v3 24 | 25 | - name: Set up Go ${{ env.golang-version }} 26 | uses: actions/setup-go@v3.5.0 27 | with: 28 | go-version: ${{ env.golang-version }} 29 | id: go 30 | 31 | - name: Test docker-compose setup referenced in docs 32 | run: ./docker-compose/test.sh 33 | 34 | -
name: Test high-availability docker-compose setup 35 | run: ./docker-compose/high-availability/test.sh 36 | -------------------------------------------------------------------------------- /.github/workflows/go-report-card.yml: -------------------------------------------------------------------------------- 1 | name: Go Report Card 2 | 3 | defaults: 4 | run: 5 | shell: bash --noprofile --norc -eo pipefail {0} 6 | 7 | on: 8 | push: 9 | branches: [master, main] 10 | 11 | jobs: 12 | build: 13 | name: Build 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Request Report Update 17 | run: | 18 | curl -X POST -F "repo=github.com/$GITHUB_REPOSITORY" https://goreportcard.com/checks 19 | -------------------------------------------------------------------------------- /.github/workflows/go-scheduled.yml: -------------------------------------------------------------------------------- 1 | name: Daily scheduled extended Go tests 2 | 3 | defaults: 4 | run: 5 | shell: bash --noprofile --norc -eo pipefail {0} 6 | 7 | on: 8 | push: 9 | branches: [force_test, release-*] 10 | schedule: 11 | - cron: "6 0 * * *" 12 | 13 | env: 14 | golang-version: 1.19.4 15 | 16 | jobs: 17 | 18 | build: 19 | name: Build 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Check out code into the Go module directory 23 | uses: actions/checkout@v3 24 | 25 | - name: Set up Go ${{ env.golang-version }} 26 | uses: actions/setup-go@v3.5.0 27 | with: 28 | go-version: ${{ env.golang-version }} 29 | id: go 30 | 31 | - name: Use Go module caching 32 | uses: actions/cache@v3 33 | with: 34 | path: ~/go/pkg/mod 35 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 36 | restore-keys: | 37 | ${{ runner.os }}-go- 38 | 39 | - name: Build 40 | run: make build 41 | 42 | test: 43 | name: Test 44 | runs-on: ubuntu-latest 45 | steps: 46 | - name: Check out code into the Go module directory 47 | uses: actions/checkout@v3 48 | with: 49 | lfs: true 50 | 51 | - name: Checkout LFS objects 52 | run: git lfs checkout 53 | 54 | - name: Set up Go ${{ env.golang-version }} 55 | uses: actions/setup-go@v3.5.0 56 | with: 57 | go-version: ${{ env.golang-version }} 58 | id: go 59 | 60 | - name: Use Go module caching 61 | uses: actions/cache@v3 62 | with: 63 | path: ~/go/pkg/mod 64 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 65 | restore-keys: | 66 | ${{ runner.os }}-go- 67 | 68 | - name: Test extended dataset 69 | run: go test -v -race -timeout=30m ./pkg/tests/end_to_end_tests/ -extended-test 70 | -------------------------------------------------------------------------------- /.github/workflows/gosec.yml: -------------------------------------------------------------------------------- 1 | name: Run Gosec 2 | defaults: 3 | run: 4 | shell: bash --noprofile --norc -eo pipefail {0} 5 | on: 6 | push: 7 | branches: [master, main, force_test, release-*] 8 | pull_request: 9 | branches: [master, main, force_test, release-*] 10 | jobs: 11 | tests: 12 | runs-on: ubuntu-latest 13 | env: 14 | GO111MODULE: on 15 | steps: 16 | - name: Checkout Source 17 | uses: actions/checkout@v3 18 | - name: Run Gosec Security Scanner 19 | uses: securego/gosec@v2.14.0 20 | with: 21 | args: -exclude-dir=pkg/promql -exclude-dir=pkg/prompb -exclude-dir=migration-tool ./... 
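          # Excluded dirs: pkg/promql and pkg/prompb come from Prometheus upstream or are
          # autogenerated (see .codeclimate.yml), and migration-tool is a separate Go module.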
22 | -------------------------------------------------------------------------------- /.github/workflows/integration.yml: -------------------------------------------------------------------------------- 1 | name: Integration 2 | 3 | defaults: 4 | run: 5 | shell: bash --noprofile --norc -eo pipefail {0} 6 | 7 | on: 8 | push: 9 | branches: [master, main, force_test, release-*, staging, trying] 10 | pull_request: 11 | branches: [master, main, force_test, release-*] 12 | 13 | env: 14 | golang-version: 1.19.4 15 | 16 | jobs: 17 | run: 18 | runs-on: ubuntu-latest 19 | strategy: 20 | matrix: 21 | pg: 22 | - 15 23 | - 14 24 | - 13 25 | # TODO (mat): re-enable the pg12 test 26 | #- 12 27 | steps: 28 | - name: Check out code into the Go module directory 29 | uses: actions/checkout@v3 30 | 31 | - name: Set up Go ${{ env.golang-version }} 32 | uses: actions/setup-go@v3.5.0 33 | with: 34 | go-version: ${{ env.golang-version }} 35 | id: go 36 | 37 | - name: Prepare metadata 38 | id: metadata 39 | run: | 40 | branch_name=$(echo ${{github.head_ref || github.ref_name}} | sed 's#/#-#') 41 | possible_branch_tag=$(echo ${branch_name}-ts2-pg${{matrix.pg}}) 42 | extension_version=$(cat EXTENSION_VERSION | tr -d '[:space:]') 43 | stable_branch_tag=$(echo ${extension_version}-ts2-pg${{matrix.pg}}) 44 | image_base="ghcr.io/timescale/dev_promscale_extension" 45 | docker_image=$(./scripts/fallback-docker.sh ${image_base}:${possible_branch_tag} ${image_base}:${stable_branch_tag}) 46 | echo "docker_image=${docker_image}" >> ${GITHUB_OUTPUT} 47 | 48 | - name: TimescaleDB 2.x with Promscale extension (pg${{matrix.pg}}) 49 | run: ./scripts/end_to_end_tests.sh ${{steps.metadata.outputs.docker_image}} 50 | shell: bash 51 | 52 | # Added to summarize the matrix (otherwise we would need to list every single job in branch protection rules) 53 | run-result: 54 | name: integration results 55 | if: always() 56 | needs: 57 | - run 58 | runs-on: ubuntu-latest 59 | steps: 60 | - name: Mark the job as a success 61 | if: needs.run.result == 'success' 62 | run: exit 0 63 | - name: Mark the job as a failure 64 | if: needs.run.result != 'success' 65 | run: exit 1 66 | -------------------------------------------------------------------------------- /.github/workflows/mixin.yml: -------------------------------------------------------------------------------- 1 | name: mixin 2 | on: 3 | push: 4 | paths: 5 | - 'docs/mixin/**' 6 | pull_request: 7 | paths: 8 | - 'docs/mixin/**' 9 | 10 | env: 11 | golang-version: 1.19.4 12 | 13 | defaults: 14 | run: 15 | working-directory: docs/mixin 16 | 17 | jobs: 18 | check-mixin: 19 | runs-on: ubuntu-latest 20 | name: Check monitoring mixin 21 | steps: 22 | - uses: actions/checkout@v3 23 | 24 | - name: Set up Go ${{ env.golang-version }} 25 | uses: actions/setup-go@v3.5.0 26 | with: 27 | go-version: ${{ env.golang-version }} 28 | id: go 29 | 30 | - uses: actions/setup-go@v3 31 | with: 32 | go-version: ${{ env.golang-version }} 33 | 34 | - name: download mixtool 35 | run: go install -a github.com/monitoring-mixins/mixtool/cmd/mixtool@latest 36 | 37 | - name: download jsonnetfmt 38 | run: go install -a github.com/google/go-jsonnet/cmd/jsonnetfmt@latest 39 | 40 | - name: download promtool 41 | run: | 42 | VERSION=$(curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}' | sed 's/v//') 43 | curl -s -L "https://github.com/prometheus/prometheus/releases/download/v${VERSION}/prometheus-${VERSION}.linux-amd64.tar.gz" | tar -zxf - -C 
"${GITHUB_WORKSPACE}/" --strip-components 1 "prometheus-${VERSION}.linux-amd64/promtool" 44 | 45 | - name: download gojsontoyaml 46 | run: go install github.com/brancz/gojsontoyaml@latest 47 | 48 | - name: lint 49 | run: make lint 50 | 51 | - name: fmt 52 | run: make fmt && git diff --exit-code 53 | 54 | - name: build 55 | run: make build 56 | 57 | - name: test alerts 58 | run: | 59 | PATH="${PATH}:${GITHUB_WORKSPACE}" 60 | make test 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | /promscale* 3 | /prom-migrator* 4 | /migration-tool/prom-migrator* 5 | /cmd/promscale/promscale 6 | /cmd/prom-migrator/prom-migrator 7 | /dist 8 | /vendor 9 | /version.properties 10 | /.target_os 11 | /.history 12 | /.vscode 13 | /.idea 14 | .DS_Store 15 | .git 16 | venv 17 | /pkg/tests/testdata/traces-dataset.sz 18 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | run: 3 | timeout: 5m 4 | skip-dirs: 5 | - pkg/promql 6 | - pkg/promb 7 | 8 | -------------------------------------------------------------------------------- /.mdox.validate.yaml: -------------------------------------------------------------------------------- 1 | version: 1 2 | 3 | validators: 4 | # Ignore localhost links. 5 | - regex: 'localhost' 6 | type: "ignore" -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | The Timescale Code of Conduct can be found at https://www.timescale.com/code-of-conduct. 2 | -------------------------------------------------------------------------------- /EXTENSION_VERSION: -------------------------------------------------------------------------------- 1 | master 2 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | 2 | Promscale Connector by Timescale (TM) 3 | 4 | Copyright (c) 2017-2021 Timescale, Inc. All Rights Reserved. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
-------------------------------------------------------------------------------- /bors.toml: -------------------------------------------------------------------------------- 1 | status = ["Build and Lint", "Go Tests", "Go End-to-End Test Results", "Bash End-to-End Tests"] 2 | delete-merged-branches = true 3 | required_approvals = 1 4 | timeout_sec = 3600 # one hour -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build stage 2 | FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.19.3-alpine AS builder 3 | RUN apk update && apk add --no-cache git 4 | WORKDIR /promscale 5 | COPY ./go.mod ./go.sum ./ 6 | RUN go mod download 7 | COPY ./.git .git/ 8 | COPY ./pkg pkg/ 9 | COPY ./cmd cmd/ 10 | ARG TARGETOS 11 | ARG TARGETARCH 12 | RUN go generate ./... 13 | RUN GIT_COMMIT=$(git rev-list -1 HEAD) && GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD) \ 14 | && GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH:-amd64} CGO_ENABLED=0 go build -a \ 15 | -ldflags "-w -X 'github.com/timescale/promscale/pkg/version.CommitHash=$GIT_COMMIT' -X 'github.com/timescale/promscale/pkg/version.Branch=$GIT_BRANCH'" \ 16 | -o /bin/promscale ./cmd/promscale 17 | 18 | # Final image 19 | FROM busybox 20 | ENV PROMSCALE_PKG=docker 21 | LABEL maintainer="Timescale https://www.timescale.com" 22 | COPY --from=builder /bin/promscale / 23 | ENTRYPOINT ["/promscale"] 24 | -------------------------------------------------------------------------------- /build/conf/promscale.conf: -------------------------------------------------------------------------------- 1 | # PROMSCALE_METRICS_ASYNC_ACKS="" 2 | # PROMSCALE_DB_CONNECTIONS_MAX="-1" 3 | # PROMSCALE_DB_HOST="localhost" 4 | # PROMSCALE_DB_NAME="timescale" 5 | # PROMSCALE_DB_PASSWORD="" 6 | # PROMSCALE_DB_PORT="5432" 7 | PROMSCALE_DB_SSL_MODE="prefer" 8 | # PROMSCALE_DB_USER="postgres" 9 | # PROMSCALE_DB_NUM_WRITER_CONNECTIONS="4" 10 | # PROMSCALE_STARTUP_INSTALL_EXTENSIONS="true" 11 | # PROMSCALE_METRICS_CACHE_LABELS_SIZE="10000" 12 | # PROMSCALE_TELEMETRY_LOG_FORMAT="logfmt" 13 | # PROMSCALE_METRICS_CACHE_METRICS_SIZE="10000" 14 | # PROMSCALE_TELEMETRY_LOG_THROUGHPUT_REPORT_INTERVAL="" 15 | # PROMSCALE_STARTUP_USE_SCHEMA_VERSION_LEASE="" 16 | # PROMSCALE_WEB_CORS_ORIGIN=".*" 17 | # PROMSCALE_WEB_LISTEN_ADDRESS=":9201" 18 | # PROMSCALE_WEB_TELEMETRY_PATH="/metrics" 19 | 20 | # OPTIONS="" 21 | -------------------------------------------------------------------------------- /build/deepcopy-gen-header.txt: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
4 | -------------------------------------------------------------------------------- /build/env/deb.env: -------------------------------------------------------------------------------- 1 | PROMSCALE_PKG="deb" 2 | -------------------------------------------------------------------------------- /build/env/rpm.env: -------------------------------------------------------------------------------- 1 | PROMSCALE_PKG="rpm" 2 | -------------------------------------------------------------------------------- /build/prom-migrator/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build stage 2 | FROM golang:1.19.3-alpine AS builder 3 | COPY ./.git build/.git 4 | COPY ./pkg build/pkg 5 | COPY ./migration-tool build/migration-tool 6 | COPY ./go.mod build/go.mod 7 | COPY ./go.sum build/go.sum 8 | RUN apk update && apk add --no-cache git \ 9 | && cd build \ 10 | && go mod download \ 11 | && GIT_COMMIT=$(git rev-list -1 HEAD) \ 12 | && cd migration-tool/ \ 13 | && CGO_ENABLED=0 go build -a \ 14 | -o /go/prom-migrator ./cmd/prom-migrator 15 | 16 | # Final image 17 | FROM busybox 18 | LABEL maintainer="Timescale https://www.timescale.com" 19 | COPY --from=builder /go/prom-migrator / 20 | ENTRYPOINT ["/prom-migrator"] 21 | -------------------------------------------------------------------------------- /build/scripts/postinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "Promscale installed as a systemd service" 3 | echo "Enable auto-start with: systemctl enable promscale" 4 | echo "And start the service now with: systemctl start promscale" 5 | echo "----------------------------------------" 6 | CONF_FILE=$(find /etc -name promscale.conf 2>/dev/null | head -n 1) 7 | echo "Modify configuration by editing $CONF_FILE config file" 8 | echo "And then restart the service with: systemctl restart promscale" 9 | 10 | 11 | sed -i "s|__ENV_FILE__|$CONF_FILE|g" /usr/lib/systemd/system/promscale.service 12 | 13 | PROMSCALE_ENV_FILE=$(find /etc -name promscale.env 2>/dev/null | head -n 1) 14 | cat $PROMSCALE_ENV_FILE >> $CONF_FILE 15 | -------------------------------------------------------------------------------- /build/scripts/postremove.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Abort if any command returns an error value 4 | set -e 5 | 6 | USER=promscale 7 | 8 | if getent passwd "${USER}" > /dev/null 2>&1 ; then 9 | userdel "${USER}" 2>/dev/null 10 | fi 11 | if getent group "${USER}" > /dev/null 2>&1 ; then 12 | groupdel "${USER}" 2>/dev/null 13 | fi 14 | -------------------------------------------------------------------------------- /build/scripts/preinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Abort if any command returns an error value 4 | set -e 5 | 6 | USER=promscale 7 | 8 | # Following user part should be tested on both RPM and DEB systems 9 | if ! getent group "${USER}" > /dev/null 2>&1 ; then 10 | groupadd --system "${USER}" 11 | fi 12 | GID=$(getent group "${USER}" | cut -d: -f 3) 13 | if ! 
id "${USER}" > /dev/null 2>&1 ; then 14 | adduser --system --no-create-home \ 15 | --gid "${GID}" --shell /bin/false \ 16 | "${USER}" 17 | fi 18 | -------------------------------------------------------------------------------- /build/systemd/promscale.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=TimescaleDB Promscale Service 3 | Documentation=https://github.com/timescale/promscale 4 | After=syslog.target 5 | After=network.target 6 | 7 | [Service] 8 | Type=simple 9 | User=promscale 10 | Group=promscale 11 | EnvironmentFile=__ENV_FILE__ 12 | ExecStart=/usr/bin/promscale $OPTIONS 13 | Restart=on-failure 14 | RestartSec=10 15 | KillMode=mixed 16 | KillSignal=SIGINT 17 | ProtectSystem=strict 18 | NoNewPrivileges=true 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /cmd/promscale/main.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | 11 | "github.com/timescale/promscale/pkg/log" 12 | "github.com/timescale/promscale/pkg/runner" 13 | "github.com/timescale/promscale/pkg/version" 14 | _ "go.uber.org/automaxprocs" 15 | ) 16 | 17 | func main() { 18 | log.InitDefault() 19 | args := os.Args[1:] 20 | if shouldProceed := runner.ParseArgs(args); !shouldProceed { 21 | os.Exit(0) 22 | } 23 | cfg := &runner.Config{} 24 | cfg, err := runner.ParseFlags(cfg, args) 25 | if err != nil { 26 | log.Info("msg", version.Info()) 27 | log.Fatal("msg", "cannot parse flags", "err", err) 28 | } 29 | err = log.Init(cfg.LogCfg) 30 | if err != nil { 31 | fmt.Println(version.Info()) 32 | log.Fatal("msg", "cannot start logger", "err", err) 33 | } 34 | err = runner.Run(cfg) 35 | if err != nil { 36 | os.Exit(1) 37 | } 38 | os.Exit(0) 39 | } 40 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | startup.dataset: 2 | metrics: 3 | default_chunk_interval: 1d 4 | -------------------------------------------------------------------------------- /deploy/helm-chart/README.md: -------------------------------------------------------------------------------- 1 | # Promscale Helm chart 2 | 3 | Promscale helm chart code was migrated to [timescale/helm-charts](https://github.com/timescale/helm-charts/tree/master/charts/promscale) repository. 4 | -------------------------------------------------------------------------------- /docker-compose/alerts.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: alerts 3 | rules: 4 | - alert: Watchdog 5 | annotations: 6 | description: > 7 | This is a Watchdog alert is meant to ensure that the entire Alerting 8 | pipeline is functional. 
It is always firing in normal operation 9 | summary: Alerting Watchdog 10 | expr: vector(1) 11 | -------------------------------------------------------------------------------- /docker-compose/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.0' 2 | 3 | services: 4 | db: 5 | # TODO change to 15 when the new HA image is available 6 | image: timescale/timescaledb-ha:pg14-latest 7 | ports: 8 | - 5432:5432/tcp 9 | environment: 10 | POSTGRES_PASSWORD: password 11 | POSTGRES_USER: postgres 12 | TSTUNE_PROFILE: promscale 13 | 14 | prometheus: 15 | image: prom/prometheus:latest 16 | depends_on: 17 | - promscale 18 | ports: 19 | - 9090:9090/tcp 20 | volumes: 21 | - ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml 22 | 23 | alertmanager: 24 | image: prom/alertmanager:latest 25 | ports: 26 | - 9093:9093/tcp 27 | 28 | promscale: 29 | image: timescale/promscale:latest 30 | ports: 31 | - 9201:9201/tcp 32 | - 9202:9202/tcp 33 | restart: on-failure 34 | depends_on: 35 | - db 36 | volumes: 37 | - ${PWD}/promscale_prometheus.yml:/prometheus.yml 38 | - ${PWD}/rules.yml:/rules.yml 39 | - ${PWD}/alerts.yml:/alerts.yml 40 | environment: 41 | PROMSCALE_DB_URI: postgres://postgres:password@db:5432/postgres?sslmode=allow 42 | PROMSCALE_TRACING_OTLP_SERVER_ADDRESS: ":9202" 43 | PROMSCALE_TELEMETRY_TRACE_OTEL_ENDPOINT: "otel-collector:4317" 44 | PROMSCALE_TELEMETRY_TRACE_SAMPLING_RATIO: "0.1" 45 | PROMSCALE_METRICS_RULES_CONFIG_FILE: /prometheus.yml 46 | 47 | otel-collector: 48 | platform: linux/amd64 49 | image: "otel/opentelemetry-collector-contrib:0.63.1" 50 | command: [ "--config=/etc/otel-collector-config.yml" ] 51 | volumes: 52 | - ${PWD}/otel-collector-config.yml:/etc/otel-collector-config.yml 53 | ports: 54 | - "14268:14268" # jaeger http 55 | 56 | jaeger: 57 | image: jaegertracing/jaeger-query:1.37.0 58 | environment: 59 | SPAN_STORAGE_TYPE: grpc-plugin 60 | METRICS_STORAGE_TYPE: prometheus 61 | GRPC_STORAGE_SERVER: promscale:9202 62 | PROMETHEUS_SERVER_URL: "http://promscale:9201" 63 | depends_on: 64 | - promscale 65 | 66 | ports: 67 | - "16686:16686" 68 | 69 | node_exporter: 70 | image: quay.io/prometheus/node-exporter 71 | ports: 72 | - "9100:9100" 73 | -------------------------------------------------------------------------------- /docker-compose/high-availability/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.0' 2 | 3 | services: 4 | db: 5 | # TODO change to 15 when the new HA image is available 6 | image: timescale/timescaledb-ha:pg14-latest 7 | ports: 8 | - 5432:5432/tcp 9 | environment: 10 | POSTGRES_PASSWORD: password 11 | POSTGRES_USER: postgres 12 | 13 | prometheus1: 14 | image: prom/prometheus:latest 15 | ports: 16 | - 9091:9090/tcp 17 | volumes: 18 | - ./prometheus1.yml:/etc/prometheus/prometheus.yml:ro 19 | 20 | prometheus2: 21 | image: prom/prometheus:latest 22 | ports: 23 | - 9092:9090/tcp 24 | volumes: 25 | - ./prometheus2.yml:/etc/prometheus/prometheus.yml:ro 26 | 27 | alertmanager: 28 | image: prom/alertmanager:latest 29 | ports: 30 | - 9093:9093/tcp 31 | 32 | promscale-connector1: 33 | image: timescale/promscale:latest 34 | ports: 35 | - 9201:9201/tcp 36 | restart: on-failure 37 | depends_on: 38 | - db 39 | - prometheus1 40 | volumes: 41 | - ${PWD}/../promscale_prometheus.yml:/prometheus.yml 42 | - ${PWD}/../rules.yml:/rules.yml 43 | - ${PWD}/../alerts.yml:/alerts.yml 44 | environment: 45 | PROMSCALE_METRICS_HIGH_AVAILABILITY: true 46 | 
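      # With PROMSCALE_METRICS_HIGH_AVAILABILITY enabled, Promscale deduplicates the
      # replicated samples, keyed on the `cluster` and `__replica__` external labels
      # set in prometheus1.yml / prometheus2.yml (see docs/high-availability/prometheus-HA.md).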
PROMSCALE_DB_URI: postgres://postgres:password@db:5432/postgres?sslmode=allow 47 | PROMSCALE_METRICS_RULES_CONFIG_FILE: /prometheus.yml 48 | 49 | promscale-connector2: 50 | image: timescale/promscale:latest 51 | ports: 52 | - 9202:9201/tcp 53 | restart: on-failure 54 | depends_on: 55 | - db 56 | - prometheus2 57 | volumes: 58 | - ${PWD}/../promscale_prometheus.yml:/prometheus.yml 59 | - ${PWD}/../rules.yml:/rules.yml 60 | - ${PWD}/../alerts.yml:/alerts.yml 61 | environment: 62 | PROMSCALE_METRICS_HIGH_AVAILABILITY: true 63 | PROMSCALE_DB_URI: postgres://postgres:password@db:5432/postgres?sslmode=allow 64 | PROMSCALE_METRICS_RULES_CONFIG_FILE: /prometheus.yml 65 | 66 | node_exporter: 67 | image: quay.io/prometheus/node-exporter 68 | ports: 69 | - "9100:9100" 70 | -------------------------------------------------------------------------------- /docker-compose/high-availability/prometheus1.yml: -------------------------------------------------------------------------------- 1 | 2 | # my global config 3 | global: 4 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 5 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 6 | # scrape_timeout is set to the global default (10s). 7 | 8 | # Attach these labels to any time series or alerts when communicating with 9 | # external systems (federation, remote storage, Alertmanager). 10 | external_labels: 11 | cluster: 'monitoring-cluster' 12 | __replica__: 'prometheus1' 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 15 | rule_files: 16 | # - "first.rules" 17 | # - "second.rules" 18 | 19 | remote_write: 20 | - url: "http://promscale-connector1:9201/write" 21 | - url: "http://promscale-connector2:9201/write" 22 | remote_read: 23 | - url: "http://promscale-connector1:9201/read" 24 | - url: "http://promscale-connector2:9201/read" 25 | 26 | # A scrape configuration containing exactly one endpoint to scrape: 27 | # Here it's Prometheus itself. 28 | scrape_configs: 29 | # The job name is added as a label `job=` to any timeseries scraped from this config. 30 | - job_name: prometheus 31 | static_configs: 32 | - targets: ['localhost:9090'] 33 | - job_name: node-exporter 34 | static_configs: 35 | - targets: ['node_exporter:9100'] 36 | - job_name: promscale 37 | static_configs: 38 | - targets: 39 | - 'promscale-connector1:9201' 40 | - 'promscale-connector2:9201' 41 | -------------------------------------------------------------------------------- /docker-compose/high-availability/prometheus2.yml: -------------------------------------------------------------------------------- 1 | 2 | # my global config 3 | global: 4 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 5 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 6 | # scrape_timeout is set to the global default (10s). 7 | 8 | # Attach these labels to any time series or alerts when communicating with 9 | # external systems (federation, remote storage, Alertmanager). 10 | external_labels: 11 | cluster: 'monitoring-cluster' 12 | __replica__: 'prometheus2' 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 
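# No rule files are loaded here: in this setup, recording and alerting rules are
# evaluated by the Promscale connectors instead (promscale_prometheus.yml is passed
# via PROMSCALE_METRICS_RULES_CONFIG_FILE in docker-compose.yaml).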
15 | rule_files: 16 | # - "first.rules" 17 | # - "second.rules" 18 | 19 | remote_write: 20 | - url: "http://promscale-connector1:9201/write" 21 | - url: "http://promscale-connector2:9201/write" 22 | remote_read: 23 | - url: "http://promscale-connector1:9201/read" 24 | - url: "http://promscale-connector2:9201/read" 25 | 26 | scrape_configs: 27 | # The job name is added as a label `job=` to any timeseries scraped from this config. 28 | - job_name: prometheus 29 | static_configs: 30 | - targets: ['localhost:9090'] 31 | - job_name: node-exporter 32 | static_configs: 33 | - targets: ['node_exporter:9100'] 34 | - job_name: promscale 35 | static_configs: 36 | - targets: 37 | - 'promscale-connector1:9201' 38 | - 'promscale-connector2:9201' 39 | 40 | -------------------------------------------------------------------------------- /docker-compose/jaeger-promscale-demo/README.md: -------------------------------------------------------------------------------- 1 | # Hot R.O.D. - Rides on Demand 2 | 3 | This is a demo application that consists of several microservices and illustrates 4 | the use of the OpenTracing API. It can be run standalone, but requires Jaeger backend 5 | to view the traces. A tutorial / walkthrough is available: 6 | * as a blog post [Take OpenTracing for a HotROD ride][hotrod-tutorial], 7 | * as a video [OpenShift Commons Briefing: Distributed Tracing with Jaeger & Prometheus on Kubernetes][hotrod-openshift]. 8 | 9 | ## Features 10 | 11 | * Discover architecture of the whole system via data-driven dependency diagram 12 | * View request timeline & errors, understand how the app works 13 | * Find sources of latency, lack of concurrency 14 | * Highly contextualized logging 15 | * Use baggage propagation to 16 | * Diagnose inter-request contention (queueing) 17 | * Attribute time spent in a service 18 | * Use open source libraries with OpenTracing integration to get vendor-neutral instrumentation for free 19 | 20 | ## Running 21 | 22 | ### Run everything via `docker-compose` 23 | 24 | * Download `docker-compose.yml` from https://github.com/timescale/promscale/blob/master/docker-compose/jaeger-promscale-demo/docker-compose.yaml 25 | * Run Jaeger backend and HotROD demo with `docker-compose -f path-to-yml-file up` 26 | * Access Jaeger UI at http://localhost:16686 and HotROD app at http://localhost:8080 27 | * Shutdown / cleanup with `docker-compose -f path-to-yml-file down` 28 | 29 | [hotrod-tutorial]: https://medium.com/@YuriShkuro/take-opentracing-for-a-hotrod-ride-f6e3141f7941 30 | [hotrod-openshift]: https://blog.openshift.com/openshift-commons-briefing-82-distributed-tracing-with-jaeger-prometheus-on-kubernetes/ 31 | -------------------------------------------------------------------------------- /docker-compose/jaeger-promscale-demo/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.0' 2 | 3 | services: 4 | db: 5 | # TODO change to 15 when the new HA image is available 6 | image: timescale/timescaledb-ha:pg14-latest 7 | environment: 8 | POSTGRES_PASSWORD: password 9 | POSTGRES_USER: postgres 10 | # ports: 11 | # - 5432:5432/tcp 12 | 13 | promscale: 14 | # TODO: Use docker image with Jaeger gRPC remote store support after 0.14.0 release. 
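    # Promscale acts as the Jaeger storage backend in this demo: jaeger-query and
    # jaeger-collector point GRPC_STORAGE_SERVER at promscale:9202, the same port
    # configured via PROMSCALE_TRACING_OTLP_SERVER_ADDRESS below.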
15 | image: timescale/promscale:latest 16 | restart: on-failure 17 | depends_on: 18 | - db 19 | environment: 20 | PROMSCALE_DB_URI: postgres://postgres:password@db:5432/postgres?sslmode=allow 21 | PROMSCALE_TRACING_OTLP_SERVER_ADDRESS: ":9202" 22 | # ports: 23 | # - 9201:9201/tcp 24 | # - 9202:9202/tcp 25 | 26 | jaeger-query: 27 | depends_on: 28 | - promscale 29 | restart: on-failure 30 | image: jaegertracing/jaeger-query:1.37.0 31 | environment: 32 | SPAN_STORAGE_TYPE: grpc-plugin 33 | GRPC_STORAGE_SERVER: promscale:9202 34 | ports: 35 | - "16686:16686" 36 | 37 | hotrod: 38 | depends_on: 39 | - jaeger-collector 40 | image: jaegertracing/example-hotrod:1.37.0 41 | environment: 42 | JAEGER_ENDPOINT: http://jaeger-collector:14268/api/traces 43 | ports: 44 | - "8080-8083:8080-8083" 45 | 46 | jaeger-collector: 47 | depends_on: 48 | - promscale 49 | restart: on-failure 50 | image: jaegertracing/jaeger-collector:1.37.0 51 | environment: 52 | SPAN_STORAGE_TYPE: grpc-plugin 53 | GRPC_STORAGE_SERVER: promscale:9202 54 | # ports: 55 | # - "6831:6831/udp" 56 | # - "14268:14268" 57 | -------------------------------------------------------------------------------- /docker-compose/otel-collector-config.yml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | http: 6 | 7 | exporters: 8 | logging: 9 | jaeger: 10 | endpoint: jaeger-all-in-one:14250 11 | tls: 12 | insecure: true 13 | prometheusremotewrite: 14 | endpoint: "http://promscale:9201/write" 15 | tls: 16 | insecure: true 17 | 18 | processors: 19 | batch: 20 | spanmetrics: 21 | metrics_exporter: prometheusremotewrite 22 | 23 | 24 | service: 25 | telemetry: 26 | logs: 27 | level: "debug" 28 | 29 | pipelines: 30 | traces: 31 | receivers: [otlp] 32 | exporters: [jaeger, logging] 33 | processors: [batch, spanmetrics] 34 | metrics: 35 | receivers: [otlp] 36 | exporters: [logging, prometheusremotewrite] 37 | processors: [batch] 38 | 39 | -------------------------------------------------------------------------------- /docker-compose/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | evaluation_interval: 10s 4 | scrape_configs: 5 | - job_name: prometheus 6 | static_configs: 7 | - targets: ['localhost:9090'] 8 | - job_name: node-exporter 9 | static_configs: 10 | - targets: ['node_exporter:9100'] 11 | - job_name: promscale 12 | static_configs: 13 | - targets: ['promscale:9201'] 14 | 15 | remote_write: 16 | - url: "http://promscale:9201/write" 17 | remote_read: 18 | - url: "http://promscale:9201/read" 19 | read_recent: true 20 | -------------------------------------------------------------------------------- /docker-compose/promscale_prometheus.yml: -------------------------------------------------------------------------------- 1 | # Rules and alerts are read from the specified file(s) 2 | rule_files: 3 | - rules.yml 4 | - alerts.yml 5 | 6 | # Alerting specifies settings related to the Alertmanager 7 | alerting: 8 | alert_relabel_configs: 9 | - replacement: "production" 10 | target_label: "env" 11 | action: "replace" 12 | alertmanagers: 13 | - static_configs: 14 | - targets: 15 | # Alertmanager's default port is 9093 16 | - alertmanager:9093 17 | -------------------------------------------------------------------------------- /docker-compose/rules.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: rules 3 | rules: 4 | - record: 
instance_cpu:node_cpu_seconds_not_idle:rate5m 5 | expr: > 6 | sum(rate(node_cpu_seconds_total{mode!="idle"}[5m])) 7 | without (mode,cpu) 8 | -------------------------------------------------------------------------------- /docker-compose/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | SELF_DIR="$(cd "$(dirname "${0}")" && pwd)" 6 | cd "$SELF_DIR" 7 | 8 | echo "running tests" 9 | 10 | #build latest image 11 | docker build -t timescale/promscale:latest ../ --file ../build/Dockerfile 12 | 13 | docker compose -p test up -d 14 | 15 | cleanup() { 16 | if (( $? != 0 )); then 17 | echo "ERROR" 18 | fi 19 | docker compose -p test down > /dev/null 2>&1 20 | } 21 | 22 | trap cleanup EXIT 23 | 24 | ## list all container names 25 | declare -a arr=("db" "promscale" "prometheus" "node_exporter") 26 | 27 | ## now loop through the above array 28 | ## to check containers are running 29 | for i in "${arr[@]}" 30 | do 31 | containerName=`docker ps -q -f name="$i"` 32 | if [ -n "$containerName" ]; then 33 | echo "$i container is running" 34 | else 35 | echo "$i container failed to run" 36 | exit 1 37 | fi 38 | done 39 | 40 | i=0 41 | while [ "$(docker logs -n10 test-promscale-1 2>&1| grep samples/sec | wc -l)" -lt 1 ]; do 42 | i=$((i+=1)) 43 | if [ "$i" -gt 600 ]; then 44 | echo "ERROR: Promscale couldn't ingest data for over 600s. Exiting." 45 | exit 1 46 | fi 47 | echo "Waiting for promscale to ingest data" 48 | sleep 1 49 | done 50 | 51 | echo "SUCCESS" 52 | -------------------------------------------------------------------------------- /docs/alerting.md: -------------------------------------------------------------------------------- 1 | # Alerting 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/alert/ 4 | -------------------------------------------------------------------------------- /docs/assets/promscale-HA-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/docs/assets/promscale-HA-arch.png -------------------------------------------------------------------------------- /docs/assets/promscale-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/docs/assets/promscale-arch.png -------------------------------------------------------------------------------- /docs/assets/promscale-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/docs/assets/promscale-logo.png -------------------------------------------------------------------------------- /docs/compression-job-hotfix.md: -------------------------------------------------------------------------------- 1 | # Compression Job Hotfix to Improve Performance 2 | 3 | Users have reported that Promscale background maintenance jobs that handle data 4 | compression and retention get slower over time as the number of TimescaleDB 5 | chunks grows. We've identified the problem which is related to two suboptimal 6 | queries used when compressing data. 
The code that uses these queries consists of 7 | functions that run inside the database, and so the hotfix involves updating 8 | those functions by executing SQL commands against the database. This problem 9 | affects Promscale versions 0.10.0 and earlier. 10 | 11 | Below we explain the steps to apply the hotfix to your Promscale instance. This 12 | fix will be included in the next release of Promscale. 13 | 14 | ## Steps to Apply the Hotfix 15 | 16 | 1. Upgrade Promscale to version 0.10.0. 17 | 2. Download the [hotfix file](scripts/hotfix-compression-job-performance.sql). 18 | 3. Apply the hotfix by executing the SQL code in the hotfix file. For example, you can 19 | use psql: 20 | 21 | `psql -f hotfix-compression-job-performance.sql` 22 | 23 | ## Rollback the Hotfix 24 | 25 | To roll back the hotfix and restore the original version of the functions in 26 | Promscale 0.10.0: 27 | 28 | 1. Download the [rollback file](scripts/rollback-hotfix-compression-job-performance.sql). 29 | 2. Apply the rollback by executing the SQL code in the rollback file: 30 | 31 | `psql -f rollback-hotfix-compression-job-performance.sql` 32 | -------------------------------------------------------------------------------- /docs/configuring_prometheus.md: -------------------------------------------------------------------------------- 1 | # Configuring Prometheus for better Performance with Promscale 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/send-data/prometheus/ 4 | -------------------------------------------------------------------------------- /docs/data-model.md: -------------------------------------------------------------------------------- 1 | # Promscale Data Storage 2 | 3 | This document provides the foundations to understand how Promscale stores data. 4 | 5 | ## Metrics 6 | 7 | The most important components of the metric data model are as follows: 8 | 9 | ``` 10 | _prom_catalog 11 | ┌───────────────────────────────────────────────────────┐ 12 | │ │ 13 | │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ 14 | │ │ metric │◄──────┤ series ├─────►│ label │ │ 15 | │ └──────────┘ └────▲─────┘ └──────────┘ │ 16 | │ │ │ 17 | │ series_id │ 18 | └───────────────────────────┼───────────────────────────┘ 19 | │ 20 | ┌─────────────┴─────────────┐ 21 | │ prom_data. │ 22 | └┬──────────────────────────┘ 23 | └─ BTree (series_id, time) INCLUDE (value) 24 | ``` 25 | 26 | The `_prom_catalog` schema contains metadata tables that describe `metric`s, 27 | `label`s, and `series`. Each metric stored in the system is represented by 28 | an entry in the `metric` table. The `label` table contains (key, value) pairs, 29 | each with its own identity. The `series` table represents the combination of a `metric` and 30 | a list of `label`s. 31 | 32 | The actual metric data is stored in a TimescaleDB hypertable (one per metric) 33 | with the same name as the metric (assuming the metric name is shorter than 62 34 | characters; otherwise it is truncated). These hypertables are in the 35 | `prom_data` schema. Each row in the hypertable stores the timestamp, series id, 36 | and the observed value. 37 | 38 | In addition to the data in the hypertable, we build a covering BTree index over 39 | the pair (series_id, time), including the observed value. 
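
As a sketch of how these pieces fit together, the queries below walk the catalog for a hypothetical metric named `up`. The `prom_data."up"` table and the `_prom_catalog.series.labels` array of label ids are assumptions for illustration and may differ between Promscale versions:

```sql
-- Inspect the five most recent raw samples of a hypothetical metric "up".
-- Each row carries the timestamp, the series id, and the observed value,
-- exactly as described above.
SELECT d.time, d.series_id, d.value
FROM prom_data."up" d
ORDER BY d.time DESC
LIMIT 5;

-- Resolve the labels behind one of those series ids.
SELECT l.key, l.value
FROM _prom_catalog.series s
JOIN _prom_catalog.label l ON l.id = ANY (s.labels)
WHERE s.id = 1; -- replace with a series_id from the query above
```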
40 | -------------------------------------------------------------------------------- /docs/docker.md: -------------------------------------------------------------------------------- 1 | # 🐳 Docker 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/installation/docker/ 4 | -------------------------------------------------------------------------------- /docs/downsampling.md: -------------------------------------------------------------------------------- 1 | # Downsampling 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/downsample-data/ 4 | -------------------------------------------------------------------------------- /docs/high-availability/new_ha_system.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/docs/high-availability/new_ha_system.png -------------------------------------------------------------------------------- /docs/metric_deletion_and_retention.md: -------------------------------------------------------------------------------- 1 | # Deleting Data 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/manage-data/ 4 | -------------------------------------------------------------------------------- /docs/mixin/.gitignore: -------------------------------------------------------------------------------- 1 | /alerts.yaml 2 | /rules.yaml 3 | dashboards_out/ -------------------------------------------------------------------------------- /docs/mixin/.lint: -------------------------------------------------------------------------------- 1 | # Those exclusions are needed due to usage of newer dashboard schema 2 | exclusions: 3 | panel-datasource-rule: 4 | panel-title-description-rule: 5 | panel-units-rule: 6 | target-job-rule: 7 | target-instance-rule: 8 | template-job-rule: 9 | template-instance-rule: 10 | template-datasource-rule: 11 | -------------------------------------------------------------------------------- /docs/mixin/Makefile: -------------------------------------------------------------------------------- 1 | JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s 2 | 3 | default: build 4 | 5 | all: fmt lint build clean 6 | 7 | fmt: 8 | find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 9 | xargs -n 1 -- $(JSONNET_FMT) -i 10 | 11 | lint: 12 | find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 13 | while read f; do \ 14 | $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ 15 | done 16 | 17 | mixtool lint mixin.libsonnet 18 | 19 | build: 20 | mixtool generate all mixin.libsonnet 21 | 22 | clean: 23 | rm -rf dashboards_out alerts.yaml rules.yaml 24 | 25 | test: build 26 | # If you don't have promtool, install it with 27 | # go install -a github.com/prometheus/prometheus/cmd/promtool@latest 28 | promtool check rules alerts.yaml 29 | 30 | # If you don't have gojsontoyaml, install it with 31 | # go install -a github.com/brancz/gojsontoyaml@latest 32 | gojsontoyaml -yamltojson < alerts/alerts.yaml | jq -e '.groups[].rules[].annotations | has("runbook_url")' 33 | -------------------------------------------------------------------------------- /docs/mixin/README.md: -------------------------------------------------------------------------------- 1 | # Promscale Mixin 2 | 3 | *This is a work in progress. 
We aim for it to become a good role model for alerts and dashboards eventually, but it is not quite there yet.* 4 | 5 | The Promscale Mixin is a set of configurable, reusable, and extensible alerts and dashboards based on the metrics exported by Promscale. The mixin creates recording and alerting rules for Prometheus and suitable dashboard descriptions for Grafana. 6 | 7 | We recommend monitoring your Promscale deployments and including the mixin in this package 8 | as part of your alerting environment. 9 | 10 | ## Using the pre-built mixin 11 | 12 | ### How to use 13 | 14 | Promscale alerts are defined [here](alerts/alerts.yaml). Copy the contents into a file, 15 | say `promscale_alerts.yaml`. 16 | 17 | ### Configuring Prometheus 18 | 19 | In the Prometheus configuration file, add `promscale_alerts.yaml` under `rule_files` like this: 20 | 21 | ```yaml 22 | rule_files: 23 | - promscale_alerts.yaml 24 | ``` 25 | 26 | ## Building the mixin 27 | 28 | To build it, you need to have mixtool and jsonnetfmt installed. If you have a working Go development environment, it's easiest to run the following: 29 | 30 | ```console 31 | $ go install github.com/monitoring-mixins/mixtool/cmd/mixtool@latest 32 | $ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest 33 | ``` 34 | 35 | You can then build the Prometheus rules files alerts.yaml and rules.yaml and a directory dashboards_out with the JSON dashboard files for Grafana: 36 | 37 | ``` 38 | $ make build 39 | ``` 40 | 41 | For more advanced uses of mixins, see https://github.com/monitoring-mixins/docs. 42 | -------------------------------------------------------------------------------- /docs/mixin/alerts/alerts.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | prometheusAlerts+:: { 3 | groups+: std.parseYaml(importstr 'alerts.yaml').groups, 4 | }, 5 | } 6 | -------------------------------------------------------------------------------- /docs/mixin/config.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | // This is left to allow possible customization in the future 3 | _config+:: {}, 4 | } 5 | -------------------------------------------------------------------------------- /docs/mixin/dashboards/dashboards.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | grafanaDashboards+:: { 3 | 'apm-dependencies.json': (import 'apm-dependencies.json'), 4 | 'apm-home.json': (import 'apm-home.json'), 5 | 'apm-service-dependencies-downstream.json': (import 'apm-service-dependencies-downstream.json'), 6 | 'apm-service-dependencies-upstream.json': (import 'apm-service-dependencies-upstream.json'), 7 | 'apm-service-overview.json': (import 'apm-service-overview.json'), 8 | 'promscale.json': (import 'promscale.json'), 9 | }, 10 | } 11 | -------------------------------------------------------------------------------- /docs/mixin/mixin.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'alerts/alerts.libsonnet') + 2 | (import 'dashboards/dashboards.libsonnet') + 3 | (import 'config.libsonnet') 4 | -------------------------------------------------------------------------------- /docs/multi_tenancy.md: -------------------------------------------------------------------------------- 1 | # Multi-tenancy 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/scale-ha/prometheus-multi-tenancy/ 4 | 
-------------------------------------------------------------------------------- /docs/multinode.md: -------------------------------------------------------------------------------- 1 | # Running Promscale with a Multi-Node Deployment of TimescaleDB 2 | 3 | Using Promscale to write data to and read data from a multi-node deployment of 4 | TimescaleDB is straightforward. To get started, you have to follow [these instructions](https://docs.timescale.com/latest/getting-started/setup-multi-node-basic) 5 | to set up a multi-node TimescaleDB cluster. Then, you point Promscale 6 | to connect to the access node of the cluster. That's it! 7 | 8 | Promscale will automatically connect to the cluster and set up any 9 | tables/objects/roles that it needs. When querying Promscale data from 10 | SQL, simply connect to the access node as well. 11 | 12 | ## Expanding the cluster 13 | 14 | When adding nodes to a TimescaleDB cluster that is already being written to by 15 | Promscale, you should run the `add_prom_node(node_name)` function 16 | after running the standard `add_data_node()` function. For example: 17 | 18 | ```sql 19 | SELECT add_data_node('example_node_name', host => 'example_host_address'); 20 | CALL add_prom_node('example_node_name'); 21 | ``` 22 | 23 | Note: `add_prom_node` should be run by a database superuser, e.g. "postgres". 24 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleCacheHighNumberOfEvictions.md: -------------------------------------------------------------------------------- 1 | # PromscaleCacheHighNumberOfEvictions 2 | 3 | ## Meaning 4 | 5 | Promscale is evicting cache entries at a higher rate than expected 6 | 7 | ## Impact 8 | 9 | Ingestion and query operations have poor throughput performance 10 | 11 | ## Diagnosis 12 | 1. Open the Promscale dashboard in Grafana 13 | 2. Go to the Evictions chart in the Cache section 14 | 3. If the number of evictions is high, see [High metric series](#high-metric-series) for mitigation 15 | 16 | ## Mitigation 17 | 18 | ### High metric series 19 | 1. Increase the cache size based on your requirements. 20 | 2. See the `cache` based flags [here](https://github.com/timescale/promscale/blob/master/docs/configuration.md#metrics-specific-flags) 21 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleDBHighErrorRate.md: -------------------------------------------------------------------------------- 1 | # PromscaleDBHighErrorRate 2 | 3 | ## Meaning 4 | 5 | Promscale is experiencing a high error rate while performing database requests 6 | 7 | ## Impact 8 | 9 | Ingestion and querying operations will have frequent failures. This may also lead to loss of data while ingesting traces. 10 | 11 | ## Diagnosis 12 | 1. Make sure that Prometheus is monitoring Promscale's `/metrics` endpoint 13 | 2. Make sure that the Promscale dashboard is installed in Grafana 14 | 3. Open Grafana 15 | 4. Open the Promscale dashboard 16 | 5. Go to the Query section 17 | 6. See the **Errors (HTTP)** graph. If you see high error rates, see [Invalid or corrupt query data](#invalid-or-corrupt-query-data) for mitigation 18 | 7. Check the Postgres logs to see if there are any errors. If found, see [Database is unhealthy](#database-is-unhealthy) and [Ingestion data is invalid or corrupt](#ingestion-data-is-invalid-or-corrupt) for mitigation 19 | 8. Go to the Grafana dashboard for Promscale and check the **Network latency** panel in the **Database** row. 
If latency is high, go to [High network latency](PromscaleIngestHighLatency.md#high-network-latency) 20 | 21 | ## Mitigation 22 | 23 | ### Database is unhealthy 24 | 1. Check the logs and fix errors (if any) 25 | 2. Make sure that the database is ready for normal operations 26 | 3. Add more disk space if required 27 | 28 | ### Ingestion data is invalid or corrupt 29 | 30 | Same as **Ingestion data is invalid or corrupt** in [PromscaleIngestHighErrorRate](PromscaleIngestHighErrorRate.md#ingestion-data-is-invalid-or-corrupt). 31 | 32 | ### Invalid or corrupt query data 33 | 34 | Make sure your queries do not contain any corrupt information and can be parsed as SQL 35 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleDown.md: -------------------------------------------------------------------------------- 1 | # PromscaleDown 2 | 3 | ## Meaning 4 | 5 | The Promscale instance is unreachable 6 | 7 | ## Impact 8 | 9 | Monitoring data cannot be collected, and as a result there is no visibility into the operations of Promscale. 10 | Promscale operations may be affected, and it may not be possible to ingest or query metrics or tracing data. 11 | 12 | ## Diagnosis 13 | 1. Check if Promscale is up and running 14 | 2. Check if data is scraped in Prometheus 15 | 16 | ## Mitigation 17 | 18 | Ensure that the Promscale metrics endpoint is being scraped by Prometheus 19 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleIngestHighDataDuplication.md: -------------------------------------------------------------------------------- 1 | # PromscaleIngestHighDataDuplication 2 | 3 | ## Meaning 4 | 5 | The client payload either contains duplicates or is being retried many times for 6 | data which has already been ingested. 7 | 8 | ## Impact 9 | 10 | Poor ingestion throughput. 11 | 12 | ## Diagnosis 13 | 14 | 1. Check the Prometheus log for timeout or batch retry errors. If you are seeing 15 | many of these errors, see the [resource recommendations](#resource-recommendations) section for more details. 16 | 17 | 2. If you are running Prometheus in HA mode, follow [Prometheus high availability](#prometheus-high-availability) to ensure the configuration is indeed right. 18 | 19 | ## Mitigation 20 | 21 | ### Resource recommendations 22 | 23 | Allocating the right resources to Promscale and TimescaleDB helps attain 24 | better ingestion throughput. Follow the guidelines on [resource recommendations](https://docs.timescale.com/promscale/latest/recommendations/resource-recomm/#metrics). 25 | 26 | ### Prometheus high availability 27 | 28 | This could happen if the Prometheus HA deployment is not configured to 29 | decorate the samples with the metadata from the replica that's pushing 30 | the data. In this scenario, two or more Prometheus replicas from the same 31 | cluster will be sending the exact same datapoints, and since there's no 32 | cluster/replica metadata, Promscale doesn't have the information needed 33 | to just accept the data from one of them and will try to persist them all. 34 | Follow the guidelines on running [Prometheus in HA mode](https://docs.timescale.com/promscale/latest/scale-ha/high-availability/#promscale-and-prometheus-high-availability) to fix the problem. 
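
If Promscale's native Prometheus HA support is in use, you can also inspect the current leases to confirm that exactly one replica per cluster is elected leader at a time. A minimal sketch; the `_prom_catalog.ha_leases` table name is an assumption about Promscale's HA lease bookkeeping and may differ between versions:

```postgresql
-- One row per Prometheus cluster, naming the replica that currently
-- holds the lease and the time window the lease covers.
SELECT * FROM _prom_catalog.ha_leases;
```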
35 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleIngestHighErrorRate.md: -------------------------------------------------------------------------------- 1 | # PromscaleIngestHighErrorRate 2 | 3 | ## Meaning 4 | 5 | Promscale is experiencing a high number of errors while ingesting metric samples or traces 6 | 7 | ## Impact 8 | 9 | For metric data, high error rates will affect the Prometheus remote-storage component, leading to frequent 10 | retries of sample batches and buffering of samples in the WAL, increasing Prometheus disk usage. 11 | If left untreated, this will lead to upsharding, increasing Prometheus memory usage. Longer retry 12 | durations may result in dropped samples as well. 13 | 14 | For tracing data, this may result in spans being dropped. 15 | 16 | High error rates may also indicate an unhealthy database or a lack of disk space. 17 | 18 | ## Diagnosis 19 | 1. Check the Promscale logs for errors. If they exist, go to [Ingestion data is invalid or corrupt](#ingestion-data-is-invalid-or-corrupt) for mitigation 20 | 2. Check the logs for Prometheus and (if used) the OpenTelemetry Collector to see if there are any error messages. If any errors exist, go to [Ingestion data is invalid or corrupt](#ingestion-data-is-invalid-or-corrupt) for mitigation 21 | 3. Check if your database is reachable 22 | 4. Check if the database has sufficient disk space. If the database runs out of space, it can switch connections to read-only mode, leading to ingestion errors. 23 | 5. Check the Postgres logs for any errors. If found, check [Database is unhealthy](#database-is-unhealthy) for mitigation 24 | 25 | ## Mitigation 26 | 27 | ### Ingestion data is invalid or corrupt 28 | 29 | Reconfigure your data source (Prometheus, OpenTelemetry Collector, etc.) to ensure that all applied configurations 30 | are as specified in the documentation 31 | 32 | ### Database is unhealthy 33 | 1. Ensure a proper connection to the database 34 | 2. Go through the Postgres logs for detailed information 35 | 3. Add more disk space to your Postgres cluster 36 | 4. Fix any errors to make sure that the database is able to accept connections 37 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleMaintenanceJobFailures.md: -------------------------------------------------------------------------------- 1 | # PromscaleMaintenanceJobFailures 2 | 3 | ## Meaning 4 | 5 | Promscale maintenance jobs failed to execute successfully 6 | 7 | ## Impact 8 | 9 | Delays in compression and retention policies, leading to high disk usage 10 | 11 | ## Diagnosis 12 | 1. Open psql 13 | 2. Run the following debugging query: 14 | 15 | ```postgresql 16 | select * from 17 | timescaledb_information.job_stats stats 18 | inner join 19 | timescaledb_information.jobs jobs 20 | on jobs.job_id = stats.job_id 21 | where jobs.proc_name = 'execute_maintenance_job'; 22 | ``` 23 | 24 | This will give you information about the maintenance jobs. The `last_run_status` column will indicate any failed jobs. 25 | For mitigation, see [Unexpected maintenance jobs behaviour](#unexpected-maintenance-jobs-behaviour) 26 | 27 | 3. 
Check the Postgres logs for any failures 28 | 29 | ## Mitigation 30 | 31 | ### Unexpected maintenance jobs behaviour 32 | 33 | Run the following debugging query: 34 | 35 | ```postgresql 36 | SELECT * FROM timescaledb_information.job_stats js 37 | INNER JOIN timescaledb_information.jobs j USING (job_id) 38 | WHERE proc_name ='execute_maintenance_job'; 39 | ``` 40 | 41 | If the output has: 42 | 1. no rows => No jobs are installed 43 | 44 | `SELECT prom_api.config_maintenance_jobs(number_jobs => 2, new_schedule_interval => '30 minutes'::interval)` 45 | 46 | 2. last_run_status != 'Success' => Job had an error 47 | 48 | Run `CALL prom_api.execute_maintenance(true);` and see if there are any obvious errors 49 | 50 | 3. job_status != 'Scheduled' => Job disabled 51 | 52 | 4. next_start not within 30 min 53 | 54 | Try increasing the number of maintenance jobs and see if it helps: `SELECT prom_api.config_maintenance_jobs(number_jobs => X, new_schedule_interval => '30 minutes'::interval)` 55 | 56 | If the problem persists with scheduling, please open an issue at https://github.com/timescale/promscale/issues 57 | 58 | 5. last_run_duration is high (>30 min) => Not enough parallelism 59 | 60 | Increase the number of maintenance jobs N using `SELECT prom_api.config_maintenance_jobs(number_jobs => N, new_schedule_interval => '30 minutes'::interval)` 61 | -------------------------------------------------------------------------------- /docs/runbooks/PromscalePostgreSQLSharedBuffersLow.md: -------------------------------------------------------------------------------- 1 | # PromscalePostgreSQLSharedBuffersLow 2 | 3 | ## Meaning 4 | 5 | Open chunks (the chunks that data is currently written into) can't fit into 6 | PostgreSQL shared buffers. The total size is calculated by summing up the sizes 7 | of all chunk relations and indexes. 8 | 9 | ## Impact 10 | 11 | Database performance will be affected, especially the ingest speed. The effect 12 | will be smaller if you are running PostgreSQL on a fast local disk. 13 | 14 | ## Mitigation 15 | 16 | Increase the percentage of PostgreSQL memory allocated to shared buffers. 17 | If you have already allocated a large percentage of memory to shared buffers (e.g. 75%), 18 | you should consider increasing database memory. 19 | `shared_buffers` can be set through `postgresql.conf` and requires a server restart to take effect. 20 | To make sure that your new setting is applied, you can run: 21 | `SELECT * FROM pg_settings WHERE name = 'shared_buffers';` 22 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleQueryHighErrorRate.md: -------------------------------------------------------------------------------- 1 | # PromscaleQueryHighErrorRate 2 | 3 | ## Meaning 4 | 5 | Promscale is experiencing high error rates while evaluating queries 6 | 7 | ## Impact 8 | 9 | Frequent query evaluation failures. Alerts based on data retrieved via Promscale may not be accurate 10 | 11 | ## Diagnosis 12 | 1. Check query errors in the Grafana dashboard's **Errors (HTTP)** panel in the Query row. If the panel shows a high error rate, go to [Invalid PromQL query](#invalid-promql-query) for mitigation steps 13 | 2. Check if your database is reachable and accepting queries by running a simple query `select time, value from promscale_query_requests_total limit 1;` 14 | 3. Go to Grafana and open the Promscale dashboard. Go to the database health panel and see if it shows high error rates. If the error rate is not high, then the alerts are specific to the queries executed. 
Otherwise, see [Database is unhealthy](#database-is-unhealthy) for mitigation 15 | 4. Check the database logs for errors 16 | 5. Go to [Database is unhealthy](#database-is-unhealthy) for mitigation steps 17 | 18 | ## Mitigation 19 | 20 | ### Database is unhealthy 21 | 1. Ensure the database has proper resources 22 | 2. Check the Postgres logs and fix errors if any 23 | 24 | ### Invalid PromQL query 25 | 1. Enable the Prometheus query log using this [guide](https://prometheus.io/docs/guides/query-log/#enable-the-query-log) 26 | 2. Check the query log for failing PromQL queries 27 | 3. Check the syntax of the PromQL query that is failing in your Grafana dashboard or recording rules 28 | 4. Run the above PromQL query and verify that it executes in a reasonable time 29 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleQueryHighLatency.md: -------------------------------------------------------------------------------- 1 | # PromscaleQueryHighLatency 2 | 3 | ## Meaning 4 | 5 | Promscale query evaluation is taking more time than expected 6 | 7 | ## Impact 8 | 9 | Slow query responses and possible query timeouts 10 | 11 | ## Diagnosis 12 | 13 | Refer to the diagnosis steps in the [PromscaleIngestHighLatency](PromscaleIngestHighLatency.md#diagnosis) alert runbook 14 | 15 | ## Mitigation 16 | 17 | Follow the mitigation steps in the [PromscaleIngestHighLatency](PromscaleIngestHighLatency.md#mitigation) alert runbook 18 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleStorageHighLatency.md: -------------------------------------------------------------------------------- 1 | # PromscaleStorageHighLatency 2 | 3 | ## Meaning 4 | 5 | Promscale is experiencing high latency in receiving database responses 6 | 7 | ## Impact 8 | 9 | Ingestion and querying operations will require more time to complete 10 | 11 | ## Diagnosis 12 | 13 | Refer to the diagnosis steps in [PromscaleIngestHighLatency](PromscaleIngestHighLatency.md#diagnosis) 14 | 15 | ## Mitigation 16 | 17 | Refer to the mitigation steps in [PromscaleIngestHighLatency](PromscaleIngestHighLatency.md#mitigation) 18 | -------------------------------------------------------------------------------- /docs/runbooks/PromscaleStorageUnhealthy.md: -------------------------------------------------------------------------------- 1 | # PromscaleStorageUnhealthy 2 | 3 | ## Meaning 4 | 5 | The Promscale database is unhealthy, with frequent errors while checking its health 6 | 7 | ## Impact 8 | 9 | Ingestion and querying operations may experience frequent failures or timeouts, or may take more time to complete 10 | 11 | ## Diagnosis 12 | 13 | The storage unhealthy alert is fired when the `/healthz` endpoint does not report success for a significant duration of time. 
14 | Check Postgres logs and see if there are any errors 15 | 16 | ## Mitigation 17 | 18 | Refer to the Mitigation for **Database is unhealthy** in [PromscaleIngestHighErrorRate](PromscaleIngestHighErrorRate.md#database-is-unhealthy) 19 | -------------------------------------------------------------------------------- /docs/scripts/install-crontab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "0,30 * * * * //prom-execute-maintenance.sh" | crontab - -------------------------------------------------------------------------------- /docs/scripts/prom-execute-maintenance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | export PGPASSWORD="" 3 | psql --host localhost --port 5432 --dbname postgres --user postgres -c 'CALL prom_api.execute_maintenance();' -------------------------------------------------------------------------------- /docs/scripts/rollback-hotfix-compression-job-performance.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION _prom_catalog.compress_chunk_for_metric(metric_table TEXT, chunk_schema_name name, chunk_table_name name) RETURNS VOID 2 | AS $$ 3 | DECLARE 4 | chunk_full_name text; 5 | BEGIN 6 | SELECT 7 | format('%I.%I', chunk_schema, chunk_name) 8 | INTO chunk_full_name 9 | FROM timescaledb_information.chunks 10 | WHERE hypertable_schema = 'prom_data' 11 | AND hypertable_name = metric_table 12 | AND chunk_schema = chunk_schema_name 13 | AND chunk_name = chunk_table_name; 14 | 15 | PERFORM public.compress_chunk(chunk_full_name, if_not_compressed => true); 16 | END; 17 | $$ 18 | LANGUAGE PLPGSQL 19 | SECURITY DEFINER 20 | --search path must be set for security definer 21 | SET search_path = pg_temp; 22 | 23 | CREATE OR REPLACE PROCEDURE _prom_catalog.compress_old_chunks(metric_table TEXT, compress_before TIMESTAMPTZ) 24 | AS $$ 25 | DECLARE 26 | chunk_schema_name name; 27 | chunk_table_name name; 28 | chunk_range_end timestamptz; 29 | chunk_num INT; 30 | BEGIN 31 | FOR chunk_schema_name, chunk_table_name, chunk_range_end, chunk_num IN 32 | SELECT 33 | chunk_schema, 34 | chunk_name, 35 | range_end, 36 | row_number() OVER (ORDER BY range_end DESC) 37 | FROM timescaledb_information.chunks 38 | WHERE hypertable_schema = 'prom_data' 39 | AND hypertable_name = metric_table 40 | AND NOT is_compressed 41 | ORDER BY range_end ASC 42 | LOOP 43 | CONTINUE WHEN chunk_num <= 1 OR chunk_range_end > compress_before; 44 | PERFORM _prom_catalog.compress_chunk_for_metric(metric_table, chunk_schema_name, chunk_table_name); 45 | COMMIT; 46 | END LOOP; 47 | END; 48 | $$ LANGUAGE PLPGSQL; 49 | -------------------------------------------------------------------------------- /docs/sql_api.md: -------------------------------------------------------------------------------- 1 | # Function API Reference 2 | 3 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/sql-api/ 4 | 5 | 24 | -------------------------------------------------------------------------------- /docs/sql_permissions.md: -------------------------------------------------------------------------------- 1 | The content in this page has been moved to https://docs.timescale.com/promscale/latest/roles-and-permissions/ 2 | -------------------------------------------------------------------------------- /docs/tracing.md: -------------------------------------------------------------------------------- 1 | # Tracing 2 | 3 | The 
content in this page has been moved to https://docs.timescale.com/promscale/latest/ 4 | -------------------------------------------------------------------------------- /migration-tool/pkg/utils/auth.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package utils 6 | 7 | import "github.com/prometheus/common/config" 8 | 9 | // Auth defines the authentication for prom-migrator. 10 | type Auth struct { 11 | Username string 12 | Password string 13 | PasswordFile string 14 | BearerToken string 15 | BearerTokenFile string 16 | config.TLSConfig 17 | } 18 | 19 | // ToHTTPClientConfig converts the auth credentials to an HTTP client compatible format. 20 | func (a *Auth) ToHTTPClientConfig() config.HTTPClientConfig { 21 | conf := config.HTTPClientConfig{} 22 | if a.Password != "" { 23 | conf.BasicAuth = &config.BasicAuth{ 24 | Username: a.Username, 25 | } 26 | if a.PasswordFile != "" { 27 | conf.BasicAuth.PasswordFile = a.PasswordFile 28 | } else { 29 | conf.BasicAuth.Password = config.Secret(a.Password) 30 | } 31 | } 32 | // Note: Even though password and bearer_token are mutually exclusive, we apply both. Verifying whether both were used 33 | // is left to the .Validate() function, which is expected to be called in main. This keeps us from missing corner cases. 34 | if a.BearerTokenFile != "" { 35 | conf.BearerTokenFile = a.BearerTokenFile 36 | } else if a.BearerToken != "" { 37 | // Since Password and BearerToken are mutually exclusive, we assign both based on the input flags 38 | // and leave it up to HTTPClientConfig.Validate() for validation. 39 | conf.BearerToken = config.Secret(a.BearerToken) 40 | } 41 | if appliedAny(a.CAFile, a.CertFile, a.KeyFile, a.ServerName) || a.InsecureSkipVerify { 42 | conf.TLSConfig = a.TLSConfig 43 | } 44 | return conf 45 | } 46 | 47 | // appliedAny returns true if any of the fields are non-empty. 48 | func appliedAny(sets ...string) bool { 49 | for _, s := range sets { 50 | if s != "" { 51 | return true 52 | } 53 | } 54 | return false 55 | } 56 | -------------------------------------------------------------------------------- /migration-tool/pkg/utils/flags.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package utils 6 | 7 | import ( 8 | "fmt" 9 | "strings" 10 | ) 11 | 12 | type HeadersFlag struct { 13 | Headers map[string][]string 14 | } 15 | 16 | func (f HeadersFlag) String() string { 17 | var s string 18 | for k, vs := range f.Headers { 19 | s += k + "=" + strings.Join(vs, ",") 20 | } 21 | return s 22 | } 23 | 24 | func (f HeadersFlag) Set(value string) error { 25 | k, v, found := strings.Cut(value, ":") 26 | if !found { 27 | return fmt.Errorf("HTTP header values should be specified with the format `key:value`") 28 | } 29 | vs, exists := f.Headers[k] 30 | if exists { 31 | f.Headers[k] = append(vs, v) 32 | return nil 33 | } 34 | f.Headers[k] = []string{v} 35 | return nil 36 | } 37 | -------------------------------------------------------------------------------- /pkg/api/alerts.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "net/http" 9 | "time" 10 | 11 | "github.com/NYTimes/gziphandler" 12 | ) 13 | 14 | func Alerts(conf *Config, updateMetrics updateMetricCallback) http.Handler { 15 | hf := corsWrapper(conf, alertsHandler(conf, updateMetrics)) 16 | return gziphandler.GzipHandler(hf) 17 | } 18 | 19 | func alertsHandler(apiConf *Config, updateMetrics updateMetricCallback) http.HandlerFunc { 20 | return func(w http.ResponseWriter, r *http.Request) { 21 | statusCode := "400" 22 | begin := time.Now() 23 | defer func() { 24 | updateMetrics("/api/v1/alerts", statusCode, "", time.Since(begin).Seconds()) 25 | }() 26 | 27 | if apiConf.Rules == nil { 28 | statusCode = "200" 29 | respond(w, http.StatusOK, &AlertDiscovery{Alerts: []*Alert{}}) 30 | return 31 | } 32 | 33 | alerts := []*Alert{} 34 | alertingRules := apiConf.Rules.AlertingRules() 35 | for _, alertingRule := range alertingRules { 36 | alerts = append( 37 | alerts, 38 | rulesAlertsToAPIAlerts(alertingRule.ActiveAlerts())..., 39 | ) 40 | } 41 | 42 | statusCode = "200" 43 | res := &AlertDiscovery{Alerts: alerts} 44 | respond(w, http.StatusOK, res) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/api/health.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "net/http" 9 | 10 | "github.com/timescale/promscale/pkg/log" 11 | "github.com/timescale/promscale/pkg/pgmodel/health" 12 | ) 13 | 14 | func Health(hc health.HealthCheckerFn) http.HandlerFunc { 15 | return func(w http.ResponseWriter, r *http.Request) { 16 | err := hc() 17 | if err != nil { 18 | log.Warn("msg", "Healthcheck failed", err) 19 | http.Error(w, err.Error(), http.StatusInternalServerError) 20 | return 21 | } 22 | w.Header().Set("Content-Length", "0") 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /pkg/api/label_values.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package api 6 | 7 | import ( 8 | "fmt" 9 | "math" 10 | "net/http" 11 | 12 | "github.com/NYTimes/gziphandler" 13 | "github.com/gorilla/mux" 14 | "github.com/prometheus/common/model" 15 | "github.com/timescale/promscale/pkg/promql" 16 | ) 17 | 18 | func LabelValues(conf *Config, queryable promql.Queryable) http.Handler { 19 | hf := corsWrapper(conf, labelValues(queryable)) 20 | return gziphandler.GzipHandler(hf) 21 | } 22 | 23 | func labelValues(queryable promql.Queryable) http.HandlerFunc { 24 | return func(w http.ResponseWriter, r *http.Request) { 25 | name := mux.Vars(r)["name"] 26 | if !model.LabelNameRE.MatchString(name) { 27 | respondError(w, http.StatusBadRequest, fmt.Errorf("invalid label name: %s", name), "bad_data") 28 | return 29 | } 30 | querier, err := queryable.SamplesQuerier(r.Context(), math.MinInt64, math.MaxInt64) 31 | if err != nil { 32 | respondError(w, http.StatusInternalServerError, err, "internal") 33 | return 34 | } 35 | defer querier.Close() 36 | 37 | var values labelsValue 38 | values, warnings, err := querier.LabelValues(name) 39 | if err != nil { 40 | respondError(w, http.StatusInternalServerError, err, "internal") 41 | return 42 | } 43 | 44 | respondLabels(w, &promql.Result{ 45 | Value: values, 46 | }, warnings) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pkg/api/labels.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "encoding/json" 9 | "math" 10 | "net/http" 11 | "strings" 12 | 13 | "github.com/NYTimes/gziphandler" 14 | "github.com/prometheus/prometheus/promql/parser" 15 | "github.com/prometheus/prometheus/storage" 16 | "github.com/timescale/promscale/pkg/promql" 17 | ) 18 | 19 | type labelsValue []string 20 | 21 | func (l labelsValue) Type() parser.ValueType { 22 | return parser.ValueTypeNone 23 | } 24 | 25 | func (l labelsValue) String() string { 26 | return strings.Join(l, "\n") 27 | } 28 | 29 | func Labels(conf *Config, queryable promql.Queryable) http.Handler { 30 | hf := corsWrapper(conf, labelsHandler(queryable)) 31 | return gziphandler.GzipHandler(hf) 32 | } 33 | 34 | func labelsHandler(queryable promql.Queryable) http.HandlerFunc { 35 | return func(w http.ResponseWriter, r *http.Request) { 36 | querier, err := queryable.SamplesQuerier(r.Context(), math.MinInt64, math.MaxInt64) 37 | if err != nil { 38 | respondError(w, http.StatusInternalServerError, err, "internal") 39 | return 40 | } 41 | defer querier.Close() 42 | var names labelsValue 43 | names, warnings, err := querier.LabelNames() 44 | if err != nil { 45 | respondError(w, http.StatusInternalServerError, err, "internal") 46 | return 47 | } 48 | respondLabels(w, &promql.Result{ 49 | Value: names, 50 | }, warnings) 51 | } 52 | } 53 | 54 | func respondLabels(w http.ResponseWriter, res *promql.Result, warnings storage.Warnings) { 55 | setResponseHeaders(w, res, false, warnings) 56 | resp := &response{ 57 | Status: "success", 58 | Data: res.Value, 59 | } 60 | _ = json.NewEncoder(w).Encode(resp) 61 | } 62 | -------------------------------------------------------------------------------- /pkg/api/metadata.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 
2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "net/http" 9 | "strconv" 10 | 11 | "github.com/NYTimes/gziphandler" 12 | "github.com/timescale/promscale/pkg/pgclient" 13 | "github.com/timescale/promscale/pkg/pgmodel/metadata" 14 | ) 15 | 16 | func MetricMetadata(conf *Config, client *pgclient.Client) http.Handler { 17 | hf := corsWrapper(conf, metricMetadataHandler(client)) 18 | return gziphandler.GzipHandler(hf) 19 | } 20 | 21 | func metricMetadataHandler(client *pgclient.Client) http.HandlerFunc { 22 | return func(w http.ResponseWriter, r *http.Request) { 23 | if err := r.ParseForm(); err != nil { 24 | respondError(w, http.StatusBadRequest, err, "bad_data") 25 | return 26 | } 27 | var ( 28 | limit int64 29 | err error 30 | 31 | metric = r.FormValue("metric") 32 | limitStr = r.FormValue("limit") 33 | ) 34 | if limitStr != "" { 35 | limit, err = strconv.ParseInt(limitStr, 10, 32) 36 | if err != nil { 37 | respondError(w, http.StatusBadRequest, err, "converting string to integer") 38 | return 39 | } 40 | } 41 | data, err := metadata.MetricQuery(r.Context(), client.ReadOnlyConnection(), metric, int(limit)) 42 | if err != nil { 43 | respondError(w, http.StatusInternalServerError, err, "fetching metric metadata") 44 | return 45 | } 46 | respond(w, http.StatusOK, data) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pkg/api/otlp.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "context" 9 | 10 | "github.com/timescale/promscale/pkg/pgmodel/ingestor" 11 | "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" 12 | ) 13 | 14 | func NewTraceServer(i ingestor.DBInserter) ptraceotlp.GRPCServer { 15 | return &tracesServer{ 16 | ingestor: i, 17 | } 18 | } 19 | 20 | type tracesServer struct { 21 | ingestor ingestor.DBInserter 22 | } 23 | 24 | func (t *tracesServer) Export(ctx context.Context, tr ptraceotlp.Request) (ptraceotlp.Response, error) { 25 | return ptraceotlp.NewResponse(), t.ingestor.IngestTraces(ctx, tr.Traces()) 26 | } 27 | -------------------------------------------------------------------------------- /pkg/api/parser/protobuf/protobuf.go: -------------------------------------------------------------------------------- 1 | package protobuf 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net/http" 7 | "sync" 8 | 9 | "github.com/gogo/protobuf/proto" 10 | "github.com/timescale/promscale/pkg/prompb" 11 | ) 12 | 13 | // ParseRequest is responsible for populating the write request from the 14 | // data in the request in protobuf format. 
15 | func ParseRequest(r *http.Request, wr *prompb.WriteRequest) error { 16 | b := bufPool.Get().(*bytes.Buffer) 17 | defer bufPool.Put(b) 18 | b.Reset() 19 | 20 | _, err := b.ReadFrom(r.Body) 21 | if err != nil { 22 | return fmt.Errorf("request body read error: %w", err) 23 | } 24 | 25 | if err = proto.Unmarshal(b.Bytes(), wr); err != nil { 26 | return fmt.Errorf("protobuf unmarshal error: %w", err) 27 | } 28 | 29 | return r.Body.Close() 30 | } 31 | 32 | var bufPool = sync.Pool{ 33 | New: func() interface{} { 34 | return new(bytes.Buffer) 35 | }, 36 | } 37 | -------------------------------------------------------------------------------- /pkg/api/parser/text/text.go: -------------------------------------------------------------------------------- 1 | package text 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/prometheus/common/model" 10 | "github.com/prometheus/prometheus/model/labels" 11 | "github.com/prometheus/prometheus/model/textparse" 12 | "github.com/timescale/promscale/pkg/prompb" 13 | "github.com/timescale/promscale/pkg/util" 14 | ) 15 | 16 | var timeProvider = time.Now 17 | 18 | // ParseRequest parses an incoming HTTP request as a Prometheus text format. 19 | func ParseRequest(r *http.Request, wr *prompb.WriteRequest) error { 20 | b, err := io.ReadAll(r.Body) 21 | if err != nil { 22 | return fmt.Errorf("error reading request body: %w", err) 23 | } 24 | 25 | var ( 26 | et textparse.Entry 27 | defTime = int64(model.TimeFromUnixNano(timeProvider().UnixNano())) 28 | ) 29 | 30 | p, err := textparse.New(b, r.Header.Get("Content-Type")) 31 | if err != nil { 32 | return fmt.Errorf("parsing contents from request body: %w", err) 33 | } 34 | 35 | for { 36 | if et, err = p.Next(); err != nil { 37 | if err == io.EOF { 38 | break 39 | } 40 | return fmt.Errorf("error parsing text entries: %w", err) 41 | } 42 | 43 | switch et { 44 | case textparse.EntryType, 45 | textparse.EntryHelp, 46 | textparse.EntryUnit, 47 | textparse.EntryComment: 48 | continue 49 | default: 50 | } 51 | 52 | t := defTime 53 | _, tp, v := p.Series() 54 | if tp != nil { 55 | t = *tp 56 | } 57 | 58 | var lset labels.Labels 59 | _ = p.Metric(&lset) 60 | 61 | ll := util.LabelToPrompbLabels(lset) 62 | 63 | wr.Timeseries = append(wr.Timeseries, prompb.TimeSeries{ 64 | Labels: ll, 65 | Samples: []prompb.Sample{ 66 | { 67 | Timestamp: t, 68 | Value: v, 69 | }, 70 | }, 71 | }) 72 | } 73 | 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/api/reload.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package api 6 | 7 | import ( 8 | "fmt" 9 | "net/http" 10 | 11 | "github.com/timescale/promscale/pkg/log" 12 | ) 13 | 14 | func Reload(reload func() error, webAdmin bool) http.HandlerFunc { 15 | return func(w http.ResponseWriter, r *http.Request) { 16 | if !webAdmin { 17 | err := fmt.Errorf("reload received but web admin is disabled. 
To enable, start Promscale with '-web.enable-admin-api' flag") 18 | log.Error("msg", err.Error()) 19 | http.Error(w, fmt.Errorf("failed to reload: %w", err).Error(), http.StatusUnauthorized) 20 | return 21 | } 22 | if err := reload(); err != nil { 23 | log.Error("msg", "failed to reload", "err", err.Error()) 24 | http.Error(w, fmt.Errorf("failed to reload: %w", err).Error(), http.StatusInternalServerError) 25 | return 26 | } 27 | w.Header().Set("Content-Length", "0") 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /pkg/dataset/deepcopy_generated.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | // +build !ignore_autogenerated 3 | 4 | // This file and its contents are licensed under the Apache License 2.0. 5 | // Please see the included NOTICE for copyright information and 6 | // LICENSE for a copy of the license. 7 | 8 | // Code generated by deepcopy-gen. DO NOT EDIT. 9 | 10 | package dataset 11 | 12 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 13 | func (in *Config) DeepCopyInto(out *Config) { 14 | *out = *in 15 | in.Metrics.DeepCopyInto(&out.Metrics) 16 | out.Traces = in.Traces 17 | return 18 | } 19 | 20 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. 21 | func (in *Config) DeepCopy() *Config { 22 | if in == nil { 23 | return nil 24 | } 25 | out := new(Config) 26 | in.DeepCopyInto(out) 27 | return out 28 | } 29 | 30 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 31 | func (in *Metrics) DeepCopyInto(out *Metrics) { 32 | *out = *in 33 | if in.Compression != nil { 34 | in, out := &in.Compression, &out.Compression 35 | *out = new(bool) 36 | **out = **in 37 | } 38 | return 39 | } 40 | 41 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics. 42 | func (in *Metrics) DeepCopy() *Metrics { 43 | if in == nil { 44 | return nil 45 | } 46 | out := new(Metrics) 47 | in.DeepCopyInto(out) 48 | return out 49 | } 50 | 51 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 52 | func (in *Traces) DeepCopyInto(out *Traces) { 53 | *out = *in 54 | return 55 | } 56 | 57 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Traces. 58 | func (in *Traces) DeepCopy() *Traces { 59 | if in == nil { 60 | return nil 61 | } 62 | out := new(Traces) 63 | in.DeepCopyInto(out) 64 | return out 65 | } 66 | -------------------------------------------------------------------------------- /pkg/dataset/doc.go: -------------------------------------------------------------------------------- 1 | // Promscale stores some configuration information in the Postgres database 2 | // which it is connected to. We call this configuration the Promscale dataset 3 | // configuration. 4 | // 5 | // The dataset configuration handles config items like retention period, 6 | // chunk interval, compression, etc. These config options are applied on startup 7 | // as SQL queries. For more context, review the `/docs/dataset.md` page and the 8 | // `dataset/config.go` file. 
9 | // 10 | // +k8s:deepcopy-gen=package 11 | package dataset 12 | -------------------------------------------------------------------------------- /pkg/ewma/ewma.go: -------------------------------------------------------------------------------- 1 | // This code has been borrowed from https://github.com/prometheus/prometheus/blob/main/storage/remote/ewma.go 2 | // 3 | // This file and its contents are licensed under the Apache License 2.0. 4 | // Please see the included NOTICE for copyright information and 5 | // LICENSE for a copy of the license. 6 | 7 | package ewma 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "time" 13 | ) 14 | 15 | // Rate tracks an exponentially weighted moving average of a per-second rate. 16 | type Rate struct { 17 | newEvents int64 18 | 19 | alpha float64 20 | interval time.Duration 21 | lastRate float64 22 | init bool 23 | mutex sync.Mutex 24 | } 25 | 26 | // NewEWMARate always allocates a new ewmaRate, as this guarantees the atomically 27 | // accessed int64 will be aligned on ARM. See prometheus#2666. 28 | func NewEWMARate(alpha float64, interval time.Duration) *Rate { 29 | return &Rate{ 30 | alpha: alpha, 31 | interval: interval, 32 | } 33 | } 34 | 35 | // Rate returns the per-second rate. 36 | func (r *Rate) Rate() float64 { 37 | r.mutex.Lock() 38 | defer r.mutex.Unlock() 39 | return r.lastRate 40 | } 41 | 42 | // Tick is assumed to be called every r.interval. 43 | func (r *Rate) Tick() { 44 | newEvents := atomic.SwapInt64(&r.newEvents, 0) 45 | instantRate := float64(newEvents) / r.interval.Seconds() 46 | 47 | r.mutex.Lock() 48 | defer r.mutex.Unlock() 49 | 50 | if r.init { 51 | r.lastRate += r.alpha * (instantRate - r.lastRate) 52 | } else if newEvents > 0 { 53 | r.init = true 54 | r.lastRate = instantRate 55 | } 56 | } 57 | 58 | // Incr counts incr events. 59 | func (r *Rate) Incr(incr int64) { 60 | atomic.AddInt64(&r.newEvents, incr) 61 | } 62 | -------------------------------------------------------------------------------- /pkg/ha/mock_ha_service.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package ha 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/timescale/promscale/pkg/ha/client" 12 | ) 13 | 14 | func MockNewHAService() *Service { 15 | lockClient := newMockLockClient() 16 | 17 | service := &Service{ 18 | state: &sync.Map{}, 19 | leaseClient: lockClient, 20 | currentTimeProvider: time.Now, 21 | } 22 | return service 23 | } 24 | 25 | func SetLeaderInMockService(service *Service, states []client.LeaseDBState) { 26 | for _, state := range states { 27 | curr := service.leaseClient.(*mockLockClient).leadersPerCluster[state.Cluster] 28 | service.leaseClient.(*mockLockClient).leadersPerCluster[state.Cluster] = append(curr, state) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /pkg/jaeger/api.go: -------------------------------------------------------------------------------- 1 | package jaeger 2 | 3 | import ( 4 | "github.com/gorilla/mux" 5 | jaegerQueryApp "github.com/jaegertracing/jaeger/cmd/query/app" 6 | jaegerQueryService "github.com/jaegertracing/jaeger/cmd/query/app/querysvc" 7 | "github.com/jaegertracing/jaeger/pkg/tenancy" 8 | 9 | "github.com/timescale/promscale/pkg/jaeger/store" 10 | "github.com/timescale/promscale/pkg/pgxconn" 11 | ) 12 | 13 | func ExtendQueryAPIs(r *mux.Router, conn pgxconn.PgxConn, reader *store.Store) { 14 | handler := jaegerQueryApp.NewAPIHandler( 15 | jaegerQueryService.NewQueryService(reader, reader, jaegerQueryService.QueryServiceOptions{}), 16 | tenancy.NewManager(&tenancy.Options{Enabled: false}), 17 | ) 18 | handler.RegisterRoutes(r) 19 | } 20 | -------------------------------------------------------------------------------- /pkg/jaeger/store/config.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "flag" 5 | "time" 6 | ) 7 | 8 | const ( 9 | DefaultMaxTraceDuration = time.Hour 10 | ) 11 | 12 | type Config struct { 13 | MaxTraceDuration time.Duration 14 | StreamingSpanWriter bool 15 | } 16 | 17 | var DefaultConfig = Config{ 18 | MaxTraceDuration: DefaultMaxTraceDuration, 19 | StreamingSpanWriter: true, 20 | } 21 | 22 | func ParseFlags(fs *flag.FlagSet, cfg *Config) *Config { 23 | fs.DurationVar(&cfg.MaxTraceDuration, "tracing.max-trace-duration", DefaultMaxTraceDuration, "Maximum duration of any trace in the system. This parameter is used to optimize queries.") 24 | fs.BoolVar(&cfg.StreamingSpanWriter, "tracing.streaming-span-writer", true, "StreamingSpanWriter for remote Jaeger grpc store.") 25 | return cfg 26 | } 27 | 28 | func Validate(cfg *Config) error { 29 | return nil 30 | } 31 | -------------------------------------------------------------------------------- /pkg/jaeger/store/find_trace_ids.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package store 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | 11 | "github.com/jackc/pgx/v5/pgtype" 12 | "github.com/jaegertracing/jaeger/model" 13 | "github.com/jaegertracing/jaeger/storage/spanstore" 14 | "github.com/timescale/promscale/pkg/pgxconn" 15 | ) 16 | 17 | func findTraceIDs(ctx context.Context, builder *Builder, conn pgxconn.PgxConn, q *spanstore.TraceQueryParameters) ([]model.TraceID, error) { 18 | tInfo, err := FindTagInfo(ctx, q, conn) 19 | if err != nil { 20 | return nil, fmt.Errorf("querying trace tags error: %w", err) 21 | } 22 | if tInfo == nil { 23 | // tags cannot be matched 24 | return []model.TraceID{}, nil 25 | } 26 | query, params := builder.findTraceIDsQuery(q, tInfo) 27 | rows, err := conn.Query(ctx, query, params...) 28 | if err != nil { 29 | return nil, fmt.Errorf("querying traces: %w", err) 30 | } 31 | defer rows.Close() 32 | 33 | traceIDs := make([]model.TraceID, 0) 34 | var traceIDUUID pgtype.UUID 35 | for rows.Next() { 36 | if rows.Err() != nil { 37 | return nil, fmt.Errorf("trace ids row iterator: %w", rows.Err()) 38 | } 39 | if err = rows.Scan(&traceIDUUID); err != nil { 40 | return nil, fmt.Errorf("scanning trace ids: %w", err) 41 | } 42 | traceID, err := model.TraceIDFromBytes(traceIDUUID.Bytes[:]) 43 | if err != nil { 44 | return nil, fmt.Errorf("converting trace_id UUID->model: %w", err) 45 | } 46 | traceIDs = append(traceIDs, traceID) 47 | } 48 | if rows.Err() != nil { 49 | return nil, fmt.Errorf("trace ids row iterator: %w", rows.Err()) 50 | } 51 | return traceIDs, nil 52 | } 53 | -------------------------------------------------------------------------------- /pkg/jaeger/store/get_services.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package store 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | 11 | "github.com/jackc/pgx/v5" 12 | "github.com/jackc/pgx/v5/pgtype" 13 | "github.com/pkg/errors" 14 | "github.com/timescale/promscale/pkg/pgxconn" 15 | ) 16 | 17 | const getServicesSQL = ` 18 | SELECT 19 | array_agg(value#>>'{}' ORDER BY value) 20 | FROM 21 | _ps_trace.tag 22 | WHERE 23 | key='service.name' and value IS NOT NULL` 24 | 25 | func getServices(ctx context.Context, conn pgxconn.PgxConn) ([]string, error) { 26 | var pgServices pgtype.FlatArray[pgtype.Text] 27 | if err := conn.QueryRow(ctx, getServicesSQL).Scan(&pgServices); err != nil { 28 | if errors.Is(err, pgx.ErrNoRows) { 29 | return []string{}, nil 30 | } 31 | return nil, fmt.Errorf("fetching services: %w", err) 32 | } 33 | return textArraytoStringArr(pgServices) 34 | } 35 | -------------------------------------------------------------------------------- /pkg/jaeger/store/get_trace.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package store 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | 11 | "github.com/jaegertracing/jaeger/model" 12 | "github.com/jaegertracing/jaeger/storage/spanstore" 13 | "github.com/timescale/promscale/pkg/pgxconn" 14 | ) 15 | 16 | func getTrace(ctx context.Context, builder *Builder, conn pgxconn.PgxConn, traceID model.TraceID) (*model.Trace, error) { 17 | query, params, err := builder.getTraceQuery(traceID) 18 | if err != nil { 19 | return nil, fmt.Errorf("get trace query: %w", err) 20 | } 21 | rows, err := conn.Query(ctx, query, params...) 22 | if err != nil { 23 | return nil, fmt.Errorf("querying traces: %w", err) 24 | } 25 | defer rows.Close() 26 | traces, err := scanTraces(rows) 27 | if err != nil { 28 | return nil, fmt.Errorf("scanning traces: %w", err) 29 | } 30 | 31 | switch len(traces) { 32 | case 0: 33 | return nil, spanstore.ErrTraceNotFound 34 | case 1: 35 | return traces[0], nil 36 | default: 37 | return nil, fmt.Errorf("found more than one trace (count=%d) when searching for a traceID", len(traces)) 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /pkg/jaeger/store/metrics.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package store 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/prometheus/client_golang/prometheus" 11 | "github.com/timescale/promscale/pkg/telemetry" 12 | "github.com/timescale/promscale/pkg/util" 13 | ) 14 | 15 | var ( 16 | // TODO (harkishen): update telemetry module to extract metric data from series in new consistent metrics. 17 | traceRequestsExec = prometheus.NewCounter(prometheus.CounterOpts{ 18 | Namespace: util.PromNamespace, 19 | Subsystem: "trace", 20 | Name: "query_requests_executed_total", 21 | Help: "Total number of query requests successfully executed by the /getTrace and /fetchTraces APIs.", 22 | }) 23 | // Even though this is already covered by promscale_query_requests_total{type="trace", handler="get_dependencies", code="200"}, 24 | // we have to keep this metric for telemetry: extracting the underlying series from that metric would require 25 | // changing the telemetry architecture that tracks all Prometheus metrics, which is not worth doing for a single metric.
26 | dependencyRequestsExec = prometheus.NewCounter(prometheus.CounterOpts{ 27 | Namespace: util.PromNamespace, 28 | Subsystem: "trace", 29 | Name: "dependency_requests_executed_total", 30 | Help: "Total number of dependency requests successfully executed.", 31 | }) 32 | ) 33 | 34 | func RegisterTelemetryMetrics(t telemetry.Engine) error { 35 | var err error 36 | if err = t.RegisterMetric("promscale_trace_query_requests_executed_total", traceRequestsExec); err != nil { 37 | return fmt.Errorf("register 'promscale_trace_query_requests_executed_total' metric for telemetry: %w", err) 38 | } 39 | if err = t.RegisterMetric("promscale_trace_dependency_requests_executed_total", dependencyRequestsExec); err != nil { 40 | return fmt.Errorf("register 'promscale_trace_dependency_requests_executed_total' metric for telemetry: %w", err) 41 | } 42 | return nil 43 | } 44 | 45 | func init() { 46 | prometheus.MustRegister( 47 | traceRequestsExec, 48 | dependencyRequestsExec, 49 | ) 50 | } 51 | -------------------------------------------------------------------------------- /pkg/limits/mem/mem.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | // This file and its contents are licensed under the Apache License 2.0. 5 | // Please see the included NOTICE for copyright information and 6 | // LICENSE for a copy of the license 7 | 8 | package mem 9 | 10 | import ( 11 | "github.com/pbnjay/memory" 12 | ) 13 | 14 | func SystemMemory() uint64 { 15 | return memory.TotalMemory() 16 | } 17 | -------------------------------------------------------------------------------- /pkg/limits/mem/mem_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | // This file and its contents are licensed under the Apache License 2.0. 5 | // Please see the included NOTICE for copyright information and 6 | // LICENSE for a copy of the license 7 | 8 | package mem 9 | 10 | import ( 11 | "github.com/containerd/cgroups" 12 | v2 "github.com/containerd/cgroups/v2" 13 | "github.com/pbnjay/memory" 14 | ) 15 | 16 | func SystemMemory() uint64 { 17 | //try getting the cgroup value, if not fallback to system value 18 | cgroup := getCGroupMemory() 19 | if validMemoryValue(cgroup) { 20 | return cgroup 21 | } 22 | system := memory.TotalMemory() 23 | if validMemoryValue(system) { 24 | return system 25 | } 26 | return 0 27 | } 28 | 29 | func validMemoryValue(value uint64) bool { 30 | // some systems return absurdly large values for unlimited 31 | // so cut-off at 10 petabytes 32 | // e.g. 
docker: https://unix.stackexchange.com/questions/420906/what-is-the-value-for-the-cgroups-limit-in-bytes-if-the-memory-is-not-restricte 33 | if value <= 0 || value > 10e15 { 34 | return false 35 | } 36 | return true 37 | } 38 | 39 | func getCGroupMemory() uint64 { 40 | v2 := getCGroupV2Memory() 41 | if validMemoryValue(v2) { 42 | return v2 43 | } 44 | v1 := getCGroupV1Memory() 45 | if validMemoryValue(v1) { 46 | return v1 47 | } 48 | return 0 49 | } 50 | 51 | func getCGroupV2Memory() uint64 { 52 | manager, err := v2.LoadManager("/sys/fs/cgroup", "/") 53 | if err != nil { 54 | return 0 55 | } 56 | metrics, err := manager.Stat() 57 | if err != nil { 58 | return 0 59 | } 60 | return metrics.Memory.UsageLimit 61 | } 62 | 63 | func getCGroupV1Memory() uint64 { 64 | c, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/")) 65 | if err != nil { 66 | return 0 67 | } 68 | stats, err := c.Stat(cgroups.IgnoreNotExist) 69 | if err != nil { 70 | return 0 71 | } 72 | if stats.Memory == nil { 73 | return 0 74 | } 75 | return stats.Memory.HierarchicalMemoryLimit 76 | } 77 | -------------------------------------------------------------------------------- /pkg/migrations/Readme.md: -------------------------------------------------------------------------------- 1 | # Version Migrations 2 | 3 | This directory contains SQL scripts for schema setup and upgrading. 4 | 5 | All scripts are contained in the `sql` directory and are separated as follows: 6 | 7 | 1. `preinstall` - This directory contains all scripts that will be executed on 8 | a new database install. 9 | 2. `idempotent` - This directory contains all scripts that contain idempotent 10 | content which is executed after a fresh install or a version upgrade. 11 | 3. `versions/dev` - This directory contains subdirectories that are named after 12 | the development version they were introduced in. A version's migrations cannot 13 | be modified. So, to introduce a new version, you have to bump the app version. 14 | For example, if the current app version is 0.1.1-dev, to introduce a new migration 15 | script, you must add a SQL file named `versions/dev/0.1.1/1-blah.sql` and bump 16 | the app version to 0.1.1-dev.1. 17 | 18 | All script files are executed in an explicit order. Ordering can happen in two ways: 19 | 20 | - Using a table of contents which needs to be present in the `pkg/pgmodel/migrate.go` 21 | file. Files not present in the ToC will be ignored. 22 | - Using a numbered prefix delimited by a `-` e.g. `1-base.sql`, `2-secondary.sql`. 23 | In this case, files are ordered by the numeric prefix, low to high (see the sketch below). 24 | Files that do not follow this format will be ignored. 25 | 26 | **NOTE** If any changes are made to the `sql` directory, you must rerun 27 | `go generate` in this directory. 28 | -------------------------------------------------------------------------------- /pkg/migrations/generate.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | // +build ignore 3 | 4 | // This file and its contents are licensed under the Apache License 2.0. 5 | // Please see the included NOTICE for copyright information and 6 | // LICENSE for a copy of the license.
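To make the numbered-prefix ordering described in the Readme above concrete, here is a sketch of the sort it implies (illustrative only; the real table-of-contents and ordering logic live in pkg/pgmodel/migrate.go):

import (
	"math"
	"sort"
	"strconv"
	"strings"
)

// Sketch: order names such as "1-base.sql", "2-secondary.sql" by their
// numeric prefix, low to high. The real code ignores files without a
// numeric prefix; this sketch simply sorts them last.
func orderMigrations(files []string) []string {
	prefix := func(name string) int {
		n, err := strconv.Atoi(strings.SplitN(name, "-", 2)[0])
		if err != nil {
			return math.MaxInt
		}
		return n
	}
	sort.Slice(files, func(i, j int) bool { return prefix(files[i]) < prefix(files[j]) })
	return files
}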
7 | 8 | // This file is a binary that generates migration_files_generated.go 9 | // it is not built by default, but rather invoked by the go:generate command 10 | // defined in migrations.go 11 | package main 12 | 13 | import ( 14 | "log" 15 | "net/http" 16 | 17 | "github.com/shurcooL/vfsgen" 18 | "github.com/timescale/promscale/pkg/migrations" 19 | ) 20 | 21 | var Assets http.FileSystem = migrations.NewModTimeFs(http.Dir("sql")) 22 | 23 | func main() { 24 | err := vfsgen.Generate(Assets, vfsgen.Options{ 25 | Filename: "migration_files_generated.go", 26 | PackageName: "migrations", 27 | VariableName: "MigrationFiles", 28 | }) 29 | if err != nil { 30 | log.Fatalln(err) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pkg/migrations/migrations.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package migrations 6 | 7 | // This is a stub to define the go:generate command to create the go file 8 | // that embeds the sql files into a go variable to make the sql files part 9 | // of the binary 10 | 11 | //go:generate go run -tags=dev generate.go 12 | 13 | // pin vfsgen version in go mod. It's used in generate.go but that isn't picked up 14 | // because it uses "ignore" tag. See https://github.com/shurcooL/vfsgen/issues/83 15 | import _ "github.com/shurcooL/vfsgen" 16 | -------------------------------------------------------------------------------- /pkg/migrations/mod_time_fs.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package migrations 6 | 7 | import ( 8 | "net/http" 9 | "os" 10 | "time" 11 | ) 12 | 13 | // modTimeFS is an http.FileSystem wrapper that modifies 14 | // underlying fs such that all of its file mod times are set to zero. 
15 | type modTimeFS struct { 16 | fs http.FileSystem 17 | } 18 | 19 | func (fs modTimeFS) Open(name string) (http.File, error) { 20 | f, err := fs.fs.Open(name) 21 | if err != nil { 22 | return nil, err 23 | } 24 | return modTimeFile{f}, nil 25 | } 26 | 27 | type modTimeFile struct { 28 | http.File 29 | } 30 | 31 | func (f modTimeFile) Stat() (os.FileInfo, error) { 32 | fi, err := f.File.Stat() 33 | if err != nil { 34 | return nil, err 35 | } 36 | return modTimeFileInfo{fi}, nil 37 | } 38 | 39 | type modTimeFileInfo struct { 40 | os.FileInfo 41 | } 42 | 43 | func (modTimeFileInfo) ModTime() time.Time { 44 | return time.Time{} 45 | } 46 | 47 | func NewModTimeFs(sub http.FileSystem) http.FileSystem { 48 | return modTimeFS{ 49 | fs: sub, 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /pkg/migrations/sql/idempotent/apply_permissions.sql: -------------------------------------------------------------------------------- 1 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA ps_tag FROM PUBLIC; 2 | REVOKE ALL ON ALL PROCEDURES IN SCHEMA ps_tag FROM PUBLIC; 3 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA _prom_catalog FROM PUBLIC; 4 | REVOKE ALL ON ALL PROCEDURES IN SCHEMA _prom_catalog FROM PUBLIC; 5 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA prom_api FROM PUBLIC; 6 | REVOKE ALL ON ALL PROCEDURES IN SCHEMA prom_api FROM PUBLIC; 7 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA _ps_trace FROM PUBLIC; 8 | REVOKE ALL ON ALL PROCEDURES IN SCHEMA _ps_trace FROM PUBLIC; 9 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA ps_trace FROM PUBLIC; 10 | REVOKE ALL ON ALL PROCEDURES IN SCHEMA ps_trace FROM PUBLIC; -------------------------------------------------------------------------------- /pkg/migrations/sql/idempotent/metric-metadata.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION _prom_catalog.insert_metric_metadatas(t TIMESTAMPTZ[], metric_family_name TEXT[], metric_type TEXT[], metric_unit TEXT[], metric_help TEXT[]) 2 | RETURNS BIGINT 3 | AS 4 | $$ 5 | DECLARE 6 | num_rows BIGINT; 7 | BEGIN 8 | INSERT INTO _prom_catalog.metadata (last_seen, metric_family, type, unit, help) 9 | SELECT * FROM UNNEST($1, $2, $3, $4, $5) res(last_seen, metric_family, type, unit, help) 10 | ORDER BY res.metric_family, res.type, res.unit, res.help 11 | ON CONFLICT (metric_family, type, unit, help) DO 12 | UPDATE SET last_seen = EXCLUDED.last_seen; 13 | GET DIAGNOSTICS num_rows = ROW_COUNT; 14 | RETURN num_rows; 15 | END; 16 | $$ LANGUAGE plpgsql; 17 | GRANT EXECUTE ON FUNCTION _prom_catalog.insert_metric_metadatas(TIMESTAMPTZ[], TEXT[], TEXT[], TEXT[], TEXT[]) TO prom_writer; 18 | 19 | CREATE OR REPLACE FUNCTION prom_api.get_metric_metadata(metric_family_name TEXT) 20 | RETURNS TABLE (metric_family TEXT, type TEXT, unit TEXT, help TEXT) 21 | AS 22 | $$ 23 | SELECT metric_family, type, unit, help FROM _prom_catalog.metadata WHERE metric_family = metric_family_name ORDER BY last_seen DESC 24 | $$ LANGUAGE SQL; 25 | GRANT EXECUTE ON FUNCTION prom_api.get_metric_metadata(TEXT) TO prom_reader; 26 | 27 | -- metric_families should have unique elements, otherwise there will be duplicate rows in the returned table. 
28 | CREATE OR REPLACE FUNCTION prom_api.get_multiple_metric_metadata(metric_families TEXT[]) 29 | RETURNS TABLE (metric_family TEXT, type TEXT, unit TEXT, help TEXT) 30 | AS 31 | $$ 32 | SELECT info.* 33 | FROM unnest(metric_families) AS family(name) 34 | INNER JOIN LATERAL ( 35 | SELECT metric_family, type, unit, help FROM _prom_catalog.metadata WHERE metric_family = family.name ORDER BY last_seen DESC LIMIT 1 36 | ) AS info ON (true) 37 | $$ LANGUAGE SQL; 38 | GRANT EXECUTE ON FUNCTION prom_api.get_multiple_metric_metadata(TEXT[]) TO prom_reader; 39 | -------------------------------------------------------------------------------- /pkg/migrations/sql/idempotent/remote-commands.sql: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | Some remote commands are registered in the preinstall or upgrade scripts. 4 | 5 | Two remote commands are registered in the idempotent scripts which get run 6 | at the end of a fresh install and after every version upgrade. Thus, it's 7 | difficult to know where in the sequence these two will show up. 8 | 9 | This update will ensure a consistent ordering of our remote commands and 10 | place any potential user defined remote commands at the end of ours in 11 | original order. 12 | */ 13 | WITH x(key, seq) AS 14 | ( 15 | VALUES 16 | ('create_prom_reader' , 1), 17 | ('create_prom_writer' , 2), 18 | ('create_prom_modifier' , 3), 19 | ('create_prom_admin' , 4), 20 | ('create_prom_maintenance' , 5), 21 | ('grant_prom_reader_prom_writer' , 6), 22 | ('create_schemas' , 7), 23 | ('tracing_types' , 8), 24 | ('_prom_catalog.do_decompress_chunks_after' , 9), 25 | ('_prom_catalog.compress_old_chunks' , 10) 26 | ) 27 | UPDATE _prom_catalog.remote_commands u SET seq = z.seq 28 | FROM 29 | ( 30 | -- our remote commands from above 31 | SELECT key, seq 32 | FROM x 33 | UNION 34 | -- any other remote commands get listed afterwards 35 | SELECT key, (SELECT max(seq) FROM x) + row_number() OVER (ORDER BY seq) 36 | FROM _prom_catalog.remote_commands k 37 | WHERE NOT EXISTS 38 | ( 39 | SELECT 1 40 | FROM x 41 | WHERE x.key = k.key 42 | ) 43 | ORDER BY seq 44 | ) z 45 | WHERE u.key = z.key 46 | ; -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/000-utils.sql: -------------------------------------------------------------------------------- 1 | --perms for schema will be addressed later; 2 | CREATE SCHEMA IF NOT EXISTS _prom_catalog; 3 | 4 | --table to save commands so they can be run when adding new nodes 5 | CREATE TABLE _prom_catalog.remote_commands( 6 | key TEXT PRIMARY KEY, 7 | seq SERIAL, 8 | transactional BOOLEAN, 9 | command TEXT 10 | ); 11 | --only the prom owner has any permissions. 
12 | GRANT ALL ON TABLE _prom_catalog.remote_commands to CURRENT_USER; 13 | GRANT ALL ON SEQUENCE _prom_catalog.remote_commands_seq_seq to CURRENT_USER; 14 | 15 | CREATE OR REPLACE PROCEDURE _prom_catalog.execute_everywhere(command_key text, command TEXT, transactional BOOLEAN = true) 16 | AS $func$ 17 | BEGIN 18 | IF command_key IS NOT NULL THEN 19 | INSERT INTO _prom_catalog.remote_commands(key, command, transactional) VALUES(command_key, command, transactional) 20 | ON CONFLICT (key) DO UPDATE SET command = excluded.command, transactional = excluded.transactional; 21 | END IF; 22 | 23 | EXECUTE command; 24 | BEGIN 25 | CALL distributed_exec(command); 26 | EXCEPTION 27 | WHEN undefined_function THEN 28 | -- we're not on Timescale 2, just return 29 | RETURN; 30 | WHEN SQLSTATE '0A000' THEN 31 | -- we're not the access node, just return 32 | RETURN; 33 | END; 34 | END 35 | $func$ LANGUAGE PLPGSQL; 36 | --redundant given schema settings but extra caution for this function 37 | REVOKE ALL ON PROCEDURE _prom_catalog.execute_everywhere(text, text, boolean) FROM PUBLIC; 38 | 39 | CREATE OR REPLACE PROCEDURE _prom_catalog.update_execute_everywhere_entry(command_key text, command TEXT, transactional BOOLEAN = true) 40 | AS $func$ 41 | BEGIN 42 | UPDATE _prom_catalog.remote_commands 43 | SET 44 | command=update_execute_everywhere_entry.command, 45 | transactional=update_execute_everywhere_entry.transactional 46 | WHERE key = command_key; 47 | END 48 | $func$ LANGUAGE PLPGSQL; 49 | --redundant given schema settings but extra caution for this function 50 | REVOKE ALL ON PROCEDURE _prom_catalog.update_execute_everywhere_entry(text, text, boolean) FROM PUBLIC; -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/001-users.sql: -------------------------------------------------------------------------------- 1 | CALL _prom_catalog.execute_everywhere('create_prom_reader', $ee$ 2 | DO $$ 3 | BEGIN 4 | CREATE ROLE prom_reader; 5 | EXCEPTION WHEN duplicate_object THEN 6 | RAISE NOTICE 'role prom_reader already exists, skipping create'; 7 | RETURN; 8 | END 9 | $$; 10 | $ee$); 11 | 12 | CALL _prom_catalog.execute_everywhere('create_prom_writer', $ee$ 13 | DO $$ 14 | BEGIN 15 | CREATE ROLE prom_writer; 16 | EXCEPTION WHEN duplicate_object THEN 17 | RAISE NOTICE 'role prom_writer already exists, skipping create'; 18 | RETURN; 19 | END 20 | $$; 21 | $ee$); 22 | 23 | CALL _prom_catalog.execute_everywhere('create_prom_modifier', $ee$ 24 | DO $$ 25 | BEGIN 26 | CREATE ROLE prom_modifier; 27 | EXCEPTION WHEN duplicate_object THEN 28 | RAISE NOTICE 'role prom_modifier already exists, skipping create'; 29 | RETURN; 30 | END 31 | $$; 32 | $ee$); 33 | 34 | CALL _prom_catalog.execute_everywhere('create_prom_admin', $ee$ 35 | DO $$ 36 | BEGIN 37 | CREATE ROLE prom_admin; 38 | EXCEPTION WHEN duplicate_object THEN 39 | RAISE NOTICE 'role prom_admin already exists, skipping create'; 40 | RETURN; 41 | END 42 | $$; 43 | $ee$); 44 | 45 | CALL _prom_catalog.execute_everywhere('create_prom_maintenance', $ee$ 46 | DO $$ 47 | BEGIN 48 | CREATE ROLE prom_maintenance; 49 | EXCEPTION WHEN duplicate_object THEN 50 | RAISE NOTICE 'role prom_maintenance already exists, skipping create'; 51 | RETURN; 52 | END 53 | $$; 54 | $ee$); 55 | 56 | CALL _prom_catalog.execute_everywhere('grant_prom_reader_prom_writer',$ee$ 57 | DO $$ 58 | BEGIN 59 | GRANT prom_reader TO prom_writer; 60 | GRANT prom_reader TO prom_maintenance; 61 | GRANT prom_writer TO prom_modifier; 62 | GRANT prom_modifier 
TO prom_admin; 63 | GRANT prom_maintenance TO prom_admin; 64 | END 65 | $$; 66 | $ee$); 67 | -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/003-tag-operators.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE ps_tag.tag_op_jsonb_path_exists AS (tag_key text, value jsonpath); 2 | CREATE TYPE ps_tag.tag_op_regexp_matches AS (tag_key text, value text); 3 | CREATE TYPE ps_tag.tag_op_regexp_not_matches AS (tag_key text, value text); 4 | CREATE TYPE ps_tag.tag_op_equals AS (tag_key text, value jsonb); 5 | CREATE TYPE ps_tag.tag_op_not_equals AS (tag_key text, value jsonb); 6 | CREATE TYPE ps_tag.tag_op_less_than AS (tag_key text, value jsonb); 7 | CREATE TYPE ps_tag.tag_op_less_than_or_equal AS (tag_key text, value jsonb); 8 | CREATE TYPE ps_tag.tag_op_greater_than AS (tag_key text, value jsonb); 9 | CREATE TYPE ps_tag.tag_op_greater_than_or_equal AS (tag_key text, value jsonb); 10 | -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/006-install_uda.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION _prom_catalog.get_timescale_major_version() 2 | RETURNS INT 3 | AS $func$ 4 | SELECT split_part(extversion, '.', 1)::INT FROM pg_catalog.pg_extension WHERE extname='timescaledb' LIMIT 1; 5 | $func$ 6 | LANGUAGE SQL STABLE PARALLEL SAFE; 7 | 8 | --just a stub will be replaced in the idempotent scripts 9 | CREATE OR REPLACE PROCEDURE _prom_catalog.execute_maintenance_job(job_id int, config jsonb) 10 | AS $$ 11 | BEGIN 12 | RAISE 'calling execute_maintenance_job stub, should have been replaced'; 13 | END 14 | $$ LANGUAGE PLPGSQL; 15 | 16 | CREATE OR REPLACE FUNCTION _prom_catalog.is_timescaledb_installed() 17 | RETURNS BOOLEAN 18 | AS $func$ 19 | SELECT count(*) > 0 FROM pg_extension WHERE extname='timescaledb'; 20 | $func$ 21 | LANGUAGE SQL STABLE; 22 | GRANT EXECUTE ON FUNCTION _prom_catalog.is_timescaledb_installed() TO prom_reader; 23 | 24 | CREATE OR REPLACE FUNCTION _prom_catalog.is_timescaledb_oss() 25 | RETURNS BOOLEAN AS 26 | $$ 27 | BEGIN 28 | IF _prom_catalog.is_timescaledb_installed() THEN 29 | IF _prom_catalog.get_timescale_major_version() >= 2 THEN 30 | -- TimescaleDB 2.x 31 | RETURN (SELECT current_setting('timescaledb.license') = 'apache'); 32 | ELSE 33 | -- TimescaleDB 1.x 34 | -- Note: We cannot use current_setting() in 1.x, otherwise we get permission errors as 35 | -- we need to be superuser. We should not enforce the use of superuser. Hence, we take 36 | -- help of a view. 
37 | RETURN (SELECT edition = 'apache' FROM timescaledb_information.license); 38 | END IF; 39 | END IF; 40 | RETURN false; 41 | END; 42 | $$ 43 | LANGUAGE plpgsql; 44 | GRANT EXECUTE ON FUNCTION _prom_catalog.is_timescaledb_oss() TO prom_reader; 45 | 46 | --add 2 jobs executing every 30 min by default for timescaledb 2.0 47 | DO $$ 48 | BEGIN 49 | IF NOT _prom_catalog.is_timescaledb_oss() AND _prom_catalog.get_timescale_major_version() >= 2 THEN 50 | PERFORM public.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 51 | PERFORM public.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 52 | END IF; 53 | END 54 | $$; 55 | -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/007-tables_ha.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE _prom_catalog.ha_leases 2 | ( 3 | cluster_name TEXT PRIMARY KEY, 4 | leader_name TEXT, 5 | lease_start TIMESTAMPTZ, 6 | lease_until TIMESTAMPTZ 7 | ); 8 | GRANT SELECT ON TABLE _prom_catalog.ha_leases TO prom_reader; 9 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.ha_leases TO prom_writer; 10 | 11 | CREATE TABLE _prom_catalog.ha_leases_logs 12 | ( 13 | cluster_name TEXT NOT NULL, 14 | leader_name TEXT NOT NULL, 15 | lease_start TIMESTAMPTZ NOT NULL, -- inclusive 16 | lease_until TIMESTAMPTZ, -- exclusive 17 | PRIMARY KEY (cluster_name, leader_name, lease_start) 18 | ); 19 | GRANT SELECT ON TABLE _prom_catalog.ha_leases_logs TO prom_reader; 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.ha_leases_logs TO prom_writer; 21 | 22 | 23 | -- STUB for the function that the audit trigger calls to automatically keep the log - real implementation in ha.sql 24 | CREATE OR REPLACE FUNCTION _prom_catalog.ha_leases_audit_fn() 25 | RETURNS TRIGGER 26 | AS 27 | $func$ 28 | BEGIN 29 | RAISE 'Just a stub, should be overwritten'; 30 | RETURN NEW; 31 | END; 32 | $func$ LANGUAGE plpgsql VOLATILE; 33 | 34 | -- trigger to automatically keep the log 35 | CREATE TRIGGER ha_leases_audit 36 | AFTER INSERT OR UPDATE 37 | ON _prom_catalog.ha_leases 38 | FOR EACH ROW 39 | EXECUTE PROCEDURE _prom_catalog.ha_leases_audit_fn(); 40 | 41 | -- default values for lease 42 | INSERT INTO _prom_catalog.default(key, value) 43 | VALUES ('ha_lease_timeout', '1m'), 44 | ('ha_lease_refresh', '10s') 45 | ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/008-tables_metadata.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _prom_catalog.metadata 2 | ( 3 | last_seen TIMESTAMPTZ NOT NULL, 4 | metric_family TEXT NOT NULL, 5 | type TEXT DEFAULT NULL, 6 | unit TEXT DEFAULT NULL, 7 | help TEXT DEFAULT NULL, 8 | PRIMARY KEY (metric_family, type, unit, help) 9 | ); 10 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.metadata TO prom_writer; 11 | GRANT SELECT ON TABLE _prom_catalog.metadata TO prom_reader; 12 | 13 | CREATE INDEX IF NOT EXISTS metadata_index ON _prom_catalog.metadata 14 | ( 15 | metric_family, last_seen 16 | ); 17 | -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/009-tables_exemplar.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _prom_catalog.exemplar_label_key_position ( 2 | metric_name TEXT NOT NULL, 3 | key TEXT NOT NULL, 4
| pos INTEGER NOT NULL, 5 | PRIMARY KEY (metric_name, key) INCLUDE (pos) 6 | ); 7 | GRANT SELECT ON TABLE _prom_catalog.exemplar_label_key_position TO prom_reader; 8 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.exemplar_label_key_position TO prom_writer; 9 | 10 | CREATE TABLE IF NOT EXISTS _prom_catalog.exemplar ( 11 | id SERIAL PRIMARY KEY, 12 | metric_name TEXT NOT NULL, 13 | table_name TEXT NOT NULL, 14 | UNIQUE (metric_name) INCLUDE (table_name, id) 15 | ); 16 | GRANT SELECT ON TABLE _prom_catalog.exemplar TO prom_reader; 17 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.exemplar TO prom_writer; 18 | 19 | GRANT USAGE, SELECT ON SEQUENCE exemplar_id_seq TO prom_writer; 20 | -------------------------------------------------------------------------------- /pkg/migrations/sql/preinstall/012-telemetry.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _ps_catalog.promscale_instance_information ( 2 | uuid UUID NOT NULL PRIMARY KEY, 3 | last_updated TIMESTAMPTZ NOT NULL, 4 | promscale_ingested_samples_total BIGINT DEFAULT 0 NOT NULL, 5 | promscale_metrics_queries_success_total BIGINT DEFAULT 0 NOT NULL, 6 | promscale_metrics_queries_timedout_total BIGINT DEFAULT 0 NOT NULL, 7 | promscale_metrics_queries_failed_total BIGINT DEFAULT 0 NOT NULL, 8 | promscale_trace_query_requests_executed_total BIGINT DEFAULT 0 NOT NULL, 9 | promscale_trace_dependency_requests_executed_total BIGINT DEFAULT 0 NOT NULL, 10 | is_counter_reset_row BOOLEAN DEFAULT FALSE NOT NULL, -- counter reset row has '00000000-0000-0000-0000-000000000000' uuid 11 | promscale_ingested_spans_total BIGINT DEFAULT 0 NOT NULL 12 | CHECK((uuid = '00000000-0000-0000-0000-000000000000' OR NOT is_counter_reset_row) AND (uuid != '00000000-0000-0000-0000-000000000000' OR is_counter_reset_row)) 13 | ); 14 | GRANT SELECT ON TABLE _ps_catalog.promscale_instance_information TO prom_reader; 15 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _ps_catalog.promscale_instance_information TO prom_writer; 16 | 17 | -- Write a counter reset row, i.e., the first row in the table. Purpose: 18 | -- The above promscale_.* rows logically behave as counters. They get deleted by 19 | -- the telemetry-housekeeper Promscale when last_updated is old enough for the row to be considered stale. 20 | -- Since counters are always increasing, deleting these rows would result in data loss. 21 | -- To avoid this, we treat the first row as immutable and increment its attributes with 22 | -- the values of the stale rows before they are deleted.
23 | INSERT INTO _ps_catalog.promscale_instance_information (uuid, last_updated, is_counter_reset_row) 24 | VALUES ('00000000-0000-0000-0000-000000000000', '2021-12-09 00:00:00'::TIMESTAMPTZ, TRUE); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/pkg/migrations/sql/versions/dev/.gitignore -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.0-beta.2.dev/1-drop_procedure_named_drop_chunks.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE IF EXISTS prom_api.drop_chunks(); 2 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.0-beta.4.dev/1-drop_timescale_prometheus_extra.sql: -------------------------------------------------------------------------------- 1 | DROP EXTENSION IF EXISTS timescale_prometheus_extra; 2 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.1-dev/1-add_default_compression_setting.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE _prom_catalog.metric 2 | ADD COLUMN default_compression BOOLEAN NOT NULL DEFAULT true; 3 | 4 | INSERT INTO _prom_catalog.default(key,value) VALUES 5 | ('metric_compression', (exists(select * from pg_proc where proname = 'compress_chunk')::text)); 6 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.3-dev/1-enable_multinode.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE execute_everywhere(command TEXT, transactional BOOLEAN = true) 2 | AS $func$ 3 | BEGIN 4 | EXECUTE command; 5 | 6 | BEGIN 7 | CALL distributed_exec(command); 8 | EXCEPTION 9 | WHEN undefined_function THEN 10 | -- we're not on Timescale 2, just return 11 | RETURN; 12 | WHEN SQLSTATE '0A000' THEN 13 | -- we're not the access node, just return 14 | RETURN; 15 | END; 16 | END 17 | $func$ LANGUAGE PLPGSQL; 18 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.4-dev/1-change_compression_job.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE _prom_catalog.metric 2 | ADD COLUMN delay_compression_until TIMESTAMPTZ DEFAULT NULL; 3 | 4 | DROP PROCEDURE IF EXISTS _prom_catalog.decompress_chunks_after(NAME, TIMESTAMPTZ); 5 | DROP PROCEDURE IF EXISTS _prom_catalog.do_decompress_chunks_after(NAME, TIMESTAMPTZ); 6 | DROP PROCEDURE IF EXISTS _prom_catalog.compression_job(INT, JSONB); 7 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.5-dev/2-update_autovac_settings.sql: -------------------------------------------------------------------------------- 1 | DO $doit$ 2 | DECLARE 3 | r RECORD; 4 | BEGIN 5 | FOR r IN 6 | SELECT * 7 | FROM _prom_catalog.metric 8 | WHERE default_chunk_interval 9 | LOOP 10 | EXECUTE FORMAT($$ 11 | ALTER TABLE prom_data.%I SET (autovacuum_vacuum_threshold = 50000, autovacuum_analyze_threshold = 50000) 12 | $$, r.table_name); 13 | EXECUTE FORMAT($$ 14 | ALTER TABLE prom_data_series.%I SET 
(autovacuum_vacuum_threshold = 100, autovacuum_analyze_threshold = 100) 15 | $$, r.table_name); 16 | END LOOP; 17 | END 18 | $doit$; 19 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.1.5-dev/3-drop_function_metric_view.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.metric_view() CASCADE; -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.10.0-dev/1-alter_promscale_instance_information_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE _ps_catalog.promscale_instance_information 2 | RENAME COLUMN promscale_metrics_queries_executed_total 3 | TO promscale_metrics_queries_success_total; -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.10.0-dev/2-add_spans_total_column_telemetry.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE _ps_catalog.promscale_instance_information 2 | ADD COLUMN promscale_ingested_spans_total BIGINT NOT NULL DEFAULT 0 ; -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.2.2-dev/1-set_up_ha.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE _prom_catalog.ha_leases 2 | ( 3 | cluster_name TEXT PRIMARY KEY, 4 | leader_name TEXT, 5 | lease_start TIMESTAMPTZ, 6 | lease_until TIMESTAMPTZ 7 | ); 8 | 9 | CREATE TABLE _prom_catalog.ha_leases_logs 10 | ( 11 | cluster_name TEXT NOT NULL, 12 | leader_name TEXT NOT NULL, 13 | lease_start TIMESTAMPTZ NOT NULL, -- inclusive 14 | lease_until TIMESTAMPTZ, -- exclusive 15 | PRIMARY KEY (cluster_name, leader_name, lease_start) 16 | ); 17 | 18 | 19 | -- STUB for the function that the audit trigger calls to automatically keep the log - real implementation in ha.sql 20 | CREATE OR REPLACE FUNCTION _prom_catalog.ha_leases_audit_fn() 21 | RETURNS TRIGGER 22 | AS 23 | $func$ 24 | BEGIN 25 | RAISE 'Just a stub, should be overwritten'; 26 | RETURN NEW; 27 | END; 28 | $func$ LANGUAGE plpgsql VOLATILE; 29 | 30 | -- trigger to automatically keep the log 31 | CREATE TRIGGER ha_leases_audit 32 | AFTER INSERT OR UPDATE 33 | ON _prom_catalog.ha_leases 34 | FOR EACH ROW 35 | EXECUTE PROCEDURE _prom_catalog.ha_leases_audit_fn(); 36 | 37 | -- default values for lease 38 | INSERT INTO _prom_catalog.default(key, value) 39 | VALUES ('ha_lease_timeout', '1m'), 40 | ('ha_lease_refresh', '10s') 41 | ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; 42 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.4.2-dev/1-drop_some_func.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.get_new_pos_for_key(text, text); 2 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_label_ids(text, text[], text[]); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.4.2-dev/2-metric_metadata.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _prom_catalog.metadata 2 | ( 3 | last_seen TIMESTAMPTZ NOT NULL, 4 | metric_family TEXT NOT NULL, 5 | type TEXT DEFAULT NULL, 6 | unit TEXT DEFAULT NULL, 7 | help TEXT DEFAULT NULL, 8 |
PRIMARY KEY (metric_family, type, unit, help) 9 | ); 10 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.metadata TO prom_writer; 11 | GRANT SELECT ON TABLE _prom_catalog.metadata TO prom_reader; 12 | 13 | CREATE INDEX IF NOT EXISTS metadata_index ON _prom_catalog.metadata 14 | ( 15 | metric_family, last_seen 16 | ); 17 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.4.2-dev/3-drop_old_delete_expired_series.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.delete_expired_series(TEXT, TIMESTAMPTZ); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.4.2-dev/4-drop_old_main_funcs.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE IF EXISTS _prom_catalog.drop_metric_chunks(TEXT, TIMESTAMPTZ, TIMESTAMPTZ); 2 | DROP PROCEDURE IF EXISTS _prom_catalog.execute_data_retention_policy(); 3 | DROP PROCEDURE IF EXISTS prom_api.execute_maintenance(); 4 | DROP FUNCTION IF EXISTS prom_api.config_maintenance_jobs(int, interval); 5 | DROP PROCEDURE IF EXISTS _prom_catalog.execute_compression_policy(); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.5.2-dev/1-downsampling.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | BEGIN 3 | --This fixes previous updates to 0.6 that were only partially applied. See issue #755 4 | --Often this isn't needed and so will error out 5 | ALTER TABLE _prom_catalog.metric 6 | DROP COLUMN table_schema, 7 | DROP COLUMN series_table, 8 | DROP COLUMN is_view, 9 | ADD CONSTRAINT "metric_metric_name_table_name_key" UNIQUE(metric_name) INCLUDE (table_name), 10 | ADD CONSTRAINT "metric_table_name_key" UNIQUE(table_name); 11 | EXCEPTION WHEN others THEN --ignore 12 | NULL; 13 | END 14 | $$; 15 | 16 | ALTER TABLE _prom_catalog.metric 17 | ADD COLUMN table_schema name NOT NULL DEFAULT 'prom_data', 18 | ADD COLUMN series_table name, -- series_table stores the name of the table used to store the series data for this metric. 
19 | ADD COLUMN is_view BOOLEAN NOT NULL DEFAULT false, 20 | DROP CONSTRAINT metric_metric_name_table_name_key, 21 | DROP CONSTRAINT metric_table_name_key, 22 | ADD CONSTRAINT metric_metric_name_table_schema_table_name_key UNIQUE (metric_name, table_schema) INCLUDE (table_name), 23 | ADD CONSTRAINT metric_table_schema_table_name_key UNIQUE(table_schema, table_name); 24 | 25 | UPDATE _prom_catalog.metric SET series_table = table_name WHERE 1 = 1; 26 | ALTER TABLE _prom_catalog.metric ALTER COLUMN series_table SET NOT NULL; 27 | 28 | DROP FUNCTION IF EXISTS _prom_catalog.get_metric_table_name_if_exists(TEXT); 29 | DROP FUNCTION IF EXISTS _prom_catalog.get_confirmed_unused_series( TEXT, BIGINT[], TIMESTAMPTZ); 30 | DROP FUNCTION IF EXISTS _prom_catalog.mark_unused_series(TEXT, TIMESTAMPTZ, TIMESTAMPTZ); 31 | DROP FUNCTION IF EXISTS _prom_catalog.delete_expired_series(TEXT, TIMESTAMPTZ, BIGINT, TIMESTAMPTZ); 32 | DROP FUNCTION IF EXISTS _prom_catalog.drop_metric_chunk_data(TEXT, TIMESTAMPTZ); 33 | DROP PROCEDURE IF EXISTS _prom_catalog.drop_metric_chunks(TEXT, TIMESTAMPTZ, TIMESTAMPTZ, BOOLEAN); 34 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.0-beta.1.dev/2-drop_get_tag_id.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _ps_trace.get_tag_id(ps_trace.tag_map, ps_trace.tag_k) CASCADE; 2 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.0-beta.1.dev/3-drop_info_function.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.hypertable_compression_stats(name); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.0-beta.1.dev/4-drop_get_operation.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS ps_trace.get_operation(text, text, ps_trace.span_kind) CASCADE; 2 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.0-beta.1.dev/5-drop_trace_tree_funcs.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS ps_trace.upstream_spans(ps_trace.trace_id, bigint, int) CASCADE; 2 | DROP FUNCTION IF EXISTS ps_trace.downstream_spans(ps_trace.trace_id, bigint, int) CASCADE; 3 | DROP FUNCTION IF EXISTS ps_trace.span_tree(ps_trace.trace_id, bigint, int) CASCADE; 4 | -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.2-dev/2-telemetry_housekeeper.sql: -------------------------------------------------------------------------------- 1 | -- See the purpose in migrations/sql/preinstall/012-telemetry.sql 2 | INSERT INTO _ps_catalog.promscale_instance_information (uuid, last_updated, is_counter_reset_row) 3 | VALUES ('00000000-0000-0000-0000-000000000000', '2021-12-09 00:00:00'::TIMESTAMPTZ, TRUE); -------------------------------------------------------------------------------- /pkg/migrations/sql/versions/dev/0.7.2-dev/4-data_retention.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO _prom_catalog.default(key, value) 2 | VALUES ('trace_retention_period', (30 * INTERVAL '1 days')::text); 3 | -------------------------------------------------------------------------------- 
/pkg/migrations/sql/versions/dev/0.8.1-dev/1-drop_old_series_funcs.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.get_new_pos_for_key(text, text[], boolean); 2 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_label_ids(TEXT, text[], text[]); 3 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_series_id_for_label_array(TEXT, prom_api.label_array); 4 | -------------------------------------------------------------------------------- /pkg/pgmodel/cache/exemplar_key_cache.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package cache 6 | 7 | import ( 8 | "unsafe" 9 | 10 | "github.com/timescale/promscale/pkg/clockcache" 11 | ) 12 | 13 | // Make the cache size the same as the metric size, assuming every metric has an exemplar 14 | const DefaultExemplarKeyPosCacheSize = DefaultMetricCacheSize 15 | 16 | type PositionCache interface { 17 | // GetLabelPositions fetches the position of label keys (as index) that must be respected 18 | // while pushing exemplar label's values to the database. 19 | GetLabelPositions(metric string) (map[string]int, bool) 20 | // SetOrUpdateLabelPositions sets or updates the position of label (index) keys for the given metric. 21 | SetOrUpdateLabelPositions(metric string, index map[string]int) 22 | } 23 | 24 | type ExemplarLabelsPosCache struct { 25 | cache *clockcache.Cache 26 | } 27 | 28 | // NewExemplarLabelsPosCache creates a cache of map[metric_name]LabelPositions where LabelPositions is 29 | // map[LabelName]LabelPosition. This means that the cache stores positions of each label's value per metric basis, 30 | // which is meant to preserve and reuse _prom_catalog.exemplar_label_position table's 'pos' column. 31 | func NewExemplarLabelsPosCache(config Config) PositionCache { 32 | return &ExemplarLabelsPosCache{cache: clockcache.WithMetrics("exemplar_labels", "metric", config.ExemplarKeyPosCacheSize)} 33 | } 34 | 35 | func (pos *ExemplarLabelsPosCache) GetLabelPositions(metric string) (map[string]int, bool) { 36 | labelPos, exists := pos.cache.Get(metric) 37 | if !exists { 38 | return nil, false 39 | } 40 | return labelPos.(map[string]int), true 41 | } 42 | 43 | func (pos *ExemplarLabelsPosCache) SetOrUpdateLabelPositions(metric string, index map[string]int) { 44 | /* Sizeof only measures map header; not what's inside. Assume 100-length metric names in worst case */ 45 | size := uint64(unsafe.Sizeof(index)) + uint64(len(index)*(100+4)) // #nosec 46 | pos.cache.Update(metric, index, size) 47 | } 48 | -------------------------------------------------------------------------------- /pkg/pgmodel/cache/flags_test.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 
2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license 4 | 5 | package cache 6 | 7 | import ( 8 | "flag" 9 | "io" 10 | "os" 11 | "testing" 12 | 13 | "github.com/peterbourgon/ff/v3" 14 | "github.com/stretchr/testify/require" 15 | "github.com/timescale/promscale/pkg/limits" 16 | ) 17 | 18 | func fullyParse(t *testing.T, args []string, lcfg *limits.Config, expectError bool) Config { 19 | fs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError) 20 | fs.SetOutput(io.Discard) 21 | config := &Config{} 22 | ParseFlags(fs, config) 23 | err := ff.Parse(fs, args) 24 | if !expectError { 25 | require.NoError(t, err) 26 | } else { 27 | require.Error(t, err) 28 | return Config{} 29 | } 30 | require.NoError(t, Validate(config, *lcfg)) 31 | return *config 32 | } 33 | 34 | func TestParse(t *testing.T) { 35 | config := fullyParse(t, []string{}, &limits.Config{TargetMemoryBytes: 100000}, false) 36 | require.Equal(t, uint64(50000), config.SeriesCacheMemoryMaxBytes) 37 | 38 | config = fullyParse(t, []string{"-metrics.cache.series.max-bytes", "60%"}, &limits.Config{TargetMemoryBytes: 200000}, false) 39 | require.Equal(t, uint64(120000), config.SeriesCacheMemoryMaxBytes) 40 | 41 | fullyParse(t, []string{"-metrics.cache.series.max-bytes", "60"}, &limits.Config{TargetMemoryBytes: 100000}, true) 42 | 43 | config = fullyParse(t, []string{"-metrics.cache.series.max-bytes", "60000"}, &limits.Config{TargetMemoryBytes: 200000}, false) 44 | require.Equal(t, uint64(60000), config.SeriesCacheMemoryMaxBytes) 45 | } 46 | -------------------------------------------------------------------------------- /pkg/pgmodel/cache/inverted_labels_cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "github.com/timescale/promscale/pkg/clockcache" 5 | ) 6 | 7 | const DefaultInvertedLabelsCacheSize = 500000 8 | 9 | type LabelInfo struct { 10 | LabelID int32 // id of label 11 | Pos int32 // position of specific label within a specific metric. 
12 | } 13 | 14 | type LabelKey struct { 15 | MetricName, Name, Value string 16 | } 17 | 18 | func NewLabelKey(metricName, name, value string) LabelKey { 19 | return LabelKey{MetricName: metricName, Name: name, Value: value} 20 | } 21 | 22 | func (lk LabelKey) len() int { 23 | return len(lk.MetricName) + len(lk.Name) + len(lk.Value) 24 | } 25 | 26 | func NewLabelInfo(labelID, pos int32) LabelInfo { 27 | return LabelInfo{LabelID: labelID, Pos: pos} 28 | } 29 | 30 | func (li LabelInfo) len() int { 31 | return 8 32 | } 33 | 34 | // (metric, label key-pair) -> (label id, label position) cache. 35 | // Used when creating series to avoid DB calls for labels. 36 | // Each label position is unique for a specific metric, meaning that 37 | // one label can have a different position for different metrics. 38 | type InvertedLabelsCache struct { 39 | *ResizableCache 40 | } 41 | 42 | // Cache is thread-safe. 43 | func NewInvertedLabelsCache(config Config, sigClose chan struct{}) *InvertedLabelsCache { 44 | cache := clockcache.WithMetrics("inverted_labels", "metric", config.InvertedLabelsCacheSize) 45 | return &InvertedLabelsCache{NewResizableCache(cache, config.InvertedLabelsCacheMaxBytes, sigClose)} 46 | } 47 | 48 | func (c *InvertedLabelsCache) GetLabelsId(key LabelKey) (LabelInfo, bool) { 49 | id, found := c.Get(key) 50 | if found { 51 | return id.(LabelInfo), found 52 | } 53 | return LabelInfo{}, false 54 | } 55 | 56 | func (c *InvertedLabelsCache) Put(key LabelKey, val LabelInfo) bool { 57 | _, added := c.Insert(key, val, uint64(key.len())+uint64(val.len())+17) 58 | return added 59 | } 60 | -------------------------------------------------------------------------------- /pkg/pgmodel/common/errors/errors.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package errors 6 | 7 | import "fmt" 8 | 9 | var ( 10 | ErrNoMetricName = fmt.Errorf("metric name missing") 11 | ErrNoClausesGen = fmt.Errorf("no clauses generated") 12 | ErrEntryNotFound = fmt.Errorf("entry not found") 13 | ErrInvalidCacheEntryType = fmt.Errorf("invalid cache entry type stored") 14 | ErrInvalidRowData = fmt.Errorf("invalid row data, length of arrays does not match") 15 | ErrExtUnavailable = fmt.Errorf("the extension is not available") 16 | ErrMissingTableName = fmt.Errorf("missing metric table name") 17 | ErrTimeBasedDeletion = fmt.Errorf("time based series deletion is unsupported") 18 | ErrInvalidSemverFormat = fmt.Errorf("app version is not semver format, aborting migration") 19 | ErrQueryMismatchTimestampValue = fmt.Errorf("query returned a mismatch in timestamps and values") 20 | 21 | ErrTmplMissingUnderlyingRelation = `the underlying table ("%s"."%s") which is used to store the metric` + 22 | " values has been moved/removed, thus the data cannot be retrieved" 23 | ) 24 | -------------------------------------------------------------------------------- /pkg/pgmodel/common/schema/schema.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
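A short usage sketch of the inverted labels cache defined above (hypothetical values; only the constructors and methods shown above are assumed):

// Sketch: the lookup-then-fill flow a series writer would follow.
func resolveLabel(labels *cache.InvertedLabelsCache) cache.LabelInfo {
	key := cache.NewLabelKey("http_requests_total", "job", "api-server")
	if info, ok := labels.GetLabelsId(key); ok {
		return info // cache hit: no DB round-trip needed
	}
	// cache miss: fetch the label id and position from the database (elided), then memoize.
	info := cache.NewLabelInfo(42, 1)
	labels.Put(key, info)
	return info
}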
4 | 5 | package schema 6 | 7 | import ( 8 | "github.com/jackc/pgx/v5/pgtype" 9 | ) 10 | 11 | const ( 12 | PromData = "prom_data" 13 | PromDataExemplar = "prom_data_exemplar" 14 | PromExt = "_prom_ext" 15 | // Public is where all timescaledb-functions are loaded 16 | Public = "public" 17 | 18 | LockID = 5585198506344173278 // Chosen randomly. 19 | 20 | PromDataSeries = "prom_data_series" 21 | PsTrace = "_ps_trace" 22 | ) 23 | 24 | var ( 25 | PromDataColumns = []string{"time", "value", "series_id"} 26 | PromDataColumnsOIDs = []uint32{ 27 | pgtype.TimestamptzOID, 28 | pgtype.Float8OID, 29 | pgtype.Int8OID, 30 | } 31 | PromExemplarColumns = []string{"time", "series_id", "exemplar_label_values", "value"} 32 | ) 33 | 34 | func PromExemplarColumnsOIDs(typeMap *pgtype.Map) ([]uint32, bool) { 35 | labelValueArrayType, ok := typeMap.TypeForName("_prom_api.label_value_array") 36 | if !ok { 37 | return []uint32{}, ok 38 | } 39 | return []uint32{ 40 | pgtype.TimestamptzOID, 41 | pgtype.Int8OID, 42 | labelValueArrayType.OID, 43 | pgtype.Float8OID, 44 | }, ok 45 | } 46 | -------------------------------------------------------------------------------- /pkg/pgmodel/exemplar/exemplar.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package exemplar 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "time" 11 | 12 | "github.com/prometheus/prometheus/promql/parser" 13 | "github.com/timescale/promscale/pkg/pgmodel/model" 14 | "github.com/timescale/promscale/pkg/promql" 15 | ) 16 | 17 | // QueryExemplar fetches the exemplars from the database using the queryable. 18 | func QueryExemplar(ctx context.Context, query string, queryable promql.Queryable, start, end time.Time) ([]model.ExemplarQueryResult, error) { 19 | expr, err := parser.ParseExpr(query) 20 | if err != nil { 21 | return nil, err 22 | } 23 | selectors := parser.ExtractSelectors(expr) 24 | if len(selectors) < 1 { 25 | // We have nothing to fetch if there are no selectors. 26 | return []model.ExemplarQueryResult{}, nil 27 | } 28 | querier := queryable.ExemplarsQuerier(ctx) 29 | results, err := querier.Select(start, end, selectors...) 30 | if err != nil { 31 | return nil, fmt.Errorf("selecting exemplars: %w", err) 32 | } 33 | return results, nil 34 | } 35 | -------------------------------------------------------------------------------- /pkg/pgmodel/health/health_checker.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package health 6 | 7 | import ( 8 | "context" 9 | 10 | "github.com/timescale/promscale/pkg/pgxconn" 11 | ) 12 | 13 | // HealthCheckerFn allows checking for proper db operations. 
14 | type HealthCheckerFn func() error 15 | 16 | func NewHealthChecker(conn pgxconn.PgxConn) HealthCheckerFn { 17 | return func() error { 18 | rows, err := conn.Query(context.Background(), "SELECT") 19 | 20 | if err != nil { 21 | return err 22 | } 23 | 24 | rows.Close() 25 | return nil 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /pkg/pgmodel/ingestor/ingestor_interface.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package ingestor 6 | 7 | import ( 8 | "context" 9 | 10 | "github.com/timescale/promscale/pkg/prompb" 11 | "go.opentelemetry.io/collector/pdata/ptrace" 12 | ) 13 | 14 | // DBInserter is responsible for ingesting the TimeSeries protobuf structs and 15 | // storing them in the database. 16 | type DBInserter interface { 17 | // IngestMetrics takes a write request of time series and attempts to store them in the database. 18 | // It returns the numbers of samples and of metadata entries ingested, and any error encountered before finishing. 19 | IngestMetrics(context.Context, *prompb.WriteRequest) (uint64, uint64, error) 20 | IngestTraces(context.Context, ptrace.Traces) error 21 | Close() 22 | } 23 | -------------------------------------------------------------------------------- /pkg/pgmodel/ingestor/trace/cache.go: -------------------------------------------------------------------------------- 1 | package trace 2 | 3 | import "github.com/timescale/promscale/pkg/clockcache" 4 | 5 | const ( 6 | urlCacheSize = 10000 7 | operationCacheSize = 10000 8 | instLibCacheSize = 10000 9 | tagCacheSize = 100000 10 | ) 11 | 12 | func newSchemaCache() *clockcache.Cache { 13 | return clockcache.WithMetrics("schema", "trace", urlCacheSize) 14 | } 15 | 16 | func newOperationCache() *clockcache.Cache { 17 | return clockcache.WithMetrics("operation", "trace", operationCacheSize) 18 | } 19 | 20 | func newInstrumentationLibraryCache() *clockcache.Cache { 21 | return clockcache.WithMetrics("instrumentation_lib", "trace", instLibCacheSize) 22 | } 23 | 24 | func newTagCache() *clockcache.Cache { 25 | return clockcache.WithMetrics("tag", "trace", tagCacheSize) 26 | } 27 | -------------------------------------------------------------------------------- /pkg/pgmodel/ingestor/trace/schema_url_batch.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
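For context, NewHealthChecker above just wraps a trivial round-trip query; a caller might expose it as a readiness endpoint roughly like this (a sketch — the HTTP wiring is hypothetical, only health.HealthCheckerFn comes from the code above):

// Sketch: serving the health check as an HTTP readiness handler.
func readyHandler(check health.HealthCheckerFn) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := check(); err != nil {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}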
4 | 5 | package trace 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | 11 | "github.com/jackc/pgx/v5" 12 | "github.com/jackc/pgx/v5/pgtype" 13 | "github.com/timescale/promscale/pkg/pgxconn" 14 | ) 15 | 16 | const insertSchemaURLSQL = `SELECT ps_trace.put_schema_url($1)` 17 | 18 | type schemaURL string 19 | 20 | func (s schemaURL) SizeInCache() uint64 { 21 | return uint64(len(s) + 9) // 9 bytes for pgtype.Int8 22 | } 23 | 24 | func (s schemaURL) Before(url sortable) bool { 25 | if u, ok := url.(schemaURL); ok { 26 | return s < u 27 | } 28 | panic(fmt.Sprintf("cannot use Before function on schemaURL with a different type: %T", url)) 29 | } 30 | 31 | func (s schemaURL) AddToDBBatch(batch pgxconn.PgxBatch) { 32 | batch.Queue(insertSchemaURLSQL, s) 33 | } 34 | 35 | func (s schemaURL) ScanIDs(r pgx.BatchResults) (interface{}, error) { 36 | var id pgtype.Int8 37 | err := r.QueryRow().Scan(&id) 38 | return id, err 39 | } 40 | 41 | type schemaURLBatch struct { 42 | b batcher 43 | } 44 | 45 | func newSchemaUrlBatch(cache cache) schemaURLBatch { 46 | return schemaURLBatch{ 47 | b: newBatcher(cache), 48 | } 49 | } 50 | 51 | func (s schemaURLBatch) Queue(url string) { 52 | if url == "" { 53 | return 54 | } 55 | s.b.Queue(schemaURL(url)) 56 | } 57 | 58 | func (s schemaURLBatch) SendBatch(ctx context.Context, conn pgxconn.PgxConn) (err error) { 59 | return s.b.SendBatch(ctx, conn) 60 | } 61 | 62 | func (s schemaURLBatch) GetID(url string) (pgtype.Int8, error) { 63 | if url == "" { 64 | return pgtype.Int8{Valid: false}, nil 65 | } 66 | id, err := s.b.GetID(schemaURL(url)) 67 | if err != nil { 68 | return id, fmt.Errorf("error getting ID for schema url %s: %w", url, err) 69 | } 70 | 71 | return id, nil 72 | } 73 | -------------------------------------------------------------------------------- /pkg/pgmodel/ingestor/write_request_pool.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package ingestor 6 | 7 | import ( 8 | "sync" 9 | 10 | "github.com/timescale/promscale/pkg/prompb" 11 | ) 12 | 13 | var wrPool = sync.Pool{ 14 | New: func() interface{} { 15 | return new(prompb.WriteRequest) 16 | }, 17 | } 18 | 19 | // NewWriteRequest returns a new *prompb.WriteRequest from the pool. 20 | func NewWriteRequest() *prompb.WriteRequest { 21 | return wrPool.Get().(*prompb.WriteRequest) 22 | } 23 | 24 | // FinishWriteRequest adds the *prompb.WriteRequest back into the pool after setting parameters to default. 25 | func FinishWriteRequest(wr *prompb.WriteRequest) { 26 | if wr == nil { 27 | return 28 | } 29 | for i := range wr.Timeseries { 30 | ts := &wr.Timeseries[i] 31 | for j := range ts.Labels { 32 | ts.Labels[j] = prompb.Label{} 33 | } 34 | ts.Labels = ts.Labels[:0] 35 | ts.Samples = ts.Samples[:0] 36 | ts.Exemplars = ts.Exemplars[:0] 37 | ts.XXX_unrecognized = nil 38 | } 39 | wr.Timeseries = wr.Timeseries[:0] 40 | wr.Metadata = wr.Metadata[:0] 41 | wr.XXX_unrecognized = nil 42 | wrPool.Put(wr) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/pgmodel/metadata/metadata.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
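The pool above follows the usual sync.Pool discipline: take a request, use it, and hand it back zeroed. A hedged usage sketch (the handler below is hypothetical; NewWriteRequest, FinishWriteRequest, and DBInserter come from this package):

// Sketch: borrow a WriteRequest for the lifetime of one ingest call and
// always return it to the pool, which FinishWriteRequest resets for reuse.
func handleIngest(ctx context.Context, inserter DBInserter, decode func(*prompb.WriteRequest) error) error {
	wr := NewWriteRequest()
	defer FinishWriteRequest(wr)
	if err := decode(wr); err != nil { // fill wr from the remote-write payload
		return err
	}
	_, _, err := inserter.IngestMetrics(ctx, wr)
	return err
}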
4 | 5 | package metadata 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | 11 | "github.com/timescale/promscale/pkg/pgmodel/model" 12 | "github.com/timescale/promscale/pkg/pgxconn" 13 | ) 14 | 15 | // MetricQuery returns metadata corresponding to metric or metric_family. 16 | func MetricQuery(ctx context.Context, conn pgxconn.PgxConn, metric string, limit int) (map[string][]model.Metadata, error) { 17 | var ( 18 | rows pgxconn.PgxRows 19 | err error 20 | ) 21 | if metric != "" { 22 | rows, err = conn.Query(ctx, "SELECT * from prom_api.get_metric_metadata($1)", metric) 23 | } else { 24 | rows, err = conn.Query(ctx, "SELECT metric_family, type, unit, help from _prom_catalog.metadata ORDER BY metric_family, last_seen DESC") 25 | } 26 | if err != nil { 27 | return nil, fmt.Errorf("query metric metadata: %w", err) 28 | } 29 | defer rows.Close() 30 | metricFamilies := make(map[string][]model.Metadata) 31 | for rows.Next() { 32 | if limit != 0 && len(metricFamilies) >= limit { 33 | // Limit is applied on number of metric_families and not on number of metadata. 34 | break 35 | } 36 | var metricFamily, typ, unit, help string 37 | if err := rows.Scan(&metricFamily, &typ, &unit, &help); err != nil { 38 | return nil, fmt.Errorf("query result: %w", err) 39 | } 40 | metricFamilies[metricFamily] = append(metricFamilies[metricFamily], model.Metadata{ 41 | Unit: unit, 42 | Type: typ, 43 | Help: help, 44 | }) 45 | } 46 | return metricFamilies, nil 47 | } 48 | -------------------------------------------------------------------------------- /pkg/pgmodel/metrics/database/database_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/stretchr/testify/require" 10 | "github.com/timescale/promscale/pkg/tests/testsupport" 11 | "github.com/timescale/promscale/pkg/util" 12 | ) 13 | 14 | func TestCustomPollConfig(t *testing.T) { 15 | engine := &metricsEngineImpl{ 16 | conn: testsupport.MockPgxConn{}, 17 | ctx: context.Background(), 18 | metrics: []metricQueryWrap{ 19 | { 20 | metrics: counters( 21 | prometheus.CounterOpts{ 22 | Namespace: util.PromNamespace, 23 | Subsystem: "sql_database", 24 | Name: "test", 25 | Help: "test", 26 | }, 27 | ), 28 | customPollConfig: updateAtMostEvery(1 * time.Second), 29 | query: "SELECT 1", 30 | }, 31 | }, 32 | } 33 | 34 | testStart := time.Now() 35 | testMetricPollConfig := &engine.metrics[0].customPollConfig 36 | 37 | if err := engine.Update(); err != nil { 38 | t.Fail() 39 | } 40 | require.False(t, testMetricPollConfig.lastUpdate.After(testStart)) 41 | 42 | require.Eventually(t, func() bool { 43 | if err := engine.Update(); err != nil { 44 | t.Fail() 45 | } 46 | return testMetricPollConfig.lastUpdate.After(testStart) 47 | }, 5*time.Second, 1*time.Second) 48 | 49 | } 50 | -------------------------------------------------------------------------------- /pkg/pgmodel/metrics/ha.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "github.com/timescale/promscale/pkg/util" 6 | ) 7 | 8 | var ( 9 | HAClusterLeaderDetails = prometheus.NewGaugeVec( 10 | prometheus.GaugeOpts{ 11 | Namespace: util.PromNamespace, 12 | Subsystem: "ha", 13 | Name: "cluster_leader_info", 14 | Help: "Info on HA clusters and respective leaders.", 15 | }, 16 | []string{"cluster", "replica"}) 17 | 
NumOfHAClusterLeaderChanges = prometheus.NewCounterVec( 18 | prometheus.CounterOpts{ 19 | Namespace: util.PromNamespace, 20 | Subsystem: "ha", 21 | Name: "cluster_leader_changes_total", 22 | Help: "Total number of times leader changed per cluster.", 23 | }, 24 | []string{"cluster"}) 25 | ) 26 | 27 | func init() { 28 | prometheus.MustRegister(HAClusterLeaderDetails, NumOfHAClusterLeaderChanges) 29 | } 30 | -------------------------------------------------------------------------------- /pkg/pgmodel/metrics/query.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "github.com/timescale/promscale/pkg/util" 6 | ) 7 | 8 | var ( 9 | QueryDuration = prometheus.NewHistogramVec( 10 | prometheus.HistogramOpts{ 11 | Namespace: util.PromNamespace, 12 | Subsystem: "query", 13 | Name: "duration_seconds", 14 | Help: "Time taken to respond to the query/query batch.", 15 | Buckets: []float64{0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 50, 100, 250, 500, 1000, 2500}, 16 | }, []string{"type", "handler", "code"}, 17 | ) 18 | Query = prometheus.NewCounterVec( 19 | prometheus.CounterOpts{ 20 | Namespace: util.PromNamespace, 21 | Subsystem: "query", 22 | Name: "requests_total", 23 | Help: "Number of query requests to Promscale.", 24 | }, []string{"type", "handler", "code", "reason"}, 25 | ) 26 | ) 27 | 28 | func init() { 29 | prometheus.MustRegister( 30 | Query, 31 | QueryDuration, 32 | ) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/exemplars_test.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package model 6 | 7 | import ( 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | "github.com/timescale/promscale/pkg/prompb" 12 | ) 13 | 14 | func TestOrderExemplarLabelsPositionExists(t *testing.T) { 15 | rawExemplars := []prompb.Exemplar{ 16 | { 17 | Labels: []prompb.Label{{Name: "TraceID", Value: "some_trace_id"}, {Name: "component", Value: "tester"}}, 18 | Value: 1.5, 19 | Timestamp: 1, 20 | }, 21 | { 22 | Labels: []prompb.Label{{Name: "app", Value: "test"}, {Name: "component", Value: "tester"}}, 23 | Value: 2.5, 24 | Timestamp: 3, 25 | }, 26 | { 27 | Labels: []prompb.Label{}, // No labels. An exemplar without labels is valid according to OpenMetrics. 28 | Value: 3.5, 29 | Timestamp: 5, 30 | }, 31 | } 32 | insertable := NewPromExemplars(nil, rawExemplars) 33 | index := prepareIndex(rawExemplars) 34 | require.True(t, insertable.OrderExemplarLabels(index)) 35 | rawExemplars = append(rawExemplars, prompb.Exemplar{ 36 | Labels: []prompb.Label{{Name: "namespace", Value: "default"}}, 37 | Value: 10, 38 | Timestamp: 10, 39 | }) 40 | insertable = NewPromExemplars(nil, rawExemplars) 41 | // The index is now invalid: OrderExemplarLabels should return positionExists as false, indicating that the index needs an update.
42 | require.False(t, insertable.OrderExemplarLabels(index)) 43 | } 44 | 45 | func prepareIndex(exemplars []prompb.Exemplar) map[string]int { 46 | index := make(map[string]int) 47 | position := 1 48 | for _, exemplar := range exemplars { 49 | for i := range exemplar.Labels { 50 | lbl := exemplar.Labels[i] 51 | if _, exists := index[lbl.Name]; !exists { 52 | index[lbl.Name] = position 53 | position++ 54 | } 55 | } 56 | } 57 | return index 58 | } 59 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/insertables.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package model 6 | 7 | import "github.com/timescale/promscale/pkg/prompb" 8 | 9 | type InsertableType uint8 10 | 11 | const ( 12 | Sample InsertableType = iota 13 | Exemplar 14 | ) 15 | 16 | type Insertable interface { 17 | // Series returns a reference to the series that the insertable belongs to. 18 | Series() *Series 19 | // Count returns the number of data points in the current insertable. 20 | Count() int 21 | // MaxTs returns the max timestamp among the datapoints in the insertable. 22 | // In most cases, this will be the timestamp from the last sample, since 23 | // Prometheus dispatches data in sorted order of time. 24 | MaxTs() int64 25 | // Iterator returns an iterator that iterates over underlying datapoints. 26 | Iterator() Iterator 27 | // Type returns the type of the underlying insertable. 28 | Type() InsertableType 29 | // IsOfType returns true if the provided type matches the underlying insertable datatype. 30 | IsOfType(InsertableType) bool 31 | } 32 | 33 | // Iterator iterates over datapoints. 34 | type Iterator interface { 35 | // HasNext returns true if there is any datapoint that is yet to be read. 36 | HasNext() bool 37 | } 38 | 39 | // SamplesIterator iterates over samples. 40 | type SamplesIterator interface { 41 | Iterator 42 | // Value returns the current sample's timestamp and value. 43 | Value() (timestamp int64, value float64) 44 | } 45 | 46 | // ExemplarsIterator iterates over exemplars. 47 | type ExemplarsIterator interface { 48 | Iterator 49 | // Value returns the current exemplar's label array, timestamp and value. 50 | Value() (labels []prompb.Label, timestamp int64, value float64) 51 | } 52 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/interface.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
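A sketch of how the iterator split in `pkg/pgmodel/model/insertables.go` above is meant to be consumed: callers type-switch on the narrower iterator interfaces before reading values. `countPoints` is a hypothetical helper, not part of the repository:

```go
package example

import (
	"fmt"

	"github.com/timescale/promscale/pkg/pgmodel/model"
)

// countPoints walks any Insertable, using the narrower iterator interfaces
// to read values. A concrete iterator can only satisfy one of the two,
// since the Value methods differ in signature.
func countPoints(in model.Insertable) (int, error) {
	n := 0
	switch it := in.Iterator().(type) {
	case model.SamplesIterator:
		for it.HasNext() {
			_, _ = it.Value()
			n++
		}
	case model.ExemplarsIterator:
		for it.HasNext() {
			_, _, _ = it.Value()
			n++
		}
	default:
		return 0, fmt.Errorf("unknown iterator type %T", it)
	}
	return n, nil
}
```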
4 | 5 | package model 6 | 7 | import ( 8 | "context" 9 | "math" 10 | "time" 11 | 12 | "github.com/jackc/pgx/v5/pgtype" 13 | ) 14 | 15 | const ( 16 | MetricNameLabelName = "__name__" 17 | SchemaNameLabelName = "__schema__" 18 | ColumnNameLabelName = "__column__" 19 | ) 20 | 21 | var ( 22 | MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() 23 | MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() 24 | ) 25 | 26 | type Metadata struct { 27 | MetricFamily string `json:"metric,omitempty"` 28 | Unit string `json:"unit"` 29 | Type string `json:"type"` 30 | Help string `json:"help"` 31 | } 32 | 33 | // Dispatcher is responsible for inserting labels, series and data into the storage. 34 | type Dispatcher interface { 35 | InsertTs(ctx context.Context, rows Data) (uint64, error) 36 | InsertMetadata(context.Context, []Metadata) (uint64, error) 37 | CompleteMetricCreation(context.Context) error 38 | Close() 39 | } 40 | 41 | func TimestamptzToMs(t pgtype.Timestamptz) int64 { 42 | switch t.InfinityModifier { 43 | case pgtype.NegativeInfinity: 44 | return math.MinInt64 45 | case pgtype.Infinity: 46 | return math.MaxInt64 47 | default: 48 | return t.Time.UnixNano() / 1e6 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/label_list.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package model 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/jackc/pgx/v5/pgtype" 11 | "github.com/timescale/promscale/pkg/pgmodel/model/pgutf8str" 12 | ) 13 | 14 | type LabelList struct { 15 | names pgtype.FlatArray[pgutf8str.Text] 16 | values pgtype.FlatArray[pgutf8str.Text] 17 | } 18 | 19 | func NewLabelList(size int) *LabelList { 20 | return &LabelList{ 21 | // We want to avoid runtime conversion of []string to pgutf8str.TextArray. The best way to do that is 22 | // to use the pgutf8str.TextArray directly under the hood. 23 | // The implementations done here are kept in line with what happens in 24 | // https://github.com/jackc/pgtype/blob/master/text_array.go 25 | names: pgtype.FlatArray[pgutf8str.Text](make([]pgutf8str.Text, 0, size)), 26 | values: pgtype.FlatArray[pgutf8str.Text](make([]pgutf8str.Text, 0, size)), 27 | } 28 | } 29 | 30 | func (ls *LabelList) Add(name string, value string) error { 31 | var ( 32 | nameT pgutf8str.Text 33 | valueT pgutf8str.Text 34 | ) 35 | if err := nameT.Scan(name); err != nil { 36 | return fmt.Errorf("setting pgutf8str.Text: %w", err) 37 | } 38 | if err := valueT.Scan(value); err != nil { 39 | return fmt.Errorf("setting pgutf8str.Text: %w", err) 40 | } 41 | ls.names = append(ls.names, nameT) 42 | ls.values = append(ls.values, valueT) 43 | return nil 44 | } 45 | 46 | // Get returns the underlying names and values arrays.
47 | func (ls *LabelList) Get() (pgtype.FlatArray[pgutf8str.Text], pgtype.FlatArray[pgutf8str.Text]) { 48 | return ls.names, ls.values 49 | } 50 | 51 | func (ls *LabelList) Len() int { return len(ls.names) } 52 | func (ls *LabelList) Swap(i, j int) { 53 | ls.names[i], ls.names[j] = ls.names[j], ls.names[i] 54 | ls.values[i], ls.values[j] = ls.values[j], ls.values[i] 55 | } 56 | 57 | func (ls LabelList) Less(i, j int) bool { 58 | elemI := ls.names[i].String 59 | elemJ := ls.names[j].String 60 | // Sort by name, breaking ties on value so that the ordering is deterministic. 61 | return elemI < elemJ || (elemI == elemJ && ls.values[i].String < ls.values[j].String) 62 | } 63 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/metric.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | // MetricInfo contains all the database specific metric data. 4 | type MetricInfo struct { 5 | MetricID int64 6 | TableSchema, TableName, SeriesTable string 7 | } 8 | 9 | // Len returns the memory size of MetricInfo in bytes. 10 | func (v MetricInfo) Len() int { 11 | return 8 + len(v.TableSchema) + len(v.TableName) + len(v.SeriesTable) 12 | } 13 | -------------------------------------------------------------------------------- /pkg/pgmodel/model/samples.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package model 6 | 7 | import "github.com/timescale/promscale/pkg/prompb" 8 | 9 | type promSamples struct { 10 | series *Series 11 | samples []prompb.Sample 12 | } 13 | 14 | func NewPromSamples(series *Series, sampleSet []prompb.Sample) Insertable { 15 | return &promSamples{series, sampleSet} 16 | } 17 | 18 | func (t *promSamples) Series() *Series { 19 | return t.series 20 | } 21 | 22 | func (t *promSamples) Count() int { 23 | return len(t.samples) 24 | } 25 | 26 | type samplesIterator struct { 27 | curr int 28 | total int 29 | data []prompb.Sample 30 | } 31 | 32 | func (t *promSamples) MaxTs() int64 { 33 | numSamples := len(t.samples) 34 | if numSamples == 0 { 35 | // If no samples exist, return a negative int so that the stats 36 | // caller does not capture this value. 37 | return -1 38 | } 39 | return t.samples[numSamples-1].Timestamp 40 | } 41 | 42 | func (i *samplesIterator) HasNext() bool { 43 | return i.curr < i.total 44 | } 45 | 46 | // Value in samplesIterator does not return labels, since samples do not have labels. 47 | // It's the series that holds the labels for samples.
48 | func (i *samplesIterator) Value() (timestamp int64, value float64) { 49 | timestamp, value = i.data[i.curr].Timestamp, i.data[i.curr].Value 50 | i.curr++ 51 | return 52 | } 53 | 54 | func (t *promSamples) Iterator() Iterator { 55 | return &samplesIterator{data: t.samples, total: len(t.samples)} 56 | } 57 | 58 | func (t *promSamples) Type() InsertableType { 59 | return Sample 60 | } 61 | 62 | func (t *promSamples) IsOfType(typ InsertableType) bool { 63 | return Sample == typ 64 | } 65 | -------------------------------------------------------------------------------- /pkg/pgmodel/querier/query_builder_exemplar.go: -------------------------------------------------------------------------------- 1 | package querier 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/jackc/pgx/v5" 8 | "github.com/timescale/promscale/pkg/pgmodel/common/schema" 9 | pgmodel "github.com/timescale/promscale/pkg/pgmodel/model" 10 | ) 11 | 12 | const ( 13 | exemplarsBySeriesIDsSQLFormat = `SELECT s.labels, m.time, m.value, m.exemplar_label_values 14 | FROM %[1]s m 15 | INNER JOIN %[2]s s 16 | ON m.series_id = s.id 17 | WHERE m.series_id IN (%[3]s) 18 | AND time >= '%[4]s' 19 | AND time <= '%[5]s' 20 | GROUP BY s.id, m.time, m.value, m.exemplar_label_values` 21 | 22 | exemplarByMetricSQLFormat = `SELECT series.labels, result.time, result.value, result.exemplar_label_values 23 | FROM %[2]s series 24 | INNER JOIN LATERAL ( 25 | SELECT time, value, exemplar_label_values 26 | FROM %[1]s metric 27 | WHERE metric.series_id = series.id 28 | AND time >= '%[4]s' 29 | AND time <= '%[5]s' 30 | ORDER BY time 31 | ) as result ON (result.value is not null) 32 | WHERE 33 | %[3]s` 34 | ) 35 | 36 | func buildSingleMetricExemplarsQuery(metadata *evalMetadata) string { 37 | filter := metadata.timeFilter 38 | finalSQL := fmt.Sprintf(exemplarByMetricSQLFormat, 39 | pgx.Identifier{schema.PromDataExemplar, filter.metric}.Sanitize(), 40 | pgx.Identifier{schema.PromDataSeries, filter.metric}.Sanitize(), 41 | strings.Join(metadata.clauses, " AND "), 42 | filter.start, 43 | filter.end, 44 | ) 45 | return finalSQL 46 | } 47 | 48 | func buildMultipleMetricExemplarsQuery(filter timeFilter, series []pgmodel.SeriesID) (string, error) { 49 | s := make([]string, len(series)) 50 | for i, sID := range series { 51 | s[i] = fmt.Sprintf("%d", sID) 52 | } 53 | baseQuery := exemplarsBySeriesIDsSQLFormat 54 | return fmt.Sprintf( 55 | baseQuery, 56 | pgx.Identifier{schema.PromDataExemplar, filter.metric}.Sanitize(), 57 | pgx.Identifier{schema.PromDataSeries, filter.metric}.Sanitize(), 58 | strings.Join(s, ","), 59 | filter.start, 60 | filter.end, 61 | ), nil 62 | } 63 | -------------------------------------------------------------------------------- /pkg/pgmodel/querier/query_remote_read.go: -------------------------------------------------------------------------------- 1 | package querier 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/timescale/promscale/pkg/prompb" 8 | ) 9 | 10 | type queryRemoteRead struct { 11 | *pgxQuerier 12 | ctx context.Context 13 | } 14 | 15 | func newQueryRemoteRead(ctx context.Context, qr *pgxQuerier) *queryRemoteRead { 16 | return &queryRemoteRead{qr, ctx} 17 | } 18 | 19 | // Query implements the RemoteReadQuerier interface. It is the entrypoint for 20 | // remote read queries. 
21 | func (q *queryRemoteRead) Query(query *prompb.Query) ([]*prompb.TimeSeries, error) { 22 | if query == nil { 23 | return []*prompb.TimeSeries{}, nil 24 | } 25 | 26 | matchers, err := fromLabelMatchers(query.Matchers) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | qrySamples := newQuerySamples(q.ctx, q.pgxQuerier) 32 | sampleRows, _, err := qrySamples.fetchSamplesRows(query.StartTimestampMs, query.EndTimestampMs, nil, nil, nil, matchers) 33 | if err != nil { 34 | return nil, err 35 | } 36 | results, err := buildTimeSeries(sampleRows, q.tools.labelsReader) 37 | if err != nil { 38 | return nil, fmt.Errorf("building time-series: %w", err) 39 | } 40 | return results, nil 41 | } 42 | -------------------------------------------------------------------------------- /pkg/pgmodel/querier/timestamp_series.go: -------------------------------------------------------------------------------- 1 | package querier 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/jackc/pgx/v5/pgtype" 7 | prommodel "github.com/prometheus/common/model" 8 | "github.com/timescale/promscale/pkg/pgmodel/model" 9 | ) 10 | 11 | // TimestampSeries represents an array of timestamps (model.Time/int64) that is 0-indexed. 12 | type TimestampSeries interface { 13 | // At returns the element at an index location, as well as a bool to indicate 14 | // whether the value is valid (or NULL, for example). 15 | At(index int) (int64, bool) 16 | Len() int 17 | } 18 | 19 | // rowTimestampSeries is a TimestampSeries based on data fetched from a database row. 20 | type rowTimestampSeries struct { 21 | times *model.ReusableArray[pgtype.Timestamptz] 22 | } 23 | 24 | func newRowTimestampSeries(times *model.ReusableArray[pgtype.Timestamptz]) *rowTimestampSeries { 25 | return &rowTimestampSeries{times: times} 26 | } 27 | 28 | func (t *rowTimestampSeries) At(index int) (int64, bool) { 29 | return model.TimestamptzToMs(t.times.FlatArray[index]), t.times.FlatArray[index].Valid 30 | } 31 | 32 | func (t *rowTimestampSeries) Len() int { 33 | return len(t.times.FlatArray) 34 | } 35 | 36 | // regularTimestampSeries represents a time-series that is regular (i.e. each timestamp is one step duration ahead of the previous one). 37 | type regularTimestampSeries struct { 38 | start time.Time 39 | end time.Time 40 | step time.Duration 41 | len int 42 | } 43 | 44 | func newRegularTimestampSeries(start time.Time, end time.Time, step time.Duration) *regularTimestampSeries { 45 | n := int(end.Sub(start)/step) + 1 46 | return &regularTimestampSeries{ 47 | start: start, 48 | end: end, 49 | step: step, 50 | len: n, 51 | } 52 | } 53 | 54 | func (t *regularTimestampSeries) Len() int { 55 | return t.len 56 | } 57 | 58 | func (t *regularTimestampSeries) At(index int) (int64, bool) { 59 | ts := t.start.Add(time.Duration(index) * t.step) 60 | return int64(prommodel.TimeFromUnixNano(ts.UnixNano())), true 61 | } 62 | -------------------------------------------------------------------------------- /pkg/prompb/README.md: -------------------------------------------------------------------------------- 1 | The compiled protobufs are version controlled and you won't normally need to 2 | re-compile them when building Prometheus. 3 | 4 | If, however, you have modified the definitions and do need to re-compile, run 5 | `make proto` from the parent dir. 6 | 7 | In order for the script to run, you'll need `protoc` (version 3.12.3) in your 8 | PATH.
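To make the arithmetic in `regularTimestampSeries` (pkg/pgmodel/querier/timestamp_series.go above) concrete: for a one-minute range at a 15-second step, the length is `(end-start)/step + 1` and `At(i)` materializes `start + i*step` on demand instead of allocating a timestamp array. A self-contained illustration of the same computation (not repository code, since the type is unexported):

```go
package example

import (
	"fmt"
	"time"
)

// printRegularSeries recomputes what newRegularTimestampSeries stores for a
// one-minute range at a 15-second step: 5 points at :00, :15, :30, :45, :60.
func printRegularSeries() {
	start := time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC)
	end := start.Add(time.Minute)
	step := 15 * time.Second

	n := int(end.Sub(start)/step) + 1 // (60s / 15s) + 1 = 5
	for i := 0; i < n; i++ {
		fmt.Println(start.Add(time.Duration(i) * step).UnixMilli())
	}
}
```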
9 | -------------------------------------------------------------------------------- /pkg/prompb/custom.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prompb 15 | 16 | func (m Sample) T() int64 { return m.Timestamp } 17 | func (m Sample) V() float64 { return m.Value } 18 | -------------------------------------------------------------------------------- /pkg/prompb/custom.ts.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package prompb 6 | 7 | func (m *Labels) Reset() { *m = Labels{Labels: m.Labels[:0]} } 8 | func (m *WriteRequest) Reset() { 9 | *m = WriteRequest{Timeseries: m.Timeseries[:0], Metadata: m.Metadata[:0]} 10 | } 11 | func (m *TimeSeries) Reset() { 12 | *m = TimeSeries{Labels: m.Labels[:0], Exemplars: m.Exemplars[:0], Samples: m.Samples[:0]} 13 | } 14 | func (m *Exemplar) Reset() { *m = Exemplar{Labels: m.Labels[:0]} } 15 | -------------------------------------------------------------------------------- /pkg/promql/fuzz_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
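The `Reset` overrides in `pkg/prompb/custom.ts.go` above truncate slices to `[:0]` rather than allocating fresh structs, which is what makes pooling these types worthwhile: the backing arrays survive the reset. A stand-alone illustration of the idiom, using a plain slice instead of the generated types:

```go
package example

import "fmt"

type buffer struct{ samples []int }

// Reset truncates in place, mirroring the generated Reset methods: the length
// drops to zero but the backing array survives for the next use.
func (b *buffer) Reset() { b.samples = b.samples[:0] }

func demo() {
	b := &buffer{samples: make([]int, 0, 1024)}
	b.samples = append(b.samples, 1, 2, 3)
	b.Reset()
	fmt.Println(len(b.samples), cap(b.samples)) // 0 1024: capacity retained
}
```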
13 | 14 | // Only build when go-fuzz is in use 15 | //go:build gofuzz 16 | // +build gofuzz 17 | 18 | package promql 19 | 20 | import ( 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) { 27 | defer func() { 28 | if p := recover(); p == nil { 29 | t.Error("invalid content type should panic") 30 | } else { 31 | err, ok := p.(error) 32 | require.True(t, ok) 33 | require.Contains(t, err.Error(), "duplicate parameter name") 34 | } 35 | }() 36 | 37 | const invalidContentType = "application/openmetrics-text; charset=UTF-8; charset=utf-8" 38 | fuzzParseMetricWithContentType([]byte{}, invalidContentType) 39 | } 40 | -------------------------------------------------------------------------------- /pkg/promql/promql_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package promql 15 | 16 | import ( 17 | "path/filepath" 18 | "testing" 19 | 20 | "github.com/stretchr/testify/require" 21 | ) 22 | 23 | func TestEvaluations(t *testing.T) { 24 | files, err := filepath.Glob("testdata/*.test") 25 | require.NoError(t, err) 26 | 27 | for _, fn := range files { 28 | t.Run(fn, func(t *testing.T) { 29 | test, err := newTestFromFile(t, fn) 30 | require.NoError(t, err) 31 | require.NoError(t, test.Run()) 32 | 33 | test.Close() 34 | }) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/promql/testdata/collision.test: -------------------------------------------------------------------------------- 1 | 2 | load 1s 3 | node_namespace_pod:kube_pod_info:{namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 4 | node_cpu_seconds_total{cpu="10",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 5 | node_cpu_seconds_total{cpu="35",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 6 | node_cpu_seconds_total{cpu="89",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 7 | 8 | eval instant at 4s count by(namespace, pod, cpu) (node_cpu_seconds_total{cpu=~".*",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v"}) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{namespace="observability",pod="node-exporter-l454v"} 9 | {cpu="10",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 10 | {cpu="35",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 11 | 
{cpu="89",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 12 | 13 | clear 14 | 15 | # Test duplicate labelset in promql output. 16 | load 5m 17 | testmetric1{src="a",dst="b"} 0 18 | testmetric2{src="a",dst="b"} 1 19 | 20 | eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) 21 | 22 | clear -------------------------------------------------------------------------------- /pkg/promql/testdata/literals.test: -------------------------------------------------------------------------------- 1 | eval instant at 50m 12.34e6 2 | 12340000 3 | 4 | eval instant at 50m 12.34e+6 5 | 12340000 6 | 7 | eval instant at 50m 12.34e-6 8 | 0.00001234 9 | 10 | eval instant at 50m 1+1 11 | 2 12 | 13 | eval instant at 50m 1-1 14 | 0 15 | 16 | eval instant at 50m 1 - -1 17 | 2 18 | 19 | eval instant at 50m .2 20 | 0.2 21 | 22 | eval instant at 50m +0.2 23 | 0.2 24 | 25 | eval instant at 50m -0.2e-6 26 | -0.0000002 27 | 28 | eval instant at 50m +Inf 29 | +Inf 30 | 31 | eval instant at 50m inF 32 | +Inf 33 | 34 | eval instant at 50m -inf 35 | -Inf 36 | 37 | eval instant at 50m NaN 38 | NaN 39 | 40 | eval instant at 50m nan 41 | NaN 42 | 43 | eval instant at 50m 2. 44 | 2 45 | 46 | eval instant at 50m 1 / 0 47 | +Inf 48 | 49 | eval instant at 50m ((1) / (0)) 50 | +Inf 51 | 52 | eval instant at 50m -1 / 0 53 | -Inf 54 | 55 | eval instant at 50m 0 / 0 56 | NaN 57 | 58 | eval instant at 50m 1 % 0 59 | NaN 60 | -------------------------------------------------------------------------------- /pkg/promql/testdata/staleness.test: -------------------------------------------------------------------------------- 1 | load 10s 2 | metric 0 1 stale 2 3 | 4 | # Instant vector doesn't return series when stale. 5 | eval instant at 10s metric 6 | {__name__="metric"} 1 7 | 8 | eval instant at 20s metric 9 | 10 | eval instant at 30s metric 11 | {__name__="metric"} 2 12 | 13 | eval instant at 40s metric 14 | {__name__="metric"} 2 15 | 16 | # It goes stale 5 minutes after the last sample. 17 | eval instant at 330s metric 18 | {__name__="metric"} 2 19 | 20 | eval instant at 331s metric 21 | 22 | 23 | # Range vector ignores stale sample. 24 | eval instant at 30s count_over_time(metric[1m]) 25 | {} 3 26 | 27 | eval instant at 10s count_over_time(metric[1s]) 28 | {} 1 29 | 30 | eval instant at 20s count_over_time(metric[1s]) 31 | 32 | eval instant at 20s count_over_time(metric[10s]) 33 | {} 1 34 | 35 | 36 | clear 37 | 38 | load 10s 39 | metric 0 40 | 41 | # Series with single point goes stale after 5 minutes. 42 | eval instant at 0s metric 43 | {__name__="metric"} 0 44 | 45 | eval instant at 150s metric 46 | {__name__="metric"} 0 47 | 48 | eval instant at 300s metric 49 | {__name__="metric"} 0 50 | 51 | eval instant at 301s metric 52 | -------------------------------------------------------------------------------- /pkg/query/query_engine.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package query 6 | 7 | import ( 8 | "time" 9 | 10 | "github.com/go-kit/log" 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/timescale/promscale/pkg/promql" 13 | ) 14 | 15 | func NewEngine(logger log.Logger, queryTimeout, lookBackDelta, subqueryDefaultStepInterval time.Duration, maxSamples int, enabledFeaturesMap map[string]struct{}) (*promql.Engine, error) { 16 | engineOpts := promql.EngineOpts{ 17 | Logger: logger, 18 | Reg: prometheus.NewRegistry(), 19 | MaxSamples: maxSamples, 20 | Timeout: queryTimeout, 21 | LookbackDelta: lookBackDelta, 22 | NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(subqueryDefaultStepInterval) }, 23 | } 24 | 25 | _, engineOpts.EnableAtModifier = enabledFeaturesMap["promql-at-modifier"] 26 | _, engineOpts.EnableNegativeOffset = enabledFeaturesMap["promql-negative-offset"] 27 | _, engineOpts.EnablePerStepStats = enabledFeaturesMap["promql-per-step-stats"] 28 | return promql.NewEngine(engineOpts), nil 29 | } 30 | 31 | func durationMilliseconds(d time.Duration) int64 { 32 | return int64(d / (time.Millisecond / time.Nanosecond)) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/rules/adapters/query.go: -------------------------------------------------------------------------------- 1 | package adapters 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/prometheus/prometheus/model/labels" 8 | "github.com/prometheus/prometheus/storage" 9 | "github.com/timescale/promscale/pkg/promql" 10 | ) 11 | 12 | type queryAdapter struct { 13 | queryable promql.Queryable 14 | } 15 | 16 | // NewQueryAdapter acts as an adapter to make Promscale's Queryable compatible with storage.Queryable 17 | func NewQueryAdapter(q promql.Queryable) *queryAdapter { 18 | return &queryAdapter{q} 19 | } 20 | 21 | func (q *queryAdapter) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { 22 | qr, err := q.queryable.SamplesQuerier(ctx, mint, maxt) 23 | if err != nil { 24 | return nil, fmt.Errorf("samples-querier: %w", err) 25 | } 26 | return querierAdapter{qr}, nil 27 | } 28 | 29 | type querierAdapter struct { 30 | qr promql.SamplesQuerier 31 | } 32 | 33 | func (q querierAdapter) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { 34 | // Pushdowns are not supported here. This is fine as Prometheus rule-manager only uses queryable to know 35 | // the previous state of the alert. This function is not used in recording/alerting rules evaluation. 36 | seriesSet, _ := q.qr.Select(sortSeries, hints, nil, nil, matchers...) 37 | return seriesSet 38 | } 39 | 40 | func (q querierAdapter) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { 41 | if len(matchers) > 0 { 42 | // Weak TODO: We need to implement the matchers. 43 | // Note: We behave the same as Prometheus does at the moment. 44 | // See https://github.com/prometheus/prometheus/blob/9558b9b54bd3d0cb1d63b9084f8cbcda6b0d72fb/tsdb/index/index.go#L1483 45 | return nil, nil, fmt.Errorf("searching by matchers not implemented in LabelValues()") 46 | } 47 | 48 | return q.qr.LabelValues(name) 49 | } 50 | 51 | func (q querierAdapter) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { 52 | return q.qr.LabelNames(matchers...) 
53 | } 54 | 55 | func (q querierAdapter) Close() error { 56 | q.qr.Close() 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /pkg/rules/rules_test.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package rules 6 | 7 | import ( 8 | "context" 9 | "testing" 10 | 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/stretchr/testify/require" 13 | "github.com/timescale/promscale/pkg/pgclient" 14 | ) 15 | 16 | func TestRegexInConfig(t *testing.T) { 17 | cfg := DefaultConfig 18 | cfg.PrometheusConfigAddress = "./testdata/rules.glob.config.yaml" 19 | 20 | m, reloader, err := NewManager(context.Background(), prometheus.NewRegistry(), &pgclient.Client{}, &cfg) 21 | require.NoError(t, err) 22 | require.NoError(t, reloader()) 23 | 24 | ruleGroups := m.RuleGroups() 25 | require.Equal(t, "g-one", ruleGroups[0].Name()) 26 | require.Equal(t, "g-two", ruleGroups[1].Name()) 27 | } 28 | -------------------------------------------------------------------------------- /pkg/rules/testdata/alert_config.good.config.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1m 3 | evaluation_interval: 1m 4 | 5 | alerting: 6 | alertmanagers: 7 | - scheme: https 8 | static_configs: 9 | - targets: [] 10 | 11 | rule_files: 12 | - rules.yaml -------------------------------------------------------------------------------- /pkg/rules/testdata/no_rules.bad.config.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 20s 3 | evaluation_interval: 30s 4 | 5 | rules_files: [] -------------------------------------------------------------------------------- /pkg/rules/testdata/no_rules.good.config.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1m 3 | evaluation_interval: 1m 4 | -------------------------------------------------------------------------------- /pkg/rules/testdata/non_existent_rules.good.config.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1m 3 | evaluation_interval: 1m 4 | 5 | rule_files: 6 | - non_existent_rules.yaml -------------------------------------------------------------------------------- /pkg/rules/testdata/rules.glob.config.yaml: -------------------------------------------------------------------------------- 1 | rule_files: 2 | - rules_dir/* -------------------------------------------------------------------------------- /pkg/rules/testdata/rules.good.config.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1m 3 | evaluation_interval: 1m 4 | 5 | rule_files: 6 | - rules.yaml -------------------------------------------------------------------------------- /pkg/rules/testdata/rules.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: promscale-general 3 | rules: 4 | - record: Rule 5 | expr: up 6 | labels: 7 | component: rule -------------------------------------------------------------------------------- /pkg/rules/testdata/rules_dir/rule.one.yaml: 
-------------------------------------------------------------------------------- 1 | groups: 2 | - name: g-one 3 | rules: 4 | - record: One 5 | expr: up -------------------------------------------------------------------------------- /pkg/rules/testdata/rules_dir/rule.two.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: g-two 3 | rules: 4 | - record: Two 5 | expr: up -------------------------------------------------------------------------------- /pkg/runner/args.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | package runner 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | 10 | "github.com/timescale/promscale/pkg/version" 11 | ) 12 | 13 | // ParseArgs parses the provided args and prints accordingly. This function should be called before ParseFlags in order 14 | // to process the non-input flags like "-version". 15 | func ParseArgs(args []string) (shouldProceed bool) { 16 | shouldProceed = true 17 | for _, flag := range args { 18 | // Trim leading dashes so that both "-version" and "--version" are recognized. 19 | flag = strings.TrimLeft(flag, "-") 20 | switch flag { 21 | case "version": 22 | shouldProceed = false 23 | fmt.Println(version.Promscale) 24 | } 25 | } 26 | return 27 | } 28 | -------------------------------------------------------------------------------- /pkg/runner/codec.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | package runner 5 | 6 | import ( 7 | gogoproto "github.com/gogo/protobuf/proto" 8 | "google.golang.org/grpc/encoding" 9 | ) 10 | 11 | // This file undoes what https://github.com/jaegertracing/jaeger/blob/master/pkg/gogocodec/codec.go does 12 | // in that it forces everything (both jaeger and not jaeger) to use the new protobuf lib instead of the old one 13 | func init() { 14 | encoding.RegisterCodec(newCodec()) 15 | } 16 | 17 | type gogoCodec struct { 18 | } 19 | 20 | var _ encoding.Codec = (*gogoCodec)(nil) 21 | 22 | func newCodec() *gogoCodec { 23 | return &gogoCodec{} 24 | } 25 | 26 | // Name implements encoding.Codec 27 | func (c *gogoCodec) Name() string { 28 | return "proto" 29 | } 30 | 31 | // Marshal implements encoding.Codec 32 | func (c *gogoCodec) Marshal(v interface{}) ([]byte, error) { 33 | return gogoproto.Marshal(v.(gogoproto.Message)) 34 | } 35 | 36 | // Unmarshal implements encoding.Codec 37 | func (c *gogoCodec) Unmarshal(data []byte, v interface{}) error { 38 | return gogoproto.Unmarshal(data, v.(gogoproto.Message)) 39 | } 40 | -------------------------------------------------------------------------------- /pkg/telemetry/telemetry_test.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
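A sketch of using the codec that `pkg/runner/codec.go` above registers under the name "proto"; it assumes something has imported `pkg/runner` so its init has run, and uses `prompb.WriteRequest` purely as an example of a gogo-generated message (the `roundTrip` helper itself is hypothetical):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/encoding"

	"github.com/timescale/promscale/pkg/prompb"
)

// roundTrip marshals and unmarshals a message through whichever codec is
// registered under "proto"; once the init above has run, that is the gogo codec.
func roundTrip(in *prompb.WriteRequest) (*prompb.WriteRequest, error) {
	codec := encoding.GetCodec("proto")
	if codec == nil {
		return nil, fmt.Errorf("no codec registered under %q", "proto")
	}
	raw, err := codec.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := new(prompb.WriteRequest)
	if err := codec.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}
```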
4 | 5 | package telemetry 6 | 7 | import ( 8 | "testing" 9 | 10 | "github.com/prometheus/client_golang/prometheus" 11 | "github.com/stretchr/testify/require" 12 | "github.com/timescale/promscale/pkg/tests/testsupport" 13 | ) 14 | 15 | func TestRegisterMetric(t *testing.T) { 16 | metric := prometheus.NewGauge(prometheus.GaugeOpts{Namespace: "test", Name: "extraction"}) 17 | 18 | engine := &engineImpl{} 19 | _, has := engine.metrics.Load("some_stats") 20 | require.False(t, has) 21 | 22 | require.NoError(t, engine.RegisterMetric("some_stats", metric)) 23 | 24 | _, has = engine.metrics.Load("some_stats") 25 | require.True(t, has) 26 | 27 | wrongMetric := prometheus.NewHistogram(prometheus.HistogramOpts{Namespace: "test", Name: "wrong", Buckets: prometheus.DefBuckets}) 28 | wrongMetric.Observe(164) 29 | 30 | require.Error(t, engine.RegisterMetric("some_wrong_stats", wrongMetric)) 31 | 32 | _, has = engine.metrics.Load("some_wrong_stats") 33 | require.False(t, has) 34 | } 35 | 36 | func TestEngineStop(t *testing.T) { 37 | engine := &engineImpl{ 38 | conn: testsupport.MockPgxConn{}, 39 | } 40 | engine.Start() 41 | engine.Stop() 42 | } 43 | -------------------------------------------------------------------------------- /pkg/tenancy/authorizer.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package tenancy 6 | 7 | import "fmt" 8 | 9 | // Authorizer authorizes the read/write operations in multi-tenancy. 10 | type Authorizer interface { 11 | // ReadAuthorizer returns an authorizer that authorizes read operations. 12 | ReadAuthorizer() ReadAuthorizer 13 | // WriteAuthorizer returns an authorizer that authorizes write operations. 14 | WriteAuthorizer() WriteAuthorizer 15 | } 16 | 17 | // genericAuthorizer implements the tenancy concept in Promscale. 18 | type genericAuthorizer struct { 19 | write WriteAuthorizer 20 | read ReadAuthorizer 21 | } 22 | 23 | // NewAuthorizer returns a new multi-tenancy Authorizer. 24 | func NewAuthorizer(c AuthConfig) (Authorizer, error) { 25 | readAuthr, err := NewReadAuthorizer(c) 26 | if err != nil { 27 | return nil, fmt.Errorf("creating tenancy: %w", err) 28 | } 29 | writeAuthr := NewWriteAuthorizer(c) 30 | return &genericAuthorizer{ 31 | read: readAuthr, 32 | write: writeAuthr, 33 | }, nil 34 | } 35 | 36 | func (mt *genericAuthorizer) ReadAuthorizer() ReadAuthorizer { 37 | return mt.read 38 | } 39 | 40 | func (mt *genericAuthorizer) WriteAuthorizer() WriteAuthorizer { 41 | return mt.write 42 | } 43 | 44 | type noopAuthorizer struct{} 45 | 46 | // NewNoopAuthorizer returns a no-op Authorizer that is used when tenancy operations are not required. 47 | func NewNoopAuthorizer() Authorizer { 48 | return &noopAuthorizer{} 49 | } 50 | 51 | func (np *noopAuthorizer) ReadAuthorizer() ReadAuthorizer { 52 | return nil 53 | } 54 | 55 | func (np *noopAuthorizer) WriteAuthorizer() WriteAuthorizer { 56 | return nil 57 | } 58 | -------------------------------------------------------------------------------- /pkg/tenancy/interface.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license.
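A sketch of how the `Authorizer` split in `pkg/tenancy/authorizer.go` above might be wired at startup; `buildAuthorizer` and its `multiTenancyEnabled` flag are illustrative assumptions, not repository code:

```go
package example

import "github.com/timescale/promscale/pkg/tenancy"

// buildAuthorizer is a hypothetical wiring helper: it picks the validating or
// the no-op implementation once, and the read/write paths only ever see the
// Authorizer interface.
func buildAuthorizer(multiTenancyEnabled bool, cfg tenancy.AuthConfig) (tenancy.Authorizer, error) {
	if !multiTenancyEnabled {
		return tenancy.NewNoopAuthorizer(), nil
	}
	return tenancy.NewAuthorizer(cfg)
}
```

Since the no-op implementation returns nil from both accessors, callers have to treat a nil `ReadAuthorizer` or `WriteAuthorizer` (see the interfaces defined just below) as "tenancy checks disabled".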
4 | 5 | package tenancy 6 | 7 | import ( 8 | "fmt" 9 | "net/http" 10 | 11 | "github.com/prometheus/prometheus/model/labels" 12 | "github.com/timescale/promscale/pkg/prompb" 13 | ) 14 | 15 | const regexOR = "|" 16 | 17 | var ErrUnauthorizedTenant = fmt.Errorf("unauthorized or invalid tenant") 18 | 19 | // ReadAuthorizer tells if a read request is allowed to query via Promscale. 20 | type ReadAuthorizer interface { 21 | // AppendTenantMatcher applies a safety matcher to incoming query matchers. This safety matcher is responsible 22 | // for preventing unauthorized reads from tenants that the incoming query is not supposed to access. 23 | AppendTenantMatcher(ms []*labels.Matcher) []*labels.Matcher 24 | } 25 | 26 | // WriteAuthorizer tells if a write request is authorized to be written. 27 | type WriteAuthorizer interface { 28 | // Process processes the incoming write requests to be multi-tenancy compatible. 29 | Process(*http.Request, *prompb.WriteRequest) error 30 | } 31 | -------------------------------------------------------------------------------- /pkg/tenancy/read.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 4 | 5 | package tenancy 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/prometheus/prometheus/model/labels" 11 | ) 12 | 13 | type readAuthorizer struct { 14 | AuthConfig 15 | // mtSafetyLabelMatcher is a label matcher that is applied to incoming multi-tenant read requests for security reasons. 16 | // This matcher helps prevent a query from reading a tenant for which the query has not been authorized. 17 | mtSafetyLabelMatcher *labels.Matcher 18 | } 19 | 20 | // NewReadAuthorizer returns an authorizer for performing read operations on valid tenants. 21 | func NewReadAuthorizer(cfg AuthConfig) (ReadAuthorizer, error) { 22 | matcher, err := cfg.getTenantSafetyMatcher() 23 | if err != nil { 24 | return nil, fmt.Errorf("get safety tenant matcher: %w", err) 25 | } 26 | return &readAuthorizer{ 27 | AuthConfig: cfg, 28 | mtSafetyLabelMatcher: matcher, 29 | }, nil 30 | } 31 | 32 | func (a *readAuthorizer) AppendTenantMatcher(ms []*labels.Matcher) []*labels.Matcher { 33 | if a.mtSafetyLabelMatcher == nil { 34 | return ms 35 | } 36 | ms = append(ms, a.mtSafetyLabelMatcher) 37 | return ms 38 | } 39 | -------------------------------------------------------------------------------- /pkg/tests/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | ) 7 | 8 | var ( 9 | PromscaleExtensionVersion string 10 | PromscaleExtensionContainer string 11 | ) 12 | 13 | func init() { 14 | content, err := os.ReadFile("../../../EXTENSION_VERSION") 15 | if err != nil { 16 | panic(err) 17 | } 18 | 19 | PromscaleExtensionVersion = strings.TrimSpace(string(content)) 20 | PromscaleExtensionContainer = "ghcr.io/timescale/dev_promscale_extension:" + PromscaleExtensionVersion + "-ts2-pg15" 21 | } 22 | -------------------------------------------------------------------------------- /pkg/tests/end_to_end_tests/README.md: -------------------------------------------------------------------------------- 1 | Run these tests by executing `go test` in this directory. A specific test can 2 | be targeted by running `go test -run <test-name>`.
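For example, `go test -run TestJaegerSpanIngestion` runs just the Jaeger span ingestion test defined in this package.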
3 | 4 | The test binary takes a number of parameters which configure its runtime 5 | behaviour. These are defined at the top of `main_test.go` in this directory. 6 | 7 | Some particularly interesting options are: 8 | 9 | - `-update`: Updates the golden SQL files which are used as a reference 10 | - `-extended`: Run extended testing dataset and PromQL queries 11 | - `-use-extension`: Use the promscale extension 12 | - `-use-docker`: The test harness will start `timescaledb` in a docker 13 | container to test against. If set to `false`, the test harness will attempt 14 | to connect to `localhost:5432` 15 | -------------------------------------------------------------------------------- /pkg/tests/end_to_end_tests/jaeger_store_test.go: -------------------------------------------------------------------------------- 1 | package end_to_end_tests 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/jackc/pgx/v5/pgxpool" 8 | "github.com/stretchr/testify/require" 9 | jaegerstore "github.com/timescale/promscale/pkg/jaeger/store" 10 | ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" 11 | "github.com/timescale/promscale/pkg/pgxconn" 12 | ) 13 | 14 | // Similar to TestQueryTraces, but uses Jaeger span ingestion interface. 15 | func TestJaegerSpanIngestion(t *testing.T) { 16 | withDB(t, "jaeger_span_store_e2e", func(db *pgxpool.Pool, t testing.TB) { 17 | ingestor, err := ingstr.NewPgxIngestorForTests(pgxconn.NewPgxConn(db), nil) 18 | require.NoError(t, err) 19 | defer ingestor.Close() 20 | 21 | jaegerStore := jaegerstore.New(pgxconn.NewQueryLoggingPgxConn(db), ingestor, &jaegerstore.DefaultConfig) 22 | 23 | fixtures, err := getTracesFixtures() 24 | require.NoError(t, err) 25 | for _, b := range fixtures.batches { 26 | for _, s := range b.Spans { 27 | err = jaegerStore.SpanWriter().WriteSpan(context.Background(), s) 28 | require.NoError(t, err) 29 | } 30 | } 31 | 32 | getOperationsTest(t, jaegerStore) 33 | findTraceTest(t, jaegerStore, fixtures) 34 | getDependenciesTest(t, jaegerStore) 35 | }) 36 | } 37 | -------------------------------------------------------------------------------- /pkg/tests/end_to_end_tests/metrics_duplicate_insert_test.go: -------------------------------------------------------------------------------- 1 | package end_to_end_tests 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/jackc/pgx/v5/pgxpool" 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/prometheus/client_golang/prometheus/testutil" 10 | "github.com/stretchr/testify/require" 11 | ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" 12 | "github.com/timescale/promscale/pkg/pgmodel/metrics" 13 | "github.com/timescale/promscale/pkg/pgxconn" 14 | ) 15 | 16 | func TestMetricsDuplicateInsert(t *testing.T) { 17 | ctx := context.Background() 18 | ts := generateSmallTimeseries() 19 | withDB(t, "metrics_duplicate_insert_test", func(db *pgxpool.Pool, t testing.TB) { 20 | ingestor, err := ingstr.NewPgxIngestorForTests(pgxconn.NewPgxConn(db), nil) 21 | require.NoError(t, err) 22 | defer ingestor.Close() 23 | _, _, err = ingestor.IngestMetrics(ctx, newWriteRequestWithTs(copyMetrics(ts))) 24 | require.NoError(t, err) 25 | // Previous tests might have ingested duplicates.
26 | duplicatesBefore := testutil.ToFloat64(metrics.IngestorDuplicates.With(prometheus.Labels{"type": "metric", "kind": "sample"})) 27 | _, _, err = ingestor.IngestMetrics(ctx, newWriteRequestWithTs(copyMetrics(ts))) 28 | require.NoError(t, err) 29 | require.Greater(t, testutil.ToFloat64(metrics.IngestorDuplicates.With(prometheus.Labels{"type": "metric", "kind": "sample"})), duplicatesBefore, "duplicates insert must have occurred") 30 | }) 31 | } 32 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/generate.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | // +build ignore 3 | 4 | // This file and its contents are licensed under the Apache License 2.0. 5 | // Please see the included NOTICE for copyright information and 6 | // LICENSE for a copy of the license. 7 | 8 | // This file is a binary that generates migration_files_generated.go 9 | // it is not built by default, but rather invoked by the go:generate command 10 | // defined in migrations.go 11 | package main 12 | 13 | import ( 14 | "log" 15 | "net/http" 16 | 17 | "github.com/shurcooL/vfsgen" 18 | "github.com/timescale/promscale/pkg/migrations" 19 | ) 20 | 21 | var Assets http.FileSystem = migrations.NewModTimeFs(http.Dir("sql")) 22 | 23 | func main() { 24 | err := vfsgen.Generate(Assets, vfsgen.Options{ 25 | Filename: "migration_files_generated.go", 26 | PackageName: "test_migrations", 27 | VariableName: "MigrationFiles", 28 | }) 29 | if err != nil { 30 | log.Fatalln(err) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/migrations.go: -------------------------------------------------------------------------------- 1 | // This file and its contents are licensed under the Apache License 2.0. 2 | // Please see the included NOTICE for copyright information and 3 | // LICENSE for a copy of the license. 
4 | 5 | package test_migrations 6 | 7 | // This is a stub to define the go:generate command to create the go file 8 | // that embeds the sql files into a go variable to make the sql files part 9 | // of the binary 10 | 11 | //go:generate go run -tags=dev generate.go 12 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/idempotent/1-toc-run_second.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('idempotent 2'); -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/idempotent/2-toc-run_first.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('idempotent 1'); -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/preinstall/001-setup.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE log (msg TEXT, id serial); 2 | 3 | INSERT INTO log VALUES('setup'); -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.1.0-dev/1-migration.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.1.0'); -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.10.0-dev/1-migr_98_at.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.0=1'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.10.0-dev/2-1_mig.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.0=2'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.10.1-dev/1-migr_98_at.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.1=1'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.10.1-dev/2-1_mig.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.1=2'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.10.2-beta.dev/1-migr_98_at.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.2-beta=1'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.11.0-dev/1-migr_98_at.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.11.0=1'); 2 | -------------------------------------------------------------------------------- /pkg/tests/test_migrations/sql/versions/dev/0.11.0-dev/2-1_mig.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO log VALUES('migration 0.10.0=2'); 2 | SELECT 10/0; --will generate error 3 | -------------------------------------------------------------------------------- 
/pkg/tests/test_migrations/sql/versions/dev/0.2.0-dev/1-migration.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO log VALUES('migration 0.2.0');
--------------------------------------------------------------------------------
/pkg/tests/test_migrations/sql/versions/dev/0.9.0-dev/1-migration.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO log VALUES('migration 0.9.0');
2 | 
--------------------------------------------------------------------------------
/pkg/tests/testdata/import.json:
--------------------------------------------------------------------------------
1 | {"labels":{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"},"samples":[[1577836800000,100],[1577836801000,99],[1577836802000,98]]}
2 | {"labels":{"__name__": "cpu_usage", "namespace":"prod", "node": "brain"},"samples":[[1577836800000,100],[1577836801000,99],[1577836802000,98]]}
3 | {"labels":{"__name__": "cpu_usage", "namespace":"stage", "node": "brain"},"samples":[[1577836800000,100],[1577836801000,99],[1577836802000,98]]}
4 | 
5 | 
--------------------------------------------------------------------------------
/pkg/tests/testdata/jaeger_query_responses.sz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/pkg/tests/testdata/jaeger_query_responses.sz
--------------------------------------------------------------------------------
/pkg/tests/testdata/prometheus-data.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/pkg/tests/testdata/prometheus-data.tar.gz
--------------------------------------------------------------------------------
/pkg/tests/testdata/real-dataset.sz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timescale/promscale/6ee8545bf30d3bd1ba778cba1736eb0ac21169fe/pkg/tests/testdata/real-dataset.sz
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/alerts.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: promscale-general
3 |   rules:
4 |   - alert: Test
5 |     expr: absent(up)
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/config.alertmanager.yaml:
--------------------------------------------------------------------------------
1 | # https://github.com/prometheus/alertmanager/blob/f958b8be84b870e363f7dafcbeb807b463269a75/config/testdata/conf.empty-fields.yml#L1
2 | global:
3 |   smtp_smarthost: 'localhost:25'
4 |   smtp_from: 'alertmanager@example.org'
5 |   smtp_auth_username: ''
6 |   smtp_auth_password: ''
7 |   smtp_hello: ''
8 |   slack_api_url: 'https://slack.com/webhook'
9 | templates:
10 | - '/etc/alertmanager/template/*.tmpl'
11 | route:
12 |   group_by: ['alertname', 'cluster', 'service']
13 |   receiver: team-X-mails
14 |   routes:
15 |   - match_re:
16 |       service: ^(foo1|foo2|baz)$
17 |     receiver: team-X-mails
18 | receivers:
19 | - name: 'team-X-mails'
20 |   email_configs:
21 |   - to: 'team-X+alerts@example.org'
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/config.alerts.yaml:
--------------------------------------------------------------------------------
1 | global:
2 |   evaluation_interval: 1s
3 | alerting:
4 |   alertmanagers:
5 |   - static_configs:
6 |     - targets:
7 |       - 'localhost:9093'
8 | 
9 | rule_files:
10 | - alerts.yaml
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/config.empty_rules.yaml:
--------------------------------------------------------------------------------
1 | # An empty file indicates that recording rules are not applied.
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/config.recording_rules_eval.yaml:
--------------------------------------------------------------------------------
1 | global:
2 |   evaluation_interval: 100ms
3 | 
4 | rule_files:
5 | - rules.yaml
6 | 
--------------------------------------------------------------------------------
/pkg/tests/testdata/rules/rules.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: e2e
3 |   rules:
4 |   - record: test_rule
5 |     expr: firstMetric @ 4 + firstMetric @ 5
6 |     labels:
7 |       kind: recording
8 | 
--------------------------------------------------------------------------------
/pkg/tests/testdata/sql/support.sql:
--------------------------------------------------------------------------------
1 | \set ECHO all
2 | \set ON_ERROR_STOP 1
3 | 
4 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage');
5 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total');
6 | CALL _prom_catalog.finalize_metric_creation();
7 | INSERT INTO prom_data.cpu_usage
8 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}')
9 | FROM generate_series(1,10) g;
10 | INSERT INTO prom_data.cpu_usage
11 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}')
12 | FROM generate_series(1,10) g;
13 | INSERT INTO prom_data.cpu_total
14 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}')
15 | FROM generate_series(1,10) g;
16 | INSERT INTO prom_data.cpu_total
17 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}')
18 | FROM generate_series(1,10) g;
19 | 
20 | -- This should use a subquery with the Promscale extension but not without it;
21 | -- this is thanks to the support function make_call_subquery_support.
22 | ANALYZE;
23 | EXPLAIN (costs off) SELECT time, value, jsonb(labels), val(namespace_id) FROM cpu_usage WHERE labels ? ('namespace' !== 'dev') ORDER BY time, series_id LIMIT 5;
24 | 
--------------------------------------------------------------------------------
/pkg/tests/testdata/sql/views.sql:
--------------------------------------------------------------------------------
1 | \set ECHO all
2 | \set ON_ERROR_STOP 1
3 | 
4 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage');
5 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total');
6 | CALL _prom_catalog.finalize_metric_creation();
7 | INSERT INTO prom_data.cpu_usage
8 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}')
9 | FROM generate_series(1,10) g;
10 | INSERT INTO prom_data.cpu_usage
11 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}')
12 | FROM generate_series(1,10) g;
13 | INSERT INTO prom_data.cpu_total
14 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}')
15 | FROM generate_series(1,10) g;
16 | INSERT INTO prom_data.cpu_total
17 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}')
18 | FROM generate_series(1,10) g;
19 | 
20 | SELECT * FROM prom_info.label ORDER BY key;
21 | 
22 | \set ON_ERROR_STOP 0
23 | SELECT count(public.compress_chunk(i)) from public.show_chunks('prom_data.cpu_usage') i;
24 | \set ON_ERROR_STOP 1
25 | 
26 | SET role prom_reader;
27 | SELECT * FROM cpu_usage ORDER BY time, series_id LIMIT 5;
28 | SELECT time, value, jsonb(labels), val(namespace_id) FROM cpu_usage ORDER BY time, series_id LIMIT 5;
29 | SELECT * FROM prom_series.cpu_usage ORDER BY series_id;
30 | 
--------------------------------------------------------------------------------
/pkg/tests/testsupport/mock_pgx_conn.go:
--------------------------------------------------------------------------------
1 | package testsupport
2 | 
3 | import (
4 | 	"context"
5 | 
6 | 	"github.com/jackc/pgx/v5"
7 | 	"github.com/jackc/pgx/v5/pgconn"
8 | 	"github.com/jackc/pgx/v5/pgxpool"
9 | 	"github.com/timescale/promscale/pkg/pgxconn"
10 | )
11 | 
12 | type MockRow struct{}
13 | 
14 | func (MockRow) Scan(dest ...interface{}) error { return nil }
15 | 
16 | type MockBatchResults struct{}
17 | 
18 | func (MockBatchResults) Exec() (pgconn.CommandTag, error) {
19 | 	return pgconn.CommandTag{}, nil
20 | }
21 | 
22 | func (MockBatchResults) Query() (pgx.Rows, error) {
23 | 	return nil, nil
24 | }
25 | 
26 | func (MockBatchResults) QueryRow() pgx.Row {
27 | 	return MockRow{}
28 | }
29 | 
30 | func (MockBatchResults) Close() error { return nil }
31 | 
32 | type MockBatch struct{}
33 | 
34 | func (MockBatch) Queue(query string, arguments ...any) *pgx.QueuedQuery { return nil }
35 | 
36 | func (MockBatch) Len() int {
37 | 	return 0
38 | }
39 | 
40 | type MockPgxConn struct{}
41 | 
42 | func (MockPgxConn) Close() {}
43 | func (MockPgxConn) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error) {
44 | 	return pgconn.CommandTag{}, nil
45 | }
46 | func (MockPgxConn) Query(ctx context.Context, sql string, args ...interface{}) (pgxconn.PgxRows, error) {
47 | 	return nil, nil
48 | }
49 | func (MockPgxConn) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row {
50 | 	return MockRow{}
51 | }
52 | func (MockPgxConn) CopyFrom(
53 | 	ctx context.Context,
54 | 	tx pgx.Tx,
55 | 	tableName pgx.Identifier,
56 | 	columnNames []string,
57 | 	rowSrc pgx.CopyFromSource,
58 | 	oids []uint32,
59 | ) (int64, error) {
60 | 	return 0, nil
61 | }
62 | func (MockPgxConn) CopyFromRows(rows [][]interface{}) pgx.CopyFromSource { return nil }
63 | func (MockPgxConn) NewBatch() pgxconn.PgxBatch { return MockBatch{} }
64 | func (MockPgxConn) SendBatch(ctx context.Context, b pgxconn.PgxBatch) (pgx.BatchResults, error) {
65 | 	return MockBatchResults{}, nil
66 | }
67 | func (MockPgxConn) Acquire(ctx context.Context) (*pgxpool.Conn, error) { return nil, nil }
68 | func (MockPgxConn) BeginTx(ctx context.Context) (pgx.Tx, error) { return nil, nil }
69 | 
--------------------------------------------------------------------------------
/pkg/tracer/codec.go:
--------------------------------------------------------------------------------
1 | package tracer
2 | 
3 | import (
4 | 	"google.golang.org/grpc/encoding"
5 | 	"google.golang.org/protobuf/proto"
6 | )
7 | 
8 | // clientCodec is used to force the gRPC client into the
9 | // specific codec that works with the OTEL exporter.
10 | type clientCodec struct{}
11 | 
12 | var _ encoding.Codec = (*clientCodec)(nil)
13 | 
14 | func newClientCodec() *clientCodec {
15 | 	return &clientCodec{}
16 | }
17 | 
18 | // Name implements encoding.Codec
19 | func (c *clientCodec) Name() string {
20 | 	return "proto"
21 | }
22 | 
23 | // Marshal implements encoding.Codec
24 | func (c *clientCodec) Marshal(v interface{}) ([]byte, error) {
25 | 	return proto.Marshal(v.(proto.Message))
26 | }
27 | 
28 | // Unmarshal implements encoding.Codec
29 | func (c *clientCodec) Unmarshal(data []byte, v interface{}) error {
30 | 	return proto.Unmarshal(data, v.(proto.Message))
31 | }
32 | 
--------------------------------------------------------------------------------
/pkg/util/metrics.go:
--------------------------------------------------------------------------------
1 | package util
2 | 
3 | import (
4 | 	"fmt"
5 | 
6 | 	"github.com/prometheus/client_golang/prometheus"
7 | 	io_prometheus_client "github.com/prometheus/client_model/go"
8 | )
9 | 
10 | // HistogramBucketsSaturating returns an exponential histogram for a saturating metric.
11 | // It grows exponentially until max-10, and has another bucket for max.
12 | // This is done so we can tell from the histogram whether the resource was saturated or not.
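// For illustration, a hypothetical call (not taken from this repo's tests):
//
//	HistogramBucketsSaturating(1, 2, 100)
//
// returns [1 2 4 8 16 32 64 90 100]: exponential buckets up to max-10 (90),
// plus a final max bucket (100), so a saturated resource lands in the top bucket.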
13 | func HistogramBucketsSaturating(start float64, factor float64, max float64) []float64 {
14 | 	if max-10 < 1 {
15 | 		panic("HistogramBucketsSaturating needs a positive max")
16 | 	}
17 | 	if start < 0 {
18 | 		panic("HistogramBucketsSaturating needs a positive start value")
19 | 	}
20 | 	if factor <= 1 {
21 | 		panic("HistogramBucketsSaturating needs a factor greater than 1")
22 | 	}
23 | 	buckets := make([]float64, 0)
24 | 	for start < max-10 {
25 | 		buckets = append(buckets, start)
26 | 		if start == 0 {
27 | 			start = 1
28 | 			continue
29 | 		}
30 | 		start *= factor
31 | 	}
32 | 	buckets = append(buckets, max-10)
33 | 	buckets = append(buckets, max)
34 | 	return buckets
35 | }
36 | 
37 | func ExtractMetricValue(counterOrGauge prometheus.Metric) (float64, error) {
38 | 	var internal io_prometheus_client.Metric
39 | 	if err := counterOrGauge.Write(&internal); err != nil {
40 | 		return 0, fmt.Errorf("error writing metric: %w", err)
41 | 	}
42 | 	switch {
43 | 	case internal.Gauge != nil:
44 | 		return internal.Gauge.GetValue(), nil
45 | 	case internal.Counter != nil:
46 | 		return internal.Counter.GetValue(), nil
47 | 	case internal.Histogram != nil:
48 | 		if sampleCnt := internal.Histogram.GetSampleCount(); sampleCnt != 0 {
49 | 			return internal.Histogram.GetSampleSum() / float64(sampleCnt), nil
50 | 		} else {
51 | 			return 0.0, nil
52 | 		}
53 | 	default:
54 | 		return 0, fmt.Errorf("Gauge, Counter, and Histogram are all nil")
55 | 	}
56 | }
57 | 
58 | func ExtractMetricDesc(metric prometheus.Metric) (string, error) {
59 | 	return metric.Desc().String(), nil
60 | }
61 | 
--------------------------------------------------------------------------------
/pkg/util/throughput/throughput_test.go:
--------------------------------------------------------------------------------
1 | // This file and its contents are licensed under the Apache License 2.0.
2 | // Please see the included NOTICE for copyright information and
3 | // LICENSE for a copy of the license.
4 | 
5 | package throughput
6 | 
7 | import (
8 | 	"testing"
9 | 	"time"
10 | 
11 | 	"github.com/stretchr/testify/require"
12 | )
13 | 
14 | func TestThroughputWatcher(t *testing.T) {
15 | 	// Test that throughput is not reported.
16 | 	InitWatcher(0)
17 | 	require.True(t, throughputWatcher == nil)
18 | 
19 | 	// Test that throughput is reported.
20 | 	InitWatcher(time.Second)
21 | 	require.True(t, throughputWatcher.every == time.Second)
22 | }
23 | 
--------------------------------------------------------------------------------
/pkg/util/ticker.go:
--------------------------------------------------------------------------------
1 | package util
2 | 
3 | import "time"
4 | 
5 | type Ticker interface {
6 | 	Channel() <-chan time.Time
7 | 	Stop()
8 | }
9 | 
10 | type ticker struct {
11 | 	*time.Ticker
12 | }
13 | 
14 | func (t *ticker) Channel() <-chan time.Time {
15 | 	return t.C
16 | }
17 | 
18 | func NewTicker(d time.Duration) Ticker {
19 | 	return &ticker{time.NewTicker(d)}
20 | }
21 | 
22 | type ManualTicker struct {
23 | 	C chan time.Time
24 | }
25 | 
26 | func (m *ManualTicker) Channel() <-chan time.Time {
27 | 	return m.C
28 | }
29 | 
30 | func (m *ManualTicker) Tick() {
31 | 	m.C <- time.Now()
32 | }
33 | func (m *ManualTicker) Wait() {
34 | 	<-m.C
35 | }
36 | 
37 | func (m *ManualTicker) Stop() {
38 | 	panic("not implemented")
39 | }
40 | 
41 | func NewManualTicker(channelSize int) *ManualTicker {
42 | 	return &ManualTicker{C: make(chan time.Time, channelSize)}
43 | }
44 | 
--------------------------------------------------------------------------------
/scripts/fallback-docker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # This script takes a number of docker images and outputs the
4 | # first one which exists, or "no image present" if none exists.
5 | 
6 | for img in "$@"; do
7 |     if docker image inspect "${img}" 1>/dev/null 2>&1 || docker manifest inspect "${img}" 1>/dev/null 2>&1; then
8 |         echo "${img}"
9 |         exit 0
10 |     fi
11 | done
12 | echo "no image present"
13 | exit 1
14 | 
--------------------------------------------------------------------------------
/scripts/wait-for.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # from https://github.com/eficode/wait-for/tree/8d9b4446df0b71275ad1a1c68db0cc2bb6978228
3 | 
4 | TIMEOUT=15
5 | QUIET=0
6 | 
7 | echoerr() {
8 |   if [ "$QUIET" -ne 1 ]; then printf "%s\n" "$*" 1>&2; fi
9 | }
10 | 
11 | usage() {
12 |   cmdname=${0}
13 |   exitcode="$1"
14 |   cat << USAGE >&2
15 | Usage:
16 |   $cmdname host:port [-t timeout] [-- command args]
17 |   -q | --quiet                        Do not output any status messages
18 |   -t TIMEOUT | --timeout=timeout      Timeout in seconds, zero for no timeout
19 |   -- COMMAND ARGS                     Execute command with args after the test finishes
20 | USAGE
21 |   exit "$exitcode"
22 | }
23 | 
24 | wait_for() {
25 |   # shellcheck disable=SC2034
26 |   for i in $(seq $TIMEOUT) ; do
27 |     nc -z "$HOST" "$PORT" > /dev/null 2>&1
28 | 
29 |     result=$?
30 |     if [ $result -eq 0 ] ; then
31 |       if [ $# -gt 0 ] ; then
32 |         exec "$@"
33 |       fi
34 |       exit 0
35 |     fi
36 |     sleep 1
37 |   done
38 |   echo "Operation timed out" >&2
39 |   exit 1
40 | }
41 | 
42 | while [ $# -gt 0 ]
43 | do
44 |   case "$1" in
45 |     *:* )
46 |       HOST=$(printf "%s\n" "$1"| cut -d : -f 1)
47 |       PORT=$(printf "%s\n" "$1"| cut -d : -f 2)
48 |       shift 1
49 |       ;;
50 |     -q | --quiet)
51 |       QUIET=1
52 |       shift 1
53 |       ;;
54 |     -t)
55 |       TIMEOUT="$2"
56 |       if [ "$TIMEOUT" = "" ]; then break; fi
57 |       shift 2
58 |       ;;
59 |     --timeout=*)
60 |       TIMEOUT="${1#*=}"
61 |       shift 1
62 |       ;;
63 |     --)
64 |       shift
65 |       break
66 |       ;;
67 |     --help)
68 |       usage 0
69 |       ;;
70 |     *)
71 |       echoerr "Unknown argument: $1"
72 |       usage 1
73 |       ;;
74 |   esac
75 | done
76 | 
77 | if [ "$HOST" = "" ] || [ "$PORT" = "" ]; then
78 |   echoerr "Error: you need to provide a host and port to test."
79 |   usage 2
80 | fi
81 | 
82 | wait_for "$@"
83 | 
--------------------------------------------------------------------------------
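A closing note on ticker.go above: ManualTicker exists so tests can drive tick-based code deterministically instead of sleeping through real time. A minimal sketch of that pattern, assuming the util package above; the runEvery helper and the loop shutdown by closing the channel are illustrative assumptions, not code from this repo:

package main

import (
	"fmt"

	"github.com/timescale/promscale/pkg/util"
)

// runEvery is a hypothetical consumer of the Ticker interface: it runs work
// once per value received on the ticker's channel.
func runEvery(t util.Ticker, work func()) chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		for range t.Channel() {
			work()
		}
	}()
	return done
}

func main() {
	// Buffer of 1 so Tick() does not block the calling goroutine.
	tick := util.NewManualTicker(1)
	done := runEvery(tick, func() { fmt.Println("work ran") })

	tick.Tick() // fire one tick; no wall-clock time has to pass
	tick.Tick() // and a second one

	// ManualTicker.Stop panics by design, so this sketch ends the loop by
	// closing the channel directly.
	close(tick.C)
	<-done
}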