├── test ├── __init__.py ├── integration │ └── ..text ├── mock │ ├── data.yml │ └── mockdata.yaml └── units │ ├── test_fit.py │ ├── test_influx.py │ ├── test_prometheus.py │ ├── prometheus.proto │ └── prometheus_pb2.py ├── .coverage ├── demo ├── influxdb │ └── .env ├── grafana │ ├── config.monitoring │ ├── provisioning │ │ └── datasources │ │ │ └── datasource.yml │ └── dashboard.json ├── prometheus.yml ├── telegraf.conf └── docker-compose.yml ├── images ├── forecaster.PNG └── xforecast.png ├── xforecast-helm ├── templates │ ├── NOTES.txt │ ├── configmap.yaml │ ├── serviceaccount.yaml │ ├── tests │ │ └── test-connection.yaml │ ├── service.yaml │ ├── _helpers.tpl │ └── deployment.yaml ├── .helmignore ├── Chart.yaml └── values.yaml ├── requirements.txt ├── Dockerfile ├── docker-compose.yml ├── .gitignore ├── baseDockerfile ├── packages ├── datasources │ ├── logger.py │ ├── async-test.py │ ├── async-test-concurrent.py │ ├── prometheus.proto │ ├── influx.py │ ├── prometheus.py │ └── prometheus_pb2.py ├── helper │ ├── functions.py │ ├── test_functions.py │ ├── prometh.py │ └── fit_and_predict.py └── models │ ├── memory_usage.json │ └── cpu_usage.json ├── .github └── workflows │ ├── main.yml │ └── test.yml ├── design.md ├── config.yaml ├── README.md ├── main.py └── LICENSE /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/integration/..text: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.coverage: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/xforecast/HEAD/.coverage -------------------------------------------------------------------------------- /demo/influxdb/.env: -------------------------------------------------------------------------------- 1 | INFLUXDB_USERNAME=admin 2 | INFLUXDB_PASSWORD=admin -------------------------------------------------------------------------------- /images/forecaster.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/xforecast/HEAD/images/forecaster.PNG -------------------------------------------------------------------------------- /images/xforecast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/xforecast/HEAD/images/xforecast.png -------------------------------------------------------------------------------- /demo/grafana/config.monitoring: -------------------------------------------------------------------------------- 1 | GF_SECURITY_ADMIN_PASSWORD=mSxthPEKij 2 | GF_USERS_ALLOW_SIGN_UP=false -------------------------------------------------------------------------------- /xforecast-helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. 
The chart has been deployed. 2 | Application started using the config.yaml below: 3 | {{.Values.config}} 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-snappy 2 | pytest 3 | pandas 4 | pystan 5 | prophet==1.0.1 6 | pyyaml 7 | requests 8 | datetime 9 | influxdb 10 | protobuf==3.20.0 11 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM xmigrate/base:xforecast 2 | 3 | WORKDIR /app/workspace 4 | 5 | RUN apk add snappy 6 | 7 | COPY . . 8 | 9 | ENTRYPOINT ["python3.7","./main.py"] 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | forecaster: 5 | build: . 6 | ports: 7 | - "9100:9100" 8 | volumes: 9 | - ./config.yaml:/app/workspace/config.yaml 10 | 11 | -------------------------------------------------------------------------------- /xforecast-helm/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: {{ include "xforecast-helm.fullname" . }} 5 | data: 6 | config.yaml: |- 7 | {{.Values.config | indent 6}} 8 | 9 | -------------------------------------------------------------------------------- /test/mock/data.yml: -------------------------------------------------------------------------------- 1 | Time: 2 | - 2022-08-25 11:59:00 3 | - 2022-08-25 11:59:15 4 | - 2022-08-25 11:59:30 5 | - 2022-08-25 11:59:45 6 | - 2022-08-25 12:00:00 7 | y: 8 | - '138902.341796875' 9 | - '138902.341796875' 10 | - '138929.43359375' 11 | - '138929.43359375' 12 | - '138954.6875' 13 | -------------------------------------------------------------------------------- /demo/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | 4 | scrape_configs: 5 | - job_name: "prometheus" 6 | scrape_interval: 1m 7 | static_configs: 8 | - targets: ["localhost:9090"] 9 | 10 | - job_name: "node" 11 | static_configs: 12 | - targets: ["node-exporter:9100"] 13 | 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | *.log 14 | *.pyc 15 | .cache/ 16 | __pycache__/ 17 | 18 | packages/models/*.json 19 | 20 | # Dependency directories (remove the comment below to include it) 21 | # vendor/ 22 | -------------------------------------------------------------------------------- /baseDockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7-alpine 2 | 3 | WORKDIR . 
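# NOTE: `WORKDIR .` is effectively a no-op here: it resolves relative to the image's current working directory, which is `/` for python:3.7-alpine. The runtime image built on this base sets `WORKDIR /app/workspace` in the main Dockerfile.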
4 | 5 | COPY ./requirements.txt ./requirements.txt 6 | 7 | RUN apk update \ 8 | && apk --no-cache --update add --virtual build-dependencies build-base gcc snappy-dev zlib-dev jpeg-dev openjpeg-dev \ 9 | && pip3 install --upgrade --no-cache-dir pip && pip3 install --upgrade --no-cache-dir setuptools && pip3 install --no-cache-dir -r ./requirements.txt \ 10 | && apk del build-dependencies 11 | -------------------------------------------------------------------------------- /xforecast-helm/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "xforecast-helm.serviceAccountName" . }} 6 | labels: 7 | {{- include "xforecast-helm.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | automountServiceAccountToken: false -------------------------------------------------------------------------------- /xforecast-helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /xforecast-helm/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "xforecast-helm.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "xforecast-helm.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "xforecast-helm.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /xforecast-helm/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "xforecast-helm.fullname" . }} 6 | labels: 7 | {{- include "xforecast-helm.labels" . | nindent 4 }} 8 | spec: 9 | type: {{ .Values.service.type }} 10 | ports: 11 | - port: {{ .Values.service.port }} 12 | targetPort: http 13 | protocol: TCP 14 | name: http 15 | selector: 16 | {{- include "xforecast-helm.selectorLabels" . 
| nindent 4 }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /packages/datasources/logger.py: -------------------------------------------------------------------------------- 1 | import logging,sys 2 | logging.basicConfig( 3 | level=logging.INFO, 4 | format="%(asctime)s [%(levelname)s] %(message)s", 5 | handlers=[ 6 | logging.FileHandler("app.log"), 7 | logging.StreamHandler(sys.stdout) 8 | ] 9 | ) 10 | def logger(message, type): 11 | if type == "debug": 12 | logging.debug(message) 13 | elif type == "warning": 14 | logging.warning(message) 15 | elif type == "info": 16 | logging.info(message) 17 | elif type == "error": 18 | logging.error(message) 19 | elif type == "critical": 20 | logging.critical(message) -------------------------------------------------------------------------------- /packages/datasources/async-test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | async def print_hello(msg): 4 | a = 0 5 | while True: 6 | print(f"{msg} {a}") 7 | a += 1 8 | if a>5: 9 | b=2/0 10 | await asyncio.sleep(1) 11 | 12 | async def test(msg): 13 | a = asyncio.ensure_future(print_hello(msg)) 14 | #a is a future object. a.done when the task is ready, refer this https://docs.python.org/3/library/asyncio-future.html#asyncio.ensure_future 15 | while not a.done(): 16 | await asyncio.sleep(1) 17 | try: 18 | result = a.result() 19 | except asyncio.CancelledError: 20 | print("Someone cancelled") 21 | except Exception as e: 22 | print(f"Some error: {e}") 23 | 24 | asyncio.run(test("hi")) -------------------------------------------------------------------------------- /demo/telegraf.conf: -------------------------------------------------------------------------------- 1 | [global_tags] 2 | [agent] 3 | interval = "10s" 4 | round_interval = true 5 | metric_batch_size = 1000 6 | metric_buffer_limit = 10000 7 | collection_jitter = "0s" 8 | flush_interval = "10s" 9 | flush_jitter = "0s" 10 | precision = "0s" 11 | hostname = "" 12 | omit_hostname = false 13 | [[outputs.influxdb]] 14 | [[inputs.cpu]] 15 | percpu = true 16 | totalcpu = true 17 | collect_cpu_time = false 18 | report_active = false 19 | core_tags = false 20 | [[inputs.disk]] 21 | ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] 22 | [[inputs.diskio]] 23 | [[inputs.kernel]] 24 | [[inputs.mem]] 25 | [[inputs.processes]] 26 | [[inputs.swap]] 27 | [[inputs.system]] 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /test/units/test_fit.py: -------------------------------------------------------------------------------- 1 | from packages.helper.fit_and_predict import * 2 | import pytest 3 | 4 | @pytest.mark.asyncio 5 | async def test_fit_and_predict(): 6 | metric_name = "mockname" 7 | data_store = "mockstore" 8 | start_time = '2022-09-12 08:53:00' 9 | end_time = '2022-09-12 08:54:00' 10 | prev_stime = '2022-09-12 08:50:00' 11 | prev_etime = '2022-09-12 08:51:00' 12 | prom_query = "mockquery" 13 | write_back_metric = "mockmetric" 14 | model = {'hyperparameters': {'changepoint_prior_scale':0.05, 'seasonality_prior_scale':10, 'holidays_prior_scale':10, 'changepoint_range':0.8, 'seasonality_mode':'additive'}} 15 | r = await fit_and_predict(metric_name,data_store,start_time,end_time,prom_query,write_back_metric,model,prev_stime,prev_etime,periods=1,frequency='60s',old_model_loc=None,test=True) 
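# A sanity assertion on the response could be added here; this assumes fit_and_predict in packages/helper/fit_and_predict.py mirrors packages/helper/functions.py's fit_and_predict and returns a dict with a 'status' key:
# assert r['status'] in ('success', 'failure')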
-------------------------------------------------------------------------------- /packages/datasources/async-test-concurrent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | async def print_hello(msg): 4 | a = 0 5 | while True: 6 | print(f"{msg} {a}") 7 | a += 1 8 | if a>5: 9 | b=2/0 # deliberately raises ZeroDivisionError to exercise the error handling below 10 | await asyncio.sleep(1) 11 | 12 | async def test(): 13 | #multiple tasks running concurrently 14 | a = asyncio.gather(print_hello("hi"), print_hello("hello")) 15 | #a is a future object; a.done() is True when the task has finished, see https://docs.python.org/3/library/asyncio-future.html#asyncio.ensure_future 16 | while not a.done(): 17 | await asyncio.sleep(1) 18 | try: 19 | result = a.result() 20 | except asyncio.CancelledError: 21 | print("Someone cancelled") 22 | except Exception as e: 23 | print(f"Some error: {e}") 24 | 25 | asyncio.run(test()) -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: CI 4 | 5 | on: 6 | push: 7 | branches: 8 | - 'main' 9 | tags: 10 | - 'v*' 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - 18 | name: Checkout 19 | uses: actions/checkout@v2 20 | - 21 | name: Login to Docker Hub 22 | uses: docker/login-action@v1 23 | with: 24 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 25 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 26 | - 27 | name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v1 29 | - 30 | name: Build and push 31 | uses: docker/build-push-action@v2 32 | with: 33 | context: . 34 | file: ./Dockerfile 35 | push: true 36 | tags: ${{ secrets.DOCKER_HUB_USERNAME }}/xforecast:latest 37 | cache-from: type=registry,ref=${{ secrets.DOCKER_HUB_USERNAME }}/xforecast:buildcache 38 | cache-to: type=registry,ref=${{ secrets.DOCKER_HUB_USERNAME }}/xforecast:buildcache,mode=max 39 | -------------------------------------------------------------------------------- /test/units/test_influx.py: -------------------------------------------------------------------------------- 1 | from packages.datasources.influx import * 2 | 3 | with open('test/mock/mockdata.yaml') as f: 4 | mockdata = yaml.load(f, Loader=SafeLoader) 5 | 6 | def test_influx_getdata(): 7 | data_store = {'url':'localhost','port':8086,'user':'admin','pass':'admin','db_name':'telegraf','measurement':'cpu' } 8 | start_time = '2022-09-12 08:53:00' 9 | end_time = '2022-09-12 08:54:00' 10 | prev_stime = '2022-09-12 08:50:00' 11 | prev_etime = '2022-09-12 08:51:00' 12 | prom_query = 'SELECT mean("usage_idle") *-1 +100 FROM "autogen"."cpu" WHERE ("host" = '+"'ip-172-31-31-81') AND time >= '2022-09-12 08:53:00' AND time <= '2022-09-12 08:54:00' GROUP BY time(10s)" 13 | r = get_data_from_influxdb(data_store,start_time,end_time,prev_stime,prev_etime,prom_query,test=True) 14 | assert r == mockdata['influx_results'][1]['values']['value'] 15 | 16 | def test_influx_writedata(): 17 | val = 500 18 | tim = 1650000 19 | write_name = 'mockname' 20 | data_store = {'url':'localhost','port':8086,'user':'admin','pass':'admin','db_name':'telegraf','measurement':'cpu' } 21 | r = write_data_to_influxdb(val,tim,write_name,data_store,test=True) 22 | -------------------------------------------------------------------------------- /test/units/test_prometheus.py: 
-------------------------------------------------------------------------------- 1 | from packages.datasources.prometheus import * 2 | 3 | 4 | with open('test/mock/mockdata.yaml') as f: 5 | mockdata = yaml.load(f, Loader=SafeLoader) 6 | 7 | def test_prom(): 8 | query = 'http://192.168.1.9:9090/api/v1/query_range?query=100 - ((node_memory_MemAvailable_bytes{instance="node-exporter:9100"} * 100) / node_memory_MemTotal_bytes{instance="node-exporter:9100"})&start=1662978840&end=1662978900&step=15s' 9 | r = prometheus(query,test=True) 10 | assert r == mockdata['prom_results'][1]['values']['value'] 11 | def test_getdata(): 12 | prom_query = '100 - ((node_memory_MemAvailable_bytes{instance="node-exporter:9100"} * 100) / node_memory_MemTotal_bytes{instance="node-exporter:9100"})' 13 | start_time = 1662978840 14 | end_time = 1662978900 15 | url = 'http://localhost:9090' 16 | r=get_data_from_prometheus(prom_query,start_time,end_time,url,test=True) 17 | assert r == mockdata['getdata_results'][1]['data_points']['data_point'] 18 | def test_writedata(): 19 | val = 5000 20 | tim = datetime.datetime(2023, 9, 12, 8, 53) 21 | write_name = "name" 22 | prom_url = "localhost:9090" 23 | write_to_prometheus(val,tim,write_name,prom_url,test=True) -------------------------------------------------------------------------------- /xforecast-helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: xforecast-helm 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 
24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: Test 4 | 5 | on: 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - 16 | name: checkout 17 | uses: actions/checkout@v3 18 | - 19 | name: Set up Python 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: '3.7' 23 | - 24 | name: Install dependencies 25 | run: | 26 | ls 27 | python -m pip install --upgrade pip 28 | pip install -r ./requirements.txt 29 | - 30 | name: Test with pytest 31 | run: | 32 | pip install pytest 33 | pip install pytest-asyncio 34 | pip install pytest-cov 35 | python3.7 -m pytest test/ --cov --cov-report xml 36 | 37 | - uses: codecov/codecov-action@v3 38 | with: 39 | token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos 40 | files: ./coverage.xml # optional 41 | flags: unittests # optional 42 | name: codecov-umbrella # optional 43 | fail_ci_if_error: true # optional (default = false) 44 | verbose: true # optional (default = false) 45 | 46 | 47 | -------------------------------------------------------------------------------- /demo/grafana/provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | # list of datasources that should be deleted from the database 5 | deleteDatasources: 6 | - name: Prometheus 7 | orgId: 1 8 | 9 | # list of datasources to insert/update depending 10 | # whats available in the database 11 | datasources: 12 | # name of the datasource. Required 13 | - name: Prometheus 14 | # datasource type. Required 15 | type: prometheus 16 | # access mode. direct or proxy. Required 17 | access: proxy 18 | # org id. will default to orgId 1 if not specified 19 | orgId: 1 20 | # url 21 | url: http://prometheus:9090 22 | # database password, if used 23 | password: 24 | # database user, if used 25 | user: 26 | # database name, if used 27 | database: 28 | # enable/disable basic auth 29 | basicAuth: false 30 | # basic auth username, if used 31 | basicAuthUser: 32 | # basic auth password, if used 33 | basicAuthPassword: 34 | # enable/disable with credentials headers 35 | withCredentials: 36 | # mark as default datasource. Max one per org 37 | isDefault: true 38 | # fields that will be converted to json and stored in json_data 39 | jsonData: 40 | graphiteVersion: "1.1" 41 | tlsAuth: false 42 | tlsAuthWithCACert: false 43 | # json object of data that will be encrypted. 44 | secureJsonData: 45 | tlsCACert: "..." 46 | tlsClientCert: "..." 47 | tlsClientKey: "..." 48 | version: 1 49 | # allow users to edit datasources from the UI. 50 | editable: true -------------------------------------------------------------------------------- /design.md: -------------------------------------------------------------------------------- 1 | # xforecast 2 | 3 | ## Overview 4 | xforecast helps to predict the data points for a given time period of a metric. It learns from the past data points of that metric. 5 | We will be using pre-trained models for this purpose. 
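For example, each metric to be forecasted is declared in the config file together with its datastore and model settings (abridged from this repository's own `config.yaml`; the Prometheus query is shortened here):

```yaml
metrics:
  - name: memory_usage              # metric name in prometheus
    data_store:
      name: prometheus
      url: http://192.168.1.9:9090
    query: 100 - ((node_memory_MemAvailable_bytes{...} * 100) / node_memory_MemTotal_bytes{...})
    forecast_duration: 5m           # how far ahead to predict
    forecast_every: 60              # at what interval the app does the predictions
    write_back_metric: forecast_mem_usage
    models:
      model_name: prophet
```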
6 | 7 | ## Architecture 8 | 9 | ![Alt text](images/forecaster.PNG?raw=true "Architecture diagram of kube-forecast") 10 | 11 | xforecast has the following components, 12 | 13 | - Datastore 14 | - Visualizer 15 | - Forecaster 16 | 17 | ### Datastore 18 | We will be using Prometheus as the supported datastore initially, since the majority of engineers use this tool to collect metrics from Kubernetes clusters. The forecaster uses the datastore to query the data points of the metrics to be predicted, and to write back the forecasted data points of those metrics. 19 | 20 | ### Visualizer 21 | We use Grafana to plot the actual data points against the forecasted data points of the metrics. This can be the same Grafana instance that the engineers already have. Grafana can also be used to set alerts so that issues can be remediated proactively, by automated or manual means. 22 | 23 | ### Forecaster 24 | The forecaster is an always-running application written in Python. It reads configuration such as the datastore URL, metric name, training data hours, etc. from the config file. This config can be loaded as a ConfigMap. Once training is completed, it starts predicting the data points for the next x period every y minutes, where x and y are loaded from the configuration. The predicted data points are written back to the datastore. 25 | 26 | #### ML Model 27 | We need to consider models which can be trained with multi-dimensional data (multiple metrics). 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /packages/helper/functions.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from prophet import Prophet 3 | from prophet.serialize import model_to_json, model_from_json 4 | 5 | def stan_init(m): 6 | """Retrieve parameters from a trained model. 7 | 8 | Retrieve parameters from a trained model in the format 9 | used to initialize a new Stan model. 10 | 11 | Parameters 12 | ---------- 13 | m: A trained model of the Prophet class. 14 | 15 | Returns 16 | ------- 17 | A dictionary containing the retrieved parameters of m. 
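(For a Prophet model the retrieved keys are 'k', 'm' and 'sigma_obs' as scalars, and 'delta' and 'beta' as vectors, as extracted in the loop below.)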
18 | 19 | """ 20 | res = {} 21 | for pname in ['k', 'm', 'sigma_obs']: 22 | res[pname] = m.params[pname][0][0] 23 | for pname in ['delta', 'beta']: 24 | res[pname] = m.params[pname][0] 25 | return res 26 | 27 | def fit_and_predict(df, periods=1000,frequency='60s',old_model_loc=None,new_model_loc='./serialized_model.json'): 28 | response = {} 29 | old_model = None 30 | model = None 31 | new_model_loc = new_model_loc 32 | try: 33 | if old_model_loc != None: 34 | with open(old_model_loc, 'r') as fin: 35 | old_model = model_from_json(fin.read()) # Load model 36 | print(type(old_model)) 37 | model = Prophet(seasonality_mode='multiplicative').fit(df,init=stan_init(old_model)) 38 | else: 39 | model = Prophet(seasonality_mode='multiplicative').fit(df) 40 | with open(new_model_loc, 'w') as fout: 41 | fout.write(model_to_json(model)) # Save model 42 | future_df = model.make_future_dataframe(periods=periods, freq=frequency) 43 | fcst = model.predict(future_df) 44 | fcst = fcst[-(periods):] 45 | response['status'] = 'success' 46 | response['model_location'] = new_model_loc 47 | response['yhat'] = fcst['yhat'] 48 | response['yhat_lower'] = fcst['yhat_lower'] 49 | response['yhat_upper'] = fcst['yhat_upper'] 50 | response['ds'] = fcst['ds'] 51 | except Exception as e: 52 | print(e) 53 | response['status'] = 'failure' 54 | return(response) 55 | 56 | -------------------------------------------------------------------------------- /xforecast-helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "xforecast-helm.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "xforecast-helm.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "xforecast-helm.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "xforecast-helm.labels" -}} 37 | helm.sh/chart: {{ include "xforecast-helm.chart" . }} 38 | {{ include "xforecast-helm.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "xforecast-helm.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "xforecast-helm.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "xforecast-helm.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "xforecast-helm.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /test/units/prometheus.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Prometheus Team 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | syntax = "proto3"; 15 | package prometheus; 16 | 17 | option go_package = "prompb"; 18 | 19 | message WriteRequest { 20 | repeated prometheus.TimeSeries timeseries = 1; 21 | } 22 | 23 | message ReadRequest { 24 | repeated Query queries = 1; 25 | } 26 | 27 | message ReadResponse { 28 | // In same order as the request's queries. 29 | repeated QueryResult results = 1; 30 | } 31 | 32 | message Query { 33 | int64 start_timestamp_ms = 1; 34 | int64 end_timestamp_ms = 2; 35 | repeated prometheus.LabelMatcher matchers = 3; 36 | prometheus.ReadHints hints = 4; 37 | } 38 | 39 | message QueryResult { 40 | // Samples within a time series must be ordered by time. 41 | repeated prometheus.TimeSeries timeseries = 1; 42 | } 43 | 44 | message Sample { 45 | double value = 1; 46 | int64 timestamp = 2; 47 | } 48 | 49 | message TimeSeries { 50 | repeated Label labels = 1; 51 | repeated Sample samples = 2; 52 | } 53 | 54 | message Label { 55 | string name = 1; 56 | string value = 2; 57 | } 58 | 59 | message Labels { 60 | repeated Label labels = 1; 61 | } 62 | 63 | // Matcher specifies a rule, which can match or set of labels or not. 64 | message LabelMatcher { 65 | enum Type { 66 | EQ = 0; 67 | NEQ = 1; 68 | RE = 2; 69 | NRE = 3; 70 | } 71 | Type type = 1; 72 | string name = 2; 73 | string value = 3; 74 | } 75 | 76 | message ReadHints { 77 | int64 step_ms = 1; // Query step size in milliseconds. 78 | string func = 2; // String representation of surrounding function or aggregation. 79 | int64 start_ms = 3; // Start time in milliseconds. 80 | int64 end_ms = 4; // End time in milliseconds. 81 | } 82 | -------------------------------------------------------------------------------- /packages/datasources/prometheus.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Prometheus Team 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | syntax = "proto3"; 15 | package prometheus; 16 | 17 | option go_package = "prompb"; 18 | 19 | message WriteRequest { 20 | repeated prometheus.TimeSeries timeseries = 1; 21 | } 22 | 23 | message ReadRequest { 24 | repeated Query queries = 1; 25 | } 26 | 27 | message ReadResponse { 28 | // In same order as the request's queries. 29 | repeated QueryResult results = 1; 30 | } 31 | 32 | message Query { 33 | int64 start_timestamp_ms = 1; 34 | int64 end_timestamp_ms = 2; 35 | repeated prometheus.LabelMatcher matchers = 3; 36 | prometheus.ReadHints hints = 4; 37 | } 38 | 39 | message QueryResult { 40 | // Samples within a time series must be ordered by time. 41 | repeated prometheus.TimeSeries timeseries = 1; 42 | } 43 | 44 | message Sample { 45 | double value = 1; 46 | int64 timestamp = 2; 47 | } 48 | 49 | message TimeSeries { 50 | repeated Label labels = 1; 51 | repeated Sample samples = 2; 52 | } 53 | 54 | message Label { 55 | string name = 1; 56 | string value = 2; 57 | } 58 | 59 | message Labels { 60 | repeated Label labels = 1; 61 | } 62 | 63 | // Matcher specifies a rule, which can match or set of labels or not. 64 | message LabelMatcher { 65 | enum Type { 66 | EQ = 0; 67 | NEQ = 1; 68 | RE = 2; 69 | NRE = 3; 70 | } 71 | Type type = 1; 72 | string name = 2; 73 | string value = 3; 74 | } 75 | 76 | message ReadHints { 77 | int64 step_ms = 1; // Query step size in milliseconds. 78 | string func = 2; // String representation of surrounding function or aggregation. 79 | int64 start_ms = 3; // Start time in milliseconds. 80 | int64 end_ms = 4; // End time in milliseconds. 81 | } 82 | -------------------------------------------------------------------------------- /xforecast-helm/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for xforecast-helm. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: docker.io/xmigrate/xforecast 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "latest" 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | 17 | serviceAccount: 18 | # Specifies whether a service account should be created 19 | create: true 20 | # Annotations to add to the service account 21 | annotations: {} 22 | # The name of the service account to use. 
23 | # If not set and create is true, a name is generated using the fullname template 24 | name: "" 25 | 26 | podAnnotations: {} 27 | 28 | podSecurityContext: {} 29 | # fsGroup: 2000 30 | 31 | securityContext: 32 | capabilities: 33 | drop: 34 | - ALL 35 | # readOnlyRootFilesystem: true 36 | # runAsNonRoot: true 37 | # runAsUser: 1000700001 38 | 39 | config: | 40 | prometheus_url: http://prometheus:9090 41 | metrics: 42 | - name: windows_cpu_time_total #metric name in prometheus 43 | start_time: '2022-08-25T06:29:00.000Z' 44 | end_time: '2022-08-25T06:30:00.000Z' 45 | query: avg+by(instance)+(windows_cpu_time_total{mode="idle"}) 46 | training_interval: 1h #amount of data that should be used for training 47 | forecast_duration: 5m #How far ahead data points should be predicted; here it will predict for 5 mins 48 | forecast_every: 60 #At what interval the app does the predictions 49 | forecast_basedon: 60 #Forecast based on how many past data points 50 | write_back_metric: forecast_cpu_time_new #Where it should write back the metrics 51 | 52 | 53 | service: 54 | enabled: false 55 | type: ClusterIP 56 | port: 80 57 | 58 | resources: {} 59 | # We usually recommend not to specify default resources and to leave this as a conscious 60 | # choice for the user. This also increases chances charts run on environments with little 61 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 62 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 63 | # limits: 64 | # cpu: 100m 65 | # memory: 128Mi 66 | # requests: 67 | # cpu: 100m 68 | # memory: 128Mi 69 | 70 | 71 | nodeSelector: {} 72 | 73 | tolerations: [] 74 | 75 | affinity: {} 76 | -------------------------------------------------------------------------------- /xforecast-helm/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "xforecast-helm.fullname" . }} 5 | labels: 6 | {{- include "xforecast-helm.labels" . | nindent 4 }} 7 | spec: 8 | {{- /* if not .Values.autoscaling.enabled }} 9 | replicas: {{ .Values.replicaCount }} 10 | {{- end */}} 11 | replicas: {{ .Values.replicaCount }} 12 | selector: 13 | matchLabels: 14 | {{- include "xforecast-helm.selectorLabels" . | nindent 6 }} 15 | template: 16 | metadata: 17 | {{- with .Values.podAnnotations }} 18 | annotations: 19 | {{- toYaml . | nindent 8 }} 20 | {{- end }} 21 | labels: 22 | {{- include "xforecast-helm.selectorLabels" . | nindent 8 }} 23 | spec: 24 | {{- with .Values.imagePullSecrets }} 25 | imagePullSecrets: 26 | {{- toYaml . | nindent 8 }} 27 | {{- end }} 28 | serviceAccountName: {{ include "xforecast-helm.serviceAccountName" . }} 29 | securityContext: 30 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 31 | volumes: 32 | - name: config-volume 33 | configMap: 34 | name: {{ include "xforecast-helm.fullname" . 
}} 35 | items: 36 | - key: config.yaml 37 | path: config.yaml 38 | containers: 39 | - name: {{ .Chart.Name }} 40 | securityContext: 41 | {{- toYaml .Values.securityContext | nindent 12 }} 42 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 43 | imagePullPolicy: {{ .Values.image.pullPolicy }} 44 | volumeMounts: 45 | - name: config-volume 46 | mountPath: /app/workspace/config.yaml 47 | subPath: config.yaml 48 | ports: 49 | - name: http 50 | containerPort: 80 51 | protocol: TCP 52 | # livenessProbe: 53 | # httpGet: 54 | # path: / 55 | # port: http 56 | # readinessProbe: 57 | # httpGet: 58 | # path: / 59 | # port: http 60 | resources: 61 | {{- toYaml .Values.resources | nindent 12 }} 62 | {{- with .Values.nodeSelector }} 63 | nodeSelector: 64 | {{- toYaml . | nindent 8 }} 65 | {{- end }} 66 | {{- with .Values.affinity }} 67 | affinity: 68 | {{- toYaml . | nindent 8 }} 69 | {{- end }} 70 | {{- with .Values.tolerations }} 71 | tolerations: 72 | {{- toYaml . | nindent 8 }} 73 | {{- end }} 74 | -------------------------------------------------------------------------------- /test/mock/mockdata.yaml: -------------------------------------------------------------------------------- 1 | prom_results: 2 | - results: 3 | result: {"status":"success","data":{"resultType":"matrix","result":[{"metric":{"instance":"node-exporter:9100","job":"node"},"values":[[1662978840,"30.762664683513876"],[1662978855,"30.768725077912023"],[1662978870,"30.7615861387481"],[1662978885,"30.78387606390737"],[1662978900,"30.773450131171586"]]}]}} 4 | - values: 5 | value: [{'metric': {'instance': 'node-exporter:9100', 'job': 'node'}, 'values': [[1662978840, '30.762664683513876'], [1662978855, '30.768725077912023'], [1662978870, '30.7615861387481'], [1662978885, '30.78387606390737'], [1662978900, '30.773450131171586']]}] 6 | value2: dfsfdsfsdf 7 | getdata_results: 8 | - results: 9 | result: [{'metric': {'instance': 'node-exporter:9100', 'job': 'node'}, 'values': [[1662978840, '30.762664683513876'], [1662978855, '30.768725077912023'], [1662978870, '30.7615861387481'], [1662978885, '30.78387606390737'], [1662978900, '30.773450131171586']]}] 10 | - data_points: 11 | data_point: "{'Time': [datetime.datetime(2022, 9, 12, 10, 34), datetime.datetime(2022, 9, 12, 10, 34, 15), datetime.datetime(2022, 9, 12, 10, 34, 30), datetime.datetime(2022, 9, 12, 10, 34, 45), datetime.datetime(2022, 9, 12, 10, 35)], 'y': ['30.762664683513876', '30.768725077912023', '30.7615861387481', '30.78387606390737', '30.773450131171586']}" 12 | 13 | influx_results: 14 | - results: 15 | result: [[{'time': '2022-09-12T08:53:00Z', 'mean': 50.87687511613649}, {'time': '2022-09-12T08:53:10Z', 'mean': 50.57760243944724}, {'time': '2022-09-12T08:53:20Z', 'mean': 50.50485283113489}, {'time': '2022-09-12T08:53:30Z', 'mean': 50.664869957894595}, {'time': '2022-09-12T08:53:40Z', 'mean': 50.527145382764374}, {'time': '2022-09-12T08:53:50Z', 'mean': 50.48463651916247}, {'time': '2022-09-12T08:54:00Z', 'mean': 50.54480966540953}]] 16 | - values: 17 | value: "{'Time': [datetime.datetime(2022, 9, 12, 8, 53), datetime.datetime(2022, 9, 12, 8, 53, 10), datetime.datetime(2022, 9, 12, 8, 53, 20), datetime.datetime(2022, 9, 12, 8, 53, 30), datetime.datetime(2022, 9, 12, 8, 53, 40), datetime.datetime(2022, 9, 12, 8, 53, 50), datetime.datetime(2022, 9, 12, 8, 54)], 'y': [50.87687511613649, 50.57760243944724, 50.50485283113489, 50.664869957894595, 50.527145382764374, 50.48463651916247, 50.54480966540953]}" 18 | 19 | fit_results: 20 | - 
values: 21 | value: {"Time": [datetime.datetime(2023, 9, 12, 8, 53), datetime.datetime(2022, 9, 12, 8, 53, 10), datetime.datetime(2022, 9, 12, 8, 53, 20), datetime.datetime(2022, 9, 12, 8, 53, 30), datetime.datetime(2022, 9, 12, 8, 53, 40), datetime.datetime(2022, 9, 12, 8, 53, 50), datetime.datetime(2022, 9, 12, 8, 54)], "y": [50.87687511613649, 50.57760243944724, 50.50485283113489, 50.664869957894595, 50.527145382764374, 50.48463651916247, 50.54480966540953]} 22 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | 2 | metrics: 3 | - name: cpu_usage #metric name in prometheus 4 | data_store : 5 | name : influxdb 6 | url: 192.168.1.9 7 | port: 8086 8 | user : admin 9 | pass : admin 10 | db_name : telegraf 11 | measurement : cpu 12 | start_time: '2022-09-14 11:19:00' 13 | end_time: '2022-09-14 11:20:00' 14 | query: SELECT mean("usage_idle") *-1 +100 FROM "autogen"."cpu" WHERE ("host" = 'ip-172-31-31-81') AND time >= '2022-09-14 11:19:00' AND time <= '2022-09-14 11:20:00' GROUP BY time(10s) 15 | training_interval: 1h #amount of data that should be used for training 16 | forecast_duration: 5m #How far ahead data points should be predicted; here it will predict for 5 mins 17 | forecast_every: 60 #At what interval the app does the predictions 18 | forecast_basedon: 60 #Forecast based on how many past data points 19 | write_back_metric: forecast_cpu_use #Where it should write back the metrics 20 | models : 21 | model_name: prophet 22 | hyperparameters: 23 | changepoint_prior_scale : 0.05 #determines the flexibility of the trend changes 24 | seasonality_prior_scale : 10 #determines the flexibility of the seasonality changes 25 | holidays_prior_scale : 10 #determines the flexibility to fit the holidays 26 | changepoint_range : 0.8 #proportion of the history where the trend changes are applied 27 | seasonality_mode : additive #whether the mode of seasonality is additive or multiplicative 28 | - name: memory_usage #metric name in prometheus 29 | data_store : 30 | name : prometheus 31 | url: http://192.168.1.9:9090 32 | start_time: '2022-09-14 11:19:00' 33 | end_time: '2022-09-14 11:20:00' 34 | query: 100 - ((node_memory_MemAvailable_bytes{instance="node-exporter:9100"} * 100) / node_memory_MemTotal_bytes{instance="node-exporter:9100"}) 35 | training_interval: 1h #amount of data that should be used for training 36 | forecast_duration: 5m #How far ahead data points should be predicted; here it will predict for 5 mins 37 | forecast_every: 60 #At what interval the app does the predictions 38 | forecast_basedon: 60 #Forecast based on how many past data points 39 | write_back_metric: forecast_mem_usage #Where it should write back the metrics 40 | models : 41 | model_name: prophet 42 | hyperparameters: 43 | changepoint_prior_scale : 0.05 #determines the flexibility of the trend changes 44 | seasonality_prior_scale : 10 #determines the flexibility of the seasonality changes 45 | holidays_prior_scale : 10 #determines the flexibility to fit the holidays 46 | changepoint_range : 0.8 #proportion of the history where the trend changes are applied 47 | seasonality_mode : additive #whether the mode of seasonality is additive or multiplicative 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /packages/helper/test_functions.py: -------------------------------------------------------------------------------- 1 | import time 2 | import functions 3 | 
import yaml 4 | import prometh 5 | import datetime 6 | import pandas as pd 7 | from yaml.loader import SafeLoader 8 | from collections import defaultdict 9 | from influxdb import InfluxDBClient 10 | from dateutil import parser 11 | 12 | 13 | # with open('./config.yaml') as f: 14 | # data = yaml.load(f, Loader=SafeLoader) 15 | # #print(data) 16 | 17 | # metric_dict = defaultdict(list) 18 | # for elements in data['metrics']: 19 | # for element in elements: 20 | # metric_dict[element].append(elements[element]) 21 | 22 | # metric_name = metric_dict['name'] 23 | # start_time = metric_dict['start_time'] 24 | # end_time = metric_dict['end_time'] 25 | # url = data['prometheus_url'] 26 | # prom_query = metric_dict['query'] 27 | # forecast_every = metric_dict['forecast_every'] 28 | 29 | # #print(type(forecast_every[0])) 30 | 31 | client = InfluxDBClient('192.168.1.9', 8086, 'admin', 'admin', 'telegraf') 32 | #print(client.get_list_database()) 33 | #print(client.get_list_measurements()) 34 | metric = 'usage_user' 35 | measurement = 'cpu' 36 | fieldname= "" 37 | predicted_field = "forecast_value" 38 | query = 'SELECT "usage_user" FROM "autogen"."cpu" WHERE time >= \'2022-09-07 6:30:00\'' 39 | value = client.query(query) 40 | #print(value) 41 | values = list(value.get_points(measurement=measurement)) 42 | #print(values) 43 | 44 | 45 | data_points = {} 46 | data_time = [] 47 | data_value=[] 48 | 49 | for elements in values: 50 | yourdate = parser.parse(elements['time']) 51 | yourdate = yourdate.replace(tzinfo=None) 52 | data_time.append(yourdate) 53 | data_value.append(elements[metric]) 54 | data_points['Time'] = data_time 55 | data_points['y'] = data_value 56 | 57 | 58 | 59 | dt=data_points 60 | print(dt) 61 | df={} 62 | df['Time'] = pd.to_datetime(dt['Time'], format='%d/%m/%y %H:%M:%S') 63 | df['ds'] = df['Time'] 64 | df['y'] = dt['y'] 65 | 66 | df=pd.DataFrame(df) 67 | 68 | 69 | #train_df = df[10:] 70 | 71 | response = functions.fit_and_predict(df,periods=10,frequency='60s',old_model_loc=None) 72 | print(response['ds'] ,response['yhat']) 73 | 74 | data_to_influx_yhat = response['yhat'].to_dict() 75 | data_to_influx_tim = response['ds'].to_dict() 76 | json_payload=[] 77 | data_list = [] 78 | for elements in data_to_influx_tim: 79 | print( data_to_influx_yhat[elements]) 80 | # data = { 81 | # "measurement" : "cpu", 82 | # "time" : data_to_influx_tim[elements], 83 | # "fields" : { 84 | # "forecast_value" : data_to_influx_yhat[elements] 85 | # } 86 | # } 87 | data = {} 88 | data['measurement'] = measurement 89 | data['time'] = data_to_influx_tim[elements] 90 | data['fields'] = {predicted_field:data_to_influx_yhat[elements]} 91 | json_payload.append(data) 92 | 93 | client.write_points(json_payload) -------------------------------------------------------------------------------- /demo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | networks: 4 | monitoring: 5 | driver: bridge 6 | 7 | volumes: 8 | prometheus_data: 9 | driver: local 10 | grafana_data: 11 | driver: local 12 | influxdb-storage: 13 | driver: local 14 | chronograf-storage: 15 | driver: local 16 | 17 | services: 18 | node-exporter: 19 | image: prom/node-exporter:latest 20 | container_name: node-exporter 21 | restart: unless-stopped 22 | volumes: 23 | - /proc:/host/proc:ro 24 | - /sys:/host/sys:ro 25 | - /:/rootfs:ro 26 | command: 27 | - '--path.procfs=/host/proc' 28 | - '--path.rootfs=/rootfs' 29 | - '--path.sysfs=/host/sys' 30 | - 
'--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' 31 | expose: 32 | - 9100 33 | networks: 34 | - monitoring 35 | 36 | prometheus: 37 | image: prom/prometheus:latest 38 | container_name: prometheus 39 | restart: unless-stopped 40 | volumes: 41 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 42 | - prometheus_data:/prometheus 43 | command: 44 | - '--config.file=/etc/prometheus/prometheus.yml' 45 | - '--storage.tsdb.path=/prometheus' 46 | - '--web.console.libraries=/etc/prometheus/console_libraries' 47 | - '--web.console.templates=/etc/prometheus/consoles' 48 | - '--web.enable-lifecycle' 49 | - '--enable-feature=remote-write-receiver' 50 | ports: 51 | - 9090:9090 52 | expose: 53 | - 9090 54 | networks: 55 | - monitoring 56 | influxdb: 57 | image: influxdb:1.8 58 | ports: 59 | - '8086:8086' 60 | expose: 61 | - 8086 62 | volumes: 63 | - influxdb-storage:/var/lib/influxdb 64 | env_file: 65 | - ./influxdb/.env 66 | environment: 67 | - INFLUXDB_DB=db0 68 | - INFLUXDB_ADMIN_USER=${INFLUXDB_USERNAME} 69 | - INFLUXDB_ADMIN_PASSWORD=${INFLUXDB_PASSWORD} 70 | networks: 71 | - monitoring 72 | chronograf: 73 | image: chronograf:latest 74 | ports: 75 | - '8888:8888' 76 | expose: 77 | - 8888 78 | volumes: 79 | - chronograf-storage:/var/lib/chronograf 80 | depends_on: 81 | - influxdb 82 | env_file: 83 | - ./influxdb/.env 84 | environment: 85 | - INFLUXDB_URL=http://influxdb:8086 86 | - INFLUXDB_USERNAME=${INFLUXDB_USERNAME} 87 | - INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD} 88 | networks: 89 | - monitoring 90 | grafana: 91 | image: grafana/grafana 92 | user: "472" 93 | depends_on: 94 | - prometheus 95 | ports: 96 | - 3000:3000 97 | volumes: 98 | - grafana_data:/var/lib/grafana 99 | - ./grafana/provisioning/:/etc/grafana/provisioning/ 100 | env_file: 101 | - ./grafana/config.monitoring 102 | networks: 103 | - monitoring 104 | restart: always -------------------------------------------------------------------------------- /packages/models/memory_usage.json: -------------------------------------------------------------------------------- 1 | {"growth": "linear", "n_changepoints": 3, "specified_changepoints": false, "changepoint_range": 0.8, "yearly_seasonality": "auto", "weekly_seasonality": "auto", "daily_seasonality": "auto", "seasonality_mode": "additive", "seasonality_prior_scale": 10.0, "changepoint_prior_scale": 0.05, "holidays_prior_scale": 10.0, "mcmc_samples": 0, "interval_width": 0.8, "uncertainty_samples": 1000, "y_scale": 28.150069745894854, "logistic_floor": false, "country_holidays": null, "component_modes": {"additive": ["additive_terms", "extra_regressors_additive", "holidays"], "multiplicative": ["multiplicative_terms", "extra_regressors_multiplicative"]}, "changepoints": "{\"name\":\"ds\",\"index\":[1,2,3],\"data\":[\"2022-09-14T11:26:23.000Z\",\"2022-09-14T11:26:38.000Z\",\"2022-09-14T11:26:53.000Z\"]}", "history_dates": "{\"name\":\"ds\",\"index\":[0,1,2,3,4],\"data\":[\"2022-09-14T11:26:08.000Z\",\"2022-09-14T11:26:23.000Z\",\"2022-09-14T11:26:38.000Z\",\"2022-09-14T11:26:53.000Z\",\"2022-09-14T11:27:08.000Z\"]}", "train_holiday_names": null, "start": 1663154768.0, "t_scale": 60.0, "holidays": null, "history": 
"{\"schema\":{\"fields\":[{\"name\":\"Time\",\"type\":\"datetime\"},{\"name\":\"ds\",\"type\":\"datetime\"},{\"name\":\"y\",\"type\":\"number\"},{\"name\":\"floor\",\"type\":\"integer\"},{\"name\":\"t\",\"type\":\"number\"},{\"name\":\"y_scaled\",\"type\":\"number\"}],\"pandas_version\":\"0.20.0\"},\"data\":[{\"Time\":\"2022-09-14T11:26:08.000Z\",\"ds\":\"2022-09-14T11:26:08.000Z\",\"y\":28.1500697459,\"floor\":0,\"t\":0.0,\"y_scaled\":1.0},{\"Time\":\"2022-09-14T11:26:23.000Z\",\"ds\":\"2022-09-14T11:26:23.000Z\",\"y\":28.1259308869,\"floor\":0,\"t\":0.25,\"y_scaled\":0.9991424938},{\"Time\":\"2022-09-14T11:26:38.000Z\",\"ds\":\"2022-09-14T11:26:38.000Z\",\"y\":28.1350728377,\"floor\":0,\"t\":0.5,\"y_scaled\":0.9994672515},{\"Time\":\"2022-09-14T11:26:53.000Z\",\"ds\":\"2022-09-14T11:26:53.000Z\",\"y\":28.1412359507,\"floor\":0,\"t\":0.75,\"y_scaled\":0.9996861892},{\"Time\":\"2022-09-14T11:27:08.000Z\",\"ds\":\"2022-09-14T11:27:08.000Z\",\"y\":28.1108826194,\"floor\":0,\"t\":1.0,\"y_scaled\":0.9986079208}]}", "train_component_cols": "{\"schema\":{\"fields\":[{\"name\":\"additive_terms\",\"type\":\"integer\"},{\"name\":\"multiplicative_terms\",\"type\":\"integer\"}],\"pandas_version\":\"0.20.0\"},\"data\":[{\"additive_terms\":0,\"multiplicative_terms\":0}]}", "changepoints_t": [0.25, 0.5, 0.75], "seasonalities": [[], {}], "extra_regressors": [[], {}], "fit_kwargs": {"init": {"k": 0.0012605883960293172, "m": 0.9995810183076782, "sigma_obs": 7.99521446300743e-17, "delta": [-0.000845250022771138, -0.0009691228709357443, 4.371982876460762e-05], "beta": [9.035483845920483e-08]}}, "params": {"k": [[-0.00343002475821029]], "m": [[1.0]], "delta": [[0.004729055411319649, -0.00042327965101194095, -0.005188824687423449]], "sigma_obs": [[8.758613940868977e-17]], "beta": [[8.124713741762564e-10]], "trend": [[1.0, 0.9991424938104474, 0.9994672514737247, 0.999686189224249, 0.9986079208029175]]}, "__prophet_version": "1.0"} -------------------------------------------------------------------------------- /packages/helper/prometh.py: -------------------------------------------------------------------------------- 1 | from time import strftime 2 | import yaml 3 | import requests 4 | import json 5 | import datetime 6 | import asyncio 7 | from yaml.loader import SafeLoader 8 | from collections import defaultdict 9 | 10 | 11 | 12 | def prometheus(query): 13 | result = json.loads(requests.get(query).text) 14 | value = result['data']['result'] 15 | return value 16 | 17 | def get_data_from_prometheus(metric_name,db_query, start_time, end_time, url): 18 | data_points = {} 19 | data_time=[] 20 | data_value=[] 21 | for i in range(len(metric_name)): 22 | if metric_name[i]: 23 | query = url+'/api/v1/query_range?query='+db_query[i]+'&start='+start_time[i]+'&end='+end_time[i]+'&step=15s' 24 | #print(query) 25 | result = prometheus(query) 26 | for elements in result: 27 | values = elements['values'] 28 | for element in values: 29 | #print(element[0]) 30 | date_time=datetime.datetime.fromtimestamp(element[0]) 31 | #date_time = date_time,strftime('%y/%m/%d %H:%S') 32 | #print(date_time) 33 | data_time.append(date_time) 34 | data_value.append(element[1]) 35 | data_points['Time'] = data_time 36 | data_points['y'] = data_value 37 | return data_points 38 | 39 | def train_model(): 40 | pass 41 | 42 | async def predict_datapoints(forecast_every,metric_name): 43 | while True: 44 | #TODO If trained model available, else break 45 | #Get the past data points and pass that to the model for prediction 46 | #Get the predictions and 
store that to prometheus data store 47 | #Sleep for the duration mentioned in the config file 48 | #Also add a mechanism to check if the config file is updated, if updated then break so that it will read from config again 49 | pass 50 | 51 | def write_to_prometheus(): 52 | pass 53 | 54 | 55 | if __name__ == "__main__": 56 | #TODO read the config file and assign to the variables 57 | #Check datastore type, if prometheus get it from get_data_from_promethues(metric_name, start_time, end_time, url) 58 | #Check if there's a trained model available for the given metric, else do the training also check the retrain flag in the config of each metric 59 | #Give predictions back to the prometheus if trained model is available locally 60 | with open('./config.yaml') as f: 61 | data = yaml.load(f, Loader=SafeLoader) 62 | #print(data) 63 | 64 | metric_dict = defaultdict(list) 65 | for elements in data['metrics']: 66 | for element in elements: 67 | metric_dict[element].append(elements[element]) 68 | 69 | metric_name = metric_dict['name'] 70 | start_time = metric_dict['start_time'] 71 | end_time = metric_dict['end_time'] 72 | url = data['prometheus_url'] 73 | db_query = metric_dict['query'] 74 | forecast_every = metric_dict['forecast_every'] 75 | 76 | data_for_training = get_data_from_prometheus(metric_name,db_query, start_time, end_time, url) 77 | print(data_for_training) 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /packages/models/cpu_usage.json: -------------------------------------------------------------------------------- 1 | {"growth": "linear", "n_changepoints": 4, "specified_changepoints": false, "changepoint_range": 0.8, "yearly_seasonality": "auto", "weekly_seasonality": "auto", "daily_seasonality": "auto", "seasonality_mode": "additive", "seasonality_prior_scale": 10.0, "changepoint_prior_scale": 0.05, "holidays_prior_scale": 10.0, "mcmc_samples": 0, "interval_width": 0.8, "uncertainty_samples": 1000, "y_scale": 50.9655859069886, "logistic_floor": false, "country_holidays": null, "component_modes": {"additive": ["additive_terms", "extra_regressors_additive", "holidays"], "multiplicative": ["multiplicative_terms", "extra_regressors_multiplicative"]}, "changepoints": "{\"name\":\"ds\",\"index\":[1,2,3,4],\"data\":[\"2022-09-14T11:26:10.000Z\",\"2022-09-14T11:26:20.000Z\",\"2022-09-14T11:26:30.000Z\",\"2022-09-14T11:26:40.000Z\"]}", "history_dates": "{\"name\":\"ds\",\"index\":[0,1,2,3,4,5,6],\"data\":[\"2022-09-14T11:26:00.000Z\",\"2022-09-14T11:26:10.000Z\",\"2022-09-14T11:26:20.000Z\",\"2022-09-14T11:26:30.000Z\",\"2022-09-14T11:26:40.000Z\",\"2022-09-14T11:26:50.000Z\",\"2022-09-14T11:27:00.000Z\"]}", "train_holiday_names": null, "start": 1663154760.0, "t_scale": 60.0, "holidays": null, "history": 
"{\"schema\":{\"fields\":[{\"name\":\"Time\",\"type\":\"datetime\"},{\"name\":\"ds\",\"type\":\"datetime\"},{\"name\":\"y\",\"type\":\"number\"},{\"name\":\"floor\",\"type\":\"integer\"},{\"name\":\"t\",\"type\":\"number\"},{\"name\":\"y_scaled\",\"type\":\"number\"}],\"pandas_version\":\"0.20.0\"},\"data\":[{\"Time\":\"2022-09-14T11:26:00.000Z\",\"ds\":\"2022-09-14T11:26:00.000Z\",\"y\":50.7003276623,\"floor\":0,\"t\":0.0,\"y_scaled\":0.9947953459},{\"Time\":\"2022-09-14T11:26:10.000Z\",\"ds\":\"2022-09-14T11:26:10.000Z\",\"y\":50.5631916023,\"floor\":0,\"t\":0.1666666667,\"y_scaled\":0.9921045879},{\"Time\":\"2022-09-14T11:26:20.000Z\",\"ds\":\"2022-09-14T11:26:20.000Z\",\"y\":50.5915597884,\"floor\":0,\"t\":0.3333333333,\"y_scaled\":0.9926612024},{\"Time\":\"2022-09-14T11:26:30.000Z\",\"ds\":\"2022-09-14T11:26:30.000Z\",\"y\":50.3689630853,\"floor\":0,\"t\":0.5,\"y_scaled\":0.988293614},{\"Time\":\"2022-09-14T11:26:40.000Z\",\"ds\":\"2022-09-14T11:26:40.000Z\",\"y\":50.4346413594,\"floor\":0,\"t\":0.6666666667,\"y_scaled\":0.9895822929},{\"Time\":\"2022-09-14T11:26:50.000Z\",\"ds\":\"2022-09-14T11:26:50.000Z\",\"y\":50.6525134859,\"floor\":0,\"t\":0.8333333333,\"y_scaled\":0.99385718},{\"Time\":\"2022-09-14T11:27:00.000Z\",\"ds\":\"2022-09-14T11:27:00.000Z\",\"y\":50.965585907,\"floor\":0,\"t\":1.0,\"y_scaled\":1.0}]}", "train_component_cols": "{\"schema\":{\"fields\":[{\"name\":\"additive_terms\",\"type\":\"integer\"},{\"name\":\"multiplicative_terms\",\"type\":\"integer\"}],\"pandas_version\":\"0.20.0\"},\"data\":[{\"additive_terms\":0,\"multiplicative_terms\":0}]}", "changepoints_t": [0.16666666666666666, 0.3333333333333333, 0.5, 0.6666666666666666], "seasonalities": [[], {}], "extra_regressors": [[], {}], "fit_kwargs": {"init": {"k": -0.03197894987148837, "m": 0.9999932643599309, "sigma_obs": 0.00023677477151907078, "delta": [0.04680709045362657, -0.039776703399876594, 0.02162077966692174, 0.02181138091434653], "beta": [1.232671128664879e-14]}}, "params": {"k": [[-0.015904066932294486]], "m": [[0.9947853276155955]], "delta": [[0.018822925752066143, -0.02876366298959767, 0.03161875270402375, 0.025449100617959092]], "sigma_obs": [[0.0002890089234443943]], "beta": [[-2.8738734413509894e-15]], "trend": [[0.9947853276155955, 0.9921346497935464, 0.9926211262635083, 0.9883136589018706, 0.9892759836575703, 0.9944798251829298, 0.9996836667082891]]}, "__prophet_version": "1.0"} -------------------------------------------------------------------------------- /packages/datasources/influx.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | from influxdb import InfluxDBClient 3 | from datetime import datetime 4 | from dateutil import parser 5 | from yaml.loader import SafeLoader 6 | from packages.datasources.logger import * 7 | 8 | with open('test/mock/mockdata.yaml') as f: 9 | mockdata = yaml.load(f, Loader=SafeLoader) 10 | 11 | 12 | def get_data_from_influxdb(data_store,start_time,end_time,prev_stime,prev_etime,db_query,test=False): 13 | """Get the required data points by querying influxdb. 
14 | 15 | Parameters 16 | ---------- 17 | data_store: Data store details 18 | start_time: Start time for the database query 19 | end_time: End time for the database query 20 | prev_stime: Start time in the configuration file 21 | prev_etime: End time in configuration file 22 | db_query : influxdb Query 23 | test: whether to run the test or not 24 | 25 | Returns 26 | ------- 27 | 28 | data_points: A dictionary of lists containing time and values that are returned from influxdb. 29 | 30 | """ 31 | 32 | url = data_store['url'] 33 | port = data_store['port'] 34 | username = data_store['user'] 35 | password = data_store['pass'] 36 | db_name = data_store['db_name'] 37 | if test == False: 38 | client = InfluxDBClient(url, port, username, password, db_name) 39 | db_query = db_query.replace(prev_stime,start_time) 40 | db_query = db_query.replace(prev_etime,end_time) 41 | query=db_query 42 | if "GROUP BY" not in query: 43 | logger("A GROUP BY time($_interval) clause is required for proper execution","warning") 44 | if test == False: 45 | value = client.query(query) 46 | value = list(value) 47 | else: 48 | value = mockdata['influx_results'][0]['results']['result'] 49 | 50 | if value: 51 | logger("Fetching data from influxdb - Success","info") 52 | else: 53 | logger("Fetching data from influxdb - Failed","error") 54 | data_points = {} 55 | data_time = [] 56 | data_value=[] 57 | 58 | for elements in value[0]: 59 | k=elements.keys() 60 | k=list(k) 61 | datetime_time = parser.parse(elements[k[0]]) 62 | datetime_time = datetime_time.replace(tzinfo=None) 63 | data_time.append(datetime_time) 64 | data_value.append(elements[k[1]]) 65 | data_points['Time'] = data_time 66 | data_points['y'] = data_value 67 | if test == True: 68 | return str(data_points) 69 | else: 70 | return data_points 71 | 72 | def write_data_to_influxdb(val,tim,write_name,data_store,test=False): 73 | """Write the predicted data to influxdb. 
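Each value is written as a single field named after write_name, under the measurement configured in the data store.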
74 |
75 | Parameters
76 | ----------
77 | val: Value to be written
78 | tim: Which time the value should be written
79 | write_name: Custom metric name to be written
80 | data_store: Data store details
81 | test: whether to run the test or not
82 |
83 | """
84 |
85 | url = data_store['url']
86 | port = data_store['port']
87 | username = data_store['user']
88 | password = data_store['pass']
89 | db_name = data_store['db_name']
90 | measurement = data_store['measurement']
91 | if test == False:
92 | client = InfluxDBClient(url, port, username, password, db_name)
93 | # the measurement name comes straight from the data_store configuration
94 | json_payload=[]
95 | data = {}
96 | data['measurement'] = measurement
97 | data['time'] = tim
98 | data['fields'] = {write_name:val}
99 | json_payload.append(data)
100 | if test == False:
101 | if client.write_points(json_payload):
102 | logger("Writing data to influxdb - Success","info")
103 | else:
104 | logger("Writing data to influxdb - Failed","error")
105 | else:
106 | assert measurement == 'cpu'
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 | -------------------------------------------------------------------------------- /packages/datasources/prometheus.py: --------------------------------------------------------------------------------
1 | from time import strftime
2 | import requests
3 | import json,yaml
4 | import datetime
5 | import snappy
6 | import calendar
7 | from yaml.loader import SafeLoader
8 | from packages.datasources.logger import *
9 | from packages.datasources.prometheus_pb2 import (
10 | TimeSeries,
11 | Label,
12 | Labels,
13 | Sample,
14 | WriteRequest
15 | )
16 |
17 | with open('test/mock/mockdata.yaml') as f:
18 | mockdata = yaml.load(f, Loader=SafeLoader)
19 |
20 | def prometheus(query,test=False):
21 | """Gets data from prometheus using the http api.
22 |
23 | Parameters
24 | ----------
25 | query: prometheus query for the metric
26 |
27 | Returns
28 | -------
29 | value: Result list from prometheus
30 |
31 | """
32 |
33 |
34 | if test == True:
35 | result = mockdata['prom_results'][0]['results']['result']
36 | else:
37 | result = json.loads(requests.get(query).text)
38 |
39 | value = result['data']['result']
40 | status=result['status']
41 | if value:
42 | logger("Fetching data from prometheus - Success","info")
43 | else:
44 | logger("Fetching data from prometheus - Failed","error")
45 | return value
46 |
47 | def get_data_from_prometheus(db_query, start_time, end_time, url,test=False):
48 | """Get the required data points by querying prometheus.
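The query is issued against the /api/v1/query_range endpoint with a fixed step of 15s.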
49 |
50 | Parameters
51 | ----------
52 | db_query: database query
53 | start_time : start time for the database query
54 | end_time : end time for the database query
55 | url : Prometheus url
56 | test : whether to use mock data instead of querying prometheus
57 | Returns
58 | -------
59 | data_points: A dictionary of lists containing time and values that are returned from prometheus
60 |
61 | """
62 |
63 | data_points = {}
64 | data_time = []
65 | data_value=[]
66 | query = url+'/api/v1/query_range?query='+db_query+'&start='+str(start_time)+'&end='+str(end_time)+'&step=15s'
67 | if test == True:
68 | result = mockdata['getdata_results'][0]['results']['result']
69 | else:
70 | result = prometheus(query)
71 | for elements in result:
72 | values = elements['values']
73 | for element in values:
74 | date_time=datetime.datetime.utcfromtimestamp(element[0])
75 | #print(date_time)
76 | data_time.append(date_time)
77 | data_value.append(element[1])
78 | data_points['Time'] = data_time
79 | data_points['y'] = data_value
80 | if test == True:
81 | return str(data_points)
82 | else:
83 | return data_points
84 |
85 | def dt2ts(dt):
86 | """Converts a datetime object to a UTC timestamp;
87 | a naive datetime is assumed to be in UTC.
88 | """
89 | return calendar.timegm(dt.utctimetuple())
90 |
91 | def write_to_prometheus(val,tim,write_name,prom_url,test=False):
92 | """Write the predicted data to prometheus.
93 |
94 | Parameters
95 | ----------
96 | val: Value to be written
97 | tim: Which time the value should be written
98 | write_name: Custom metric name to be written
99 | prom_url: Prometheus base url to write to
100 | """
101 |
102 |
103 |
104 |
105 | write_request = WriteRequest()
106 |
107 | series = write_request.timeseries.add()
108 |
109 | # name label always required
110 | label = series.labels.add()
111 | label.name = "__name__"
112 | label.value = write_name
113 |
114 |
115 | sample = series.samples.add()
116 | sample.value = val # predicted value for this sample
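# Prometheus remote write expects Sample timestamps in milliseconds since the
# Unix epoch, hence the conversion to whole seconds and the *1000 scaling below.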
117 | dtl = int(tim.timestamp())
118 | sample.timestamp = dtl *1000
119 |
120 | #print(sample.timestamp)
121 |
122 |
123 |
124 | uncompressed = write_request.SerializeToString()
125 | compressed = snappy.compress(uncompressed)
126 |
127 | url = prom_url+"/api/v1/write"
128 | headers = {
129 | "Content-Encoding": "snappy",
130 | "Content-Type": "application/x-protobuf",
131 | "X-Prometheus-Remote-Write-Version": "0.1.0",
132 | "User-Agent": "metrics-worker"
133 | }
134 | if test == False:
135 | try:
136 | response = requests.post(url, headers=headers, data=compressed)
137 | #print(response)
138 | # the remote write endpoint answers with a 2xx status on success
139 | if response.ok:
140 | #print(response.status_code)
141 | logger("writing data to prometheus - Success","info")
142 | else:
143 | logger("writing data to prometheus - Failed","error")
144 |
145 | except Exception as e:
146 | print(e)
147 | logger(str(e),"error")
148 | else:
149 | assert val == 5000 -------------------------------------------------------------------------------- /demo/grafana/dashboard.json: --------------------------------------------------------------------------------
1 | {
2 | "__inputs": [
3 | {
4 | "name": "DS_PROMETHEUS",
5 | "label": "Prometheus",
6 | "description": "",
7 | "type": "datasource",
8 | "pluginId": "prometheus",
9 | "pluginName": "Prometheus"
10 | }
11 | ],
12 | "__elements": {},
13 | "__requires": [
14 | {
15 | "type": "grafana",
16 | "id": "grafana",
17 | "name": "Grafana",
18 | "version": "9.0.6"
19 | },
20 | {
21 | "type": "datasource",
22 | "id": "prometheus",
23 | "name": "Prometheus",
24 | "version": "1.0.0"
25 | },
26 | {
27 | "type": "panel",
28 | "id": "timeseries",
29 | "name": "Time series",
30 | "version": ""
31 | }
32 | ],
33 | "annotations": {
34 | "list": [
35 | {
36 | "builtIn": 1,
37 | "datasource": {
38 | "type": "grafana",
39 | "uid": "-- Grafana --"
40 | },
41 | "enable": true,
42 | "hide": true,
43 | "iconColor": "rgba(0, 211, 255, 1)",
44 | "name": "Annotations & Alerts",
45 | "target": {
46 | "limit": 100,
47 | "matchAny": false,
48 | "tags": [],
49 | "type": "dashboard"
50 | },
51 | "type": "dashboard"
52 | }
53 | ]
54 | },
55 | "editable": true,
56 | "fiscalYearStartMonth": 0,
57 | "graphTooltip": 0,
58 | "id": null,
59 | "links": [],
60 | "liveNow": false,
61 | "panels": [
62 | {
63 | "datasource": {
64 | "type": "prometheus",
65 | "uid": "${DS_PROMETHEUS}"
66 | },
67 | "fieldConfig": {
68 | "defaults": {
69 | "color": {
70 | "mode": "palette-classic"
71 | },
72 | "custom": {
73 | "axisLabel": "",
74 | "axisPlacement": "auto",
75 | "barAlignment": 0,
76 | "drawStyle": "line",
77 | "fillOpacity": 0,
78 | "gradientMode": "none",
79 | "hideFrom": {
80 | "legend": false,
81 | "tooltip": false,
82 | "viz": false
83 | },
84 | "lineInterpolation": "linear",
85 | "lineWidth": 1,
86 | "pointSize": 5,
87 | "scaleDistribution": {
88 | "type": "linear"
89 | },
90 | "showPoints": "auto",
91 | "spanNulls": false,
92 | "stacking": {
93 | "group": "A",
94 | "mode": "none"
95 | },
96 | "thresholdsStyle": {
97 | "mode": "off"
98 | }
99 | },
100 | "mappings": [],
101 | "thresholds": {
102 | "mode": "absolute",
103 | "steps": [
104 | {
105 | "color": "green",
106 | "value": null
107 | },
108 | {
109 | "color": "red",
110 | "value": 80
111 | }
112 | ]
113 | }
114 | },
115 | "overrides": []
116 | },
117 | "gridPos": {
118 | "h": 9,
119 | "w": 12,
120 | "x": 0,
121 | "y": 0
122 | },
123 | "id": 2,
124 | "options": {
125 | "legend": {
126 | "calcs": [],
127 | "displayMode": "list",
128 | "placement": "bottom"
129 | },
130 |
"tooltip": { 131 | "mode": "single", 132 | "sort": "none" 133 | } 134 | }, 135 | "targets": [ 136 | { 137 | "datasource": { 138 | "type": "prometheus", 139 | "uid": "${DS_PROMETHEUS}" 140 | }, 141 | "editorMode": "builder", 142 | "expr": "sum by(instance) (irate(node_cpu_seconds_total{instance=\"node-exporter:9100\"}[24h]))", 143 | "legendFormat": "__auto", 144 | "range": true, 145 | "refId": "A" 146 | } 147 | ], 148 | "title": "CPU usage", 149 | "type": "timeseries" 150 | } 151 | ], 152 | "schemaVersion": 36, 153 | "style": "dark", 154 | "tags": [], 155 | "templating": { 156 | "list": [] 157 | }, 158 | "time": { 159 | "from": "now-15m", 160 | "to": "now" 161 | }, 162 | "timepicker": {}, 163 | "timezone": "", 164 | "title": "System metrics with prediction", 165 | "uid": "kPVXv9zVz", 166 | "version": 1, 167 | "weekStart": "" 168 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | # xforecast
3 |
4 |
5 | Xforecast, a lightweight, realtime, plug-and-play tool for predictive analytics
6 |
7 |
8 |
9 | [Test badge] [Coverage badge] [Build badge]
10 |
11 |
12 |
13 |
14 |
15 |
16 |
18 | 19 | --- 20 | 21 | ## Overview 22 | xforecast is realtime predictive tool which could be used for short term data predictions. 23 | xforecast can be easily configured to learn multiple data streams for shorter time period(less than 24 hrs) and predict the data points in future. 24 | 25 | ## How it works? 26 | xforecast is an application written in Python. It can be run in a container and connect to your timeseries database to read the data points and write back the predicted data points. Currently xforecast supports prometheus and influxdb. 27 | 28 | ## How to run? 29 | You can start the application in 2 ways, either from source code or with docker and docker-compose. Running xforecast is easier with docker to get you started. 30 | First we need to edit the configuration. Below is a sample config which predict `mem_usage` of linux server. 31 | 32 | ``` 33 | metrics: 34 | - name: memory_usage #metric name in prometheus 35 | data_store : 36 | name : prometheus 37 | url: http://host.docker.internal:9000 38 | start_time: '2022-09-09T12:49:00.000Z' 39 | end_time: '2022-09-09T12:50:00.000Z' 40 | query: 100 - ((node_memory_MemAvailable_bytes{instance="node-exporter:9100"} * 100) / node_memory_MemTotal_bytes{instance="node-exporter:9100"}) 41 | forecast_every: 60 #At what interval the app do the predictions 42 | forecast_basedon: 60 #Forecast based on past how many data points 43 | write_back_metric: forecast_mem_usage #Where should it write back the metrics 44 | models : 45 | model_name: prophet 46 | hyperparameters: 47 | changepoint_prior_scale : 0.05 #determines the flexibility of the trend changes 48 | seasonality_prior_scale : 10 #determines the flexibility of the seasonality changes 49 | holidays_prior_scale : 10 #determines the flexibiity to fit the holidays 50 | changepoint_range : 0.8 #proportion of the history where the trend changes are applied 51 | seasonality_mode : additive #whether the mode of seasonality is additive or multiplicative 52 | - name: cpu_usage #metric name ininfluxdb 53 | data_store : 54 | name : influxdb 55 | url: 192.168.1.9 56 | port: 8086 57 | user : admin 58 | pass : admin 59 | db_name : telegraf 60 | measurement : cpu 61 | start_time: '2022-09-14 11:19:00' 62 | end_time: '2022-09-14 11:20:00' 63 | query: SELECT mean("usage_idle") *-1 +100 FROM "autogen"."cpu" WHERE ("host" = 'ip-172-31-31-81') AND time >= '2022-09-14 11:19:00' AND time <= '2022-09-14 11:20:00' GROUP BY time(10s) 64 | forecast_every: 60 #At what interval the app do the predictions 65 | forecast_basedon: 60 #Forecast based on past how many data points 66 | write_back_metric: forecast_cpu_use #Where should it write back the metrics 67 | models : 68 | model_name: prophet 69 | hyperparameters: 70 | changepoint_prior_scale : 0.05 #determines the flexibility of the trend changes 71 | seasonality_prior_scale : 10 #determines the flexibility of the seasonality changes 72 | holidays_prior_scale : 10 #determines the flexibiity to fit the holidays 73 | changepoint_range : 0.8 #proportion of the history where the trend changes are applied 74 | seasonality_mode : additive #whether the mode of seasonality is additive or multiplicative 75 | ``` 76 | 77 | Once you have created the above configuration file, you can start the forecaster by running 78 | 79 | ``` 80 | docker-compose up -d 81 | ``` 82 | 83 | As a next step you can create dashboards in grafana or your favourite visualisation tool. The predicted datapoints of the metrics can be found at the valuee of `write_back_metric` configuration. 
84 |
85 | If you have multiple metrics to forecast, you can append the details of those metrics to the configuration file.
86 |
87 | ## Feature Roadmap
88 | - Support for multiple forecasting ML models
89 | - Support for auto-ml to automatically decide the right model for each metric
90 | - Web dashboard to create metric predictions and monitor the prediction accuracy and its health
91 | -------------------------------------------------------------------------------- /main.py: --------------------------------------------------------------------------------
1 | import yaml
2 | import asyncio
3 | import time
4 | from datetime import datetime,timedelta,timezone
5 | from packages.datasources.logger import *
6 | from packages.helper.fit_and_predict import *
7 | from os.path import exists
8 | from yaml.loader import SafeLoader
9 |
10 | async def main():
11 | """Reads the configuration file and creates a metric list"""
12 |
13 |
14 | with open('./config.yaml') as f:
15 | data = yaml.load(f, Loader=SafeLoader)
16 | #print(data)
17 | logger("Reading configuration","info")
18 |
19 | metric_list = []
20 | for metric in data['metrics']:
21 | metric_list.append(metric)
22 | await forecast(metric_list)
23 |
24 | async def predict_every(metric_name,data_store,start_time,end_time,db_query,write_back_metric,forecast_every,forecast_basedon,model):
25 | """Calls the fit_and_predict function at the required intervals
26 |
27 | Parameters
28 | ----------
29 | metric_name : metric name in database
30 | data_store : dictionary containing details of the database used for query
31 | start_time : start time for the database query
32 | end_time : end time for the database query
33 | db_query : database query
34 | write_back_metric : name of the predicted/written metric
35 | forecast_every: interval in seconds between predictions
36 | forecast_basedon: how many seconds of past data the forecast is based on
37 | model: dictionary containing the model name and its hyperparameters for tuning
38 |
39 | """
40 | if model['model_name'] == 'prophet':
41 | n=0
42 | prev_stime = start_time
43 | prev_etime = end_time
44 | while True:
45 | periods=(forecast_every/60)
46 | periods = int(periods)
47 | #print(periods)
48 | file_exists = exists('./packages/models/'+metric_name+'.json')
49 | if file_exists:
50 | old_model_loc = './packages/models/'+metric_name+'.json'
51 | if n>0:
52 | #print("2nd")
53 | if data_store['name'] == 'prometheus':
54 | end_time = int(time.time())
55 | start_time = end_time - (forecast_basedon)
56 | elif data_store['name'] == 'influxdb':
57 | end_time = datetime.utcnow()
58 | end_time = end_time.replace(second=0)
59 | t = int(forecast_basedon/60)
60 | start_time = end_time - timedelta(minutes=t)
61 | start_time = start_time.strftime('%Y-%m-%d %H:%M:%S')
62 | end_time = end_time.strftime('%Y-%m-%d %H:%M:%S')
63 | await fit_and_predict(metric_name,data_store,start_time,end_time,db_query,write_back_metric,model,prev_stime,prev_etime,periods=periods,frequency='60s',old_model_loc=old_model_loc)
64 | else:
65 | #print("og")
66 | if data_store['name'] == 'prometheus':
67 | start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
68 | end_time = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
69 | start_time = int(start_time.replace(tzinfo=timezone.utc).timestamp())
70 | end_time = int(end_time.replace(tzinfo=timezone.utc).timestamp())
71 | await fit_and_predict(metric_name,data_store,start_time,end_time,db_query,write_back_metric,model,prev_stime,prev_etime,periods=periods,frequency='60s',old_model_loc=None)
72
| n+=1
73 | await asyncio.sleep((forecast_every))
74 |
75 | async def forecast(metric_list):
76 | """Creates a tuple of coroutines and runs them with asyncio.gather;
77 | calls itself recursively if there is an exception.
78 |
79 | Parameters
80 | ----------
81 | metric_list: A list of dictionaries containing metric details
82 |
83 | """
84 | while True:
85 | #get status of the async functions and restart failed ones
86 | async_params = []
87 | for metric in metric_list:
88 | async_params.append(predict_every(metric['name'],metric['data_store'],metric['start_time'],metric['end_time'],metric['query'],metric['write_back_metric'],metric['forecast_every'],metric['forecast_basedon'],metric['models']))
89 | async_params = tuple(async_params)
90 | g = asyncio.gather(*async_params)
91 | while not g.done():
92 | await asyncio.sleep(1)
93 | try:
94 | result = g.result()
95 | except asyncio.CancelledError:
96 | print("Someone cancelled")
97 | msg="Someone cancelled"
98 | logger(str(msg),"warning")
99 | break
100 | except Exception as e:
101 | print(f"Some error: {e}")
102 | logger("Some error: "+str(e),"error")
103 | break
104 |
105 | await forecast(metric_list)
106 |
107 |
108 | asyncio.run(main()) -------------------------------------------------------------------------------- /packages/helper/fit_and_predict.py: --------------------------------------------------------------------------------
1 | import pandas as pd
2 | import json
3 | from prophet import Prophet
4 | from prophet.serialize import model_to_json, model_from_json
5 | from packages.datasources.prometheus import *
6 | from packages.datasources.influx import *
7 |
8 |
9 | def stan_init(m):
10 | """Retrieve parameters from a trained model.
11 |
12 | Retrieve parameters from a trained model in the format
13 | used to initialize a new Stan model.
14 |
15 | Parameters
16 | ----------
17 | m: A trained model of the Prophet class.
18 |
19 | Returns
20 | -------
21 | A dictionary containing the retrieved parameters of m.
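Passing these values via the init keyword when refitting warm-starts the optimisation, which is much faster than fitting from scratch when only a few new data points have been added; note that if the number of changepoints differs between the old and new fit, the retrieved delta will have the wrong size and the warm start can fail.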
22 |
23 | """
24 | res = {}
25 | for pname in ['k', 'm', 'sigma_obs']:
26 | res[pname] = m.params[pname][0][0]
27 | for pname in ['delta', 'beta']:
28 | res[pname] = m.params[pname][0]
29 | return res
30 |
31 | async def fit_and_predict(metric_name,data_store,start_time,end_time,db_query,write_back_metric,model,prev_stime,prev_etime,periods=1,frequency='60s',old_model_loc=None,new_model_loc='./serialized_model.json',test=False):
32 | """Predicts the values according to the data points received.
33 |
34 | Parameters
35 | ----------
36 | metric_name : metric name in the data store
37 | data_store : dictionary containing details of the database used for query
38 | start_time : start time for the database query
39 | end_time : end time for the database query
40 | db_query : database query
41 | write_back_metric : name of the predicted/written metric
42 | model: dictionary containing the model name and its hyperparameters for tuning
43 | periods : number of data points predicted
44 | frequency : interval between predicted data points, as a pandas frequency string (e.g. '60s')
45 | old_model_loc : location of the trained model
46 | new_model_loc : location where the newly trained model should be saved
47 |
48 | """
49 |
50 | response = {}
51 | old_model = None
52 | prophet_model = None
53 | new_model_loc = './packages/models/'+metric_name+'.json' # overrides the default location passed in
54 |
55 | if test == False:
56 | if data_store['name'] == 'prometheus':
57 | data_for_training = get_data_from_prometheus(db_query,start_time,end_time,data_store['url'])
58 | elif data_store['name'] == "influxdb":
59 | data_for_training = get_data_from_influxdb(data_store,start_time,end_time,prev_stime,prev_etime,db_query)
60 | else:
61 | data_for_training = {"Time": [datetime(2023, 9, 12, 8, 53), datetime(2022, 9, 12, 8, 53, 10), datetime(2022, 9, 12, 8, 53, 20), datetime(2022, 9, 12, 8, 53, 30), datetime(2022, 9, 12, 8, 53, 40), datetime(2022, 9, 12, 8, 53, 50), datetime(2022, 9, 12, 8, 54)], "y": [50.87687511613649, 50.57760243944724, 50.50485283113489, 50.664869957894595, 50.527145382764374, 50.48463651916247, 50.54480966540953]}
62 |
63 |
64 | df={}
65 | df['Time'] = pd.to_datetime(data_for_training['Time'], format='%d/%m/%y %H:%M:%S')
66 | df['ds'] = df['Time']
67 | df['y'] = data_for_training['y']
68 |
69 | params = model["hyperparameters"]
70 |
71 | df=pd.DataFrame(df)
72 |
73 | #print(df.shape)
74 | #print(df.head())
75 | try:
76 |
77 | if old_model_loc is not None:
78 | with open(old_model_loc, 'r') as fin:
79 | old_model = model_from_json(fin.read()) # Load model
80 |
81 | logger("Retraining ML model","info")
82 | prophet_model = Prophet(changepoint_prior_scale=params["changepoint_prior_scale"],seasonality_prior_scale=params["seasonality_prior_scale"],holidays_prior_scale=params["holidays_prior_scale"],changepoint_range=params["changepoint_range"],seasonality_mode=params["seasonality_mode"],).fit(df,init=stan_init(old_model))
83 | else:
84 | logger("Training ML model","info")
85 | prophet_model = Prophet(changepoint_prior_scale=params["changepoint_prior_scale"],seasonality_prior_scale=params["seasonality_prior_scale"],holidays_prior_scale=params["holidays_prior_scale"],changepoint_range=params["changepoint_range"],seasonality_mode=params["seasonality_mode"]).fit(df)
86 | #if old_model_loc == None:
87 | with open(new_model_loc, 'w') as fout:
88 | fout.write(model_to_json(prophet_model)) # Save model
89 | logger("Predicting future data points","info")
90 | future_df = prophet_model.make_future_dataframe(periods=periods, freq=frequency)
91 | fcst = prophet_model.predict(future_df)
92 | fcst = fcst[-(periods):]
93 |
response['status'] = 'success'
94 | response['model_location'] = new_model_loc
95 | response['yhat'] = fcst['yhat']
96 | response['yhat_lower'] = fcst['yhat_lower']
97 | response['yhat_upper'] = fcst['yhat_upper']
98 | response['ds'] = fcst['ds']
99 |
100 | except Exception as e:
101 | print(e)
102 | response['status'] = 'failure'
103 | logger(str(e),"error")
104 | #print(response)
105 | if response['status'] == 'failure': # nothing to write back if training/prediction failed
106 | return response
107 | data_to_write_yhatlower = response['yhat_lower'].to_dict()
108 | data_to_write_yhatupper = response['yhat_upper'].to_dict()
109 | data_to_write_yhat = response['yhat'].to_dict()
110 | data_to_write_tim = response['ds'].to_dict()
111 | if test == False:
112 | for elements in data_to_write_tim:
113 | if data_store['name'] == "prometheus":
114 | write_to_prometheus(data_to_write_yhat[elements],data_to_write_tim[elements],write_back_metric+'_yhat',data_store['url'])
115 | write_to_prometheus(data_to_write_yhatlower[elements],data_to_write_tim[elements],write_back_metric+'_yhat_lower',data_store['url'])
116 | write_to_prometheus(data_to_write_yhatupper[elements],data_to_write_tim[elements],write_back_metric+'_yhat_upper',data_store['url'])
117 | elif data_store['name'] == "influxdb":
118 | write_data_to_influxdb(data_to_write_yhat[elements],data_to_write_tim[elements],write_back_metric+'_yhat',data_store)
119 | write_data_to_influxdb(data_to_write_yhatlower[elements],data_to_write_tim[elements],write_back_metric+'_yhat_lower',data_store)
120 | write_data_to_influxdb(data_to_write_yhatupper[elements],data_to_write_tim[elements],write_back_metric+'_yhat_upper',data_store)
121 | else:
122 | assert data_to_write_yhat == {7: 50.824059424195525}
123 | -------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Xmigrate consultancy services private limited. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /test/units/prometheus_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
3 | # source: prometheus.proto 4 | 5 | import sys 6 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 7 | from google.protobuf import descriptor as _descriptor 8 | from google.protobuf import message as _message 9 | from google.protobuf import reflection as _reflection 10 | from google.protobuf import symbol_database as _symbol_database 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='prometheus.proto', 20 | package='prometheus', 21 | syntax='proto3', 22 | serialized_options=_b('Z\006prompb'), 23 | serialized_pb=_b('\n\x10prometheus.proto\x12\nprometheus\":\n\x0cWriteRequest\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"1\n\x0bReadRequest\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult\"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints\"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"T\n\nTimeSeries\x12!\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.Label\x12#\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.Sample\"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"+\n\x06Labels\x12!\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.Label\"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03\"L\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x42\x08Z\x06prompbb\x06proto3') 24 | ) 25 | 26 | 27 | 28 | _LABELMATCHER_TYPE = _descriptor.EnumDescriptor( 29 | name='Type', 30 | full_name='prometheus.LabelMatcher.Type', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | values=[ 34 | _descriptor.EnumValueDescriptor( 35 | name='EQ', index=0, number=0, 36 | serialized_options=None, 37 | type=None), 38 | _descriptor.EnumValueDescriptor( 39 | name='NEQ', index=1, number=1, 40 | serialized_options=None, 41 | type=None), 42 | _descriptor.EnumValueDescriptor( 43 | name='RE', index=2, number=2, 44 | serialized_options=None, 45 | type=None), 46 | _descriptor.EnumValueDescriptor( 47 | name='NRE', index=3, number=3, 48 | serialized_options=None, 49 | type=None), 50 | ], 51 | containing_type=None, 52 | serialized_options=None, 53 | serialized_start=710, 54 | serialized_end=750, 55 | ) 56 | _sym_db.RegisterEnumDescriptor(_LABELMATCHER_TYPE) 57 | 58 | 59 | _WRITEREQUEST = _descriptor.Descriptor( 60 | name='WriteRequest', 61 | full_name='prometheus.WriteRequest', 62 | filename=None, 63 | file=DESCRIPTOR, 64 | containing_type=None, 65 | fields=[ 66 | _descriptor.FieldDescriptor( 67 | name='timeseries', full_name='prometheus.WriteRequest.timeseries', index=0, 68 | number=1, type=11, cpp_type=10, label=3, 69 | has_default_value=False, default_value=[], 70 | message_type=None, enum_type=None, containing_type=None, 71 | is_extension=False, extension_scope=None, 
72 | serialized_options=None, file=DESCRIPTOR), 73 | ], 74 | extensions=[ 75 | ], 76 | nested_types=[], 77 | enum_types=[ 78 | ], 79 | serialized_options=None, 80 | is_extendable=False, 81 | syntax='proto3', 82 | extension_ranges=[], 83 | oneofs=[ 84 | ], 85 | serialized_start=32, 86 | serialized_end=90, 87 | ) 88 | 89 | 90 | _READREQUEST = _descriptor.Descriptor( 91 | name='ReadRequest', 92 | full_name='prometheus.ReadRequest', 93 | filename=None, 94 | file=DESCRIPTOR, 95 | containing_type=None, 96 | fields=[ 97 | _descriptor.FieldDescriptor( 98 | name='queries', full_name='prometheus.ReadRequest.queries', index=0, 99 | number=1, type=11, cpp_type=10, label=3, 100 | has_default_value=False, default_value=[], 101 | message_type=None, enum_type=None, containing_type=None, 102 | is_extension=False, extension_scope=None, 103 | serialized_options=None, file=DESCRIPTOR), 104 | ], 105 | extensions=[ 106 | ], 107 | nested_types=[], 108 | enum_types=[ 109 | ], 110 | serialized_options=None, 111 | is_extendable=False, 112 | syntax='proto3', 113 | extension_ranges=[], 114 | oneofs=[ 115 | ], 116 | serialized_start=92, 117 | serialized_end=141, 118 | ) 119 | 120 | 121 | _READRESPONSE = _descriptor.Descriptor( 122 | name='ReadResponse', 123 | full_name='prometheus.ReadResponse', 124 | filename=None, 125 | file=DESCRIPTOR, 126 | containing_type=None, 127 | fields=[ 128 | _descriptor.FieldDescriptor( 129 | name='results', full_name='prometheus.ReadResponse.results', index=0, 130 | number=1, type=11, cpp_type=10, label=3, 131 | has_default_value=False, default_value=[], 132 | message_type=None, enum_type=None, containing_type=None, 133 | is_extension=False, extension_scope=None, 134 | serialized_options=None, file=DESCRIPTOR), 135 | ], 136 | extensions=[ 137 | ], 138 | nested_types=[], 139 | enum_types=[ 140 | ], 141 | serialized_options=None, 142 | is_extendable=False, 143 | syntax='proto3', 144 | extension_ranges=[], 145 | oneofs=[ 146 | ], 147 | serialized_start=143, 148 | serialized_end=199, 149 | ) 150 | 151 | 152 | _QUERY = _descriptor.Descriptor( 153 | name='Query', 154 | full_name='prometheus.Query', 155 | filename=None, 156 | file=DESCRIPTOR, 157 | containing_type=None, 158 | fields=[ 159 | _descriptor.FieldDescriptor( 160 | name='start_timestamp_ms', full_name='prometheus.Query.start_timestamp_ms', index=0, 161 | number=1, type=3, cpp_type=2, label=1, 162 | has_default_value=False, default_value=0, 163 | message_type=None, enum_type=None, containing_type=None, 164 | is_extension=False, extension_scope=None, 165 | serialized_options=None, file=DESCRIPTOR), 166 | _descriptor.FieldDescriptor( 167 | name='end_timestamp_ms', full_name='prometheus.Query.end_timestamp_ms', index=1, 168 | number=2, type=3, cpp_type=2, label=1, 169 | has_default_value=False, default_value=0, 170 | message_type=None, enum_type=None, containing_type=None, 171 | is_extension=False, extension_scope=None, 172 | serialized_options=None, file=DESCRIPTOR), 173 | _descriptor.FieldDescriptor( 174 | name='matchers', full_name='prometheus.Query.matchers', index=2, 175 | number=3, type=11, cpp_type=10, label=3, 176 | has_default_value=False, default_value=[], 177 | message_type=None, enum_type=None, containing_type=None, 178 | is_extension=False, extension_scope=None, 179 | serialized_options=None, file=DESCRIPTOR), 180 | _descriptor.FieldDescriptor( 181 | name='hints', full_name='prometheus.Query.hints', index=3, 182 | number=4, type=11, cpp_type=10, label=1, 183 | has_default_value=False, default_value=None, 184 | 
message_type=None, enum_type=None, containing_type=None, 185 | is_extension=False, extension_scope=None, 186 | serialized_options=None, file=DESCRIPTOR), 187 | ], 188 | extensions=[ 189 | ], 190 | nested_types=[], 191 | enum_types=[ 192 | ], 193 | serialized_options=None, 194 | is_extendable=False, 195 | syntax='proto3', 196 | extension_ranges=[], 197 | oneofs=[ 198 | ], 199 | serialized_start=202, 200 | serialized_end=345, 201 | ) 202 | 203 | 204 | _QUERYRESULT = _descriptor.Descriptor( 205 | name='QueryResult', 206 | full_name='prometheus.QueryResult', 207 | filename=None, 208 | file=DESCRIPTOR, 209 | containing_type=None, 210 | fields=[ 211 | _descriptor.FieldDescriptor( 212 | name='timeseries', full_name='prometheus.QueryResult.timeseries', index=0, 213 | number=1, type=11, cpp_type=10, label=3, 214 | has_default_value=False, default_value=[], 215 | message_type=None, enum_type=None, containing_type=None, 216 | is_extension=False, extension_scope=None, 217 | serialized_options=None, file=DESCRIPTOR), 218 | ], 219 | extensions=[ 220 | ], 221 | nested_types=[], 222 | enum_types=[ 223 | ], 224 | serialized_options=None, 225 | is_extendable=False, 226 | syntax='proto3', 227 | extension_ranges=[], 228 | oneofs=[ 229 | ], 230 | serialized_start=347, 231 | serialized_end=404, 232 | ) 233 | 234 | 235 | _SAMPLE = _descriptor.Descriptor( 236 | name='Sample', 237 | full_name='prometheus.Sample', 238 | filename=None, 239 | file=DESCRIPTOR, 240 | containing_type=None, 241 | fields=[ 242 | _descriptor.FieldDescriptor( 243 | name='value', full_name='prometheus.Sample.value', index=0, 244 | number=1, type=1, cpp_type=5, label=1, 245 | has_default_value=False, default_value=float(0), 246 | message_type=None, enum_type=None, containing_type=None, 247 | is_extension=False, extension_scope=None, 248 | serialized_options=None, file=DESCRIPTOR), 249 | _descriptor.FieldDescriptor( 250 | name='timestamp', full_name='prometheus.Sample.timestamp', index=1, 251 | number=2, type=3, cpp_type=2, label=1, 252 | has_default_value=False, default_value=0, 253 | message_type=None, enum_type=None, containing_type=None, 254 | is_extension=False, extension_scope=None, 255 | serialized_options=None, file=DESCRIPTOR), 256 | ], 257 | extensions=[ 258 | ], 259 | nested_types=[], 260 | enum_types=[ 261 | ], 262 | serialized_options=None, 263 | is_extendable=False, 264 | syntax='proto3', 265 | extension_ranges=[], 266 | oneofs=[ 267 | ], 268 | serialized_start=406, 269 | serialized_end=448, 270 | ) 271 | 272 | 273 | _TIMESERIES = _descriptor.Descriptor( 274 | name='TimeSeries', 275 | full_name='prometheus.TimeSeries', 276 | filename=None, 277 | file=DESCRIPTOR, 278 | containing_type=None, 279 | fields=[ 280 | _descriptor.FieldDescriptor( 281 | name='labels', full_name='prometheus.TimeSeries.labels', index=0, 282 | number=1, type=11, cpp_type=10, label=3, 283 | has_default_value=False, default_value=[], 284 | message_type=None, enum_type=None, containing_type=None, 285 | is_extension=False, extension_scope=None, 286 | serialized_options=None, file=DESCRIPTOR), 287 | _descriptor.FieldDescriptor( 288 | name='samples', full_name='prometheus.TimeSeries.samples', index=1, 289 | number=2, type=11, cpp_type=10, label=3, 290 | has_default_value=False, default_value=[], 291 | message_type=None, enum_type=None, containing_type=None, 292 | is_extension=False, extension_scope=None, 293 | serialized_options=None, file=DESCRIPTOR), 294 | ], 295 | extensions=[ 296 | ], 297 | nested_types=[], 298 | enum_types=[ 299 | ], 300 | 
serialized_options=None, 301 | is_extendable=False, 302 | syntax='proto3', 303 | extension_ranges=[], 304 | oneofs=[ 305 | ], 306 | serialized_start=450, 307 | serialized_end=534, 308 | ) 309 | 310 | 311 | _LABEL = _descriptor.Descriptor( 312 | name='Label', 313 | full_name='prometheus.Label', 314 | filename=None, 315 | file=DESCRIPTOR, 316 | containing_type=None, 317 | fields=[ 318 | _descriptor.FieldDescriptor( 319 | name='name', full_name='prometheus.Label.name', index=0, 320 | number=1, type=9, cpp_type=9, label=1, 321 | has_default_value=False, default_value=_b("").decode('utf-8'), 322 | message_type=None, enum_type=None, containing_type=None, 323 | is_extension=False, extension_scope=None, 324 | serialized_options=None, file=DESCRIPTOR), 325 | _descriptor.FieldDescriptor( 326 | name='value', full_name='prometheus.Label.value', index=1, 327 | number=2, type=9, cpp_type=9, label=1, 328 | has_default_value=False, default_value=_b("").decode('utf-8'), 329 | message_type=None, enum_type=None, containing_type=None, 330 | is_extension=False, extension_scope=None, 331 | serialized_options=None, file=DESCRIPTOR), 332 | ], 333 | extensions=[ 334 | ], 335 | nested_types=[], 336 | enum_types=[ 337 | ], 338 | serialized_options=None, 339 | is_extendable=False, 340 | syntax='proto3', 341 | extension_ranges=[], 342 | oneofs=[ 343 | ], 344 | serialized_start=536, 345 | serialized_end=572, 346 | ) 347 | 348 | 349 | _LABELS = _descriptor.Descriptor( 350 | name='Labels', 351 | full_name='prometheus.Labels', 352 | filename=None, 353 | file=DESCRIPTOR, 354 | containing_type=None, 355 | fields=[ 356 | _descriptor.FieldDescriptor( 357 | name='labels', full_name='prometheus.Labels.labels', index=0, 358 | number=1, type=11, cpp_type=10, label=3, 359 | has_default_value=False, default_value=[], 360 | message_type=None, enum_type=None, containing_type=None, 361 | is_extension=False, extension_scope=None, 362 | serialized_options=None, file=DESCRIPTOR), 363 | ], 364 | extensions=[ 365 | ], 366 | nested_types=[], 367 | enum_types=[ 368 | ], 369 | serialized_options=None, 370 | is_extendable=False, 371 | syntax='proto3', 372 | extension_ranges=[], 373 | oneofs=[ 374 | ], 375 | serialized_start=574, 376 | serialized_end=617, 377 | ) 378 | 379 | 380 | _LABELMATCHER = _descriptor.Descriptor( 381 | name='LabelMatcher', 382 | full_name='prometheus.LabelMatcher', 383 | filename=None, 384 | file=DESCRIPTOR, 385 | containing_type=None, 386 | fields=[ 387 | _descriptor.FieldDescriptor( 388 | name='type', full_name='prometheus.LabelMatcher.type', index=0, 389 | number=1, type=14, cpp_type=8, label=1, 390 | has_default_value=False, default_value=0, 391 | message_type=None, enum_type=None, containing_type=None, 392 | is_extension=False, extension_scope=None, 393 | serialized_options=None, file=DESCRIPTOR), 394 | _descriptor.FieldDescriptor( 395 | name='name', full_name='prometheus.LabelMatcher.name', index=1, 396 | number=2, type=9, cpp_type=9, label=1, 397 | has_default_value=False, default_value=_b("").decode('utf-8'), 398 | message_type=None, enum_type=None, containing_type=None, 399 | is_extension=False, extension_scope=None, 400 | serialized_options=None, file=DESCRIPTOR), 401 | _descriptor.FieldDescriptor( 402 | name='value', full_name='prometheus.LabelMatcher.value', index=2, 403 | number=3, type=9, cpp_type=9, label=1, 404 | has_default_value=False, default_value=_b("").decode('utf-8'), 405 | message_type=None, enum_type=None, containing_type=None, 406 | is_extension=False, extension_scope=None, 407 | 
serialized_options=None, file=DESCRIPTOR), 408 | ], 409 | extensions=[ 410 | ], 411 | nested_types=[], 412 | enum_types=[ 413 | _LABELMATCHER_TYPE, 414 | ], 415 | serialized_options=None, 416 | is_extendable=False, 417 | syntax='proto3', 418 | extension_ranges=[], 419 | oneofs=[ 420 | ], 421 | serialized_start=620, 422 | serialized_end=750, 423 | ) 424 | 425 | 426 | _READHINTS = _descriptor.Descriptor( 427 | name='ReadHints', 428 | full_name='prometheus.ReadHints', 429 | filename=None, 430 | file=DESCRIPTOR, 431 | containing_type=None, 432 | fields=[ 433 | _descriptor.FieldDescriptor( 434 | name='step_ms', full_name='prometheus.ReadHints.step_ms', index=0, 435 | number=1, type=3, cpp_type=2, label=1, 436 | has_default_value=False, default_value=0, 437 | message_type=None, enum_type=None, containing_type=None, 438 | is_extension=False, extension_scope=None, 439 | serialized_options=None, file=DESCRIPTOR), 440 | _descriptor.FieldDescriptor( 441 | name='func', full_name='prometheus.ReadHints.func', index=1, 442 | number=2, type=9, cpp_type=9, label=1, 443 | has_default_value=False, default_value=_b("").decode('utf-8'), 444 | message_type=None, enum_type=None, containing_type=None, 445 | is_extension=False, extension_scope=None, 446 | serialized_options=None, file=DESCRIPTOR), 447 | _descriptor.FieldDescriptor( 448 | name='start_ms', full_name='prometheus.ReadHints.start_ms', index=2, 449 | number=3, type=3, cpp_type=2, label=1, 450 | has_default_value=False, default_value=0, 451 | message_type=None, enum_type=None, containing_type=None, 452 | is_extension=False, extension_scope=None, 453 | serialized_options=None, file=DESCRIPTOR), 454 | _descriptor.FieldDescriptor( 455 | name='end_ms', full_name='prometheus.ReadHints.end_ms', index=3, 456 | number=4, type=3, cpp_type=2, label=1, 457 | has_default_value=False, default_value=0, 458 | message_type=None, enum_type=None, containing_type=None, 459 | is_extension=False, extension_scope=None, 460 | serialized_options=None, file=DESCRIPTOR), 461 | ], 462 | extensions=[ 463 | ], 464 | nested_types=[], 465 | enum_types=[ 466 | ], 467 | serialized_options=None, 468 | is_extendable=False, 469 | syntax='proto3', 470 | extension_ranges=[], 471 | oneofs=[ 472 | ], 473 | serialized_start=752, 474 | serialized_end=828, 475 | ) 476 | 477 | _WRITEREQUEST.fields_by_name['timeseries'].message_type = _TIMESERIES 478 | _READREQUEST.fields_by_name['queries'].message_type = _QUERY 479 | _READRESPONSE.fields_by_name['results'].message_type = _QUERYRESULT 480 | _QUERY.fields_by_name['matchers'].message_type = _LABELMATCHER 481 | _QUERY.fields_by_name['hints'].message_type = _READHINTS 482 | _QUERYRESULT.fields_by_name['timeseries'].message_type = _TIMESERIES 483 | _TIMESERIES.fields_by_name['labels'].message_type = _LABEL 484 | _TIMESERIES.fields_by_name['samples'].message_type = _SAMPLE 485 | _LABELS.fields_by_name['labels'].message_type = _LABEL 486 | _LABELMATCHER.fields_by_name['type'].enum_type = _LABELMATCHER_TYPE 487 | _LABELMATCHER_TYPE.containing_type = _LABELMATCHER 488 | DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST 489 | DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST 490 | DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE 491 | DESCRIPTOR.message_types_by_name['Query'] = _QUERY 492 | DESCRIPTOR.message_types_by_name['QueryResult'] = _QUERYRESULT 493 | DESCRIPTOR.message_types_by_name['Sample'] = _SAMPLE 494 | DESCRIPTOR.message_types_by_name['TimeSeries'] = _TIMESERIES 495 | 
DESCRIPTOR.message_types_by_name['Label'] = _LABEL 496 | DESCRIPTOR.message_types_by_name['Labels'] = _LABELS 497 | DESCRIPTOR.message_types_by_name['LabelMatcher'] = _LABELMATCHER 498 | DESCRIPTOR.message_types_by_name['ReadHints'] = _READHINTS 499 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 500 | 501 | WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict( 502 | DESCRIPTOR = _WRITEREQUEST, 503 | __module__ = 'prometheus_pb2' 504 | # @@protoc_insertion_point(class_scope:prometheus.WriteRequest) 505 | )) 506 | _sym_db.RegisterMessage(WriteRequest) 507 | 508 | ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict( 509 | DESCRIPTOR = _READREQUEST, 510 | __module__ = 'prometheus_pb2' 511 | # @@protoc_insertion_point(class_scope:prometheus.ReadRequest) 512 | )) 513 | _sym_db.RegisterMessage(ReadRequest) 514 | 515 | ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict( 516 | DESCRIPTOR = _READRESPONSE, 517 | __module__ = 'prometheus_pb2' 518 | # @@protoc_insertion_point(class_scope:prometheus.ReadResponse) 519 | )) 520 | _sym_db.RegisterMessage(ReadResponse) 521 | 522 | Query = _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), dict( 523 | DESCRIPTOR = _QUERY, 524 | __module__ = 'prometheus_pb2' 525 | # @@protoc_insertion_point(class_scope:prometheus.Query) 526 | )) 527 | _sym_db.RegisterMessage(Query) 528 | 529 | QueryResult = _reflection.GeneratedProtocolMessageType('QueryResult', (_message.Message,), dict( 530 | DESCRIPTOR = _QUERYRESULT, 531 | __module__ = 'prometheus_pb2' 532 | # @@protoc_insertion_point(class_scope:prometheus.QueryResult) 533 | )) 534 | _sym_db.RegisterMessage(QueryResult) 535 | 536 | Sample = _reflection.GeneratedProtocolMessageType('Sample', (_message.Message,), dict( 537 | DESCRIPTOR = _SAMPLE, 538 | __module__ = 'prometheus_pb2' 539 | # @@protoc_insertion_point(class_scope:prometheus.Sample) 540 | )) 541 | _sym_db.RegisterMessage(Sample) 542 | 543 | TimeSeries = _reflection.GeneratedProtocolMessageType('TimeSeries', (_message.Message,), dict( 544 | DESCRIPTOR = _TIMESERIES, 545 | __module__ = 'prometheus_pb2' 546 | # @@protoc_insertion_point(class_scope:prometheus.TimeSeries) 547 | )) 548 | _sym_db.RegisterMessage(TimeSeries) 549 | 550 | Label = _reflection.GeneratedProtocolMessageType('Label', (_message.Message,), dict( 551 | DESCRIPTOR = _LABEL, 552 | __module__ = 'prometheus_pb2' 553 | # @@protoc_insertion_point(class_scope:prometheus.Label) 554 | )) 555 | _sym_db.RegisterMessage(Label) 556 | 557 | Labels = _reflection.GeneratedProtocolMessageType('Labels', (_message.Message,), dict( 558 | DESCRIPTOR = _LABELS, 559 | __module__ = 'prometheus_pb2' 560 | # @@protoc_insertion_point(class_scope:prometheus.Labels) 561 | )) 562 | _sym_db.RegisterMessage(Labels) 563 | 564 | LabelMatcher = _reflection.GeneratedProtocolMessageType('LabelMatcher', (_message.Message,), dict( 565 | DESCRIPTOR = _LABELMATCHER, 566 | __module__ = 'prometheus_pb2' 567 | # @@protoc_insertion_point(class_scope:prometheus.LabelMatcher) 568 | )) 569 | _sym_db.RegisterMessage(LabelMatcher) 570 | 571 | ReadHints = _reflection.GeneratedProtocolMessageType('ReadHints', (_message.Message,), dict( 572 | DESCRIPTOR = _READHINTS, 573 | __module__ = 'prometheus_pb2' 574 | # @@protoc_insertion_point(class_scope:prometheus.ReadHints) 575 | )) 576 | _sym_db.RegisterMessage(ReadHints) 577 | 578 | 579 | DESCRIPTOR._options = None 580 | # 
@@protoc_insertion_point(module_scope) 581 | -------------------------------------------------------------------------------- /packages/datasources/prometheus_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: prometheus.proto 4 | 5 | import sys 6 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 7 | from google.protobuf import descriptor as _descriptor 8 | from google.protobuf import message as _message 9 | from google.protobuf import reflection as _reflection 10 | from google.protobuf import symbol_database as _symbol_database 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='prometheus.proto', 20 | package='prometheus', 21 | syntax='proto3', 22 | serialized_options=_b('Z\006prompb'), 23 | serialized_pb=_b('\n\x10prometheus.proto\x12\nprometheus\":\n\x0cWriteRequest\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"1\n\x0bReadRequest\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult\"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints\"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"T\n\nTimeSeries\x12!\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.Label\x12#\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.Sample\"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"+\n\x06Labels\x12!\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.Label\"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03\"L\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x42\x08Z\x06prompbb\x06proto3') 24 | ) 25 | 26 | 27 | 28 | _LABELMATCHER_TYPE = _descriptor.EnumDescriptor( 29 | name='Type', 30 | full_name='prometheus.LabelMatcher.Type', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | values=[ 34 | _descriptor.EnumValueDescriptor( 35 | name='EQ', index=0, number=0, 36 | serialized_options=None, 37 | type=None), 38 | _descriptor.EnumValueDescriptor( 39 | name='NEQ', index=1, number=1, 40 | serialized_options=None, 41 | type=None), 42 | _descriptor.EnumValueDescriptor( 43 | name='RE', index=2, number=2, 44 | serialized_options=None, 45 | type=None), 46 | _descriptor.EnumValueDescriptor( 47 | name='NRE', index=3, number=3, 48 | serialized_options=None, 49 | type=None), 50 | ], 51 | containing_type=None, 52 | serialized_options=None, 53 | serialized_start=710, 54 | serialized_end=750, 55 | ) 56 | _sym_db.RegisterEnumDescriptor(_LABELMATCHER_TYPE) 57 | 58 | 59 | _WRITEREQUEST = _descriptor.Descriptor( 60 | name='WriteRequest', 61 | full_name='prometheus.WriteRequest', 62 | filename=None, 63 | file=DESCRIPTOR, 64 | 
containing_type=None, 65 | fields=[ 66 | _descriptor.FieldDescriptor( 67 | name='timeseries', full_name='prometheus.WriteRequest.timeseries', index=0, 68 | number=1, type=11, cpp_type=10, label=3, 69 | has_default_value=False, default_value=[], 70 | message_type=None, enum_type=None, containing_type=None, 71 | is_extension=False, extension_scope=None, 72 | serialized_options=None, file=DESCRIPTOR), 73 | ], 74 | extensions=[ 75 | ], 76 | nested_types=[], 77 | enum_types=[ 78 | ], 79 | serialized_options=None, 80 | is_extendable=False, 81 | syntax='proto3', 82 | extension_ranges=[], 83 | oneofs=[ 84 | ], 85 | serialized_start=32, 86 | serialized_end=90, 87 | ) 88 | 89 | 90 | _READREQUEST = _descriptor.Descriptor( 91 | name='ReadRequest', 92 | full_name='prometheus.ReadRequest', 93 | filename=None, 94 | file=DESCRIPTOR, 95 | containing_type=None, 96 | fields=[ 97 | _descriptor.FieldDescriptor( 98 | name='queries', full_name='prometheus.ReadRequest.queries', index=0, 99 | number=1, type=11, cpp_type=10, label=3, 100 | has_default_value=False, default_value=[], 101 | message_type=None, enum_type=None, containing_type=None, 102 | is_extension=False, extension_scope=None, 103 | serialized_options=None, file=DESCRIPTOR), 104 | ], 105 | extensions=[ 106 | ], 107 | nested_types=[], 108 | enum_types=[ 109 | ], 110 | serialized_options=None, 111 | is_extendable=False, 112 | syntax='proto3', 113 | extension_ranges=[], 114 | oneofs=[ 115 | ], 116 | serialized_start=92, 117 | serialized_end=141, 118 | ) 119 | 120 | 121 | _READRESPONSE = _descriptor.Descriptor( 122 | name='ReadResponse', 123 | full_name='prometheus.ReadResponse', 124 | filename=None, 125 | file=DESCRIPTOR, 126 | containing_type=None, 127 | fields=[ 128 | _descriptor.FieldDescriptor( 129 | name='results', full_name='prometheus.ReadResponse.results', index=0, 130 | number=1, type=11, cpp_type=10, label=3, 131 | has_default_value=False, default_value=[], 132 | message_type=None, enum_type=None, containing_type=None, 133 | is_extension=False, extension_scope=None, 134 | serialized_options=None, file=DESCRIPTOR), 135 | ], 136 | extensions=[ 137 | ], 138 | nested_types=[], 139 | enum_types=[ 140 | ], 141 | serialized_options=None, 142 | is_extendable=False, 143 | syntax='proto3', 144 | extension_ranges=[], 145 | oneofs=[ 146 | ], 147 | serialized_start=143, 148 | serialized_end=199, 149 | ) 150 | 151 | 152 | _QUERY = _descriptor.Descriptor( 153 | name='Query', 154 | full_name='prometheus.Query', 155 | filename=None, 156 | file=DESCRIPTOR, 157 | containing_type=None, 158 | fields=[ 159 | _descriptor.FieldDescriptor( 160 | name='start_timestamp_ms', full_name='prometheus.Query.start_timestamp_ms', index=0, 161 | number=1, type=3, cpp_type=2, label=1, 162 | has_default_value=False, default_value=0, 163 | message_type=None, enum_type=None, containing_type=None, 164 | is_extension=False, extension_scope=None, 165 | serialized_options=None, file=DESCRIPTOR), 166 | _descriptor.FieldDescriptor( 167 | name='end_timestamp_ms', full_name='prometheus.Query.end_timestamp_ms', index=1, 168 | number=2, type=3, cpp_type=2, label=1, 169 | has_default_value=False, default_value=0, 170 | message_type=None, enum_type=None, containing_type=None, 171 | is_extension=False, extension_scope=None, 172 | serialized_options=None, file=DESCRIPTOR), 173 | _descriptor.FieldDescriptor( 174 | name='matchers', full_name='prometheus.Query.matchers', index=2, 175 | number=3, type=11, cpp_type=10, label=3, 176 | has_default_value=False, default_value=[], 177 | message_type=None, 
enum_type=None, containing_type=None, 178 | is_extension=False, extension_scope=None, 179 | serialized_options=None, file=DESCRIPTOR), 180 | _descriptor.FieldDescriptor( 181 | name='hints', full_name='prometheus.Query.hints', index=3, 182 | number=4, type=11, cpp_type=10, label=1, 183 | has_default_value=False, default_value=None, 184 | message_type=None, enum_type=None, containing_type=None, 185 | is_extension=False, extension_scope=None, 186 | serialized_options=None, file=DESCRIPTOR), 187 | ], 188 | extensions=[ 189 | ], 190 | nested_types=[], 191 | enum_types=[ 192 | ], 193 | serialized_options=None, 194 | is_extendable=False, 195 | syntax='proto3', 196 | extension_ranges=[], 197 | oneofs=[ 198 | ], 199 | serialized_start=202, 200 | serialized_end=345, 201 | ) 202 | 203 | 204 | _QUERYRESULT = _descriptor.Descriptor( 205 | name='QueryResult', 206 | full_name='prometheus.QueryResult', 207 | filename=None, 208 | file=DESCRIPTOR, 209 | containing_type=None, 210 | fields=[ 211 | _descriptor.FieldDescriptor( 212 | name='timeseries', full_name='prometheus.QueryResult.timeseries', index=0, 213 | number=1, type=11, cpp_type=10, label=3, 214 | has_default_value=False, default_value=[], 215 | message_type=None, enum_type=None, containing_type=None, 216 | is_extension=False, extension_scope=None, 217 | serialized_options=None, file=DESCRIPTOR), 218 | ], 219 | extensions=[ 220 | ], 221 | nested_types=[], 222 | enum_types=[ 223 | ], 224 | serialized_options=None, 225 | is_extendable=False, 226 | syntax='proto3', 227 | extension_ranges=[], 228 | oneofs=[ 229 | ], 230 | serialized_start=347, 231 | serialized_end=404, 232 | ) 233 | 234 | 235 | _SAMPLE = _descriptor.Descriptor( 236 | name='Sample', 237 | full_name='prometheus.Sample', 238 | filename=None, 239 | file=DESCRIPTOR, 240 | containing_type=None, 241 | fields=[ 242 | _descriptor.FieldDescriptor( 243 | name='value', full_name='prometheus.Sample.value', index=0, 244 | number=1, type=1, cpp_type=5, label=1, 245 | has_default_value=False, default_value=float(0), 246 | message_type=None, enum_type=None, containing_type=None, 247 | is_extension=False, extension_scope=None, 248 | serialized_options=None, file=DESCRIPTOR), 249 | _descriptor.FieldDescriptor( 250 | name='timestamp', full_name='prometheus.Sample.timestamp', index=1, 251 | number=2, type=3, cpp_type=2, label=1, 252 | has_default_value=False, default_value=0, 253 | message_type=None, enum_type=None, containing_type=None, 254 | is_extension=False, extension_scope=None, 255 | serialized_options=None, file=DESCRIPTOR), 256 | ], 257 | extensions=[ 258 | ], 259 | nested_types=[], 260 | enum_types=[ 261 | ], 262 | serialized_options=None, 263 | is_extendable=False, 264 | syntax='proto3', 265 | extension_ranges=[], 266 | oneofs=[ 267 | ], 268 | serialized_start=406, 269 | serialized_end=448, 270 | ) 271 | 272 | 273 | _TIMESERIES = _descriptor.Descriptor( 274 | name='TimeSeries', 275 | full_name='prometheus.TimeSeries', 276 | filename=None, 277 | file=DESCRIPTOR, 278 | containing_type=None, 279 | fields=[ 280 | _descriptor.FieldDescriptor( 281 | name='labels', full_name='prometheus.TimeSeries.labels', index=0, 282 | number=1, type=11, cpp_type=10, label=3, 283 | has_default_value=False, default_value=[], 284 | message_type=None, enum_type=None, containing_type=None, 285 | is_extension=False, extension_scope=None, 286 | serialized_options=None, file=DESCRIPTOR), 287 | _descriptor.FieldDescriptor( 288 | name='samples', full_name='prometheus.TimeSeries.samples', index=1, 289 | number=2, type=11, 
cpp_type=10, label=3, 290 | has_default_value=False, default_value=[], 291 | message_type=None, enum_type=None, containing_type=None, 292 | is_extension=False, extension_scope=None, 293 | serialized_options=None, file=DESCRIPTOR), 294 | ], 295 | extensions=[ 296 | ], 297 | nested_types=[], 298 | enum_types=[ 299 | ], 300 | serialized_options=None, 301 | is_extendable=False, 302 | syntax='proto3', 303 | extension_ranges=[], 304 | oneofs=[ 305 | ], 306 | serialized_start=450, 307 | serialized_end=534, 308 | ) 309 | 310 | 311 | _LABEL = _descriptor.Descriptor( 312 | name='Label', 313 | full_name='prometheus.Label', 314 | filename=None, 315 | file=DESCRIPTOR, 316 | containing_type=None, 317 | fields=[ 318 | _descriptor.FieldDescriptor( 319 | name='name', full_name='prometheus.Label.name', index=0, 320 | number=1, type=9, cpp_type=9, label=1, 321 | has_default_value=False, default_value=_b("").decode('utf-8'), 322 | message_type=None, enum_type=None, containing_type=None, 323 | is_extension=False, extension_scope=None, 324 | serialized_options=None, file=DESCRIPTOR), 325 | _descriptor.FieldDescriptor( 326 | name='value', full_name='prometheus.Label.value', index=1, 327 | number=2, type=9, cpp_type=9, label=1, 328 | has_default_value=False, default_value=_b("").decode('utf-8'), 329 | message_type=None, enum_type=None, containing_type=None, 330 | is_extension=False, extension_scope=None, 331 | serialized_options=None, file=DESCRIPTOR), 332 | ], 333 | extensions=[ 334 | ], 335 | nested_types=[], 336 | enum_types=[ 337 | ], 338 | serialized_options=None, 339 | is_extendable=False, 340 | syntax='proto3', 341 | extension_ranges=[], 342 | oneofs=[ 343 | ], 344 | serialized_start=536, 345 | serialized_end=572, 346 | ) 347 | 348 | 349 | _LABELS = _descriptor.Descriptor( 350 | name='Labels', 351 | full_name='prometheus.Labels', 352 | filename=None, 353 | file=DESCRIPTOR, 354 | containing_type=None, 355 | fields=[ 356 | _descriptor.FieldDescriptor( 357 | name='labels', full_name='prometheus.Labels.labels', index=0, 358 | number=1, type=11, cpp_type=10, label=3, 359 | has_default_value=False, default_value=[], 360 | message_type=None, enum_type=None, containing_type=None, 361 | is_extension=False, extension_scope=None, 362 | serialized_options=None, file=DESCRIPTOR), 363 | ], 364 | extensions=[ 365 | ], 366 | nested_types=[], 367 | enum_types=[ 368 | ], 369 | serialized_options=None, 370 | is_extendable=False, 371 | syntax='proto3', 372 | extension_ranges=[], 373 | oneofs=[ 374 | ], 375 | serialized_start=574, 376 | serialized_end=617, 377 | ) 378 | 379 | 380 | _LABELMATCHER = _descriptor.Descriptor( 381 | name='LabelMatcher', 382 | full_name='prometheus.LabelMatcher', 383 | filename=None, 384 | file=DESCRIPTOR, 385 | containing_type=None, 386 | fields=[ 387 | _descriptor.FieldDescriptor( 388 | name='type', full_name='prometheus.LabelMatcher.type', index=0, 389 | number=1, type=14, cpp_type=8, label=1, 390 | has_default_value=False, default_value=0, 391 | message_type=None, enum_type=None, containing_type=None, 392 | is_extension=False, extension_scope=None, 393 | serialized_options=None, file=DESCRIPTOR), 394 | _descriptor.FieldDescriptor( 395 | name='name', full_name='prometheus.LabelMatcher.name', index=1, 396 | number=2, type=9, cpp_type=9, label=1, 397 | has_default_value=False, default_value=_b("").decode('utf-8'), 398 | message_type=None, enum_type=None, containing_type=None, 399 | is_extension=False, extension_scope=None, 400 | serialized_options=None, file=DESCRIPTOR), 401 | 
_descriptor.FieldDescriptor( 402 | name='value', full_name='prometheus.LabelMatcher.value', index=2, 403 | number=3, type=9, cpp_type=9, label=1, 404 | has_default_value=False, default_value=_b("").decode('utf-8'), 405 | message_type=None, enum_type=None, containing_type=None, 406 | is_extension=False, extension_scope=None, 407 | serialized_options=None, file=DESCRIPTOR), 408 | ], 409 | extensions=[ 410 | ], 411 | nested_types=[], 412 | enum_types=[ 413 | _LABELMATCHER_TYPE, 414 | ], 415 | serialized_options=None, 416 | is_extendable=False, 417 | syntax='proto3', 418 | extension_ranges=[], 419 | oneofs=[ 420 | ], 421 | serialized_start=620, 422 | serialized_end=750, 423 | ) 424 | 425 | 426 | _READHINTS = _descriptor.Descriptor( 427 | name='ReadHints', 428 | full_name='prometheus.ReadHints', 429 | filename=None, 430 | file=DESCRIPTOR, 431 | containing_type=None, 432 | fields=[ 433 | _descriptor.FieldDescriptor( 434 | name='step_ms', full_name='prometheus.ReadHints.step_ms', index=0, 435 | number=1, type=3, cpp_type=2, label=1, 436 | has_default_value=False, default_value=0, 437 | message_type=None, enum_type=None, containing_type=None, 438 | is_extension=False, extension_scope=None, 439 | serialized_options=None, file=DESCRIPTOR), 440 | _descriptor.FieldDescriptor( 441 | name='func', full_name='prometheus.ReadHints.func', index=1, 442 | number=2, type=9, cpp_type=9, label=1, 443 | has_default_value=False, default_value=_b("").decode('utf-8'), 444 | message_type=None, enum_type=None, containing_type=None, 445 | is_extension=False, extension_scope=None, 446 | serialized_options=None, file=DESCRIPTOR), 447 | _descriptor.FieldDescriptor( 448 | name='start_ms', full_name='prometheus.ReadHints.start_ms', index=2, 449 | number=3, type=3, cpp_type=2, label=1, 450 | has_default_value=False, default_value=0, 451 | message_type=None, enum_type=None, containing_type=None, 452 | is_extension=False, extension_scope=None, 453 | serialized_options=None, file=DESCRIPTOR), 454 | _descriptor.FieldDescriptor( 455 | name='end_ms', full_name='prometheus.ReadHints.end_ms', index=3, 456 | number=4, type=3, cpp_type=2, label=1, 457 | has_default_value=False, default_value=0, 458 | message_type=None, enum_type=None, containing_type=None, 459 | is_extension=False, extension_scope=None, 460 | serialized_options=None, file=DESCRIPTOR), 461 | ], 462 | extensions=[ 463 | ], 464 | nested_types=[], 465 | enum_types=[ 466 | ], 467 | serialized_options=None, 468 | is_extendable=False, 469 | syntax='proto3', 470 | extension_ranges=[], 471 | oneofs=[ 472 | ], 473 | serialized_start=752, 474 | serialized_end=828, 475 | ) 476 | 477 | _WRITEREQUEST.fields_by_name['timeseries'].message_type = _TIMESERIES 478 | _READREQUEST.fields_by_name['queries'].message_type = _QUERY 479 | _READRESPONSE.fields_by_name['results'].message_type = _QUERYRESULT 480 | _QUERY.fields_by_name['matchers'].message_type = _LABELMATCHER 481 | _QUERY.fields_by_name['hints'].message_type = _READHINTS 482 | _QUERYRESULT.fields_by_name['timeseries'].message_type = _TIMESERIES 483 | _TIMESERIES.fields_by_name['labels'].message_type = _LABEL 484 | _TIMESERIES.fields_by_name['samples'].message_type = _SAMPLE 485 | _LABELS.fields_by_name['labels'].message_type = _LABEL 486 | _LABELMATCHER.fields_by_name['type'].enum_type = _LABELMATCHER_TYPE 487 | _LABELMATCHER_TYPE.containing_type = _LABELMATCHER 488 | DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST 489 | DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST 490 | 
DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE 491 | DESCRIPTOR.message_types_by_name['Query'] = _QUERY 492 | DESCRIPTOR.message_types_by_name['QueryResult'] = _QUERYRESULT 493 | DESCRIPTOR.message_types_by_name['Sample'] = _SAMPLE 494 | DESCRIPTOR.message_types_by_name['TimeSeries'] = _TIMESERIES 495 | DESCRIPTOR.message_types_by_name['Label'] = _LABEL 496 | DESCRIPTOR.message_types_by_name['Labels'] = _LABELS 497 | DESCRIPTOR.message_types_by_name['LabelMatcher'] = _LABELMATCHER 498 | DESCRIPTOR.message_types_by_name['ReadHints'] = _READHINTS 499 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 500 | 501 | WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict( 502 | DESCRIPTOR = _WRITEREQUEST, 503 | __module__ = 'prometheus_pb2' 504 | # @@protoc_insertion_point(class_scope:prometheus.WriteRequest) 505 | )) 506 | _sym_db.RegisterMessage(WriteRequest) 507 | 508 | ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict( 509 | DESCRIPTOR = _READREQUEST, 510 | __module__ = 'prometheus_pb2' 511 | # @@protoc_insertion_point(class_scope:prometheus.ReadRequest) 512 | )) 513 | _sym_db.RegisterMessage(ReadRequest) 514 | 515 | ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict( 516 | DESCRIPTOR = _READRESPONSE, 517 | __module__ = 'prometheus_pb2' 518 | # @@protoc_insertion_point(class_scope:prometheus.ReadResponse) 519 | )) 520 | _sym_db.RegisterMessage(ReadResponse) 521 | 522 | Query = _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), dict( 523 | DESCRIPTOR = _QUERY, 524 | __module__ = 'prometheus_pb2' 525 | # @@protoc_insertion_point(class_scope:prometheus.Query) 526 | )) 527 | _sym_db.RegisterMessage(Query) 528 | 529 | QueryResult = _reflection.GeneratedProtocolMessageType('QueryResult', (_message.Message,), dict( 530 | DESCRIPTOR = _QUERYRESULT, 531 | __module__ = 'prometheus_pb2' 532 | # @@protoc_insertion_point(class_scope:prometheus.QueryResult) 533 | )) 534 | _sym_db.RegisterMessage(QueryResult) 535 | 536 | Sample = _reflection.GeneratedProtocolMessageType('Sample', (_message.Message,), dict( 537 | DESCRIPTOR = _SAMPLE, 538 | __module__ = 'prometheus_pb2' 539 | # @@protoc_insertion_point(class_scope:prometheus.Sample) 540 | )) 541 | _sym_db.RegisterMessage(Sample) 542 | 543 | TimeSeries = _reflection.GeneratedProtocolMessageType('TimeSeries', (_message.Message,), dict( 544 | DESCRIPTOR = _TIMESERIES, 545 | __module__ = 'prometheus_pb2' 546 | # @@protoc_insertion_point(class_scope:prometheus.TimeSeries) 547 | )) 548 | _sym_db.RegisterMessage(TimeSeries) 549 | 550 | Label = _reflection.GeneratedProtocolMessageType('Label', (_message.Message,), dict( 551 | DESCRIPTOR = _LABEL, 552 | __module__ = 'prometheus_pb2' 553 | # @@protoc_insertion_point(class_scope:prometheus.Label) 554 | )) 555 | _sym_db.RegisterMessage(Label) 556 | 557 | Labels = _reflection.GeneratedProtocolMessageType('Labels', (_message.Message,), dict( 558 | DESCRIPTOR = _LABELS, 559 | __module__ = 'prometheus_pb2' 560 | # @@protoc_insertion_point(class_scope:prometheus.Labels) 561 | )) 562 | _sym_db.RegisterMessage(Labels) 563 | 564 | LabelMatcher = _reflection.GeneratedProtocolMessageType('LabelMatcher', (_message.Message,), dict( 565 | DESCRIPTOR = _LABELMATCHER, 566 | __module__ = 'prometheus_pb2' 567 | # @@protoc_insertion_point(class_scope:prometheus.LabelMatcher) 568 | )) 569 | _sym_db.RegisterMessage(LabelMatcher) 570 | 571 | ReadHints = 
_reflection.GeneratedProtocolMessageType('ReadHints', (_message.Message,), dict( 572 | DESCRIPTOR = _READHINTS, 573 | __module__ = 'prometheus_pb2' 574 | # @@protoc_insertion_point(class_scope:prometheus.ReadHints) 575 | )) 576 | _sym_db.RegisterMessage(ReadHints) 577 | 578 | 579 | DESCRIPTOR._options = None 580 | # @@protoc_insertion_point(module_scope) 581 | --------------------------------------------------------------------------------
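The generated module above is legacy-style protoc output for Prometheus's remote read/write wire protocol (the "prompb" message set: WriteRequest, ReadRequest/ReadResponse, Query, QueryResult, Sample, TimeSeries, Label, Labels, LabelMatcher, ReadHints). Generated code of this vintage builds its descriptors at import time, so it imports under protobuf 3.x but is rejected by protobuf 4.x unless PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python is set or the file is regenerated with a current protoc (protoc --python_out=. prometheus.proto). As a minimal sketch of how these classes are consumed on the read path, assuming a Prometheus server reachable at http://localhost:9090 with its standard remote-read endpoint and the third-party python-snappy and requests packages; the remote_read helper and the metric name node_memory_Active_bytes are illustrative, not code from this repository:

import time

import requests
import snappy  # python-snappy: remote read/write bodies are snappy-compressed

from prometheus_pb2 import LabelMatcher, ReadRequest, ReadResponse


def remote_read(base_url, metric_name, minutes=60):
    """Fetch raw samples for one metric via the Prometheus remote-read API."""
    end_ms = int(time.time() * 1000)

    req = ReadRequest()
    query = req.queries.add()                    # repeated Query field
    query.start_timestamp_ms = end_ms - minutes * 60 * 1000
    query.end_timestamp_ms = end_ms
    matcher = query.matchers.add()               # repeated LabelMatcher field
    matcher.type = LabelMatcher.EQ               # exact-match selector
    matcher.name = '__name__'                    # metric name is the __name__ label
    matcher.value = metric_name

    resp = requests.post(
        base_url + '/api/v1/read',
        data=snappy.compress(req.SerializeToString()),
        headers={
            'Content-Encoding': 'snappy',
            'Content-Type': 'application/x-protobuf',
            'X-Prometheus-Remote-Read-Version': '0.1.0',
        },
        timeout=30,
    )
    resp.raise_for_status()

    read_resp = ReadResponse()
    read_resp.ParseFromString(snappy.decompress(resp.content))
    # Flatten results -> timeseries -> samples into (timestamp_ms, value) pairs.
    return [
        (sample.timestamp, sample.value)
        for result in read_resp.results
        for series in result.timeseries
        for sample in series.samples
    ]


samples = remote_read('http://localhost:9090', 'node_memory_Active_bytes')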
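The write path mirrors the read path with the same message classes; only the top-level message, endpoint, and version header differ. Another hedged sketch under the same assumptions (note that vanilla Prometheus serves /api/v1/write only when its remote-write receiver is enabled, on recent versions via --web.enable-remote-write-receiver, so the target URL and the remote_write helper are illustrative):

import time

import requests
import snappy

from prometheus_pb2 import WriteRequest


def remote_write(base_url, metric_name, value, labels=None):
    """Push one sample via the Prometheus remote-write protocol."""
    req = WriteRequest()
    series = req.timeseries.add()                # repeated TimeSeries field

    name_label = series.labels.add()
    name_label.name = '__name__'                 # metric name travels as a label
    name_label.value = metric_name
    for key, val in (labels or {}).items():
        label = series.labels.add()
        label.name = key
        label.value = val

    sample = series.samples.add()
    sample.value = float(value)                  # Sample.value is a double
    sample.timestamp = int(time.time() * 1000)   # milliseconds since the epoch

    resp = requests.post(
        base_url + '/api/v1/write',
        data=snappy.compress(req.SerializeToString()),
        headers={
            'Content-Encoding': 'snappy',
            'Content-Type': 'application/x-protobuf',
            'X-Prometheus-Remote-Write-Version': '0.1.0',
        },
        timeout=30,
    )
    resp.raise_for_status()


remote_write('http://localhost:9090', 'forecast_demo_value', 42.0,
             labels={'source': 'xforecast'})

Snappy-compressed protobuf keeps these request bodies compact and cheap to decode, which is why the remote read/write protocol uses it rather than JSON; both directions share exactly the message definitions in the generated module above.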