├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── exporter.cfg ├── prometheus_mysql_exporter ├── __init__.py ├── __main__.py ├── metrics.py ├── parser.py ├── scheduler.py └── utils.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | # Gedit 62 | *~ 63 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | os: linux 3 | dist: bionic 4 | python: 5 | - "3.5" 6 | - "3.6" 7 | - "3.7" 8 | - "3.8" 9 | install: 10 | - pip install -e . 
11 | script: 12 | - python -m unittest 13 | jobs: 14 | include: 15 | - stage: deploy 16 | if: tag IS present 17 | python: 3.8 18 | script: skip 19 | deploy: 20 | provider: pypi 21 | edge: true 22 | username: $PYPI_USERNAME 23 | password: $PYPI_PASSWORD 24 | distributions: "sdist bdist_wheel" 25 | on: 26 | tags: true 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8-slim 2 | 3 | WORKDIR /usr/src/app 4 | 5 | COPY setup.py /usr/src/app/ 6 | COPY README.md /usr/src/app/ 7 | RUN pip install -e . 8 | 9 | COPY prometheus_mysql_exporter/*.py /usr/src/app/prometheus_mysql_exporter/ 10 | COPY LICENSE /usr/src/app/ 11 | 12 | EXPOSE 9207 13 | 14 | ENTRYPOINT ["python", "-u", "/usr/local/bin/prometheus-mysql-exporter"] 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Braedon Vickers 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Prometheus MySQL Exporter 2 | ==== 3 | This Prometheus exporter periodically runs configured queries against a MySQL server and exports the results as Prometheus gauge metrics. 4 | 5 | [Source Code](https://github.com/braedon/prometheus-mysql-exporter) | [Python Package](https://pypi.org/project/prometheus-mysql-exporter) | [Docker Image](https://hub.docker.com/r/braedon/prometheus-mysql-exporter) | [Helm Chart](https://braedon.github.io/helm/prometheus-mysql-exporter) 6 | 7 | # Installation 8 | The exporter requires Python 3 and Pip 3 to be installed. 9 | 10 | To install the latest published version via Pip, run: 11 | ```bash 12 | > pip3 install prometheus-mysql-exporter 13 | ``` 14 | Note that you may need to add the start script location (see pip output) to your `PATH`. 15 | 16 | # Usage 17 | Once installed, you can run the exporter with the `prometheus-mysql-exporter` command. 18 | 19 | By default, it will bind to port 9207, query MySQL on `localhost:3306` using the `root` user (with no password) and run queries configured in a file `exporter.cfg` in the working directory. 
You can change any defaults or other settings as required by passing in options: 20 | ```bash 21 | > prometheus-mysql-exporter -p -s -u -P -z -c 22 | ``` 23 | Run with the `-h` flag to see details on all the available options. 24 | 25 | Note that all options can be set via environment variables. The environment variable names are prefixed with `MYSQL_EXPORTER`, e.g. `MYSQL_EXPORTER_MYSQL_USER=fred` is equivalent to `--mysql-user fred`. CLI options take precedence over environment variables. 26 | 27 | Command line options can also be set from a configuration file, by passing `--config FILE`. The format of the file should be [Configobj's unrepr mode](https://configobj.readthedocs.io/en/latest/configobj.html#unrepr-mode), so instead of `--mysql-user fred` you could use a configuration file `config_file` with `mysql-user="fred"` in it, and pass `--config config_file`. CLI options and environment variables take precedence over configuration files. 28 | 29 | CLI options, environment variables, and configuration files all override any default options. The full resolution order for a given option is: CLI > Environment > Configuration file > Default. 30 | 31 | See the provided [exporter.cfg](exporter.cfg) file for query configuration examples and explanation. 32 | 33 | # Docker 34 | Docker images for released versions can be found on Docker Hub (note that no `latest` version is provided): 35 | ```bash 36 | > sudo docker pull braedon/prometheus-mysql-exporter: 37 | ``` 38 | To run a container successfully, you will need to mount a query config file to `/usr/src/app/exporter.cfg` and map container port 9207 to a port on the host. Any options placed after the image name (`prometheus-mysql-exporter`) will be passed to the process inside the container. For example, you will need to use this to configure the MySQL server using `-s`. 
39 | ```bash 40 | > sudo docker run --rm --name exporter \ 41 | -v :/usr/src/app/exporter.cfg \ 42 | -p :9207 \ 43 | braedon/prometheus-mysql-exporter: -s 44 | ``` 45 | If you don't want to mount the query config file in at run time, you could extend an existing image with your own Dockerfile that copies the config file in at build time. 46 | 47 | # Helm 48 | A Helm chart is available from the Helm repo at [https://braedon.github.io/helm](https://braedon.github.io/helm/). 49 | ```bash 50 | > helm repo add braedon https://braedon.github.io/helm 51 | > helm repo update 52 | 53 | > helm install braedon/prometheus-mysql-exporter --name \ 54 | --set mysql.server= \ 55 | --set image.tag= 56 | ``` 57 | See the [`prometheus-mysql-exporter` chart README](https://braedon.github.io/helm/prometheus-mysql-exporter/) for more details on how to configure the chart. 58 | 59 | # Development 60 | To install directly from the git repo, run the following in the root project directory: 61 | ```bash 62 | > pip3 install . 63 | ``` 64 | The exporter can be installed in "editable" mode, using pip's `-e` flag. This allows you to test out changes without having to re-install. 65 | ```bash 66 | > pip3 install -e . 67 | ``` 68 | To build a docker image directly from the git repo, run the following in the root project directory: 69 | ```bash 70 | > sudo docker build -t . 71 | ``` 72 | Send me a PR if you have a change you want to contribute! 73 | -------------------------------------------------------------------------------- /exporter.cfg: -------------------------------------------------------------------------------- 1 | # This section defines default settings for how queries should be run. 2 | # All settings can be overridden for any given query in its own section. 3 | # The values shown in this example are also the fallback values used if 4 | # a setting is not specified in the DEFAULT section or a query's section. 5 | [DEFAULT] 6 | # How often to run queries. 
7 | QueryIntervalSecs = 15 8 | # What to do if a query throws an error. One of: 9 | # * preserve - keep the metrics/values from the last successful run. 10 | # * drop - remove metrics previously produced by the query. 11 | # * zero - keep metrics previously produced by the query, but reset their values to 0. 12 | QueryOnError = drop 13 | # What to do if a metric produced by the previous run of a query is not present 14 | # in the current run. One of: 15 | # * preserve - keep the value of the metric from the last run it was present in. 16 | # * drop - remove the metric. 17 | # * zero - keep the metric, but reset its value to 0. 18 | QueryOnMissing = drop 19 | 20 | # Queries are defined in sections beginning with 'query_'. 21 | # Characters following this prefix will be used as a prefix for all metrics 22 | # generated for this query 23 | [query_test1] 24 | # Settings that are not specified are inherited from the DEFAULT section. 25 | # The database to run the query on. 26 | QueryDatabase = test 27 | # The SQL SELECT statement to run. 28 | QueryStatement = SELECT bar, baz, count(*) as ni, max(ekki) as ptang FROM foo GROUP BY bar, baz; 29 | # A list of result columns to be exported as metrics. They must be numeric. 30 | # Any result columns not listed will be used as metric labels instead. 31 | QueryValueColumns = ni,ptang 32 | 33 | [query_test2] 34 | # The DEFAULT settings can be overridden. 35 | QueryIntervalSecs = 5 36 | QueryOnError = preserve 37 | QueryOnMissing = zero 38 | QueryDatabase = test 39 | QueryStatement = SELECT bar, count(*) as baz FROM foo GROUP BY bar; 40 | QueryValueColumns = baz 41 | 42 | [query_test3] 43 | # Run the query at specific times defined using a cron-like format. 44 | # See https://pypi.org/project/croniter/ for format details. 45 | # If QueryCron is set QueryIntervalSecs is ignored. 46 | # This example is 00:03 every night. 47 | QueryCron = 3 0 * * * 48 | # The timezone to use when calculating cron query run times. 
49 | # Defaults to UTC if not specified. 50 | QueryCronTimezone = Pacific/Auckland 51 | QueryDatabase = test 52 | QueryStatement = SELECT bar, count(*) as baz FROM foo GROUP BY bar; 53 | QueryValueColumns = baz 54 | -------------------------------------------------------------------------------- /prometheus_mysql_exporter/__init__.py: -------------------------------------------------------------------------------- 1 | import click 2 | import click_config_file 3 | import configparser 4 | import glob 5 | import logging 6 | import os 7 | import pymysql 8 | import pytz 9 | import sched 10 | 11 | from dbutils.persistent_db import PersistentDB 12 | from jog import JogFormatter 13 | from prometheus_client import start_http_server 14 | from prometheus_client.core import REGISTRY 15 | 16 | from .metrics import gauge_generator, group_metrics, merge_metric_dicts 17 | from .parser import parse_response 18 | from .scheduler import schedule_job 19 | from .utils import log_exceptions, nice_shutdown 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | CONTEXT_SETTINGS = { 24 | 'help_option_names': ['-h', '--help'] 25 | } 26 | 27 | METRICS_BY_QUERY = {} 28 | 29 | 30 | class QueryMetricCollector(object): 31 | 32 | def collect(self): 33 | # Copy METRICS_BY_QUERY before iterating over it 34 | # as it may be updated by other threads. 
35 | # (only first level - lower levels are replaced 36 | # wholesale, so don't worry about them) 37 | query_metrics = METRICS_BY_QUERY.copy() 38 | for metric_dict in query_metrics.values(): 39 | yield from gauge_generator(metric_dict) 40 | 41 | 42 | def run_query(mysql_client, query_name, db_name, query, value_columns, 43 | on_error, on_missing): 44 | 45 | log.debug('Running query %(query_name)s.', {'query_name': query_name}) 46 | try: 47 | conn = mysql_client.connection() 48 | 49 | try: 50 | with conn.cursor() as cursor: 51 | cursor.execute('USE `{}`;'.format(db_name)) 52 | cursor.execute(query) 53 | raw_response = cursor.fetchall() 54 | columns = [column[0] for column in cursor.description] 55 | 56 | finally: 57 | conn.close() 58 | 59 | response = [{column: row[i] for i, column in enumerate(columns)} 60 | for row in raw_response] 61 | metrics = parse_response(query_name, db_name, value_columns, response) 62 | metric_dict = group_metrics(metrics) 63 | 64 | except Exception: 65 | log.exception('Error while querying db %(db_name)s, query %(query)s.', 66 | {'db_name': db_name, 'query': query}) 67 | 68 | # If this query has successfully run before, we need to handle any 69 | # metrics produced by that previous run. 70 | if query_name in METRICS_BY_QUERY: 71 | old_metric_dict = METRICS_BY_QUERY[query_name] 72 | 73 | if on_error == 'preserve': 74 | metric_dict = old_metric_dict 75 | 76 | elif on_error == 'drop': 77 | metric_dict = {} 78 | 79 | elif on_error == 'zero': 80 | # Merging the old metric dict with an empty one, and zeroing 81 | # any missing metrics, produces a metric dict with the same 82 | # metrics, but all zero values. 83 | metric_dict = merge_metric_dicts(old_metric_dict, {}, 84 | zero_missing=True) 85 | 86 | METRICS_BY_QUERY[query_name] = metric_dict 87 | 88 | else: 89 | # If this query has successfully run before, we need to handle any 90 | # missing metrics. 
91 | if query_name in METRICS_BY_QUERY: 92 | old_metric_dict = METRICS_BY_QUERY[query_name] 93 | 94 | if on_missing == 'preserve': 95 | metric_dict = merge_metric_dicts(old_metric_dict, metric_dict, 96 | zero_missing=False) 97 | 98 | elif on_missing == 'drop': 99 | pass # use new metric dict untouched 100 | 101 | elif on_missing == 'zero': 102 | metric_dict = merge_metric_dicts(old_metric_dict, metric_dict, 103 | zero_missing=True) 104 | 105 | METRICS_BY_QUERY[query_name] = metric_dict 106 | 107 | 108 | def validate_server_address(ctx, param, address_string): 109 | if ':' in address_string: 110 | host, port_string = address_string.split(':', 1) 111 | try: 112 | port = int(port_string) 113 | except ValueError: 114 | msg = "port '{}' in address '{}' is not an integer".format(port_string, address_string) 115 | raise click.BadParameter(msg) 116 | return (host, port) 117 | else: 118 | return (address_string, 3306) 119 | 120 | 121 | def configparser_enum_conv(enum): 122 | lower_enums = tuple(e.lower() for e in enum) 123 | 124 | def conv(value): 125 | lower_value = value.lower() 126 | if lower_value in lower_enums: 127 | return lower_value 128 | else: 129 | raise ValueError('Value {} not value. Must be one of {}'.format( 130 | value, ','.join(enum))) 131 | 132 | return conv 133 | 134 | 135 | CONFIGPARSER_CONVERTERS = { 136 | 'enum': configparser_enum_conv(('preserve', 'drop', 'zero')) 137 | } 138 | 139 | 140 | @click.command(context_settings=CONTEXT_SETTINGS) 141 | @click.option('--port', '-p', default=9207, 142 | help='Port to serve the metrics endpoint on. (default: 9207)') 143 | @click.option('--config-file', '-c', default='exporter.cfg', type=click.File(), 144 | help='Path to query config file. ' 145 | 'Can be absolute, or relative to the current working directory. ' 146 | '(default: exporter.cfg)') 147 | @click.option('--config-dir', default='./config', type=click.Path(file_okay=False), 148 | help='Path to query config directory. 
' 149 | 'If present, any files ending in ".cfg" in the directory ' 150 | 'will be parsed as additional query config files. ' 151 | 'Merge order is main config file, then config directory files ' 152 | 'in filename order. ' 153 | 'Can be absolute, or relative to the current working directory. ' 154 | '(default: ./config)') 155 | @click.option('--mysql-server', '-s', callback=validate_server_address, default='localhost', 156 | help='Address of a MySQL server to run queries on. ' 157 | 'A port can be provided if non-standard (3306) e.g. mysql:3333. ' 158 | '(default: localhost)') 159 | @click.option('--mysql-user', '-u', default='root', 160 | help='MySQL user to run queries as. (default: root)') 161 | @click.option('--mysql-password', '-P', default='', 162 | help='Password for the MySQL user, if required. (default: no password)') 163 | @click.option('--mysql-local-timezone', '-z', 164 | help='Local timezone for sql commands like NOW(). (default: use server timezone)') 165 | @click.option('--json-logging', '-j', default=False, is_flag=True, 166 | help='Turn on json logging.') 167 | @click.option('--log-level', default='INFO', 168 | type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']), 169 | help='Detail level to log. (default: INFO)') 170 | @click.option('--verbose', '-v', default=False, is_flag=True, 171 | help='Turn on verbose (DEBUG) logging. 
Overrides --log-level.') 172 | @click_config_file.configuration_option() 173 | def cli(**options): 174 | """Export MySQL query results to Prometheus.""" 175 | 176 | log_handler = logging.StreamHandler() 177 | log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s' 178 | formatter = JogFormatter(log_format) if options['json_logging'] else logging.Formatter(log_format) 179 | log_handler.setFormatter(formatter) 180 | 181 | log_level = getattr(logging, options['log_level']) 182 | logging.basicConfig( 183 | handlers=[log_handler], 184 | level=logging.DEBUG if options['verbose'] else log_level 185 | ) 186 | logging.captureWarnings(True) 187 | 188 | port = options['port'] 189 | mysql_host, mysql_port = options['mysql_server'] 190 | 191 | mysql_username = options['mysql_user'] 192 | mysql_password = options['mysql_password'] 193 | mysql_timezone = options['mysql_local_timezone'] 194 | 195 | config = configparser.ConfigParser(converters=CONFIGPARSER_CONVERTERS) 196 | config.read_file(options['config_file']) 197 | 198 | config_dir_file_pattern = os.path.join(options['config_dir'], '*.cfg') 199 | config_dir_sorted_files = sorted(glob.glob(config_dir_file_pattern)) 200 | config.read(config_dir_sorted_files) 201 | 202 | query_prefix = 'query_' 203 | queries = {} 204 | for section in config.sections(): 205 | if section.startswith(query_prefix): 206 | query_name = section[len(query_prefix):] 207 | interval = config.getfloat(section, 'QueryIntervalSecs', 208 | fallback=15) 209 | cron = config.get(section, 'QueryCron', 210 | fallback=None) 211 | cron_tz = config.get(section, 'QueryCronTimezone', 212 | fallback=None) 213 | if cron_tz is not None: 214 | cron_tz = pytz.timezone(cron_tz) 215 | db_name = config.get(section, 'QueryDatabase') 216 | query = config.get(section, 'QueryStatement') 217 | value_columns = config.get(section, 'QueryValueColumns').split(',') 218 | on_error = config.getenum(section, 'QueryOnError', 219 | fallback='drop') 220 | on_missing = 
config.getenum(section, 'QueryOnMissing', 221 | fallback='drop') 222 | 223 | queries[query_name] = (interval, cron, cron_tz, 224 | db_name, query, value_columns, 225 | on_error, on_missing) 226 | 227 | scheduler = sched.scheduler() 228 | 229 | mysql_kwargs = dict(host=mysql_host, 230 | port=mysql_port, 231 | user=mysql_username, 232 | password=mysql_password, 233 | # Use autocommit mode to avoid keeping the same transaction across query 234 | # runs when the connection is reused. Using the same transaction would 235 | # prevent changes from being reflected in results, and therefore metrics. 236 | # Note: Queries could theoretically change data... 237 | autocommit=True) 238 | if mysql_timezone: 239 | mysql_kwargs['init_command'] = "SET time_zone = '{}'".format(mysql_timezone) 240 | 241 | mysql_client = PersistentDB(creator=pymysql, **mysql_kwargs) 242 | 243 | if queries: 244 | for query_name, (interval, cron, cron_tz, 245 | db_name, query, value_columns, 246 | on_error, on_missing) in queries.items(): 247 | schedule_job(scheduler, interval, cron, cron_tz, 248 | run_query, mysql_client, query_name, 249 | db_name, query, value_columns, on_error, on_missing) 250 | else: 251 | log.warning('No queries found in config file(s)') 252 | 253 | REGISTRY.register(QueryMetricCollector()) 254 | 255 | log.info('Starting server...') 256 | start_http_server(port) 257 | log.info('Server started on port %(port)s', {'port': port}) 258 | 259 | scheduler.run() 260 | 261 | 262 | @log_exceptions(exit_on_exception=True) 263 | @nice_shutdown() 264 | def main(): 265 | cli(auto_envvar_prefix='MYSQL_EXPORTER') 266 | -------------------------------------------------------------------------------- /prometheus_mysql_exporter/__main__.py: -------------------------------------------------------------------------------- 1 | from prometheus_mysql_exporter import main 2 | 3 | if __name__ == '__main__': 4 | main() 5 | -------------------------------------------------------------------------------- 
/prometheus_mysql_exporter/metrics.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from collections import OrderedDict 4 | from prometheus_client.core import GaugeMetricFamily 5 | 6 | 7 | METRIC_INVALID_CHARS = re.compile(r'[^a-zA-Z0-9_:]') 8 | METRIC_INVALID_START_CHARS = re.compile(r'^[^a-zA-Z_:]') 9 | LABEL_INVALID_CHARS = re.compile(r'[^a-zA-Z0-9_]') 10 | LABEL_INVALID_START_CHARS = re.compile(r'^[^a-zA-Z_]') 11 | LABEL_START_DOUBLE_UNDER = re.compile(r'^__+') 12 | 13 | 14 | def format_label_key(label_key): 15 | """ 16 | Construct a label key. 17 | 18 | Disallowed characters are replaced with underscores. 19 | """ 20 | label_key = LABEL_INVALID_CHARS.sub('_', label_key) 21 | label_key = LABEL_INVALID_START_CHARS.sub('_', label_key) 22 | label_key = LABEL_START_DOUBLE_UNDER.sub('_', label_key) 23 | return label_key 24 | 25 | 26 | def format_label_value(*values): 27 | """ 28 | Construct a label value. 29 | 30 | If multiple value components are provided, they are joined by underscores. 31 | """ 32 | return '_'.join(values) 33 | 34 | 35 | def format_labels(label_dict): 36 | """ 37 | Formats metric label dictionaries. 38 | 39 | Takes metric labels as a dictionary of label key -> label value. 40 | 41 | Label values can be list of strings. These will be joined together with 42 | underscores. 43 | 44 | Disallowed characters in label keys and values will be replaced with 45 | underscores. 
46 | """ 47 | formatted_label_dict = OrderedDict() 48 | for label_key, label_value in label_dict.items(): 49 | formatted_label_key = format_label_key(label_key) 50 | 51 | if isinstance(label_value, str): 52 | formatted_label_value = format_label_value(label_value) 53 | else: 54 | formatted_label_value = format_label_value(*label_value) 55 | 56 | formatted_label_dict[formatted_label_key] = formatted_label_value 57 | 58 | return formatted_label_dict 59 | 60 | 61 | def format_metric_name(*names): 62 | """ 63 | Construct a metric name. 64 | 65 | If multiple name components are provided, they are joined by underscores. 66 | Disallowed characters are replaced with underscores. 67 | """ 68 | metric = '_'.join(names) 69 | metric = METRIC_INVALID_CHARS.sub('_', metric) 70 | metric = METRIC_INVALID_START_CHARS.sub('_', metric) 71 | return metric 72 | 73 | 74 | def group_metrics(metrics): 75 | """ 76 | Groups metrics with the same name but different label values. 77 | 78 | Takes metrics as a list of tuples containing: 79 | * metric name, 80 | * metric documentation, 81 | * dict of label key -> label value, 82 | * metric value. 83 | 84 | The metrics are grouped by metric name. All metrics with the same metric 85 | name must have the same set of label keys. 86 | 87 | A dict keyed by metric name is returned. Each metric name maps to a tuple 88 | containing: 89 | * metric documentation 90 | * label keys tuple, 91 | * dict of label values tuple -> metric value. 92 | """ 93 | 94 | metric_dict = {} 95 | for (metric_name, metric_doc, label_dict, value) in metrics: 96 | curr_label_keys = tuple(label_dict.keys()) 97 | 98 | if metric_name in metric_dict: 99 | label_keys = metric_dict[metric_name][1] 100 | assert set(curr_label_keys) == set(label_keys), \ 101 | 'Not all values for metric {} have the same keys. {} vs. 
{}.'.format( 102 | metric_name, curr_label_keys, label_keys) 103 | else: 104 | label_keys = curr_label_keys 105 | metric_dict[metric_name] = (metric_doc, label_keys, {}) 106 | 107 | label_values = tuple([label_dict[k] for k in label_keys]) 108 | 109 | metric_dict[metric_name][2][label_values] = value 110 | 111 | return metric_dict 112 | 113 | 114 | def merge_value_dicts(old_value_dict, new_value_dict, zero_missing=False): 115 | """ 116 | Merge an old and new value dict together, returning the merged value dict. 117 | 118 | Value dicts map from label values tuple -> metric value. 119 | 120 | Values from the new value dict have precedence. If any label values tuples 121 | from the old value dict are not present in the new value dict and 122 | zero_missing is set, their values are reset to zero. 123 | """ 124 | value_dict = new_value_dict.copy() 125 | value_dict.update({ 126 | label_values: 0 if zero_missing else old_value 127 | for label_values, old_value 128 | in old_value_dict.items() 129 | if label_values not in new_value_dict 130 | }) 131 | return value_dict 132 | 133 | 134 | def merge_metric_dicts(old_metric_dict, new_metric_dict, zero_missing=False): 135 | """ 136 | Merge an old and new metric dict together, returning the merged metric dict. 137 | 138 | Metric dicts are keyed by metric name. Each metric name maps to a tuple 139 | containing: 140 | * metric documentation 141 | * label keys tuple, 142 | * dict of label values tuple -> metric value. 143 | 144 | Values from the new metric dict have precedence. If any metric names from 145 | the old metric dict are not present in the new metric dict and zero_missing 146 | is set, their values are reset to zero. 147 | 148 | Merging (and missing value zeroing, if set) is performed on the value dicts 149 | for each metric, not just on the top level metrics themselves. 
150 | """ 151 | metric_dict = new_metric_dict.copy() 152 | metric_dict.update({ 153 | metric_name: ( 154 | metric_doc, 155 | label_keys, 156 | merge_value_dicts( 157 | old_value_dict, 158 | new_value_dict=new_metric_dict[metric_name][2] 159 | if metric_name in new_metric_dict else {}, 160 | zero_missing=zero_missing 161 | ) 162 | ) 163 | for metric_name, (metric_doc, label_keys, old_value_dict) 164 | in old_metric_dict.items() 165 | }) 166 | return metric_dict 167 | 168 | 169 | def gauge_generator(metric_dict): 170 | """ 171 | Generates GaugeMetricFamily instances for a list of metrics. 172 | 173 | Takes metrics as a dict keyed by metric name. Each metric name maps to a 174 | tuple containing: 175 | * metric documentation 176 | * label keys tuple, 177 | * dict of label values tuple -> metric value. 178 | 179 | Yields a GaugeMetricFamily instance for each unique metric name, containing 180 | children for the various label combinations. Suitable for use in a collect() 181 | method of a Prometheus collector. 182 | """ 183 | 184 | for metric_name, (metric_doc, label_keys, value_dict) in metric_dict.items(): 185 | # If we have label keys we may have multiple different values, 186 | # each with their own label values. 187 | if label_keys: 188 | gauge = GaugeMetricFamily(metric_name, metric_doc, labels=label_keys) 189 | 190 | for label_values in sorted(value_dict.keys()): 191 | value = value_dict[label_values] 192 | gauge.add_metric(label_values, value) 193 | 194 | # No label keys, so we must have only a single value. 
195 | else: 196 | gauge = GaugeMetricFamily(metric_name, metric_doc, value=list(value_dict.values())[0]) 197 | 198 | yield gauge 199 | -------------------------------------------------------------------------------- /prometheus_mysql_exporter/parser.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from numbers import Number 3 | 4 | from .metrics import format_metric_name, format_labels 5 | 6 | 7 | def parse_response(query_name, db_name, value_columns, response): 8 | """ 9 | Parse a SQL query response into a list of metric tuples. 10 | 11 | Each value column in each row of the response results in a metric, so long 12 | it is numeric. Other columns are converted to labels. The db name is also 13 | included in the labels, as 'db'. 14 | 15 | Metric tuples contain: 16 | * metric name, 17 | * metric documentation, 18 | * dict of label key -> label value, 19 | * metric value. 20 | """ 21 | result = [] 22 | 23 | for row in response: 24 | # NOTE: This db label isn't strictly necessary, since a single query can 25 | # only be run on a single database. It's retained for backwards 26 | # compatibility with previous versions that allowed queries to be 27 | # run on multiple databases. 
28 | labels = OrderedDict({'db': db_name}) 29 | labels.update((column, str(row[column])) 30 | for column in row 31 | if column not in value_columns) 32 | 33 | for value_column in value_columns: 34 | value = row[value_column] 35 | if isinstance(value, Number): 36 | result.append(( 37 | format_metric_name(query_name, value_column), 38 | "Value column '{}' for query '{}'.".format(value_column, query_name), 39 | format_labels(labels), 40 | value, 41 | )) 42 | 43 | return result 44 | -------------------------------------------------------------------------------- /prometheus_mysql_exporter/scheduler.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | 4 | from croniter import croniter 5 | from datetime import datetime, timezone 6 | 7 | log = logging.getLogger(__name__) 8 | 9 | 10 | def schedule_job(scheduler, interval, cron, cron_tz, func, *args, **kwargs): 11 | """ 12 | Schedule a function to be run at a fixed interval, or based on a 13 | cron expression. Uses the croniter module for cron handling. 14 | 15 | Works with schedulers from the stdlib sched module. 16 | """ 17 | 18 | def scheduled_run(scheduled_time, *args, **kwargs): 19 | try: 20 | func(*args, **kwargs) 21 | except Exception: 22 | log.exception('Error while running scheduled job.') 23 | 24 | current_time = time.monotonic() 25 | if cron: 26 | delay = calc_cron_delay(cron, cron_tz) 27 | # Assume the current_dt used by calc_cron_delay() represents the 28 | # same instant as current_time. Should be approximately true. 
29 | next_scheduled_time = current_time + delay 30 | log.debug('Next cron based run in %(delay_s).2fs.', 31 | {'delay_s': delay}) 32 | else: 33 | next_scheduled_time = scheduled_time + interval 34 | while next_scheduled_time < current_time: 35 | next_scheduled_time += interval 36 | log.debug('Next interval based run in %(delay_s).2fs.', 37 | {'delay_s': next_scheduled_time - current_time}) 38 | 39 | scheduler.enterabs(time=next_scheduled_time, 40 | priority=1, 41 | action=scheduled_run, 42 | argument=(next_scheduled_time, *args), 43 | kwargs=kwargs) 44 | 45 | next_scheduled_time = time.monotonic() 46 | scheduler.enterabs(time=next_scheduled_time, 47 | priority=1, 48 | action=scheduled_run, 49 | argument=(next_scheduled_time, *args), 50 | kwargs=kwargs) 51 | 52 | 53 | def calc_cron_delay(cron, cron_tz): 54 | """ 55 | Return seconds until the next cron run time by parsing a cron 56 | expression. Uses the croniter module for cron handling. 57 | """ 58 | 59 | current_dt = datetime.now(timezone.utc) 60 | if cron_tz: 61 | current_dt = current_dt.astimezone(cron_tz) 62 | 63 | next_dt = croniter(cron, current_dt).get_next(datetime) 64 | 65 | delay = (next_dt - current_dt).total_seconds() 66 | assert delay > 0, 'Cron delay should be positive.' 67 | 68 | return delay 69 | -------------------------------------------------------------------------------- /prometheus_mysql_exporter/utils.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | import signal 4 | import sys 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | def log_exceptions(exit_on_exception=False): 10 | """ 11 | Logs any exceptions raised. 12 | 13 | By default, exceptions are then re-raised. If set to exit on exception, 14 | sys.exit(1) is called instead. 
15 |     """
16 | 
17 |     def decorator(func):
18 | 
19 |         @functools.wraps(func)
20 |         def wrapper(*args, **kwargs):
21 |             try:
22 |                 return func(*args, **kwargs)
23 |             except Exception:  # BaseExceptions (e.g. SystemExit, KeyboardInterrupt) propagate untouched
24 |                 if exit_on_exception:
25 |                     log.exception('Unrecoverable exception encountered. Exiting.')
26 |                     sys.exit(1)
27 |                 else:
28 |                     log.exception('Exception encountered.')
29 |                     raise
30 | 
31 |         return wrapper
32 | 
33 |     return decorator
34 | 
35 | 
36 | def nice_shutdown(shutdown_signals=(signal.SIGINT, signal.SIGTERM)):
37 |     """
38 |     Logs shutdown signals nicely.
39 | 
40 |     Installs handlers for the shutdown signals (SIGINT and SIGTERM by default)
41 |     that log the signal that has been received, and then raise SystemExit.
42 |     The original handlers are restored before returning.
43 |     """
44 | 
45 |     def sig_handler(signum, _):
46 |         log.info('Received signal %(signal)s.',
47 |                  {'signal': signal.Signals(signum).name})
48 |         # Raise SystemExit to bypass (most) try/except blocks.
49 |         sys.exit()
50 | 
51 |     def decorator(func):
52 | 
53 |         @functools.wraps(func)
54 |         def wrapper(*args, **kwargs):
55 |             # Setup new shutdown handlers, storing the old ones for later.
56 |             old_handlers = {}
57 |             for sig in shutdown_signals:
58 |                 old_handlers[sig] = signal.signal(sig, sig_handler)  # signal.signal() returns the previous handler
59 | 
60 |             try:
61 |                 return func(*args, **kwargs)
62 | 
63 |             finally:
64 |                 # Restore the old handlers, even if func raised.
65 |                 for sig, old_handler in old_handlers.items():
66 |                     signal.signal(sig, old_handler)
67 | 
68 |         return wrapper
69 | 
70 |     return decorator
71 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | from setuptools import setup, find_packages
 2 | from os import path
 3 | 
 4 | 
 5 | this_directory = path.abspath(path.dirname(__file__))
 6 | with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
 7 |     long_description = f.read()
 8 | 
 9 | setup(
10 |     name='prometheus-mysql-exporter',
11 |     version='0.5.0',
12 |     description='MySQL query Prometheus exporter',
13 |     long_description=long_description,
14 |     long_description_content_type='text/markdown',
15 |     url='https://github.com/braedon/prometheus-mysql-exporter',
16 |     author='Braedon Vickers',
17 |     author_email='braedon.vickers@gmail.com',
18 |     license='MIT',
19 |     classifiers=[
20 |         'Development Status :: 4 - Beta',
21 |         'Intended Audience :: Developers',
22 |         'Intended Audience :: System Administrators',
23 |         'Topic :: System :: Monitoring',
24 |         'License :: OSI Approved :: MIT License',
25 |         'Programming Language :: Python :: 3',
26 |         'Programming Language :: Python :: 3.5',
27 |         'Programming Language :: Python :: 3.6',
28 |         'Programming Language :: Python :: 3.7',
29 |         'Programming Language :: Python :: 3.8',
30 |     ],
31 |     keywords='monitoring prometheus exporter mysql',
32 |     packages=find_packages(exclude=['tests']),
33 |     python_requires='>=3.5',
34 |     install_requires=[
35 |         'click',
36 |         'click-config-file',
37 |         'croniter',
38 |         'DBUtils ~= 2.0',
39 |         'jog',
40 |         'PyMySQL',
41 |         'prometheus-client >= 0.6.0',
42 |         'pytz',
43 |     ],
44 |     entry_points={
45 |         'console_scripts': [
46 |             'prometheus-mysql-exporter=prometheus_mysql_exporter:main',
47 |         ],
48 |     },
49 | )
50 | 
--------------------------------------------------------------------------------