├── .dockerignore ├── .github └── workflows │ └── publish.yml ├── .gitignore ├── .npmrc ├── .prettierignore ├── .prettierrc ├── Dockerfile ├── LICENSE ├── README.md ├── package-lock.json ├── package.json ├── src ├── index.js ├── metrics-docs.js ├── metrics.js └── utils.js └── test └── e2e.test.js /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | node_modules 3 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Build and Publish 2 | on: 3 | release: 4 | types: [published] 5 | jobs: 6 | docker: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | - name: Print tag number 11 | run: echo Publishing to tag ${{ github.event.release.tag_name }} 12 | - name: Set up QEMU 13 | uses: docker/setup-qemu-action@v1 14 | - name: Set up Docker Buildx 15 | uses: docker/setup-buildx-action@v1 16 | - name: Login to DockerHub 17 | uses: docker/login-action@v1 18 | with: 19 | username: ${{ secrets.DOCKERHUB_USERNAME }} 20 | password: ${{ secrets.DOCKERHUB_TOKEN }} 21 | - name: Build and push (release tag) 22 | uses: docker/build-push-action@v2 23 | with: 24 | push: true 25 | tags: awaragi/prometheus-mssql-exporter:${{ github.event.release.tag_name }} 26 | - name: Build and push (latest) 27 | uses: docker/build-push-action@v2 28 | with: 29 | push: true 30 | tags: awaragi/prometheus-mssql-exporter:latest 31 | - name: Update repo description 32 | uses: peter-evans/dockerhub-description@v2 33 | with: 34 | username: ${{ secrets.DOCKERHUB_USERNAME }} 35 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 36 | repository: awaragi/prometheus-mssql-exporter 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.iml 3 | 
.idea 4 | .vscode 5 | .env 6 | yarn.lock 7 | yarn-error.log 8 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | registry=https://registry.npmjs.org -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | node_modules/* 2 | package-lock.json 3 | yarn.lock 4 | 5 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 160 3 | } 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16.14.2-alpine 2 | LABEL MAINTAINER="Pierre Awaragi (pierre@awaragi.com), cobolbaby" 3 | LABEL org.opencontainers.image.authors="Pierre Awaragi (pierre@awaragi.com), cobolbaby" 4 | 5 | # Create a directory where our app will be placed 6 | RUN mkdir -p /usr/src/app 7 | 8 | # Change directory so that our commands run inside this new directory 9 | WORKDIR /usr/src/app 10 | 11 | # Copy application 12 | COPY package.json package-lock.json src/*.js ./ 13 | 14 | # Install dependencies 15 | RUN npm ci --production 16 | 17 | # Expose the port the app runs in 18 | EXPOSE 4000 19 | 20 | # Serve the app 21 | CMD ["node", "index.js"] 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Pierre Awaragi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, 
including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Prometheus MSSQL Exporter Docker Container 2 | 3 | Prometheus exporter for Microsoft SQL Server (MSSQL). 
Exposes the following metrics 4 | 5 | - mssql_up UP Status 6 | - mssql_product_version Instance version (Major.Minor) 7 | - mssql_instance_local_time Number of seconds since epoch on local instance 8 | - mssql_connections{database,state} Number of active connections 9 | - mssql_client_connections{client,database} Number of active client connections 10 | - mssql_deadlocks Number of lock requests per second that resulted in a deadlock since last restart 11 | - mssql_user_errors Number of user errors/sec since last restart 12 | - mssql_kill_connection_errors Number of kill connection errors/sec since last restart 13 | - mssql_database_state{database} Databases states: 0=ONLINE 1=RESTORING 2=RECOVERING 3=RECOVERY_PENDING 4=SUSPECT 5=EMERGENCY 6=OFFLINE 7=COPYING 10=OFFLINE_SECONDARY 14 | - mssql_log_growths{database} Total number of times the transaction log for the database has been expanded last restart 15 | - mssql_database_filesize{database,logicalname,type,filename} Physical sizes of files used by database in KB, their names and types (0=rows, 1=log, 2=filestream,3=n/a 4=fulltext(before v2008 of MSSQL)) 16 | - mssql_page_read_total Page reads/sec 17 | - mssql_page_write_total Page writes/sec 18 | - mssql_page_life_expectancy Indicates the minimum number of seconds a page will stay in the buffer pool on this node without references. The traditional advice from Microsoft used to be that the PLE should remain above 300 seconds 19 | - mssql_lazy_write_total Lazy writes/sec 20 | - mssql_page_checkpoint_total Checkpoint pages/sec 21 | - mssql_io_stall{database,type} Wait time (ms) of stall since last restart 22 | - mssql_io_stall_total{database} Wait time (ms) of stall since last restart 23 | - mssql_batch_requests Number of Transact-SQL command batches received per second. This statistic is affected by all constraints (such as I/O, number of users, cachesize, complexity of requests, and so on). 
High batch requests mean good throughput 24 | - mssql_transactions{database} Number of transactions started for the database per second. Transactions/sec does not count XTP-only transactions (transactions started by a natively compiled stored procedure.) 25 | - mssql_page_fault_count Number of page faults since last restart 26 | - mssql_memory_utilization_percentage Percentage of memory utilization 27 | - mssql_total_physical_memory_kb Total physical memory in KB 28 | - mssql_available_physical_memory_kb Available physical memory in KB 29 | - mssql_total_page_file_kb Total page file in KB 30 | - mssql_available_page_file_kb Available page file in KB 31 | 32 | Please feel free to submit other interesting metrics to include. 33 | 34 | > This exporter has been tested against MSSQL 2017 and 2019 docker images (only ones offered by Microsoft). Other versions might be work but have not been tested. 35 | 36 | ## Usage 37 | 38 | `docker run -e SERVER=192.168.56.101 -e USERNAME=SA -e PASSWORD=qkD4x3yy -e DEBUG=app -p 4000:4000 --name prometheus-mssql-exporter awaragi/prometheus-mssql-exporter` 39 | 40 | The image supports the following environments and exposes port 4000 41 | 42 | - **SERVER** server ip or dns name (required) 43 | - **PORT** server port (optional defaults to 1433) 44 | - **USERNAME** access user (required) 45 | - **PASSWORD** access password (required) 46 | - **ENCRYPT** force [encrypt](https://docs.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.encrypt?view=dotnet-plat-ext-6.0) setting (optional defaults to true) 47 | - **TRUST_SERVER_CERTIFICATE** sets [trustServerCertificate](https://docs.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.trustservercertificate?view=dotnet-plat-ext-6.0) setting (optional defaults to true) 48 | - **DEBUG** comma delimited list of enabled logs (optional currently supports app and metrics) 49 | 50 | It is **_required_** that the specified user has the following 
permissions 51 | 52 | - GRANT VIEW ANY DEFINITION TO 53 | - GRANT VIEW SERVER STATE TO 54 | 55 | ## Frequently Asked Questions (FAQ) 56 | 57 | ### Unable to connect to database 58 | 59 | Raised in [issue #19](https://github.com/awaragi/prometheus-mssql-exporter/issues/19) 60 | 61 | Probably your SQL Server is working as named instance. For named instances the TCP port is dynamically configured by default, so you may need do explicitly specify port in MSSQL settings as described [here](https://docs.microsoft.com/en-US/sql/database-engine/configure-windows/configure-a-server-to-listen-on-a-specific-tcp-port?view=sql-server-ver15). 62 | 63 | ### Running multiple instances of exporter 64 | 65 | Raised in [issue #20](https://github.com/awaragi/prometheus-mssql-exporter/issues/20) 66 | 67 | Each container should use its own docker port forward (e.g. -p 4001:4000 and -p 4002:4000) 68 | 69 | ### What Grafana dashboard can I use 70 | 71 | Here are some suggestions on available Grafana dashboards. If you are an author or such dashboard and want to have it referenced here, simply create a Pull Request. 72 | 73 | - https://grafana.com/grafana/dashboards/13919 74 | 75 | ### Running in the background 76 | 77 | Use `docker run -d ...` to run the container in background 78 | 79 | ## Development 80 | 81 | ## Launching a test mssql server 82 | 83 | To launch a local mssql instance to test against 84 | 85 | ```shell 86 | npm run test:mssql:2019 87 | # or 88 | npm run test:mssql:2017 89 | ``` 90 | 91 | To use a persistent storage add `-v /:/var/opt/mssql/data` to the command line. 
92 | 93 | ## List all available metrics 94 | 95 | To list all available metrics and the used queries to generate these metrics - say for for DBA validation, use the following command 96 | 97 | ```shell 98 | npm run metrics 99 | ``` 100 | 101 | ## Environment variables 102 | 103 | - SERVER: sqlserver 104 | - PORT: sql server port (optional defaults to 1433) 105 | - USERNAME: sql server user (should have admin or user with required permissions) 106 | - PASSWORD: sql user password 107 | - ENCRYPT: force [encrypt](https://docs.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.encrypt?view=dotnet-plat-ext-6.0) setting (optional defaults to true) 108 | - TRUST_SERVER_CERTIFICATE: sets [trustServerCertificate](https://docs.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.trustservercertificate?view=dotnet-plat-ext-6.0) setting (optional defaults to true) 109 | - EXPOSE: webserver port (defaults to 4000) 110 | - DEBUG: verbose logging 111 | - app for application logging 112 | - metrics for metrics executions logging 113 | - db for database connection logging 114 | - queries for database queries and results logging 115 | 116 | ## Launch via command line 117 | 118 | ### Using NodeJS 119 | 120 | To execute and the application using locally running mssql (see above for how to launch a docker instance of mssql), 121 | use the following command which will generate all a detailed logs 122 | 123 | ```shell 124 | npm start 125 | ``` 126 | 127 | A more verbose execution with all queries and their results printed out 128 | 129 | ```shell 130 | npm run start:verbose 131 | ``` 132 | 133 | ### Using Docker 134 | 135 | To build and launch your docker image use the following command 136 | 137 | ```shell 138 | npm run docker:run 139 | ``` 140 | 141 | ## Testing 142 | 143 | ### Curl or Browser 144 | 145 | Use curl or wget to fetch the metrics from launched web application. 
146 | 147 | ```shell 148 | curl http://localhost:4000/metrics 149 | ``` 150 | 151 | ### E2E Test with Expectations 152 | 153 | E2E test is available to execute against MSSQL 2017 or 2019 docker instances. 154 | 155 | The test does not care about the values of the metrics but checks the presence of all expected keys. 156 | 157 | To add new metrics, the E2E must get updated with their keys to pass. 158 | 159 | ```shell 160 | npm test 161 | ``` 162 | 163 | ## building and pushing image to dockerhub 164 | 165 | Use `docker build` and `docker push` or the bundled Github Workflows/Actions (see .github/workflows) 166 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prometheus-mssql-exporter", 3 | "version": "1.3.0", 4 | "main": "src/index.js", 5 | "private": false, 6 | "keywords": [ 7 | "prometheus", 8 | "mssql", 9 | "exporter", 10 | "metrics" 11 | ], 12 | "author": { 13 | "name": "Pierre Awaragi", 14 | "email": "pierre@awaragi.com" 15 | }, 16 | "license": "MIT", 17 | "readmeFilename": "README.md", 18 | "repository": { 19 | "type": "git", 20 | "url": "https://github.com/awaragi/prometheus-mssql-exporter.git" 21 | }, 22 | "bugs": { 23 | "url": "https://github.com/awaragi/prometheus-mssql-exporter/issues" 24 | }, 25 | "scripts": { 26 | "docker:build": "docker build . -t awaragi/prometheus-mssql-exporter", 27 | "docker:run": "export DOCKERID=$(docker build -q .) 
&& docker run --name prometheus-mssql-exporter --rm -it -p 4000:4000 -e SERVER=$(docker inspect mssql | jq -r '.[].NetworkSettings.Networks.bridge.IPAddress') -e USERNAME=SA -e PASSWORD=qkD4x3yy -e DEBUG=app,db,metrics $DOCKERID ; docker image rm $DOCKERID", 28 | "docker:run:published": "export DOCKERID=awaragi/prometheus-mssql-exporter && docker run --name prometheus-mssql-exporter --rm -it -p 4000:4000 -e SERVER=$(docker inspect mssql | jq -r '.[].NetworkSettings.Networks.bridge.IPAddress') -e USERNAME=SA -e PASSWORD=qkD4x3yy -e DEBUG=app,db,metrics $DOCKERID ; docker image rm $DOCKERID", 29 | "start": "DEBUG=app,db,metrics SERVER=localhost USERNAME=SA PASSWORD=qkD4x3yy node src/index.js", 30 | "start:verbose": "DEBUG=app,db,metrics,queries SERVER=localhost USERNAME=SA PASSWORD=qkD4x3yy node src/index.js", 31 | "test:mssql:2019": "docker run --name mssql --rm -e ACCEPT_EULA=Y -e SA_PASSWORD=qkD4x3yy -p 1433:1433 --name mssql mcr.microsoft.com/mssql/server:2019-latest", 32 | "test:mssql:2017": "docker run --name mssql --rm -e ACCEPT_EULA=Y -e SA_PASSWORD=qkD4x3yy -p 1433:1433 --name mssql mcr.microsoft.com/mssql/server:2017-latest", 33 | "test:fetch": "curl http://localhost:4000/metrics", 34 | "test": "jest test", 35 | "format": "prettier --write .", 36 | "metrics": "node src/metrics-docs.js" 37 | }, 38 | "dependencies": { 39 | "debug": "4.3.4", 40 | "express": "4.17.3", 41 | "prom-client": "9.1.1", 42 | "tedious": "14.3.0" 43 | }, 44 | "devDependencies": { 45 | "jest": "27.5.1", 46 | "prettier": "2.6.0", 47 | "superagent": "7.1.1" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | const appLog = require("debug")("app"); 2 | const dbLog = require("debug")("db"); 3 | const queriesLog = require("debug")("queries"); 4 | 5 | const Connection = require("tedious").Connection; 6 | const Request = require("tedious").Request; 
7 | const app = require("express")(); 8 | const client = require("prom-client"); 9 | 10 | const { entries } = require("./metrics"); 11 | 12 | let config = { 13 | connect: { 14 | server: process.env["SERVER"], 15 | authentication: { 16 | type: "default", 17 | options: { 18 | userName: process.env["USERNAME"], 19 | password: process.env["PASSWORD"], 20 | }, 21 | }, 22 | options: { 23 | port: parseInt(process.env["PORT"]) || 1433, 24 | encrypt: process.env["ENCRYPT"] !== undefined ? process.env["ENCRYPT"] === "true" : true, 25 | trustServerCertificate: process.env["TRUST_SERVER_CERTIFICATE"] !== undefined ? process.env["TRUST_SERVER_CERTIFICATE"] === "true" : true, 26 | rowCollectionOnRequestCompletion: true, 27 | }, 28 | }, 29 | port: parseInt(process.env["EXPOSE"]) || 4000, 30 | }; 31 | 32 | if (!config.connect.server) { 33 | throw new Error("Missing SERVER information"); 34 | } 35 | if (!config.connect.authentication.options.userName) { 36 | throw new Error("Missing USERNAME information"); 37 | } 38 | if (!config.connect.authentication.options.password) { 39 | throw new Error("Missing PASSWORD information"); 40 | } 41 | 42 | /** 43 | * Connects to a database server. 
44 | * 45 | * @returns Promise 46 | */ 47 | async function connect() { 48 | return new Promise((resolve, reject) => { 49 | dbLog( 50 | "Connecting to", 51 | config.connect.authentication.options.userName + "@" + config.connect.server + ":" + config.connect.options.port, 52 | "encrypt:", 53 | config.connect.options.encrypt, 54 | "trustServerCertificate:", 55 | config.connect.options.trustServerCertificate 56 | ); 57 | 58 | let connection = new Connection(config.connect); 59 | connection.on("connect", (error) => { 60 | if (error) { 61 | console.error("Failed to connect to database:", error.message || error); 62 | reject(error); 63 | } else { 64 | dbLog("Connected to database"); 65 | resolve(connection); 66 | } 67 | }); 68 | connection.on("error", (error) => { 69 | console.error("Error while connected to database:", error.message || error); 70 | reject(error); 71 | }); 72 | connection.on("end", () => { 73 | dbLog("Connection to database ended"); 74 | }); 75 | connection.connect(); 76 | }); 77 | } 78 | 79 | /** 80 | * Recursive function that executes all collectors sequentially 81 | * 82 | * @param connection database connection 83 | * @param collector single metric: {query: string, collect: function(rows, metric)} 84 | * @param name name of collector variable 85 | * 86 | * @returns Promise of collect operation (no value returned) 87 | */ 88 | async function measure(connection, collector, name) { 89 | return new Promise((resolve) => { 90 | queriesLog(`Executing metric ${name} query: ${collector.query}`); 91 | let request = new Request(collector.query, (error, rowCount, rows) => { 92 | if (!error) { 93 | queriesLog(`Retrieved metric ${name} rows (${rows.length}): ${JSON.stringify(rows, null, 2)}`); 94 | 95 | if (rows.length > 0) { 96 | try { 97 | collector.collect(rows, collector.metrics); 98 | } catch (error) { 99 | console.error(`Error processing metric ${name} data`, collector.query, JSON.stringify(rows), error); 100 | } 101 | } else { 102 | console.error(`Query for 
metric ${name} returned 0 rows to process`, collector.query); 103 | } 104 | resolve(); 105 | } else { 106 | console.error(`Error executing metric ${name} SQL query`, collector.query, error); 107 | resolve(); 108 | } 109 | }); 110 | connection.execSql(request); 111 | }); 112 | } 113 | 114 | /** 115 | * Function that collects from an active server. 116 | * 117 | * @param connection database connection 118 | * 119 | * @returns Promise of execution (no value returned) 120 | */ 121 | async function collect(connection) { 122 | for (const [metricName, metric] of Object.entries(entries)) { 123 | await measure(connection, metric, metricName); 124 | } 125 | } 126 | 127 | app.get("/", (req, res) => { 128 | res.redirect("/metrics"); 129 | }); 130 | 131 | app.get("/metrics", async (req, res) => { 132 | res.contentType(client.register.contentType); 133 | 134 | try { 135 | appLog("Received /metrics request"); 136 | let connection = await connect(); 137 | await collect(connection); 138 | connection.close(); 139 | res.send(client.register.metrics()); 140 | appLog("Successfully processed /metrics request"); 141 | } catch (error) { 142 | // error connecting 143 | appLog("Error handling /metrics request"); 144 | const mssqlUp = entries.mssql_up.metrics.mssql_up; 145 | mssqlUp.set(0); 146 | res.header("X-Error", error.message || error); 147 | res.send(client.register.getSingleMetricAsString(mssqlUp.name)); 148 | } 149 | }); 150 | 151 | const server = app.listen(config.port, function () { 152 | appLog( 153 | `Prometheus-MSSQL Exporter listening on local port ${config.port} monitoring ${config.connect.authentication.options.userName}@${config.connect.server}:${config.connect.options.port}` 154 | ); 155 | }); 156 | 157 | process.on("SIGINT", function () { 158 | server.close(); 159 | process.exit(0); 160 | }); 161 | -------------------------------------------------------------------------------- /src/metrics-docs.js: 
-------------------------------------------------------------------------------- 1 | const { entries } = require("./metrics"); 2 | 3 | // DOCUMENTATION of queries and their associated metrics (targeted to DBAs) 4 | Object.entries(entries).forEach(([entryName, entry]) => { 5 | console.log("--[", entryName, "]"); 6 | for (let key in entry.metrics) { 7 | if (entry.metrics.hasOwnProperty(key)) { 8 | console.log("--", entry.metrics[key].name, entry.metrics[key].help); 9 | } 10 | } 11 | console.log(entry.query + ";"); 12 | console.log(""); 13 | }); 14 | 15 | console.log("/*"); 16 | Object.values(entries).forEach((entry) => { 17 | for (let key in entry.metrics) { 18 | if (entry.metrics.hasOwnProperty(key)) { 19 | console.log( 20 | "-", 21 | entry.metrics[key].name + (entry.metrics[key].labelNames.length > 0 ? "{" + entry.metrics[key].labelNames + "}" : ""), 22 | entry.metrics[key].help 23 | ); 24 | } 25 | } 26 | }); 27 | console.log("*/"); 28 | -------------------------------------------------------------------------------- /src/metrics.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Collection of metrics and their associated SQL requests 3 | * Created by Pierre Awaragi 4 | */ 5 | const metricsLog = require("debug")("metrics"); 6 | const client = require("prom-client"); 7 | const { productVersionParse } = require("./utils"); 8 | 9 | const mssql_up = { 10 | metrics: { 11 | mssql_up: new client.Gauge({ name: "mssql_up", help: "UP Status" }), 12 | }, 13 | query: "SELECT 1", 14 | collect: (rows, metrics) => { 15 | let mssql_up = rows[0][0].value; 16 | metricsLog("Fetched status of instance", mssql_up); 17 | metrics.mssql_up.set(mssql_up); 18 | }, 19 | }; 20 | 21 | const mssql_product_version = { 22 | metrics: { 23 | mssql_product_version: new client.Gauge({ name: "mssql_product_version", help: "Instance version (Major.Minor)" }), 24 | }, 25 | query: `SELECT CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion')) AS 
ProductVersion, 26 | SERVERPROPERTY('ProductVersion') AS ProductVersion 27 | `, 28 | collect: (rows, metrics) => { 29 | let v = productVersionParse(rows[0][0].value); 30 | const mssql_product_version = v.major + "." + v.minor; 31 | metricsLog("Fetched version of instance", mssql_product_version); 32 | metrics.mssql_product_version.set(mssql_product_version); 33 | }, 34 | }; 35 | 36 | const mssql_instance_local_time = { 37 | metrics: { 38 | mssql_instance_local_time: new client.Gauge({ name: "mssql_instance_local_time", help: "Number of seconds since epoch on local instance" }), 39 | }, 40 | query: `SELECT DATEDIFF(second, '19700101', GETUTCDATE())`, 41 | collect: (rows, metrics) => { 42 | const mssql_instance_local_time = rows[0][0].value; 43 | metricsLog("Fetched current time", mssql_instance_local_time); 44 | metrics.mssql_instance_local_time.set(mssql_instance_local_time); 45 | }, 46 | }; 47 | 48 | const mssql_connections = { 49 | metrics: { 50 | mssql_connections: new client.Gauge({ name: "mssql_connections", help: "Number of active connections", labelNames: ["database", "state"] }), 51 | }, 52 | query: `SELECT DB_NAME(sP.dbid) 53 | , COUNT(sP.spid) 54 | FROM sys.sysprocesses sP 55 | GROUP BY DB_NAME(sP.dbid)`, 56 | collect: (rows, metrics) => { 57 | for (let i = 0; i < rows.length; i++) { 58 | const row = rows[i]; 59 | const database = row[0].value; 60 | const mssql_connections = row[1].value; 61 | metricsLog("Fetched number of connections for database", database, mssql_connections); 62 | metrics.mssql_connections.set({ database, state: "current" }, mssql_connections); 63 | } 64 | }, 65 | }; 66 | 67 | const mssql_client_connections = { 68 | metrics: { 69 | mssql_client_connections: new client.Gauge({ 70 | name: "mssql_client_connections", 71 | help: "Number of active client connections", 72 | labelNames: ["client", "database"], 73 | }), 74 | }, 75 | query: `SELECT host_name, DB_NAME(dbid) dbname, COUNT(*) session_count 76 | FROM sys.dm_exec_sessions a 77 | 
LEFT JOIN sysprocesses b on a.session_id=b.spid 78 | WHERE is_user_process=1 79 | GROUP BY host_name, dbid`, 80 | collect: (rows, metrics) => { 81 | for (let i = 0; i < rows.length; i++) { 82 | const row = rows[i]; 83 | const client = row[0].value; 84 | const database = row[1].value; 85 | const mssql_client_connections = row[2].value; 86 | metricsLog("Fetched number of connections for client", client, database, mssql_client_connections); 87 | metrics.mssql_client_connections.set({ client, database }, mssql_client_connections); 88 | } 89 | }, 90 | }; 91 | 92 | const mssql_deadlocks = { 93 | metrics: { 94 | mssql_deadlocks_per_second: new client.Gauge({ 95 | name: "mssql_deadlocks", 96 | help: "Number of lock requests per second that resulted in a deadlock since last restart", 97 | }), 98 | }, 99 | query: `SELECT cntr_value 100 | FROM sys.dm_os_performance_counters 101 | WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total'`, 102 | collect: (rows, metrics) => { 103 | const mssql_deadlocks = rows[0][0].value; 104 | metricsLog("Fetched number of deadlocks/sec", mssql_deadlocks); 105 | metrics.mssql_deadlocks_per_second.set(mssql_deadlocks); 106 | }, 107 | }; 108 | 109 | const mssql_user_errors = { 110 | metrics: { 111 | mssql_user_errors: new client.Gauge({ name: "mssql_user_errors", help: "Number of user errors/sec since last restart" }), 112 | }, 113 | query: `SELECT cntr_value 114 | FROM sys.dm_os_performance_counters 115 | WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors'`, 116 | collect: (rows, metrics) => { 117 | const mssql_user_errors = rows[0][0].value; 118 | metricsLog("Fetched number of user errors/sec", mssql_user_errors); 119 | metrics.mssql_user_errors.set(mssql_user_errors); 120 | }, 121 | }; 122 | 123 | const mssql_kill_connection_errors = { 124 | metrics: { 125 | mssql_kill_connection_errors: new client.Gauge({ name: "mssql_kill_connection_errors", help: "Number of kill connection errors/sec since last restart" 
}), 126 | }, 127 | query: `SELECT cntr_value 128 | FROM sys.dm_os_performance_counters 129 | WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors'`, 130 | collect: (rows, metrics) => { 131 | const mssql_kill_connection_errors = rows[0][0].value; 132 | metricsLog("Fetched number of kill connection errors/sec", mssql_kill_connection_errors); 133 | metrics.mssql_kill_connection_errors.set(mssql_kill_connection_errors); 134 | }, 135 | }; 136 | 137 | const mssql_database_state = { 138 | metrics: { 139 | mssql_database_state: new client.Gauge({ 140 | name: "mssql_database_state", 141 | help: "Databases states: 0=ONLINE 1=RESTORING 2=RECOVERING 3=RECOVERY_PENDING 4=SUSPECT 5=EMERGENCY 6=OFFLINE 7=COPYING 10=OFFLINE_SECONDARY", 142 | labelNames: ["database"], 143 | }), 144 | }, 145 | query: `SELECT name,state FROM master.sys.databases`, 146 | collect: (rows, metrics) => { 147 | for (let i = 0; i < rows.length; i++) { 148 | const row = rows[i]; 149 | const database = row[0].value; 150 | const mssql_database_state = row[1].value; 151 | metricsLog("Fetched state for database", database, mssql_database_state); 152 | metrics.mssql_database_state.set({ database }, mssql_database_state); 153 | } 154 | }, 155 | }; 156 | 157 | const mssql_log_growths = { 158 | metrics: { 159 | mssql_log_growths: new client.Gauge({ 160 | name: "mssql_log_growths", 161 | help: "Total number of times the transaction log for the database has been expanded last restart", 162 | labelNames: ["database"], 163 | }), 164 | }, 165 | query: `SELECT rtrim(instance_name), cntr_value 166 | FROM sys.dm_os_performance_counters 167 | WHERE counter_name = 'Log Growths' and instance_name <> '_Total'`, 168 | collect: (rows, metrics) => { 169 | for (let i = 0; i < rows.length; i++) { 170 | const row = rows[i]; 171 | const database = row[0].value; 172 | const mssql_log_growths = row[1].value; 173 | metricsLog("Fetched number log growths for database", database, mssql_log_growths); 174 | 
metrics.mssql_log_growths.set({ database }, mssql_log_growths); 175 | } 176 | }, 177 | }; 178 | 179 | const mssql_database_filesize = { 180 | metrics: { 181 | mssql_database_filesize: new client.Gauge({ 182 | name: "mssql_database_filesize", 183 | help: "Physical sizes of files used by database in KB, their names and types (0=rows, 1=log, 2=filestream,3=n/a 4=fulltext(before v2008 of MSSQL))", 184 | labelNames: ["database", "logicalname", "type", "filename"], 185 | }), 186 | }, 187 | query: `SELECT DB_NAME(database_id) AS database_name, name AS logical_name, type, physical_name, (size * CAST(8 AS BIGINT)) size_kb FROM sys.master_files`, 188 | collect: (rows, metrics) => { 189 | for (let i = 0; i < rows.length; i++) { 190 | const row = rows[i]; 191 | const database = row[0].value; 192 | const logicalname = row[1].value; 193 | const type = row[2].value; 194 | const filename = row[3].value; 195 | const mssql_database_filesize = row[4].value; 196 | metricsLog( 197 | "Fetched size of files for database ", 198 | database, 199 | "logicalname", 200 | logicalname, 201 | "type", 202 | type, 203 | "filename", 204 | filename, 205 | "size", 206 | mssql_database_filesize 207 | ); 208 | metrics.mssql_database_filesize.set({ database, logicalname, type, filename }, mssql_database_filesize); 209 | } 210 | }, 211 | }; 212 | 213 | const mssql_buffer_manager = { 214 | metrics: { 215 | mssql_page_read_total: new client.Gauge({ name: "mssql_page_read_total", help: "Page reads/sec" }), 216 | mssql_page_write_total: new client.Gauge({ name: "mssql_page_write_total", help: "Page writes/sec" }), 217 | mssql_page_life_expectancy: new client.Gauge({ 218 | name: "mssql_page_life_expectancy", 219 | help: "Indicates the minimum number of seconds a page will stay in the buffer pool on this node without references. 
The traditional advice from Microsoft used to be that the PLE should remain above 300 seconds", 220 | }), 221 | mssql_lazy_write_total: new client.Gauge({ name: "mssql_lazy_write_total", help: "Lazy writes/sec" }), 222 | mssql_page_checkpoint_total: new client.Gauge({ name: "mssql_page_checkpoint_total", help: "Checkpoint pages/sec" }), 223 | }, 224 | query: `SELECT * FROM 225 | ( 226 | SELECT rtrim(counter_name) as counter_name, cntr_value 227 | FROM sys.dm_os_performance_counters 228 | WHERE counter_name in ('Page reads/sec', 'Page writes/sec', 'Page life expectancy', 'Lazy writes/sec', 'Checkpoint pages/sec') 229 | AND object_name = 'SQLServer:Buffer Manager' 230 | ) d 231 | PIVOT 232 | ( 233 | MAX(cntr_value) 234 | FOR counter_name IN ([Page reads/sec], [Page writes/sec], [Page life expectancy], [Lazy writes/sec], [Checkpoint pages/sec]) 235 | ) piv 236 | `, 237 | collect: (rows, metrics) => { 238 | const row = rows[0]; 239 | const page_read = row[0].value; 240 | const page_write = row[1].value; 241 | const page_life_expectancy = row[2].value; 242 | const lazy_write_total = row[3].value; 243 | const page_checkpoint_total = row[4].value; 244 | metricsLog( 245 | "Fetched the Buffer Manager", 246 | "page_read", 247 | page_read, 248 | "page_write", 249 | page_write, 250 | "page_life_expectancy", 251 | page_life_expectancy, 252 | "page_checkpoint_total", 253 | "page_checkpoint_total", 254 | page_checkpoint_total, 255 | "lazy_write_total", 256 | lazy_write_total 257 | ); 258 | metrics.mssql_page_read_total.set(page_read); 259 | metrics.mssql_page_write_total.set(page_write); 260 | metrics.mssql_page_life_expectancy.set(page_life_expectancy); 261 | metrics.mssql_page_checkpoint_total.set(page_checkpoint_total); 262 | metrics.mssql_lazy_write_total.set(lazy_write_total); 263 | }, 264 | }; 265 | 266 | const mssql_io_stall = { 267 | metrics: { 268 | mssql_io_stall: new client.Gauge({ name: "mssql_io_stall", help: "Wait time (ms) of stall since last restart", labelNames: 
["database", "type"] }), 269 | mssql_io_stall_total: new client.Gauge({ name: "mssql_io_stall_total", help: "Wait time (ms) of stall since last restart", labelNames: ["database"] }), 270 | }, 271 | query: `SELECT 272 | cast(DB_Name(a.database_id) as varchar) as name, 273 | max(io_stall_read_ms), 274 | max(io_stall_write_ms), 275 | max(io_stall), 276 | max(io_stall_queued_read_ms), 277 | max(io_stall_queued_write_ms) 278 | FROM 279 | sys.dm_io_virtual_file_stats(null, null) a 280 | INNER JOIN sys.master_files b ON a.database_id = b.database_id and a.file_id = b.file_id 281 | GROUP BY a.database_id`, 282 | collect: (rows, metrics) => { 283 | for (let i = 0; i < rows.length; i++) { 284 | const row = rows[i]; 285 | const database = row[0].value; 286 | const read = row[1].value; 287 | const write = row[2].value; 288 | const stall = row[3].value; 289 | const queued_read = row[4].value; 290 | const queued_write = row[5].value; 291 | metricsLog("Fetched number of stalls for database", database, "read", read, "write", write, "queued_read", queued_read, "queued_write", queued_write); 292 | metrics.mssql_io_stall_total.set({ database }, stall); 293 | metrics.mssql_io_stall.set({ database, type: "read" }, read); 294 | metrics.mssql_io_stall.set({ database, type: "write" }, write); 295 | metrics.mssql_io_stall.set({ database, type: "queued_read" }, queued_read); 296 | metrics.mssql_io_stall.set({ database, type: "queued_write" }, queued_write); 297 | } 298 | }, 299 | }; 300 | 301 | const mssql_batch_requests = { 302 | metrics: { 303 | mssql_batch_requests: new client.Gauge({ 304 | name: "mssql_batch_requests", 305 | help: "Number of Transact-SQL command batches received per second. This statistic is affected by all constraints (such as I/O, number of users, cachesize, complexity of requests, and so on). 
High batch requests mean good throughput", 306 | }), 307 | }, 308 | query: `SELECT TOP 1 cntr_value 309 | FROM sys.dm_os_performance_counters 310 | WHERE counter_name = 'Batch Requests/sec'`, 311 | collect: (rows, metrics) => { 312 | for (let i = 0; i < rows.length; i++) { 313 | const row = rows[i]; 314 | const mssql_batch_requests = row[0].value; 315 | metricsLog("Fetched number of batch requests per second", mssql_batch_requests); 316 | metrics.mssql_batch_requests.set(mssql_batch_requests); 317 | } 318 | }, 319 | }; 320 | 321 | const mssql_transactions = { 322 | metrics: { 323 | mssql_transactions: new client.Gauge({ 324 | name: "mssql_transactions", 325 | help: "Number of transactions started for the database per second. Transactions/sec does not count XTP-only transactions (transactions started by a natively compiled stored procedure.)", 326 | labelNames: ["database"], 327 | }), 328 | }, 329 | query: `SELECT rtrim(instance_name), cntr_value 330 | FROM sys.dm_os_performance_counters 331 | WHERE counter_name = 'Transactions/sec' AND instance_name <> '_Total'`, 332 | collect: (rows, metrics) => { 333 | for (let i = 0; i < rows.length; i++) { 334 | const row = rows[i]; 335 | const database = row[0].value; 336 | const transactions = row[1].value; 337 | metricsLog("Fetched number of transactions per second", database, transactions); 338 | metrics.mssql_transactions.set({ database }, transactions); 339 | } 340 | }, 341 | }; 342 | 343 | const mssql_os_process_memory = { 344 | metrics: { 345 | mssql_page_fault_count: new client.Gauge({ name: "mssql_page_fault_count", help: "Number of page faults since last restart" }), 346 | mssql_memory_utilization_percentage: new client.Gauge({ name: "mssql_memory_utilization_percentage", help: "Percentage of memory utilization" }), 347 | }, 348 | query: `SELECT page_fault_count, memory_utilization_percentage 349 | FROM sys.dm_os_process_memory`, 350 | collect: (rows, metrics) => { 351 | const page_fault_count = rows[0][0].value; 352 
| const memory_utilization_percentage = rows[0][1].value; 353 | metricsLog("Fetched page fault count", page_fault_count); 354 | metrics.mssql_page_fault_count.set(page_fault_count); 355 | metrics.mssql_memory_utilization_percentage.set(memory_utilization_percentage); 356 | }, 357 | }; 358 | 359 | const mssql_os_sys_memory = { 360 | metrics: { 361 | mssql_total_physical_memory_kb: new client.Gauge({ name: "mssql_total_physical_memory_kb", help: "Total physical memory in KB" }), 362 | mssql_available_physical_memory_kb: new client.Gauge({ name: "mssql_available_physical_memory_kb", help: "Available physical memory in KB" }), 363 | mssql_total_page_file_kb: new client.Gauge({ name: "mssql_total_page_file_kb", help: "Total page file in KB" }), 364 | mssql_available_page_file_kb: new client.Gauge({ name: "mssql_available_page_file_kb", help: "Available page file in KB" }), 365 | }, 366 | query: `SELECT total_physical_memory_kb, available_physical_memory_kb, total_page_file_kb, available_page_file_kb 367 | FROM sys.dm_os_sys_memory`, 368 | collect: (rows, metrics) => { 369 | const mssql_total_physical_memory_kb = rows[0][0].value; 370 | const mssql_available_physical_memory_kb = rows[0][1].value; 371 | const mssql_total_page_file_kb = rows[0][2].value; 372 | const mssql_available_page_file_kb = rows[0][3].value; 373 | metricsLog( 374 | "Fetched system memory information", 375 | "Total physical memory", 376 | mssql_total_physical_memory_kb, 377 | "Available physical memory", 378 | mssql_available_physical_memory_kb, 379 | "Total page file", 380 | mssql_total_page_file_kb, 381 | "Available page file", 382 | mssql_available_page_file_kb 383 | ); 384 | metrics.mssql_total_physical_memory_kb.set(mssql_total_physical_memory_kb); 385 | metrics.mssql_available_physical_memory_kb.set(mssql_available_physical_memory_kb); 386 | metrics.mssql_total_page_file_kb.set(mssql_total_page_file_kb); 387 | metrics.mssql_available_page_file_kb.set(mssql_available_page_file_kb); 388 | }, 389 | 
}; 390 | 391 | const entries = { 392 | mssql_up, 393 | mssql_product_version, 394 | mssql_instance_local_time, 395 | mssql_connections, 396 | mssql_client_connections, 397 | mssql_deadlocks, 398 | mssql_user_errors, 399 | mssql_kill_connection_errors, 400 | mssql_database_state, 401 | mssql_log_growths, 402 | mssql_database_filesize, 403 | mssql_buffer_manager, 404 | mssql_io_stall, 405 | mssql_batch_requests, 406 | mssql_transactions, 407 | mssql_os_process_memory, 408 | mssql_os_sys_memory, 409 | }; 410 | 411 | module.exports = { 412 | entries, 413 | }; 414 | -------------------------------------------------------------------------------- /src/utils.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Utility functions 3 | */ 4 | const productVersionParse = (version) => { 5 | const pattern = /^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)$/; 6 | const match = version.match(pattern); 7 | if (!match) throw new Error("Invalid product version " + version); 8 | return { 9 | major: Number(match[1]), 10 | minor: Number(match[2]), 11 | patch: Number(match[3]), 12 | build: Number(match[4]), 13 | }; 14 | }; 15 | 16 | module.exports = { 17 | productVersionParse, 18 | }; 19 | -------------------------------------------------------------------------------- /test/e2e.test.js: -------------------------------------------------------------------------------- 1 | const request = require("superagent"); 2 | 3 | function parse(text) { 4 | let lines = text.split("\n"); 5 | lines = lines.filter((line) => !line.startsWith("#")).filter((line) => line.length !== 0); 6 | const o = {}; 7 | lines.forEach((line) => { 8 | expect(line.indexOf(" ")).toBeGreaterThanOrEqual(0); 9 | [key, value] = line.split(" "); 10 | o[key] = parseInt(value); 11 | }); 12 | return o; 13 | } 14 | 15 | describe("E2E Test", function () { 16 | it("Fetch all metrics and ensure that all expected are present", async function () { 17 | const data = await 
request.get("http://localhost:4000/metrics"); 18 | expect(data.status).toBe(200); 19 | let text = data.text; 20 | const lines = parse(text); 21 | 22 | // some specific tests 23 | expect(lines.mssql_up).toBe(1); 24 | expect([14, 15]).toContain(lines.mssql_product_version); 25 | expect(lines.mssql_instance_local_time).toBeGreaterThan(0); 26 | expect(lines.mssql_total_physical_memory_kb).toBeGreaterThan(0); 27 | 28 | // lets ensure that there is at least one instance of these 2019 entries (that differ from 2017) 29 | const v2019 = ["mssql_client_connections", "mssql_database_filesize"]; 30 | v2019.forEach((k2019) => { 31 | const keys = Object.keys(lines); 32 | const i = keys.findIndex((key) => key.startsWith(k2019)); 33 | expect(i).toBeGreaterThanOrEqual(0); 34 | keys 35 | .filter((key) => key.startsWith(k2019)) 36 | .forEach((key) => { 37 | delete lines[key]; 38 | }); 39 | }); 40 | 41 | // bulk ensure that all expected results of a vanilla mssql server instance are here 42 | expect(Object.keys(lines)).toEqual([ 43 | "mssql_up", 44 | "mssql_product_version", 45 | "mssql_instance_local_time", 46 | 'mssql_connections{database="master",state="current"}', 47 | "mssql_deadlocks", 48 | "mssql_user_errors", 49 | "mssql_kill_connection_errors", 50 | 'mssql_database_state{database="master"}', 51 | 'mssql_database_state{database="tempdb"}', 52 | 'mssql_database_state{database="model"}', 53 | 'mssql_database_state{database="msdb"}', 54 | 'mssql_log_growths{database="tempdb"}', 55 | 'mssql_log_growths{database="model"}', 56 | 'mssql_log_growths{database="msdb"}', 57 | 'mssql_log_growths{database="mssqlsystemresource"}', 58 | 'mssql_log_growths{database="master"}', 59 | "mssql_page_read_total", 60 | "mssql_page_write_total", 61 | "mssql_page_life_expectancy", 62 | "mssql_lazy_write_total", 63 | "mssql_page_checkpoint_total", 64 | 'mssql_io_stall{database="master",type="read"}', 65 | 'mssql_io_stall{database="master",type="write"}', 66 | 
'mssql_io_stall{database="master",type="queued_read"}',
      'mssql_io_stall{database="master",type="queued_write"}',
      // the four io_stall series repeat for every system database
      'mssql_io_stall{database="tempdb",type="read"}',
      'mssql_io_stall{database="tempdb",type="write"}',
      'mssql_io_stall{database="tempdb",type="queued_read"}',
      'mssql_io_stall{database="tempdb",type="queued_write"}',
      'mssql_io_stall{database="model",type="read"}',
      'mssql_io_stall{database="model",type="write"}',
      'mssql_io_stall{database="model",type="queued_read"}',
      'mssql_io_stall{database="model",type="queued_write"}',
      'mssql_io_stall{database="msdb",type="read"}',
      'mssql_io_stall{database="msdb",type="write"}',
      'mssql_io_stall{database="msdb",type="queued_read"}',
      'mssql_io_stall{database="msdb",type="queued_write"}',
      'mssql_io_stall_total{database="master"}',
      'mssql_io_stall_total{database="tempdb"}',
      'mssql_io_stall_total{database="model"}',
      'mssql_io_stall_total{database="msdb"}',
      "mssql_batch_requests",
      'mssql_transactions{database="tempdb"}',
      'mssql_transactions{database="model"}',
      'mssql_transactions{database="msdb"}',
      'mssql_transactions{database="mssqlsystemresource"}',
      'mssql_transactions{database="master"}',
      "mssql_page_fault_count",
      "mssql_memory_utilization_percentage",
      "mssql_total_physical_memory_kb",
      "mssql_available_physical_memory_kb",
      "mssql_total_page_file_kb",
      "mssql_available_page_file_kb",
    ]);
  });
});
--------------------------------------------------------------------------------