├── .github └── FUNDING.yml ├── docs ├── overview.gif ├── replicaset.jpg ├── sharded-cluster.jpg └── screenshot-replset.png ├── src ├── lib │ ├── longest-string.js │ ├── longest-string.spec.js │ ├── get-shard-hosts.js │ ├── print-connections.spec.js │ ├── print-connections.js │ ├── status.js │ ├── get-shard-hosts.spec.js │ └── event-handlers.js ├── log.js ├── load-status.js ├── mongo-monitor.js └── load-status.spec.js ├── .gitignore ├── test ├── shard.isMaster.json ├── standalone.isMaster.json ├── shard.listShards.json ├── replset.isMaster.json ├── shard.node.isMaster.json ├── replset.replSetGetStatus.json ├── shard.replSet.replSetGetStatus.json ├── standalone.serverStatus.json └── replset.serverStatus.json ├── makefile ├── .eslintrc.yml ├── samples ├── standalone │ └── standalone.sh ├── replicaset │ └── replicaset.sh └── shard │ └── shard.sh ├── .circleci └── config.yml ├── bin └── mongo-monitor.js ├── package.json ├── CHANGELOG.md └── README.md /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # Support 'GitHub Sponsors' funding. 
2 | github: dwmkerr 3 | -------------------------------------------------------------------------------- /docs/overview.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dwmkerr/mongo-monitor/HEAD/docs/overview.gif -------------------------------------------------------------------------------- /docs/replicaset.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dwmkerr/mongo-monitor/HEAD/docs/replicaset.jpg -------------------------------------------------------------------------------- /docs/sharded-cluster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dwmkerr/mongo-monitor/HEAD/docs/sharded-cluster.jpg -------------------------------------------------------------------------------- /docs/screenshot-replset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dwmkerr/mongo-monitor/HEAD/docs/screenshot-replset.png -------------------------------------------------------------------------------- /src/lib/longest-string.js: -------------------------------------------------------------------------------- 1 | function longestString(strs) { 2 | return strs.reduce((acc, s) => s.length > acc ? 
s.length : acc, 0); 3 | } 4 | 5 | module.exports = longestString; 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .nyc_output 3 | artifacts/ 4 | 5 | # Sample datafiles 6 | samples/standalone/log 7 | samples/standalone/data 8 | samples/replicaset/log 9 | samples/replicaset/data* 10 | samples/shard/data* 11 | samples/shard/log 12 | -------------------------------------------------------------------------------- /test/shard.isMaster.json: -------------------------------------------------------------------------------- 1 | { 2 | "ismaster": true, 3 | "msg": "isdbgrid", 4 | "maxBsonObjectSize": 16777216, 5 | "maxMessageSizeBytes": 48000000, 6 | "maxWriteBatchSize": 1000, 7 | "localTime": "2018-03-01T04:13:18.825Z", 8 | "maxWireVersion": 5, 9 | "minWireVersion": 0, 10 | "ok": 1 11 | } 12 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | sample-standalone: 2 | cd ./samples/standalone && ./standalone.sh 3 | 4 | sample-replicaset: 5 | cd ./samples/replicaset && ./replicaset.sh 6 | 7 | sample-shard: 8 | cd ./samples/shard && ./shard.sh 9 | 10 | sample-shutdown: 11 | pgrep -ax mongod | awk '{print $$1}' | xargs kill 12 | pgrep -ax mongos | awk '{print $$1}' | xargs kill 13 | -------------------------------------------------------------------------------- /.eslintrc.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | eslint:recommended 3 | env: 4 | es6: true 5 | node: true 6 | mocha: true 7 | parserOptions: 8 | ecmaVersion: 8 9 | rules: 10 | indent: 11 | - error 12 | - 2 13 | no-console: 0 14 | linebreak-style: 15 | - error 16 | - unix 17 | quotes: 18 | - error 19 | - single 20 | semi: 21 | - error 22 | - always 23 | 
-------------------------------------------------------------------------------- /test/standalone.isMaster.json: -------------------------------------------------------------------------------- 1 | { 2 | "ismaster" : true, 3 | "maxBsonObjectSize" : 16777216, 4 | "maxMessageSizeBytes" : 48000000, 5 | "maxWriteBatchSize" : 100000, 6 | "localTime" : "2019-11-05T07:07:58.708Z", 7 | "logicalSessionTimeoutMinutes" : 30, 8 | "connectionId" : 1, 9 | "minWireVersion" : 0, 10 | "maxWireVersion" : 8, 11 | "readOnly" : false, 12 | "ok" : 1 13 | } 14 | -------------------------------------------------------------------------------- /src/lib/longest-string.spec.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const longestString = require('./longest-string'); 3 | 4 | describe('longest-string', () => { 5 | it('should return zero for an empty array', () => { 6 | expect(longestString([])).to.equal(0); 7 | }); 8 | 9 | it('should be able to identify the longest string', () => { 10 | expect(longestString([ 11 | 'one', 12 | 'two', 13 | 'three', 14 | 'four' 15 | ])).to.equal(5); 16 | }); 17 | }); 18 | 19 | -------------------------------------------------------------------------------- /src/log.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | 3 | let indentAmount = 0; 4 | 5 | function indent(value) { 6 | indentAmount += value; 7 | } 8 | 9 | const indentStr = () => { 10 | return ''.padStart(indentAmount); 11 | }; 12 | 13 | function info(msg) { 14 | console.log(indentStr() + chalk.blue(msg)); 15 | } 16 | 17 | function warn(msg) { 18 | console.log(indentStr() + chalk.yellow(msg)); 19 | } 20 | 21 | function error(msg) { 22 | console.log(indentStr() + chalk.red(msg)); 23 | } 24 | 25 | module.exports = { 26 | indent, 27 | info, 28 | warn, 29 | error 30 | }; 31 | 
-------------------------------------------------------------------------------- /samples/standalone/standalone.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Rebuild data and log directories. 4 | if [[ -d './data' ]]; then rm -rf ./data; fi 5 | mkdir ./data 6 | if [[ -d './log' ]]; then rm -rf ./log; fi 7 | mkdir ./log 8 | 9 | # Start the standalone process. 10 | mongod --fork --logpath ./log/standalone.log --oplogSize 50 --port 27017 --dbpath ./data 11 | 12 | # Wait a bit of time for the process to start. 13 | sleep 10 14 | 15 | # Show the process info. 16 | echo 17 | echo "Standalone MongoDB Running on 27017" 18 | echo 19 | echo "Monitor with: " 20 | echo " mongo-monitor localhost:27017" 21 | -------------------------------------------------------------------------------- /test/shard.listShards.json: -------------------------------------------------------------------------------- 1 | { 2 | "shards": [ 3 | { 4 | "_id": "shard1", 5 | "host": "shard1rs/mongod1.shard1.mongo-cluster.com:27017,mongod2.shard1.mongo-cluster.com:27017,mongod3.shard1.mongo-cluster.com:27017", 6 | "state": 1 7 | }, 8 | { 9 | "_id": "shard2", 10 | "host": "shard2rs/mongod1.shard2.mongo-cluster.com:27017,mongod2.shard2.mongo-cluster.com:27017,mongod3.shard3.mongo-cluster.com:27017", 11 | "state": 1 12 | }, 13 | { 14 | "_id": "shard3.mongo-cluster.com", 15 | "host": "shard3rs/mongod1.shard3.mongo-cluster.com:27017,mongod2.shard3.mongo-cluster.com:27017,mongod3.shard3.mongo-cluster.com:27017", 16 | "state": 1 17 | } 18 | ], 19 | "ok": 1 20 | } 21 | -------------------------------------------------------------------------------- /src/lib/get-shard-hosts.js: -------------------------------------------------------------------------------- 1 | function getShardHostDetails(host) { 2 | // If we do not have a slash in the host, then we have a shard which is NOT 3 | // a replicaset.
4 | if (host.indexOf('/') === -1) { 5 | return { 6 | connectionString: `mongodb://${host}`, 7 | replicaSet: null, 8 | hosts: [ host ] // single host only. 9 | }; 10 | } 11 | 12 | // Split out the shard replicaset and hosts. 13 | const [replicaSet, allHosts] = host.split('/'); 14 | 15 | // Split out the individual hosts. 16 | const hosts = allHosts.split(','); 17 | 18 | return { 19 | connectionString: `mongodb://${allHosts}?replicaSet=${replicaSet}`, 20 | replicaSet, 21 | hosts 22 | }; 23 | } 24 | 25 | module.exports = getShardHostDetails; 26 | -------------------------------------------------------------------------------- /test/replset.isMaster.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": [ 3 | "mongo1.mongo-cluster.com:27017", 4 | "mongo2.mongo-cluster.com:27017" 5 | ], 6 | "setName": "replset", 7 | "setVersion": 1, 8 | "ismaster": true, 9 | "secondary": false, 10 | "primary": "mongo1.mongo-cluster.com:27017", 11 | "me": "mongo1.mongo-cluster.com:27017", 12 | "electionId": "7fffffff0000000000000001", 13 | "lastWrite": { 14 | "opTime": { 15 | "ts": "6527928717405061121", 16 | "t": 1 17 | }, 18 | "lastWriteDate": "2018-03-01T10:57:25.000Z" 19 | }, 20 | "maxBsonObjectSize": 16777216, 21 | "maxMessageSizeBytes": 48000000, 22 | "maxWriteBatchSize": 1000, 23 | "localTime": "2018-03-01T10:57:30.378Z", 24 | "maxWireVersion": 5, 25 | "minWireVersion": 0, 26 | "readOnly": false, 27 | "ok": 1 28 | } 29 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/circle 5 | docker: 6 | - image: circleci/node:8 7 | steps: 8 | - checkout 9 | - restore_cache: 10 | key: dependency-cache-{{ checksum "package.json" }} 11 | - run: 12 | name: npm-install 13 | command: npm install 14 | - save_cache: 15 | key: dependency-cache-{{ checksum 
"package.json" }} 16 | paths: 17 | - ./node_modules 18 | - run: 19 | name: lint 20 | command: npm run lint 21 | - run: 22 | name: test 23 | command: npm run test:ci 24 | - store_artifacts: 25 | path: ./artifacts 26 | prefix: artifacts 27 | - store_test_results: 28 | path: ./artifacts/tests 29 | - run: 30 | name: upload-coverage 31 | command: bash <(curl -s https://codecov.io/bash) 32 | -------------------------------------------------------------------------------- /test/shard.node.isMaster.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": [ 3 | "mongod1.shard1.mongo-cluster.com:27017", 4 | "mongod2.shard1.mongo-cluster.com:27017", 5 | "mongod3.shard1.mongo-cluster.com:27017" 6 | ], 7 | "setName": "shard1replset", 8 | "setVersion": 1, 9 | "ismaster": true, 10 | "secondary": false, 11 | "primary": "mongod1.shard1.mongo-cluster.com:27017", 12 | "me": "mongod1.mongo-cluster.com:27017", 13 | "electionId": "7fffffff0000000000000001", 14 | "lastWrite": { 15 | "opTime": { 16 | "ts": "6527823546540883969", 17 | "t": 1 18 | }, 19 | "lastWriteDate": "2018-03-01T04:09:18.000Z" 20 | }, 21 | "maxBsonObjectSize": 16777216, 22 | "maxMessageSizeBytes": 48000000, 23 | "maxWriteBatchSize": 1000, 24 | "localTime": "2018-03-01T04:09:18.568Z", 25 | "maxWireVersion": 5, 26 | "minWireVersion": 0, 27 | "readOnly": false, 28 | "ok": 1, 29 | "$gleStats": { 30 | "lastOpTime": "0", 31 | "electionId": "7fffffff0000000000000001" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/lib/print-connections.spec.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const chalk = require('chalk'); 3 | const printConnections = require('./print-connections'); 4 | 5 | describe('print-connections', () => { 6 | it('should return "na" for invalid input', () => { 7 | expect(printConnections(0, 0)).to.equal('na'); 8 | }); 9 | 10 
| it('should show connections under or equal to 40% utilzation in green', () => { 11 | expect(printConnections(40, 100)).to.equal(`${chalk.green(40)} / ${chalk.white(100)}`); 12 | }); 13 | 14 | it('should show connections under or equal to 70% utilzation in orange', () => { 15 | expect(printConnections(70, 100)).to.equal(`${chalk.yellow(70)} / ${chalk.white(100)}`); 16 | }); 17 | 18 | it('should show connections above 70% utilzation in red', () => { 19 | expect(printConnections(71, 100)).to.equal(`${chalk.red(71)} / ${chalk.white(100)}`); 20 | expect(printConnections(170, 100)).to.equal(`${chalk.red(170)} / ${chalk.white(100)}`); 21 | }); 22 | }); 23 | 24 | -------------------------------------------------------------------------------- /bin/mongo-monitor.js: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env node 2 | const program = require('commander'); 3 | const chalk = require('chalk'); 4 | const { ConnectionString } = require('mongo-connection-string'); 5 | const pkg = require('../package.json'); 6 | const monitor = require('../src/mongo-monitor'); 7 | 8 | program 9 | .version(pkg.version, '-v, --version') 10 | .arguments('') 11 | .option('-i, --interval ', 'Interval for checking status, default is 1000ms') 12 | .parse(process.argv); 13 | 14 | 15 | // If we have been provided a final arg, it is always just a connection string. 16 | const connectionString = program.args[0]; 17 | 18 | const interval = Number.parseInt(program.connectionString) || 1000; 19 | 20 | if (!connectionString) { 21 | program.outputHelp(); 22 | process.exit(0); 23 | } 24 | 25 | // Parse the connection string into a ConnectionString object. 
26 | const connStr = new ConnectionString(connectionString); 27 | 28 | console.log(`Connecting to: ${connStr}`); 29 | 30 | monitor({ connectionString: connStr, interval }) 31 | .catch((err) => { 32 | console.log(chalk.red(`An error occured: ${err.message}`)); 33 | process.exit(1); 34 | }); 35 | -------------------------------------------------------------------------------- /src/lib/print-connections.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | 3 | // These are the percentage threshholds of current/available 4 | // at which we use different colours. 5 | const threshholdGreen = 0.4; 6 | const threshholdAmber = 0.7; 7 | // note anything else is red... 8 | 9 | /** 10 | * printConnections - render to a string the number of connections 11 | * from a set of available connections. Colour coded to indicate 12 | * whether the number is healthy. 13 | * 14 | * The format is "x/y" 15 | * 16 | * @param current - the number of currently used connections 17 | * @param available - the number of available connections 18 | * @returns {string} - a string ready to print to the console 19 | */ 20 | function printConnections(current, available) { 21 | if (available === 0) return 'na'; 22 | 23 | const utilization = current / available; 24 | if (utilization <= threshholdGreen) 25 | return `${chalk.green(current)} / ${chalk.white(available)}`; 26 | if (utilization <= threshholdAmber) 27 | return `${chalk.yellow(current)} / ${chalk.white(available)}`; 28 | return `${chalk.red(current)} / ${chalk.white(available)}`; 29 | } 30 | 31 | module.exports = printConnections; 32 | -------------------------------------------------------------------------------- /src/lib/status.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const longestString = require('./longest-string'); 3 | 4 | const stateMap = [ 5 | { code: 0, name: 'STARTUP', colour: 
'yellow' }, 6 | { code: 1, name: 'PRIMARY', colour: 'green' }, 7 | { code: 2, name: 'SECONDARY', colour: 'blue' }, 8 | { code: 3, name: 'RECOVERING', colour: 'yellow' }, 9 | { code: 5, name: 'STARTUP', colour: 'yellow' }, 10 | { code: 6, name: 'UNKNOWN', colour: 'red' }, 11 | { code: 7, name: 'ARBITER', colour: 'magenta' }, 12 | { code: 8, name: 'DOWN', colour: 'red' }, 13 | { code: 9, name: 'ROLLBACK', colour: 'yellow' }, 14 | { code: 10, name: 'REMOVED', colour: 'red' } 15 | ]; 16 | 17 | function getStatusName(stateCode) { 18 | const state = stateMap.find(sm => sm.code === stateCode); 19 | if (!state) return 'UNKNOWN'; 20 | return state.name; 21 | } 22 | 23 | function writeStatusName(statusName, padLeft = 0) { 24 | const state = stateMap.find(sm => sm.name === statusName); 25 | if (!state) return chalk.red('unknown'); 26 | return chalk[state.colour](state.name.padStart(padLeft)); 27 | } 28 | 29 | function writeStatusNameRightAligned(statusName) { 30 | const l = longestString(stateMap.map(s => s.name)); 31 | return writeStatusName(statusName, l); 32 | } 33 | 34 | module.exports = { 35 | getStatusName, 36 | writeStatusName, 37 | writeStatusNameRightAligned 38 | }; 39 | -------------------------------------------------------------------------------- /src/lib/get-shard-hosts.spec.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const getShardHosts = require('./get-shard-hosts'); 3 | 4 | describe('get-shard-hosts', () => { 5 | it('should correctly extract the replicaSet and hosts when a replicaset is present', () => { 6 | const input = 'replicaSetName/mongo1.shard1.mongo-cluster.com:27017,mongo2.shard1.mongo-cluster.com:27017,mongo3.shard1.mongo-cluster.com:27017'; 7 | const { connectionString, replicaSet, hosts } = getShardHosts(input); 8 | 
expect(connectionString).to.equal('mongodb://mongo1.shard1.mongo-cluster.com:27017,mongo2.shard1.mongo-cluster.com:27017,mongo3.shard1.mongo-cluster.com:27017?replicaSet=replicaSetName'); 9 | expect(replicaSet).to.equal('replicaSetName'); 10 | expect(hosts[0]).to.equal('mongo1.shard1.mongo-cluster.com:27017'); 11 | expect(hosts[1]).to.equal('mongo2.shard1.mongo-cluster.com:27017'); 12 | expect(hosts[2]).to.equal('mongo3.shard1.mongo-cluster.com:27017'); 13 | }); 14 | 15 | it('should correctly extract the replicaSet and hosts when a replicaset is not present', () => { 16 | const input = 'mongo1.shard1.mongo-cluster.com:27017'; 17 | const { connectionString, replicaSet, hosts } = getShardHosts(input); 18 | expect(connectionString).to.equal('mongodb://mongo1.shard1.mongo-cluster.com:27017'); 19 | expect(replicaSet).to.equal(null); 20 | expect(hosts[0]).to.equal('mongo1.shard1.mongo-cluster.com:27017'); 21 | }); 22 | }); 23 | 24 | -------------------------------------------------------------------------------- /samples/replicaset/replicaset.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Basic config. 4 | port1=27017 5 | port2=27018 6 | port3=27019 7 | port4=27020 8 | 9 | # Rebuild data and log directories. 10 | if [[ -d './data' ]]; then 11 | rm -rf ./data 12 | fi 13 | if [[ -d './log' ]]; then 14 | rm -rf ./log 15 | fi 16 | mkdir -p ./data/data1 17 | mkdir -p ./data/data2 18 | mkdir -p ./data/data3 19 | mkdir -p ./data/data4 20 | mkdir -p ./log 21 | 22 | # Start the replicaset processes. 
23 | mongod --fork --logpath ./log/node1.log --oplogSize 50 --port ${port1} --dbpath ./data/data1 --replSet cluster 24 | mongod --fork --logpath ./log/node2.log --oplogSize 50 --port ${port2} --dbpath ./data/data2 --replSet cluster 25 | mongod --fork --logpath ./log/node3.log --oplogSize 50 --port ${port3} --dbpath ./data/data3 --replSet cluster 26 | mongod --fork --logpath ./log/node4.log --oplogSize 50 --port ${port4} --dbpath ./data/data4 --replSet cluster 27 | 28 | # Wait a bit of time for the process to start. 29 | sleep 3 30 | 31 | # Note that we don't add node 4, so we can easily demo adding a node. 32 | mongo --port 27017 --shell <<- EOF 33 | rs.initiate({ 34 | _id: 'cluster', 35 | members: [{ 36 | _id: 0, 37 | host: 'localhost:27017' 38 | },{ 39 | _id: 1, 40 | host: 'localhost:27018' 41 | },{ 42 | _id: 2, 43 | host: 'localhost:27019', 44 | arbiterOnly: true 45 | }] 46 | }); 47 | EOF 48 | 49 | # Show the process info. 50 | echo 51 | echo "Replicaset MongoDB Running" 52 | echo 53 | echo "Monitor with: " 54 | echo " mongo-monitor localhost:${port1},localhost:${port2},localhost:${port3}?replicaSet=cluster" 55 | 56 | -------------------------------------------------------------------------------- /src/lib/event-handlers.js: -------------------------------------------------------------------------------- 1 | // Event handlers for various mongo topology events. These can be used to 2 | // provide more data to the events shown to the user. 3 | // Only a few events are logged for now. 
4 | const eventHandlers = { 5 | 6 | // timeout: () => { 7 | // return 'timeout'; 8 | // }, 9 | 10 | close: () => { 11 | return 'close'; 12 | }, 13 | 14 | serverOpening: () => { 15 | return 'serverOpening'; 16 | }, 17 | 18 | // serverDescriptionChanged: () => { 19 | // return 'serverDescriptionChanged'; 20 | // }, 21 | 22 | // serverHeartbeatStarted: () => { 23 | // return 'serverHeartbeatStarted'; 24 | // }, 25 | 26 | // serverHeartbeatSucceeded: () => { 27 | // return 'serverHeartbeatSuceeded'; 28 | // }, 29 | 30 | // serverHeartbeatFailed: () => { 31 | // return 'serverHeartbeatFailed'; 32 | // }, 33 | 34 | serverClosed: () => { 35 | return 'serverClosed'; 36 | }, 37 | 38 | topologyOpening: () => { 39 | return 'topologyOpening'; 40 | }, 41 | 42 | topologyClosed: () => { 43 | return 'topologyClosed'; 44 | }, 45 | 46 | // topologyDescriptionChanged: () => { 47 | // return 'topologyDescriptionChanged'; 48 | // }, 49 | 50 | joined: () => { 51 | return 'joined'; 52 | }, 53 | 54 | left: () => { 55 | return 'left'; 56 | }, 57 | 58 | // ping: () => { 59 | // return 'ping'; 60 | // }, 61 | 62 | // ha: () => { 63 | // return 'ha'; 64 | // }, 65 | 66 | // all: () => { 67 | // return 'all'; 68 | // }, 69 | 70 | // fullsetup: () => { 71 | // return 'timeout'; 72 | // }, 73 | 74 | }; 75 | 76 | module.exports = eventHandlers; 77 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mongo-monitor", 3 | "version": "1.1.1", 4 | "description": "MongoDB Cluster Monitor", 5 | "main": "monitor.js", 6 | "engines": { 7 | "node": ">= 8.5.0" 8 | }, 9 | "bin": { 10 | "mongo-monitor": "./bin/mongo-monitor.js" 11 | }, 12 | "scripts": { 13 | "start": "node ./bin/mongo-monitor.js", 14 | "debug": "node --inspect-brk ./bin/mongo-monitor.js", 15 | "lint": "eslint --ignore-path .gitignore .", 16 | "test": "mocha 'src/**/*.spec.js'", 17 | "test:debug": 
"mocha --inspect-brk --watch 'src/**/*.spec.js'", 18 | "test:coverage": "nyc --reporter=html --report-dir=./artifacts/coverage _mocha 'src/**/*.spec.js'", 19 | "test:ci": "nyc --reporter=lcov --reporter=html --report-dir=./artifacts/coverage _mocha --reporter mocha-junit-reporter --reporter-options mochaFile=./artifacts/tests/test-report.xml 'src/**/*.spec.js'", 20 | "test:watch": "mocha --watch 'src/**/*.spec.js'", 21 | "release": "npm run lint && npm test && standard-version", 22 | "preversion": "echo 'Version with \"npm run release\"' && exit 1" 23 | }, 24 | "author": "Dave Kerr ", 25 | "license": "ISC", 26 | "repository": { 27 | "type": "git", 28 | "url": "https://github.com/dwmkerr/mongo-monitor.git" 29 | }, 30 | "dependencies": { 31 | "chalk": "^2.4.1", 32 | "commander": "^3.0.0", 33 | "mongo-connection-string": "^0.1.4", 34 | "mongodb": "^3.3.3" 35 | }, 36 | "devDependencies": { 37 | "eslint": "^6.6.0", 38 | "mocha": "^7.0.1", 39 | "mocha-junit-reporter": "^1.17.0", 40 | "nock": "^11.1.0", 41 | "nyc": "^14.1.1", 42 | "sinon": "^8.0.0", 43 | "standard-version": "^8.0.0" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
4 | 5 | ### [1.1.1](https://github.com/dwmkerr/mongo-monitor/compare/v1.1.0...v1.1.1) (2019-11-05) 6 | 7 | ## [1.1.0](https://github.com/dwmkerr/mongo-monitor/compare/v1.0.0...v1.1.0) (2019-11-05) 8 | 9 | 10 | ### Features 11 | 12 | * show connection statistics ([279fc43](https://github.com/dwmkerr/mongo-monitor/commit/279fc4397a872a001ae3e15a6ae6cea354c7fd58)) 13 | 14 | 15 | # [1.0.0](https://github.com/dwmkerr/mongo-monitor/compare/v0.3.0...v1.0.0) (2018-06-23) 16 | 17 | 18 | 19 | 20 | # [0.3.0](https://github.com/dwmkerr/mongo-monitor/compare/v0.2.1...v0.3.0) (2018-05-15) 21 | 22 | 23 | ### Bug Fixes 24 | 25 | * **sharded-cluster:** correctly show shard replset status ([17c2a1f](https://github.com/dwmkerr/mongo-monitor/commit/17c2a1f)), closes [#8](https://github.com/dwmkerr/mongo-monitor/issues/8) 26 | 27 | 28 | ### Features 29 | 30 | * support standalone shards ([cb3e058](https://github.com/dwmkerr/mongo-monitor/commit/cb3e058)) 31 | * **event-logging:** log topology events, cleanup structure ([c15b649](https://github.com/dwmkerr/mongo-monitor/commit/c15b649)) 32 | 33 | 34 | 35 | 36 | ## [0.2.1](https://github.com/dwmkerr/mongo-monitor/compare/v0.2.0...v0.2.1) (2018-05-02) 37 | 38 | 39 | ### Bug Fixes 40 | 41 | * **connection-string:** use the `mongodb://` protocol by default ([886b05f](https://github.com/dwmkerr/mongo-monitor/commit/886b05f)) 42 | 43 | 44 | 45 | 46 | # [0.2.0](https://github.com/dwmkerr/mongo-monitor/compare/v0.1.0...v0.2.0) (2018-03-14) 47 | 48 | 49 | ### Features 50 | 51 | * improve connection string handling ([68f8d41](https://github.com/dwmkerr/mongo-monitor/commit/68f8d41)) 52 | 53 | 54 | 55 | 56 | # 0.1.0 (2018-03-10) 57 | -------------------------------------------------------------------------------- /test/replset.replSetGetStatus.json: -------------------------------------------------------------------------------- 1 | { 2 | "set": "replset", 3 | "date": "2018-03-04T12:43:20.135Z", 4 | "myState": 1, 5 | "term": 5, 6 | 
"heartbeatIntervalMillis": 2000, 7 | "optimes": { 8 | "lastCommittedOpTime": { 9 | "ts": "6529069263150383105", 10 | "t": 5 11 | }, 12 | "appliedOpTime": { 13 | "ts": "6529069263150383105", 14 | "t": 5 15 | }, 16 | "durableOpTime": { 17 | "ts": "6529069263150383105", 18 | "t": 5 19 | } 20 | }, 21 | "members": [ 22 | { 23 | "_id": 1, 24 | "name": "mongo1.mongo-cluster.com:27017", 25 | "health": 1, 26 | "state": 1, 27 | "stateStr": "PRIMARY", 28 | "uptime": 190139, 29 | "optime": { 30 | "ts": "6529069263150383105", 31 | "t": 5 32 | }, 33 | "optimeDate": "2018-03-04T12:43:19.000Z", 34 | "electionTime": "6528258008252678145", 35 | "electionDate": "2018-03-02T08:15:14.000Z", 36 | "configVersion": 2, 37 | "self": true 38 | }, 39 | { 40 | "_id": 2, 41 | "name": "mongo2.mongo-cluster.com:27017", 42 | "health": 1, 43 | "state": 2, 44 | "stateStr": "SECONDARY", 45 | "uptime": 188827, 46 | "optime": { 47 | "ts": "6529069220200710145", 48 | "t": 5 49 | }, 50 | "optimeDurable": { 51 | "ts": "6529069220200710145", 52 | "t": 5 53 | }, 54 | "optimeDate": "2018-03-04T12:43:09.000Z", 55 | "optimeDurableDate": "2018-03-04T12:43:09.000Z", 56 | "lastHeartbeat": "2018-03-04T12:43:18.820Z", 57 | "lastHeartbeatRecv": "2018-03-04T12:43:19.218Z", 58 | "pingMs": 0, 59 | "syncingTo": "mongo3.mongo-cluster.com:27017", 60 | "configVersion": 2 61 | }, 62 | { 63 | "_id": 3, 64 | "name": "mongo3.mongo-cluster.com:27017", 65 | "health": 1, 66 | "state": 2, 67 | "stateStr": "SECONDARY", 68 | "uptime": 190137, 69 | "optime": { 70 | "ts": "6529069220200710145", 71 | "t": 5 72 | }, 73 | "optimeDurable": { 74 | "ts": "6529069220200710145", 75 | "t": 5 76 | }, 77 | "optimeDate": "2018-03-04T12:43:09.000Z", 78 | "optimeDurableDate": "2018-03-04T12:43:09.000Z", 79 | "lastHeartbeat": "2018-03-04T12:43:18.820Z", 80 | "lastHeartbeatRecv": "2018-03-04T12:43:18.248Z", 81 | "pingMs": 0, 82 | "syncingTo": "mongo1.mongo-cluster.com:27017", 83 | "configVersion": 2 84 | } 85 | ], 86 | "ok": 1 87 | } 88 | 
-------------------------------------------------------------------------------- /test/shard.replSet.replSetGetStatus.json: -------------------------------------------------------------------------------- 1 | { 2 | "set": "shard1replset", 3 | "date": "2018-03-04T12:43:20.135Z", 4 | "myState": 1, 5 | "term": 5, 6 | "heartbeatIntervalMillis": 2000, 7 | "optimes": { 8 | "lastCommittedOpTime": { 9 | "ts": "6529069263150383105", 10 | "t": 5 11 | }, 12 | "appliedOpTime": { 13 | "ts": "6529069263150383105", 14 | "t": 5 15 | }, 16 | "durableOpTime": { 17 | "ts": "6529069263150383105", 18 | "t": 5 19 | } 20 | }, 21 | "members": [ 22 | { 23 | "_id": 1, 24 | "name": "mongod1.shard1.mongo-cluster.com:27017", 25 | "health": 1, 26 | "state": 1, 27 | "stateStr": "PRIMARY", 28 | "uptime": 190139, 29 | "optime": { 30 | "ts": "6529069263150383105", 31 | "t": 5 32 | }, 33 | "optimeDate": "2018-03-04T12:43:19.000Z", 34 | "electionTime": "6528258008252678145", 35 | "electionDate": "2018-03-02T08:15:14.000Z", 36 | "configVersion": 2, 37 | "self": true 38 | }, 39 | { 40 | "_id": 2, 41 | "name": "mongod2.shard1.mongo-cluster.com:27017", 42 | "health": 1, 43 | "state": 2, 44 | "stateStr": "SECONDARY", 45 | "uptime": 188827, 46 | "optime": { 47 | "ts": "6529069220200710145", 48 | "t": 5 49 | }, 50 | "optimeDurable": { 51 | "ts": "6529069220200710145", 52 | "t": 5 53 | }, 54 | "optimeDate": "2018-03-04T12:43:09.000Z", 55 | "optimeDurableDate": "2018-03-04T12:43:09.000Z", 56 | "lastHeartbeat": "2018-03-04T12:43:18.820Z", 57 | "lastHeartbeatRecv": "2018-03-04T12:43:19.218Z", 58 | "pingMs": 0, 59 | "syncingTo": "mongo3.mongo-cluster.com:27017", 60 | "configVersion": 2 61 | }, 62 | { 63 | "_id": 3, 64 | "name": "mongod3.shard1.mongo-cluster.com:27017", 65 | "health": 1, 66 | "state": 2, 67 | "stateStr": "SECONDARY", 68 | "uptime": 190137, 69 | "optime": { 70 | "ts": "6529069220200710145", 71 | "t": 5 72 | }, 73 | "optimeDurable": { 74 | "ts": "6529069220200710145", 75 | "t": 5 76 | }, 77 | 
"optimeDate": "2018-03-04T12:43:09.000Z", 78 | "optimeDurableDate": "2018-03-04T12:43:09.000Z", 79 | "lastHeartbeat": "2018-03-04T12:43:18.820Z", 80 | "lastHeartbeatRecv": "2018-03-04T12:43:18.248Z", 81 | "pingMs": 0, 82 | "syncingTo": "mongo1.mongo-cluster.com:27017", 83 | "configVersion": 2 84 | } 85 | ], 86 | "ok": 1 87 | } 88 | -------------------------------------------------------------------------------- /src/load-status.js: -------------------------------------------------------------------------------- 1 | const { MongoClient } = require('mongodb'); 2 | const getShardHosts = require('./lib/get-shard-hosts'); 3 | 4 | async function getReplicaSetStatus(connectionString) { 5 | const client = await MongoClient.connect(connectionString); 6 | const db = client.db('admin'); 7 | const status = await loadReplicasetStatus({ db }); 8 | await client.close(); 9 | return status; 10 | } 11 | 12 | async function loadShardedStatus({ db }) { 13 | // Get the balancer status and shards. 14 | // const balancerStatus = await db.command({ balancerStatus: 1 }); 15 | const balancerStatus = null; 16 | const listShards = await db.command({ listShards: 1 }); 17 | 18 | // Go through each shard, getting status. This'll be async for each one. 19 | const promises = listShards.shards.map(async (shard) => { 20 | // Get the shard id and hosts. 21 | const { connectionString, replicaSet, hosts } = getShardHosts(shard.host); 22 | 23 | // If there is no set name, we're standalone. 24 | if (!replicaSet) { 25 | return { 26 | id: shard._id, 27 | replicaSet, 28 | hosts: hosts.map(host => ({ status: '(standalone)', host })) 29 | }; 30 | } 31 | 32 | // Try and get the details of the replicaset which make up the shard. 33 | try { 34 | const rsStatus = await getReplicaSetStatus(connectionString); 35 | 36 | // Otherwise, work out the status. 
37 | const shardHosts = rsStatus.members.map((host) => { 38 | return { 39 | state: host.state, 40 | host: host.name 41 | }; 42 | }); 43 | 44 | return { 45 | id: shard._id, 46 | connections: rsStatus.connections, 47 | replicaSet, 48 | hosts: shardHosts 49 | }; 50 | } catch (err) { 51 | const shardHosts = hosts.map((host) => { 52 | return { 53 | state: -1, 54 | host 55 | }; 56 | }); 57 | return { id: shard._id, replicaSet, hosts: shardHosts }; 58 | } 59 | }); 60 | 61 | const shardDetails = await Promise.all(promises); 62 | 63 | // Return the shard details. 64 | return { 65 | configuration: 'sharded', 66 | balancer: balancerStatus, 67 | shards: shardDetails 68 | }; 69 | } 70 | 71 | async function loadReplicasetStatus({ db }) { 72 | const replSetStatus = await db.command({ replSetGetStatus: 1 }); 73 | const serverStatus = await db.command({ serverStatus: 1} ); 74 | 75 | return { 76 | configuration: 'replicaset', 77 | connections: serverStatus.connections, 78 | replsetName: replSetStatus.set, 79 | members: replSetStatus.members.map(({ state, name }) => { return { state, name }; }) 80 | }; 81 | } 82 | 83 | async function loadStandaloneStatus({ db }) { 84 | const serverStatus = await db.command({ serverStatus: 1} ); 85 | return { 86 | configuration: 'standalone', 87 | connections: serverStatus.connections 88 | }; 89 | } 90 | 91 | async function loadStatus(client) { 92 | // Connect, switch to admin, get the status. 93 | const db = client.db('admin'); 94 | const isMaster = await db.command({ isMaster: 1 }); 95 | 96 | let status; 97 | 98 | // Are we a sharded cluster? 
99 | if (isMaster.msg === 'isdbgrid') { 100 | status = await loadShardedStatus({ isMaster, db }); 101 | } else if (isMaster.setName) { 102 | status = await loadReplicasetStatus({ isMaster, db }); 103 | } else { 104 | status = await loadStandaloneStatus({ isMaster, db }); 105 | } 106 | 107 | return status; 108 | } 109 | 110 | module.exports = loadStatus; 111 | -------------------------------------------------------------------------------- /src/mongo-monitor.js: -------------------------------------------------------------------------------- 1 | const { MongoClient } = require('mongodb'); 2 | const chalk = require('chalk'); 3 | const loadStatus = require('./load-status'); 4 | const state = require('./lib/status'); 5 | const eventHandlers = require('./lib/event-handlers'); 6 | const printConnections = require('./lib/print-connections'); 7 | 8 | // Keep track of interesting events. 9 | const events = []; 10 | 11 | // Our single mongo client, which we might take few attempts to establish. 12 | let client = null; 13 | 14 | function printEvents() { 15 | // Finally, write out recent events. 16 | console.log('\nEvents:'); 17 | const eventCount = 10; 18 | const start = (events.length - eventCount) > 0 ? (events.length - eventCount) : 0; 19 | for (let i=events.length - 1; i>=start; --i) { 20 | const time = events[i].time ? events[i].time.toISOString() : 'unknown'; 21 | console.log(` ${time} : ${events[i].message}`); 22 | } 23 | } 24 | 25 | async function checkStatus(params) { 26 | const { connectionString, interval } = params; 27 | 28 | // Get the cluster status. 29 | try { 30 | // If we don't yet have a client, try and create one. Connection issues 31 | // might cause this to fail. 32 | if (client === null) { 33 | // Note that client *might* have been initialised by the time we hit this 34 | // line, so linting warns us correctly it might already have a value. 35 | // However, overriding the client is fine; it will simply be disposed 36 | // on the next GC cycle. 
/**
 * Runs a single monitoring pass: ensures we have a MongoDB client, loads the
 * cluster status, redraws the terminal, then schedules the next pass.
 * Errors never escape — they are recorded as events so monitoring continues
 * across transient connection failures.
 * @param {Object} params
 * @param {Object} params.connectionString - mongo-connection-string instance (exposes toURI()).
 * @param {number} params.interval - delay in milliseconds between checks.
 */
async function checkStatus(params) {
  const { connectionString, interval } = params;

  // Get the cluster status.
  try {
    // If we don't yet have a client, try and create one. Connection issues
    // might cause this to fail.
    if (client === null) {
      // Note that client *might* have been initialised by the time we hit this
      // line, so linting warns us correctly it might already have a value.
      // However, overriding the client is fine; it will simply be disposed
      // on the next GC cycle.
      // eslint-disable-next-line require-atomic-updates
      client = await MongoClient.connect(connectionString.toURI());

      // Forward every topology event we know how to describe into the
      // rolling module-level 'events' list, timestamped at arrival.
      Object.keys(eventHandlers).forEach((eventName) => {
        client.topology.on(eventName, (e) => {
          const message = eventHandlers[eventName](e);
          events.push({ time: new Date(), message });
        });
      });
    }
    const status = await loadStatus(client);

    // '\x1Bc' is the ANSI full-reset escape: clears the terminal before redraw.
    process.stdout.write('\x1Bc');
    console.log(`Time : ${chalk.white(new Date().toISOString())}`);
    console.log(`Connection : ${chalk.white(connectionString)}`);
    console.log(`Configuration : ${chalk.white(status.configuration)}`);

    // If we are standalone, write the number of connections.
    if (status.configuration === 'standalone') {
      console.log(`Connections : ${printConnections(status.connections.current, status.connections.available)}`);
    }

    // If we are sharded, write each shard.
    // NOTE(review): 'shard.connections' is undefined for standalone or
    // unreachable shards (see load-status.js), so this line can throw; the
    // outer catch turns that into an event. Consider guarding — confirm.
    if (status.configuration === 'sharded') {
      status.shards.forEach((shard) => {
        console.log(`\n Shard: ${chalk.white(shard.id)} (${printConnections(shard.connections.current, shard.connections.available)} connections)\n`);
        shard.hosts.forEach((host) => {
          console.log(` ${state.writeStatusNameRightAligned(state.getStatusName(host.state))} : ${chalk.white(host.host)}`);
        });
      });
    }

    // If we are a replicaset, write each member.
    if (status.configuration === 'replicaset') {
      console.log(`\n Replicaset: ${chalk.white(status.replsetName)} (${printConnections(status.connections.current, status.connections.available)} connections)\n`);
      status.members.forEach((m) => {
        console.log(` ${state.writeStatusNameRightAligned(state.getStatusName(m.state))} : ${chalk.white(m.name)}`);
      });
    }

    printEvents();
  } catch (err) {
    // Record the failure as an event and redraw just the event log — the
    // monitor deliberately never crashes on a failed check.
    events.push({ time: new Date(), message: err.message });
    process.stdout.write('\x1Bc');
    printEvents();
  } finally {
    // No matter what happens, try and check again later.
    setTimeout(() => checkStatus(params), interval);
  }
}

/**
 * Starts monitoring: performs the first check, which then reschedules itself
 * indefinitely via setTimeout.
 * @param {Object} options - { connectionString, interval }.
 * @returns {Promise} resolves once the first check has completed.
 */
async function monitor({ connectionString, interval }) {
  return checkStatus({ connectionString, interval });
}

module.exports = monitor;
The monitor is primarily designed to show the status of a MongoDB cluster, updated in real-time. This is useful when performing administrative operations such as replicaset or shard configuration.

On a sharded cluster, if you provide a connection string with admin privileges to any `mongos` host, you will see the sharding configuration.

On a replicaset, if you provide a connection string with admin privileges to any host, or to the entire set, you will see the replicaset configuration:

![Replicaset Screenshot](./docs/replicaset.jpg)

For a standalone, basic info is reported. Sharded clusters show details of the shards, as well as the replicasets which make up each shard:

![Sharded Cluster Screenshot](./docs/sharded-cluster.jpg)

## Samples

To try the monitor out, there are a few samples you can try.

Monitor a standalone instance:

```bash
make sample-standalone
mongo-monitor localhost:27017
```
| `shard.replSet.replSetGetStatus.json` | The output of `replSetGetStatus` for a replicaset in a sharded cluster. |
# Remove data and log directories left over from a previous run so the
# sample cluster starts clean.
if [[ -d './data' ]]; then
  rm -rf ./data
fi
if [[ -d './log' ]]; then
  rm -rf ./log
fi
# One data directory per process: three config servers (cs1-cs3) and the
# members of the three shard replicasets (rs1 a-d, rs2 a-c, rs3 a-c).
mkdir -p ./data/datacs1
mkdir -p ./data/datacs2
mkdir -p ./data/datacs3
mkdir -p ./data/datars1a
mkdir -p ./data/datars1b
mkdir -p ./data/datars1c
mkdir -p ./data/datars1d
mkdir -p ./data/datars2a
mkdir -p ./data/datars2b
mkdir -p ./data/datars2c
mkdir -p ./data/datars3a
mkdir -p ./data/datars3b
mkdir -p ./data/datars3c
mkdir -p ./log

# Start the config servers: a three-member 'config' replicaset on 27117-27119.
mongod --fork --logpath ./log/cs1.log --oplogSize 50 --port 27117 --dbpath ./data/datacs1 --configsvr --replSet config
mongod --fork --logpath ./log/cs2.log --oplogSize 50 --port 27118 --dbpath ./data/datacs2 --configsvr --replSet config
mongod --fork --logpath ./log/cs3.log --oplogSize 50 --port 27119 --dbpath ./data/datacs3 --configsvr --replSet config

# Start the shard replicasets: rs1 on 27217-27220, rs2 on 27317-27319,
# rs3 on 27417-27419.
# NOTE(review): rs1's fourth member (port 27220 / datars1d) is started but
# never added to the replicaset config below — presumably a spare for
# demonstrating rs.add(); confirm.
mongod --fork --replSet rs1 --port 27217 --dbpath ./data/datars1a --logpath ./log/rs1a.log --shardsvr
mongod --fork --replSet rs1 --port 27218 --dbpath ./data/datars1b --logpath ./log/rs1b.log --shardsvr
mongod --fork --replSet rs1 --port 27219 --dbpath ./data/datars1c --logpath ./log/rs1c.log --shardsvr
mongod --fork --replSet rs1 --port 27220 --dbpath ./data/datars1d --logpath ./log/rs1d.log --shardsvr
mongod --fork --replSet rs2 --port 27317 --dbpath ./data/datars2a --logpath ./log/rs2a.log --shardsvr
mongod --fork --replSet rs2 --port 27318 --dbpath ./data/datars2b --logpath ./log/rs2b.log --shardsvr
mongod --fork --replSet rs2 --port 27319 --dbpath ./data/datars2c --logpath ./log/rs2c.log --shardsvr
mongod --fork --replSet rs3 --port 27417 --dbpath ./data/datars3a --logpath ./log/rs3a.log --shardsvr
mongod --fork --replSet rs3 --port 27418 --dbpath ./data/datars3b --logpath ./log/rs3b.log --shardsvr
mongod --fork --replSet rs3 --port 27419 --dbpath ./data/datars3c --logpath ./log/rs3c.log --shardsvr

# Wait a bit of time for the processes to start.
sleep 3

# Initialise the replicasets: the config set first, then each shard's set
# (rs1's third member is an arbiter).
mongo --port 27117 --shell <<- EOF
rs.initiate({
_id: 'config',
members: [{
_id: 0,
host: 'localhost:27117'
},{
_id: 1,
host: 'localhost:27118'
},{
_id: 2,
host: 'localhost:27119'
}]
});
EOF
mongo --port 27217 --shell <<- EOF
rs.initiate({
_id: 'rs1',
members: [{
_id: 0,
host: 'localhost:27217'
},{
_id: 1,
host: 'localhost:27218'
},{
_id: 2,
host: 'localhost:27219',
arbiterOnly: true
}]
});
EOF
mongo --port 27318 --shell <<- EOF
rs.initiate({
_id: 'rs2',
members: [{
_id: 0,
host: 'localhost:27317'
},{
_id: 1,
host: 'localhost:27318'
},{
_id: 2,
host: 'localhost:27319'
}]
});
EOF
mongo --port 27419 --shell <<- EOF
rs.initiate({
_id: 'rs3',
members: [{
_id: 0,
host: 'localhost:27417'
},{
_id: 1,
host: 'localhost:27418'
},{
_id: 2,
host: 'localhost:27419'
}]
});
EOF

# Start the mongos router against the config replicaset, then register each
# shard replicaset with the cluster.
mongos --logpath ./log/shard.log --fork --configdb "config/localhost:27117,localhost:27118,localhost:27119"
mongo --shell <<- EOF
sh.addShard("rs1/localhost:27217,localhost:27218,localhost:27219")
sh.addShard("rs2/localhost:27317,localhost:27318,localhost:27319")
sh.addShard("rs3/localhost:27417,localhost:27418,localhost:27419")
EOF

# Show the process info.
const { expect } = require('chai');
const sinon = require('sinon');
const { MongoClient } = require('mongodb');
const loadStatus = require('./load-status');
const testStandaloneIsMaster = require('../test/standalone.isMaster.json');
const testStandaloneServerStatus = require('../test/standalone.serverStatus.json');
const testReplsetIsMaster = require('../test/replset.isMaster.json');
const testReplsetReplSetGetStatus = require('../test/replset.replSetGetStatus.json');
const testReplsetServerStatus = require('../test/replset.serverStatus.json');
const testShardIsMaster = require('../test/shard.isMaster.json');
const testShardListShards = require('../test/shard.listShards.json');
const testShardReplSetGetStatus = require('../test/shard.replSet.replSetGetStatus.json');

// Default no-op used as the initial 'command' implementation on stubbed dbs;
// each test replaces it with a sinon stub.
const noop = () => {};

describe('load-status', () => {

  // Create a sandbox for stubs, spies and mocks. Reset after each test.
  const sandbox = sinon.createSandbox();
  // Map of connection string -> stubbed db, shared by stubClient/stubDb.
  const stubbedDbs = {};

  beforeEach(() => {
    // Any call to 'connect' returns one of our stubbed dbs,
    // keyed by the connection string the production code used.
    sandbox.stub(MongoClient, 'connect')
      .callsFake(async (connectionString) => {
        return stubClient(connectionString);
      });
  });

  afterEach(() => {
    sandbox.restore();
  });

  // Builds a fake MongoClient whose 'db' returns the db registered for the
  // given connection string, and whose 'close' is a no-op.
  const stubClient = (connectionString) => {
    return {
      db: () => {
        if (!stubbedDbs[connectionString]) {
          // TODO for some reason in the tests this does not bomb the runner...
          throw new Error(`DB with connection string '${connectionString}' has not been stubbed`);
        }
        return stubbedDbs[connectionString];
      },
      close: () => {}
    };
  };

  // Registers and returns a stub db for a connection string; tests then
  // stub its 'command' member per mongo command.
  const stubDb = (connectionString) => {
    const stubbedDb = {
      command: noop
    };
    stubbedDbs[connectionString] = stubbedDb;
    return stubbedDb;
  };

  it('should be able to load the status of a standalone server', async () => {
    // A standalone answers 'isMaster' with no setName and no 'isdbgrid' msg.
    const replsetDb = stubDb('localhost');
    sandbox.stub(replsetDb, 'command')
      .withArgs({ isMaster: 1 })
      .callsFake(async () => { return testStandaloneIsMaster; })
      .withArgs({ serverStatus: 1 })
      .callsFake(async () => { return testStandaloneServerStatus; });

    // Load the status.
    const status = await loadStatus(stubClient('localhost'));

    // Assert the expected shape.
    expect(status.configuration).to.equal('standalone');
    expect(status.connections.active).to.equal(1);
    expect(status.connections.current).to.equal(1);
    expect(status.connections.available).to.equal(203);
  });

  it('should be able to load the status of a replicaset', async () => {
    // A replicaset member answers 'isMaster' with a setName.
    const replsetDb = stubDb('localhost');
    sandbox.stub(replsetDb, 'command')
      .withArgs({ isMaster: 1 })
      .callsFake(async () => { return testReplsetIsMaster; })
      .withArgs({ replSetGetStatus: 1 })
      .callsFake(async () => { return testReplsetReplSetGetStatus; })
      .withArgs({ serverStatus: 1 })
      .callsFake(async () => { return testReplsetServerStatus; });

    // Load the status.
    const status = await loadStatus(stubClient('localhost'));

    // Assert the expected shape.
    expect(status.configuration).to.equal('replicaset');
    expect(status.connections.active).to.equal(3);
    expect(status.connections.current).to.equal(21);
    expect(status.connections.available).to.equal(183);
    expect(status.members.length).to.equal(3);
    const member0 = status.members[0];
    expect(member0.name).to.equal('mongo1.mongo-cluster.com:27017');
    expect(member0.state).to.equal(1);
  });


  it('should be able to load the status of a sharded cluster', async () => {
    // The mongos answers 'isMaster' with msg 'isdbgrid' and lists the shards.
    const shardDb = stubDb('localhost');
    sandbox.stub(shardDb, 'command')
      .withArgs({ isMaster: 1 })
      .callsFake(async () => { return testShardIsMaster; })
      .withArgs({ listShards: 1 })
      .callsFake(async () => { return testShardListShards; });

    // Create the shard db functions.
    // The connection string below is what get-shard-hosts derives from the
    // 'host' field in shard.listShards.json.
    const node4Db = stubDb('mongodb://mongod1.shard1.mongo-cluster.com:27017,mongod2.shard1.mongo-cluster.com:27017,mongod3.shard1.mongo-cluster.com:27017?replicaSet=shard1rs');
    sandbox.stub(node4Db, 'command')
      .withArgs({ replSetGetStatus: 1 })
      .callsFake(async () => { return testShardReplSetGetStatus; })
      .withArgs({ serverStatus: 1 })
      .callsFake(async () => { return testReplsetServerStatus; });

    // Load the status.
    const status = await loadStatus(stubClient('localhost'));

    // Assert the expected shape.
    expect(status.configuration).to.equal('sharded');
    expect(status.shards.length).to.equal(3);
    const shard1 = status.shards[0];
    expect(shard1.id).to.equal('shard1');
    expect(shard1.replicaSet).to.equal('shard1rs');
    expect(shard1.hosts.length).to.equal(3);
    expect(shard1.connections.active).to.equal(3);
    expect(shard1.connections.current).to.equal(21);
    expect(shard1.connections.available).to.equal(183);
    const host1 = shard1.hosts[0];
    expect(host1.state).to.equal(1); // i.e. primary
    expect(host1.host).to.equal('mongod1.shard1.mongo-cluster.com:27017');
    const host2 = shard1.hosts[1];
    expect(host2.state).to.equal(2); // i.e. secondary
    expect(host2.host).to.equal('mongod2.shard1.mongo-cluster.com:27017');
    const host3 = shard1.hosts[2];
    expect(host3.state).to.equal(2); // i.e. secondary
    expect(host3.host).to.equal('mongod3.shard1.mongo-cluster.com:27017');
  });
});
| }, 59 | "flowControl" : { 60 | "enabled" : true, 61 | "targetRateLimit" : 1000000000, 62 | "timeAcquiringMicros" : 0, 63 | "locksPerOp" : 0, 64 | "sustainerRate" : 0, 65 | "isLagged" : false, 66 | "isLaggedCount" : 0, 67 | "isLaggedTimeMicros" : 0 68 | }, 69 | "freeMonitoring" : { 70 | "state" : "undecided" 71 | }, 72 | "globalLock" : { 73 | "totalTime" : 236549000, 74 | "currentQueue" : { 75 | "total" : 0, 76 | "readers" : 0, 77 | "writers" : 0 78 | }, 79 | "activeClients" : { 80 | "total" : 0, 81 | "readers" : 0, 82 | "writers" : 0 83 | } 84 | }, 85 | "locks" : { 86 | "ParallelBatchWriterMode" : { 87 | "acquireCount" : { 88 | "r" : 22 89 | } 90 | }, 91 | "ReplicationStateTransition" : { 92 | "acquireCount" : { 93 | "w" : 741 94 | } 95 | }, 96 | "Global" : { 97 | "acquireCount" : { 98 | "r" : 727, 99 | "w" : 10, 100 | "W" : 4 101 | } 102 | }, 103 | "Database" : { 104 | "acquireCount" : { 105 | "r" : 256, 106 | "w" : 5, 107 | "W" : 6 108 | } 109 | }, 110 | "Collection" : { 111 | "acquireCount" : { 112 | "r" : 262, 113 | "w" : 5, 114 | "R" : 1, 115 | "W" : 2 116 | } 117 | }, 118 | "Mutex" : { 119 | "acquireCount" : { 120 | "r" : 262 121 | } 122 | }, 123 | "oplog" : { 124 | "acquireCount" : { 125 | "r" : 236 126 | } 127 | } 128 | }, 129 | "logicalSessionRecordCache" : { 130 | "activeSessionsCount" : 1, 131 | "sessionsCollectionJobCount" : 1, 132 | "lastSessionsCollectionJobDurationMillis" : 70, 133 | "lastSessionsCollectionJobTimestamp" : "2019-11-05T07:07:21.572Z", 134 | "lastSessionsCollectionJobEntriesRefreshed" : 0, 135 | "lastSessionsCollectionJobEntriesEnded" : 0, 136 | "lastSessionsCollectionJobCursorsClosed" : 0, 137 | "transactionReaperJobCount" : 1, 138 | "lastTransactionReaperJobDurationMillis" : 0, 139 | "lastTransactionReaperJobTimestamp" : "2019-11-05T07:07:21.572Z", 140 | "lastTransactionReaperJobEntriesCleanedUp" : 0, 141 | "sessionCatalogSize" : 0 142 | }, 143 | "network" : { 144 | "bytesIn" : 1917, 145 | "bytesOut" : 10408, 146 | "physicalBytesIn" 
: 1917, 147 | "physicalBytesOut" : 10408, 148 | "numRequests" : 17, 149 | "compression" : { 150 | "snappy" : { 151 | "compressor" : { 152 | "bytesIn" : 0, 153 | "bytesOut" : 0 154 | }, 155 | "decompressor" : { 156 | "bytesIn" : 0, 157 | "bytesOut" : 0 158 | } 159 | }, 160 | "zstd" : { 161 | "compressor" : { 162 | "bytesIn" : 0, 163 | "bytesOut" : 0 164 | }, 165 | "decompressor" : { 166 | "bytesIn" : 0, 167 | "bytesOut" : 0 168 | } 169 | }, 170 | "zlib" : { 171 | "compressor" : { 172 | "bytesIn" : 0, 173 | "bytesOut" : 0 174 | }, 175 | "decompressor" : { 176 | "bytesIn" : 0, 177 | "bytesOut" : 0 178 | } 179 | } 180 | }, 181 | "serviceExecutorTaskStats" : { 182 | "executor" : "passthrough", 183 | "threadsRunning" : 1 184 | } 185 | }, 186 | "opLatencies" : { 187 | "reads" : { 188 | "latency" : 0, 189 | "ops" : 0 190 | }, 191 | "writes" : { 192 | "latency" : 0, 193 | "ops" : 0 194 | }, 195 | "commands" : { 196 | "latency" : 352, 197 | "ops" : 16 198 | }, 199 | "transactions" : { 200 | "latency" : 0, 201 | "ops" : 0 202 | } 203 | }, 204 | "opReadConcernCounters" : { 205 | "available" : 0, 206 | "linearizable" : 0, 207 | "local" : 0, 208 | "majority" : 0, 209 | "snapshot" : 0, 210 | "none" : 1 211 | }, 212 | "opcounters" : { 213 | "insert" : 0, 214 | "query" : 1, 215 | "update" : 0, 216 | "delete" : 0, 217 | "getmore" : 0, 218 | "command" : 20 219 | }, 220 | "opcountersRepl" : { 221 | "insert" : 0, 222 | "query" : 0, 223 | "update" : 0, 224 | "delete" : 0, 225 | "getmore" : 0, 226 | "command" : 0 227 | }, 228 | "storageEngine" : { 229 | "name" : "wiredTiger", 230 | "supportsCommittedReads" : true, 231 | "oldestRequiredTimestampForCrashRecovery" : 0, 232 | "supportsPendingDrops" : true, 233 | "dropPendingIdents" : 0, 234 | "supportsSnapshotReadConcern" : true, 235 | "readOnly" : false, 236 | "persistent" : true, 237 | "backupCursorOpen" : false 238 | }, 239 | "trafficRecording" : { 240 | "running" : false 241 | }, 242 | "transactions" : { 243 | "retriedCommandsCount" : 0, 
244 | "retriedStatementsCount" : 0, 245 | "transactionsCollectionWriteCount" : 0, 246 | "currentActive" : 0, 247 | "currentInactive" : 0, 248 | "currentOpen" : 0, 249 | "totalAborted" : 0, 250 | "totalCommitted" : 0, 251 | "totalStarted" : 0, 252 | "totalPrepared" : 0, 253 | "totalPreparedThenCommitted" : 0, 254 | "totalPreparedThenAborted" : 0, 255 | "currentPrepared" : 0 256 | }, 257 | "transportSecurity" : { 258 | "1.0" : 0, 259 | "1.1" : 0, 260 | "1.2" : 0, 261 | "1.3" : 0, 262 | "unknown" : 0 263 | }, 264 | "twoPhaseCommitCoordinator" : { 265 | "totalCreated" : 0, 266 | "totalStartedTwoPhaseCommit" : 0, 267 | "totalAbortedTwoPhaseCommit" : 0, 268 | "totalCommittedTwoPhaseCommit" : 0, 269 | "currentInSteps" : { 270 | "writingParticipantList" : 0, 271 | "waitingForVotes" : 0, 272 | "writingDecision" : 0, 273 | "waitingForDecisionAcks" : 0, 274 | "deletingCoordinatorDoc" : 0 275 | } 276 | }, 277 | "wiredTiger" : { 278 | "uri" : "statistics:", 279 | "async" : { 280 | "current work queue length" : 0, 281 | "maximum work queue length" : 0, 282 | "number of allocation state races" : 0, 283 | "number of flush calls" : 0, 284 | "number of operation slots viewed for allocation" : 0, 285 | "number of times operation allocation failed" : 0, 286 | "number of times worker found no work" : 0, 287 | "total allocations" : 0, 288 | "total compact calls" : 0, 289 | "total insert calls" : 0, 290 | "total remove calls" : 0, 291 | "total search calls" : 0, 292 | "total update calls" : 0 293 | }, 294 | "block-manager" : { 295 | "blocks pre-loaded" : 0, 296 | "blocks read" : 2, 297 | "blocks written" : 34, 298 | "bytes read" : 8192, 299 | "bytes written" : 163840, 300 | "bytes written for checkpoint" : 163840, 301 | "mapped blocks read" : 0, 302 | "mapped bytes read" : 0 303 | }, 304 | "cache" : { 305 | "application threads page read from disk to cache count" : 0, 306 | "application threads page read from disk to cache time (usecs)" : 0, 307 | "application threads page write from 
cache to disk count" : 16, 308 | "application threads page write from cache to disk time (usecs)" : 11859, 309 | "bytes belonging to page images in the cache" : 0, 310 | "bytes belonging to the cache overflow table in the cache" : 182, 311 | "bytes currently in the cache" : 45437, 312 | "bytes dirty in the cache cumulative" : 37709, 313 | "bytes not belonging to page images in the cache" : 45437, 314 | "bytes read into cache" : 0, 315 | "bytes written from cache" : 31449, 316 | "cache overflow cursor application thread wait time (usecs)" : 0, 317 | "cache overflow cursor internal thread wait time (usecs)" : 0, 318 | "cache overflow score" : 0, 319 | "cache overflow table entries" : 0, 320 | "cache overflow table insert calls" : 0, 321 | "cache overflow table max on-disk size" : 0, 322 | "cache overflow table on-disk size" : 0, 323 | "cache overflow table remove calls" : 0, 324 | "checkpoint blocked page eviction" : 0, 325 | "eviction calls to get a page" : 13, 326 | "eviction calls to get a page found queue empty" : 13, 327 | "eviction calls to get a page found queue empty after locking" : 0, 328 | "eviction currently operating in aggressive mode" : 0, 329 | "eviction empty score" : 0, 330 | "eviction passes of a file" : 0, 331 | "eviction server candidate queue empty when topping up" : 0, 332 | "eviction server candidate queue not empty when topping up" : 0, 333 | "eviction server evicting pages" : 0, 334 | "eviction server slept, because we did not make progress with eviction" : 0, 335 | "eviction server unable to reach eviction goal" : 0, 336 | "eviction server waiting for a leaf page" : 2, 337 | "eviction server waiting for an internal page sleep (usec)" : 0, 338 | "eviction server waiting for an internal page yields" : 0, 339 | "eviction state" : 128, 340 | "eviction walk target pages histogram - 0-9" : 0, 341 | "eviction walk target pages histogram - 10-31" : 0, 342 | "eviction walk target pages histogram - 128 and higher" : 0, 343 | "eviction walk target 
pages histogram - 32-63" : 0, 344 | "eviction walk target pages histogram - 64-128" : 0, 345 | "eviction walks abandoned" : 0, 346 | "eviction walks gave up because they restarted their walk twice" : 0, 347 | "eviction walks gave up because they saw too many pages and found no candidates" : 0, 348 | "eviction walks gave up because they saw too many pages and found too few candidates" : 0, 349 | "eviction walks reached end of tree" : 0, 350 | "eviction walks started from root of tree" : 0, 351 | "eviction walks started from saved location in tree" : 0, 352 | "eviction worker thread active" : 4, 353 | "eviction worker thread created" : 0, 354 | "eviction worker thread evicting pages" : 0, 355 | "eviction worker thread removed" : 0, 356 | "eviction worker thread stable number" : 0, 357 | "files with active eviction walks" : 0, 358 | "files with new eviction walks started" : 0, 359 | "force re-tuning of eviction workers once in a while" : 0, 360 | "forced eviction - pages evicted that were clean count" : 0, 361 | "forced eviction - pages evicted that were clean time (usecs)" : 0, 362 | "forced eviction - pages evicted that were dirty count" : 0, 363 | "forced eviction - pages evicted that were dirty time (usecs)" : 0, 364 | "forced eviction - pages selected because of too many deleted items count" : 0, 365 | "forced eviction - pages selected count" : 0, 366 | "forced eviction - pages selected unable to be evicted count" : 0, 367 | "forced eviction - pages selected unable to be evicted time" : 0, 368 | "hazard pointer blocked page eviction" : 0, 369 | "hazard pointer check calls" : 0, 370 | "hazard pointer check entries walked" : 0, 371 | "hazard pointer maximum array length" : 0, 372 | "in-memory page passed criteria to be split" : 0, 373 | "in-memory page splits" : 0, 374 | "internal pages evicted" : 0, 375 | "internal pages split during eviction" : 0, 376 | "leaf pages split during eviction" : 0, 377 | "maximum bytes configured" : 16642998272, 378 | "maximum page 
size at eviction" : 0, 379 | "modified pages evicted" : 2, 380 | "modified pages evicted by application threads" : 0, 381 | "operations timed out waiting for space in cache" : 0, 382 | "overflow pages read into cache" : 0, 383 | "page split during eviction deepened the tree" : 0, 384 | "page written requiring cache overflow records" : 0, 385 | "pages currently held in the cache" : 19, 386 | "pages evicted by application threads" : 0, 387 | "pages queued for eviction" : 0, 388 | "pages queued for eviction post lru sorting" : 0, 389 | "pages queued for urgent eviction" : 0, 390 | "pages queued for urgent eviction during walk" : 0, 391 | "pages read into cache" : 0, 392 | "pages read into cache after truncate" : 8, 393 | "pages read into cache after truncate in prepare state" : 0, 394 | "pages read into cache requiring cache overflow entries" : 0, 395 | "pages read into cache requiring cache overflow for checkpoint" : 0, 396 | "pages read into cache skipping older cache overflow entries" : 0, 397 | "pages read into cache with skipped cache overflow entries needed later" : 0, 398 | "pages read into cache with skipped cache overflow entries needed later by checkpoint" : 0, 399 | "pages requested from the cache" : 363, 400 | "pages seen by eviction walk" : 0, 401 | "pages selected for eviction unable to be evicted" : 0, 402 | "pages walked for eviction" : 0, 403 | "pages written from cache" : 16, 404 | "pages written requiring in-memory restoration" : 0, 405 | "percentage overhead" : 8, 406 | "tracked bytes belonging to internal pages in the cache" : 4547, 407 | "tracked bytes belonging to leaf pages in the cache" : 40890, 408 | "tracked dirty bytes in the cache" : 0, 409 | "tracked dirty pages in the cache" : 0, 410 | "unmodified pages evicted" : 0 411 | }, 412 | "capacity" : { 413 | "background fsync file handles considered" : 0, 414 | "background fsync file handles synced" : 0, 415 | "background fsync time (msecs)" : 0, 416 | "bytes read" : 0, 417 | "bytes written for 
checkpoint" : 31041, 418 | "bytes written for eviction" : 0, 419 | "bytes written for log" : 24960, 420 | "bytes written total" : 56001, 421 | "threshold to call fsync" : 0, 422 | "time waiting due to total capacity (usecs)" : 0, 423 | "time waiting during checkpoint (usecs)" : 0, 424 | "time waiting during eviction (usecs)" : 0, 425 | "time waiting during logging (usecs)" : 0, 426 | "time waiting during read (usecs)" : 0 427 | }, 428 | "connection" : { 429 | "auto adjusting condition resets" : 24, 430 | "auto adjusting condition wait calls" : 1467, 431 | "detected system time went backwards" : 0, 432 | "files currently open" : 14, 433 | "memory allocations" : 10072, 434 | "memory frees" : 9077, 435 | "memory re-allocations" : 1013, 436 | "pthread mutex condition wait calls" : 3885, 437 | "pthread mutex shared lock read-lock calls" : 2983, 438 | "pthread mutex shared lock write-lock calls" : 326, 439 | "total fsync I/Os" : 57, 440 | "total read I/Os" : 25, 441 | "total write I/Os" : 71 442 | }, 443 | "cursor" : { 444 | "cached cursor count" : 14, 445 | "cursor bulk loaded cursor insert calls" : 0, 446 | "cursor close calls that result in cache" : 25, 447 | "cursor create calls" : 81, 448 | "cursor insert calls" : 61, 449 | "cursor insert key and value bytes" : 31914, 450 | "cursor modify calls" : 0, 451 | "cursor modify key and value bytes affected" : 0, 452 | "cursor modify value bytes modified" : 0, 453 | "cursor next calls" : 31, 454 | "cursor operation restarted" : 0, 455 | "cursor prev calls" : 7, 456 | "cursor remove calls" : 3, 457 | "cursor remove key bytes removed" : 119, 458 | "cursor reserve calls" : 0, 459 | "cursor reset calls" : 367, 460 | "cursor search calls" : 265, 461 | "cursor search near calls" : 10, 462 | "cursor sweep buckets" : 42, 463 | "cursor sweep cursors closed" : 0, 464 | "cursor sweep cursors examined" : 0, 465 | "cursor sweeps" : 7, 466 | "cursor truncate calls" : 0, 467 | "cursor update calls" : 0, 468 | "cursor update key and value 
bytes" : 0, 469 | "cursor update value size change" : 0, 470 | "cursors reused from cache" : 11, 471 | "open cursor count" : 22 472 | }, 473 | "data-handle" : { 474 | "connection data handle size" : 448, 475 | "connection data handles currently active" : 21, 476 | "connection sweep candidate became referenced" : 0, 477 | "connection sweep dhandles closed" : 1, 478 | "connection sweep dhandles removed from hash list" : 3, 479 | "connection sweep time-of-death sets" : 27, 480 | "connection sweeps" : 24, 481 | "session dhandles swept" : 2, 482 | "session sweep attempts" : 39 483 | }, 484 | "lock" : { 485 | "checkpoint lock acquisitions" : 3, 486 | "checkpoint lock application thread wait time (usecs)" : 0, 487 | "checkpoint lock internal thread wait time (usecs)" : 0, 488 | "dhandle lock application thread time waiting (usecs)" : 0, 489 | "dhandle lock internal thread time waiting (usecs)" : 0, 490 | "dhandle read lock acquisitions" : 977, 491 | "dhandle write lock acquisitions" : 28, 492 | "durable timestamp queue lock application thread time waiting (usecs)" : 0, 493 | "durable timestamp queue lock internal thread time waiting (usecs)" : 0, 494 | "durable timestamp queue read lock acquisitions" : 0, 495 | "durable timestamp queue write lock acquisitions" : 0, 496 | "metadata lock acquisitions" : 3, 497 | "metadata lock application thread wait time (usecs)" : 0, 498 | "metadata lock internal thread wait time (usecs)" : 0, 499 | "read timestamp queue lock application thread time waiting (usecs)" : 0, 500 | "read timestamp queue lock internal thread time waiting (usecs)" : 0, 501 | "read timestamp queue read lock acquisitions" : 0, 502 | "read timestamp queue write lock acquisitions" : 0, 503 | "schema lock acquisitions" : 20, 504 | "schema lock application thread wait time (usecs)" : 0, 505 | "schema lock internal thread wait time (usecs)" : 0, 506 | "table lock application thread time waiting for the table lock (usecs)" : 0, 507 | "table lock internal thread time 
waiting for the table lock (usecs)" : 0, 508 | "table read lock acquisitions" : 0, 509 | "table write lock acquisitions" : 13, 510 | "txn global lock application thread time waiting (usecs)" : 0, 511 | "txn global lock internal thread time waiting (usecs)" : 0, 512 | "txn global read lock acquisitions" : 24, 513 | "txn global write lock acquisitions" : 13 514 | }, 515 | "log" : { 516 | "busy returns attempting to switch slots" : 0, 517 | "force archive time sleeping (usecs)" : 0, 518 | "log bytes of payload data" : 18551, 519 | "log bytes written" : 24832, 520 | "log files manually zero-filled" : 0, 521 | "log flush operations" : 2281, 522 | "log force write operations" : 2531, 523 | "log force write operations skipped" : 2528, 524 | "log records compressed" : 20, 525 | "log records not compressed" : 18, 526 | "log records too small to compress" : 26, 527 | "log release advances write LSN" : 15, 528 | "log scan operations" : 0, 529 | "log scan records requiring two reads" : 0, 530 | "log server thread advances write LSN" : 3, 531 | "log server thread write LSN walk skipped" : 1221, 532 | "log sync operations" : 18, 533 | "log sync time duration (usecs)" : 115610, 534 | "log sync_dir operations" : 1, 535 | "log sync_dir time duration (usecs)" : 5249, 536 | "log write operations" : 64, 537 | "logging bytes consolidated" : 24320, 538 | "maximum log file size" : 104857600, 539 | "number of pre-allocated log files to create" : 2, 540 | "pre-allocated log files not ready and missed" : 1, 541 | "pre-allocated log files prepared" : 2, 542 | "pre-allocated log files used" : 0, 543 | "records processed by log scan" : 0, 544 | "slot close lost race" : 0, 545 | "slot close unbuffered waits" : 0, 546 | "slot closures" : 18, 547 | "slot join atomic update races" : 0, 548 | "slot join calls atomic updates raced" : 0, 549 | "slot join calls did not yield" : 64, 550 | "slot join calls found active slot closed" : 0, 551 | "slot join calls slept" : 0, 552 | "slot join calls yielded" 
: 0, 553 | "slot join found active slot closed" : 0, 554 | "slot joins yield time (usecs)" : 0, 555 | "slot transitions unable to find free slot" : 0, 556 | "slot unbuffered writes" : 0, 557 | "total in-memory size of compressed records" : 29778, 558 | "total log buffer size" : 33554432, 559 | "total size of compressed records" : 14806, 560 | "written slots coalesced" : 0, 561 | "yields waiting for previous log file close" : 0 562 | }, 563 | "perf" : { 564 | "file system read latency histogram (bucket 1) - 10-49ms" : 0, 565 | "file system read latency histogram (bucket 2) - 50-99ms" : 0, 566 | "file system read latency histogram (bucket 3) - 100-249ms" : 0, 567 | "file system read latency histogram (bucket 4) - 250-499ms" : 0, 568 | "file system read latency histogram (bucket 5) - 500-999ms" : 0, 569 | "file system read latency histogram (bucket 6) - 1000ms+" : 0, 570 | "file system write latency histogram (bucket 1) - 10-49ms" : 0, 571 | "file system write latency histogram (bucket 2) - 50-99ms" : 0, 572 | "file system write latency histogram (bucket 3) - 100-249ms" : 0, 573 | "file system write latency histogram (bucket 4) - 250-499ms" : 0, 574 | "file system write latency histogram (bucket 5) - 500-999ms" : 0, 575 | "file system write latency histogram (bucket 6) - 1000ms+" : 0, 576 | "operation read latency histogram (bucket 1) - 100-249us" : 0, 577 | "operation read latency histogram (bucket 2) - 250-499us" : 0, 578 | "operation read latency histogram (bucket 3) - 500-999us" : 0, 579 | "operation read latency histogram (bucket 4) - 1000-9999us" : 0, 580 | "operation read latency histogram (bucket 5) - 10000us+" : 0, 581 | "operation write latency histogram (bucket 1) - 100-249us" : 0, 582 | "operation write latency histogram (bucket 2) - 250-499us" : 0, 583 | "operation write latency histogram (bucket 3) - 500-999us" : 0, 584 | "operation write latency histogram (bucket 4) - 1000-9999us" : 0, 585 | "operation write latency histogram (bucket 5) - 10000us+" : 0 
586 | }, 587 | "reconciliation" : { 588 | "fast-path pages deleted" : 0, 589 | "page reconciliation calls" : 19, 590 | "page reconciliation calls for eviction" : 1, 591 | "pages deleted" : 4, 592 | "split bytes currently awaiting free" : 0, 593 | "split objects currently awaiting free" : 0 594 | }, 595 | "session" : { 596 | "open session count" : 19, 597 | "session query timestamp calls" : 0, 598 | "table alter failed calls" : 0, 599 | "table alter successful calls" : 0, 600 | "table alter unchanged and skipped" : 0, 601 | "table compact failed calls" : 0, 602 | "table compact successful calls" : 0, 603 | "table create failed calls" : 0, 604 | "table create successful calls" : 10, 605 | "table drop failed calls" : 0, 606 | "table drop successful calls" : 1, 607 | "table import failed calls" : 0, 608 | "table import successful calls" : 0, 609 | "table rebalance failed calls" : 0, 610 | "table rebalance successful calls" : 0, 611 | "table rename failed calls" : 0, 612 | "table rename successful calls" : 0, 613 | "table salvage failed calls" : 0, 614 | "table salvage successful calls" : 0, 615 | "table truncate failed calls" : 0, 616 | "table truncate successful calls" : 0, 617 | "table verify failed calls" : 0, 618 | "table verify successful calls" : 0 619 | }, 620 | "thread-state" : { 621 | "active filesystem fsync calls" : 0, 622 | "active filesystem read calls" : 0, 623 | "active filesystem write calls" : 0 624 | }, 625 | "thread-yield" : { 626 | "application thread time evicting (usecs)" : 0, 627 | "application thread time waiting for cache (usecs)" : 0, 628 | "connection close blocked waiting for transaction state stabilization" : 0, 629 | "connection close yielded for lsm manager shutdown" : 0, 630 | "data handle lock yielded" : 0, 631 | "get reference for page index and slot time sleeping (usecs)" : 0, 632 | "log server sync yielded for log write" : 0, 633 | "page access yielded due to prepare state change" : 0, 634 | "page acquire busy blocked" : 0, 635 | 
"page acquire eviction blocked" : 0, 636 | "page acquire locked blocked" : 0, 637 | "page acquire read blocked" : 0, 638 | "page acquire time sleeping (usecs)" : 0, 639 | "page delete rollback time sleeping for state change (usecs)" : 0, 640 | "page reconciliation yielded due to child modification" : 0 641 | }, 642 | "transaction" : { 643 | "Number of prepared updates" : 0, 644 | "Number of prepared updates added to cache overflow" : 0, 645 | "Number of prepared updates resolved" : 0, 646 | "durable timestamp queue entries walked" : 0, 647 | "durable timestamp queue insert to empty" : 0, 648 | "durable timestamp queue inserts to head" : 0, 649 | "durable timestamp queue inserts total" : 0, 650 | "durable timestamp queue length" : 0, 651 | "number of named snapshots created" : 0, 652 | "number of named snapshots dropped" : 0, 653 | "prepared transactions" : 0, 654 | "prepared transactions committed" : 0, 655 | "prepared transactions currently active" : 0, 656 | "prepared transactions rolled back" : 0, 657 | "query timestamp calls" : 237, 658 | "read timestamp queue entries walked" : 0, 659 | "read timestamp queue insert to empty" : 0, 660 | "read timestamp queue inserts to head" : 0, 661 | "read timestamp queue inserts total" : 0, 662 | "read timestamp queue length" : 0, 663 | "rollback to stable calls" : 0, 664 | "rollback to stable updates aborted" : 0, 665 | "rollback to stable updates removed from cache overflow" : 0, 666 | "set timestamp calls" : 0, 667 | "set timestamp durable calls" : 0, 668 | "set timestamp durable updates" : 0, 669 | "set timestamp oldest calls" : 0, 670 | "set timestamp oldest updates" : 0, 671 | "set timestamp stable calls" : 0, 672 | "set timestamp stable updates" : 0, 673 | "transaction begins" : 24, 674 | "transaction checkpoint currently running" : 0, 675 | "transaction checkpoint generation" : 4, 676 | "transaction checkpoint max time (msecs)" : 80, 677 | "transaction checkpoint min time (msecs)" : 15, 678 | "transaction checkpoint 
most recent time (msecs)" : 15, 679 | "transaction checkpoint scrub dirty target" : 0, 680 | "transaction checkpoint scrub time (msecs)" : 0, 681 | "transaction checkpoint total time (msecs)" : 135, 682 | "transaction checkpoints" : 3, 683 | "transaction checkpoints skipped because database was clean" : 0, 684 | "transaction failures due to cache overflow" : 0, 685 | "transaction fsync calls for checkpoint after allocating the transaction ID" : 3, 686 | "transaction fsync duration for checkpoint after allocating the transaction ID (usecs)" : 0, 687 | "transaction range of IDs currently pinned" : 0, 688 | "transaction range of IDs currently pinned by a checkpoint" : 0, 689 | "transaction range of IDs currently pinned by named snapshots" : 0, 690 | "transaction range of timestamps currently pinned" : 0, 691 | "transaction range of timestamps pinned by a checkpoint" : 0, 692 | "transaction range of timestamps pinned by the oldest active read timestamp" : 0, 693 | "transaction range of timestamps pinned by the oldest timestamp" : 0, 694 | "transaction read timestamp of the oldest active reader" : 0, 695 | "transaction sync calls" : 0, 696 | "transactions committed" : 9, 697 | "transactions rolled back" : 15, 698 | "update conflicts" : 0 699 | }, 700 | "concurrentTransactions" : { 701 | "write" : { 702 | "out" : 0, 703 | "available" : 128, 704 | "totalTickets" : 128 705 | }, 706 | "read" : { 707 | "out" : 1, 708 | "available" : 127, 709 | "totalTickets" : 128 710 | } 711 | }, 712 | "snapshot-window-settings" : { 713 | "cache pressure percentage threshold" : 95, 714 | "current cache pressure percentage" : 0, 715 | "total number of SnapshotTooOld errors" : 0, 716 | "max target available snapshots window size in seconds" : 5, 717 | "target available snapshots window size in seconds" : 5, 718 | "current available snapshots window size in seconds" : 0, 719 | "latest majority snapshot timestamp available" : "Jan 1 07:30:00:0", 720 | "oldest majority snapshot timestamp 
available" : "Jan 1 07:30:00:0" 721 | } 722 | }, 723 | "mem" : { 724 | "bits" : 64, 725 | "resident" : 37, 726 | "virtual" : 6566, 727 | "supported" : true 728 | }, 729 | "metrics" : { 730 | "commands" : { 731 | "buildInfo" : { 732 | "failed" : 0, 733 | "total" : 3 734 | }, 735 | "createIndexes" : { 736 | "failed" : 0, 737 | "total" : 1 738 | }, 739 | "find" : { 740 | "failed" : 0, 741 | "total" : 1 742 | }, 743 | "getCmdLineOpts" : { 744 | "failed" : 0, 745 | "total" : 1 746 | }, 747 | "getFreeMonitoringStatus" : { 748 | "failed" : 0, 749 | "total" : 1 750 | }, 751 | "getLog" : { 752 | "failed" : 0, 753 | "total" : 1 754 | }, 755 | "isMaster" : { 756 | "failed" : 0, 757 | "total" : 6 758 | }, 759 | "listCollections" : { 760 | "failed" : 0, 761 | "total" : 2 762 | }, 763 | "listIndexes" : { 764 | "failed" : 2, 765 | "total" : 2 766 | }, 767 | "replSetGetStatus" : { 768 | "failed" : 1, 769 | "total" : 1 770 | }, 771 | "serverStatus" : { 772 | "failed" : 0, 773 | "total" : 1 774 | }, 775 | "whatsmyuri" : { 776 | "failed" : 0, 777 | "total" : 1 778 | } 779 | }, 780 | "cursor" : { 781 | "timedOut" : 0, 782 | "open" : { 783 | "noTimeout" : 0, 784 | "pinned" : 0, 785 | "total" : 0 786 | } 787 | }, 788 | "document" : { 789 | "deleted" : 0, 790 | "inserted" : 0, 791 | "returned" : 0, 792 | "updated" : 0 793 | }, 794 | "getLastError" : { 795 | "wtime" : { 796 | "num" : 0, 797 | "totalMillis" : 0 798 | }, 799 | "wtimeouts" : 0 800 | }, 801 | "operation" : { 802 | "scanAndOrder" : 0, 803 | "writeConflicts" : 0 804 | }, 805 | "query" : { 806 | "planCacheTotalSizeEstimateBytes" : 0, 807 | "updateOneOpStyleBroadcastWithExactIDCount" : 0 808 | }, 809 | "queryExecutor" : { 810 | "scanned" : 0, 811 | "scannedObjects" : 0 812 | }, 813 | "record" : { 814 | "moves" : 0 815 | }, 816 | "repl" : { 817 | "executor" : { 818 | "pool" : { 819 | "inProgressCount" : 0 820 | }, 821 | "queues" : { 822 | "networkInProgress" : 0, 823 | "sleepers" : 0 824 | }, 825 | "unsignaledEvents" : 0, 826 | 
"shuttingDown" : false, 827 | "networkInterface" : "DEPRECATED: getDiagnosticString is deprecated in NetworkInterfaceTL" 828 | }, 829 | "apply" : { 830 | "attemptsToBecomeSecondary" : 0, 831 | "batchSize" : 0, 832 | "batches" : { 833 | "num" : 0, 834 | "totalMillis" : 0 835 | }, 836 | "ops" : 0 837 | }, 838 | "buffer" : { 839 | "count" : 0, 840 | "maxSizeBytes" : 0, 841 | "sizeBytes" : 0 842 | }, 843 | "initialSync" : { 844 | "completed" : 0, 845 | "failedAttempts" : 0, 846 | "failures" : 0 847 | }, 848 | "network" : { 849 | "bytes" : 0, 850 | "getmores" : { 851 | "num" : 0, 852 | "totalMillis" : 0 853 | }, 854 | "notMasterLegacyUnacknowledgedWrites" : 0, 855 | "notMasterUnacknowledgedWrites" : 0, 856 | "ops" : 0, 857 | "readersCreated" : 0 858 | }, 859 | "stepDown" : { 860 | "userOperationsKilled" : 0, 861 | "userOperationsRunning" : 0 862 | } 863 | }, 864 | "ttl" : { 865 | "deletedDocuments" : 0, 866 | "passes" : 3 867 | } 868 | }, 869 | "ok" : 1 870 | } 871 | -------------------------------------------------------------------------------- /test/replset.serverStatus.json: -------------------------------------------------------------------------------- 1 | { 2 | "host": "SIN-71210-C02YC0G9JGH8:27217", 3 | "version": "4.2.1", 4 | "process": "mongod", 5 | "pid": 76420, 6 | "uptime": 916, 7 | "uptimeMillis": 916612, 8 | "uptimeEstimate": 916, 9 | "localTime": "2019-11-03T13:18:55.663Z", 10 | "asserts": { 11 | "regular": 0, 12 | "warning": 0, 13 | "msg": 0, 14 | "user": 22, 15 | "rollovers": 0 16 | }, 17 | "connections": { 18 | "current": 21, 19 | "available": 183, 20 | "totalCreated": 519, 21 | "active": 3 22 | }, 23 | "electionMetrics": { 24 | "stepUpCmd": { 25 | "called": 0, 26 | "successful": 0 27 | }, 28 | "priorityTakeover": { 29 | "called": 0, 30 | "successful": 0 31 | }, 32 | "catchUpTakeover": { 33 | "called": 0, 34 | "successful": 0 35 | }, 36 | "electionTimeout": { 37 | "called": 1, 38 | "successful": 1 39 | }, 40 | "freezeTimeout": { 41 | "called": 0, 42 | 
"successful": 0 43 | }, 44 | "numStepDownsCausedByHigherTerm": 0, 45 | "numCatchUps": 0, 46 | "numCatchUpsSucceeded": 0, 47 | "numCatchUpsAlreadyCaughtUp": 1, 48 | "numCatchUpsSkipped": 0, 49 | "numCatchUpsTimedOut": 0, 50 | "numCatchUpsFailedWithError": 0, 51 | "numCatchUpsFailedWithNewTerm": 0, 52 | "numCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd": 0, 53 | "averageCatchUpOps": 0 54 | }, 55 | "extra_info": { 56 | "note": "fields vary by platform", 57 | "page_faults": 0 58 | }, 59 | "flowControl": { 60 | "enabled": true, 61 | "targetRateLimit": 1000000000, 62 | "timeAcquiringMicros": 0, 63 | "locksPerOp": 0, 64 | "sustainerRate": 0, 65 | "isLagged": false, 66 | "isLaggedCount": 0, 67 | "isLaggedTimeMicros": 0 68 | }, 69 | "freeMonitoring": { 70 | "state": "undecided" 71 | }, 72 | "globalLock": { 73 | "totalTime": 916608000, 74 | "currentQueue": { 75 | "total": 0, 76 | "readers": 0, 77 | "writers": 0 78 | }, 79 | "activeClients": { 80 | "total": 0, 81 | "readers": 0, 82 | "writers": 0 83 | } 84 | }, 85 | "locks": { 86 | "ParallelBatchWriterMode": { 87 | "acquireCount": { 88 | "r": 2292 89 | } 90 | }, 91 | "ReplicationStateTransition": { 92 | "acquireCount": { 93 | "w": 7846, 94 | "W": 2 95 | }, 96 | "acquireWaitCount": { 97 | "w": 1, 98 | "W": 2 99 | }, 100 | "timeAcquiringMicros": { 101 | "w": 34, 102 | "W": 90 103 | } 104 | }, 105 | "Global": { 106 | "acquireCount": { 107 | "r": 6726, 108 | "w": 1114, 109 | "R": 1, 110 | "W": 5 111 | }, 112 | "acquireWaitCount": { 113 | "r": 1, 114 | "w": 1, 115 | "W": 1 116 | }, 117 | "timeAcquiringMicros": { 118 | "r": 107454, 119 | "w": 133932, 120 | "W": 33 121 | } 122 | }, 123 | "Database": { 124 | "acquireCount": { 125 | "r": 4667, 126 | "w": 987, 127 | "W": 36 128 | }, 129 | "acquireWaitCount": { 130 | "r": 1, 131 | "W": 2 132 | }, 133 | "timeAcquiringMicros": { 134 | "r": 30321, 135 | "W": 276 136 | } 137 | }, 138 | "Collection": { 139 | "acquireCount": { 140 | "r": 1054, 141 | "w": 974, 142 | "R": 2, 143 | "W": 10 144 
| } 145 | }, 146 | "Mutex": { 147 | "acquireCount": { 148 | "r": 6139, 149 | "W": 6 150 | } 151 | }, 152 | "oplog": { 153 | "acquireCount": { 154 | "r": 3657, 155 | "w": 2 156 | } 157 | } 158 | }, 159 | "logicalSessionRecordCache": { 160 | "activeSessionsCount": 14, 161 | "sessionsCollectionJobCount": 4, 162 | "lastSessionsCollectionJobDurationMillis": 15018, 163 | "lastSessionsCollectionJobTimestamp": "2019-11-03T13:18:39.789Z", 164 | "lastSessionsCollectionJobEntriesRefreshed": 0, 165 | "lastSessionsCollectionJobEntriesEnded": 0, 166 | "lastSessionsCollectionJobCursorsClosed": 0, 167 | "transactionReaperJobCount": 4, 168 | "lastTransactionReaperJobDurationMillis": 3, 169 | "lastTransactionReaperJobTimestamp": "2019-11-03T13:18:39.789Z", 170 | "lastTransactionReaperJobEntriesCleanedUp": 0, 171 | "sessionCatalogSize": 0 172 | }, 173 | "network": { 174 | "bytesIn": 940676, 175 | "bytesOut": 10353662, 176 | "physicalBytesIn": 773728, 177 | "physicalBytesOut": 5836182, 178 | "numRequests": 3340, 179 | "compression": { 180 | "snappy": { 181 | "compressor": { 182 | "bytesIn": 7862984, 183 | "bytesOut": 3290544 184 | }, 185 | "decompressor": { 186 | "bytesIn": 910220, 187 | "bytesOut": 1354756 188 | } 189 | }, 190 | "zstd": { 191 | "compressor": { 192 | "bytesIn": 0, 193 | "bytesOut": 0 194 | }, 195 | "decompressor": { 196 | "bytesIn": 0, 197 | "bytesOut": 0 198 | } 199 | }, 200 | "zlib": { 201 | "compressor": { 202 | "bytesIn": 0, 203 | "bytesOut": 0 204 | }, 205 | "decompressor": { 206 | "bytesIn": 0, 207 | "bytesOut": 0 208 | } 209 | } 210 | }, 211 | "serviceExecutorTaskStats": { 212 | "executor": "passthrough", 213 | "threadsRunning": 21 214 | } 215 | }, 216 | "opLatencies": { 217 | "reads": { 218 | "latency": 27090, 219 | "ops": 194 220 | }, 221 | "writes": { 222 | "latency": 75094461, 223 | "ops": 10 224 | }, 225 | "commands": { 226 | "latency": 1236229, 227 | "ops": 3133 228 | }, 229 | "transactions": { 230 | "latency": 0, 231 | "ops": 0 232 | } 233 | }, 234 | 
"opReadConcernCounters": { 235 | "available": 0, 236 | "linearizable": 0, 237 | "local": 1, 238 | "majority": 0, 239 | "snapshot": 0, 240 | "none": 37 241 | }, 242 | "opcounters": { 243 | "insert": 1, 244 | "query": 38, 245 | "update": 25, 246 | "delete": 915, 247 | "getmore": 185, 248 | "command": 3139 249 | }, 250 | "opcountersRepl": { 251 | "insert": 0, 252 | "query": 0, 253 | "update": 0, 254 | "delete": 0, 255 | "getmore": 0, 256 | "command": 0 257 | }, 258 | "oplogTruncation": { 259 | "totalTimeProcessingMicros": 52, 260 | "processingMethod": "scanning", 261 | "totalTimeTruncatingMicros": 0, 262 | "truncateCount": 0 263 | }, 264 | "repl": { 265 | "hosts": [ 266 | "localhost:27217", 267 | "localhost:27218" 268 | ], 269 | "arbiters": [ 270 | "localhost:27219" 271 | ], 272 | "setName": "rs1", 273 | "setVersion": 1, 274 | "ismaster": true, 275 | "secondary": false, 276 | "primary": "localhost:27217", 277 | "me": "localhost:27217", 278 | "electionId": "7fffffff0000000000000001", 279 | "lastWrite": { 280 | "opTime": { 281 | "ts": "6755069269739831300", 282 | "t": 1 283 | }, 284 | "lastWriteDate": "2019-11-03T13:18:46.000Z", 285 | "majorityOpTime": { 286 | "ts": "6755067620472389633", 287 | "t": 1 288 | }, 289 | "majorityWriteDate": "2019-11-03T13:12:22.000Z" 290 | }, 291 | "rbid": 1 292 | }, 293 | "sharding": { 294 | "configsvrConnectionString": "config/localhost:27117,localhost:27118,localhost:27119", 295 | "lastSeenConfigServerOpTime": { 296 | "ts": "6755069243970027521", 297 | "t": 1 298 | }, 299 | "maxChunkSizeInBytes": 67108864 300 | }, 301 | "shardingStatistics": { 302 | "countStaleConfigErrors": 0, 303 | "countDonorMoveChunkStarted": 0, 304 | "totalDonorChunkCloneTimeMillis": 0, 305 | "totalCriticalSectionCommitTimeMillis": 0, 306 | "totalCriticalSectionTimeMillis": 0, 307 | "countDocsClonedOnRecipient": 0, 308 | "countDocsClonedOnDonor": 0, 309 | "countRecipientMoveChunkStarted": 0, 310 | "countDocsDeletedOnDonor": 0, 311 | "countDonorMoveChunkLockTimeout": 
0, 312 | "catalogCache": { 313 | "numDatabaseEntries": 1, 314 | "numCollectionEntries": 1, 315 | "countStaleConfigErrors": 0, 316 | "totalRefreshWaitTimeMicros": 19066, 317 | "numActiveIncrementalRefreshes": 0, 318 | "countIncrementalRefreshesStarted": 4, 319 | "numActiveFullRefreshes": 0, 320 | "countFullRefreshesStarted": 1, 321 | "countFailedRefreshes": 0 322 | } 323 | }, 324 | "storageEngine": { 325 | "name": "wiredTiger", 326 | "supportsCommittedReads": true, 327 | "oldestRequiredTimestampForCrashRecovery": "6755067620472389633", 328 | "supportsPendingDrops": true, 329 | "dropPendingIdents": 0, 330 | "supportsSnapshotReadConcern": true, 331 | "readOnly": false, 332 | "persistent": true, 333 | "backupCursorOpen": false 334 | }, 335 | "trafficRecording": { 336 | "running": false 337 | }, 338 | "transactions": { 339 | "retriedCommandsCount": 0, 340 | "retriedStatementsCount": 0, 341 | "transactionsCollectionWriteCount": 0, 342 | "currentActive": 0, 343 | "currentInactive": 0, 344 | "currentOpen": 0, 345 | "totalAborted": 0, 346 | "totalCommitted": 0, 347 | "totalStarted": 0, 348 | "totalPrepared": 0, 349 | "totalPreparedThenCommitted": 0, 350 | "totalPreparedThenAborted": 0, 351 | "currentPrepared": 0 352 | }, 353 | "transportSecurity": { 354 | "1.0": 0, 355 | "1.1": 0, 356 | "1.2": 0, 357 | "1.3": 0, 358 | "unknown": 0 359 | }, 360 | "twoPhaseCommitCoordinator": { 361 | "totalCreated": 0, 362 | "totalStartedTwoPhaseCommit": 0, 363 | "totalAbortedTwoPhaseCommit": 0, 364 | "totalCommittedTwoPhaseCommit": 0, 365 | "currentInSteps": { 366 | "writingParticipantList": 0, 367 | "waitingForVotes": 0, 368 | "writingDecision": 0, 369 | "waitingForDecisionAcks": 0, 370 | "deletingCoordinatorDoc": 0 371 | } 372 | }, 373 | "wiredTiger": { 374 | "uri": "statistics:", 375 | "async": { 376 | "current work queue length": 0, 377 | "maximum work queue length": 0, 378 | "number of allocation state races": 0, 379 | "number of flush calls": 0, 380 | "number of operation slots viewed 
for allocation": 0, 381 | "number of times operation allocation failed": 0, 382 | "number of times worker found no work": 0, 383 | "total allocations": 0, 384 | "total compact calls": 0, 385 | "total insert calls": 0, 386 | "total remove calls": 0, 387 | "total search calls": 0, 388 | "total update calls": 0 389 | }, 390 | "block-manager": { 391 | "blocks pre-loaded": 0, 392 | "blocks read": 65, 393 | "blocks written": 369, 394 | "bytes read": 266240, 395 | "bytes written": 2019328, 396 | "bytes written for checkpoint": 2019328, 397 | "mapped blocks read": 0, 398 | "mapped bytes read": 0 399 | }, 400 | "cache": { 401 | "application threads page read from disk to cache count": 0, 402 | "application threads page read from disk to cache time (usecs)": 0, 403 | "application threads page write from cache to disk count": 188, 404 | "application threads page write from cache to disk time (usecs)": 61900, 405 | "bytes belonging to page images in the cache": 86, 406 | "bytes belonging to the cache overflow table in the cache": 182, 407 | "bytes currently in the cache": 189105, 408 | "bytes dirty in the cache cumulative": 1464826, 409 | "bytes not belonging to page images in the cache": 189019, 410 | "bytes read into cache": 0, 411 | "bytes written from cache": 742887, 412 | "cache overflow cursor application thread wait time (usecs)": 0, 413 | "cache overflow cursor internal thread wait time (usecs)": 0, 414 | "cache overflow score": 0, 415 | "cache overflow table entries": 0, 416 | "cache overflow table insert calls": 0, 417 | "cache overflow table max on-disk size": 0, 418 | "cache overflow table on-disk size": 0, 419 | "cache overflow table remove calls": 0, 420 | "checkpoint blocked page eviction": 0, 421 | "eviction calls to get a page": 54, 422 | "eviction calls to get a page found queue empty": 55, 423 | "eviction calls to get a page found queue empty after locking": 0, 424 | "eviction currently operating in aggressive mode": 0, 425 | "eviction empty score": 0, 426 | 
"eviction passes of a file": 0, 427 | "eviction server candidate queue empty when topping up": 0, 428 | "eviction server candidate queue not empty when topping up": 0, 429 | "eviction server evicting pages": 0, 430 | "eviction server slept, because we did not make progress with eviction": 0, 431 | "eviction server unable to reach eviction goal": 0, 432 | "eviction server waiting for a leaf page": 4, 433 | "eviction server waiting for an internal page sleep (usec)": 0, 434 | "eviction server waiting for an internal page yields": 0, 435 | "eviction state": 128, 436 | "eviction walk target pages histogram - 0-9": 0, 437 | "eviction walk target pages histogram - 10-31": 0, 438 | "eviction walk target pages histogram - 128 and higher": 0, 439 | "eviction walk target pages histogram - 32-63": 0, 440 | "eviction walk target pages histogram - 64-128": 0, 441 | "eviction walks abandoned": 0, 442 | "eviction walks gave up because they restarted their walk twice": 0, 443 | "eviction walks gave up because they saw too many pages and found no candidates": 0, 444 | "eviction walks gave up because they saw too many pages and found too few candidates": 0, 445 | "eviction walks reached end of tree": 0, 446 | "eviction walks started from root of tree": 0, 447 | "eviction walks started from saved location in tree": 0, 448 | "eviction worker thread active": 4, 449 | "eviction worker thread created": 0, 450 | "eviction worker thread evicting pages": 0, 451 | "eviction worker thread removed": 0, 452 | "eviction worker thread stable number": 0, 453 | "files with active eviction walks": 0, 454 | "files with new eviction walks started": 0, 455 | "force re-tuning of eviction workers once in a while": 0, 456 | "forced eviction - pages evicted that were clean count": 0, 457 | "forced eviction - pages evicted that were clean time (usecs)": 0, 458 | "forced eviction - pages evicted that were dirty count": 2, 459 | "forced eviction - pages evicted that were dirty time (usecs)": 56, 460 | "forced 
eviction - pages selected because of too many deleted items count": 0, 461 | "forced eviction - pages selected count": 2, 462 | "forced eviction - pages selected unable to be evicted count": 0, 463 | "forced eviction - pages selected unable to be evicted time": 0, 464 | "hazard pointer blocked page eviction": 0, 465 | "hazard pointer check calls": 2, 466 | "hazard pointer check entries walked": 0, 467 | "hazard pointer maximum array length": 0, 468 | "in-memory page passed criteria to be split": 0, 469 | "in-memory page splits": 0, 470 | "internal pages evicted": 0, 471 | "internal pages split during eviction": 0, 472 | "leaf pages split during eviction": 0, 473 | "maximum bytes configured": 16642998272, 474 | "maximum page size at eviction": 0, 475 | "modified pages evicted": 6, 476 | "modified pages evicted by application threads": 0, 477 | "operations timed out waiting for space in cache": 0, 478 | "overflow pages read into cache": 0, 479 | "page split during eviction deepened the tree": 0, 480 | "page written requiring cache overflow records": 0, 481 | "pages currently held in the cache": 59, 482 | "pages evicted by application threads": 0, 483 | "pages queued for eviction": 0, 484 | "pages queued for eviction post lru sorting": 0, 485 | "pages queued for urgent eviction": 0, 486 | "pages queued for urgent eviction during walk": 0, 487 | "pages read into cache": 0, 488 | "pages read into cache after truncate": 28, 489 | "pages read into cache after truncate in prepare state": 0, 490 | "pages read into cache requiring cache overflow entries": 0, 491 | "pages read into cache requiring cache overflow for checkpoint": 0, 492 | "pages read into cache skipping older cache overflow entries": 0, 493 | "pages read into cache with skipped cache overflow entries needed later": 0, 494 | "pages read into cache with skipped cache overflow entries needed later by checkpoint": 0, 495 | "pages requested from the cache": 10062, 496 | "pages seen by eviction walk": 0, 497 | 
"pages selected for eviction unable to be evicted": 0, 498 | "pages walked for eviction": 0, 499 | "pages written from cache": 188, 500 | "pages written requiring in-memory restoration": 2, 501 | "percentage overhead": 8, 502 | "tracked bytes belonging to internal pages in the cache": 13975, 503 | "tracked bytes belonging to leaf pages in the cache": 175130, 504 | "tracked dirty bytes in the cache": 45287, 505 | "tracked dirty pages in the cache": 7, 506 | "unmodified pages evicted": 0 507 | }, 508 | "capacity": { 509 | "background fsync file handles considered": 0, 510 | "background fsync file handles synced": 0, 511 | "background fsync time (msecs)": 0, 512 | "bytes read": 0, 513 | "bytes written for checkpoint": 635233, 514 | "bytes written for eviction": 0, 515 | "bytes written for log": 119040, 516 | "bytes written total": 754273, 517 | "threshold to call fsync": 0, 518 | "time waiting due to total capacity (usecs)": 0, 519 | "time waiting during checkpoint (usecs)": 0, 520 | "time waiting during eviction (usecs)": 0, 521 | "time waiting during logging (usecs)": 0, 522 | "time waiting during read (usecs)": 0 523 | }, 524 | "connection": { 525 | "auto adjusting condition resets": 232, 526 | "auto adjusting condition wait calls": 5840, 527 | "detected system time went backwards": 0, 528 | "files currently open": 34, 529 | "memory allocations": 94569, 530 | "memory frees": 91698, 531 | "memory re-allocations": 15557, 532 | "pthread mutex condition wait calls": 15263, 533 | "pthread mutex shared lock read-lock calls": 25898, 534 | "pthread mutex shared lock write-lock calls": 2844, 535 | "total fsync I/Os": 391, 536 | "total read I/Os": 138, 537 | "total write I/Os": 616 538 | }, 539 | "cursor": { 540 | "cached cursor count": 58, 541 | "cursor bulk loaded cursor insert calls": 0, 542 | "cursor close calls that result in cache": 2283, 543 | "cursor create calls": 2971, 544 | "cursor insert calls": 487, 545 | "cursor insert key and value bytes": 202080, 546 | 
"cursor modify calls": 1, 547 | "cursor modify key and value bytes affected": 109, 548 | "cursor modify value bytes modified": 45, 549 | "cursor next calls": 3382, 550 | "cursor operation restarted": 0, 551 | "cursor prev calls": 730, 552 | "cursor remove calls": 10, 553 | "cursor remove key bytes removed": 286, 554 | "cursor reserve calls": 0, 555 | "cursor reset calls": 11605, 556 | "cursor search calls": 6574, 557 | "cursor search near calls": 987, 558 | "cursor sweep buckets": 846, 559 | "cursor sweep cursors closed": 0, 560 | "cursor sweep cursors examined": 19, 561 | "cursor sweeps": 141, 562 | "cursor truncate calls": 0, 563 | "cursor update calls": 0, 564 | "cursor update key and value bytes": 0, 565 | "cursor update value size change": 0, 566 | "cursors reused from cache": 2199, 567 | "open cursor count": 35 568 | }, 569 | "data-handle": { 570 | "connection data handle size": 448, 571 | "connection data handles currently active": 62, 572 | "connection sweep candidate became referenced": 0, 573 | "connection sweep dhandles closed": 2, 574 | "connection sweep dhandles removed from hash list": 81, 575 | "connection sweep time-of-death sets": 333, 576 | "connection sweeps": 92, 577 | "session dhandles swept": 4, 578 | "session sweep attempts": 110 579 | }, 580 | "lock": { 581 | "checkpoint lock acquisitions": 16, 582 | "checkpoint lock application thread wait time (usecs)": 0, 583 | "checkpoint lock internal thread wait time (usecs)": 0, 584 | "dhandle lock application thread time waiting (usecs)": 0, 585 | "dhandle lock internal thread time waiting (usecs)": 0, 586 | "dhandle read lock acquisitions": 3918, 587 | "dhandle write lock acquisitions": 226, 588 | "durable timestamp queue lock application thread time waiting (usecs)": 0, 589 | "durable timestamp queue lock internal thread time waiting (usecs)": 0, 590 | "durable timestamp queue read lock acquisitions": 0, 591 | "durable timestamp queue write lock acquisitions": 121, 592 | "metadata lock 
acquisitions": 16, 593 | "metadata lock application thread wait time (usecs)": 0, 594 | "metadata lock internal thread wait time (usecs)": 0, 595 | "read timestamp queue lock application thread time waiting (usecs)": 0, 596 | "read timestamp queue lock internal thread time waiting (usecs)": 0, 597 | "read timestamp queue read lock acquisitions": 0, 598 | "read timestamp queue write lock acquisitions": 36, 599 | "schema lock acquisitions": 58, 600 | "schema lock application thread wait time (usecs)": 75937, 601 | "schema lock internal thread wait time (usecs)": 0, 602 | "table lock application thread time waiting for the table lock (usecs)": 0, 603 | "table lock internal thread time waiting for the table lock (usecs)": 0, 604 | "table read lock acquisitions": 0, 605 | "table write lock acquisitions": 941, 606 | "txn global lock application thread time waiting (usecs)": 0, 607 | "txn global lock internal thread time waiting (usecs)": 0, 608 | "txn global read lock acquisitions": 347, 609 | "txn global write lock acquisitions": 366 610 | }, 611 | "log": { 612 | "busy returns attempting to switch slots": 0, 613 | "force archive time sleeping (usecs)": 0, 614 | "log bytes of payload data": 87127, 615 | "log bytes written": 118912, 616 | "log files manually zero-filled": 0, 617 | "log flush operations": 9022, 618 | "log force write operations": 10024, 619 | "log force write operations skipped": 9882, 620 | "log records compressed": 62, 621 | "log records not compressed": 176, 622 | "log records too small to compress": 91, 623 | "log release advances write LSN": 51, 624 | "log scan operations": 0, 625 | "log scan records requiring two reads": 0, 626 | "log server thread advances write LSN": 142, 627 | "log server thread write LSN walk skipped": 2752, 628 | "log sync operations": 193, 629 | "log sync time duration (usecs)": 1429177, 630 | "log sync_dir operations": 1, 631 | "log sync_dir time duration (usecs)": 4860, 632 | "log write operations": 329, 633 | "logging bytes 
consolidated": 118400, 634 | "maximum log file size": 104857600, 635 | "number of pre-allocated log files to create": 2, 636 | "pre-allocated log files not ready and missed": 1, 637 | "pre-allocated log files prepared": 2, 638 | "pre-allocated log files used": 0, 639 | "records processed by log scan": 0, 640 | "slot close lost race": 0, 641 | "slot close unbuffered waits": 0, 642 | "slot closures": 193, 643 | "slot join atomic update races": 0, 644 | "slot join calls atomic updates raced": 0, 645 | "slot join calls did not yield": 329, 646 | "slot join calls found active slot closed": 0, 647 | "slot join calls slept": 0, 648 | "slot join calls yielded": 0, 649 | "slot join found active slot closed": 0, 650 | "slot joins yield time (usecs)": 0, 651 | "slot transitions unable to find free slot": 0, 652 | "slot unbuffered writes": 0, 653 | "total in-memory size of compressed records": 155334, 654 | "total log buffer size": 33554432, 655 | "total size of compressed records": 53781, 656 | "written slots coalesced": 0, 657 | "yields waiting for previous log file close": 0 658 | }, 659 | "perf": { 660 | "file system read latency histogram (bucket 1) - 10-49ms": 0, 661 | "file system read latency histogram (bucket 2) - 50-99ms": 0, 662 | "file system read latency histogram (bucket 3) - 100-249ms": 0, 663 | "file system read latency histogram (bucket 4) - 250-499ms": 0, 664 | "file system read latency histogram (bucket 5) - 500-999ms": 0, 665 | "file system read latency histogram (bucket 6) - 1000ms+": 0, 666 | "file system write latency histogram (bucket 1) - 10-49ms": 0, 667 | "file system write latency histogram (bucket 2) - 50-99ms": 0, 668 | "file system write latency histogram (bucket 3) - 100-249ms": 0, 669 | "file system write latency histogram (bucket 4) - 250-499ms": 0, 670 | "file system write latency histogram (bucket 5) - 500-999ms": 0, 671 | "file system write latency histogram (bucket 6) - 1000ms+": 0, 672 | "operation read latency histogram (bucket 1) - 
100-249us": 1, 673 | "operation read latency histogram (bucket 2) - 250-499us": 2, 674 | "operation read latency histogram (bucket 3) - 500-999us": 0, 675 | "operation read latency histogram (bucket 4) - 1000-9999us": 0, 676 | "operation read latency histogram (bucket 5) - 10000us+": 0, 677 | "operation write latency histogram (bucket 1) - 100-249us": 0, 678 | "operation write latency histogram (bucket 2) - 250-499us": 0, 679 | "operation write latency histogram (bucket 3) - 500-999us": 0, 680 | "operation write latency histogram (bucket 4) - 1000-9999us": 0, 681 | "operation write latency histogram (bucket 5) - 10000us+": 0 682 | }, 683 | "reconciliation": { 684 | "fast-path pages deleted": 0, 685 | "page reconciliation calls": 184, 686 | "page reconciliation calls for eviction": 4, 687 | "pages deleted": 6, 688 | "split bytes currently awaiting free": 0, 689 | "split objects currently awaiting free": 0 690 | }, 691 | "session": { 692 | "open session count": 22, 693 | "session query timestamp calls": 0, 694 | "table alter failed calls": 0, 695 | "table alter successful calls": 0, 696 | "table alter unchanged and skipped": 0, 697 | "table compact failed calls": 0, 698 | "table compact successful calls": 0, 699 | "table create failed calls": 0, 700 | "table create successful calls": 31, 701 | "table drop failed calls": 0, 702 | "table drop successful calls": 2, 703 | "table import failed calls": 0, 704 | "table import successful calls": 0, 705 | "table rebalance failed calls": 0, 706 | "table rebalance successful calls": 0, 707 | "table rename failed calls": 0, 708 | "table rename successful calls": 0, 709 | "table salvage failed calls": 0, 710 | "table salvage successful calls": 0, 711 | "table truncate failed calls": 0, 712 | "table truncate successful calls": 0, 713 | "table verify failed calls": 0, 714 | "table verify successful calls": 0 715 | }, 716 | "thread-state": { 717 | "active filesystem fsync calls": 0, 718 | "active filesystem read calls": 0, 719 | 
"active filesystem write calls": 0 720 | }, 721 | "thread-yield": { 722 | "application thread time evicting (usecs)": 0, 723 | "application thread time waiting for cache (usecs)": 0, 724 | "connection close blocked waiting for transaction state stabilization": 0, 725 | "connection close yielded for lsm manager shutdown": 0, 726 | "data handle lock yielded": 0, 727 | "get reference for page index and slot time sleeping (usecs)": 0, 728 | "log server sync yielded for log write": 0, 729 | "page access yielded due to prepare state change": 0, 730 | "page acquire busy blocked": 0, 731 | "page acquire eviction blocked": 0, 732 | "page acquire locked blocked": 0, 733 | "page acquire read blocked": 0, 734 | "page acquire time sleeping (usecs)": 0, 735 | "page delete rollback time sleeping for state change (usecs)": 0, 736 | "page reconciliation yielded due to child modification": 0 737 | }, 738 | "transaction": { 739 | "Number of prepared updates": 0, 740 | "Number of prepared updates added to cache overflow": 0, 741 | "Number of prepared updates resolved": 0, 742 | "durable timestamp queue entries walked": 15, 743 | "durable timestamp queue insert to empty": 106, 744 | "durable timestamp queue inserts to head": 13, 745 | "durable timestamp queue inserts total": 121, 746 | "durable timestamp queue length": 1, 747 | "number of named snapshots created": 0, 748 | "number of named snapshots dropped": 0, 749 | "prepared transactions": 0, 750 | "prepared transactions committed": 0, 751 | "prepared transactions currently active": 0, 752 | "prepared transactions rolled back": 0, 753 | "query timestamp calls": 3860, 754 | "read timestamp queue entries walked": 13, 755 | "read timestamp queue insert to empty": 23, 756 | "read timestamp queue inserts to head": 13, 757 | "read timestamp queue inserts total": 36, 758 | "read timestamp queue length": 1, 759 | "rollback to stable calls": 0, 760 | "rollback to stable updates aborted": 0, 761 | "rollback to stable updates removed from 
cache overflow": 0, 762 | "set timestamp calls": 133, 763 | "set timestamp durable calls": 0, 764 | "set timestamp durable updates": 0, 765 | "set timestamp oldest calls": 66, 766 | "set timestamp oldest updates": 66, 767 | "set timestamp stable calls": 67, 768 | "set timestamp stable updates": 66, 769 | "transaction begins": 3680, 770 | "transaction checkpoint currently running": 0, 771 | "transaction checkpoint generation": 17, 772 | "transaction checkpoint max time (msecs)": 147, 773 | "transaction checkpoint min time (msecs)": 42, 774 | "transaction checkpoint most recent time (msecs)": 79, 775 | "transaction checkpoint scrub dirty target": 0, 776 | "transaction checkpoint scrub time (msecs)": 0, 777 | "transaction checkpoint total time (msecs)": 1165, 778 | "transaction checkpoints": 16, 779 | "transaction checkpoints skipped because database was clean": 0, 780 | "transaction failures due to cache overflow": 0, 781 | "transaction fsync calls for checkpoint after allocating the transaction ID": 16, 782 | "transaction fsync duration for checkpoint after allocating the transaction ID (usecs)": 41832, 783 | "transaction range of IDs currently pinned": 0, 784 | "transaction range of IDs currently pinned by a checkpoint": 0, 785 | "transaction range of IDs currently pinned by named snapshots": 0, 786 | "transaction range of timestamps currently pinned": 1670742278147, 787 | "transaction range of timestamps pinned by a checkpoint": "6755069269739831300", 788 | "transaction range of timestamps pinned by the oldest active read timestamp": 0, 789 | "transaction range of timestamps pinned by the oldest timestamp": 1670742278147, 790 | "transaction read timestamp of the oldest active reader": 0, 791 | "transaction sync calls": 0, 792 | "transactions committed": 160, 793 | "transactions rolled back": 3520, 794 | "update conflicts": 0 795 | }, 796 | "concurrentTransactions": { 797 | "write": { 798 | "out": 0, 799 | "available": 128, 800 | "totalTickets": 128 801 | }, 802 | 
"read": { 803 | "out": 1, 804 | "available": 127, 805 | "totalTickets": 128 806 | } 807 | }, 808 | "snapshot-window-settings": { 809 | "cache pressure percentage threshold": 95, 810 | "current cache pressure percentage": 0, 811 | "total number of SnapshotTooOld errors": 0, 812 | "max target available snapshots window size in seconds": 5, 813 | "target available snapshots window size in seconds": 5, 814 | "current available snapshots window size in seconds": 5, 815 | "latest majority snapshot timestamp available": "Nov 3 21:12:22:1", 816 | "oldest majority snapshot timestamp available": "Nov 3 21:12:17:1" 817 | } 818 | }, 819 | "mem": { 820 | "bits": 64, 821 | "resident": 54, 822 | "virtual": 7024, 823 | "supported": true 824 | }, 825 | "metrics": { 826 | "commands": { 827 | "": 0, 828 | "_addShard": { 829 | "failed": 0, 830 | "total": 1 831 | }, 832 | "_cloneCatalogData": { 833 | "failed": 0, 834 | "total": 0 835 | }, 836 | "_cloneCollectionOptionsFromPrimaryShard": { 837 | "failed": 0, 838 | "total": 0 839 | }, 840 | "_configsvrAddShard": { 841 | "failed": 0, 842 | "total": 0 843 | }, 844 | "_configsvrAddShardToZone": { 845 | "failed": 0, 846 | "total": 0 847 | }, 848 | "_configsvrBalancerStart": { 849 | "failed": 0, 850 | "total": 0 851 | }, 852 | "_configsvrBalancerStatus": { 853 | "failed": 0, 854 | "total": 0 855 | }, 856 | "_configsvrBalancerStop": { 857 | "failed": 0, 858 | "total": 0 859 | }, 860 | "_configsvrCommitChunkMerge": { 861 | "failed": 0, 862 | "total": 0 863 | }, 864 | "_configsvrCommitChunkMigration": { 865 | "failed": 0, 866 | "total": 0 867 | }, 868 | "_configsvrCommitChunkSplit": { 869 | "failed": 0, 870 | "total": 0 871 | }, 872 | "_configsvrCommitMovePrimary": { 873 | "failed": 0, 874 | "total": 0 875 | }, 876 | "_configsvrCreateCollection": { 877 | "failed": 0, 878 | "total": 0 879 | }, 880 | "_configsvrCreateDatabase": { 881 | "failed": 0, 882 | "total": 0 883 | }, 884 | "_configsvrDropCollection": { 885 | "failed": 0, 886 | "total": 0 
887 | }, 888 | "_configsvrDropDatabase": { 889 | "failed": 0, 890 | "total": 0 891 | }, 892 | "_configsvrEnableSharding": { 893 | "failed": 0, 894 | "total": 0 895 | }, 896 | "_configsvrMoveChunk": { 897 | "failed": 0, 898 | "total": 0 899 | }, 900 | "_configsvrMovePrimary": { 901 | "failed": 0, 902 | "total": 0 903 | }, 904 | "_configsvrRemoveShard": { 905 | "failed": 0, 906 | "total": 0 907 | }, 908 | "_configsvrRemoveShardFromZone": { 909 | "failed": 0, 910 | "total": 0 911 | }, 912 | "_configsvrShardCollection": { 913 | "failed": 0, 914 | "total": 0 915 | }, 916 | "_configsvrUpdateZoneKeyRange": { 917 | "failed": 0, 918 | "total": 0 919 | }, 920 | "_flushDatabaseCacheUpdates": { 921 | "failed": 0, 922 | "total": 1 923 | }, 924 | "_flushRoutingTableCacheUpdates": { 925 | "failed": 0, 926 | "total": 1 927 | }, 928 | "_getNextSessionMods": { 929 | "failed": 0, 930 | "total": 0 931 | }, 932 | "_getUserCacheGeneration": { 933 | "failed": 0, 934 | "total": 0 935 | }, 936 | "_isSelf": { 937 | "failed": 0, 938 | "total": 2 939 | }, 940 | "_mergeAuthzCollections": { 941 | "failed": 0, 942 | "total": 0 943 | }, 944 | "_migrateClone": { 945 | "failed": 0, 946 | "total": 0 947 | }, 948 | "_movePrimary": { 949 | "failed": 0, 950 | "total": 0 951 | }, 952 | "_recvChunkAbort": { 953 | "failed": 0, 954 | "total": 0 955 | }, 956 | "_recvChunkCommit": { 957 | "failed": 0, 958 | "total": 0 959 | }, 960 | "_recvChunkStart": { 961 | "failed": 0, 962 | "total": 0 963 | }, 964 | "_recvChunkStatus": { 965 | "failed": 0, 966 | "total": 0 967 | }, 968 | "_shardsvrShardCollection": { 969 | "failed": 0, 970 | "total": 1 971 | }, 972 | "_transferMods": { 973 | "failed": 0, 974 | "total": 0 975 | }, 976 | "abortTransaction": { 977 | "failed": 0, 978 | "total": 0 979 | }, 980 | "aggregate": { 981 | "failed": 0, 982 | "total": 0 983 | }, 984 | "appendOplogNote": { 985 | "failed": 0, 986 | "total": 0 987 | }, 988 | "applyOps": { 989 | "failed": 0, 990 | "total": 0 991 | }, 992 | 
"authenticate": { 993 | "failed": 0, 994 | "total": 0 995 | }, 996 | "availableQueryOptions": { 997 | "failed": 0, 998 | "total": 1 999 | }, 1000 | "buildInfo": { 1001 | "failed": 0, 1002 | "total": 2 1003 | }, 1004 | "checkShardingIndex": { 1005 | "failed": 0, 1006 | "total": 0 1007 | }, 1008 | "cleanupOrphaned": { 1009 | "failed": 0, 1010 | "total": 0 1011 | }, 1012 | "cloneCollection": { 1013 | "failed": 0, 1014 | "total": 0 1015 | }, 1016 | "cloneCollectionAsCapped": { 1017 | "failed": 0, 1018 | "total": 0 1019 | }, 1020 | "collMod": { 1021 | "failed": 0, 1022 | "total": 0 1023 | }, 1024 | "collStats": { 1025 | "failed": 0, 1026 | "total": 0 1027 | }, 1028 | "commitTransaction": { 1029 | "failed": 0, 1030 | "total": 0 1031 | }, 1032 | "compact": { 1033 | "failed": 0, 1034 | "total": 0 1035 | }, 1036 | "connPoolStats": { 1037 | "failed": 0, 1038 | "total": 0 1039 | }, 1040 | "connPoolSync": { 1041 | "failed": 0, 1042 | "total": 0 1043 | }, 1044 | "connectionStatus": { 1045 | "failed": 0, 1046 | "total": 0 1047 | }, 1048 | "convertToCapped": { 1049 | "failed": 0, 1050 | "total": 0 1051 | }, 1052 | "coordinateCommitTransaction": { 1053 | "failed": 0, 1054 | "total": 0 1055 | }, 1056 | "count": { 1057 | "failed": 0, 1058 | "total": 1 1059 | }, 1060 | "create": { 1061 | "failed": 0, 1062 | "total": 0 1063 | }, 1064 | "createIndexes": { 1065 | "failed": 0, 1066 | "total": 10 1067 | }, 1068 | "createRole": { 1069 | "failed": 0, 1070 | "total": 0 1071 | }, 1072 | "createUser": { 1073 | "failed": 0, 1074 | "total": 0 1075 | }, 1076 | "currentOp": { 1077 | "failed": 0, 1078 | "total": 0 1079 | }, 1080 | "dataSize": { 1081 | "failed": 0, 1082 | "total": 0 1083 | }, 1084 | "dbHash": { 1085 | "failed": 0, 1086 | "total": 0 1087 | }, 1088 | "dbStats": { 1089 | "failed": 0, 1090 | "total": 0 1091 | }, 1092 | "delete": { 1093 | "failed": 0, 1094 | "total": 6 1095 | }, 1096 | "distinct": { 1097 | "failed": 0, 1098 | "total": 0 1099 | }, 1100 | "driverOIDTest": { 1101 | 
"failed": 0, 1102 | "total": 0 1103 | }, 1104 | "drop": { 1105 | "failed": 1, 1106 | "total": 1 1107 | }, 1108 | "dropAllRolesFromDatabase": { 1109 | "failed": 0, 1110 | "total": 0 1111 | }, 1112 | "dropAllUsersFromDatabase": { 1113 | "failed": 0, 1114 | "total": 0 1115 | }, 1116 | "dropConnections": { 1117 | "failed": 0, 1118 | "total": 0 1119 | }, 1120 | "dropDatabase": { 1121 | "failed": 0, 1122 | "total": 0 1123 | }, 1124 | "dropIndexes": { 1125 | "failed": 0, 1126 | "total": 0 1127 | }, 1128 | "dropRole": { 1129 | "failed": 0, 1130 | "total": 0 1131 | }, 1132 | "dropUser": { 1133 | "failed": 0, 1134 | "total": 0 1135 | }, 1136 | "endSessions": { 1137 | "failed": 0, 1138 | "total": 477 1139 | }, 1140 | "explain": { 1141 | "failed": 0, 1142 | "total": 0 1143 | }, 1144 | "features": { 1145 | "failed": 0, 1146 | "total": 88 1147 | }, 1148 | "filemd5": { 1149 | "failed": 0, 1150 | "total": 0 1151 | }, 1152 | "find": { 1153 | "failed": 0, 1154 | "total": 38 1155 | }, 1156 | "findAndModify": { 1157 | "failed": 0, 1158 | "total": 0 1159 | }, 1160 | "flushRouterConfig": { 1161 | "failed": 0, 1162 | "total": 0 1163 | }, 1164 | "fsync": { 1165 | "failed": 0, 1166 | "total": 0 1167 | }, 1168 | "fsyncUnlock": { 1169 | "failed": 0, 1170 | "total": 0 1171 | }, 1172 | "geoSearch": { 1173 | "failed": 0, 1174 | "total": 0 1175 | }, 1176 | "getCmdLineOpts": { 1177 | "failed": 0, 1178 | "total": 0 1179 | }, 1180 | "getDatabaseVersion": { 1181 | "failed": 0, 1182 | "total": 0 1183 | }, 1184 | "getDiagnosticData": { 1185 | "failed": 0, 1186 | "total": 0 1187 | }, 1188 | "getFreeMonitoringStatus": { 1189 | "failed": 0, 1190 | "total": 0 1191 | }, 1192 | "getLastError": { 1193 | "failed": 0, 1194 | "total": 0 1195 | }, 1196 | "getLog": { 1197 | "failed": 0, 1198 | "total": 0 1199 | }, 1200 | "getMore": { 1201 | "failed": 0, 1202 | "total": 185 1203 | }, 1204 | "getParameter": { 1205 | "failed": 0, 1206 | "total": 0 1207 | }, 1208 | "getShardMap": { 1209 | "failed": 0, 1210 | "total": 
0 1211 | }, 1212 | "getShardVersion": { 1213 | "failed": 0, 1214 | "total": 0 1215 | }, 1216 | "getnonce": { 1217 | "failed": 0, 1218 | "total": 0 1219 | }, 1220 | "grantPrivilegesToRole": { 1221 | "failed": 0, 1222 | "total": 0 1223 | }, 1224 | "grantRolesToRole": { 1225 | "failed": 0, 1226 | "total": 0 1227 | }, 1228 | "grantRolesToUser": { 1229 | "failed": 0, 1230 | "total": 0 1231 | }, 1232 | "hostInfo": { 1233 | "failed": 0, 1234 | "total": 0 1235 | }, 1236 | "insert": { 1237 | "failed": 0, 1238 | "total": 1 1239 | }, 1240 | "invalidateUserCache": { 1241 | "failed": 0, 1242 | "total": 0 1243 | }, 1244 | "isMaster": { 1245 | "failed": 0, 1246 | "total": 931 1247 | }, 1248 | "killAllSessions": { 1249 | "failed": 0, 1250 | "total": 0 1251 | }, 1252 | "killAllSessionsByPattern": { 1253 | "failed": 0, 1254 | "total": 0 1255 | }, 1256 | "killCursors": { 1257 | "failed": 0, 1258 | "total": 0 1259 | }, 1260 | "killOp": { 1261 | "failed": 0, 1262 | "total": 0 1263 | }, 1264 | "killSessions": { 1265 | "failed": 0, 1266 | "total": 0 1267 | }, 1268 | "listCollections": { 1269 | "failed": 0, 1270 | "total": 3 1271 | }, 1272 | "listCommands": { 1273 | "failed": 0, 1274 | "total": 0 1275 | }, 1276 | "listDatabases": { 1277 | "failed": 0, 1278 | "total": 2 1279 | }, 1280 | "listIndexes": { 1281 | "failed": 1, 1282 | "total": 2 1283 | }, 1284 | "lockInfo": { 1285 | "failed": 0, 1286 | "total": 0 1287 | }, 1288 | "logRotate": { 1289 | "failed": 0, 1290 | "total": 0 1291 | }, 1292 | "logout": { 1293 | "failed": 0, 1294 | "total": 0 1295 | }, 1296 | "mapReduce": { 1297 | "failed": 0, 1298 | "total": 0 1299 | }, 1300 | "mapreduce": { 1301 | "shardedfinish": { 1302 | "failed": 0, 1303 | "total": 0 1304 | } 1305 | }, 1306 | "mergeChunks": { 1307 | "failed": 0, 1308 | "total": 0 1309 | }, 1310 | "moveChunk": { 1311 | "failed": 0, 1312 | "total": 0 1313 | }, 1314 | "ping": { 1315 | "failed": 0, 1316 | "total": 0 1317 | }, 1318 | "planCacheClear": { 1319 | "failed": 0, 1320 | "total": 
0 1321 | }, 1322 | "planCacheClearFilters": { 1323 | "failed": 0, 1324 | "total": 0 1325 | }, 1326 | "planCacheListFilters": { 1327 | "failed": 0, 1328 | "total": 0 1329 | }, 1330 | "planCacheListPlans": { 1331 | "failed": 0, 1332 | "total": 0 1333 | }, 1334 | "planCacheListQueryShapes": { 1335 | "failed": 0, 1336 | "total": 0 1337 | }, 1338 | "planCacheSetFilter": { 1339 | "failed": 0, 1340 | "total": 0 1341 | }, 1342 | "prepareTransaction": { 1343 | "failed": 0, 1344 | "total": 0 1345 | }, 1346 | "profile": { 1347 | "failed": 0, 1348 | "total": 0 1349 | }, 1350 | "reIndex": { 1351 | "failed": 0, 1352 | "total": 0 1353 | }, 1354 | "refreshSessions": { 1355 | "failed": 0, 1356 | "total": 0 1357 | }, 1358 | "renameCollection": { 1359 | "failed": 0, 1360 | "total": 0 1361 | }, 1362 | "repairCursor": { 1363 | "failed": 0, 1364 | "total": 0 1365 | }, 1366 | "repairDatabase": { 1367 | "failed": 0, 1368 | "total": 0 1369 | }, 1370 | "replSetAbortPrimaryCatchUp": { 1371 | "failed": 0, 1372 | "total": 0 1373 | }, 1374 | "replSetFreeze": { 1375 | "failed": 0, 1376 | "total": 0 1377 | }, 1378 | "replSetGetConfig": { 1379 | "failed": 0, 1380 | "total": 0 1381 | }, 1382 | "replSetGetRBID": { 1383 | "failed": 0, 1384 | "total": 3 1385 | }, 1386 | "replSetGetStatus": { 1387 | "failed": 1, 1388 | "total": 481 1389 | }, 1390 | "replSetHeartbeat": { 1391 | "failed": 0, 1392 | "total": 730 1393 | }, 1394 | "replSetInitiate": { 1395 | "failed": 0, 1396 | "total": 1 1397 | }, 1398 | "replSetMaintenance": { 1399 | "failed": 0, 1400 | "total": 0 1401 | }, 1402 | "replSetReconfig": { 1403 | "failed": 0, 1404 | "total": 0 1405 | }, 1406 | "replSetRequestVotes": { 1407 | "failed": 0, 1408 | "total": 0 1409 | }, 1410 | "replSetResizeOplog": { 1411 | "failed": 0, 1412 | "total": 0 1413 | }, 1414 | "replSetStepDown": { 1415 | "failed": 0, 1416 | "total": 0 1417 | }, 1418 | "replSetStepDownWithForce": { 1419 | "failed": 0, 1420 | "total": 0 1421 | }, 1422 | "replSetStepUp": { 1423 | "failed": 
0, 1424 | "total": 0 1425 | }, 1426 | "replSetSyncFrom": { 1427 | "failed": 0, 1428 | "total": 0 1429 | }, 1430 | "replSetUpdatePosition": { 1431 | "failed": 0, 1432 | "total": 189 1433 | }, 1434 | "resetError": { 1435 | "failed": 0, 1436 | "total": 0 1437 | }, 1438 | "revokePrivilegesFromRole": { 1439 | "failed": 0, 1440 | "total": 0 1441 | }, 1442 | "revokeRolesFromRole": { 1443 | "failed": 0, 1444 | "total": 0 1445 | }, 1446 | "revokeRolesFromUser": { 1447 | "failed": 0, 1448 | "total": 0 1449 | }, 1450 | "rolesInfo": { 1451 | "failed": 0, 1452 | "total": 0 1453 | }, 1454 | "saslContinue": { 1455 | "failed": 0, 1456 | "total": 0 1457 | }, 1458 | "saslStart": { 1459 | "failed": 0, 1460 | "total": 0 1461 | }, 1462 | "serverStatus": { 1463 | "failed": 0, 1464 | "total": 209 1465 | }, 1466 | "setFeatureCompatibilityVersion": { 1467 | "failed": 0, 1468 | "total": 1 1469 | }, 1470 | "setFreeMonitoring": { 1471 | "failed": 0, 1472 | "total": 0 1473 | }, 1474 | "setIndexCommitQuorum": { 1475 | "failed": 0, 1476 | "total": 0 1477 | }, 1478 | "setParameter": { 1479 | "failed": 0, 1480 | "total": 0 1481 | }, 1482 | "setShardVersion": { 1483 | "failed": 0, 1484 | "total": 0 1485 | }, 1486 | "shardConnPoolStats": { 1487 | "failed": 0, 1488 | "total": 0 1489 | }, 1490 | "shardingState": { 1491 | "failed": 0, 1492 | "total": 0 1493 | }, 1494 | "shutdown": { 1495 | "failed": 0, 1496 | "total": 0 1497 | }, 1498 | "splitChunk": { 1499 | "failed": 0, 1500 | "total": 0 1501 | }, 1502 | "splitVector": { 1503 | "failed": 0, 1504 | "total": 0 1505 | }, 1506 | "startRecordingTraffic": { 1507 | "failed": 0, 1508 | "total": 0 1509 | }, 1510 | "startSession": { 1511 | "failed": 0, 1512 | "total": 0 1513 | }, 1514 | "stopRecordingTraffic": { 1515 | "failed": 0, 1516 | "total": 0 1517 | }, 1518 | "top": { 1519 | "failed": 0, 1520 | "total": 0 1521 | }, 1522 | "touch": { 1523 | "failed": 0, 1524 | "total": 0 1525 | }, 1526 | "twoPhaseCreateIndexes": { 1527 | "failed": 0, 1528 | "total": 0 
1529 | }, 1530 | "unsetSharding": { 1531 | "failed": 0, 1532 | "total": 0 1533 | }, 1534 | "update": { 1535 | "failed": 0, 1536 | "total": 14 1537 | }, 1538 | "updateRole": { 1539 | "failed": 0, 1540 | "total": 0 1541 | }, 1542 | "updateUser": { 1543 | "failed": 0, 1544 | "total": 0 1545 | }, 1546 | "usersInfo": { 1547 | "failed": 0, 1548 | "total": 0 1549 | }, 1550 | "validate": { 1551 | "failed": 0, 1552 | "total": 0 1553 | }, 1554 | "voteCommitIndexBuild": { 1555 | "failed": 0, 1556 | "total": 0 1557 | }, 1558 | "whatsmyuri": { 1559 | "failed": 0, 1560 | "total": 1 1561 | } 1562 | }, 1563 | "cursor": { 1564 | "timedOut": 1, 1565 | "open": { 1566 | "noTimeout": 0, 1567 | "pinned": 0, 1568 | "total": 1 1569 | } 1570 | }, 1571 | "document": { 1572 | "deleted": 0, 1573 | "inserted": 1, 1574 | "returned": 95, 1575 | "updated": 8 1576 | }, 1577 | "getLastError": { 1578 | "wtime": { 1579 | "num": 18, 1580 | "totalMillis": 75127 1581 | }, 1582 | "wtimeouts": 5 1583 | }, 1584 | "operation": { 1585 | "scanAndOrder": 0, 1586 | "writeConflicts": 0 1587 | }, 1588 | "query": { 1589 | "planCacheTotalSizeEstimateBytes": 0, 1590 | "updateOneOpStyleBroadcastWithExactIDCount": 0 1591 | }, 1592 | "queryExecutor": { 1593 | "scanned": 29, 1594 | "scannedObjects": 103 1595 | }, 1596 | "record": { 1597 | "moves": 0 1598 | }, 1599 | "repl": { 1600 | "executor": { 1601 | "pool": { 1602 | "inProgressCount": 0 1603 | }, 1604 | "queues": { 1605 | "networkInProgress": 0, 1606 | "sleepers": 3 1607 | }, 1608 | "unsignaledEvents": 0, 1609 | "shuttingDown": false, 1610 | "networkInterface": "DEPRECATED: getDiagnosticString is deprecated in NetworkInterfaceTL" 1611 | }, 1612 | "apply": { 1613 | "attemptsToBecomeSecondary": 1, 1614 | "batchSize": 0, 1615 | "batches": { 1616 | "num": 0, 1617 | "totalMillis": 0 1618 | }, 1619 | "ops": 0 1620 | }, 1621 | "buffer": { 1622 | "count": 0, 1623 | "maxSizeBytes": 268435456, 1624 | "sizeBytes": 0 1625 | }, 1626 | "initialSync": { 1627 | "completed": 0, 1628 
| "failedAttempts": 0, 1629 | "failures": 0 1630 | }, 1631 | "network": { 1632 | "bytes": 0, 1633 | "getmores": { 1634 | "num": 0, 1635 | "totalMillis": 0 1636 | }, 1637 | "notMasterLegacyUnacknowledgedWrites": 0, 1638 | "notMasterUnacknowledgedWrites": 0, 1639 | "ops": 0, 1640 | "readersCreated": 0 1641 | }, 1642 | "stepDown": { 1643 | "userOperationsKilled": 0, 1644 | "userOperationsRunning": 0 1645 | } 1646 | }, 1647 | "ttl": { 1648 | "deletedDocuments": 0, 1649 | "passes": 15 1650 | } 1651 | }, 1652 | "ok": 1, 1653 | "$gleStats": { 1654 | "lastOpTime": "0", 1655 | "electionId": "7fffffff0000000000000001" 1656 | }, 1657 | "lastCommittedOpTime": "6755067620472389633", 1658 | "$configServerState": { 1659 | "opTime": { 1660 | "ts": "6755069243970027521", 1661 | "t": 1 1662 | } 1663 | }, 1664 | "$clusterTime": { 1665 | "clusterTime": "6755069269739831300", 1666 | "signature": { 1667 | "hash": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 1668 | "keyId": 0 1669 | } 1670 | }, 1671 | "operationTime": "6755069269739831300" 1672 | } 1673 | --------------------------------------------------------------------------------