├── ExplainPlanSteps.md ├── README.md ├── examples ├── Example_ServerStats.js └── example.out ├── misc ├── alarms.json └── metrics.js ├── mongoTuning.js ├── sampleData ├── customerDOBFix.js ├── customersDateOfBirth.js ├── growCustomers.js └── modCustomers.js └── scripts ├── .eslintrc.yml ├── .gitignore ├── CurrentOp.js ├── Explain.js ├── QueryProfiler.js ├── ServerStats.js ├── Views.js ├── compact.js └── package.json /ExplainPlanSteps.md: -------------------------------------------------------------------------------- 1 | 2 | **MongoDB Explain() plan steps** 3 | 4 | Interpreting explain plans is a core competency for any MongoDB 5 | performance practitioner, and we've seen many examples of explain plans 6 | in the book. In this appendix, we list all of the individual step 7 | definitions that we are aware of and give a brief explanation of each. 8 | 9 | |Step|Description| 10 | |--- | -------------------------------------------------- | 11 | |**AND_HASH** | Merge two or more index outputs for an index intersection plan. 12 | |**AND_SORTED** |Merge two or more index outputs for an index intersection plan. 13 | |**CACHED_PLAN**| Indicates that a plan was retrieved from the plan cache rather than from a real-time optimizer action. 14 | | **COLLSCAN** | Read every document in the collection. 15 | | **COUNT** | Count the documents supplied by the input step. 16 | | **COUNT_SCAN** | A quick count of the documents returned by an index scan. 17 | | **DELETE** | Documents are being deleted. 18 | | **DISTINCT_SCAN** | An index scan that returns only unique values for a key. 19 | | **ENSURE_SORTED** | Checks the output of the previous stage to ensure that it is in the expected sorted order. 20 | | **EOF** | Usually means that the collection queried does not exist. 21 | | **FETCH**| Get documents from a collection. This usually occurs after an index scan when additional attributes are required for filtering or projecting. 22 | | **GEO_NEAR_2D**| Get documents using a geospatial query against a 2d index to calculate geometries. Intended for legacy coordinate pairs in earlier versions of MongoDB. 23 | | **GEO_NEAR_2DSPHERE** | Get documents using a geospatial query against a 2dsphere index to calculate geometries on a sphere. More commonly found in modern versions of MongoDB. 24 | | **IDHACK** | Get a document using the "\_id" attribute. 25 | | **IXSCAN** | An index is scanned looking for matching documents or to return documents in sorted order. 26 | | **LIMIT** | Restricts the number of documents passed to subsequent stages. 27 | | **MOCK** | Used only in unit tests to mock results for testing. 28 | | **MULTI_ITERATOR** | Iterates over a collection. This stage is commonly seen in \$sample stages of aggregations. 29 | | **MULTI_PLAN** | Multiple query plans were evaluated during command execution. 30 | | **OR** | Two results were merged for an \$or operation -- usually associated with an index merge. 31 | | **PROJECTION_COVERED** | An explicit projection which is supported by an index scan (e.g., for a "covered" index query). 32 | | **PROJECTION_DEFAULT** | A "default" projection, where no explicit projection is requested. 33 | | **PROJECTION_SIMPLE** | An explicit projection which is supported by a collection access -- usually preceded by a FETCH or COLLSCAN step. 34 | | **QUEUED_DATA** | Usually associated with a GetMore operation that retrieves data from an open cursor. 
35 | | **RECORD_STORE_FAST_COUNT** | Access the MongoDB "fast count" value for a collection -- to avoid having to scan the collection when a document count is requested. 36 | | **RETURN_KEY** | Indicates that the returnKey() modifier was issued, to return index key values only. 37 | |**SHARD_MERGE** | Data from multiple shards was merged. 38 | | **SHARDING_FILTER** | Data is returned from an individual shard to the mongos process. 39 | | **SKIP** | Documents were skipped. 40 | | **SORT** | Documents from the previous step were sorted. 41 | | **SORT_KEY_GENERATOR** | Keys from the previous step are extracted to be fed into a subsequent sort step. 42 | | **SORT_MERGE** | An index merge in which the results from multiple index scans are sorted and merged. 43 | | **SUBPLAN** | The outputs of multiple query plans (usually index scans) are combined to support an \$or operation. 44 | | **TEXT** | Returns documents that have resulted from a text search. 45 | | **TEXT_MATCH** | Returns any documents output from the previous stage matching a given text search query. 46 | | **TEXT_OR** | Returns documents containing positive terms in a text search query, along with their scores. 47 | | **TRIAL** | Stage for choosing between two alternate plans based on an initial trial period. 48 | | **UPDATE** | Documents are updated. 49 | 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MongoDB Performance Tuning Book 2 | 3 | ## Usage 4 | 5 | This repository contains helper scripts and examples for the Apress book "MongoDB Performance Tuning" (https://www.apress.com/us/book/9781484268780). 6 | 7 | The master script [mongoTuning.js](mongoTuning.js) provides access to all these scripts from within a MongoDB shell session. To load them, issue the mongo command with the script name as an argument and add the '--shell' option, for example: 8 | 9 | ``` 10 | $ mongo --shell mongoTuning.js 11 | MongoDB shell version v4.2.0 12 | connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb 13 | 14 | MongoDB server version: 4.2.0 15 | 16 | rs0:PRIMARY> 17 | ``` 18 | 19 | ## Contents 20 | 21 | - [mongoTuning.js](mongoTuning.js) is a master script, compiling a number of helper functions into a single object that is used throughout the book. 22 | - The [examples](examples) folder contains some example scripts for using some of our helper functions, along with sample output. 23 | - The [sampleData](sampleData) directory contains all the data used in our examples as a compressed dump file. Instructions on how to load the data can be found in the same folder. 24 | - The [scripts](scripts) directory contains all the individual scripts which together make up the master script (_mongoTuning.js_), along with some additional scripts that may not be used. 25 | - The [misc](misc) directory contains files and data that are referenced in the book but not used directly by the scripts, for example some sample alarms and metric calculations that readers may find useful. 26 | - [ExplainPlanSteps.md](ExplainPlanSteps.md) contains a breakdown of the various stages you may encounter in an explain plan, along with a simple explanation of each stage. 27 | 28 | ## Contact Us 29 | 30 | If you have any queries about this repository, please contact Guy at or Mike at . 
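## Quick Example

Once the shell has started with mongoTuning.js loaded, you can produce a condensed execution plan and sample derived server statistics directly from the session. The snippet below is a minimal sketch only: it assumes the sample dataset from [sampleData](sampleData) has been restored into the MongoDBTuningBook database, and the filter value is purely illustrative.

```
rs0:PRIMARY> use MongoDBTuningBook
rs0:PRIMARY> var exp = db.customers.find({ LastName: 'SMITH' }).explain('executionStats');
rs0:PRIMARY> mongoTuning.executionStats(exp);
rs0:PRIMARY> mongoTuning.monitorServerDerived(5000, /LatencyMs/);
```

`executionStats()` prints an indented summary of each plan step together with keys and documents examined, while `monitorServerDerived()` samples the server for the given number of milliseconds and returns any derived statistics whose names match the optional regular expression.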
31 | -------------------------------------------------------------------------------- /examples/Example_ServerStats.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Example script which uses the Server Statistics sub-script to gather some performance data. 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:49:07+10:00 8 | * 9 | */ 10 | load('../scripts/ServerStats.js'); 11 | var sample1 = mongoTuning.serverStatSample(); 12 | sleep(60000); 13 | var sample2 = mongoTuning.serverStatSample(); 14 | mongoTuning.serverStatSummary(sample1, sample2); 15 | var deltas = mongoTuning.serverStatDeltas(sample1, sample2); 16 | deltas['opcounters.query']; 17 | mongoTuning.serverStatSearch(deltas, /opLatencies.writes/); 18 | -------------------------------------------------------------------------------- /examples/example.out: -------------------------------------------------------------------------------- 1 | Centos8(mongod-4.2.6) MongoDBTuningBook> var sample1=mongoTuning.serverStatSample() 2 | Centos8(mongod-4.2.6) MongoDBTuningBook> sleep(60000) 3 | 4 | Centos8(mongod-4.2.6) MongoDBTuningBook> var sample2=mongoTuning.serverStatSample(); 5 | Centos8(mongod-4.2.6) MongoDBTuningBook> mongoTuning.ServerStatSummary(sample1,sample2); 6 | { 7 | "netKBInPS": "1789.5788", 8 | "netKBOutPS": "4642.3350", 9 | "intervalSeconds": 60, 10 | "queryPS": "3562.6167", 11 | "getmorePS": 0, 12 | "commandPS": "0.2500", 13 | "insertPS": 0, 14 | "updatePS": "3557.8000", 15 | "deletePS": 0, 16 | "readLatencyMs": "0.7714", 17 | "writeLatencyMs": "1.3141", 18 | "cmdLatencyMs": "4.6339", 19 | "connections": 102, 20 | "availableConnections": 51098, 21 | "assertsPS": "0.0333", 22 | "cacheGetsPS": "28120.0500", 23 | "cacheHighWaterMB": 3462, 24 | "cacheSizeMB": "563.6672", 25 | "diskBlockReadsPS": "0.0333", 26 | "diskBlockWritesPS": "5.8000", 27 | "logKBRatePS": "1206.8542", 28 | "logSyncTimeRateMsPS": "151.0971" 29 | } 30 | Centos8(mongod-4.2.6) MongoDBTuningBook> var deltas=mongoTuning.serverStatDeltas(sample1,sample2); 31 | Centos8(mongod-4.2.6) MongoDBTuningBook> mongoTuning.searchSample(deltas,/optLatencies.writes/) 32 | { 33 | 34 | } 35 | Centos8(mongod-4.2.6) MongoDBTuningBook> mongoTuning.searchSample(deltas,/opLatencies.writes/) 36 | { 37 | "opLatencies.writes.latency": { 38 | "lastValue": 3618866612, 39 | "firstValue": 3338357289, 40 | "delta": 280509323, 41 | "rate": 4675155.383333334 42 | }, 43 | "opLatencies.writes.ops": { 44 | "lastValue": 8548962, 45 | "firstValue": 8335496, 46 | "delta": 213466, 47 | "rate": 3557.766666666667 48 | } 49 | } -------------------------------------------------------------------------------- /misc/alarms.json: -------------------------------------------------------------------------------- 1 | { 2 | "simpleThresholds": [ 3 | { 4 | "metric": "query_scanToDocRatio", 5 | "l1": 100, 6 | "l2": 1000, 7 | "alarmPath": "alarm.mongo.scanToDocRatio", 8 | "warningMessage": "%d documents scanned for every document returned: review Indexes and slow queries", 9 | "notes": "large scans - enronloop.js for instance will cause this alarm" 10 | }, 11 | { 12 | "metric": "connections_inusePct", 13 | "l1": 80, 14 | "l2": 90, 15 | "alarmPath": "alarm.mongo.connectionsInUse", 16 | "warningMessage": "%d%% of connections are in use", 17 | "notes": "use makeconnections to cause this 
alarm" 18 | }, 19 | { 20 | "metric": "queue_queuedPct", 21 | "l1": 20, 22 | "l2": 90, 23 | "alarmPath": "alarm.mongo.queue_queuedPct", 24 | "warningMessage": "%d%% of read write operations are queued. Possible lock contention", 25 | "notes": "inccounter.sh with 500+ connections may cause this alarm" 26 | }, 27 | { 28 | "metric": "wtTransactions_readPct", 29 | "l1": 50, 30 | "l2": 90, 31 | "alarmPath": "alarm.wiredtiger.wtTransactions_readPct", 32 | "warningMessage": "%d%% of wiredTiger read transaction tickets are in use. Consider increasing wiredTigerConcurrentReadTransactions", 33 | "notes": "inccounter.sh with 500+ connections may cause this alarm. Also you can reduce the number of tickets with wiredTigerConcurrentReadTransactions" 34 | }, 35 | { 36 | "metric": "wtTransactions_writePct", 37 | "l1": 50, 38 | "l2": 90, 39 | "alarmPath": "alarm.wiredtiger.wtTransactions_writePct", 40 | "warningMessage": "%d%% of wiredTiger write transaction tickets are in use. Consider increasing wiredTigerConcurrentWriteTransactions ", 41 | "notes": "inccounter.sh with 500+ connections may cause this alarm. Also you can reduce the number of tickets with wiredTigerConcurrentWriteTransactions" 42 | }, 43 | { 44 | "metric": "wtIO_logSyncLatencyUs", 45 | "l1": 10000, 46 | "l2": 50000, 47 | "alarmPath": "alarm.disk.wtIO_logSyncLatencyUs", 48 | "warningMessage": "wiredTiger log (sync) writes are taking %d microseconds on average. Consider tuning disk layout/type", 49 | "notes": "This alarm should fire under moderate load on our underpowered system" 50 | }, 51 | { 52 | "metric": "wtIO_writeLatencyUs", 53 | "l1": 2000, 54 | "l2": 10000, 55 | "alarmPath": "alarm.disk.wtIO_writeLatencyUs", 56 | "warningMessage": "wiredTiger disk writes are taking %d microseconds on average. Consider tuning disk layout/type", 57 | "notes": "This alarm should fire under moderate load on our underpowered system" 58 | }, 59 | { 60 | "metric": "wtIO_readLatencyUs", 61 | "l1": 2000, 62 | "l2": 10000, 63 | "alarmPath": "alarm.disk.wtIO_readLatencyUs", 64 | "warningMessage": "wiredTiger disk reads are taking %d microseconds on average. Consider tuning disk layout/type", 65 | "notes": "This alarm should fire under moderate load on our underpowered system" 66 | }, 67 | { 68 | "metric": "wtCache_MissPct", 69 | "l1": 40, 70 | "l2": 80, 71 | "alarmPath": "alarm.wiredtiger.wtCache_MissPct", 72 | "warningMessage": "Required data is not found in wiredTiger cache in %d%% of requests. 
Consider increasing cache size", 73 | "notes": "This alarm should fire under moderate load providing you reduce the wiredTiger cache size" 74 | } 75 | ], 76 | "standardDeviationThresholds": { 77 | "minimumSamples": 20, 78 | "thresholds": [ 79 | { 80 | "metric": "connections_current", 81 | "l1": 3, 82 | "l2": 4, 83 | "alarmPath": "alarm.mongo.connections_current", 84 | "warningMessage": "You have an unusually high number of connections (mean=%.2f,sd=%.2f, current=%d)", 85 | "notes": "Use node makeconnections.js to fire" 86 | }, 87 | { 88 | "metric": "latency_readWaitUsPs", 89 | "l1": 2, 90 | "l2": 3, 91 | "alarmPath": "alarm.mongo.connections_current", 92 | "warningMessage": "Connections are spending an unusually large amount of time waiting for reads (mean=%.2f,sd=%.2f, current=%d us/s)", 93 | "notes": "randCrud.js, randQry.js enronloop2.js" 94 | }, 95 | { 96 | "metric": "latency_writeWaitUsPs", 97 | "l1": 2, 98 | "l2": 3, 99 | "alarmPath": "alarm.mongo.connections_current", 100 | "warningMessage": "Connections are spending an unusually large amount of time waiting for writes (mean=%.2f,sd=%.2f, current=%d us/s)", 101 | "notes": "randCrud.js, randQry.js enronloop2.js" 102 | }, 103 | { 104 | "metric": "wtCache_evictionsPs", 105 | "l1": 2, 106 | "l2": 3, 107 | "alarmPath": "alarm.wiredtiger.wtCache_evictionsPs", 108 | "warningMessage": "Rate of evictions from wiredTiger cache is unusually high (mean=%.2f,sd=%.2f, current=%d evictions/s)", 109 | "notes": "randCrud.js, randQry.js enronloop2.js" 110 | } 111 | ] 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /misc/metrics.js: -------------------------------------------------------------------------------- 1 | export default { 2 | statisticDefinitions: [ 3 | { 4 | name: 'activeReadSample', 5 | type: 'final', 6 | defaultSource: 'globalLock.activeClients.readers', 7 | versions: [ 8 | { 9 | versionMask: '3.2.*', 10 | source: 'globalLock.active.readers' 11 | } 12 | ] 13 | }, 14 | // 15 | // Network in-out 16 | // 17 | { 18 | name: 'network_bytesInPs', 19 | type: 'rate', 20 | description: 'data read into mongo server from network', 21 | unit: 'bytes', 22 | defaultSource: 'network.bytesIn' 23 | }, 24 | { 25 | name: 'network_bytesOutPs', 26 | type: 'rate', 27 | description: 'data written to network from mongo server', 28 | unit: 'bytes', 29 | defaultSource: 'network.bytesOut' 30 | }, 31 | // 32 | // mongoDB panel statistics 33 | // 34 | // operations per second 35 | { 36 | name: 'operations_QueryPs', 37 | type: 'rate', 38 | description: 'Querys executed per second', 39 | unit: 'OpPerSecond', 40 | defaultSource: 'opcounters.query' 41 | }, 42 | { 43 | name: 'operations_CommandPs', 44 | type: 'rate', 45 | description: 'Commands executed per second', 46 | unit: 'OpPerSecond', 47 | defaultSource: 'opcounters.command' 48 | }, 49 | { 50 | name: 'operations_InsertPs', 51 | type: 'rate', 52 | description: 'Inserts executed per second', 53 | unit: 'OpPerSecond', 54 | defaultSource: 'opcounters.insert' 55 | }, 56 | { 57 | name: 'operations_UpdatePs', 58 | type: 'rate', 59 | description: 'Updates executed per second', 60 | unit: 'OpPerSecond', 61 | defaultSource: 'opcounters.update' 62 | }, 63 | { 64 | name: 'operations_DeletePs', 65 | type: 'rate', 66 | description: 'Deletes executed per second', 67 | unit: 'OpPerSecond', 68 | defaultSource: 'opcounters.delete' 69 | }, 70 | // 71 | // Document counters 72 | // 73 | { 74 | name: 'document_returned', 75 | type: 'rate', 76 | description: 'Documents returned per 
second', 77 | unit: 'OpPerSecond', 78 | defaultSource: 'metrics.document.returned' 79 | }, 80 | { 81 | name: 'document_updated', 82 | type: 'rate', 83 | description: 'Documents updated per second', 84 | unit: 'OpPerSecond', 85 | defaultSource: 'metrics.document.updated' 86 | }, 87 | { 88 | name: 'document_deleted', 89 | type: 'rate', 90 | description: 'Documents deleted per second', 91 | unit: 'OpPerSecond', 92 | defaultSource: 'metrics.document.deleted' 93 | }, 94 | { 95 | name: 'document_inserted', 96 | type: 'rate', 97 | description: 'Documents inserted per second', 98 | unit: 'OpPerSecond', 99 | defaultSource: 'metrics.document.inserted' 100 | }, 101 | // 102 | // Scans, scans and sorts 103 | // 104 | { 105 | name: 'query_ixscanDocs', 106 | type: 'rate', 107 | description: 'Number of scan operations per seecond', 108 | unit: 'OpPerSecond', 109 | defaultSource: 'metrics.queryExecutor.scanned' 110 | }, 111 | { 112 | name: 'query_collscanDocs', 113 | type: 'rate', 114 | description: 'Number of objects scanned per seecond', 115 | unit: 'OpPerSecond', 116 | defaultSource: 'metrics.queryExecutor.scannedObjects' 117 | }, 118 | { 119 | name: 'query_scanAndOrder', 120 | type: 'rate', 121 | description: 'Number of collscans including a sort per second', 122 | unit: 'OpPerSecond', 123 | defaultSource: 'metrics.operation.scanAndOrder' 124 | }, 125 | // 126 | // Connections 127 | // 128 | { 129 | name: 'connections_current', 130 | type: 'final', 131 | description: 'Number of connections currently open', 132 | unit: 'integerValue', 133 | defaultSource: 'connections.current' 134 | }, 135 | { 136 | name: 'connections_available', 137 | type: 'final', 138 | description: 'Number of connections available', 139 | unit: 'integerValue', 140 | defaultSource: 'connections.available' 141 | }, 142 | // 143 | // Read/Write queues 144 | // 145 | { 146 | name: 'queue_readersActive', 147 | type: 'final', 148 | description: 'Current number of read operations', 149 | unit: 'integerValue', 150 | defaultSource: 'globalLock.activeClients.readers' 151 | }, 152 | { 153 | name: 'queue_readersQueued', 154 | type: 'final', 155 | description: 'Current number of queued read operations', 156 | unit: 'integerValue', 157 | defaultSource: 'globalLock.currentQueue.readers' 158 | }, 159 | { 160 | name: 'queue_writersActive', 161 | type: 'final', 162 | description: 'Current number of write operations', 163 | unit: 'integerValue', 164 | defaultSource: 'globalLock.activeClients.writers' 165 | }, 166 | { 167 | name: 'queue_writersQueued', 168 | type: 'final', 169 | description: 'Current number of queued write operations', 170 | unit: 'integerValue', 171 | defaultSource: 'globalLock.currentQueue.writers' 172 | }, 173 | { 174 | name: 'queue_totalActive', 175 | type: 'final', 176 | description: 'Current number of all operations', 177 | unit: 'integerValue', 178 | defaultSource: 'globalLock.activeClients.total' 179 | }, 180 | { 181 | name: 'queue_totalQueued', 182 | type: 'final', 183 | description: 'Current number of queued operations', 184 | unit: 'integerValue', 185 | defaultSource: 'globalLock.currentQueue.total' 186 | }, 187 | { 188 | name: 'latency_writeOpsPs', 189 | type: 'rate', 190 | description: 'Number of write operations per second', 191 | unit: 'OpPerSecond', 192 | defaultSource: 'opLatencies.writes.ops' 193 | }, 194 | { 195 | name: 'latency_writeWaitUsPs', 196 | type: 'rate', 197 | description: 'Time spent per second for write operations', 198 | unit: 'microseconds', 199 | defaultSource: 'opLatencies.writes.latency' 200 | }, 
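// Note (illustrative annotation, not part of the original metric list): metrics of
// type 'rate', such as latency_writeWaitUsPs and latency_writeOpsPs above, are
// computed by the consumer as the delta of the underlying serverStatus counter
// divided by the sampling interval. Because opLatencies pairs a cumulative latency
// (in microseconds) with an operation count, average write latency is then derived
// in the 'calculations' section further down, for example:
//   latency_writeAvgLatencyMs = (latency_writeWaitUsPs / 1000) / latency_writeOpsPs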
201 | { 202 | name: 'latency_readOpsPs', 203 | type: 'rate', 204 | description: 'Number of read operations per second', 205 | unit: 'OpPerSecond', 206 | defaultSource: 'opLatencies.reads.ops' 207 | }, 208 | { 209 | name: 'latency_readWaitUsPs', 210 | type: 'rate', 211 | description: 'Time spent per second for read operations', 212 | unit: 'microseconds', 213 | defaultSource: 'opLatencies.reads.latency' 214 | }, 215 | { 216 | name: 'latency_commandOpsPs', 217 | type: 'rate', 218 | description: 'Number of command operations per second', 219 | unit: 'OpPerSecond', 220 | defaultSource: 'opLatencies.commands.ops' 221 | }, 222 | { 223 | name: 'latency_commandWaitUsPs', 224 | type: 'rate', 225 | description: 'Time spent per second for command operations', 226 | unit: 'microseconds', 227 | defaultSource: 'opLatencies.commands.latency' 228 | }, 229 | // 230 | // MOngoDB memory utilization 231 | // 232 | { 233 | name: 'mem_resident', 234 | type: 'final', 235 | description: 'Resident memory size', 236 | unit: 'MB', 237 | defaultSource: 'mem.resident' 238 | }, 239 | { 240 | name: 'mem_virtual', 241 | type: 'final', 242 | description: 'Virtual memory size', 243 | unit: 'MB', 244 | defaultSource: 'mem.virtual' 245 | }, 246 | // 247 | // Blocks in/out wiredTiger cache 248 | // 249 | { 250 | name: 'wtCache_readRequestsPs', 251 | type: 'rate', 252 | description: 'number of pages requested from the wiredTiger cache per second', 253 | unit: 'OpPerSecond', 254 | defaultSource: 'wiredTiger.cache.pages requested from the cache' 255 | }, 256 | { 257 | name: 'wtCache_readIntoCachePs', 258 | type: 'rate', 259 | description: 'number of pages read into the wiredTiger cache per second', 260 | unit: 'OpPerSecond', 261 | defaultSource: 'wiredTiger.cache.pages read into cache' 262 | }, 263 | { 264 | name: 'wtCache_maxBytes', 265 | type: 'final', 266 | description: 'Maximum size of the wiredTiger cache', 267 | unit: 'bytes', 268 | defaultSource: 'wiredTiger.cache.maximum bytes configured' 269 | }, 270 | { 271 | name: 'wtCache_currentBytes', 272 | type: 'final', 273 | description: 'Current size of the wiredTiger cache', 274 | unit: 'bytes', 275 | defaultSource: 'wiredTiger.cache.bytes currently in the cache' 276 | }, 277 | { 278 | name: 'wtCache_dirtyBytes', 279 | type: 'final', 280 | description: 'Modified bytes in the wiredTiger cache', 281 | unit: 'bytes', 282 | defaultSource: 'wiredTiger.cache.tracked dirty bytes in the cache' 283 | }, 284 | { 285 | name: 'wtCache_evictionsPs', 286 | type: 'rate', 287 | description: 'Pages evicted from the wiredTiger cache per second', 288 | unit: 'OpPerSecond', 289 | defaultSource: 'wiredTiger.cache.eviction worker thread evicting pages' 290 | }, 291 | // 292 | // Wired Tiger transaction tickets 293 | // 294 | { 295 | name: 'wtTransactions_readAvailable', 296 | type: 'final', 297 | description: 'Number of read tickets available in wiredTiger', 298 | unit: 'integerValue', 299 | defaultSource: 'wiredTiger.concurrentTransactions.read.available' 300 | }, 301 | { 302 | name: 'wtTransactions_readOut', 303 | type: 'final', 304 | description: 'Number of read tickets in use in wiredTiger', 305 | unit: 'integerValue', 306 | defaultSource: 'wiredTiger.concurrentTransactions.read.out' 307 | }, 308 | { 309 | name: 'wtTransactions_writeAvailable', 310 | type: 'final', 311 | description: 'Number of write tickets available in wiredTiger', 312 | unit: 'integerValue', 313 | defaultSource: 'wiredTiger.concurrentTransactions.write.available' 314 | }, 315 | { 316 | name: 'wtTransactions_writeOut', 317 | 
type: 'final', 318 | description: 'Number of write tickets in use in wiredTiger', 319 | unit: 'integerValue', 320 | defaultSource: 'wiredTiger.concurrentTransactions.write.out' 321 | }, 322 | // 323 | // WiredTiger IOs 324 | // 325 | { 326 | name: 'wtIO_writeIOps', 327 | type: 'rate', 328 | description: 'wiredTiger write IO rate', 329 | unit: 'OpPerSecond', 330 | defaultSource: 'wiredTiger.connection.total write I/Os' 331 | }, 332 | { 333 | name: 'wtIO_readIOps', 334 | type: 'rate', 335 | description: 'wiredTiger read IO rate', 336 | unit: 'OpPerSecond', 337 | defaultSource: 'wiredTiger.connection.total read I/Os' 338 | }, 339 | { 340 | name: 'wtIO_fsyncIOps', 341 | type: 'rate', 342 | description: 'wiredTiger fsync IO rate', 343 | unit: 'OpPerSecond', 344 | defaultSource: 'wiredTiger.connection.total fsync I/Os' 345 | }, 346 | // 347 | // WiredTiger readLatencyRate 348 | // 349 | { 350 | name: 'wtIO_diskToCachePs', 351 | type: 'rate', 352 | description: 'wiredTiger disk to cache IO rate', 353 | unit: 'OpPerSecond', 354 | defaultSource: 'wiredTiger.cache.application threads page read from disk to cache count' 355 | }, 356 | { 357 | name: 'wtIO_diskToCacheUsPs', 358 | type: 'rate', 359 | description: 'wiredTiger disk to cache time per second', 360 | unit: 'microseconds', 361 | defaultSource: 362 | 'wiredTiger.cache.application threads page read from disk to cache time (usecs)' 363 | }, 364 | { 365 | name: 'wtIO_cacheToDiskPs', 366 | type: 'rate', 367 | description: 'wiredTiger cache to disk IO rate', 368 | unit: 'OpPerSecond', 369 | defaultSource: 'wiredTiger.cache.application threads page write from cache to disk count' 370 | }, 371 | { 372 | name: 'wtIO_cacheToDiskUsPs', 373 | type: 'rate', 374 | description: 'wiredTiger cache to disk time per second', 375 | unit: 'microseconds', 376 | defaultSource: 377 | 'wiredTiger.cache.application threads page write from cache to disk time (usecs)' 378 | }, 379 | { 380 | name: 'wtIO_logSyncTimeUsPs', 381 | type: 'rate', 382 | description: 'wiredTiger log sync time per second', 383 | unit: 'microseconds', 384 | defaultSource: 'wiredTiger.log.log sync time duration (usecs)' 385 | }, 386 | { 387 | name: 'wtIO_logSyncPs', 388 | type: 'rate', 389 | description: 'wiredTiger log sync operations per second', 390 | unit: 'OpPerSecond', 391 | defaultSource: 'wiredTiger.log.log sync operations' 392 | }, 393 | // 394 | // wiredTiger log 395 | // 396 | { 397 | name: 'wtLog_maxLogSize', 398 | type: 'final', 399 | description: 'wiredTiger maximum log file size', 400 | unit: 'bytes', 401 | defaultSource: 'wiredTiger.log.maximum log file size' 402 | }, 403 | { 404 | name: 'wtLog_currentLogSize', 405 | type: 'final', 406 | description: 'wiredTiger current log file size', 407 | unit: 'bytes', 408 | defaultSource: 'wiredTiger.log.total log buffer size' 409 | } 410 | ], 411 | calculations: [ 412 | { 413 | name: 'wtCache_cleanBytes', 414 | expression: 'wtCache_currentBytes-wtCache_dirtyBytes', 415 | description: 'Unmodified bytes in the wiredTiger cache', 416 | unit: 'milliseconds', 417 | ifZeroDivide: 0 418 | }, 419 | { 420 | name: 'latency_writeAvgLatencyMs', 421 | expression: '(latency_writeWaitUsPs/1000)/latency_writeOpsPs', 422 | description: 'average time for a mongoDB write request', 423 | unit: 'milliseconds', 424 | ifZeroDivide: 0 425 | }, 426 | { 427 | name: 'latency_readAvgLatencyMs', 428 | expression: '(latency_readWaitUsPs/1000)/latency_readOpsPs', 429 | description: 'average time for a mongoDB read request', 430 | unit: 'milliseconds', 431 | ifZeroDivide: 0 
432 | }, 433 | 434 | { 435 | name: 'wtCache_MissPct', 436 | expression: 'wtCache_readIntoCachePs*100/wtCache_readRequestsPs', 437 | description: 'Percentage of time a needed page is not found in wiredTiger cache', 438 | unit: 'Percentage', 439 | ifZeroDivide: 0 440 | }, 441 | { 442 | name: 'wtIO_readLatencyUs', 443 | expression: '(wtIO_diskToCacheUsPs)/wtIO_diskToCachePs', 444 | description: 'Average time for a wiredTiger disk read', 445 | unit: 'microseconds', 446 | ifZeroDivide: 0 447 | }, 448 | { 449 | name: 'wtIO_logSyncLatencyUs', 450 | expression: '(wtIO_logSyncTimeUsPs)/wtIO_logSyncPs', 451 | description: 'Average time for a wiredTiger log Sync', 452 | unit: 'microseconds', 453 | ifZeroDivide: 0 454 | }, 455 | { 456 | name: 'wtIO_writeLatencyUs', 457 | expression: '(wtIO_cacheToDiskUsPs)/wtIO_cacheToDiskPs', 458 | description: 'Average time for a wiredTiger disk write', 459 | unit: 'microseconds', 460 | ifZeroDivide: 0 461 | }, 462 | { 463 | name: 'wtTransactions_readPct', 464 | expression: 465 | 'wtTransactions_readOut*100/(wtTransactions_readOut+wtTransactions_readAvailable)', 466 | description: 'Percentage of wiredTiger read transactions in use', 467 | unit: 'Percentage', 468 | ifZeroDivide: 0 469 | }, 470 | { 471 | name: 'wtTransactions_writePct', 472 | expression: 473 | 'wtTransactions_writeOut*100/(wtTransactions_writeOut+wtTransactions_writeAvailable)', 474 | description: 'Percentage of wiredTiger write transactions in use', 475 | unit: 'Percentage', 476 | ifZeroDivide: 0 477 | }, 478 | { 479 | name: 'query_scanToDocRatio', 480 | expression: '(query_collscanDocs+query_ixscanDocs)/document_returned', 481 | description: 'Ratio of documents examined to documents returned', 482 | unit: 'Float', 483 | ifZeroDivide: 0 484 | }, 485 | { 486 | name: 'query_pctIxDocs', 487 | expression: 'query_ixscanDocs*100/(query_collscanDocs+query_ixscanDocs)', 488 | description: 'Percentage of documents examined by index', 489 | unit: 'Percentage', 490 | ifZeroDivide: 0 491 | }, 492 | { 493 | name: 'connections_inusePct', 494 | expression: 'connections_current*100/(connections_current+connections_available)', 495 | description: 'Percentage connections in use', 496 | unit: 'Percentage', 497 | ifZeroDivide: 0 498 | }, 499 | { 500 | name: 'queue_queuedPct', 501 | expression: 502 | '(queue_readersQueued+queue_writersQueued)*100/(queue_readersActive+queue_readersQueued+queue_writersActive+ queue_writersQueued)', 503 | description: 'Percentage of read/writes which are queued', 504 | unit: 'Percentage', 505 | ifZeroDivide: 0 506 | } 507 | ] 508 | }; 509 | -------------------------------------------------------------------------------- /mongoTuning.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Master utility script for the Apress book "MongoDB Performance Tuning" containing all functions from the "scripts" directory. 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:53:12+10:00 8 | * 9 | */ 10 | 11 | var mongoTuning = {}; 12 | 13 | // SERVER STATS 14 | /** 15 | * Base function that will collect and shape raw server statistics data. 16 | * 17 | * @returns {FlatServerStats} An object containing many different server statistics. 
18 | */ 19 | mongoTuning.serverStatistics = function () { 20 | const output = {}; 21 | let value; 22 | let rate; 23 | output.statistics = []; 24 | var serverStats = mongoTuning.flattenServerStatus(db.serverStatus()).stats; // eslint-disable-line 25 | const uptime = serverStats.uptimeMillis / 1000; // seconds with precision 26 | Object.keys(serverStats).forEach((stat) => { 27 | // print(stat); 28 | value = serverStats[stat]; 29 | rate = null; 30 | if (typeof value === 'number') { 31 | rate = (value / uptime).toFixed(4); 32 | } 33 | if (!stat.match(/_mongo/)) { 34 | output.statistics.push({ 35 | statistic: stat, 36 | value, 37 | ratePs: rate, 38 | }); 39 | } 40 | }); 41 | return output; 42 | }; 43 | /** 44 | * Helper function for monitoring the MongoDB server over the duration and then 45 | * calculating delta and final values across the duration for key statistics. 46 | * 47 | * @param {int} duration - How long to monitor the server for. 48 | * @returns {Object} - Data object containing Deltas and final values. 49 | */ 50 | mongoTuning.monitorServer = function (duration) { 51 | let runningStats; 52 | let initialStats; 53 | const runTime = 0; 54 | initialStats = mongoTuning.serverStatistics(); 55 | sleep(duration); 56 | finalStats = mongoTuning.serverStatistics(); 57 | const deltas = mongoTuning.serverStatDeltas(initialStats, finalStats); 58 | const finals = mongoTuning.convertStat(finalStats); 59 | return { deltas, finals }; 60 | }; 61 | mongoTuning.keyServerStats = function (duration, regex) { 62 | const monitoringData = mongoTuning.monitorServer(duration); 63 | return mongoTuning.keyServerStatsFromSample(monitoringData, regex); 64 | }; 65 | mongoTuning.keyServerStatsFromSample = function (monitoringData, regex) { 66 | const data = mongoTuning.derivedStatistics(monitoringData); 67 | if (regex) { 68 | return mongoTuning.serverStatSearch(data, regex); 69 | } 70 | return data; 71 | }; 72 | /** 73 | * Monitor the MongoDB server for a given duration and return some derived statistics. 74 | * 75 | * @param {int} duration - How many milliseconds to monitor the server for. 76 | * @param {string} regex - OPTIONAL - A string to perform a match against returned statistic keys. 77 | * @returns {Object} - An object containing the derived statistics matching the regex if given. 78 | */ 79 | mongoTuning.monitorServerDerived = function (duration, regex) { 80 | if (!duration) { 81 | duration = 5000; 82 | } 83 | const monitoringData = mongoTuning.monitorServer(duration); 84 | const derivedStats = mongoTuning.derivedStatistics(monitoringData); 85 | 86 | if (regex) { 87 | return mongoTuning.serverStatSearch(derivedStats, regex); 88 | } 89 | return derivedStats; 90 | }; 91 | /** 92 | * Monitor the MongoDB server for a given duration and return the raw statistics. 93 | * 94 | * @param {int} duration - How many milliseconds to monitor the server for. 95 | * @param {string} regex - OPTIONAL - A string to perform a match against returned statistic keys. 96 | * @returns {Object} - An object containing the derived statistics matching the regex if given. 97 | */ 98 | mongoTuning.monitorServerRaw = function (duration, regex) { 99 | if (!duration) { 100 | duration = 5000; 101 | } 102 | const monitoringData = mongoTuning.monitorServer(duration); 103 | if (regex) { 104 | return mongoTuning.serverStatSearchRaw(monitoringData, regex); 105 | } 106 | return monitoringData; 107 | }; 108 | /** 109 | * Converts structured mongodb ServerStatus object into a flattened array of stats. 
110 | * 111 | * @param {RawServerStatus} dbServerStatus - The raw result of the mongodb command. 112 | * @returns {FlatServerStatus} - Flattened array of server status metrics. 113 | */ 114 | mongoTuning.flattenServerStatus = function (dbServerStatus) { 115 | const flattenedServerStatus = {}; 116 | flattenedServerStatus.stats = {}; 117 | 118 | function internalflattenServerStatus(serverStatus, rootTerm) { 119 | let prefix = ''; 120 | if (arguments.length > 1) { 121 | prefix = rootTerm + '.'; 122 | } 123 | Object.getOwnPropertyNames(serverStatus).forEach((key) => { 124 | if (key !== '_mongo') { 125 | let value = serverStatus[key]; 126 | // eslint-disable-next-line 127 | if (value.constructor === NumberLong) { 128 | value = value.toNumber(); 129 | } 130 | const valtype = typeof value; 131 | const fullkey = prefix + key; 132 | // print(key, value, valtype, fullkey); 133 | if (valtype == 'object') { 134 | // recurse into nested objects 135 | internalflattenServerStatus(value, prefix + key); 136 | } else { 137 | /* No more nesting */ 138 | flattenedServerStatus.stats[fullkey] = value; 139 | } 140 | } 141 | }); 142 | } 143 | internalflattenServerStatus(dbServerStatus); 144 | return flattenedServerStatus; 145 | }; 146 | /** 147 | * Flattens complex server statistics into a simpler form. 148 | * 149 | * @param {RawServerStatus} serverStat - The raw server statistics result from Mongo. 150 | * @returns {SimpleServerStatus} - A single object with key value pairs for each stat. 151 | */ 152 | mongoTuning.convertStat = function (serverStat) { 153 | const returnStat = {}; 154 | serverStat.statistics.forEach((stat) => { 155 | returnStat[stat.statistic] = stat.value; 156 | }); 157 | return returnStat; 158 | }; 159 | /** 160 | * Takes two sets of server statistics and calculates the difference and rate of change. 161 | * @param {Object} initialStats - First set of statistics. 162 | * @param {Object} finalStats - Second set of statistics. 163 | * @returns {Array} - Array of delta information. 164 | */ 165 | mongoTuning.serverStatDeltas = function (initialStats, finalStats) { 166 | const stat1 = mongoTuning.convertStat(initialStats); 167 | const stat2 = mongoTuning.convertStat(finalStats); 168 | let delta; 169 | let rate; 170 | const statDelta = {}; 171 | statDelta.timeDelta = stat2.uptime - stat1.uptime; 172 | 173 | Object.keys(stat2).forEach((key) => { 174 | // print(key,typeof stat2[key]); 175 | if (typeof stat2[key] === 'number') { 176 | delta = stat2[key] - stat1[key]; 177 | rate = delta / statDelta.timeDelta; 178 | } else { 179 | delta = null; 180 | rate = null; 181 | } 182 | statDelta[key] = { 183 | lastValue: stat2[key], 184 | firstValue: stat1[key], 185 | delta, 186 | rate, 187 | }; 188 | }); 189 | return statDelta; 190 | }; 191 | /** 192 | * Simple helper function for searching derived server stats for matching keys. 193 | * 194 | * @param {Object} stats - The server statistics to search. 195 | * @param {String} regex - Regex to search for statistic keys. 196 | * @returns {Array} - An array of matching key value pairs. 197 | */ 198 | mongoTuning.serverStatSearch = function (stats, regex) { 199 | const returnArray = {}; 200 | Object.keys(stats).forEach((key) => { 201 | if (key.match(regex)) { 202 | returnArray[key] = stats[key]; 203 | } 204 | }); 205 | return returnArray; 206 | }; 207 | /** 208 | * Simple helper function for searching raw server stats for matching keys. 209 | * 210 | * @param {Object} stats - The server statistics to search. 
211 | * @param {String} regex - Regex to search for statistic keys. 212 | * @returns {Array} - An array of matching key value pairs. 213 | */ 214 | mongoTuning.serverStatSearchRaw = function (stats, regex) { 215 | const returnArray = { deltas: {}, finals: {} }; 216 | // First filter deltas. 217 | Object.keys(stats.deltas).forEach((key) => { 218 | if (key.match(regex)) { 219 | returnArray.deltas[key] = stats.deltas[key]; 220 | } 221 | }); 222 | // Then filter finals 223 | Object.keys(stats.finals).forEach((key) => { 224 | if (key.match(regex)) { 225 | returnArray.finals[key] = stats.finals[key]; 226 | } 227 | }); 228 | return returnArray; 229 | }; 230 | /** 231 | * Derive some summary statistics from observed values. 232 | * @param {Object} serverData - Server data gathered from mongoTuning.monitorServer, should contain deltas and final values. 233 | * @returns {Object} - Data object containing the derived statistics. 234 | */ 235 | mongoTuning.derivedStatistics = function (serverData) { 236 | const { deltas, finals } = serverData; 237 | const data = {}; 238 | const descriptions = {}; 239 | // ********************************************* 240 | // Network counters 241 | // ********************************************* 242 | 243 | data.netKBInPS = deltas['network.bytesIn'].rate / 1024; 244 | data.netKBOutPS = deltas['network.bytesOut'].rate / 1024; 245 | 246 | // ******************************************** 247 | // Activity counters 248 | // ******************************************** 249 | data.intervalSeconds = deltas.timeDelta; 250 | data.queryPS = deltas['opcounters.query'].rate; 251 | data.getmorePS = deltas['opcounters.getmore'].rate; 252 | data.commandPS = deltas['opcounters.command'].rate; 253 | data.insertPS = deltas['opcounters.insert'].rate; 254 | data.updatePS = deltas['opcounters.update'].rate; 255 | data.deletePS = deltas['opcounters.delete'].rate; 256 | 257 | // ******************************************** 258 | // Document counters 259 | // ******************************************** 260 | data.docsReturnedPS = deltas['metrics.document.returned'].rate; 261 | data.docsUpdatedPS = deltas['metrics.document.updated'].rate; 262 | data.docsInsertedPS = deltas['metrics.document.inserted'].rate; 263 | data.ixscanDocsPS = deltas['metrics.queryExecutor.scanned'].rate; 264 | data.collscanDocsPS = deltas['metrics.queryExecutor.scannedObjects'].rate; 265 | 266 | descriptions.scansToDocumentRatio = 267 | 'Ratio of documents scanned to documents returned'; 268 | if (data.docsReturnedPS > 0) { 269 | data.scansToDocumentRatio = 270 | (data.ixscanDocsPS + data.collscanDocsPS) / data.docsReturnedPS; 271 | } else { 272 | data.scansToDocumentRatio = 0; 273 | } 274 | 275 | // ******************************************** 276 | // Transaction statistics 277 | // ******************************************** 278 | data.transactionsStartedPS = deltas['transactions.totalStarted'].rate; 279 | data.transactionsAbortedPS = deltas['transactions.totalAborted'].rate; 280 | data.transactionsCommittedPS = deltas['transactions.totalCommitted'].rate; 281 | if (data.transactionsStartedPS > 0) { 282 | data.transactionAbortPct = 283 | (data.transactionsAbortedPS * 100) / data.transactionsStartedPS; 284 | } else { 285 | data.transactionAbortPct = 0; 286 | } 287 | 288 | if (deltas['opLatencies.reads.ops'].delta > 0) { 289 | data.readLatencyMs = 290 | deltas['opLatencies.reads.latency'].delta / 291 | deltas['opLatencies.reads.ops'].delta / 292 | 1000; 293 | } else data.readLatency = 0; 294 | 295 | if 
(deltas['opLatencies.writes.ops'].delta > 0) { 296 | data.writeLatencyMs = 297 | deltas['opLatencies.writes.latency'].delta / 298 | deltas['opLatencies.writes.ops'].delta / 299 | 1000; 300 | } else data.writeLatency = 0; 301 | 302 | if (deltas['opLatencies.commands.ops'].delta > 0) { 303 | data.cmdLatencyMs = 304 | deltas['opLatencies.commands.latency'].delta / 305 | deltas['opLatencies.commands.ops'].delta / 306 | 1000; 307 | } else data.cmdLatency = 0; 308 | 309 | data.connections = deltas['connections.current'].lastValue; 310 | data.availableConnections = deltas['connections.available'].firstValue; 311 | data.assertsPS = 312 | deltas['asserts.regular'].rate + 313 | deltas['asserts.warning'].rate + 314 | deltas['asserts.msg'].rate + 315 | deltas['asserts.user'].rate + 316 | deltas['asserts.rollovers'].rate; 317 | 318 | data.activeReaders = finals['globalLock.activeClients.readers']; 319 | data.activeWriters = finals['globalLock.activeClients.writers']; 320 | data.queuedReaders = finals['globalLock.currentQueue.readers']; 321 | data.queuedWriters = finals['globalLock.currentQueue.writers']; 322 | data.globalLockQueue = { 323 | readActive: data.activeReaders, 324 | readQueued: data.queuedReaders, 325 | writeActive: data.activeWriters, 326 | writeQueued: data.queuedWriters, 327 | }; 328 | 329 | // ********************************************************* 330 | // Memory counters 331 | // ********************************************************* 332 | 333 | data.cacheReadQAvailable = 334 | deltas['wiredTiger.concurrentTransactions.read.available'].lastValue; 335 | data.cacheReadQUsed = 336 | deltas['wiredTiger.concurrentTransactions.read.out'].lastValue; 337 | 338 | data.cacheWriteQAvailable = 339 | deltas['wiredTiger.concurrentTransactions.write.available'].lastValue; 340 | data.cacheWriteQUsed = 341 | deltas['wiredTiger.concurrentTransactions.write.out'].lastValue; 342 | 343 | data.cacheGetsPS = 344 | deltas['wiredTiger.cache.pages requested from the cache'].rate; 345 | 346 | data.cacheReadInsPS = deltas['wiredTiger.cache.pages read into cache'].rate; 347 | 348 | descriptions.cacheHitRate = 'Hit Rate in the wiredTigerCache '; 349 | if (data.cacheGetsPS > 0) { 350 | data.cacheHitRate = 351 | ((data.cacheGetsPS - data.cacheReadInsPS) * 100) / data.cacheGetsPS; 352 | } else { 353 | data.cacheHitRate = 0; 354 | } 355 | 356 | data.evictionsPs = deltas['wiredTiger.cache.internal pages evicted'].rate; 357 | data.evictionBlockedPs = 358 | deltas['wiredTiger.thread-yield.page acquire eviction blocked'].rate; 359 | if (data.evictionsPs > 0) { 360 | data.evictionBlockRate = (data.evictionBlockedPs * 100) / data.evictionsPs; 361 | } else data.evictionBlockRate = 0; 362 | 363 | if (data.cacheReadInsPS > 0) { 364 | data.evictionRate = (data.evictionsPs * 100) / data.cacheReadInsPS; 365 | } else data.evictionRate = 0; 366 | 367 | data.cacheHighWaterMB = 368 | deltas['wiredTiger.cache.maximum bytes configured'].lastValue / 1048576; 369 | 370 | data.cacheSizeMB = 371 | deltas['wiredTiger.cache.bytes currently in the cache'].lastValue / 1048576; 372 | 373 | data.diskBlockReadsPS = deltas['wiredTiger.block-manager.blocks read'].rate; 374 | data.diskBlockWritesPS = 375 | deltas['wiredTiger.block-manager.blocks written'].rate; 376 | 377 | data.logKBRatePS = deltas['wiredTiger.log.log bytes written'].rate / 1024; 378 | 379 | data.logSyncTimeRateMsPS = 380 | deltas['wiredTiger.log.log sync time duration (usecs)'].rate / 1000; 381 | 382 | data.logSyncOpsPS = deltas['wiredTiger.log.log sync operations'].rate; 383 
| 384 | if (data.logSyncOpsPS > 0) { 385 | data.logAvgSyncTime = data.logSyncTimeRateMsPS / data.logSyncOpsPS; 386 | } else data.logAvgSyncTime = 0; 387 | 388 | // ********************************************************* 389 | // Disk IO 390 | // ********************************************************* 391 | 392 | Object.keys(data).forEach((key) => { 393 | if (data[key] % 1 > 0.01) { 394 | data[key] = data[key].toFixed(4); 395 | } 396 | }); 397 | return data; 398 | }; 399 | mongoTuning.memoryReport = () => { 400 | const serverStats = db.serverStatus(); 401 | print('Mongod virtual memory ', serverStats.mem.virtual); 402 | print('Mongod resident memory', serverStats.mem.resident); 403 | print( 404 | 'Wired Tiger cache size', 405 | Math.round( 406 | serverStats.wiredTiger.cache['bytes currently in the cache'] / 1048576 407 | ) 408 | ); 409 | }; 410 | 411 | // QUERY PROFILER 412 | mongoTuning.profileQuery = () => { 413 | const profileQuery = db.system.profile.aggregate([ 414 | { 415 | $group: { 416 | _id: { cursorid: '$cursorid' }, 417 | count: { $sum: 1 }, 418 | 'queryHash-max': { $max: '$queryHash' }, 419 | 'millis-sum': { $sum: '$millis' }, 420 | 'ns-max': { $max: '$ns' }, 421 | }, 422 | }, 423 | { 424 | $group: { 425 | _id: { 426 | queryHash: '$queryHash-max', 427 | collection: '$ns-max', 428 | }, 429 | count: { $sum: 1 }, 430 | millis: { $sum: '$millis-sum' }, 431 | }, 432 | }, 433 | { $sort: { millis: -1 } }, 434 | { $limit: 10 }, 435 | ]); 436 | return profileQuery; 437 | }; 438 | /** 439 | * Get details of a query from system.profile using the queryhash 440 | * 441 | * @param {string} queryHash - The queryHash of the query of interest. 442 | * 443 | * @returns {queryDetails} query ns, command and basic statistics 444 | */ 445 | mongoTuning.getQueryByHash = function (queryHash) { 446 | return db.system.profile.findOne( 447 | { queryHash }, 448 | { ns: 1, command: 1, docsExamined: 1, millis: 1, planSummary: 1 } 449 | ); 450 | }; 451 | /** 452 | * Fetch simplified profiling info for a given database and namespace. 453 | * 454 | * @param {string} dbName - The name of the database to fetch profiling data for. 455 | * @param {string} collectionName - The name of the collection to fetch profiling data for. 456 | * 457 | * @returns {ProfilingData} Profiling data for the given namespace (queries only), grouped and simplified. 458 | */ 459 | mongoTuning.getProfileData = function (dbName, collectionName) { 460 | var mydb = db.getSiblingDB(dbName); // eslint-disable-line 461 | const ns = dbName + '.' 
+ collectionName; 462 | const profileData = mydb 463 | .getSiblingDB(dbName) 464 | .getCollection('system.profile') 465 | .aggregate([ 466 | { 467 | $match: { 468 | ns, 469 | op: 'query', 470 | }, 471 | }, 472 | { 473 | $group: { 474 | _id: { 475 | filter: '$query.filter', 476 | }, 477 | count: { 478 | $sum: 1, 479 | }, 480 | 'millis-sum': { 481 | $sum: '$millis', 482 | }, 483 | 'nreturned-sum': { 484 | $sum: '$nreturned', 485 | }, 486 | 'planSummary-first': { 487 | $first: '$planSummary', 488 | }, 489 | 'docsExamined-sum': { 490 | $sum: '$docsExamined', 491 | }, 492 | }, 493 | }, 494 | { 495 | $sort: { 496 | 'millis-sum': -1, 497 | }, 498 | }, 499 | ]); 500 | return profileData; 501 | }; 502 | 503 | // CURENT OP 504 | mongoTuning.printCurrentOps = function (printZeroSecs, printInternalProcess) { 505 | // console.log(COps); 506 | var mydb = db.getSiblingDB('admin'); // eslint-disable-line 507 | var output = []; 508 | var result = {}; 509 | var currentOps = mydb.currentOp(); 510 | if (currentOps.hasOwnProperty('errmsg')) { 511 | output.push({ 512 | error: currentOps.errmsg, 513 | }); 514 | } else { 515 | var opArray = []; 516 | // print(clusterOps); print("+++++++++++++++"); print(JSON.stringify(currentOps)); 517 | var inprog = currentOps.inprog; 518 | var server = currentOps.server; 519 | inprog.forEach(function (currentOp) { 520 | // printjson(currentOp); 521 | var secs = 0; 522 | 523 | if (currentOp.hasOwnProperty('secs_running')) { 524 | secs = currentOp.secs_running; 525 | } 526 | var myop = currentOp.op; 527 | var query = {}; 528 | if ('query' in currentOp) { 529 | query = JSON.stringify(currentOp.query); 530 | } else if ('command' in currentOp) { 531 | query = JSON.stringify(currentOp.command); 532 | } 533 | if (query.length > 2) { 534 | myop = query; 535 | } 536 | opArray.push({ 537 | server: server, 538 | desc: currentOp.desc, 539 | secs: secs, 540 | ns: currentOp.ns, 541 | op: myop, 542 | opid: currentOp.opid, 543 | }); 544 | // 545 | }); 546 | 547 | opArray.sort(function (a, b) { 548 | // Sort in desc order of seconds active 549 | return b.secs - a.secs; 550 | }); 551 | // printjson(opArray); // eslint-disable-line 552 | opArray.forEach(function (op) { 553 | if ( 554 | (printZeroSecs === true || op.secs > 0) && 555 | (printInternalProcess === true || 556 | (op.desc !== 'rsBackgroundSync' && 557 | op.desc !== 'ReplBatcher' && 558 | op.desc !== 'rsSync' && 559 | op.desc !== 'WT RecordStoreThread: local.oplog.rs' && 560 | op.desc !== 'SyncSourceFeedback' && 561 | op.desc !== 'NoopWriter' && 562 | op.ns != 'local.oplog.rs')) 563 | ) { 564 | output.push({ 565 | desc: op.desc, 566 | secs: op.secs, 567 | ns: op.ns, 568 | op: op.op, 569 | opid: op.opid, 570 | }); 571 | } 572 | }); 573 | } 574 | result.ops = output; 575 | return result; 576 | }; 577 | 578 | mongoTuning.opForKillList = function () { 579 | var output = []; 580 | mongoTuning.printCurrentOps(true, false).ops.forEach(function (op) { 581 | var outStr = 582 | op.opid + ' ' + op.secs + ' seconds running. 
' + op.desc + ' ' + op.ns; 583 | output.push(outStr); 584 | }); 585 | return output; 586 | }; 587 | 588 | mongoTuning.killOp = function (opIdString) { 589 | var opid = opIdString.split(' ')[0]; 590 | if (opid.indexOf(':') == -1) { 591 | opid = parseInt(opid); // eslint-disable-line 592 | } 593 | print('Issuing kill on ' + opid); 594 | var ret = db.killOp(opid); //eslint-disable-line 595 | printjson(ret); // eslint-disable-line 596 | }; 597 | 598 | // EXPLAIN 599 | mongoTuning.prepExplain = (explainInput) => { 600 | // Takes as input explain output in one of the follow formats: 601 | // A fully explain JSON document, in which case emits winningPlan 602 | // An explain() cursor in which case, extracts the winningPlan from the cursor 603 | // A specific plan step in which case just returns that 604 | 605 | const keys = Object.keys(explainInput); 606 | // printjson(keys); 607 | if (keys.includes('queryPlanner')) { 608 | // This looks like a top level Explain 609 | return explainInput.queryPlanner.winningPlan; 610 | } else if (keys.includes('hasNext')) { 611 | // This looks like a cursor 612 | if (explainInput.hasNext()) { 613 | return mongoTuning.prepExplain(explainInput.next()); 614 | } 615 | return { ok: 0, error: 'No plan found' }; 616 | } else if (keys.includes('stage')) { 617 | // This looks like an actual plan 618 | return explainInput; 619 | } 620 | return { ok: 0, error: 'No plan found' }; 621 | }; 622 | mongoTuning.quickExplain = (inputPlan) => { 623 | // Takes as input an explain Plan. Emits a simplified 624 | // version of that plan 625 | const explainPlan = mongoTuning.prepExplain(inputPlan); 626 | let stepNo = 1; 627 | 628 | const printSpaces = function (n) { 629 | let s = ''; 630 | for (let i = 1; i < n; i++) { 631 | s += ' '; 632 | } 633 | return s; 634 | }; 635 | const printInputStage = function (step, depth) { 636 | if ('inputStage' in step) { 637 | printInputStage(step.inputStage, depth + 1); 638 | } 639 | if ('inputStages' in step) { 640 | step.inputStages.forEach((inputStage) => { 641 | printInputStage(inputStage, depth + 1); 642 | }); 643 | } 644 | if ('indexName' in step) { 645 | print(stepNo++, printSpaces(depth), step.stage, step.indexName); 646 | } else { 647 | print(stepNo++, printSpaces(depth), step.stage); 648 | } 649 | }; 650 | printInputStage(explainPlan, 1); 651 | }; 652 | mongoTuning.prepExecutionStats = (explainInput) => { 653 | // Takes as input explain output in one of the follow formats: 654 | // A fully explain JSON document, in which case emits executionStats 655 | // An explain() cursor in which case, extracts the exectionStats from the cursor 656 | 657 | const keys = Object.keys(explainInput); 658 | 659 | if (keys.includes('executionStats')) { 660 | // This looks like a top level Explain 661 | return explainInput.executionStats; 662 | } else if (keys.includes('hasNext')) { 663 | // This looks like a cursor 664 | 665 | if (explainInput.hasNext()) { 666 | return mongoTuning.prepExecutionStats(explainInput.next()); 667 | } 668 | } else if (explainInput.stages) { 669 | } else return { ok: 0, error: 'No executionStats found' }; 670 | }; 671 | mongoTuning.executionStats = (execStatsIn) => { 672 | if (execStatsIn.stages) { 673 | return aggregationExecutionStats(execStatsIn); 674 | } 675 | const execStats = mongoTuning.prepExecutionStats(execStatsIn); 676 | // printjson(execStats); 677 | let stepNo = 1; 678 | print('\n'); 679 | const printSpaces = function (n) { 680 | let s = ''; 681 | for (let i = 1; i < n; i++) { 682 | s += ' '; 683 | } 684 | return s; 685 | 
}; 686 | var printInputStage = function (step, depth) { 687 | if ('inputStage' in step) { 688 | printInputStage(step.inputStage, depth + 1); 689 | } 690 | if ('inputStages' in step) { 691 | step.inputStages.forEach((inputStage) => { 692 | printInputStage(inputStage, depth + 1); 693 | }); 694 | } 695 | if ('shards' in step) { 696 | step.shards.forEach((inputShard) => { 697 | printInputStage(inputShard, depth + 1); 698 | }); 699 | } 700 | if ('shardName' in step) { 701 | printInputStage(step.executionStages, depth + 1); 702 | } 703 | let extraData = '('; 704 | let printStage = 'unknown'; 705 | if ('stage' in step) { 706 | printStage = step.stage; 707 | } 708 | if ('shardName' in step) { 709 | printStage = 'Shard ==> ' + step.shardName; 710 | } 711 | if ('indexName' in step) extraData += ' ' + step.indexName; 712 | if ('executionTimeMillisEstimate' in step) { 713 | extraData += ' ms:' + step.executionTimeMillisEstimate; 714 | } 715 | if ('executionTimeMillis' in step) { 716 | extraData += ' ms:' + step.executionTimeMillis; 717 | } 718 | if ('nReturned' in step) { 719 | extraData += ' returned:' + step.nReturned; 720 | } 721 | if ('keysExamined' in step) extraData += ' keys:' + step.keysExamined; 722 | if ('docsExamined' in step) extraData += ' docs:' + step.docsExamined; 723 | if ('nWouldModify' in step && step.nWouldModify !== false) 724 | extraData += ' upd:' + step.nWouldModify; 725 | if ('wouldInsert' in step && step.wouldInsert !== false) 726 | extraData += ' ins:' + step.wouldInsert; 727 | extraData += ')'; 728 | print(stepNo++, printSpaces(depth), printStage, extraData); 729 | }; 730 | printInputStage(execStats.executionStages, 1); 731 | print( 732 | '\nTotals: ms:', 733 | execStats.executionTimeMillis, 734 | ' keys:', 735 | execStats.totalKeysExamined, 736 | ' Docs:', 737 | execStats.totalDocsExamined 738 | ); 739 | }; 740 | mongoTuning.aggregationExecutionStats = (execStatsIn) => { 741 | // printjson(execStatsIn); 742 | let execStats = {}; 743 | let stepNo = 1; 744 | if ( 745 | execStatsIn.stages && 746 | execStatsIn.stages[0].$cursor && 747 | execStatsIn.stages[0].$cursor.executionStats 748 | ) { 749 | execStats = execStatsIn.stages[0].$cursor.executionStats; 750 | } else if (execStatsIn.executionStats) { 751 | execStats = execStatsIn.executionStats; 752 | } 753 | print('\n'); 754 | const printSpaces = function (n) { 755 | let s = ''; 756 | for (let i = 1; i < n; i++) { 757 | s += ' '; 758 | } 759 | return s; 760 | }; 761 | var printInputStage = function (step, depth) { 762 | if ('inputStage' in step) { 763 | printInputStage(step.inputStage, depth + 1); 764 | } 765 | if ('inputStages' in step) { 766 | step.inputStages.forEach((inputStage) => { 767 | printInputStage(inputStage, depth + 1); 768 | }); 769 | } 770 | let extraData = '('; 771 | if ('indexName' in step) extraData += ' ' + step.indexName; 772 | if ('executionTimeMillisEstimate' in step) { 773 | extraData += ' ms:' + step.executionTimeMillisEstimate; 774 | } 775 | if ('keysExamined' in step) extraData += ' keys:' + step.keysExamined; 776 | if ('docsExamined' in step) { 777 | extraData += ' docsExamined:' + step.docsExamined; 778 | } 779 | if ('nReturned' in step) extraData += ' nReturned:' + step.nReturned; 780 | extraData += ')'; 781 | print(stepNo++, printSpaces(1), step.stage, extraData); 782 | }; 783 | 784 | const printAggStage = function (stage, depth) { 785 | let extraData = '('; 786 | if ('executionTimeMillisEstimate' in stage) { 787 | extraData += ' ms:' + stage.executionTimeMillisEstimate; 788 | } 789 | if 
('docsExamined' in stage) extraData += ' examined:' + stage.docsExamined; 790 | if ('nReturned' in stage) extraData += ' returned:' + stage.nReturned; 791 | extraData += ')'; 792 | print( 793 | stepNo++, 794 | printSpaces(depth), 795 | Object.keys(stage) 796 | .find((key) => key.match(/$/)) 797 | .toUpperCase(), 798 | extraData 799 | ); 800 | }; 801 | if (execStats.executionStages) { 802 | printInputStage(execStats.executionStages, 1); 803 | } 804 | 805 | if (execStatsIn && execStatsIn.stages) { 806 | for (let stageNum = 1; stageNum < execStatsIn.stages.length; stageNum++) { 807 | if (execStatsIn.stages[stageNum]) { 808 | printAggStage(execStatsIn.stages[stageNum], 1); 809 | } 810 | } 811 | } 812 | 813 | print( 814 | '\nTotals: ms:', 815 | execStats.executionTimeMillis, 816 | ' keys:', 817 | execStats.totalKeysExamined, 818 | ' Docs:', 819 | execStats.totalDocsExamined 820 | ); 821 | }; 822 | 823 | // COMPACT 824 | mongoTuning.reusablePct = function (collectionName) { 825 | let collstats = db.getCollection(collectionName).stats(); 826 | let reusable = 827 | collstats.wiredTiger['block-manager']['file bytes available for reuse']; 828 | let size = collstats.wiredTiger['block-manager']['file size in bytes']; 829 | let reusablePct = Math.round((reusable * 100) / size); 830 | print('Size:', size, ' Reusable: ', reusable, ' ', reusablePct, '%'); 831 | return Math.round((reusable * 100) / size); 832 | }; 833 | -------------------------------------------------------------------------------- /sampleData/customerDOBFix.js: -------------------------------------------------------------------------------- 1 | db.customers.find({dob:{$type:6}}).forEach((cust)=>{ 2 | var year=1000*3600*24*365; 3 | randDate=new Date(new Date()-(Math.random()*year*55)-10*year); 4 | let rc=db.customers.update({_id:cust['_id']},{$set:{dob:randDate}}); 5 | printjson(rc); 6 | }); 7 | -------------------------------------------------------------------------------- /sampleData/customersDateOfBirth.js: -------------------------------------------------------------------------------- 1 | use MongoDBTuningBook 2 | 3 | db.customers.find().forEach((c)=>{ 4 | db.customers.update({'_id':c['_id']},{$set:{dateOfBirth:c['dob']}}); 5 | }); 6 | 7 | var c=db.customers.findOne(); 8 | delete c.dob; 9 | Object.keys(c); 10 | 11 | -------------------------------------------------------------------------------- /sampleData/growCustomers.js: -------------------------------------------------------------------------------- 1 | 2 | use MongoDBTuningBook; 3 | var firstnames=db.customers.distinct("FirstName"); 4 | var lastnames=db.customers.distinct('LastName'); 5 | var phones=db.customers.distinct("Phone"); 6 | var dobs=db.customers.distinct("dob"); 7 | 8 | function getRandom(arrayIn) { 9 | let elem=Math.round((Math.random()*arrayIn.length+1)); 10 | return(arrayIn[elem]); 11 | } 12 | db.customers.count(); 13 | var baseCustomers=db.customers.find({},{'_id':0}).toArray(); 14 | baseCustomers.forEach((cust)=>{ 15 | 16 | cust.FirstName=getRandom(firstnames); 17 | cust.LastName=getRandom(lastnames); 18 | cust.Phone=getRandom(phones); 19 | cust.dob=getRandom(dobs); 20 | var rc=db.customers.insert(cust); 21 | }) 22 | db.customers.count(); -------------------------------------------------------------------------------- /sampleData/modCustomers.js: -------------------------------------------------------------------------------- 1 | use MongoDBTuningBook 2 | db.customers.findOne(); 3 | db.customers.find().forEach(cust => { 4 | if ('Rentals' in cust) { 5 | let 
rentals = cust.Rentals; 6 | let id = cust['_id']; 7 | delete cust['Rentals']; 8 | delete cust['_id']; 9 | 10 | let views = []; 11 | rentals.forEach(rental => { 12 | let randomDate = new Date( 13 | new Date() - Math.random() * 1000 * 3600 * 24 * 365 * 10 14 | ); 15 | views.push({ 16 | viewDate: randomDate, 17 | filmId: rental.filmId, 18 | title: rental['Film Title'] 19 | }); 20 | }); 21 | cust.views = views; 22 | //printjson(cust); 23 | let rc = db.customers.update({ _id: id }, cust); 24 | printjson(rc); 25 | } 26 | }); 27 | -------------------------------------------------------------------------------- /scripts/.eslintrc.yml: -------------------------------------------------------------------------------- 1 | # @Author: Guan Gui 2 | # @Date: 2016-06-02T14:58:00+10:00 3 | # @Email: root@guiguan.net 4 | # @Last modified by: guiguan 5 | # @Last modified time: 2017-03-08T16:48:31+11:00 6 | 7 | extends: airbnb 8 | parser: babel-eslint 9 | env: 10 | browser: true 11 | node: true 12 | mocha: true 13 | es6: true 14 | rules: 15 | arrow-body-style: 0 16 | key-spacing: 0 17 | object-property-newline: 0 18 | no-cond-assign: [error, except-parens] 19 | no-nested-ternary: 0 20 | no-shadow: 0 21 | eqeqeq: 0 22 | array-callback-return: 0 23 | no-param-reassign: 0 24 | space-before-function-paren: 0 25 | global-require: 0 26 | consistent-return: 0 27 | quote-props: 0 28 | no-underscore-dangle: 0 29 | prefer-const: [error, {ignoreReadBeforeAssign: true}] 30 | max-len: 0 31 | prefer-template: 0 32 | object-curly-spacing: 0 33 | comma-dangle: 0 34 | indent: 0 35 | no-console: 0 36 | no-alert: 0 37 | no-multiple-empty-lines: [2, {max: 3}] 38 | func-names: 0 39 | new-cap: [2, {capIsNew: false}] 40 | import/default: 0 41 | import/no-duplicates: 0 42 | import/named: 0 43 | import/namespace: 0 44 | import/no-named-as-default: 2 45 | # this requires eslint-import-resolver-node@0.3.0 to be installed in order for import/resolver settings to work 46 | import/no-unresolved: [2, {ignore: ["^[~]", "^[#]"]}] 47 | import/prefer-default-export: 0 48 | import/no-extraneous-dependencies: 0 49 | import/extensions: 0 50 | react/prefer-stateless-function: 0 51 | react/jsx-space-before-closing: 0 52 | react/jsx-first-prop-new-line: 0 53 | react/jsx-closing-bracket-location: 0 54 | react/no-multi-comp: 0 55 | react/no-did-mount-set-state: 0 56 | react/prop-types: 0 57 | no-unused-vars: [error, {argsIgnorePattern: ^_}] 58 | no-restricted-syntax: 0 59 | no-unused-expressions: 0 60 | newline-per-chained-call: 0 61 | no-undef: 0 62 | class-methods-use-this: 0 63 | no-use-before-define: [error, {functions: false, classes: true}] 64 | plugins: [react, import] 65 | settings: 66 | import/resolver: 67 | node: 68 | moduleDirectory: 69 | - node_modules 70 | - src 71 | globals: 72 | l: false 73 | -------------------------------------------------------------------------------- /scripts/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | 3 | .DS_Store 4 | 5 | yarn\.lock 6 | yarn.lock 7 | yarn-error\.log 8 | 9 | package-lock.json 10 | -------------------------------------------------------------------------------- /scripts/CurrentOp.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Current Op helper functions for the Apress book "MongoDB Performance Tuning" 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 
5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:50:37+10:00 8 | * 9 | */ 10 | mongoTuning.printCurrentOps = function (printZeroSecs, printInternalProcess) { 11 | // console.log(COps); 12 | var mydb = db.getSiblingDB('admin'); // eslint-disable-line 13 | var output = []; 14 | var result = {}; 15 | var currentOps = mydb.currentOp(); 16 | if (currentOps.hasOwnProperty('errmsg')) { 17 | output.push({ 18 | error: currentOps.errmsg, 19 | }); 20 | } else { 21 | var opArray = []; 22 | // print(clusterOps); print("+++++++++++++++"); print(JSON.stringify(currentOps)); 23 | var inprog = currentOps.inprog; 24 | var server = currentOps.server; 25 | inprog.forEach(function (currentOp) { 26 | // printjson(currentOp); 27 | var secs = 0; 28 | 29 | if (currentOp.hasOwnProperty('secs_running')) { 30 | secs = currentOp.secs_running; 31 | } 32 | var myop = currentOp.op; 33 | var query = {}; 34 | if ('query' in currentOp) { 35 | query = JSON.stringify(currentOp.query); 36 | } else if ('command' in currentOp) { 37 | query = JSON.stringify(currentOp.command); 38 | } 39 | if (query.length > 2) { 40 | myop = query; 41 | } 42 | opArray.push({ 43 | server: server, 44 | desc: currentOp.desc, 45 | secs: secs, 46 | ns: currentOp.ns, 47 | op: myop, 48 | opid: currentOp.opid, 49 | }); 50 | // 51 | }); 52 | 53 | opArray.sort(function (a, b) { 54 | // Sort in desc order of seconds active 55 | return b.secs - a.secs; 56 | }); 57 | // printjson(opArray); // eslint-disable-line 58 | opArray.forEach(function (op) { 59 | if ( 60 | (printZeroSecs === true || op.secs > 0) && 61 | (printInternalProcess === true || 62 | (op.desc !== 'rsBackgroundSync' && 63 | op.desc !== 'ReplBatcher' && 64 | op.desc !== 'rsSync' && 65 | op.desc !== 'WT RecordStoreThread: local.oplog.rs' && 66 | op.desc !== 'SyncSourceFeedback' && 67 | op.desc !== 'NoopWriter' && 68 | op.ns != 'local.oplog.rs')) 69 | ) { 70 | output.push({ 71 | desc: op.desc, 72 | secs: op.secs, 73 | ns: op.ns, 74 | op: op.op, 75 | opid: op.opid, 76 | }); 77 | } 78 | }); 79 | } 80 | result.ops = output; 81 | return result; 82 | }; 83 | 84 | mongoTuning.opForKillList = function () { 85 | var output = []; 86 | mongoTuning.printCurrentOps(true, false).ops.forEach(function (op) { 87 | var outStr = 88 | op.opid + ' ' + op.secs + ' seconds running. ' + op.desc + ' ' + op.ns; 89 | output.push(outStr); 90 | }); 91 | return output; 92 | }; 93 | 94 | mongoTuning.killOp = function (opIdString) { 95 | var opid = opIdString.split(' ')[0]; 96 | if (opid.indexOf(':') == -1) { 97 | opid = parseInt(opid); // eslint-disable-line 98 | } 99 | print('Issuing kill on ' + opid); 100 | var ret = db.killOp(opid); //eslint-disable-line 101 | printjson(ret); // eslint-disable-line 102 | }; 103 | -------------------------------------------------------------------------------- /scripts/Explain.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Explain Plan helper functions for the Apress book "MongoDB Performance Tuning" 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 
5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:51:11+10:00 8 | * 9 | */ 10 | 11 | mongoTuning.prepExplain = (explainInput) => { 12 | // Takes as input explain output in one of the following formats: 13 | // A full explain JSON document, in which case emits winningPlan 14 | // An explain() cursor in which case, extracts the winningPlan from the cursor 15 | // A specific plan step in which case just returns that 16 | 17 | const keys = Object.keys(explainInput); 18 | // printjson(keys); 19 | if (keys.includes('queryPlanner')) { 20 | // This looks like a top level Explain 21 | return explainInput.queryPlanner.winningPlan; 22 | } else if (keys.includes('hasNext')) { 23 | // This looks like a cursor 24 | if (explainInput.hasNext()) { 25 | return mongoTuning.prepExplain(explainInput.next()); 26 | } 27 | return { ok: 0, error: 'No plan found' }; 28 | } else if (keys.includes('stage')) { 29 | // This looks like an actual plan 30 | return explainInput; 31 | } 32 | return { ok: 0, error: 'No plan found' }; 33 | }; 34 | 35 | mongoTuning.quickExplain = (inputPlan) => { 36 | // Takes as input an explain Plan. Emits a simplified 37 | // version of that plan 38 | const explainPlan = mongoTuning.prepExplain(inputPlan); 39 | let stepNo = 1; 40 | 41 | const printSpaces = function (n) { 42 | let s = ''; 43 | for (let i = 1; i < n; i++) { 44 | s += ' '; 45 | } 46 | return s; 47 | }; 48 | const printInputStage = function (step, depth) { 49 | if ('inputStage' in step) { 50 | printInputStage(step.inputStage, depth + 1); 51 | } 52 | if ('inputStages' in step) { 53 | step.inputStages.forEach((inputStage) => { 54 | printInputStage(inputStage, depth + 1); 55 | }); 56 | } 57 | if ('indexName' in step) { 58 | print(stepNo++, printSpaces(depth), step.stage, step.indexName); 59 | } else { 60 | print(stepNo++, printSpaces(depth), step.stage); 61 | } 62 | }; 63 | printInputStage(explainPlan, 1); 64 | }; 65 | 66 | mongoTuning.prepExecutionStats = (explainInput) => { 67 | // Takes as input explain output in one of the following formats: 68 | // A full explain JSON document, in which case emits executionStats 69 | // An explain() cursor in which case, extracts the executionStats from the cursor 70 | 71 | const keys = Object.keys(explainInput); 72 | 73 | if (keys.includes('executionStats')) { 74 | // This looks like a top level Explain 75 | return explainInput.executionStats; 76 | } else if (keys.includes('hasNext')) { 77 | // This looks like a cursor 78 | 79 | if (explainInput.hasNext()) { 80 | return mongoTuning.prepExecutionStats(explainInput.next()); 81 | } 82 | } else if (explainInput.stages) { 83 | } else return { ok: 0, error: 'No executionStats found' }; 84 | }; 85 | 86 | mongoTuning.executionStats = (execStatsIn) => { 87 | if (execStatsIn.stages) { 88 | return mongoTuning.aggregationExecutionStats(execStatsIn); 89 | } 90 | const execStats = mongoTuning.prepExecutionStats(execStatsIn); 91 | // printjson(execStats); 92 | let stepNo = 1; 93 | print('\n'); 94 | const printSpaces = function (n) { 95 | let s = ''; 96 | for (let i = 1; i < n; i++) { 97 | s += ' '; 98 | } 99 | return s; 100 | }; 101 | var printInputStage = function (step, depth) { 102 | if ('inputStage' in step) { 103 | printInputStage(step.inputStage, depth + 1); 104 | } 105 | if ('inputStages' in step) { 106 | step.inputStages.forEach((inputStage) => { 107 | printInputStage(inputStage, depth + 1); 108 | }); 109 | } 110 | if ('shards' in step) { 111 | step.shards.forEach((inputShard) => { 112 | 
printInputStage(inputShard, depth + 1); 113 | }); 114 | } 115 | if ('shardName' in step) { 116 | printInputStage(step.executionStages, depth + 1); 117 | } 118 | let extraData = '('; 119 | let printStage = 'unknown'; 120 | if ('stage' in step) { 121 | printStage = step.stage; 122 | } 123 | if ('shardName' in step) { 124 | printStage = 'Shard ==> ' + step.shardName; 125 | } 126 | if ('indexName' in step) extraData += ' ' + step.indexName; 127 | if ('executionTimeMillisEstimate' in step) { 128 | extraData += ' ms:' + step.executionTimeMillisEstimate; 129 | } 130 | if ('executionTimeMillis' in step) { 131 | extraData += ' ms:' + step.executionTimeMillis; 132 | } 133 | if ('nReturned' in step) { 134 | extraData += ' returned:' + step.nReturned; 135 | } 136 | if ('keysExamined' in step) extraData += ' keys:' + step.keysExamined; 137 | if ('docsExamined' in step) extraData += ' docs:' + step.docsExamined; 138 | if ('nWouldModify' in step && step.nWouldModify !== false) 139 | extraData += ' upd:' + step.nWouldModify; 140 | if ('wouldInsert' in step && step.wouldInsert !== false) 141 | extraData += ' ins:' + step.wouldInsert; 142 | extraData += ')'; 143 | print(stepNo++, printSpaces(depth), printStage, extraData); 144 | }; 145 | printInputStage(execStats.executionStages, 1); 146 | print( 147 | '\nTotals: ms:', 148 | execStats.executionTimeMillis, 149 | ' keys:', 150 | execStats.totalKeysExamined, 151 | ' Docs:', 152 | execStats.totalDocsExamined 153 | ); 154 | }; 155 | 156 | mongoTuning.aggregationExecutionStats = (execStatsIn) => { 157 | // printjson(execStatsIn); 158 | let execStats = {}; 159 | let stepNo = 1; 160 | if ( 161 | execStatsIn.stages && 162 | execStatsIn.stages[0].$cursor && 163 | execStatsIn.stages[0].$cursor.executionStats 164 | ) { 165 | execStats = execStatsIn.stages[0].$cursor.executionStats; 166 | } else if (execStatsIn.executionStats) { 167 | execStats = execStatsIn.executionStats; 168 | } 169 | print('\n'); 170 | const printSpaces = function (n) { 171 | let s = ''; 172 | for (let i = 1; i < n; i++) { 173 | s += ' '; 174 | } 175 | return s; 176 | }; 177 | var printInputStage = function (step, depth) { 178 | if ('inputStage' in step) { 179 | printInputStage(step.inputStage, depth + 1); 180 | } 181 | if ('inputStages' in step) { 182 | step.inputStages.forEach((inputStage) => { 183 | printInputStage(inputStage, depth + 1); 184 | }); 185 | } 186 | let extraData = '('; 187 | if ('indexName' in step) extraData += ' ' + step.indexName; 188 | if ('executionTimeMillisEstimate' in step) { 189 | extraData += ' ms:' + step.executionTimeMillisEstimate; 190 | } 191 | if ('keysExamined' in step) extraData += ' keys:' + step.keysExamined; 192 | if ('docsExamined' in step) { 193 | extraData += ' docsExamined:' + step.docsExamined; 194 | } 195 | if ('nReturned' in step) extraData += ' nReturned:' + step.nReturned; 196 | extraData += ')'; 197 | print(stepNo++, printSpaces(1), step.stage, extraData); 198 | }; 199 | 200 | const printAggStage = function (stage, depth) { 201 | let extraData = '('; 202 | if ('executionTimeMillisEstimate' in stage) { 203 | extraData += ' ms:' + stage.executionTimeMillisEstimate; 204 | } 205 | if ('docsExamined' in stage) extraData += ' examined:' + stage.docsExamined; 206 | if ('nReturned' in stage) extraData += ' returned:' + stage.nReturned; 207 | extraData += ')'; 208 | print( 209 | stepNo++, 210 | printSpaces(depth), 211 | Object.keys(stage) 212 | .find((key) => key.match(/$/)) 213 | .toUpperCase(), 214 | extraData 215 | ); 216 | }; 217 | if 
(execStats.executionStages) { 218 | printInputStage(execStats.executionStages, 1); 219 | } 220 | 221 | if (execStatsIn && execStatsIn.stages) { 222 | for (let stageNum = 1; stageNum < execStatsIn.stages.length; stageNum++) { 223 | if (execStatsIn.stages[stageNum]) { 224 | printAggStage(execStatsIn.stages[stageNum], 1); 225 | } 226 | } 227 | } 228 | 229 | print( 230 | '\nTotals: ms:', 231 | execStats.executionTimeMillis, 232 | ' keys:', 233 | execStats.totalKeysExamined, 234 | ' Docs:', 235 | execStats.totalDocsExamined 236 | ); 237 | }; 238 | -------------------------------------------------------------------------------- /scripts/QueryProfiler.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Query Profiler helper functions for the Apress book "MongoDB Performance Tuning" 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:50:52+10:00 8 | * 9 | */ 10 | mongoTuning.profileQuery = () => { 11 | const profileQuery = db.system.profile.aggregate([ 12 | { 13 | $group: { 14 | _id: { cursorid: '$cursorid' }, 15 | count: { $sum: 1 }, 16 | 'queryHash-max': { $max: '$queryHash' }, 17 | 'millis-sum': { $sum: '$millis' }, 18 | 'ns-max': { $max: '$ns' }, 19 | }, 20 | }, 21 | { 22 | $group: { 23 | _id: { 24 | queryHash: '$queryHash-max', 25 | collection: '$ns-max', 26 | }, 27 | count: { $sum: 1 }, 28 | millis: { $sum: '$millis-sum' }, 29 | }, 30 | }, 31 | { $sort: { millis: -1 } }, 32 | { $limit: 10 }, 33 | ]); 34 | return profileQuery; 35 | }; 36 | 37 | /** 38 | * Get details of a query from system.profile using the queryhash 39 | * 40 | * @param {string} queryHash - The queryHash of the query of interest. 41 | * 42 | * @returns {queryDetails} query ns, command and basic statistics 43 | */ 44 | 45 | mongoTuning.getQueryByHash = function (queryHash) { 46 | return db.system.profile.findOne( 47 | { queryHash }, 48 | { ns: 1, command: 1, docsExamined: 1, millis: 1, planSummary: 1 } 49 | ); 50 | }; 51 | 52 | /** 53 | * Fetch simplified profiling info for a given database and namespace. 54 | * 55 | * @param {string} dbName - The name of the database to fetch profiling data for. 56 | * @param {string} collectionName - The name of the collection to fetch profiling data for. 57 | * 58 | * @returns {ProfilingData} Profiling data for the given namespace (queries only), grouped and simplified. 59 | */ 60 | 61 | mongoTuning.getProfileData = function (dbName, collectionName) { 62 | var mydb = db.getSiblingDB(dbName); // eslint-disable-line 63 | const ns = dbName + '.' 
+ collectionName; 64 | const profileData = mydb 65 | .getSiblingDB(dbName) 66 | .getCollection('system.profile') 67 | .aggregate([ 68 | { 69 | $match: { 70 | ns, 71 | op: 'query', 72 | }, 73 | }, 74 | { 75 | $group: { 76 | _id: { 77 | filter: '$query.filter', 78 | }, 79 | count: { 80 | $sum: 1, 81 | }, 82 | 'millis-sum': { 83 | $sum: '$millis', 84 | }, 85 | 'nreturned-sum': { 86 | $sum: '$nreturned', 87 | }, 88 | 'planSummary-first': { 89 | $first: '$planSummary', 90 | }, 91 | 'docsExamined-sum': { 92 | $sum: '$docsExamined', 93 | }, 94 | }, 95 | }, 96 | { 97 | $sort: { 98 | 'millis-sum': -1, 99 | }, 100 | }, 101 | ]); 102 | return profileData; 103 | }; 104 | -------------------------------------------------------------------------------- /scripts/ServerStats.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Server Statistic helper functions for the Apress book "MongoDB Performance Tuning" 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2020-09-14T08:56:29+10:00 8 | * 9 | */ 10 | 11 | // 12 | // ─── DATA GATHERING ───────────────────────────────────────────────────────────── 13 | // 14 | /** 15 | * Base function that will collect and shape raw server statistics data. 16 | * 17 | * @returns {FlatServerStats} An object containing many different server statistics. 18 | */ 19 | mongoTuning.serverStatistics = function () { 20 | const output = {}; 21 | let value; 22 | let rate; 23 | output.statistics = []; 24 | var serverStats = mongoTuning.flattenServerStatus(db.serverStatus()).stats; // eslint-disable-line 25 | const uptime = serverStats.uptimeMillis / 1000; // seconds with precision 26 | Object.keys(serverStats).forEach((stat) => { 27 | // print(stat); 28 | value = serverStats[stat]; 29 | rate = null; 30 | if (typeof value === 'number') { 31 | rate = (value / uptime).toFixed(4); 32 | } 33 | if (!stat.match(/_mongo/)) { 34 | output.statistics.push({ 35 | statistic: stat, 36 | value, 37 | ratePs: rate, 38 | }); 39 | } 40 | }); 41 | return output; 42 | }; 43 | 44 | // 45 | // ─── MONITORING ───────────────────────────────────────────────────────────────── 46 | // 47 | /** 48 | * Helper function for monitoring the MongoDB server over the duration and then 49 | * calculating delta and final values across the duration for key statistics. 50 | * 51 | * @param {int} duration - How long to monitor the server for. 52 | * @returns {Object} - Data object containing Deltas and final values. 
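 * @example
 * // A usage sketch (assumes mongoTuning.js is loaded in the shell; the 10000 ms sample window is arbitrary):
 * var sample = mongoTuning.monitorServer(10000);
 * printjson(sample.deltas['opcounters.query']); // first/last values, delta and per-second rate over the interval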
53 | */ 54 | mongoTuning.monitorServer = function (duration) { 55 | let runningStats; 56 | let initialStats; 57 | const runTime = 0; 58 | initialStats = mongoTuning.serverStatistics(); 59 | sleep(duration); 60 | finalStats = mongoTuning.serverStatistics(); 61 | const deltas = mongoTuning.serverStatDeltas(initialStats, finalStats); 62 | const finals = mongoTuning.convertStat(finalStats); 63 | return { deltas, finals }; 64 | }; 65 | 66 | mongoTuning.keyServerStats = function (duration, regex) { 67 | const monitoringData = mongoTuning.monitorServer(duration); 68 | return mongoTuning.keyServerStatsFromSample(monitoringData, regex); 69 | }; 70 | 71 | mongoTuning.keyServerStatsFromSample = function (monitoringData, regex) { 72 | const data = mongoTuning.derivedStatistics(monitoringData); 73 | if (regex) { 74 | return mongoTuning.serverStatSearch(data, regex); 75 | } 76 | return data; 77 | }; 78 | 79 | /** 80 | * Monitor the MongoDB server for a given duration and return some derived statistics. 81 | * 82 | * @param {int} duration - How many milliseconds to monitor the server for. 83 | * @param {string} regex - OPTIONAL - A string to perform a match against returned statistic keys. 84 | * @returns {Object} - An object containing the derived statistics matching the regex if given. 85 | */ 86 | mongoTuning.monitorServerDerived = function (duration, regex) { 87 | if (!duration) { 88 | duration = 5000; 89 | } 90 | const monitoringData = mongoTuning.monitorServer(duration); 91 | const derivedStats = mongoTuning.derivedStatistics(monitoringData); 92 | 93 | if (regex) { 94 | return mongoTuning.serverStatSearch(derivedStats, regex); 95 | } 96 | return derivedStats; 97 | }; 98 | 99 | /** 100 | * Monitor the MongoDB server for a given duration and return the raw statistics. 101 | * 102 | * @param {int} duration - How many milliseconds to monitor the server for. 103 | * @param {string} regex - OPTIONAL - A string to perform a match against returned statistic keys. 104 | * @returns {Object} - An object containing the derived statistics matching the regex if given. 105 | */ 106 | mongoTuning.monitorServerRaw = function (duration, regex) { 107 | if (!duration) { 108 | duration = 5000; 109 | } 110 | const monitoringData = mongoTuning.monitorServer(duration); 111 | if (regex) { 112 | return mongoTuning.serverStatSearchRaw(monitoringData, regex); 113 | } 114 | return monitoringData; 115 | }; 116 | 117 | // 118 | // ─── HELPERS ──────────────────────────────────────────────────────────────────── 119 | // 120 | 121 | /** 122 | * Converts structured mongodb ServerStatus object into a flattened array of stats. 123 | * 124 | * @param {RawServerStatus} dbServerStatus - The raw result of the mongodb command. 125 | * @returns {FlatServerStatus} - Flattened array of server status metrics. 
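 * @example
 * // A usage sketch; 'opcounters.query' is just one of the flattened key names this produces:
 * var flat = mongoTuning.flattenServerStatus(db.serverStatus()).stats;
 * print(flat['opcounters.query']);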
126 | */ 127 | mongoTuning.flattenServerStatus = function (dbServerStatus) { 128 | const flattenedServerStatus = {}; 129 | flattenedServerStatus.stats = {}; 130 | 131 | function internalflattenServerStatus(serverStatus, rootTerm) { 132 | let prefix = ''; 133 | if (arguments.length > 1) { 134 | prefix = rootTerm + '.'; 135 | } 136 | Object.getOwnPropertyNames(serverStatus).forEach((key) => { 137 | if (key !== '_mongo') { 138 | let value = serverStatus[key]; 139 | // eslint-disable-next-line 140 | if (value.constructor === NumberLong) { 141 | value = value.toNumber(); 142 | } 143 | const valtype = typeof value; 144 | const fullkey = prefix + key; 145 | // print(key, value, valtype, fullkey); 146 | if (valtype == 'object') { 147 | // recurse into nested objects 148 | internalflattenServerStatus(value, prefix + key); 149 | } else { 150 | /* No more nesting */ 151 | flattenedServerStatus.stats[fullkey] = value; 152 | } 153 | } 154 | }); 155 | } 156 | internalflattenServerStatus(dbServerStatus); 157 | return flattenedServerStatus; 158 | }; 159 | 160 | /** 161 | * Flattens complex server statistics into a simpler form. 162 | * 163 | * @param {RawServerStatus} serverStat - The raw server statistics result from Mongo. 164 | * @returns {SimpleServerStatus} - A single object with key value pairs for each stat. 165 | */ 166 | mongoTuning.convertStat = function (serverStat) { 167 | const returnStat = {}; 168 | serverStat.statistics.forEach((stat) => { 169 | returnStat[stat.statistic] = stat.value; 170 | }); 171 | return returnStat; 172 | }; 173 | 174 | /** 175 | * Takes two sets of server statistics and calculates the difference and rate of change. 176 | * @param {Object} initialStats - First set of statistics. 177 | * @param {Object} finalStats - Second set of statistics. 178 | * @returns {Array} - Array of delta information. 179 | */ 180 | mongoTuning.serverStatDeltas = function (initialStats, finalStats) { 181 | const stat1 = mongoTuning.convertStat(initialStats); 182 | const stat2 = mongoTuning.convertStat(finalStats); 183 | let delta; 184 | let rate; 185 | const statDelta = {}; 186 | statDelta.timeDelta = stat2.uptime - stat1.uptime; 187 | 188 | Object.keys(stat2).forEach((key) => { 189 | // print(key,typeof stat2[key]); 190 | if (typeof stat2[key] === 'number') { 191 | delta = stat2[key] - stat1[key]; 192 | rate = delta / statDelta.timeDelta; 193 | } else { 194 | delta = null; 195 | rate = null; 196 | } 197 | statDelta[key] = { 198 | lastValue: stat2[key], 199 | firstValue: stat1[key], 200 | delta, 201 | rate, 202 | }; 203 | }); 204 | return statDelta; 205 | }; 206 | 207 | /** 208 | * Simple helper function for searching derived server stats for matching keys. 209 | * 210 | * @param {Object} stats - The server statistics to search. 211 | * @param {String} regex - Regex to search for statistic keys. 212 | * @returns {Array} - An array of matching key value pairs. 213 | */ 214 | mongoTuning.serverStatSearch = function (stats, regex) { 215 | const returnArray = {}; 216 | Object.keys(stats).forEach((key) => { 217 | if (key.match(regex)) { 218 | returnArray[key] = stats[key]; 219 | } 220 | }); 221 | return returnArray; 222 | }; 223 | 224 | /** 225 | * Simple helper function for searching raw server stats for matching keys. 226 | * 227 | * @param {Object} stats - The server statistics to search. 228 | * @param {String} regex - Regex to search for statistic keys. 229 | * @returns {Array} - An array of matching key value pairs. 
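 * @example
 * // A sketch: keep only WiredTiger cache statistics from a raw monitoring sample.
 * // (The 5000 ms window and the regex are illustrative values only.)
 * var raw = mongoTuning.monitorServerRaw(5000);
 * printjson(mongoTuning.serverStatSearchRaw(raw, /wiredTiger.cache/));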
230 | */ 231 | mongoTuning.serverStatSearchRaw = function (stats, regex) { 232 | const returnArray = { deltas: {}, finals: {} }; 233 | // First filter deltas. 234 | Object.keys(stats.deltas).forEach((key) => { 235 | if (key.match(regex)) { 236 | returnArray.deltas[key] = stats.deltas[key]; 237 | } 238 | }); 239 | // Then filter finals 240 | Object.keys(stats.finals).forEach((key) => { 241 | if (key.match(regex)) { 242 | returnArray.finals[key] = stats.finals[key]; 243 | } 244 | }); 245 | return returnArray; 246 | }; 247 | 248 | /** 249 | * Derive some summary statistics from observed values. 250 | * @param {Object} serverData - Server data gathered from mongoTuning.monitorServer, should contain deltas and final values. 251 | * @returns {Object} - Data object containing the derived statistics. 252 | */ 253 | mongoTuning.derivedStatistics = function (serverData) { 254 | const { deltas, finals } = serverData; 255 | const data = {}; 256 | const descriptions = {}; 257 | // ********************************************* 258 | // Network counters 259 | // ********************************************* 260 | 261 | data.netKBInPS = deltas['network.bytesIn'].rate / 1024; 262 | data.netKBOutPS = deltas['network.bytesOut'].rate / 1024; 263 | 264 | // ******************************************** 265 | // Activity counters 266 | // ******************************************** 267 | data.intervalSeconds = deltas.timeDelta; 268 | data.queryPS = deltas['opcounters.query'].rate; 269 | data.getmorePS = deltas['opcounters.getmore'].rate; 270 | data.commandPS = deltas['opcounters.command'].rate; 271 | data.insertPS = deltas['opcounters.insert'].rate; 272 | data.updatePS = deltas['opcounters.update'].rate; 273 | data.deletePS = deltas['opcounters.delete'].rate; 274 | 275 | // ******************************************** 276 | // Document counters 277 | // ******************************************** 278 | data.docsReturnedPS = deltas['metrics.document.returned'].rate; 279 | data.docsUpdatedPS = deltas['metrics.document.updated'].rate; 280 | data.docsInsertedPS = deltas['metrics.document.inserted'].rate; 281 | data.ixscanDocsPS = deltas['metrics.queryExecutor.scanned'].rate; 282 | data.collscanDocsPS = deltas['metrics.queryExecutor.scannedObjects'].rate; 283 | 284 | descriptions.scansToDocumentRatio = 285 | 'Ratio of documents scanned to documents returned'; 286 | if (data.docsReturnedPS > 0) { 287 | data.scansToDocumentRatio = 288 | (data.ixscanDocsPS + data.collscanDocsPS) / data.docsReturnedPS; 289 | } else { 290 | data.scansToDocumentRatio = 0; 291 | } 292 | 293 | // ******************************************** 294 | // Transaction statistics 295 | // ******************************************** 296 | data.transactionsStartedPS = deltas['transactions.totalStarted'].rate; 297 | data.transactionsAbortedPS = deltas['transactions.totalAborted'].rate; 298 | data.transactionsCommittedPS = deltas['transactions.totalCommitted'].rate; 299 | if (data.transactionsStartedPS > 0) { 300 | data.transactionAbortPct = 301 | (data.transactionsAbortedPS * 100) / data.transactionsStartedPS; 302 | } else { 303 | data.transactionAbortPct = 0; 304 | } 305 | 306 | if (deltas['opLatencies.reads.ops'].delta > 0) { 307 | data.readLatencyMs = 308 | deltas['opLatencies.reads.latency'].delta / 309 | deltas['opLatencies.reads.ops'].delta / 310 | 1000; 311 | } else data.readLatency = 0; 312 | 313 | if (deltas['opLatencies.writes.ops'].delta > 0) { 314 | data.writeLatencyMs = 315 | deltas['opLatencies.writes.latency'].delta / 316 | 
deltas['opLatencies.writes.ops'].delta / 317 | 1000; 318 | } else data.writeLatency = 0; 319 | 320 | if (deltas['opLatencies.commands.ops'].delta > 0) { 321 | data.cmdLatencyMs = 322 | deltas['opLatencies.commands.latency'].delta / 323 | deltas['opLatencies.commands.ops'].delta / 324 | 1000; 325 | } else data.cmdLatency = 0; 326 | 327 | data.connections = deltas['connections.current'].lastValue; 328 | data.availableConnections = deltas['connections.available'].firstValue; 329 | data.assertsPS = 330 | deltas['asserts.regular'].rate + 331 | deltas['asserts.warning'].rate + 332 | deltas['asserts.msg'].rate + 333 | deltas['asserts.user'].rate + 334 | deltas['asserts.rollovers'].rate; 335 | 336 | data.activeReaders = finals['globalLock.activeClients.readers']; 337 | data.activeWriters = finals['globalLock.activeClients.writers']; 338 | data.queuedReaders = finals['globalLock.currentQueue.readers']; 339 | data.queuedWriters = finals['globalLock.currentQueue.writers']; 340 | data.globalLockQueue = {readActive: data.activeReaders,readQueued:data.queuedReaders, 341 | writeActive: data.activeWriters, writeQueued:data.queuedWriters }; 342 | 343 | // ********************************************************* 344 | // Memory counters 345 | // ********************************************************* 346 | 347 | data.cacheReadQAvailable = 348 | deltas['wiredTiger.concurrentTransactions.read.available'].lastValue; 349 | data.cacheReadQUsed = 350 | deltas['wiredTiger.concurrentTransactions.read.out'].lastValue; 351 | 352 | data.cacheWriteQAvailable = 353 | deltas['wiredTiger.concurrentTransactions.write.available'].lastValue; 354 | data.cacheWriteQUsed = 355 | deltas['wiredTiger.concurrentTransactions.write.out'].lastValue; 356 | 357 | data.cacheGetsPS = 358 | deltas['wiredTiger.cache.pages requested from the cache'].rate; 359 | 360 | data.cacheReadInsPS = deltas['wiredTiger.cache.pages read into cache'].rate; 361 | 362 | descriptions.cacheHitRate = 'Hit Rate in the wiredTigerCache '; 363 | if (data.cacheGetsPS > 0) { 364 | data.cacheHitRate = 365 | ((data.cacheGetsPS - data.cacheReadInsPS) * 100) / data.cacheGetsPS; 366 | } else { 367 | data.cacheHitRate = 0; 368 | } 369 | 370 | data.evictionsPs = 371 | deltas['wiredTiger.cache.internal pages evicted'].rate; 372 | data.evictionBlockedPs = 373 | deltas['wiredTiger.thread-yield.page acquire eviction blocked'].rate; 374 | if (data.evictionsPs > 0) { 375 | data.evictionBlockRate = (data.evictionBlockedPs * 100) / data.evictionsPs; 376 | } else data.evictionBlockRate = 0; 377 | 378 | if (data.cacheReadInsPS > 0) { 379 | data.evictionRate = (data.evictionsPs * 100) / data.cacheReadInsPS; 380 | } else data.evictionRate = 0; 381 | 382 | data.cacheHighWaterMB = 383 | deltas['wiredTiger.cache.maximum bytes configured'].lastValue / 1048576; 384 | 385 | data.cacheSizeMB = 386 | deltas['wiredTiger.cache.bytes currently in the cache'].lastValue / 1048576; 387 | 388 | data.diskBlockReadsPS = deltas['wiredTiger.block-manager.blocks read'].rate; 389 | data.diskBlockWritesPS = 390 | deltas['wiredTiger.block-manager.blocks written'].rate; 391 | 392 | data.logKBRatePS = deltas['wiredTiger.log.log bytes written'].rate / 1024; 393 | 394 | data.logSyncTimeRateMsPS = 395 | deltas['wiredTiger.log.log sync time duration (usecs)'].rate / 1000; 396 | 397 | data.logSyncOpsPS = 398 | deltas['wiredTiger.log.log sync operations'].rate; 399 | 400 | if (data.logSyncOpsPS>0) { 401 | data.logAvgSyncTime=data.logSyncTimeRateMsPS/data.logSyncOpsPS; 402 | } 403 | else data.logAvgSyncTime=0; 
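  // Worked example (hypothetical numbers): a sample showing logSyncTimeRateMsPS = 20
  // and logSyncOpsPS = 4 yields logAvgSyncTime = 20 / 4 = 5 ms per journal sync.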
404 | 405 | // ********************************************************* 406 | // Disk IO 407 | // ********************************************************* 408 | 409 | 410 | 411 | Object.keys(data).forEach((key) => { 412 | if (data[key] % 1 > 0.01) { 413 | data[key] = data[key].toFixed(4); 414 | } 415 | }); 416 | return data; 417 | }; 418 | 419 | mongoTuning.memoryReport = () => { 420 | const serverStats = db.serverStatus(); 421 | print('Mongod virtual memory ', serverStats.mem.virtual); 422 | print('Mongod resident memory', serverStats.mem.resident); 423 | print( 424 | 'Wired Tiger cache size', 425 | Math.round( 426 | serverStats.wiredTiger.cache['bytes currently in the cache'] / 1048576 427 | ) 428 | ); 429 | }; 430 | -------------------------------------------------------------------------------- /scripts/Views.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Utility script used for comparing view versus aggregation performance. 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:46:25+10:00 8 | * 9 | */ 10 | let viewTests = {}; 11 | viewTests.runAggregates = function (runs) { 12 | let results = []; 13 | let runningAverage = 0; 14 | for (let i = 0; i < runs; i++) { 15 | print('Run #' + i); 16 | let ep = db.movies.explain('executionStats').aggregate([ 17 | { 18 | $project: { 19 | genres: 1, 20 | plot: 1, 21 | title: 1, 22 | countries: 1, 23 | type: 1, 24 | released: 1, 25 | }, 26 | }, 27 | { $unwind: '$genres' }, 28 | { $unwind: '$countries' }, 29 | { $match: { countries: 'USA', genres: 'Short' } }, 30 | { 31 | $lookup: { 32 | from: 'comments', 33 | localField: '_id', 34 | foreignField: 'movie_id', 35 | as: 'comments', 36 | }, 37 | }, 38 | { $match: { comments: { $size: 1 } } }, 39 | { $limit: 10 }, 40 | ]); 41 | results.push(ep.stages[0]['$cursor'].executionStats.executionTimeMillis); 42 | runningAverage += 43 | ep.stages[0]['$cursor'].executionStats.executionTimeMillis; 44 | } 45 | printjson(results); 46 | print(runningAverage / runs); 47 | }; 48 | 49 | viewTests.runViewQueries = function (runs) { 50 | let results = []; 51 | let runningAverage = 0; 52 | for (let i = 0; i < runs; i++) { 53 | print('Run #' + i); 54 | let ep = db.usa_short_films 55 | .find({ comments: { $size: 1 } }) 56 | .limit(10) 57 | .explain('executionStats'); 58 | // printjson(ep); 59 | results.push(ep.stages[0]['$cursor'].executionStats.executionTimeMillis); 60 | runningAverage += 61 | ep.stages[0]['$cursor'].executionStats.executionTimeMillis; 62 | } 63 | printjson(results); 64 | print(runningAverage / runs); 65 | }; 66 | 67 | viewTests.createMaterialisedView = function () { 68 | db.movies.aggregate([ 69 | { 70 | $project: { 71 | genres: 1, 72 | plot: 1, 73 | title: 1, 74 | countries: 1, 75 | type: 1, 76 | released: 1, 77 | }, 78 | }, 79 | { $unwind: '$genres' }, 80 | { $unwind: '$countries' }, 81 | { $match: { countries: 'USA', genres: 'Short' } }, 82 | { 83 | $lookup: { 84 | from: 'comments', 85 | localField: '_id', 86 | foreignField: 'movie_id', 87 | as: 'comments', 88 | }, 89 | }, 90 | { 91 | $merge: { 92 | into: 'usa_short_films_merged', 93 | whenMatched: 'replace', 94 | }, 95 | }, 96 | ]); 97 | }; 98 | 99 | viewTests.runMaterialisedViewQueries = function (runs) { 100 | let results = []; 101 | let runningAverage = 0; 102 | for (let i = 0; i < runs; i++) { 103 | print('Run #' + 
i); 104 | let ep = db.usa_short_films_merged 105 | .find({ comments: { $size: 1 } }) 106 | .limit(10) 107 | .explain('executionStats'); 108 | // printjson(ep); 109 | results.push(ep.executionStats.executionTimeMillis); 110 | runningAverage += ep.executionStats.executionTimeMillis; 111 | } 112 | printjson(results); 113 | print(runningAverage / runs); 114 | }; 115 | -------------------------------------------------------------------------------- /scripts/compact.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Compact helper functions for the Apress book "MongoDB Performance Tuning" 3 | * 4 | * @Authors: Michael Harrison (Michael.J.Harrison@outlook.com) and Guy Harrison (Guy.A.Harrison@gmail.com). 5 | * @Date: 2020-09-03T17:54:50+10:00 6 | * @Last modified by: Michael Harrison 7 | * @Last modified time: 2021-04-08T10:52:59+10:00 8 | * 9 | */ 10 | mongoTuning.reusablePct = function (collectionName) { 11 | let collstats = db.getCollection(collectionName).stats(); 12 | let reusable = 13 | collstats.wiredTiger['block-manager']['file bytes available for reuse']; 14 | let size = collstats.wiredTiger['block-manager']['file size in bytes']; 15 | let reusablePct = Math.round((reusable * 100) / size); 16 | print('Size:', size, ' Reusable: ', reusable, ' ', reusablePct, '%'); 17 | return Math.round((reusable * 100) / size); 18 | }; 19 | -------------------------------------------------------------------------------- /scripts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MongoDBPerformanceTuningScripts", 3 | "version": "1.0.0", 4 | "description": "", 5 | "dependencies": { 6 | "babel-eslint": "*", 7 | "bson": "^4.0.0", 8 | "colors": "^1.3.2", 9 | "command-line-args": "*", 10 | "dateformat": "^3.0.3", 11 | "dotenv": "^6.2.0", 12 | "esdoc": "^0.5.2", 13 | "eslint": "^3.19.0", 14 | "eslint-config-airbnb": "^13.0.0", 15 | "eslint-config-standard": "^7.1.0", 16 | "eslint-plugin-import": "*", 17 | "eslint-plugin-jsx-a11y": "^2.2.3", 18 | "eslint-plugin-promise": "^3.5.0", 19 | "eslint-plugin-react": "^7.19.0", 20 | "eslint-plugin-standard": "^2.1.1", 21 | "eslint-scope": "^5.0.0", 22 | "jest": "^23.3", 23 | "mongodb": "^3.5.5", 24 | "skip-if": "^1.1.1", 25 | "sprintf-js": "*", 26 | "util": "^0.11.0" 27 | }, 28 | "repository": { 29 | "type": "git", 30 | "url": "git+https://github.com/gharriso/MongoDBPerformanceTuningBook" 31 | }, 32 | "author": "guy.a.harrison@gmail.com", 33 | "license": "AGPL-3.0" 34 | } 35 | --------------------------------------------------------------------------------