├── README.md ├── TODO ├── mstat.py ├── run.simple.bash ├── run.simple.bash.Ln.Qn └── src └── jmongoiibench.java /README.md: -------------------------------------------------------------------------------- 1 | iibench-mongodb 2 | =============== 3 | 4 | iiBench Benchmark for MongoDB and TokuMX 5 | 6 | 7 | Requirements 8 | ===================== 9 | 10 | * Java 1.6 or 1.7 11 | * The MongoDB Java driver must exist and be in the CLASSPATH, as in "export CLASSPATH=/home/tcallaghan/java_goodies/mongo-2.11.4.jar:.". If you don't already have the MongoDB Java driver, then execute the following two commands: 12 | 13 | ```bash 14 | wget http://central.maven.org/maven2/org/mongodb/mongo-java-driver/2.11.4/mongo-java-driver-2.11.4.jar 15 | export CLASSPATH=$PWD/mongo-java-driver-2.11.4.jar:$CLASSPATH 16 | 17 | ``` 18 | 19 | * This example assumes that you already have a MongoDB or TokuMX server running on the same machine as the iiBench client application. 20 | * You can connect to a different server or port by editing the run.simple.bash script. 21 | 22 | 23 | Running the benchmark 24 | ===================== 25 | 26 | In the default configuration the benchmark will run for 1 hour, or 100 million inserts, whichever comes first. 
27 | 28 | ```bash 29 | git clone https://github.com/tmcallaghan/iibench-mongodb.git 30 | cd iibench-mongodb 31 | 32 | ``` 33 | 34 | *[optionally edit run.simple.bash to modify the benchmark behavior]* 35 | 36 | ```bash 37 | ./run.simple.bash 38 | 39 | ``` 40 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | - add an optional padding field to get big quickly 2 | - user defined size 3 | - user defined compressible amount 4 | - improve command line argument passing 5 | - use "--argument value" syntax 6 | - add defaults 7 | - additional query workload features 8 | - randomly select query type (based on indexes) 9 | - compare other features in Launchpad version -------------------------------------------------------------------------------- /mstat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Copyright (C) 2009 Google Inc. 4 | # Copyright (C) 2009 Facebook Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | # Originally published by Google. 19 | # Additional improvements from Facebook. 20 | # 21 | 22 | """Multiple-stat reporter 23 | 24 | This gathers performance statistics from multiple data sources and reports 25 | them over the same interval. 
It supports iostat and vmstat on modern Linux 26 | distributions and SHOW GLOBAL STATUS output from MySQL. It is a convenient way 27 | to collect data during benchmarks and can perform some computations on the 28 | collected data including 'rate', 'avg' and 'max'. It can also aggregate 29 | data from multiple devices from iostat. 30 | 31 | This reports all data from vmstat, iostat and SHOW GLOBAL STATUS as counters 32 | and as rates. Other stats can be derived from them and defined on the command 33 | line or via file named by the --sources option. 34 | 35 | Run it like: 36 | mstat.py --loops 1000000 --interval 60 37 | """ 38 | 39 | __author__ = 'Mark Callaghan (mdcallag@gmail.com)' 40 | 41 | import itertools 42 | import optparse 43 | import subprocess 44 | import sys 45 | import time 46 | import re 47 | import pdb 48 | 49 | import MySQLdb 50 | 51 | class MyIterable: 52 | def __iter__(self): 53 | return self 54 | 55 | class Timestamper(MyIterable): 56 | def next(self): 57 | return time.strftime('%Y-%m-%d_%H:%M:%S') 58 | 59 | class Counter(MyIterable): 60 | def __init__(self, interval): 61 | self.interval = interval 62 | self.value = 0 63 | 64 | def next(self): 65 | result = str(self.value) 66 | self.value += self.interval 67 | return result 68 | 69 | class DiskSizer(MyIterable): 70 | def __init__(self, path): 71 | self.path = path 72 | 73 | def next(self): 74 | return subprocess.Popen(['du', '-s', '-b', self.path], 75 | stdout=subprocess.PIPE).communicate()[0].split()[0] 76 | 77 | class ScanMysql(MyIterable): 78 | def __init__(self, db_user, db_password, db_host, db_port, db_name, sql, 79 | retries, err_data): 80 | self.db_user = db_user 81 | self.db_password = db_password 82 | self.db_host = db_host 83 | self.db_port = db_port 84 | self.db_name = db_name 85 | self.sql = sql 86 | self.retries = retries 87 | self.err_data = err_data 88 | 89 | def next(self): 90 | r = self.retries 91 | while r >= 0: 92 | connect = None 93 | try: 94 | connect = 
MySQLdb.connect(host=self.db_host, port=self.db_port, 95 | user=self.db_user, passwd=self.db_password, 96 | db = self.db_name) 97 | cursor = connect.cursor() 98 | cursor.execute(self.sql) 99 | result = [] 100 | for row in cursor.fetchall(): 101 | result.append(' '.join(row)) 102 | connect.close() 103 | # print 'Connectdb' 104 | if result: 105 | return '\n'.join(result) 106 | else: 107 | return self.err_data 108 | except MySQLdb.Error, e: 109 | print 'sql (%s) fails (%s)' % (self.sql, e) 110 | if connect is not None: 111 | try: 112 | connect.close() 113 | except MySQLdb.Error, e: 114 | pass 115 | time.sleep(0.1) 116 | r -= 1 117 | return self.err_data 118 | 119 | class ScanFork(MyIterable): 120 | def __init__(self, cmdline, skiplines): 121 | self.proc = subprocess.Popen(cmdline, shell=True, bufsize=1, 122 | stdout=subprocess.PIPE, 123 | stderr=subprocess.PIPE) 124 | self.cmdline = cmdline 125 | self.lines_to_skip = skiplines 126 | print >>sys.stderr, "forked pid %d for (%s)" % ( 127 | self.proc.pid, cmdline) 128 | 129 | def next(self): 130 | while True: 131 | line = self.proc.stdout.readline() 132 | if not line: 133 | raise StopIteration 134 | elif self.lines_to_skip > 0: 135 | self.lines_to_skip -= 1 136 | continue 137 | else: 138 | return line 139 | 140 | class ScanDiskstats(MyIterable): 141 | def next(self): 142 | proc = subprocess.Popen(['cat', '/proc/diskstats'], 143 | stdout=subprocess.PIPE, 144 | stderr=subprocess.PIPE) 145 | output, err = proc.communicate() 146 | return output 147 | 148 | class ScanFio(MyIterable): 149 | def __init__(self, devices): 150 | self.devices = devices 151 | 152 | def next(self): 153 | result = [] 154 | non_decimal = re.compile(r'[^\d.]+') 155 | 156 | for dev in self.devices: 157 | result.append('%s' % dev) 158 | proc = subprocess.Popen(['fio-status', '-a', '/dev/%s' % dev], 159 | stdout=subprocess.PIPE, 160 | stderr=subprocess.PIPE) 161 | output, err = proc.communicate() 162 | 163 | res_len = len(result) 164 | for line in 
output.split('\n'): 165 | line = line.strip() 166 | 167 | if line.startswith('Physical bytes written'): 168 | result.append(non_decimal.sub('', line.split(':')[1])) 169 | elif line.startswith('Physical bytes read'): 170 | result.append(non_decimal.sub('', line.split(':')[1])) 171 | break 172 | 173 | if len(result) == res_len: 174 | print >>sys.stderr, "Unable to get fio-status" 175 | return "fio-status error" 176 | 177 | proc = subprocess.Popen(['cat', '/proc/fusion/fio/%s/data/groomer/stats' % dev], 178 | stdout=subprocess.PIPE, 179 | stderr=subprocess.PIPE) 180 | output, err = proc.communicate() 181 | res_len = len(result) 182 | for line in output.split('\n'): 183 | if line.startswith('Blocks Erased:'): 184 | ar = line.split() 185 | result.append(ar[2]) 186 | result.append(ar[4]) 187 | elif line.startswith('Data copied:'): 188 | result.append(line.split()[2]) 189 | break 190 | 191 | if len(result) == res_len: 192 | print >>sys.stderr, "Unable to get groomer stats" 193 | return "groomer error" 194 | 195 | result.append('\n') 196 | 197 | return ' '.join(result) 198 | 199 | class ScanMicron(MyIterable): 200 | def __init__(self, devices): 201 | self.devices = devices 202 | 203 | def next(self): 204 | result = [] 205 | 206 | for dev in self.devices: 207 | result.append('%s' % dev) 208 | proc = subprocess.Popen(['smartctl', '-A', '-l', 'devstat,1', '/dev/%s' % dev], 209 | stdout=subprocess.PIPE, 210 | stderr=subprocess.PIPE) 211 | output, err = proc.communicate() 212 | 213 | res_len = len(result) 214 | for line in output.split('\n'): 215 | line = line.strip() 216 | 217 | if line.startswith('247 Unknown_Attribute') or line.startswith('248 Unknown_Attribute'): 218 | field = 9 219 | factor = 1 220 | elif line.endswith('Sectors Written') or line.endswith('Sectors Read'): 221 | field = 4 222 | factor = 512 223 | else: 224 | continue 225 | 226 | ar = line.split(" ") 227 | found = None 228 | for col in ar: 229 | if len(col) > 0: 230 | field = field - 1 231 | if field == 0: 
232 | found = col.strip() 233 | break 234 | if found is None: 235 | print >>sys.stderr, "Unable to get micron stats for %s" % dev 236 | return "micron stat parsing error" 237 | result.append(str(int(found) * factor)) 238 | 239 | if len(result) == res_len: 240 | print >>sys.stderr, "Unable to get smartctl data" 241 | return "smartctl error" 242 | 243 | # bytes copied is not exported yet 244 | result.append('0') 245 | result.append('\n') 246 | 247 | return ' '.join(result) 248 | 249 | class FilterEquals(MyIterable): 250 | def __init__(self, pos, value, iterable, iostat_hack=False): 251 | self.pos = pos 252 | self.value = value 253 | self.iter = iterable 254 | self.iostat_hack = iostat_hack 255 | 256 | def next(self): 257 | while True: 258 | lines = self.iter.next() 259 | for line in lines.split('\n'): 260 | cols = line.split() 261 | if len(cols) >= (self.pos + 1) and cols[self.pos] == self.value: 262 | return line 263 | # Ugly hack for long device name split over 2 lines 264 | # elif self.iostat_hack and len(cols) == 1 and cols[self.pos] == self.value: 265 | # return '%s %s' % (self.value, self.iter.next()) 266 | 267 | class Project(MyIterable): 268 | def __init__(self, pos, iterable): 269 | self.pos = pos 270 | self.iter = iter(iterable) 271 | 272 | def next(self): 273 | line = self.iter.next() 274 | cols = line.split() 275 | try: 276 | v = float(cols[self.pos]) 277 | return cols[self.pos] 278 | except ValueError, e: 279 | return 0.0 280 | 281 | class ExprAbsToRel(MyIterable): 282 | def __init__(self, interval, iterable): 283 | self.interval = interval 284 | self.iter = iter(iterable) 285 | self.prev = None 286 | 287 | def next(self): 288 | current = float(self.iter.next()) 289 | if self.prev is None: 290 | self.prev = current 291 | return '0' 292 | else: 293 | diff = current - self.prev 294 | rate = diff / self.interval 295 | self.prev = current 296 | return str(rate) 297 | 298 | class ExprFunc(MyIterable): 299 | def __init__(self, func, iterables): 300 | self.func 
= func 301 | self.iters = [iter(i) for i in iterables] 302 | 303 | def next(self): 304 | return str(self.func([float(i.next()) for i in self.iters])) 305 | 306 | class ExprAvg(MyIterable): 307 | def __init__(self, iterables): 308 | self.iters = [iter(i) for i in iterables] 309 | 310 | def next(self): 311 | return str(sum([float(i.next()) for i in self.iters]) / len(self.iters)) 312 | 313 | vmstat_cols = { 'swpd':2, 'free':3, 'buff':4, 'cache':5, 'si':6, 'so':7, 314 | 'bi':8, 'bo':9, 'in':10, 'cs':11, 'us':12, 'sy':13, 315 | 'id':14, 'wa':15 } 316 | 317 | iostat_cols = { 'rrqm/s':1, 'wrqm/s':2, 'r/s':3, 'w/s':4, 'rkB/s':5, 318 | 'wkB/s':6, 'avgrq-sz':7, 'avgqu-sz':8, 'await':9, 319 | 'svctm':10, '%util':11 } 320 | 321 | fio_cols = { 'bytes_written':1, 'bytes_read':2, 'blocks_erased':3, 322 | 'bytes_erased':4, 'bytes_copied':5 } 323 | 324 | micron_cols = { 'host_pages':1, 'copy_pages':2, 'bytes_written':3, 325 | 'bytes_read':4 } 326 | 327 | # du_cols = [ '/data/mysql/rmy/data/rocksdb' ] 328 | du_cols = [ '/data/mysql/toku5539/data' ] 329 | 330 | agg_funcs = [ 'sum', 'rate', 'ratesum', 'max', 'avg' ] 331 | 332 | diskstats_cols = { 'reads completed successfully':3, 333 | 'reads merged':4, 334 | 'sectors read':5, 335 | 'time spent reaing (ms)':6, 336 | 'writes completed':7, 337 | 'writes merged':8, 338 | 'sectors written':9, 339 | 'time spent writing (ms)':10, 340 | 'I/Os currently in progress':11, 341 | 'time spent doing I/Os (ms)':12, 342 | 'weighted time spent doing I/Os (ms)':13 343 | } 344 | 345 | def iostat_get_devices(): 346 | scan_iostat = ScanFork('iostat -x 1 1', 0) 347 | saw_device = False 348 | devices = [] 349 | for line in scan_iostat: 350 | if line.startswith('Device:'): 351 | saw_device = True 352 | elif saw_device: 353 | cols = line.split() 354 | if cols: 355 | devices.append(cols[0]) 356 | return devices 357 | 358 | def fio_get_devices(): 359 | scan_fio = ScanFork('ls -d /dev/fio?', 0) 360 | saw_device = False 361 | devices = [] 362 | for line in 
scan_fio: 363 | devices.append(line.split('/')[2].strip()) 364 | return devices 365 | 366 | def micron_get_devices(): 367 | scan_micron = ScanFork('ls -d /sys/block/sd*', 0) 368 | devices = [] 369 | for line in scan_micron: 370 | model = ScanFork('cat %s/device/model' % line.strip(), 0) 371 | for name in model: 372 | if name.strip() == 'Micron_M500_MTFD': 373 | devices.append(line.split('/')[3].strip()) 374 | return devices 375 | 376 | def get_matched_devices(prefix, devices): 377 | assert prefix[-1] == '*' 378 | matched = [] 379 | for d in devices: 380 | if d.startswith(prefix[:-1]) and len(d) > len(prefix[:-1]): 381 | matched.append(d) 382 | return matched 383 | 384 | def get_my_cols(db_user, db_password, db_host, db_port, db_name): 385 | names = [] 386 | try: 387 | connect = MySQLdb.connect(host=db_host, port=db_port, user=db_user, 388 | passwd=db_password, db=db_name) 389 | cursor = connect.cursor() 390 | cursor.execute('SHOW GLOBAL STATUS') 391 | for row in cursor.fetchall(): 392 | if len(row) == 2: 393 | try: 394 | v = float(row[1]) 395 | names.append(row[0]) 396 | except ValueError, e: 397 | pass 398 | connect.close() 399 | return names 400 | except MySQLdb.Error, e: 401 | print 'SHOW GLOBAL STATUS fails (%s)' % e 402 | return [] 403 | 404 | def parse_args(arg, counters, expanded_args, devices, fio_devices, micron_devices): 405 | # print 'parse_args(%s)' % arg 406 | parts = arg.split('.') 407 | pix = 0 408 | pend = len(parts) 409 | use_agg = False 410 | expand = False 411 | ignore = False 412 | 413 | if parts[pix] in agg_funcs: 414 | pix += 1 415 | use_agg = True 416 | 417 | while pix != pend: 418 | if parts[pix] == 'timer': 419 | pix += 1 420 | assert pix == pend 421 | elif parts[pix] == 'timestamp': 422 | pix += 1 423 | assert pix == pend 424 | elif parts[pix] == 'counter': 425 | pix += 1 426 | assert pix == pend 427 | elif parts[pix] == 'vmstat': 428 | assert pend - pix >= 2 429 | assert parts[pix+1] in vmstat_cols 430 | counters['vmstat'] += 1 431 | pix 
+= 2 432 | elif parts[pix] == 'iostat': 433 | assert pend - pix >= 3 434 | assert parts[pix+2] in iostat_cols 435 | 436 | if parts[pix+1][-1] == '*': 437 | assert pix + 3 == pend 438 | if use_agg: 439 | assert pix == 1 440 | expand = True 441 | else: 442 | assert pix == 0 443 | expand = True 444 | else: 445 | if parts[pix+1] in devices: 446 | counters['iostat'] += 1 447 | else: 448 | ignore = True 449 | pix += 3 450 | elif parts[pix] == 'fio': 451 | assert pend - pix >= 3 452 | assert parts[pix+2] in fio_cols 453 | counters['fio'] += 1 454 | pix += 3 455 | elif parts[pix] == 'micron': 456 | assert pend - pix >= 3 457 | assert parts[pix+2] in micron_cols 458 | counters['micron'] += 1 459 | pix += 3 460 | elif parts[pix] == 'my': 461 | assert parts[pix+1] == 'status' 462 | assert pend - pix >= 3 463 | counters['my.status'] += 1 464 | pix += 3 465 | elif parts[pix] == 'du': 466 | assert pend - pix == 2 467 | assert parts[pix+1] in du_cols 468 | pix += 2 469 | elif parts[pix] == 'diskstats': 470 | assert pend - pix == 3 471 | assert parts[pix+2] in diskstats_cols 472 | counters['diskstats'] += 1 473 | pix += 3 474 | else: 475 | # print 'pix %d, pend %d, parts :: %s' % (pix, pend, parts) 476 | assert False 477 | 478 | if expand: 479 | if use_agg: 480 | new_parts = [parts[0]] 481 | matched = get_matched_devices(parts[2], devices) 482 | if matched: 483 | counters['iostat'] += len(matched) 484 | for m in matched: 485 | new_parts.extend([parts[1], m, parts[3]]) 486 | expanded_args.append('.'.join(new_parts)) 487 | else: 488 | matched = get_matched_devices(parts[1], devices) 489 | if matched: 490 | counters['iostat'] += len(matched) 491 | for m in matched: 492 | expanded_args.append('.'.join([parts[0], m, parts[2]])) 493 | elif not ignore: 494 | expanded_args.append(arg) 495 | else: 496 | print 'Ignoring %s' % arg 497 | 498 | def make_data_inputs(arg, inputs, counters, interval, db_user, 499 | db_password, db_host, db_port, db_name, db_retries, 500 | tee_vmstat, tee_iostat, 
tee_fio, tee_micron, tee_mystat, tee_diskstats): 501 | parts = arg.split('.') 502 | pix = 0 503 | pend = len(parts) 504 | use_agg = None 505 | 506 | if parts[pix] in agg_funcs: 507 | use_agg = parts[pix] 508 | pix += 1 509 | 510 | sources = [] 511 | while pix != pend: 512 | if parts[pix] == 'timer': 513 | sources.append((arg, Counter(interval))) 514 | pix += 1 515 | elif parts[pix] == 'timestamp': 516 | sources.append((arg, Timestamper())) 517 | pix += 1 518 | elif parts[pix] == 'counter': 519 | sources.append((arg, Counter(1))) 520 | pix += 1 521 | elif parts[pix] == 'vmstat': 522 | sources.append((arg, Project(vmstat_cols[parts[pix+1]], 523 | tee_vmstat[counters['vmstat']]))) 524 | counters['vmstat'] += 1 525 | pix += 2 526 | elif parts[pix] == 'iostat': 527 | f = FilterEquals(0, parts[pix+1], tee_iostat[counters['iostat']], True) 528 | sources.append((arg, Project(iostat_cols[parts[pix+2]], f))) 529 | counters['iostat'] += 1 530 | pix += 3 531 | elif parts[pix] == 'fio': 532 | f = FilterEquals(0, parts[pix+1], tee_fio[counters['fio']], True) 533 | sources.append((arg, Project(fio_cols[parts[pix+2]], f))) 534 | counters['fio'] += 1 535 | pix += 3 536 | elif parts[pix] == 'micron': 537 | f = FilterEquals(0, parts[pix+1], tee_micron[counters['micron']], True) 538 | sources.append((arg, Project(micron_cols[parts[pix+2]], f))) 539 | counters['micron'] += 1 540 | pix += 3 541 | elif parts[pix] == 'my': 542 | assert parts[pix+1] == 'status' 543 | # print 'use my.status tee %d for %s' % (counters['my.status'], arg) 544 | f = FilterEquals(0, parts[pix+2], tee_mystat[counters['my.status']], True) 545 | sources.append((arg, Project(1, f))) 546 | counters['my.status'] += 1 547 | pix += 3 548 | elif parts[pix] == 'du': 549 | sources.append((arg, DiskSizer(parts[pix+1]))) 550 | pix += 2 551 | elif parts[pix] == 'diskstats': 552 | f = FilterEquals(2, parts[pix+1], tee_diskstats[counters['diskstats']], True) 553 | sources.append((arg, Project(diskstats_cols[parts[pix+2]], f))) 
554 | counters['diskstats'] += 1 555 | pix += 3 556 | else: 557 | assert False 558 | 559 | if use_agg is None: 560 | assert len(sources) == 1 561 | inputs.append(sources[0]) 562 | elif use_agg == 'rate': 563 | assert len(sources) == 1 564 | inputs.append((arg, ExprAbsToRel(interval, sources[0][1]))) 565 | elif use_agg == 'sum': 566 | assert len(sources) >= 1 567 | inputs.append((arg, ExprFunc(sum, [s[1] for s in sources]))) 568 | elif use_agg == 'ratesum': 569 | assert len(sources) >= 1 570 | sum_iter = ExprFunc(sum, [s[1] for s in sources]) 571 | inputs.append((arg, ExprAbsToRel(interval, sum_iter))) 572 | elif use_agg == 'avg': 573 | assert len(sources) >= 1 574 | inputs.append((arg, ExprAvg([s[1] for s in sources]))) 575 | elif use_agg == 'max': 576 | assert len(sources) >= 1 577 | inputs.append((arg, ExprFunc(max, [s[1] for s in sources]))) 578 | else: 579 | assert False 580 | 581 | def build_inputs(args, interval, loops, db_user, db_password, db_host, db_port, 582 | db_name, db_retries, data_sources): 583 | scan_vmstat = None 584 | scan_iostat = None 585 | scan_fio = None 586 | scan_micron = None 587 | scan_diskstats = None 588 | inputs = [] 589 | devices = iostat_get_devices() 590 | fio_devices = fio_get_devices() 591 | micron_devices = micron_get_devices() 592 | parse_counters = { 'iostat' : 0, 'vmstat' : 0, 'fio' : 0, 'micron': 0, 'my.status' : 0, 'du' : 0, 'diskstats' : 0 } 593 | 594 | if data_sources: 595 | f = open(data_sources) 596 | args.extend([l[:-1] for l in f.xreadlines()]) 597 | 598 | expanded_args = [] 599 | 600 | for arg in ['timestamp', 'timer', 'counter']: 601 | parse_args(arg, parse_counters, expanded_args, devices, fio_devices, micron_devices) 602 | 603 | for dev in devices: 604 | for col in iostat_cols: 605 | parse_args('iostat.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 606 | parse_args('rate.iostat.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 607 | 
608 | for col in vmstat_cols: 609 | parse_args('vmstat.%s' % col, parse_counters, expanded_args, devices, fio_devices, micron_devices) 610 | parse_args('rate.vmstat.%s' % col, parse_counters, expanded_args, devices, fio_devices, micron_devices) 611 | 612 | for dev in fio_devices: 613 | for col in fio_cols: 614 | parse_args('fio.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 615 | parse_args('rate.fio.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 616 | 617 | for dev in micron_devices: 618 | for col in micron_cols: 619 | parse_args('micron.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 620 | #parse_args('rate.micron.%s.%s' % (dev, col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 621 | 622 | for col in get_my_cols(db_user, db_password, db_host, db_port, db_name): 623 | parse_args('my.status.%s' % col, parse_counters, expanded_args, devices, fio_devices, micron_devices) 624 | parse_args('rate.my.status.%s' % col, parse_counters, expanded_args, devices, fio_devices, micron_devices) 625 | 626 | for col in du_cols: 627 | parse_args('du.%s' % col, parse_counters, expanded_args, devices, fio_devices, micron_devices) 628 | 629 | for col in diskstats_cols: 630 | parse_args('diskstats.%s.%s' % (dev,col), parse_counters, expanded_args, devices, fio_devices, micron_devices) 631 | 632 | for arg in args: 633 | parse_args(arg, parse_counters, expanded_args, devices, fio_devices, micron_devices) 634 | 635 | tee_vmstat, tee_iostat, tee_fio, tee_micron, tee_mystat, tee_diskstats = None, None, None, None, None, None 636 | 637 | if parse_counters['vmstat']: 638 | scan_vmstat = ScanFork('vmstat -n %d %d' % (interval, loops+1), 2) 639 | tee_vmstat = itertools.tee(scan_vmstat, parse_counters['vmstat']) 640 | 641 | if parse_counters['iostat']: 642 | scan_iostat = ScanFork('iostat -kx %d %d' % (interval, loops+1), 0) 643 | tee_iostat = 
itertools.tee(scan_iostat, parse_counters['iostat']) 644 | 645 | if parse_counters['fio']: 646 | scan_fio = ScanFio(fio_devices) 647 | tee_fio = itertools.tee(scan_fio, parse_counters['fio']) 648 | 649 | if parse_counters['micron']: 650 | scan_micron = ScanMicron(micron_devices) 651 | tee_micron = itertools.tee(scan_micron, parse_counters['micron']) 652 | 653 | if parse_counters['my.status']: 654 | scan_mystat = ScanMysql(db_user, db_password, db_host, db_port, db_name, 655 | 'SHOW GLOBAL STATUS', db_retries, 'Foo 0') 656 | tee_mystat = itertools.tee(scan_mystat, parse_counters['my.status']) 657 | 658 | if parse_counters['diskstats']: 659 | scan_diskstats = ScanDiskstats() 660 | tee_diskstats = itertools.tee(scan_diskstats, parse_counters['diskstats']) 661 | 662 | # print expanded_args 663 | 664 | source_counters = { 'iostat' : 0, 'vmstat' : 0, 'fio' : 0, 'micron' : 0, 665 | 'my.status' : 0, 'diskstats' : 0 } 666 | for arg in expanded_args: 667 | make_data_inputs(arg, inputs, source_counters, interval, db_user, 668 | db_password, db_host, db_port, db_name, db_retries, 669 | tee_vmstat, tee_iostat, tee_fio, tee_micron, tee_mystat, tee_diskstats) 670 | 671 | return inputs 672 | 673 | def parse_opts(args): 674 | parser = optparse.OptionParser() 675 | parser.add_option("--db_user", action="store", 676 | type="string",dest="db_user", 677 | default="root", 678 | help="Username for database") 679 | parser.add_option("--db_password", action="store", 680 | type="string",dest="db_password", 681 | default="", 682 | help="Password for database") 683 | parser.add_option("--db_host", action="store", 684 | type="string",dest="db_host", 685 | default="localhost", 686 | help="Hostname for database") 687 | parser.add_option("--db_port", action="store", 688 | type="int",dest="db_port", 689 | default="0", 690 | help="Port for database") 691 | parser.add_option("--db_name", action="store", 692 | type="string",dest="db_name", 693 | default="test", 694 | help="Database name") 695 | 
parser.add_option("--db_retries", action="store", 696 | type="int",dest="db_retries", 697 | default="3", 698 | help="Number of times to retry failed queries") 699 | parser.add_option("--sources", action="store", 700 | type="string",dest="data_sources", 701 | default="", 702 | help="File that lists data sources to plot in addition" 703 | "to those listed on the command line") 704 | parser.add_option("--interval", action="store", 705 | type="int", dest="interval", 706 | default="60", 707 | help="Report every interval seconds") 708 | parser.add_option("--loops", action="store", 709 | type="int", dest="loops", 710 | default="2880000", 711 | help="Stop after this number of intervals") 712 | return parser.parse_args(args) 713 | 714 | def main(argv=None): 715 | if argv is None: 716 | argv = sys.argv 717 | options, args = parse_opts(argv[1:]) 718 | 719 | inputs = build_inputs(args, options.interval, options.loops, 720 | options.db_user, options.db_password, 721 | options.db_host, options.db_port, 722 | options.db_name, options.db_retries, 723 | options.data_sources) 724 | for i,v in enumerate(inputs): 725 | print i+1, v[0] 726 | 727 | print 'START' 728 | 729 | iters = [iter(i[1]) for i in inputs] 730 | try: 731 | for x in xrange(options.loops): 732 | print ' '.join([i.next() for i in iters]) 733 | sys.stdout.flush() 734 | time.sleep(options.interval) 735 | except StopIteration, e: 736 | pass 737 | 738 | if __name__ == "__main__": 739 | sys.exit(main()) 740 | -------------------------------------------------------------------------------- /run.simple.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # simple script to run against running MongoDB/TokuMX server localhost:(default port) 4 | 5 | # if running TokuMX, need to select compression for collection and secondary indexes (zlib is default) 6 | # valid values : lzma, quicklz, zlib, none 7 | export MONGO_COMPRESSION=zlib 8 | 9 | # if running TokuMX, need to 
select basement node size (65536 is default) 10 | # valid values : integer > 0 : 65536 for 64K 11 | export MONGO_BASEMENT=65536 12 | 13 | # run the benchmark for this many inserts (or the number of minutes defined by RUN_MINUTES) 14 | # valid values : integer > 0 15 | export MAX_ROWS=100000000 16 | 17 | # run the benchmark for this many minutes (or the number of inserts defined by MAX_ROWS) 18 | # valid values : integer > 0 19 | export RUN_MINUTES=60 20 | export RUN_SECONDS=$[RUN_MINUTES*60] 21 | 22 | # total number of documents to insert per "batch" 23 | # valid values : integer > 0 24 | export NUM_DOCUMENTS_PER_INSERT=1000 25 | 26 | # total number of documents to insert per second, allows for the benchmark to be rate limited 27 | # valid values : integer > 0 28 | export MAX_INSERTS_PER_SECOND=999999 29 | 30 | # total number of simultaneous insertion threads 31 | # valid values : integer > 0 32 | export NUM_LOADER_THREADS=1 33 | 34 | # database in which to run the benchmark 35 | # valid values : character 36 | export DB_NAME=iibench 37 | 38 | # write concern for the benchmark client 39 | # valid values : FSYNC_SAFE, NONE, NORMAL, REPLICAS_SAFE, SAFE 40 | export WRITE_CONCERN=SAFE 41 | 42 | # name of the server to connect to 43 | export MONGO_SERVER=localhost 44 | 45 | # port of the server to connect to 46 | export MONGO_PORT=27017 47 | 48 | # display performance information every time the client application inserts this many documents 49 | # valid values : integer > 0, set to -1 if using NUM_SECONDS_PER_FEEDBACK 50 | export NUM_INSERTS_PER_FEEDBACK=100000 51 | 52 | # display performance information every time the client application has run for this many seconds 53 | # valid values : integer > 0, set to -1 if using NUM_INSERTS_PER_FEEDBACK 54 | export NUM_SECONDS_PER_FEEDBACK=-1 55 | 56 | # number of additional character fields (semi-compressible) to add to each inserted document 57 | # valid values : integer >= 0 58 | export NUM_CHAR_FIELDS=1 59 | 60 | # size (in 
bytes) of each additional semi-compressible character field 61 | # valid values : integer >= 0 62 | export LENGTH_CHAR_FIELDS=1000 63 | 64 | # percentage of highly compressible data (repeated character "a") in character field 65 | # valid values : integer >= 0 and <= 100 66 | export PERCENT_COMPRESSIBLE=90 67 | 68 | # number of secondary indexes to maintain 69 | # valid values : integer >= 0 and <= 3 70 | export NUM_SECONDARY_INDEXES=3 71 | 72 | # the following 4 parameters allow an insert plus query workload benchmark 73 | 74 | # number of queries to perform per QUERY_INTERVAL_SECONDS seconds 75 | # valid values : integer > 0, set to zero for insert only workload 76 | export QUERIES_PER_INTERVAL=0 77 | 78 | # number of seconds during which to perform QUERIES_PER_INTERVAL queries 79 | # valid values : integer > 0 80 | export QUERY_INTERVAL_SECONDS=15 81 | 82 | # number of documents to return per query 83 | # valid values : integer > 0 84 | export QUERY_LIMIT=10 85 | 86 | # wait this many inserts to begin the query workload 87 | # valid values : integer > 0 88 | export QUERY_NUM_DOCS_BEGIN=1000000 89 | 90 | # create the collection 91 | # valid values : Y/N 92 | export CREATE_COLLECTION=Y 93 | 94 | 95 | javac -cp $CLASSPATH:$PWD/src src/jmongoiibench.java 96 | 97 | 98 | export LOG_NAME=mongoiibench-${MAX_ROWS}-${NUM_DOCUMENTS_PER_INSERT}-${MAX_INSERTS_PER_SECOND}-${NUM_LOADER_THREADS}-${QUERIES_PER_INTERVAL}-${QUERY_INTERVAL_SECONDS}.txt 99 | export BENCHMARK_TSV=${LOG_NAME}.tsv 100 | 101 | rm -f $LOG_NAME 102 | rm -f $BENCHMARK_TSV 103 | 104 | T="$(date +%s)" 105 | java -cp $CLASSPATH:$PWD/src jmongoiibench $DB_NAME $NUM_LOADER_THREADS $MAX_ROWS $NUM_DOCUMENTS_PER_INSERT $NUM_INSERTS_PER_FEEDBACK $NUM_SECONDS_PER_FEEDBACK $BENCHMARK_TSV $MONGO_COMPRESSION $MONGO_BASEMENT $RUN_SECONDS $QUERIES_PER_INTERVAL $QUERY_INTERVAL_SECONDS $QUERY_LIMIT $QUERY_NUM_DOCS_BEGIN $MAX_INSERTS_PER_SECOND $WRITE_CONCERN $MONGO_SERVER $MONGO_PORT $NUM_CHAR_FIELDS $LENGTH_CHAR_FIELDS 
$NUM_SECONDARY_INDEXES $PERCENT_COMPRESSIBLE $CREATE_COLLECTION | tee -a $LOG_NAME 106 | echo "" | tee -a $LOG_NAME 107 | T="$(($(date +%s)-T))" 108 | printf "`date` | iibench duration = %02d:%02d:%02d:%02d\n" "$((T/86400))" "$((T/3600%24))" "$((T/60%60))" "$((T%60))" | tee -a $LOG_NAME 109 | -------------------------------------------------------------------------------- /run.simple.bash.Ln.Qn: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # simple script to run against running MongoDB/TokuMX server localhost:(default port) 3 | 4 | # if running TokuMX, need to select compression for collection and secondary indexes (zlib is default) 5 | # valid values : lzma, quicklz, zlib, none 6 | export MONGO_COMPRESSION=quicklz 7 | 8 | # if running TokuMX, need to select basement node size (65536 is default) 9 | # valid values : integer > 0 : 65536 for 64K 10 | # export MONGO_BASEMENT=65536 11 | export MONGO_BASEMENT=16384 12 | 13 | # run the benchmark for this many inserts (or the number of minutes defined by RUN_MINUTES) 14 | # valid values : integer > 0 15 | # export MAX_ROWS=200000000 16 | export MAX_ROWS=$1 17 | 18 | # run the benchmark for this many minutes (or the number of inserts defined by MAX_ROWS) 19 | # valid values : integer > 0 20 | export RUN_MINUTES=$2 21 | export RUN_SECONDS=$[RUN_MINUTES*60] 22 | 23 | # total number of documents to insert per "batch" 24 | # valid values : integer > 0 25 | export NUM_DOCUMENTS_PER_INSERT=$3 26 | 27 | # total number of documents to insert per second, allows for the benchmark to be rate limited 28 | # valid values : integer > 0 29 | export MAX_INSERTS_PER_SECOND=$4 30 | 31 | # total number of simultaneous insertion threads 32 | # valid values : integer > 0 33 | export NUM_LOADER_THREADS=$5 34 | 35 | # database in which to run the benchmark 36 | # valid values : character 37 | export DB_NAME=iibench 38 | 39 | # write concern for the benchmark client 40 | # valid values : 
FSYNC_SAFE, NONE, NORMAL, REPLICAS_SAFE, SAFE 41 | export WRITE_CONCERN=SAFE 42 | 43 | # name of the server to connect to 44 | export MONGO_SERVER=localhost 45 | 46 | # port of the server to connect to 47 | export MONGO_PORT=27017 48 | 49 | # display performance information every time the client application inserts this many documents 50 | # valid values : integer > 0, set to -1 if using NUM_SECONDS_PER_FEEDBACK 51 | export NUM_INSERTS_PER_FEEDBACK=-1 52 | 53 | # display performance information every time the client application has run for this many seconds 54 | # valid values : integer > 0, set to -1 if using NUM_INSERTS_PER_FEEDBACK 55 | export NUM_SECONDS_PER_FEEDBACK=10 56 | 57 | # number of additional character fields (semi-compressible) to add to each inserted document 58 | # valid values : integer >= 0 59 | export NUM_CHAR_FIELDS=1 60 | 61 | # size (in bytes) of each additional semi-compressible character field 62 | # valid values : integer >= 0 63 | export LENGTH_CHAR_FIELDS=$6 64 | 65 | # percentage of highly compressible data (repeated character "a") in character field 66 | # valid values : integer >= 0 and <= 100 67 | export PERCENT_COMPRESSIBLE=50 68 | 69 | # number of secondary indexes to maintain 70 | # valid values : integer >= 0 and <= 3 71 | export NUM_SECONDARY_INDEXES=$7 72 | 73 | # the following 4 parameters allow an insert plus query workload benchmark 74 | 75 | # number of documents to return per query 76 | # valid values : integer > 0 77 | export QUERY_LIMIT=4 78 | 79 | # wait this many inserts to begin the query workload 80 | # valid values : integer > 0 81 | export QUERY_NUM_DOCS_BEGIN=10000 82 | 83 | # number of query threads, must be >= 0 84 | export QUERY_THREADS=$8 85 | 86 | # sleep time before a thread starts the next query, must be >= 0 87 | export MS_BETWEEN_QUERIES=0 88 | 89 | # create the collection 90 | # valid values : Y/N 91 | export CREATE_COLLECTION=Y 92 | 93 | # 1 = forward, -1 = reverse 94 | export QUERY_DIRECTION=1 95 | 96 
| JAVA=/usr/local/jdk-8u25-64/bin/java 97 | CLASSPATH=mongo-java-driver-2.13.1.jar 98 | # javac -cp $CLASSPATH:$PWD/src src/jmongoiibench.java 99 | 100 | export LOG_NAME=mongoiibench-${MAX_ROWS}-${NUM_DOCUMENTS_PER_INSERT}-${MAX_INSERTS_PER_SECOND}-${NUM_LOADER_THREADS}-${QUERY_THREADS}-${MS_BETWEEN_QUERIES}.txt 101 | export BENCHMARK_TSV=${LOG_NAME}.tsv 102 | 103 | rm -f $LOG_NAME 104 | rm -f $BENCHMARK_TSV 105 | 106 | iostat -kx 10 >& o.io.r & 107 | iid=$! 108 | vmstat 10 >& o.vm.r & 109 | vid=$! 110 | 111 | # python mstat.py --loops 1000000 --interval 10 >& o.mstat.r & 112 | # mid=$! 113 | 114 | T="$(date +%s)" 115 | $JAVA -cp $CLASSPATH:$PWD/src jmongoiibench $DB_NAME $NUM_LOADER_THREADS $MAX_ROWS $NUM_DOCUMENTS_PER_INSERT $NUM_INSERTS_PER_FEEDBACK $NUM_SECONDS_PER_FEEDBACK $BENCHMARK_TSV $MONGO_COMPRESSION $MONGO_BASEMENT $RUN_SECONDS $QUERY_LIMIT $QUERY_NUM_DOCS_BEGIN $MAX_INSERTS_PER_SECOND $WRITE_CONCERN $MONGO_SERVER $MONGO_PORT $NUM_CHAR_FIELDS $LENGTH_CHAR_FIELDS $NUM_SECONDARY_INDEXES $PERCENT_COMPRESSIBLE $CREATE_COLLECTION $QUERY_THREADS $MS_BETWEEN_QUERIES $QUERY_DIRECTION 2>&1 | tee -a $LOG_NAME 116 | echo "" | tee -a $LOG_NAME 117 | T="$(($(date +%s)-T))" 118 | printf "`date` | iibench duration = %02d:%02d:%02d:%02d\n" "$((T/86400))" "$((T/3600%24))" "$((T/60%60))" "$((T%60))" | tee -a $LOG_NAME 119 | 120 | export mdir=$9 121 | echo "db.serverStatus()" | ${mdir}/bin/mongo > o.status 122 | du -hs ${mdir}/data >> $LOG_NAME 123 | du -hs --apparent-size ${mdir}/data >> $LOG_NAME 124 | ps aux | grep mongod | grep -v grep >> $LOG_NAME 125 | tail -3 $LOG_NAME 126 | 127 | # kill $mid 128 | kill $iid 129 | kill $vid 130 | -------------------------------------------------------------------------------- /src/jmongoiibench.java: -------------------------------------------------------------------------------- 1 | //import com.mongodb.Mongo; 2 | import com.mongodb.MongoClient; 3 | import com.mongodb.MongoClientOptions; 4 | import com.mongodb.DB; 5 | import 
com.mongodb.DBCollection; 6 | import com.mongodb.DBCursor; 7 | import com.mongodb.BasicDBObject; 8 | import com.mongodb.DBObject; 9 | import com.mongodb.DBCursor; 10 | import com.mongodb.ServerAddress; 11 | import com.mongodb.WriteConcern; 12 | import com.mongodb.CommandResult; 13 | 14 | import java.util.Arrays; 15 | import java.util.ArrayList; 16 | import java.util.Date; 17 | import java.util.Properties; 18 | import java.io.BufferedWriter; 19 | import java.io.FileWriter; 20 | import java.io.File; 21 | import java.io.Writer; 22 | import java.io.FileNotFoundException; 23 | import java.io.IOException; 24 | import java.util.concurrent.atomic.AtomicInteger; 25 | import java.util.concurrent.atomic.AtomicLong; 26 | import java.util.concurrent.locks.ReentrantLock; 27 | 28 | public class jmongoiibench { 29 | public static AtomicLong globalInserts = new AtomicLong(0); 30 | public static AtomicLong globalWriterThreads = new AtomicLong(0); 31 | public static AtomicLong globalQueryThreads = new AtomicLong(0); 32 | public static AtomicLong globalQueriesExecuted = new AtomicLong(0); 33 | public static AtomicLong globalQueriesTimeMs = new AtomicLong(0); 34 | public static AtomicLong globalQueriesStarted = new AtomicLong(0); 35 | public static AtomicLong globalInsertExceptions = new AtomicLong(0); 36 | 37 | public static Writer writer = null; 38 | public static boolean outputHeader = true; 39 | 40 | public static int numCashRegisters = 1000; 41 | public static int numProducts = 10000; 42 | public static int numCustomers = 100000; 43 | public static double maxPrice = 500.0; 44 | 45 | public static String dbName; 46 | public static int writerThreads; 47 | public static int queryThreads; 48 | public static Integer numMaxInserts; 49 | public static int documentsPerInsert; 50 | public static long insertsPerFeedback; 51 | public static long secondsPerFeedback; 52 | public static String compressionType; 53 | public static int basementSize; 54 | public static String logFileName; 55 | 
public static String indexTechnology; 56 | public static Long numSeconds; 57 | public static Integer msBetweenQueries; 58 | public static Integer queryLimit; 59 | public static Integer queryIndexDirection = 1; 60 | public static Integer queryBeginNumDocs; 61 | public static Integer maxInsertsPerSecond; 62 | public static Integer maxThreadInsertsPerSecond; 63 | public static String myWriteConcern; 64 | public static String serverName; 65 | public static String createCollection; 66 | public static int serverPort; 67 | public static int numCharFields; 68 | public static int lengthCharFields; 69 | public static int numSecondaryIndexes; 70 | public static int percentCompressible; 71 | public static int numCompressibleCharacters; 72 | public static int numUncompressibleCharacters; 73 | 74 | public static int randomStringLength = 4*1024*1024; 75 | public static String randomStringHolder; 76 | public static int compressibleStringLength = 4*1024*1024; 77 | public static String compressibleStringHolder; 78 | 79 | public static int allDone = 0; 80 | 81 | public jmongoiibench() { 82 | } 83 | 84 | public static void main (String[] args) throws Exception { 85 | if (args.length != 24) { 86 | logMe("*** ERROR : CONFIGURATION ISSUE ***"); 87 | logMe("jmongoiibench [database name] [number of writer threads] [documents per collection] [documents per insert] [inserts feedback] [seconds feedback] [log file name] [compression type] [basement node size (bytes)] [number of seconds to run] [query limit] [inserts for begin query] [max inserts per second] [writeconcern] [server] [port] [num char fields] [length char fields] [num secondary indexes] [percent compressible] [create collection] [number of query threads] [millisecs between queries] [query index direction]"); 88 | System.exit(1); 89 | } 90 | 91 | dbName = args[0]; 92 | writerThreads = Integer.valueOf(args[1]); 93 | numMaxInserts = Integer.valueOf(args[2]); 94 | documentsPerInsert = Integer.valueOf(args[3]); 95 | insertsPerFeedback 
= Long.valueOf(args[4]); 96 | secondsPerFeedback = Long.valueOf(args[5]); 97 | logFileName = args[6]; 98 | compressionType = args[7]; 99 | basementSize = Integer.valueOf(args[8]); 100 | numSeconds = Long.valueOf(args[9]); 101 | queryLimit = Integer.valueOf(args[10]); 102 | queryBeginNumDocs = Integer.valueOf(args[11]); 103 | maxInsertsPerSecond = Integer.valueOf(args[12]); 104 | myWriteConcern = args[13]; 105 | serverName = args[14]; 106 | serverPort = Integer.valueOf(args[15]); 107 | numCharFields = Integer.valueOf(args[16]); 108 | lengthCharFields = Integer.valueOf(args[17]); 109 | numSecondaryIndexes = Integer.valueOf(args[18]); 110 | percentCompressible = Integer.valueOf(args[19]); 111 | createCollection = args[20].toLowerCase(); 112 | queryThreads = Integer.valueOf(args[21]); 113 | msBetweenQueries = Integer.valueOf(args[22]); 114 | queryIndexDirection = Integer.valueOf(args[23]); 115 | 116 | if (queryIndexDirection != 1 && queryIndexDirection != -1) { 117 | logMe("*** ERROR: queryIndexDirection must be 1 or -1 ***"); 118 | System.exit(1); 119 | } 120 | 121 | maxThreadInsertsPerSecond = (int) ((double)maxInsertsPerSecond / (writerThreads > 0 ? 
writerThreads : 1)); 122 | 123 | WriteConcern myWC = new WriteConcern(); 124 | if (myWriteConcern.toLowerCase().equals("fsync_safe")) { 125 | myWC = WriteConcern.FSYNC_SAFE; 126 | } 127 | else if ((myWriteConcern.toLowerCase().equals("none"))) { 128 | myWC = WriteConcern.NONE; 129 | } 130 | else if ((myWriteConcern.toLowerCase().equals("normal"))) { 131 | myWC = WriteConcern.NORMAL; 132 | } 133 | else if ((myWriteConcern.toLowerCase().equals("replicas_safe"))) { 134 | myWC = WriteConcern.REPLICAS_SAFE; 135 | } 136 | else if ((myWriteConcern.toLowerCase().equals("safe"))) { 137 | myWC = WriteConcern.SAFE; 138 | } 139 | else { 140 | logMe("*** ERROR : WRITE CONCERN ISSUE ***"); 141 | logMe(" write concern %s is not supported",myWriteConcern); 142 | System.exit(1); 143 | } 144 | 145 | if ((numSecondaryIndexes < 0) || (numSecondaryIndexes > 3)) { 146 | logMe("*** ERROR : INVALID NUMBER OF SECONDARY INDEXES, MUST BE >=0 and <= 3 ***"); 147 | logMe(" %d secondary indexes is not supported",numSecondaryIndexes); 148 | System.exit(1); 149 | } 150 | 151 | if ((percentCompressible < 0) || (percentCompressible > 100)) { 152 | logMe("*** ERROR : INVALID PERCENT COMPRESSIBLE, MUST BE >=0 and <= 100 ***"); 153 | logMe(" %d secondary indexes is not supported",percentCompressible); 154 | System.exit(1); 155 | } 156 | 157 | numCompressibleCharacters = (int) (((double) percentCompressible / 100.0) * (double) lengthCharFields); 158 | numUncompressibleCharacters = (int) (((100.0 - (double) percentCompressible) / 100.0) * (double) lengthCharFields); 159 | 160 | logMe("Application Parameters"); 161 | logMe("--------------------------------------------------"); 162 | logMe(" database name = %s",dbName); 163 | logMe(" %d writer thread(s)",writerThreads); 164 | logMe(" %d query thread(s)",queryThreads); 165 | logMe(" %,d documents per collection",numMaxInserts); 166 | logMe(" %d character fields",numCharFields); 167 | logMe(" %d bytes per character field",lengthCharFields); 168 | logMe(" %d 
secondary indexes",numSecondaryIndexes); 169 | logMe(" Documents Per Insert = %d",documentsPerInsert); 170 | logMe(" Maximum of %,d insert(s) per second",maxInsertsPerSecond); 171 | logMe(" Maximum of %,d insert(s) per second per writer thread",maxThreadInsertsPerSecond); 172 | logMe(" Feedback every %,d seconds(s)",secondsPerFeedback); 173 | logMe(" Feedback every %,d inserts(s)",insertsPerFeedback); 174 | logMe(" logging to file %s",logFileName); 175 | logMe(" Run for %,d second(s)",numSeconds); 176 | logMe(" Extra character fields are %d percent compressible",percentCompressible); 177 | logMe(" %,d milliseconds between queries", msBetweenQueries); 178 | logMe(" Queries limited to %,d document(s) with index direction %,d", queryLimit, queryIndexDirection); 179 | logMe(" Starting queries after %,d document(s) inserted",queryBeginNumDocs); 180 | logMe(" write concern = %s",myWriteConcern); 181 | logMe(" Server:Port = %s:%d",serverName,serverPort); 182 | 183 | MongoClientOptions clientOptions = new MongoClientOptions.Builder().connectionsPerHost(2048).socketTimeout(600000).writeConcern(myWC).build(); 184 | ServerAddress srvrAdd = new ServerAddress(serverName,serverPort); 185 | MongoClient m = new MongoClient(srvrAdd, clientOptions); 186 | 187 | logMe("mongoOptions | " + m.getMongoOptions().toString()); 188 | logMe("mongoWriteConcern | " + m.getWriteConcern().toString()); 189 | 190 | DB db = m.getDB(dbName); 191 | 192 | // determine server type : mongo or tokumx 193 | DBObject checkServerCmd = new BasicDBObject(); 194 | CommandResult commandResult = db.command("buildInfo"); 195 | 196 | // check if tokumxVersion exists, otherwise assume mongo 197 | if (commandResult.toString().contains("tokumxVersion")) { 198 | indexTechnology = "tokumx"; 199 | } 200 | else 201 | { 202 | indexTechnology = "mongo"; 203 | } 204 | 205 | if ((!indexTechnology.toLowerCase().equals("tokumx")) && (!indexTechnology.toLowerCase().equals("mongo"))) { 206 | // unknown index technology, abort 207 
| logMe(" *** Unknown Indexing Technology %s, shutting down",indexTechnology); 208 | System.exit(1); 209 | } 210 | 211 | logMe(" index technology = %s",indexTechnology); 212 | 213 | if (indexTechnology.toLowerCase().equals("tokumx")) { 214 | logMe(" + compression type = %s",compressionType); 215 | logMe(" + basement node size (bytes) = %d",basementSize); 216 | } 217 | 218 | logMe("--------------------------------------------------"); 219 | 220 | if (writerThreads > 1) { 221 | numMaxInserts = numMaxInserts / writerThreads; 222 | } 223 | 224 | try { 225 | writer = new BufferedWriter(new FileWriter(new File(logFileName))); 226 | } catch (IOException e) { 227 | e.printStackTrace(); 228 | } 229 | 230 | if (createCollection.equals("n")) 231 | { 232 | logMe("Skipping collection creation"); 233 | } 234 | else 235 | { 236 | // create the collection 237 | String collectionName = "purchases_index"; 238 | 239 | if (indexTechnology.toLowerCase().equals("tokumx")) { 240 | DBObject cmd = new BasicDBObject(); 241 | cmd.put("create", collectionName); 242 | cmd.put("compression", compressionType); 243 | cmd.put("readPageSize", basementSize); 244 | CommandResult result = db.command(cmd); 245 | //logMe(result.toString()); 246 | } else if (indexTechnology.toLowerCase().equals("mongo")) { 247 | // nothing special to do for a regular mongo collection 248 | } else { 249 | // unknown index technology, abort 250 | logMe(" *** Unknown Indexing Technology %s, shutting down",indexTechnology); 251 | System.exit(1); 252 | } 253 | 254 | DBCollection coll = db.getCollection(collectionName); 255 | 256 | BasicDBObject idxOptions = new BasicDBObject(); 257 | idxOptions.put("background",false); 258 | 259 | if (indexTechnology.toLowerCase().equals("tokumx")) { 260 | idxOptions.put("compression",compressionType); 261 | idxOptions.put("readPageSize",basementSize); 262 | } 263 | 264 | if (numSecondaryIndexes >= 1) { 265 | logMe(" *** creating secondary index on price + customerid"); 266 | 
coll.ensureIndex(new BasicDBObject("price", 1).append("customerid", 1), idxOptions); 267 | } 268 | if (numSecondaryIndexes >= 2) { 269 | logMe(" *** creating secondary index on cashregisterid + price + customerid"); 270 | coll.ensureIndex(new BasicDBObject("cashregisterid", 1).append("price", 1).append("customerid", 1), idxOptions); 271 | } 272 | if (numSecondaryIndexes >= 3) { 273 | logMe(" *** creating secondary index on price + dateandtime + customerid"); 274 | coll.ensureIndex(new BasicDBObject("price", 1).append("dateandtime", 1).append("customerid", 1), idxOptions); 275 | } 276 | // END: create the collection 277 | } 278 | 279 | java.util.Random rand = new java.util.Random(); 280 | 281 | // create random string holder 282 | logMe(" creating %,d bytes of random character data...",randomStringLength); 283 | char[] tempString = new char[randomStringLength]; 284 | for (int i = 0 ; i < randomStringLength ; i++) { 285 | tempString[i] = (char) (rand.nextInt(26) + 'a'); 286 | } 287 | randomStringHolder = new String(tempString); 288 | 289 | // create compressible string holder 290 | logMe(" creating %,d bytes of compressible character data...",compressibleStringLength); 291 | char[] tempStringCompressible = new char[compressibleStringLength]; 292 | for (int i = 0 ; i < compressibleStringLength ; i++) { 293 | tempStringCompressible[i] = 'a'; 294 | } 295 | compressibleStringHolder = new String(tempStringCompressible); 296 | 297 | 298 | jmongoiibench t = new jmongoiibench(); 299 | 300 | Thread reporterThread = new Thread(t.new MyReporter()); 301 | reporterThread.start(); 302 | 303 | Thread[] tWriterThreads = new Thread[writerThreads]; 304 | 305 | // start the loaders 306 | for (int i=0; i 0) { 315 | if (writerThreads > 0) { 316 | while (globalInserts.get() < queryBeginNumDocs) { 317 | try { 318 | Thread.sleep(100); 319 | } catch (InterruptedException e) { 320 | } 321 | } 322 | } 323 | globalQueriesStarted.set(System.currentTimeMillis()); 324 | 325 | for (int i=0; i= 
maxInsertsPerSecond) { 403 | // pause until a second has passed 404 | while (System.currentTimeMillis() < nextMs) { 405 | try { 406 | Thread.sleep(20); 407 | } catch (Exception e) { 408 | e.printStackTrace(); 409 | } 410 | } 411 | numLastInserts = numInserts; 412 | nextMs = System.currentTimeMillis() + 1000; 413 | } 414 | 415 | for (int i = 0; i < documentsPerInsert; i++) { 416 | //id++; 417 | int thisCustomerId = rand.nextInt(numCustomers); 418 | double thisPrice= ((rand.nextDouble() * maxPrice) + (double) thisCustomerId) / 100.0; 419 | BasicDBObject doc = new BasicDBObject(); 420 | //doc.put("_id",id); 421 | doc.put("dateandtime", System.currentTimeMillis()); 422 | doc.put("cashregisterid", rand.nextInt(numCashRegisters)); 423 | doc.put("customerid", thisCustomerId); 424 | doc.put("productid", rand.nextInt(numProducts)); 425 | doc.put("price", thisPrice); 426 | for (int charField = 1; charField <= numCharFields; charField++) { 427 | int startPosition = rand.nextInt(randomStringLength-lengthCharFields); 428 | doc.put("cf"+Integer.toString(charField), randomStringHolder.substring(startPosition,startPosition+numUncompressibleCharacters) + compressibleStringHolder.substring(startPosition,startPosition+numCompressibleCharacters)); 429 | } 430 | aDocs[i]=doc; 431 | } 432 | 433 | try { 434 | coll.insert(aDocs); 435 | numInserts += documentsPerInsert; 436 | globalInserts.addAndGet(documentsPerInsert); 437 | 438 | } catch (Exception e) { 439 | logMe("Writer thread %d : EXCEPTION",threadNumber); 440 | e.printStackTrace(); 441 | globalInsertExceptions.incrementAndGet(); 442 | } 443 | 444 | if (allDone == 1) 445 | break; 446 | } 447 | 448 | } catch (Exception e) { 449 | logMe("Writer thread %d : EXCEPTION",threadNumber); 450 | e.printStackTrace(); 451 | } 452 | 453 | long numWriters = globalWriterThreads.decrementAndGet(); 454 | if (numWriters == 0) 455 | allDone = 1; 456 | } 457 | } 458 | 459 | 460 | class MyQuery implements Runnable { 461 | int threadCount; 462 | int 
threadNumber; 463 | DB db; 464 | 465 | java.util.Random rand; 466 | 467 | MyQuery(int threadCount, int threadNumber, DB db) { 468 | this.threadCount = threadCount; 469 | this.threadNumber = threadNumber; 470 | this.db = db; 471 | rand = new java.util.Random((long) threadNumber + globalQueriesStarted.get()); 472 | } 473 | public void run() { 474 | long t0 = System.currentTimeMillis(); 475 | 476 | String collectionName = "purchases_index"; 477 | 478 | DBCollection coll = db.getCollection(collectionName); 479 | 480 | long numQueriesExecuted = 0; 481 | long numQueriesTimeMs = 0; 482 | 483 | int whichQuery = 0; 484 | 485 | try { 486 | logMe("Query thread %d : ready to query collection %s",threadNumber, collectionName); 487 | 488 | while (allDone == 0) { 489 | 490 | // wait until my next runtime 491 | if (msBetweenQueries > 0) { 492 | try { 493 | Thread.sleep(msBetweenQueries); 494 | } catch (Exception e) { 495 | e.printStackTrace(); 496 | } 497 | } 498 | 499 | long thisNow = System.currentTimeMillis(); 500 | 501 | whichQuery++; 502 | if (whichQuery > 3) { 503 | whichQuery = 1; 504 | } 505 | 506 | int thisCustomerId = rand.nextInt(numCustomers); 507 | double thisPrice = ((rand.nextDouble() * maxPrice) + (double) thisCustomerId) / 100.0; 508 | int thisCashRegisterId = rand.nextInt(numCashRegisters); 509 | int thisProductId = rand.nextInt(numProducts); 510 | long thisRandomTime = t0 + (long) ((double) (thisNow - t0) * rand.nextDouble()); 511 | 512 | BasicDBObject query = new BasicDBObject(); 513 | BasicDBObject keys = new BasicDBObject(); 514 | 515 | if (whichQuery == 1) { 516 | BasicDBObject query1a = new BasicDBObject(); 517 | query1a.put("price", thisPrice); 518 | query1a.put("dateandtime", thisRandomTime); 519 | query1a.put("customerid", new BasicDBObject("$gte", thisCustomerId)); 520 | 521 | BasicDBObject query1b = new BasicDBObject(); 522 | query1b.put("price", thisPrice); 523 | query1b.put("dateandtime", new BasicDBObject("$gt", thisRandomTime)); 524 | 525 | 
BasicDBObject query1c = new BasicDBObject(); 526 | query1c.put("price", new BasicDBObject("$gt", thisPrice)); 527 | 528 | ArrayList list1 = new ArrayList(); 529 | list1.add(query1a); 530 | list1.add(query1b); 531 | list1.add(query1c); 532 | 533 | query.put("$or", list1); 534 | 535 | keys.put("price",1); 536 | keys.put("dateandtime",1); 537 | keys.put("customerid",1); 538 | keys.put("_id",0); 539 | 540 | } else if (whichQuery == 2) { 541 | BasicDBObject query2a = new BasicDBObject(); 542 | query2a.put("price", thisPrice); 543 | query2a.put("customerid", new BasicDBObject("$gte", thisCustomerId)); 544 | 545 | BasicDBObject query2b = new BasicDBObject(); 546 | query2b.put("price", new BasicDBObject("$gt", thisPrice)); 547 | 548 | ArrayList list2 = new ArrayList(); 549 | list2.add(query2a); 550 | list2.add(query2b); 551 | 552 | query.put("$or", list2); 553 | 554 | keys.put("price",1); 555 | keys.put("customerid",1); 556 | keys.put("_id",0); 557 | 558 | } else if (whichQuery == 3) { 559 | BasicDBObject query3a = new BasicDBObject(); 560 | query3a.put("cashregisterid", thisCashRegisterId); 561 | query3a.put("price", thisPrice); 562 | query3a.put("customerid", new BasicDBObject("$gte", thisCustomerId)); 563 | 564 | BasicDBObject query3b = new BasicDBObject(); 565 | query3b.put("cashregisterid", thisCashRegisterId); 566 | query3b.put("price", new BasicDBObject("$gt", thisPrice)); 567 | 568 | BasicDBObject query3c = new BasicDBObject(); 569 | query3c.put("cashregisterid", new BasicDBObject("$gt", thisCashRegisterId)); 570 | 571 | ArrayList list3 = new ArrayList(); 572 | list3.add(query3a); 573 | list3.add(query3b); 574 | list3.add(query3c); 575 | 576 | query.put("$or", list3); 577 | 578 | keys.put("cashregisterid",1); 579 | keys.put("price",1); 580 | keys.put("customerid",1); 581 | keys.put("_id",0); 582 | } 583 | 584 | //logMe("Executed query %d",whichQuery); 585 | long now = System.currentTimeMillis(); 586 | DBCursor cursor = null; 587 | try { 588 | cursor = 
coll.find(query,keys).limit(queryLimit); 589 | while(cursor.hasNext()) { 590 | //System.out.println(cursor.next()); 591 | cursor.next(); 592 | } 593 | cursor.close(); 594 | cursor = null; 595 | } catch (Exception e) { 596 | logMe("Query thread %d : EXCEPTION",threadNumber); 597 | e.printStackTrace(); 598 | if (cursor != null) 599 | cursor.close(); 600 | } 601 | long elapsed = System.currentTimeMillis() - now; 602 | 603 | //logMe("Query thread %d : performing : %s",threadNumber,thisSelect); 604 | 605 | globalQueriesExecuted.incrementAndGet(); 606 | globalQueriesTimeMs.addAndGet(elapsed); 607 | } 608 | 609 | } catch (Exception e) { 610 | logMe("Query thread %d : EXCEPTION",threadNumber); 611 | e.printStackTrace(); 612 | } 613 | 614 | long numQueries = globalQueryThreads.decrementAndGet(); 615 | } 616 | } 617 | 618 | 619 | // reporting thread, outputs information to console and file 620 | class MyReporter implements Runnable { 621 | public void run() 622 | { 623 | long t0 = System.currentTimeMillis(); 624 | long lastInserts = 0; 625 | long lastQueriesNum = 0; 626 | long lastQueriesMs = 0; 627 | long lastMs = t0; 628 | long intervalNumber = 0; 629 | long nextFeedbackMillis = t0 + (1000 * secondsPerFeedback * (intervalNumber + 1)); 630 | long nextFeedbackInserts = lastInserts + insertsPerFeedback; 631 | long thisInserts = 0; 632 | long thisQueriesNum = 0; 633 | long thisQueriesMs = 0; 634 | long thisQueriesStarted = 0; 635 | long endDueToTime = System.currentTimeMillis() + (1000 * numSeconds); 636 | 637 | while (allDone == 0) 638 | { 639 | try { 640 | Thread.sleep(100); 641 | } catch (Exception e) { 642 | e.printStackTrace(); 643 | } 644 | 645 | long now = System.currentTimeMillis(); 646 | 647 | if (now >= endDueToTime) 648 | { 649 | allDone = 1; 650 | } 651 | 652 | thisInserts = globalInserts.get(); 653 | thisQueriesNum = globalQueriesExecuted.get(); 654 | thisQueriesMs = globalQueriesTimeMs.get(); 655 | thisQueriesStarted = globalQueriesStarted.get(); 656 | if (((now 
> nextFeedbackMillis) && (secondsPerFeedback > 0)) || 657 | ((thisInserts >= nextFeedbackInserts) && (insertsPerFeedback > 0))) 658 | { 659 | intervalNumber++; 660 | nextFeedbackMillis = t0 + (1000 * secondsPerFeedback * (intervalNumber + 1)); 661 | nextFeedbackInserts = (intervalNumber + 1) * insertsPerFeedback; 662 | 663 | long elapsed = now - t0; 664 | long thisIntervalMs = now - lastMs; 665 | 666 | long thisIntervalInserts = thisInserts - lastInserts; 667 | double thisIntervalInsertsPerSecond = thisIntervalInserts/(double)thisIntervalMs*1000.0; 668 | double thisInsertsPerSecond = thisInserts/(double)elapsed*1000.0; 669 | 670 | long thisIntervalQueriesNum = thisQueriesNum - lastQueriesNum; 671 | long thisIntervalQueriesMs = thisQueriesMs - lastQueriesMs; 672 | double thisIntervalQueryAvgMs = 0; 673 | double thisQueryAvgMs = 0; 674 | double thisIntervalAvgQPS = 0; 675 | double thisAvgQPS = 0; 676 | 677 | long thisInsertExceptions = globalInsertExceptions.get(); 678 | 679 | if (thisIntervalQueriesNum > 0) { 680 | thisIntervalQueryAvgMs = thisIntervalQueriesMs/(double)thisIntervalQueriesNum; 681 | } 682 | if (thisQueriesNum > 0) { 683 | thisQueryAvgMs = thisQueriesMs/(double)thisQueriesNum; 684 | } 685 | 686 | if (thisQueriesStarted > 0) 687 | { 688 | long adjustedElapsed = now - thisQueriesStarted; 689 | if (adjustedElapsed > 0) 690 | { 691 | thisAvgQPS = (double)thisQueriesNum/((double)adjustedElapsed/1000.0); 692 | } 693 | if (thisIntervalMs > 0) 694 | { 695 | thisIntervalAvgQPS = (double)thisIntervalQueriesNum/((double)thisIntervalMs/1000.0); 696 | } 697 | } 698 | 699 | if (secondsPerFeedback > 0) 700 | { 701 | logMe("%,d inserts : %,d seconds : cum ips=%,.2f : int ips=%,.2f : cum avg qry=%,.2f : int avg qry=%,.2f : cum avg qps=%,.2f : int avg qps=%,.2f : exceptions=%,d", thisInserts, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS, thisInsertExceptions); 702 | } else { 
703 | logMe("%,d inserts : %,d seconds : cum ips=%,.2f : int ips=%,.2f : cum avg qry=%,.2f : int avg qry=%,.2f : cum avg qps=%,.2f : int avg qps=%,.2f : exceptions=%,d", intervalNumber * insertsPerFeedback, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS, thisInsertExceptions); 704 | } 705 | 706 | try { 707 | if (outputHeader) 708 | { 709 | writer.write("tot_inserts\telap_secs\tcum_ips\tint_ips\tcum_qry_avg\tint_qry_avg\tcum_qps\tint_qps\texceptions\n"); 710 | outputHeader = false; 711 | } 712 | 713 | String statusUpdate = ""; 714 | 715 | if (secondsPerFeedback > 0) 716 | { 717 | statusUpdate = String.format("%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%,d\n",thisInserts, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS, thisInsertExceptions); 718 | } else { 719 | statusUpdate = String.format("%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%,d\n",intervalNumber * insertsPerFeedback, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS, thisInsertExceptions); 720 | } 721 | writer.write(statusUpdate); 722 | writer.flush(); 723 | } catch (IOException e) { 724 | e.printStackTrace(); 725 | } 726 | 727 | lastInserts = thisInserts; 728 | lastQueriesNum = thisQueriesNum; 729 | lastQueriesMs = thisQueriesMs; 730 | 731 | lastMs = now; 732 | } 733 | } 734 | 735 | // output final numbers... 
736 | long now = System.currentTimeMillis(); 737 | thisInserts = globalInserts.get(); 738 | thisQueriesNum = globalQueriesExecuted.get(); 739 | thisQueriesMs = globalQueriesTimeMs.get(); 740 | thisQueriesStarted = globalQueriesStarted.get(); 741 | intervalNumber++; 742 | nextFeedbackMillis = t0 + (1000 * secondsPerFeedback * (intervalNumber + 1)); 743 | nextFeedbackInserts = (intervalNumber + 1) * insertsPerFeedback; 744 | long elapsed = now - t0; 745 | long thisIntervalMs = now - lastMs; 746 | long thisIntervalInserts = thisInserts - lastInserts; 747 | double thisIntervalInsertsPerSecond = thisIntervalInserts/(double)thisIntervalMs*1000.0; 748 | double thisInsertsPerSecond = thisInserts/(double)elapsed*1000.0; 749 | long thisIntervalQueriesNum = thisQueriesNum - lastQueriesNum; 750 | long thisIntervalQueriesMs = thisQueriesMs - lastQueriesMs; 751 | double thisIntervalQueryAvgMs = 0; 752 | double thisQueryAvgMs = 0; 753 | double thisIntervalAvgQPS = 0; 754 | double thisAvgQPS = 0; 755 | if (thisIntervalQueriesNum > 0) { 756 | thisIntervalQueryAvgMs = thisIntervalQueriesMs/(double)thisIntervalQueriesNum; 757 | } 758 | if (thisQueriesNum > 0) { 759 | thisQueryAvgMs = thisQueriesMs/(double)thisQueriesNum; 760 | } 761 | if (thisQueriesStarted > 0) 762 | { 763 | long adjustedElapsed = now - thisQueriesStarted; 764 | if (adjustedElapsed > 0) 765 | { 766 | thisAvgQPS = (double)thisQueriesNum/((double)adjustedElapsed/1000.0); 767 | } 768 | if (thisIntervalMs > 0) 769 | { 770 | thisIntervalAvgQPS = (double)thisIntervalQueriesNum/((double)thisIntervalMs/1000.0); 771 | } 772 | } 773 | if (secondsPerFeedback > 0) 774 | { 775 | logMe("%,d inserts : %,d seconds : cum ips=%,.2f : int ips=%,.2f : cum avg qry=%,.2f : int avg qry=%,.2f : cum avg qps=%,.2f : int avg qps=%,.2f", thisInserts, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS); 776 | } else { 777 | logMe("%,d inserts : %,d seconds 
: cum ips=%,.2f : int ips=%,.2f : cum avg qry=%,.2f : int avg qry=%,.2f : cum avg qps=%,.2f : int avg qps=%,.2f", intervalNumber * insertsPerFeedback, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS); 778 | } 779 | try { 780 | if (outputHeader) 781 | { 782 | writer.write("tot_inserts\telap_secs\tcum_ips\tint_ips\tcum_qry_avg\tint_qry_avg\tcum_qps\tint_qps\n"); 783 | outputHeader = false; 784 | } 785 | String statusUpdate = ""; 786 | if (secondsPerFeedback > 0) 787 | { 788 | statusUpdate = String.format("%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",thisInserts, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS); 789 | } else { 790 | statusUpdate = String.format("%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",intervalNumber * insertsPerFeedback, elapsed / 1000l, thisInsertsPerSecond, thisIntervalInsertsPerSecond, thisQueryAvgMs, thisIntervalQueryAvgMs, thisAvgQPS, thisIntervalAvgQPS); 791 | } 792 | writer.write(statusUpdate); 793 | writer.flush(); 794 | } catch (IOException e) { 795 | e.printStackTrace(); 796 | } 797 | 798 | } 799 | } 800 | 801 | 802 | public static void logMe(String format, Object... args) { 803 | System.out.println(Thread.currentThread() + String.format(format, args)); 804 | } 805 | } 806 | --------------------------------------------------------------------------------