├── HOW-TO-RUN.txt
├── README
├── README.md
├── build.xml
├── doc
│   └── src
│       └── TimedDriver.odt
├── lib
│   ├── .gitignore
│   ├── apache-log4j-extras-1.1.jar
│   ├── firebird
│   │   ├── connector-api-1.5.jar
│   │   └── jaybird-2.2.9.jar
│   ├── log4j-1.2.17.jar
│   ├── mysql
│   │   └── mysql-connector-java-8.0.13.jar
│   ├── oracle
│   │   ├── .gitignore
│   │   └── README.txt
│   └── postgres
│       └── postgresql-9.3-1102.jdbc41.jar
├── run
│   ├── .gitignore
│   ├── funcs.sh
│   ├── generateGraphs.sh
│   ├── generateReport.sh
│   ├── log4j.properties
│   ├── misc
│   │   ├── blk_device_iops.R
│   │   ├── blk_device_kbps.R
│   │   ├── cpu_utilization.R
│   │   ├── dirty_buffers.R
│   │   ├── latency.R
│   │   ├── net_device_iops.R
│   │   ├── net_device_kbps.R
│   │   ├── os_collector_linux.py
│   │   └── tpm_nopm.R
│   ├── props.fb
│   ├── props.mysql
│   ├── props.ora
│   ├── props.pg
│   ├── runBenchmark.sh
│   ├── runDatabaseBuild.sh
│   ├── runDatabaseDestroy.sh
│   ├── runLoader.sh
│   ├── runSQL.sh
│   ├── sql.common
│   │   ├── buildFinish.sql
│   │   ├── foreignKeys.sql
│   │   ├── indexCreates.sql
│   │   ├── indexDrops.sql
│   │   ├── tableCreates.sql
│   │   ├── tableDrops.sql
│   │   ├── tableTruncates.sql
│   │   └── test.sql
│   ├── sql.firebird
│   │   └── extraHistID.sql
│   ├── sql.mysql
│   │   ├── buildFinish.sql
│   │   ├── indexCreates.sql
│   │   ├── indexDrops.sql
│   │   ├── tableCreates.sql
│   │   ├── tableCreates_partition.sql
│   │   ├── tableDrops.sql
│   │   └── tableTruncates.sql
│   ├── sql.oracle
│   │   └── extraHistID.sql
│   └── sql.postgres
│       ├── buildFinish.sql
│       ├── extraHistID.sql
│       └── tableCopies.sql
└── src
    ├── LoadData
    │   ├── LoadData.java
    │   └── LoadDataWorker.java
    ├── OSCollector
    │   └── OSCollector.java
    ├── client
    │   ├── CommitException.java
    │   ├── jTPCC.java
    │   ├── jTPCCConfig.java
    │   ├── jTPCCConnection.java
    │   ├── jTPCCRandom.java
    │   ├── jTPCCTData.java
    │   ├── jTPCCTerminal.java
    │   └── jTPCCUtil.java
    └── jdbc
        └── ExecJDBC.java
/HOW-TO-RUN.txt: -------------------------------------------------------------------------------- 1 | 2 | Instructions for running BenchmarkSQL on PostgreSQL 3 | --------------------------------------------------- 4 | 5 | 0. Requirements 6 | 7 | Use of JDK7 is required. 8 | 9 | 1. Create the benchmarksql user and a database 10 | 11 | As the Unix user postgres, use the psql shell to connect to the postgres 12 | database and issue the CREATE USER and CREATE DATABASE commands. 13 | 14 | [postgres#localhost ~] $ psql postgres 15 | psql (9.5.2) 16 | Type "help" for help. 17 | 18 | postgres=# CREATE USER benchmarksql WITH ENCRYPTED PASSWORD 'changeme'; 19 | postgres=# CREATE DATABASE benchmarksql OWNER benchmarksql; 20 | postgres=# \q 21 | [postgres#localhost ~] $ 22 | 23 | 2. Compile the BenchmarkSQL source code 24 | 25 | As your own UNIX user, change into the toplevel directory of the 26 | benchmarksql git repository checkout or the directory that was 27 | created by unpacking the release tarball/zipfile. Use the ant 28 | command to compile the code. 29 | 30 | [wieck@localhost ~] $ cd benchmarksql 31 | [wieck@localhost benchmarksql] $ ant 32 | Buildfile: /nas1/home/wieck/benchmarksql.git/build.xml 33 | 34 | init: 35 | [mkdir] Created dir: /home/wieck/benchmarksql/build 36 | 37 | compile: 38 | [javac] Compiling 11 source files to /home/wieck/benchmarksql/build 39 | 40 | dist: 41 | [mkdir] Created dir: /home/wieck/benchmarksql/dist 42 | [jar] Building jar: /home/wieck/benchmarksql/dist/BenchmarkSQL-5.0.jar 43 | BUILD SUCCESSFUL 44 | Total time: 1 second 45 | [wieck@localhost benchmarksql] $ 46 | 47 | 3. Create the benchmark configuration file 48 | 49 | Change into the run directory, copy the props.pg file and edit 50 | the copy to match your system setup and desired scaling. 
51 | 52 | [wieck@localhost benchmarksql] $ cd run 53 | [wieck@localhost run] $ cp props.pg my_postgres.properties 54 | [wieck@localhost run] $ vi my_postgres.properties 55 | [wieck@localhost run] $ 56 | 57 | Note that the provided example configuration is meant to test 58 | the functionality of your setup, i.e. that BenchmarkSQL can connect 59 | to the database and execute transactions. That configuration 60 | is NOT a benchmark run. To turn it into one you need a 61 | configuration that matches your database server size and 62 | workload. Leave the sizing for now and perform a first functional 63 | test. 64 | 65 | The BenchmarkSQL database has an initial size of approximately 66 | 100MB per configured warehouse. A typical setup would be 67 | a database of 2-5 times the physical RAM of the server. 68 | 69 | Likewise, the number of concurrent database connections (config 70 | parameter terminals) should be about 2-6 times the 71 | number of CPU threads. 72 | 73 | Last but not least, benchmark runs are normally done for hours, 74 | if not days. At the database sizes above it 75 | will take that long to reach a steady state and to make sure that 76 | all performance relevant functionality of the database, like 77 | checkpointing and vacuuming, is included in the measurement. 78 | 79 | So with a modern server that has 32-256 CPU 80 | threads and 64-512GiB of RAM, we are talking about thousands of 81 | warehouses and hundreds of concurrent database connections. 82 | 83 | 4. Build the schema and initial database load 84 | 85 | Execute the runDatabaseBuild.sh script with your configuration file. 86 | 87 | [wieck@localhost run]$ ./runDatabaseBuild.sh my_postgres.properties 88 | # ------------------------------------------------------------ 89 | # Loading SQL file ./sql.common/tableCreates.sql 90 | # ------------------------------------------------------------ 91 | create table bmsql_config ( 92 | cfg_name varchar(30) primary key, 93 | cfg_value varchar(50) 94 | ); 95 | create table bmsql_warehouse ( 96 | w_id integer not null, 97 | w_ytd decimal(12,2), 98 | [...] 99 | Starting BenchmarkSQL LoadData 100 | 101 | driver=org.postgresql.Driver 102 | conn=jdbc:postgresql://localhost:5432/benchmarksql 103 | user=benchmarksql 104 | password=*********** 105 | warehouses=30 106 | loadWorkers=10 107 | fileLocation (not defined) 108 | csvNullValue (not defined - using default 'NULL') 109 | 110 | Worker 000: Loading ITEM 111 | Worker 001: Loading Warehouse 1 112 | Worker 002: Loading Warehouse 2 113 | Worker 003: Loading Warehouse 3 114 | [...] 115 | Worker 000: Loading Warehouse 30 done 116 | Worker 008: Loading Warehouse 29 done 117 | # ------------------------------------------------------------ 118 | # Loading SQL file ./sql.common/indexCreates.sql 119 | # ------------------------------------------------------------ 120 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey 121 | primary key (w_id); 122 | alter table bmsql_district add constraint bmsql_district_pkey 123 | primary key (d_w_id, d_id); 124 | [...] 125 | vacuum analyze; 126 | [wieck@localhost run]$ 127 | 128 | 5. Run the configured benchmark 129 | 130 | [wieck@localhost run]$ ./runBenchmark.sh my_postgres.properties 131 | 132 | The benchmark will run with the configured number of concurrent 133 | connections (terminals) for the configured duration or number of transactions. 
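    Which of the two limits applies is controlled in the properties
    file. The example configuration performs a short count-based run;
    step 6 below switches to a time-based run by setting the
    transaction count to zero. A minimal sketch of the relevant
    entries (hypothetical values for a functional test, to be adapted
    to your own sizing):

        terminals=1
        # count-based functional test run; runMins stays 0
        runTxnsPerTerminal=10
        runMins=0

    For a real benchmark you would instead use the time-based form
    shown in step 6 (runTxnsPerTerminal=0 and a nonzero runMins).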
134 | 135 | The end result of the benchmark will be reported like this: 136 | 137 | 01:58:09,081 [Thread-1] INFO jTPCC : Term-00, 138 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmC (NewOrders) = 179.55 139 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmTOTAL = 329.17 140 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session Start = 2016-05-25 01:58:07 141 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session End = 2016-05-25 01:58:09 142 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Transaction Count = 10 143 | 144 | At this point you have a working setup. 145 | 146 | 6. Scale the benchmark configuration 147 | 148 | Change the my_postgres.properties file to the correct scaling 149 | (number of warehouses and concurrent connections/terminals). Switch 150 | from using a transaction count to a time-based run: 151 | 152 | runTxnsPerTerminal=0 153 | runMins=180 154 | 155 | Rebuild the database (if needed) by running 156 | 157 | [wieck@localhost run]$ ./runDatabaseDestroy.sh my_postgres.properties 158 | [wieck@localhost run]$ ./runDatabaseBuild.sh my_postgres.properties 159 | 160 | Then run the benchmark again. 161 | 162 | Rinse and repeat. 163 | 164 | 7. Result report 165 | 166 | BenchmarkSQL collects detailed performance statistics and (if 167 | configured) OS performance data. The example configuration file 168 | defaults to a directory starting with my_result_. 169 | 170 | Use the generateReport.sh DIRECTORY script to create an HTML file 171 | with graphs. This requires R to be installed, which is beyond the 172 | scope of this HOW-TO. 173 | 174 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | The test process for MySQL/TiDB: 2 | # Create the tables 3 | ./runSQL.sh props.mysql sql.mysql/tableCreates.sql 4 | # Add the indices (TiDB prefers to import data after adding the indices) 5 | ./runSQL.sh props.mysql sql.mysql/indexCreates.sql 6 | # Import Data 7 | ./runLoader.sh props.mysql 8 | # Run the benchmark 9 | ./runBenchmark.sh props.mysql 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | BENCHMARKSQL README 2 | =================== 3 | 4 | CHANGE LOG: 5 | ----------- 6 | 7 | Version 5.0 lussman & jannicash: 8 | -------------------------------------- 9 | + Upgrade to PostgreSQL 9.3 JDBC 4.1 version 1102 driver 10 | + Improve support for Oracle 11 | + Re-implement the non-uniform random generator in TPC-C style. 12 | + Conform to clause 4.3.3.1 and enable lookup by last name 13 | + Add a switch to disable terminal-warehouse association, spreading 14 | the data access over all configured warehouses. 15 | + Re-worked the run shell scripts and the location of SQL files to 16 | make support of more database types easier. 17 | + Add support for Firebird (http://www.firebirdsql.org). 18 | + Add FOREIGN KEYS as defined by TPC-C 1.3. 19 | + Major code overhaul. The per transaction type terminal data 20 | generation, execution and terminal trace code is moved into a 21 | module jTPCCTData. The database connection with all prepared 22 | statements has moved into a module jTPCCConnection. 23 | + Add collecting per transaction result data and OS Level 24 | resource usage collection. The R statistics package is used 25 | to graph detailed information and a complete report in HTML 26 | can be generated from the data. 
27 | 28 | Version 4.1.2 TBD jannicash: 29 | ----------------------------------- 30 | + Fixed one more preparedStatement() leak. Hopefully with the help 31 | of Oracle's V$OPEN_CURSOR view we got them all now. 32 | + Fixed a possible deadlock problem in the NEW_ORDER transaction. 33 | Multiple parallel transactions could attempt to lock the same 34 | STOCK rows in reverse order. Sorting the order lines by item ID 35 | avoids this problem. 36 | 37 | Version 4.1.1 2016-01-31 jannicash: 38 | ----------------------------------- 39 | + Changed the status line to update only once per second. The previous 40 | implementation was getting rather noisy at high throughput. 41 | + Fixed two preparedStatement() leaks that could cause ORA-01000 errors 42 | on longer runs with high throughput. 43 | + Fixed a problem in the calculation of sleep time between 44 | transactions when using limitTxnsPerMin that could cause the test 45 | to hang at the end. 46 | + Added support for escaping ; as \; in SQL files to be able to load 47 | functions and execute anonymous PL blocks (needed for next item). 48 | + Changed the definition of history.hist_id into a plain integer with 49 | no special functionality. Two new database vendor specific SQL 50 | scripts allow enabling the column after data load as an auto 51 | incrementing primary key. See HOW-TO-RUN.txt for details. 52 | 53 | Version 4.1.0 2014-03-13 lussman: 54 | --------------------------------- 55 | + Upgrade to using JDK 7 56 | + Upgrade to PostgreSQL JDBC 4.1 version 1101 driver 57 | + Stop claiming to support DB2 (only Postgres & Oracle are well tested) 58 | 59 | Version 4.0.9 2013-11-04 cadym: 60 | ------------------------------- 61 | + Incorporate new PostgreSQL JDBC 4 version 1100 driver 62 | + Changed default user from postgres to benchmarksql 63 | + Added id column as primary key to history table 64 | + Renamed schema to benchmarksql 65 | + Changed log4j format to be more readable 66 | + Created the "benchmark" schema to contain all tables 67 | + Incorporate new PostgreSQL JDBC4 version 1003 driver 68 | + Transaction rate pacing mechanism 69 | + Correct error with loading customer table from csv file 70 | + Status line report dynamically shown on terminal 71 | + Fix lookup by name in PaymentStatus and Delivery Transactions 72 | (in order to be more compatible with the TPC-C spec) 73 | + Rationalized the variable naming in the input parameter files 74 | (now that the GUI is gone, variable names still make sense) 75 | + Default log4j settings only write to file (not terminal) 76 | 77 | Version 4.0.2 2013-06-06 lussman & cadym: 78 | -------------------------------------------- 79 | + Removed Swing & AWT GUI so that this program is runnable from 80 | the command line 81 | + Remove log4j usage from runSQL & runLoader (only used now for 82 | the actual running of the Benchmark) 83 | + Fix truncation problem with customer.csv file 84 | + Comment out "BadCredit" business logic that was not working 85 | and throwing stack traces 86 | + Fix log4j messages to always show the terminal name 87 | + Remove bogus log4j messages 88 | 89 | Version 3.0.9 2013-03-21 lussman: 90 | ---------------------------------- 91 | + Config log4j for rotating log files once per minute 92 | + Default flat file location to '/tmp/csv/' in 93 | table copies script 94 | + Drop incomplete & untested Windoze '.bat' scripts 95 | + Standardize logging with log4j 96 | + Improve Logging with meaningful DEBUG and INFO levels 97 | + Simplify "build.xml" to eliminate nbproject dependency 98 | + 
Defaults read in from properties 99 | + Groundwork laid to eliminate the GUI 100 | + Default GUI console to PostgreSQL and 10 Warehouses 101 | 102 | Version 2.3.5 2013-01-29 lussman: 103 | ----------------------------------- 104 | + Default build is now with JDK 1.6 and JDBC 4 Postgres 9.2 driver 105 | + Remove outdated JDBC 3 drivers (for JDK 1.5). You can run as 106 | before by using a JDBC4 driver from any supported vendor. 107 | + Remove ExecJDBC warning about trying to rollback when in 108 | autocommit mode 109 | + Remove the extraneous COMMIT statements from the DDL scripts 110 | since ExecJDBC runs in autocommit mode 111 | + Fix the version number displayed in the console 112 | 113 | Versions 1.0 thru 2.2 2004 - 2012 lussman: 114 | ------------------------------------------- 115 | + Dare to Compare 116 | + Forked from the jTPCC project 117 | -------------------------------------------------------------------------------- /build.xml: -------------------------------------------------------------------------------- 2 | BenchmarkSQL Build File -------------------------------------------------------------------------------- /doc/src/TimedDriver.odt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/doc/src/TimedDriver.odt -------------------------------------------------------------------------------- /lib/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/.gitignore -------------------------------------------------------------------------------- /lib/apache-log4j-extras-1.1.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/apache-log4j-extras-1.1.jar -------------------------------------------------------------------------------- /lib/firebird/connector-api-1.5.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/firebird/connector-api-1.5.jar -------------------------------------------------------------------------------- /lib/firebird/jaybird-2.2.9.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/firebird/jaybird-2.2.9.jar -------------------------------------------------------------------------------- /lib/log4j-1.2.17.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/log4j-1.2.17.jar -------------------------------------------------------------------------------- /lib/mysql/mysql-connector-java-8.0.13.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/mysql/mysql-connector-java-8.0.13.jar -------------------------------------------------------------------------------- 
/lib/oracle/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | !README.txt 4 | -------------------------------------------------------------------------------- /lib/oracle/README.txt: -------------------------------------------------------------------------------- 1 | Copy the ojdbc.jar to use with Oracle here, or make 2 | sure that the environment variable ORACLE_HOME is set properly 3 | and the JDBC driver is found at $ORACLE_HOME/lib. 4 | -------------------------------------------------------------------------------- /lib/postgres/postgresql-9.3-1102.jdbc41.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/lib/postgres/postgresql-9.3-1102.jdbc41.jar -------------------------------------------------------------------------------- /run/.gitignore: -------------------------------------------------------------------------------- 1 | my_* 2 | *.log 3 | .jTPCC_run_seq.dat 4 | -------------------------------------------------------------------------------- /run/funcs.sh: -------------------------------------------------------------------------------- 1 | # ---- 2 | # $1 is the properties file 3 | # ---- 4 | PROPS=$1 5 | if [ ! -f ${PROPS} ] ; then 6 | echo "${PROPS}: no such file" >&2 7 | exit 1 8 | fi 9 | 10 | # ---- 11 | # getProp() 12 | # 13 | # Get a config value from the properties file. 14 | # ---- 15 | function getProp() 16 | { 17 | grep "^${1}=" ${PROPS} | sed -e "s/^${1}=//" 18 | } 19 | 20 | # ---- 21 | # setCP() 22 | # 23 | # Determine the CLASSPATH based on the database system. 24 | # ---- 25 | function setCP() 26 | { 27 | case "$(getProp db)" in 28 | firebird) 29 | cp="../lib/firebird/*:../lib/*" 30 | ;; 31 | oracle) 32 | cp="../lib/oracle/*" 33 | if [ ! -z "${ORACLE_HOME}" -a -d ${ORACLE_HOME}/lib ] ; then 34 | cp="${cp}:${ORACLE_HOME}/lib/*" 35 | fi 36 | cp="${cp}:../lib/*" 37 | ;; 38 | postgres) 39 | cp="../lib/postgres/*:../lib/*" 40 | ;; 41 | mysql) 42 | cp="../lib/mysql/*:../lib/*" 43 | ;; 44 | esac 45 | myCP=".:${cp}:../dist/*" 46 | export myCP 47 | } 48 | 49 | # ---- 50 | # Make sure that the properties file does have db= and the value 51 | # is a database we support. 52 | # ---- 53 | case "$(getProp db)" in 54 | firebird|oracle|postgres|mysql) 55 | ;; 56 | "") echo "ERROR: missing db= config option in ${PROPS}" >&2 57 | exit 1 58 | ;; 59 | *) echo "ERROR: unsupported database type 'db=$(getProp db)' in ${PROPS}" >&2 60 | exit 1 61 | ;; 62 | esac 63 | 64 | -------------------------------------------------------------------------------- /run/generateGraphs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ---- 3 | # Script to generate the detail graphs of a BenchmarkSQL run. 4 | # 5 | # Copyright (C) 2016, Denis Lussier 6 | # Copyright (C) 2016, Jan Wieck 7 | # ---- 8 | 9 | if [ $# -lt 1 ] ; then 10 | echo "usage: $(basename $0) RESULT_DIR [...]" >&2 11 | exit 2 12 | fi 13 | 14 | WIDTH=1200 15 | HEIGHT=400 16 | 17 | SIMPLE_GRAPHS="tpm_nopm latency cpu_utilization dirty_buffers" 18 | 19 | for resdir in $* ; do 20 | cd "${resdir}" || exit 1 21 | 22 | for graph in $SIMPLE_GRAPHS ; do 23 | echo -n "Generating ${resdir}/${graph}.png ... " 24 | out=$(sed -e "s/@WIDTH@/${WIDTH}/g" -e "s/@HEIGHT@/${HEIGHT}/g" \ 25 | <../misc/${graph}.R | R --no-save) 26 | if [ $? 
-ne 0 ] ; then 27 | echo "ERROR" 28 | echo "$out" >&2 29 | exit 3 30 | fi 31 | echo "OK" 32 | done 33 | 34 | for fname in ./data/blk_*.csv ; do 35 | if [ ! -f "${fname}" ] ; then 36 | continue 37 | fi 38 | devname=$(basename ${fname} .csv) 39 | 40 | echo -n "Generating ${resdir}/${devname}_iops.png ... " 41 | out=$(sed -e "s/@WIDTH@/${WIDTH}/g" -e "s/@HEIGHT@/${HEIGHT}/g" \ 42 | -e "s/@DEVICE@/${devname}/g" <../misc/blk_device_iops.R | R --no-save) 43 | if [ $? -ne 0 ] ; then 44 | echo "ERROR" 45 | echo "$out" >&2 46 | exit 3 47 | fi 48 | echo "OK" 49 | 50 | echo -n "Generating ${resdir}/${devname}_kbps.png ... " 51 | out=$(sed -e "s/@WIDTH@/${WIDTH}/g" -e "s/@HEIGHT@/${HEIGHT}/g" \ 52 | -e "s/@DEVICE@/${devname}/g" <../misc/blk_device_kbps.R | R --no-save) 53 | if [ $? -ne 0 ] ; then 54 | echo "ERROR" 55 | echo "$out" >&2 56 | exit 3 57 | fi 58 | echo "OK" 59 | done 60 | 61 | for fname in ./data/net_*.csv ; do 62 | if [ ! -f "${fname}" ] ; then 63 | continue 64 | fi 65 | devname=$(basename ${fname} .csv) 66 | 67 | echo -n "Generating ${resdir}/${devname}_iops.png ... " 68 | out=$(sed -e "s/@WIDTH@/${WIDTH}/g" -e "s/@HEIGHT@/${HEIGHT}/g" \ 69 | -e "s/@DEVICE@/${devname}/g" <../misc/net_device_iops.R | R --no-save) 70 | if [ $? -ne 0 ] ; then 71 | echo "ERROR" 72 | echo "$out" >&2 73 | exit 3 74 | fi 75 | echo "OK" 76 | 77 | echo -n "Generating ${resdir}/${devname}_kbps.png ... " 78 | out=$(sed -e "s/@WIDTH@/${WIDTH}/g" -e "s/@HEIGHT@/${HEIGHT}/g" \ 79 | -e "s/@DEVICE@/${devname}/g" <../misc/net_device_kbps.R | R --no-save) 80 | if [ $? -ne 0 ] ; then 81 | echo "ERROR" 82 | echo "$out" >&2 83 | exit 3 84 | fi 85 | echo "OK" 86 | done 87 | 88 | cd .. 89 | done 90 | 91 | -------------------------------------------------------------------------------- /run/generateReport.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ $# -ne 1 ] ; then 4 | echo "usage: $(basename $0) RESULT_DIR" >&2 5 | exit 2 6 | fi 7 | 8 | TABLE_WIDTH="1100px" 9 | 10 | function getRunInfo() 11 | { 12 | exec 3< data/runInfo.csv 13 | read hdrs <&3 14 | hdrs=$(echo ${hdrs} | tr ',' ' ') 15 | IFS=, read $hdrs <&3 16 | exec <&3- 17 | 18 | eval echo "\$$1" 19 | } 20 | 21 | function getRunInfoColumns() 22 | { 23 | exec 3< data/runInfo.csv 24 | read hdrs <&3 25 | hdrs=$(echo ${hdrs} | tr ',' ' ') 26 | exec <&3- 27 | 28 | echo "${hdrs}" 29 | } 30 | 31 | function getProp() 32 | { 33 | grep "^${1}=" run.properties | sed -e "s/^${1}=//" 34 | } 35 | 36 | ./generateGraphs.sh "${1}" 37 | cd "${1}" 38 | echo -n "Generating ${1}/report.html ... " 39 | 40 | # ---- 41 | # Start the report. 42 | # ---- 43 | cat >report.html <<_EOF_ 44 | 45 | 46 | 47 | BenchmarkSQL Run #$(getRunInfo run) started $(getRunInfo sessionStart) 48 | 49 | 90 | 91 | 92 |

93 | BenchmarkSQL Run #$(getRunInfo run) started $(getRunInfo sessionStart) 94 |

95 | 96 |

97 | This TPC-C style benchmark run was performed by the "$(getRunInfo driver)" 98 | driver of BenchmarkSQL version $(getRunInfo driverVersion). 99 |

100 | _EOF_ 101 | 102 | # ---- 103 | # Show the run properties. 104 | # ---- 105 | cat >>report.html <<_EOF_ 106 |

107 | Run Properties 108 |

109 |

110 | 111 | 118 |
112 |

113 | _EOF_
114 | sed -e 's/^password=.*/password=******/' <run.properties >>report.html
115 | cat >>report.html <<_EOF_
116 |     
117 |
119 |

120 | 121 | _EOF_ 122 | 123 | # ---- 124 | # Show the result summary. 125 | # ---- 126 | cat >>report.html <<_EOF_ 127 |

128 | Result Summary 129 |

130 | _EOF_ 131 | 132 | if [ $(getRunInfo driver) == "simple" ] ; then 133 | cat >> report.html <<_EOF_ 134 |

135 | Note that the "simple" driver is not a true TPC-C implementation. 136 | This driver only measures the database response time, not the 137 | response time of a System under Test as it would be experienced 138 | by an end-user in a 3-tier test implementation. 139 |

140 | _EOF_ 141 | fi 142 | 143 | cat >> report.html <<_EOF_ 144 |

145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | _EOF_ 160 | 161 | tr ',' ' ' " 168 | echo " " 169 | echo " " 170 | echo " " 171 | echo " " 172 | echo " " 173 | echo " " 174 | echo " " 175 | echo " " 176 | echo " " 177 | done >>report.html 178 | 179 | tpmC=$(grep "^tpmC," data/tx_summary.csv | sed -e 's/[^,]*,//' -e 's/,.*//') 180 | tpmCpct=$(grep "^tpmC," data/tx_summary.csv | sed -e 's/[^,]*,[^,]*,//' -e 's/,.*//') 181 | tpmTotal=$(grep "^tpmTotal," data/tx_summary.csv | sed -e 's/[^,]*,//' -e 's/,.*//') 182 | cat >>report.html <<_EOF_ 183 |
Transaction
Type
LatencyCountPercentRollbackErrorsSkipped
Deliveries
90th %Maximum
${name}${ninth}${max}${count}${percent}${rbk}${error}${dskipped}
184 |

185 | 186 |

187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 |
Overall tpmC:${tpmC}
Overall tpmTotal:${tpmTotal}
197 |

198 |

199 | The TPC-C specification has a theoretical maximum of 12.86 NEW_ORDER 200 | transactions per minute per warehouse. In reality this value cannot 201 | be reached because it would require a perfect mix with 45% of NEW_ORDER 202 | transactions and a ZERO response time from the System under Test 203 | including the database. 204 |

205 |

206 | The above tpmC of ${tpmC} is ${tpmCpct} of that theoretical maximum for a 207 | database with $(getRunInfo runWarehouses) warehouses. 208 |

209 | 210 | _EOF_ 211 | 212 | # ---- 213 | # Show the graphs for tpmC/tpmTOTAL and latency. 214 | # ---- 215 | cat >>report.html <<_EOF_ 216 |

217 | Transactions per Minute and Transaction Latency 218 |

219 |

220 | tpmC is the number of NEW_ORDER transactions that were processed 221 | per minute. tpmTOTAL is the number of transactions processed per 222 | minute for all transaction types, but without the background part 223 | of the DELIVERY transaction. 224 | 225 |
226 | 227 |
228 | 229 |

230 | _EOF_ 231 | 232 | # ---- 233 | # Add all the System Resource graphs. First the CPU and dirty buffers. 234 | # ---- 235 | cat >>report.html <<_EOF_ 236 |

237 | System Resource Usage 238 |

239 |

240 | CPU Utilization 241 |

242 |

243 | The percentages for User, System and IOWait CPU time are stacked 244 | on top of each other. 245 | 246 |
247 | 248 |

249 | 250 |

251 | Dirty Kernel Buffers 252 |

253 |

254 | We track the number of dirty kernel buffers, as measured by 255 | the "nr_dirty" line in /proc/vmstat, to be able to correlate 256 | IO problems with when the kernel's IO schedulers are flushing 257 | writes to disk. A write(2) system call does not immediately 258 | cause real IO on a storage device. The data written is just 259 | copied into a kernel buffer. Several tuning parameters control 260 | when the OS is actually transferring these dirty buffers to 261 | the IO controller(s) in order to eventually get written to 262 | real disks (or similar). 263 | 264 |
265 | 266 |

267 | _EOF_ 268 | 269 | # ---- 270 | # Add all the block device IOPS and KBPS 271 | # --- 272 | for devdata in data/blk_*.csv ; do 273 | if [ ! -f "$devdata" ] ; then 274 | break 275 | fi 276 | 277 | dev=$(basename ${devdata} .csv) 278 | cat >>report.html <<_EOF_ 279 |

280 | Block Device ${dev} 281 |

282 |

283 | 284 |
285 | 286 |

287 | _EOF_ 288 | done 289 | 290 | # ---- 291 | # Add all the network device IOPS and KBPS 292 | # --- 293 | for devdata in data/net_*.csv ; do 294 | if [ ! -f "$devdata" ] ; then 295 | break 296 | fi 297 | 298 | dev=$(basename ${devdata} .csv) 299 | cat >>report.html <<_EOF_ 300 |

301 | Network Device ${dev} 302 |

303 |

304 | 305 |
306 | 307 |

308 | _EOF_ 309 | done 310 | 311 | # ---- 312 | # Finish the document. 313 | # ---- 314 | cat >>report.html <<_EOF_ 315 | 316 | 317 | 318 | _EOF_ 319 | 320 | echo "OK" 321 | -------------------------------------------------------------------------------- /run/log4j.properties: -------------------------------------------------------------------------------- 1 | # log4j.rootLogger=TRACE, CONSOLE, E, T 2 | log4j.rootLogger=INFO, CONSOLE, E 3 | 4 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender 5 | log4j.appender.CONSOLE.Threshold=INFO 6 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.CONSOLE.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n 8 | 9 | log4j.appender.E=org.apache.log4j.RollingFileAppender 10 | log4j.appender.E.Threshold=WARN 11 | log4j.appender.E.File=benchmarksql-error.log 12 | log4j.appender.E.MaxFileSize=100MB 13 | log4j.appender.E.MaxBackupIndex=1 14 | log4j.appender.E.layout=org.apache.log4j.PatternLayout 15 | log4j.appender.E.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n 16 | 17 | log4j.appender.T=org.apache.log4j.FileAppender 18 | log4j.appender.T.Threshold=TRACE 19 | log4j.appender.T.File=benchmarksql-trace.log 20 | log4j.appender.T.append=false 21 | log4j.appender.T.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.T.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n 23 | 24 | -------------------------------------------------------------------------------- /run/misc/blk_device_iops.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show IOPS of a block device. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded IO data for the block device 24 | # and aggregate it for the desired interval. 25 | # ---- 26 | rawData <- read.csv("data/@DEVICE@.csv", head=TRUE) 27 | aggReads <- setNames(aggregate(rawData$rdiops, 28 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 29 | c('elapsed', 'rdiops')) 30 | aggWrites <- setNames(aggregate(rawData$wriops, 31 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 32 | c('elapsed', 'wriops')) 33 | 34 | 35 | 36 | 37 | # ---- 38 | # Determine the ymax by increasing in sqrt(2) steps until the 39 | # maximum of both IOPS fits. Then multiply that by 1.2 to 40 | # give a little headroom for the legend. 41 | # ---- 42 | ymax_rd <- max(aggReads$rdiops) 43 | ymax_wr <- max(aggWrites$wriops) 44 | ymax <- 1 45 | sqrt2 <- sqrt(2.0) 46 | while (ymax < ymax_rd || ymax < ymax_wr) { 47 | ymax <- ymax * sqrt2 48 | } 49 | if (ymax < (ymax_rd * 1.2) || ymax < (ymax_wr * 1.2)) { 50 | ymax <- ymax * 1.2 51 | } 52 | 53 | 54 | 55 | # ---- 56 | # Start the output image. 
57 | # ---- 58 | png("@DEVICE@_iops.png", width=@WIDTH@, height=@HEIGHT@) 59 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 60 | 61 | # ---- 62 | # Plot the RDIOPS 63 | # ---- 64 | plot ( 65 | aggReads$elapsed / 60000.0, aggReads$rdiops, 66 | type='l', col="blue3", lwd=2, 67 | axes=TRUE, 68 | xlab="Elapsed Minutes", 69 | ylab="IO Operations per Second", 70 | xlim=c(0, xmax), 71 | ylim=c(0, ymax) 72 | ) 73 | 74 | # ---- 75 | # Plot the WRIOPS 76 | # ---- 77 | par (new=T) 78 | plot ( 79 | aggWrites$elapsed / 60000.0, aggWrites$wriops, 80 | type='l', col="red3", lwd=2, 81 | axes=FALSE, 82 | xlab="", 83 | ylab="", 84 | xlim=c(0, xmax), 85 | ylim=c(0, ymax) 86 | ) 87 | 88 | # ---- 89 | # Add legend, title and other decorations. 90 | # ---- 91 | legend ("topleft", 92 | c("Read Operations on @DEVICE@", "Write Operations on @DEVICE@"), 93 | fill=c("blue3", "red3")) 94 | title (main=c( 95 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 96 | "Block Device @DEVICE@ IOPS" 97 | )) 98 | grid() 99 | box() 100 | -------------------------------------------------------------------------------- /run/misc/blk_device_kbps.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show KBPS of a block device. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded IO data for the block device 24 | # and aggregate it for the desired interval. 25 | # ---- 26 | rawData <- read.csv("data/@DEVICE@.csv", head=TRUE) 27 | aggReads <- setNames(aggregate(rawData$rdkbps, 28 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 29 | c('elapsed', 'rdkbps')) 30 | aggWrites <- setNames(aggregate(rawData$wrkbps, 31 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 32 | c('elapsed', 'wrkbps')) 33 | 34 | # ---- 35 | # Determine the ymax by increasing in sqrt(2) steps until the 36 | # maximum of both KBPS fits. Then multiply that by 1.2 to 37 | # give a little headroom for the legend. 38 | # ---- 39 | ymax_rd <- max(aggReads$rdkbps) 40 | ymax_wr <- max(aggWrites$wrkbps) 41 | ymax <- 1 42 | sqrt2 <- sqrt(2.0) 43 | while (ymax < ymax_rd || ymax < ymax_wr) { 44 | ymax <- ymax * sqrt2 45 | } 46 | if (ymax < (ymax_rd * 1.2) || ymax < (ymax_wr * 1.2)) { 47 | ymax <- ymax * 1.2 48 | } 49 | 50 | 51 | 52 | # ---- 53 | # Start the output image. 54 | # ---- 55 | png("@DEVICE@_kbps.png", width=@WIDTH@, height=@HEIGHT@) 56 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 57 | 58 | # ---- 59 | # Plot the RDKBPS 60 | # ---- 61 | plot ( 62 | aggReads$elapsed / 60000.0, aggReads$rdkbps, 63 | type='l', col="blue3", lwd=2, 64 | axes=TRUE, 65 | xlab="Elapsed Minutes", 66 | ylab="Kilobytes per Second", 67 | xlim=c(0, xmax), 68 | ylim=c(0, ymax) 69 | ) 70 | 71 | # ---- 72 | # Plot the WRKBPS 73 | # ---- 74 | par (new=T) 75 | plot ( 76 | aggWrites$elapsed / 60000.0, aggWrites$wrkbps, 77 | type='l', col="red3", lwd=2, 78 | axes=FALSE, 79 | xlab="", 80 | ylab="", 81 | xlim=c(0, xmax), 82 | ylim=c(0, ymax) 83 | ) 84 | 85 | # ---- 86 | # Add legend, title and other decorations. 
87 | # ---- 88 | legend ("topleft", 89 | c("Read Kb/s on @DEVICE@", "Write Kb/s on @DEVICE@"), 90 | fill=c("blue3", "red3")) 91 | title (main=c( 92 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 93 | "Block Device @DEVICE@ Kb/s" 94 | )) 95 | grid() 96 | box() 97 | -------------------------------------------------------------------------------- /run/misc/cpu_utilization.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show CPU utilization 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded CPU data and aggregate it for the desired interval. 24 | # ---- 25 | rawData <- read.csv("data/sys_info.csv", head=TRUE) 26 | aggUser <- setNames(aggregate(rawData$cpu_user, 27 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 28 | c('elapsed', 'cpu_user')) 29 | aggSystem <- setNames(aggregate(rawData$cpu_system, 30 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 31 | c('elapsed', 'cpu_system')) 32 | aggWait <- setNames(aggregate(rawData$cpu_iowait, 33 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 34 | c('elapsed', 'cpu_wait')) 35 | 36 | # ---- 37 | # ymax is 100% 38 | # ---- 39 | ymax = 100 40 | 41 | 42 | # ---- 43 | # Start the output image. 44 | # ---- 45 | png("cpu_utilization.png", width=@WIDTH@, height=@HEIGHT@) 46 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 47 | 48 | # ---- 49 | # Plot USER+SYSTEM+WAIT 50 | # ---- 51 | plot ( 52 | aggUser$elapsed / 60000.0, (aggUser$cpu_user + aggSystem$cpu_system + aggWait$cpu_wait) * 100.0, 53 | type='l', col="red3", lwd=2, 54 | axes=TRUE, 55 | xlab="Elapsed Minutes", 56 | ylab="CPU Utilization in Percent", 57 | xlim=c(0, xmax), 58 | ylim=c(0, ymax) 59 | ) 60 | 61 | # ---- 62 | # Plot the USER+SYSTEM 63 | # ---- 64 | par (new=T) 65 | plot ( 66 | aggUser$elapsed / 60000.0, (aggUser$cpu_user + aggSystem$cpu_system) * 100.0, 67 | type='l', col="cyan3", lwd=2, 68 | axes=FALSE, 69 | xlab="", 70 | ylab="", 71 | xlim=c(0, xmax), 72 | ylim=c(0, ymax) 73 | ) 74 | 75 | # ---- 76 | # Plot the USER 77 | # ---- 78 | par (new=T) 79 | plot ( 80 | aggUser$elapsed / 60000.0, aggUser$cpu_user * 100.0, 81 | type='l', col="blue3", lwd=2, 82 | axes=FALSE, 83 | xlab="", 84 | ylab="", 85 | xlim=c(0, xmax), 86 | ylim=c(0, ymax) 87 | ) 88 | 89 | # ---- 90 | # Add legend, title and other decorations. 91 | # ---- 92 | legend ("topleft", 93 | c("% User", "% System", "% IOWait"), 94 | fill=c("blue3", "cyan3", "red3")) 95 | title (main=c( 96 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 97 | "CPU Utilization" 98 | )) 99 | grid() 100 | box() 101 | -------------------------------------------------------------------------------- /run/misc/dirty_buffers.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show number of dirty kernel buffers 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 
13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded CPU data and aggregate it for the desired interval. 24 | # ---- 25 | rawData <- read.csv("data/sys_info.csv", head=TRUE) 26 | aggDirty <- setNames(aggregate(rawData$vm_nr_dirty, 27 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 28 | c('elapsed', 'vm_nr_dirty')) 29 | 30 | # ---- 31 | # Determine ymax 32 | # ---- 33 | ymax_dirty = max(aggDirty$vm_nr_dirty) 34 | sqrt2 <- sqrt(2.0) 35 | ymax <- 1 36 | while (ymax < ymax_dirty) { 37 | ymax <- ymax * sqrt2 38 | } 39 | if (ymax < (ymax_dirty * 1.2)) { 40 | ymax <- ymax * 1.2 41 | } 42 | 43 | 44 | # ---- 45 | # Start the output image. 46 | # ---- 47 | png("dirty_buffers.png", width=@WIDTH@, height=@HEIGHT@) 48 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 49 | 50 | # ---- 51 | # Plot dirty buffers 52 | # ---- 53 | plot ( 54 | aggDirty$elapsed / 60000.0, aggDirty$vm_nr_dirty, 55 | type='l', col="red3", lwd=2, 56 | axes=TRUE, 57 | xlab="Elapsed Minutes", 58 | ylab="Number dirty kernel buffers", 59 | xlim=c(0, xmax), 60 | ylim=c(0, ymax) 61 | ) 62 | 63 | # ---- 64 | # Add legend, title and other decorations. 65 | # ---- 66 | legend ("topleft", 67 | c("vmstat nr_dirty"), 68 | fill=c("red3")) 69 | title (main=c( 70 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 71 | "Dirty Kernel Buffers" 72 | )) 73 | grid() 74 | box() 75 | -------------------------------------------------------------------------------- /run/misc/latency.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show latency of all transaction types. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the result.csv and then filter the raw data 24 | # by transaction type 25 | # ---- 26 | rawData <- read.csv("data/result.csv", head=TRUE) 27 | noBGData <- rawData[rawData$ttype != 'DELIVERY_BG', ] 28 | newOrder <- rawData[rawData$ttype == 'NEW_ORDER', ] 29 | payment <- rawData[rawData$ttype == 'PAYMENT', ] 30 | orderStatus <- rawData[rawData$ttype == 'ORDER_STATUS', ] 31 | stockLevel <- rawData[rawData$ttype == 'STOCK_LEVEL', ] 32 | delivery <- rawData[rawData$ttype == 'DELIVERY', ] 33 | deliveryBG <- rawData[rawData$ttype == 'DELIVERY_BG', ] 34 | 35 | # ---- 36 | # Aggregate the latency grouped by interval. 
37 | # ---- 38 | aggNewOrder <- setNames(aggregate(newOrder$latency, list(elapsed=trunc(newOrder$elapsed / idiv) * idiv), mean), 39 | c('elapsed', 'latency')); 40 | aggPayment <- setNames(aggregate(payment$latency, list(elapsed=trunc(payment$elapsed / idiv) * idiv), mean), 41 | c('elapsed', 'latency')); 42 | aggOrderStatus <- setNames(aggregate(orderStatus$latency, list(elapsed=trunc(orderStatus$elapsed / idiv) * idiv), mean), 43 | c('elapsed', 'latency')); 44 | aggStockLevel <- setNames(aggregate(stockLevel$latency, list(elapsed=trunc(stockLevel$elapsed / idiv) * idiv), mean), 45 | c('elapsed', 'latency')); 46 | aggDelivery <- setNames(aggregate(delivery$latency, list(elapsed=trunc(delivery$elapsed / idiv) * idiv), mean), 47 | c('elapsed', 'latency')); 48 | 49 | # ---- 50 | # Determine the ymax by increasing in sqrt(2) steps until 98% 51 | # of ALL latencies fit into the graph. Then multiply with 1.2 52 | # to give some headroom for the legend. 53 | # ---- 54 | ymax_total <- quantile(noBGData$latency, probs = 0.98) 55 | 56 | ymax <- 1 57 | sqrt2 <- sqrt(2.0) 58 | while (ymax < ymax_total) { 59 | ymax <- ymax * sqrt2 60 | } 61 | if (ymax < (ymax_total * 1.2)) { 62 | ymax <- ymax * 1.2 63 | } 64 | 65 | 66 | 67 | # ---- 68 | # Start the output image. 69 | # ---- 70 | png("latency.png", width=@WIDTH@, height=@HEIGHT@) 71 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 72 | 73 | # ---- 74 | # Plot the Delivery latency graph. 75 | # ---- 76 | plot ( 77 | aggDelivery$elapsed / 60000.0, aggDelivery$latency, 78 | type='l', col="blue3", lwd=2, 79 | axes=TRUE, 80 | xlab="Elapsed Minutes", 81 | ylab="Latency in Milliseconds", 82 | xlim=c(0, xmax), 83 | ylim=c(0, ymax) 84 | ) 85 | 86 | # ---- 87 | # Plot the StockLevel latency graph. 88 | # ---- 89 | par(new=T) 90 | plot ( 91 | aggStockLevel$elapsed / 60000.0, aggStockLevel$latency, 92 | type='l', col="gray70", lwd=2, 93 | axes=FALSE, 94 | xlab="", 95 | ylab="", 96 | xlim=c(0, xmax), 97 | ylim=c(0, ymax) 98 | ) 99 | 100 | # ---- 101 | # Plot the OrderStatus latency graph. 102 | # ---- 103 | par(new=T) 104 | plot ( 105 | aggOrderStatus$elapsed / 60000.0, aggOrderStatus$latency, 106 | type='l', col="green3", lwd=2, 107 | axes=FALSE, 108 | xlab="", 109 | ylab="", 110 | xlim=c(0, xmax), 111 | ylim=c(0, ymax) 112 | ) 113 | 114 | # ---- 115 | # Plot the Payment latency graph. 116 | # ---- 117 | par(new=T) 118 | plot ( 119 | aggPayment$elapsed / 60000.0, aggPayment$latency, 120 | type='l', col="magenta3", lwd=2, 121 | axes=FALSE, 122 | xlab="", 123 | ylab="", 124 | xlim=c(0, xmax), 125 | ylim=c(0, ymax) 126 | ) 127 | 128 | # ---- 129 | # Plot the NewOrder latency graph. 130 | # ---- 131 | par(new=T) 132 | plot ( 133 | aggNewOrder$elapsed / 60000.0, aggNewOrder$latency, 134 | type='l', col="red3", lwd=2, 135 | axes=FALSE, 136 | xlab="", 137 | ylab="", 138 | xlim=c(0, xmax), 139 | ylim=c(0, ymax) 140 | ) 141 | 142 | # ---- 143 | # Add legend, title and other decorations. 
144 | # ---- 145 | legend ("topleft", 146 | c("NEW_ORDER", "PAYMENT", "ORDER_STATUS", "STOCK_LEVEL", "DELIVERY"), 147 | fill=c("red3", "magenta3", "green3", "gray70", "blue3")) 148 | title (main=c( 149 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 150 | "Transaction Latency" 151 | )) 152 | grid() 153 | box() 154 | 155 | # ---- 156 | # Generate the transaction summary and write it to 157 | # data/tx_summary.csv 158 | # ---- 159 | tx_total <- NROW(noBGData) 160 | 161 | tx_name <- c( 162 | 'NEW_ORDER', 163 | 'PAYMENT', 164 | 'ORDER_STATUS', 165 | 'STOCK_LEVEL', 166 | 'DELIVERY', 167 | 'DELIVERY_BG', 168 | 'tpmC', 169 | 'tpmTotal') 170 | tx_count <- c( 171 | NROW(newOrder), 172 | NROW(payment), 173 | NROW(orderStatus), 174 | NROW(stockLevel), 175 | NROW(delivery), 176 | NROW(deliveryBG), 177 | sprintf("%.2f", NROW(newOrder) / runInfo$runMins), 178 | sprintf("%.2f", NROW(noBGData) / runInfo$runMins)) 179 | tx_percent <- c( 180 | sprintf("%.3f%%", NROW(newOrder) / tx_total * 100.0), 181 | sprintf("%.3f%%", NROW(payment) / tx_total * 100.0), 182 | sprintf("%.3f%%", NROW(orderStatus) / tx_total * 100.0), 183 | sprintf("%.3f%%", NROW(stockLevel) / tx_total * 100.0), 184 | sprintf("%.3f%%", NROW(delivery) / tx_total * 100.0), 185 | NA, 186 | sprintf("%.3f%%", NROW(newOrder) / runInfo$runMins / 187 | runInfo$runWarehouses / 0.1286), 188 | NA) 189 | tx_90th <- c( 190 | sprintf("%.3fs", quantile(newOrder$latency, probs=0.90) / 1000.0), 191 | sprintf("%.3fs", quantile(payment$latency, probs=0.90) / 1000.0), 192 | sprintf("%.3fs", quantile(orderStatus$latency, probs=0.90) / 1000.0), 193 | sprintf("%.3fs", quantile(stockLevel$latency, probs=0.90) / 1000.0), 194 | sprintf("%.3fs", quantile(delivery$latency, probs=0.90) / 1000.0), 195 | sprintf("%.3fs", quantile(deliveryBG$latency, probs=0.90) / 1000.0), 196 | NA, NA) 197 | tx_max <- c( 198 | sprintf("%.3fs", max(newOrder$latency) / 1000.0), 199 | sprintf("%.3fs", max(payment$latency) / 1000.0), 200 | sprintf("%.3fs", max(orderStatus$latency) / 1000.0), 201 | sprintf("%.3fs", max(stockLevel$latency) / 1000.0), 202 | sprintf("%.3fs", max(delivery$latency) / 1000.0), 203 | sprintf("%.3fs", max(deliveryBG$latency) / 1000.0), 204 | NA, NA) 205 | tx_limit <- c("5.0", "5.0", "5.0", "20.0", "5.0", "80.0", NA, NA) 206 | tx_rbk <- c( 207 | sprintf("%.3f%%", sum(newOrder$rbk) / NROW(newOrder) * 100.0), 208 | NA, NA, NA, NA, NA, NA, NA) 209 | tx_error <- c( 210 | sum(newOrder$error), 211 | sum(payment$error), 212 | sum(orderStatus$error), 213 | sum(stockLevel$error), 214 | sum(delivery$error), 215 | sum(deliveryBG$error), 216 | NA, NA) 217 | tx_dskipped <- c( 218 | NA, NA, NA, NA, NA, 219 | sum(deliveryBG$dskipped), 220 | NA, NA) 221 | tx_info <- data.frame( 222 | tx_name, 223 | tx_count, 224 | tx_percent, 225 | tx_90th, 226 | tx_max, 227 | tx_limit, 228 | tx_rbk, 229 | tx_error, 230 | tx_dskipped) 231 | 232 | write.csv(tx_info, file = "data/tx_summary.csv", quote = FALSE, na = "N/A", 233 | row.names = FALSE) 234 | 235 | -------------------------------------------------------------------------------- /run/misc/net_device_iops.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show Packets of a network device. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 
13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded IO data for the network device 24 | # and aggregate it for the desired interval. 25 | # ---- 26 | rawData <- read.csv("data/@DEVICE@.csv", head=TRUE) 27 | aggRecv <- setNames(aggregate(rawData$rxpktsps, 28 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 29 | c('elapsed', 'rxpktsps')) 30 | aggSend <- setNames(aggregate(rawData$txpktsps, 31 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 32 | c('elapsed', 'txpktsps')) 33 | 34 | # ---- 35 | # Determine the ymax by increasing in sqrt(2) steps until the 36 | # maximum of both IOPS fits. Then multiply that by 1.2 to 37 | # give a little headroom for the legend. 38 | # ---- 39 | ymax_rx <- max(aggRecv$rxpktsps) 40 | ymax_tx <- max(aggSend$txpktsps) 41 | ymax <- 1 42 | sqrt2 <- sqrt(2.0) 43 | while (ymax < ymax_rx || ymax < ymax_tx) { 44 | ymax <- ymax * sqrt2 45 | } 46 | if (ymax < (ymax_rx * 1.2) || ymax < (ymax_tx * 1.2)) { 47 | ymax <- ymax * 1.2 48 | } 49 | 50 | 51 | 52 | # ---- 53 | # Start the output image. 54 | # ---- 55 | png("@DEVICE@_iops.png", width=@WIDTH@, height=@HEIGHT@) 56 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 57 | 58 | # ---- 59 | # Plot the RXPKTSPS 60 | # ---- 61 | plot ( 62 | aggRecv$elapsed / 60000.0, aggRecv$rxpktsps, 63 | type='l', col="blue3", lwd=2, 64 | axes=TRUE, 65 | xlab="Elapsed Minutes", 66 | ylab="Packets per Second", 67 | xlim=c(0, xmax), 68 | ylim=c(0, ymax) 69 | ) 70 | 71 | # ---- 72 | # Plot the TXPKTSPS 73 | # ---- 74 | par (new=T) 75 | plot ( 76 | aggSend$elapsed / 60000.0, aggSend$txpktsps, 77 | type='l', col="red3", lwd=2, 78 | axes=FALSE, 79 | xlab="", 80 | ylab="", 81 | xlim=c(0, xmax), 82 | ylim=c(0, ymax) 83 | ) 84 | 85 | # ---- 86 | # Add legend, title and other decorations. 87 | # ---- 88 | legend ("topleft", 89 | c("RX Packets/s on @DEVICE@", "TX Packets/s on @DEVICE@"), 90 | fill=c("blue3", "red3")) 91 | title (main=c( 92 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 93 | "Network Device @DEVICE@ Packets per Second" 94 | )) 95 | grid() 96 | box() 97 | -------------------------------------------------------------------------------- /run/misc/net_device_kbps.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show Kb/s of a network device. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the recorded IO data for the network device 24 | # and aggregate it for the desired interval. 
25 | # ---- 26 | rawData <- read.csv("data/@DEVICE@.csv", head=TRUE) 27 | aggRecv <- setNames(aggregate(rawData$rxkbps, 28 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 29 | c('elapsed', 'rxkbps')) 30 | aggSend <- setNames(aggregate(rawData$txkbps, 31 | list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean), 32 | c('elapsed', 'txkbps')) 33 | 34 | # ---- 35 | # Determine the ymax by increasing in sqrt(2) steps until the 36 | # maximum of both Kb/s fits. Then multiply that by 1.2 to 37 | # give a little headroom for the legend. 38 | # ---- 39 | ymax_rx <- max(aggRecv$rxkbps) 40 | ymax_tx <- max(aggSend$txkbps) 41 | ymax <- 1 42 | sqrt2 <- sqrt(2.0) 43 | while (ymax < ymax_rx || ymax < ymax_tx) { 44 | ymax <- ymax * sqrt2 45 | } 46 | if (ymax < (ymax_rx * 1.2) || ymax < (ymax_tx * 1.2)) { 47 | ymax <- ymax * 1.2 48 | } 49 | 50 | 51 | 52 | # ---- 53 | # Start the output image. 54 | # ---- 55 | png("@DEVICE@_kbps.png", width=@WIDTH@, height=@HEIGHT@) 56 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 57 | 58 | # ---- 59 | # Plot the RXKBPS 60 | # ---- 61 | plot ( 62 | aggRecv$elapsed / 60000.0, aggRecv$rxkbps, 63 | type='l', col="blue3", lwd=2, 64 | axes=TRUE, 65 | xlab="Elapsed Minutes", 66 | ylab="Kilobytes per Second", 67 | xlim=c(0, xmax), 68 | ylim=c(0, ymax) 69 | ) 70 | 71 | # ---- 72 | # Plot the TXKBPS 73 | # ---- 74 | par (new=T) 75 | plot ( 76 | aggSend$elapsed / 60000.0, aggSend$txkbps, 77 | type='l', col="red3", lwd=2, 78 | axes=FALSE, 79 | xlab="", 80 | ylab="", 81 | xlim=c(0, xmax), 82 | ylim=c(0, ymax) 83 | ) 84 | 85 | # ---- 86 | # Add legend, title and other decorations. 87 | # ---- 88 | legend ("topleft", 89 | c("RX Kb/s on @DEVICE@", "TX Kb/s on @DEVICE@"), 90 | fill=c("blue3", "red3")) 91 | title (main=c( 92 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 93 | "Network Device @DEVICE@ Kb per Second" 94 | )) 95 | grid() 96 | box() 97 | -------------------------------------------------------------------------------- /run/misc/os_collector_linux.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ---------------------------------------------------------------------- 3 | # os_collector_linux.py - 4 | # 5 | # Script used to collect OS level resource utilization data like 6 | # CPU usage and disk IO. 7 | # 8 | # This code is used in the jTPCCOSCollect class. It is launched as 9 | # a separate process, possibly via ssh(1) on the remote database 10 | # server. The ability of Python to receive a script to execute on 11 | # stdin allows us to execute this script via ssh(1) on the database 12 | # server without installing any programs/scripts there. 13 | # 14 | # The command line arguments for this script are the runID, the 15 | # interval in seconds at which to collect information and a variable 16 | # number of devices in the form "blk_<devname>" or "net_<devname>", 17 | # for example "blk_sda" for the first SCSI disk or "net_eth0". 18 | # 19 | # The output on stdout is one line for CPU/VM info, followed by a 20 | # line for each of the specified devices in CSV format. The first 21 | # set of lines are the CSV headers. The output is prefixed with the 22 | # runID, elapsed_ms and for the devices the blk_ or net_ name that 23 | # was specified on the command line. This format makes it easy to 24 | # load the data into a result database where it can be analyzed 25 | # together with the BenchmarkSQL per transaction results and compared 26 | # to other benchmark runs. 
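# As an illustration (hypothetical invocation and device names),
# started as
#
#     python os_collector_linux.py 1 10 blk_sda net_eth0
#
# the script first prints one CSV header line per data stream, i.e.
#
#     run,elapsed,cpu_user,cpu_nice,cpu_system,cpu_idle,cpu_iowait,cpu_irq,cpu_softirq,cpu_steal,cpu_guest,cpu_guest_nice,vm_nr_dirty
#     run,elapsed,device,rdiops,rdmerges,rdkbps,wriops,wrmerges,wrkbps
#     run,elapsed,device,rxpktsps,rxkbps,txpktsps,txkbps
#
# and then emits one matching data line per stream every 10 seconds
# (the column lists are taken from the init*() functions below).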
27 | # 28 | # It is the caller's responsibility to split the output lines into 29 | # separate result CSV files. 30 | # ---------------------------------------------------------------------- 31 | 32 | import errno 33 | import math 34 | import os 35 | import sys 36 | import time 37 | 38 | # ---- 39 | # main 40 | # ---- 41 | def main(argv): 42 | global deviceFDs 43 | global lastDeviceData 44 | 45 | # ---- 46 | # Get the runID and collection interval from the command line 47 | # ---- 48 | runID = (int)(argv[0]) 49 | interval = (float)(argv[1]) 50 | 51 | # ---- 52 | # Our start time is now. Since most of the information is deltas 53 | # we can only produce the first data after the first interval. 54 | # ---- 55 | startTime = time.time(); 56 | nextDue = startTime + interval 57 | 58 | # ---- 59 | # Initialize CPU and vmstat collection and output the CSV header. 60 | # ---- 61 | sysInfo = ['run', 'elapsed', ] 62 | sysInfo += initSystemUsage() 63 | print ",".join([str(x) for x in sysInfo]) 64 | 65 | # ---- 66 | # Get all the devices from the command line. 67 | # ---- 68 | devices = [] 69 | deviceFDs = {} 70 | lastDeviceData = {} 71 | for dev in argv[2:]: 72 | if dev.startswith('blk_'): 73 | devices.append(dev) 74 | elif dev.startswith('net_'): 75 | devices.append(dev) 76 | else: 77 | raise Exception("unknown device type '" + dev + "'") 78 | 79 | # ---- 80 | # Initialize usage collection per device depending on the type. 81 | # Output all the headers in the order, the devices are given. 82 | # ---- 83 | for dev in devices: 84 | if dev.startswith('blk_'): 85 | devInfo = ['run', 'elapsed', 'device', ] 86 | devInfo += initBlockDevice(dev) 87 | print ",".join([str(x) for x in devInfo]) 88 | elif dev.startswith('net_'): 89 | devInfo = ['run', 'elapsed', 'device', ] 90 | devInfo += initNetDevice(dev) 91 | print ",".join([str(x) for x in devInfo]) 92 | 93 | # ---- 94 | # Flush all header lines. 95 | # ---- 96 | sys.stdout.flush() 97 | 98 | try: 99 | while True: 100 | # ---- 101 | # Wait until our next collection interval and calculate the 102 | # elapsed time in milliseconds. 103 | # ---- 104 | now = time.time() 105 | if nextDue > now: 106 | time.sleep(nextDue - now) 107 | elapsed = (int)((nextDue - startTime) * 1000.0) 108 | 109 | # ---- 110 | # Collect CPU and vmstat information. 111 | # ---- 112 | sysInfo = [runID, elapsed, ] 113 | sysInfo += getSystemUsage() 114 | print ",".join([str(x) for x in sysInfo]) 115 | 116 | # ---- 117 | # Collect all device utilization data. 118 | # ---- 119 | for dev in devices: 120 | if dev.startswith('blk_'): 121 | devInfo = [runID, elapsed, dev, ] 122 | devInfo += getBlockUsage(dev, interval) 123 | print ",".join([str(x) for x in devInfo]) 124 | elif dev.startswith('net_'): 125 | devInfo = [runID, elapsed, dev, ] 126 | devInfo += getNetUsage(dev, interval) 127 | print ",".join([str(x) for x in devInfo]) 128 | 129 | # ---- 130 | # Bump the time when we are next due. 131 | # ---- 132 | nextDue += interval 133 | 134 | sys.stdout.flush() 135 | 136 | # ---- 137 | # Running on the command line for test purposes? 138 | # ---- 139 | except KeyboardInterrupt: 140 | print "" 141 | return 0 142 | 143 | # ---- 144 | # The OSCollector class will just close our stdout on the other 145 | # side, so this is expected. 
148 | # ---- 149 | except IOError as e: 150 | if e.errno == errno.EPIPE: 151 | return 0 152 | else: 153 | raise e 154 | 155 | def initSystemUsage(): 156 | global procStatFD 157 | global procVMStatFD 158 | global lastStatData 159 | global lastVMStatData 160 | 161 | procStatFD = open("/proc/stat", "r", buffering = 0) 162 | for line in procStatFD: 163 | line = line.split() 164 | if line[0] == "cpu": 165 | lastStatData = [int(x) for x in line[1:]] 166 | break 167 | if len(lastStatData) != 10: 168 | raise Exception("cpu line in /proc/stat too short") 169 | 170 | procVMStatFD = open("/proc/vmstat", "r", buffering = 0) 171 | lastVMStatData = {} 172 | for line in procVMStatFD: 173 | line = line.split() 174 | if line[0] in ['nr_dirty', ]: 175 | lastVMStatData['vm_' + line[0]] = int(line[1]) 176 | if len(lastVMStatData.keys()) != 1: 177 | raise Exception("not all elements found in /proc/vmstat") 178 | 179 | return [ 180 | 'cpu_user', 'cpu_nice', 'cpu_system', 181 | 'cpu_idle', 'cpu_iowait', 'cpu_irq', 182 | 'cpu_softirq', 'cpu_steal', 183 | 'cpu_guest', 'cpu_guest_nice', 184 | 'vm_nr_dirty', 185 | ] 186 | 187 | 188 | def getSystemUsage(): 189 | global procStatFD 190 | global procVMStatFD 191 | global lastStatData 192 | global lastVMStatData 193 | 194 | procStatFD.seek(0, 0) 195 | for line in procStatFD: 196 | line = line.split() 197 | if line[0] != "cpu": 198 | continue 199 | statData = [int(x) for x in line[1:]] 200 | deltaTotal = (float)(sum(statData) - sum(lastStatData)) 201 | if deltaTotal == 0: 202 | result = [0.0 for x in statData] 203 | else: 204 | result = [] 205 | for old, new in zip(lastStatData, statData): 206 | result.append((float)(new - old) / deltaTotal) 207 | lastStatData = statData 208 | break 209 | 210 | procVMStatFD.seek(0, 0) 211 | newVMStatData = {} 212 | for line in procVMStatFD: 213 | line = line.split() 214 | if line[0] in ['nr_dirty', ]: 215 | newVMStatData['vm_' + line[0]] = int(line[1]) 216 | 217 | for key in ['vm_nr_dirty', ]: 218 | result.append(newVMStatData[key]) 219 | 220 | return result 221 | 222 | 223 | def initBlockDevice(dev): 224 | global deviceFDs 225 | global lastDeviceData 226 | 227 | devPath = os.path.join("/sys/block", dev[4:], "stat") 228 | deviceFDs[dev] = open(devPath, "r", buffering = 0) 229 | line = deviceFDs[dev].readline().split() 230 | 231 | newData = [] 232 | for idx, mult in [ 233 | (0, 1.0), (1, 1.0), (2, 0.5), 234 | (4, 1.0), (5, 1.0), (6, 0.5), 235 | ]: 236 | newData.append((int)(line[idx])) 237 | lastDeviceData[dev] = newData 238 | 239 | return ['rdiops', 'rdmerges', 'rdkbps', 'wriops', 'wrmerges', 'wrkbps', ] 240 | 241 | 242 | def getBlockUsage(dev, interval): 243 | global deviceFDs 244 | 245 | deviceFDs[dev].seek(0, 0) 246 | line = deviceFDs[dev].readline().split() 247 | 248 | oldData = lastDeviceData[dev] 249 | newData = [] 250 | result = [] 251 | ridx = 0 252 | for idx, mult in [ 253 | (0, 1.0), (1, 1.0), (2, 0.5), 254 | (4, 1.0), (5, 1.0), (6, 0.5), 255 | ]: 256 | newData.append((int)(line[idx])) 257 | result.append((float)(newData[ridx] - oldData[ridx]) * mult / interval) 258 | ridx += 1 259 | lastDeviceData[dev] = newData 260 | return result 261 | 262 | def initNetDevice(dev): 263 | global deviceFDs 264 | global lastDeviceData 265 | 266 | devPath = os.path.join("/sys/class/net", dev[4:], "statistics") 267 | deviceData = [] 268 | for fname in ['rx_packets', 'rx_bytes', 'tx_packets', 'tx_bytes', ]: 269 | key = dev + "."
+ fname 268 | deviceFDs[key] = open(os.path.join(devPath, fname), 269 | "r", buffering = 0) 270 | deviceData.append((int)(deviceFDs[key].read())) 271 | 272 | lastDeviceData[dev] = deviceData 273 | 274 | return ['rxpktsps', 'rxkbps', 'txpktsps', 'txkbps', ] 275 | 276 | 277 | def getNetUsage(dev, interval): 278 | global deviceFDs 279 | global lastDeviceData 280 | 281 | oldData = lastDeviceData[dev] 282 | newData = [] 283 | for fname in ['rx_packets', 'rx_bytes', 'tx_packets', 'tx_bytes', ]: 284 | key = dev + "." + fname 285 | deviceFDs[key].seek(0, 0) 286 | newData.append((int)(deviceFDs[key].read())) 287 | 288 | result = [ 289 | (float)(newData[0] - oldData[0]) / interval, 290 | (float)(newData[1] - oldData[1]) / interval / 1024.0, 291 | (float)(newData[2] - oldData[2]) / interval, 292 | (float)(newData[3] - oldData[3]) / interval / 1024.0, 293 | ] 294 | lastDeviceData[dev] = newData 295 | return result 296 | 297 | 298 | if __name__ == '__main__': 299 | sys.exit(main(sys.argv[1:])) 300 | -------------------------------------------------------------------------------- /run/misc/tpm_nopm.R: -------------------------------------------------------------------------------- 1 | # ---- 2 | # R graph to show tpmC and tpmTOTAL. 3 | # ---- 4 | 5 | # ---- 6 | # Read the runInfo.csv file. 7 | # ---- 8 | runInfo <- read.csv("data/runInfo.csv", head=TRUE) 9 | 10 | # ---- 11 | # Determine the grouping interval in seconds based on the 12 | # run duration. 13 | # ---- 14 | xmax <- runInfo$runMins 15 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) { 16 | if ((xmax * 60) / interval <= 1000) { 17 | break 18 | } 19 | } 20 | idiv <- interval * 1000.0 21 | 22 | # ---- 23 | # Read the result.csv and then filter the raw data 24 | # for != DELIVERY_BG and == NEW_ORDER transactions. 25 | # ---- 26 | data1 <- read.csv("data/result.csv", head=TRUE) 27 | total1 <- data1[data1$ttype != 'DELIVERY_BG', ] 28 | neworder1 <- data1[data1$ttype == 'NEW_ORDER', ] 29 | 30 | # ---- 31 | # Aggregate the counts of both data sets grouped by second. 32 | # ---- 33 | countTotal <- setNames(aggregate(total1$latency, list(elapsed=trunc(total1$elapsed / idiv) * idiv), NROW), 34 | c('elapsed', 'count')); 35 | countNewOrder <- setNames(aggregate(neworder1$latency, list(elapsed=trunc(neworder1$elapsed / idiv) * idiv), NROW), 36 | c('elapsed', 'count')); 37 | 38 | # ---- 39 | # Determine the ymax by increasing in sqrt(2) steps until the 40 | # maximum of tpmTOTAL fits, then make sure that we have at least 41 | # 1.2 times that to give a little head room for the legend. 42 | # ---- 43 | ymax_count <- max(countTotal$count) * 60.0 / interval 44 | ymax <- 1 45 | sqrt2 <- sqrt(2.0) 46 | while (ymax < ymax_count) { 47 | ymax <- ymax * sqrt2 48 | } 49 | if (ymax < (ymax_count * 1.2)) { 50 | ymax <- ymax * 1.2 51 | } 52 | 53 | 54 | 55 | # ---- 56 | # Start the output image. 57 | # ---- 58 | png("tpm_nopm.png", width=@WIDTH@, height=@HEIGHT@) 59 | par(mar=c(4,4,4,4), xaxp=c(10,200,19)) 60 | 61 | # ---- 62 | # Plot the tpmTOTAL graph. 63 | # ---- 64 | plot ( 65 | countTotal$elapsed / 60000.0, countTotal$count * 60.0 / interval, 66 | type='l', col="blue3", lwd=2, 67 | axes=TRUE, 68 | xlab="Elapsed Minutes", 69 | ylab="Transactions per Minute", 70 | xlim=c(0, xmax), 71 | ylim=c(0, ymax) 72 | ) 73 | 74 | # ---- 75 | # Plot the tpmC graph. 
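# (Following the TPC-C reporting convention, tpmC counts NEW_ORDER
#  transactions only, which is why neworder1 was filtered separately
#  above; tpmTOTAL counts everything except the deferred DELIVERY_BG
#  part of the Delivery transaction.)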
76 | # ---- 77 | par (new=T) 78 | plot ( 79 | countNewOrder$elapsed / 60000.0, countNewOrder$count * 60.0 / interval, 80 | type='l', col="red3", lwd=2, 81 | axes=FALSE, 82 | xlab="", 83 | ylab="", 84 | xlim=c(0, xmax), 85 | ylim=c(0, ymax) 86 | ) 87 | 88 | # ---- 89 | # Add legend, title and other decorations. 90 | # ---- 91 | legend ("topleft", 92 | c("tpmTOTAL", "tpmC (NewOrder only)"), 93 | fill=c("blue3", "red3")) 94 | title (main=c( 95 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion), 96 | "Transactions per Minute" 97 | )) 98 | grid() 99 | box() 100 | -------------------------------------------------------------------------------- /run/props.fb: -------------------------------------------------------------------------------- 1 | db=firebird 2 | driver=org.firebirdsql.jdbc.FBDriver 3 | conn=jdbc:firebirdsql://localhost:3050//var/lib/firebird/data/benchmarksql1.fdb 4 | user=benchmarksql 5 | password=PWbmsql 6 | 7 | warehouses=1 8 | loadWorkers=4 9 | 10 | terminals=1 11 | //To run specified transactions per terminal- runMins must equal zero 12 | runTxnsPerTerminal=10 13 | //To run for specified minutes- runTxnsPerTerminal must equal zero 14 | runMins=0 15 | //Number of total transactions per minute 16 | limitTxnsPerMin=300 17 | 18 | //Set to true to run in 4.x compatible mode. Set to false to use the 19 | //entire configured database evenly. 20 | terminalWarehouseFixed=false 21 | 22 | //The following five values must add up to 100 23 | //The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec 24 | newOrderWeight=45 25 | paymentWeight=43 26 | orderStatusWeight=4 27 | deliveryWeight=4 28 | stockLevelWeight=4 29 | 30 | // Directory name to create for collecting detailed result data. 31 | // Comment this out to suppress. 32 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS 33 | osCollectorScript=./misc/os_collector_linux.py 34 | osCollectorInterval=1 35 | //osCollectorSSHAddr=user@dbhost 36 | osCollectorDevices=net_eth0 blk_sda 37 | -------------------------------------------------------------------------------- /run/props.mysql: -------------------------------------------------------------------------------- 1 | db=mysql 2 | driver=com.mysql.jdbc.Driver 3 | conn=jdbc:mysql://localhost:4000/tpcc?useSSL=false&useServerPrepStmts=true&useConfigs=maxPerformance&rewriteBatchedStatements=true&cachePrepStmts=true&prepStmtCacheSize=1000&prepStmtCacheSqlLimit=2048 4 | user=root 5 | password= 6 | 7 | warehouses=1 8 | loadWorkers=4 9 | 10 | terminals=1 11 | //To run specified transactions per terminal- runMins must equal zero 12 | runTxnsPerTerminal=0 13 | //To run for specified minutes- runTxnsPerTerminal must equal zero 14 | runMins=10 15 | //Number of total transactions per minute 16 | limitTxnsPerMin=0 17 | 18 | //Set to true to run in 4.x compatible mode. Set to false to use the 19 | //entire configured database evenly. 20 | terminalWarehouseFixed=true 21 | 22 | //The following five values must add up to 100 23 | //The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec 24 | newOrderWeight=45 25 | paymentWeight=43 26 | orderStatusWeight=4 27 | deliveryWeight=4 28 | stockLevelWeight=4 29 | 30 | // Directory name to create for collecting detailed result data. 31 | // Comment this out to suppress. 
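// (The %t patterns in resultDirectory below are java.util.Formatter
// date/time conversions: %tY year, %tm month, %td day, %tH%tM%tS
// time of day. At run start the name expands to something like
// my_result_2016-05-12_153000.)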
32 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS 33 | //osCollectorScript=./misc/os_collector_linux.py 34 | //osCollectorInterval=1 35 | //osCollectorSSHAddr=user@dbhost 36 | //osCollectorDevices=net_eth0 blk_sda 37 | -------------------------------------------------------------------------------- /run/props.ora: -------------------------------------------------------------------------------- 1 | db=oracle 2 | driver=oracle.jdbc.driver.OracleDriver 3 | conn=jdbc:oracle:thin:@localhost:1521:XE 4 | user=scott 5 | password=tiger 6 | 7 | warehouses=1 8 | loadWorkers=4 9 | 10 | terminals=1 11 | //To run specified transactions per terminal- runMins must equal zero 12 | runTxnsPerTerminal=10 13 | //To run for specified minutes- runTxnsPerTerminal must equal zero 14 | runMins=0 15 | //Number of total transactions per minute 16 | limitTxnsPerMin=300 17 | 18 | //Set to true to run in 4.x compatible mode. Set to false to use the 19 | //entire configured database evenly. 20 | terminalWarehouseFixed=true 21 | 22 | //The following five values must add up to 100 23 | newOrderWeight=45 24 | paymentWeight=43 25 | orderStatusWeight=4 26 | deliveryWeight=4 27 | stockLevelWeight=4 28 | 29 | // Directory name to create for collecting detailed result data. 30 | // Comment this out to suppress. 31 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS 32 | osCollectorScript=./misc/os_collector_linux.py 33 | osCollectorInterval=1 34 | //osCollectorSSHAddr=user@dbhost 35 | osCollectorDevices=net_eth0 blk_sda 36 | -------------------------------------------------------------------------------- /run/props.pg: -------------------------------------------------------------------------------- 1 | db=postgres 2 | driver=org.postgresql.Driver 3 | conn=jdbc:postgresql://localhost:5432/postgres 4 | user=benchmarksql 5 | password=PWbmsql 6 | 7 | warehouses=1 8 | loadWorkers=4 9 | 10 | terminals=1 11 | //To run specified transactions per terminal- runMins must equal zero 12 | runTxnsPerTerminal=10 13 | //To run for specified minutes- runTxnsPerTerminal must equal zero 14 | runMins=0 15 | //Number of total transactions per minute 16 | limitTxnsPerMin=300 17 | 18 | //Set to true to run in 4.x compatible mode. Set to false to use the 19 | //entire configured database evenly. 20 | terminalWarehouseFixed=true 21 | 22 | //The following five values must add up to 100 23 | //The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec 24 | newOrderWeight=45 25 | paymentWeight=43 26 | orderStatusWeight=4 27 | deliveryWeight=4 28 | stockLevelWeight=4 29 | 30 | // Directory name to create for collecting detailed result data. 31 | // Comment this out to suppress. 32 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS 33 | osCollectorScript=./misc/os_collector_linux.py 34 | osCollectorInterval=1 35 | //osCollectorSSHAddr=user@dbhost 36 | osCollectorDevices=net_eth0 blk_sda 37 | -------------------------------------------------------------------------------- /run/runBenchmark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ $# -ne 1 ] ; then 4 | echo "usage: $(basename $0) PROPS_FILE" >&2 5 | exit 2 6 | fi 7 | 8 | SEQ_FILE="./.jTPCC_run_seq.dat" 9 | if [ ! 
-f "${SEQ_FILE}" ] ; then 10 | echo "0" > "${SEQ_FILE}" 11 | fi 12 | SEQ=$(expr $(cat "${SEQ_FILE}") + 1) || exit 1 13 | echo "${SEQ}" > "${SEQ_FILE}" 14 | 15 | source funcs.sh $1 16 | 17 | setCP || exit 1 18 | 19 | myOPTS="-Dprop=$1 -DrunID=${SEQ}" 20 | 21 | java -cp "$myCP" $myOPTS jTPCC 22 | -------------------------------------------------------------------------------- /run/runDatabaseBuild.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ $# -lt 1 ] ; then 4 | echo "usage: $(basename $0) PROPS [OPT VAL [...]]" >&2 5 | exit 2 6 | fi 7 | 8 | PROPS="$1" 9 | shift 10 | if [ ! -f "${PROPS}" ] ; then 11 | echo "${PROPS}: no such file or directory" >&2 12 | exit 1 13 | fi 14 | DB="$(grep '^db=' $PROPS | sed -e 's/^db=//')" 15 | 16 | BEFORE_LOAD="tableCreates" 17 | AFTER_LOAD="indexCreates foreignKeys extraHistID buildFinish" 18 | 19 | for step in ${BEFORE_LOAD} ; do 20 | ./runSQL.sh "${PROPS}" $step 21 | done 22 | 23 | ./runLoader.sh "${PROPS}" $* 24 | 25 | for step in ${AFTER_LOAD} ; do 26 | ./runSQL.sh "${PROPS}" $step 27 | done 28 | -------------------------------------------------------------------------------- /run/runDatabaseDestroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ $# -ne 1 ] ; then 4 | echo "usage: $(basename $0) PROPS" >&2 5 | exit 2 6 | fi 7 | 8 | PROPS="$1" 9 | if [ ! -f "${PROPS}" ] ; then 10 | echo "${PROPS}: no such file or directory" >&2 11 | exit 1 12 | fi 13 | DB="$(grep '^db=' $PROPS | sed -e 's/^db=//')" 14 | 15 | STEPS="tableDrops" 16 | 17 | for step in ${STEPS} ; do 18 | ./runSQL.sh "${PROPS}" $step 19 | done 20 | -------------------------------------------------------------------------------- /run/runLoader.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ $# -lt 1 ] ; then 4 | echo "usage: $(basename $0) PROPS_FILE [ARGS]" >&2 5 | exit 2 6 | fi 7 | 8 | source funcs.sh $1 9 | shift 10 | 11 | setCP || exit 1 12 | 13 | java -cp "$myCP" -Dprop=$PROPS LoadData $* 14 | -------------------------------------------------------------------------------- /run/runSQL.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ---- 4 | # Check command line usage 5 | # ---- 6 | if [ $# -ne 2 ] ; then 7 | echo "usage: $(basename $0) PROPS_FILE SQL_FILE" >&2 8 | exit 2 9 | fi 10 | 11 | # ---- 12 | # Load common functions 13 | # ---- 14 | source funcs.sh $1 15 | 16 | # ---- 17 | # Determine which SQL file to use. 18 | # 19 | # 1) If $2 specifies a file that ends in .sql, we use that. 20 | # 2) If a file ./sql./$2.sql exists, we use that. 21 | # 3) If none of the above, use ./sql.common/$2.sql. 22 | # ---- 23 | if echo "$2" | grep -q -e '\.sql$' ; then 24 | ENDS_WITH_SQL=1 25 | else 26 | ENDS_WITH_SQL=0 27 | fi 28 | 29 | if [ -f "${2}" -a $ENDS_WITH_SQL -eq 1 ] ; then 30 | SQL_FILE="$2" 31 | else 32 | if [ -f "./sql.$(getProp db)/${2}.sql" ] ; then 33 | SQL_FILE="./sql.$(getProp db)/${2}.sql" 34 | else 35 | SQL_FILE="./sql.common/${2}.sql" 36 | if [ ! -f "${SQL_FILE}" ] ; then 37 | echo "ERROR: Cannot locate SQL file for ${2}" >&2 38 | exit 1 39 | fi 40 | fi 41 | fi 42 | 43 | # ---- 44 | # Set myCP according to the database type. 
45 | # ---- 46 | setCP || exit 1 47 | 48 | echo "# ------------------------------------------------------------" 49 | echo "# Loading SQL file ${SQL_FILE}" 50 | echo "# ------------------------------------------------------------" 51 | myOPTS="-Dprop=$1" 52 | myOPTS="$myOPTS -DcommandFile=${SQL_FILE}" 53 | java -cp "$myCP" $myOPTS ExecJDBC 54 | -------------------------------------------------------------------------------- /run/sql.common/buildFinish.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra commands to run after the tables are created, loaded, 3 | -- indexes built and extra's created. 4 | -- ---- 5 | -------------------------------------------------------------------------------- /run/sql.common/foreignKeys.sql: -------------------------------------------------------------------------------- 1 | 2 | alter table bmsql_district add constraint d_warehouse_fkey 3 | foreign key (d_w_id) 4 | references bmsql_warehouse (w_id); 5 | 6 | alter table bmsql_customer add constraint c_district_fkey 7 | foreign key (c_w_id, c_d_id) 8 | references bmsql_district (d_w_id, d_id); 9 | 10 | alter table bmsql_history add constraint h_customer_fkey 11 | foreign key (h_c_w_id, h_c_d_id, h_c_id) 12 | references bmsql_customer (c_w_id, c_d_id, c_id); 13 | alter table bmsql_history add constraint h_district_fkey 14 | foreign key (h_w_id, h_d_id) 15 | references bmsql_district (d_w_id, d_id); 16 | 17 | alter table bmsql_new_order add constraint no_order_fkey 18 | foreign key (no_w_id, no_d_id, no_o_id) 19 | references bmsql_oorder (o_w_id, o_d_id, o_id); 20 | 21 | alter table bmsql_oorder add constraint o_customer_fkey 22 | foreign key (o_w_id, o_d_id, o_c_id) 23 | references bmsql_customer (c_w_id, c_d_id, c_id); 24 | 25 | alter table bmsql_order_line add constraint ol_order_fkey 26 | foreign key (ol_w_id, ol_d_id, ol_o_id) 27 | references bmsql_oorder (o_w_id, o_d_id, o_id); 28 | alter table bmsql_order_line add constraint ol_stock_fkey 29 | foreign key (ol_supply_w_id, ol_i_id) 30 | references bmsql_stock (s_w_id, s_i_id); 31 | 32 | alter table bmsql_stock add constraint s_warehouse_fkey 33 | foreign key (s_w_id) 34 | references bmsql_warehouse (w_id); 35 | alter table bmsql_stock add constraint s_item_fkey 36 | foreign key (s_i_id) 37 | references bmsql_item (i_id); 38 | -------------------------------------------------------------------------------- /run/sql.common/indexCreates.sql: -------------------------------------------------------------------------------- 1 | 2 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey 3 | primary key (w_id); 4 | 5 | alter table bmsql_district add constraint bmsql_district_pkey 6 | primary key (d_w_id, d_id); 7 | 8 | alter table bmsql_customer add constraint bmsql_customer_pkey 9 | primary key (c_w_id, c_d_id, c_id); 10 | 11 | create index bmsql_customer_idx1 12 | on bmsql_customer (c_w_id, c_d_id, c_last, c_first); 13 | 14 | alter table bmsql_oorder add constraint bmsql_oorder_pkey 15 | primary key (o_w_id, o_d_id, o_id); 16 | 17 | create unique index bmsql_oorder_idx1 18 | on bmsql_oorder (o_w_id, o_d_id, o_carrier_id, o_id); 19 | 20 | alter table bmsql_new_order add constraint bmsql_new_order_pkey 21 | primary key (no_w_id, no_d_id, no_o_id); 22 | 23 | alter table bmsql_order_line add constraint bmsql_order_line_pkey 24 | primary key (ol_w_id, ol_d_id, ol_o_id, ol_number); 25 | 26 | alter table bmsql_stock add constraint bmsql_stock_pkey 27 | primary key (s_w_id, s_i_id); 28 | 29 | alter 
table bmsql_item add constraint bmsql_item_pkey 30 | primary key (i_id); 31 | 32 | -------------------------------------------------------------------------------- /run/sql.common/indexDrops.sql: -------------------------------------------------------------------------------- 1 | 2 | alter table bmsql_warehouse drop constraint bmsql_warehouse_pkey; 3 | 4 | alter table bmsql_district drop constraint bmsql_district_pkey; 5 | 6 | alter table bmsql_customer drop constraint bmsql_customer_pkey; 7 | drop index bmsql_customer_idx1; 8 | 9 | -- history table has no primary key 10 | -- commit; 11 | 12 | alter table bmsql_oorder drop constraint bmsql_oorder_pkey; 13 | drop index bmsql_oorder_idx1; 14 | 15 | alter table bmsql_new_order drop constraint bmsql_new_order_pkey; 16 | 17 | alter table bmsql_order_line drop constraint bmsql_order_line_pkey; 18 | 19 | alter table bmsql_stock drop constraint bmsql_stock_pkey; 20 | 21 | alter table bmsql_item drop constraint bmsql_item_pkey; 22 | -------------------------------------------------------------------------------- /run/sql.common/tableCreates.sql: -------------------------------------------------------------------------------- 1 | create table bmsql_config ( 2 | cfg_name varchar(30) primary key, 3 | cfg_value varchar(50) 4 | ); 5 | 6 | create table bmsql_warehouse ( 7 | w_id integer not null, 8 | w_ytd decimal(12,2), 9 | w_tax decimal(4,4), 10 | w_name varchar(10), 11 | w_street_1 varchar(20), 12 | w_street_2 varchar(20), 13 | w_city varchar(20), 14 | w_state char(2), 15 | w_zip char(9) 16 | ); 17 | 18 | create table bmsql_district ( 19 | d_w_id integer not null, 20 | d_id integer not null, 21 | d_ytd decimal(12,2), 22 | d_tax decimal(4,4), 23 | d_next_o_id integer, 24 | d_name varchar(10), 25 | d_street_1 varchar(20), 26 | d_street_2 varchar(20), 27 | d_city varchar(20), 28 | d_state char(2), 29 | d_zip char(9) 30 | ); 31 | 32 | create table bmsql_customer ( 33 | c_w_id integer not null, 34 | c_d_id integer not null, 35 | c_id integer not null, 36 | c_discount decimal(4,4), 37 | c_credit char(2), 38 | c_last varchar(16), 39 | c_first varchar(16), 40 | c_credit_lim decimal(12,2), 41 | c_balance decimal(12,2), 42 | c_ytd_payment decimal(12,2), 43 | c_payment_cnt integer, 44 | c_delivery_cnt integer, 45 | c_street_1 varchar(20), 46 | c_street_2 varchar(20), 47 | c_city varchar(20), 48 | c_state char(2), 49 | c_zip char(9), 50 | c_phone char(16), 51 | c_since timestamp, 52 | c_middle char(2), 53 | c_data varchar(500) 54 | ); 55 | 56 | create sequence bmsql_hist_id_seq; 57 | 58 | create table bmsql_history ( 59 | hist_id integer, 60 | h_c_id integer, 61 | h_c_d_id integer, 62 | h_c_w_id integer, 63 | h_d_id integer, 64 | h_w_id integer, 65 | h_date timestamp, 66 | h_amount decimal(6,2), 67 | h_data varchar(24) 68 | ); 69 | 70 | create table bmsql_new_order ( 71 | no_w_id integer not null, 72 | no_d_id integer not null, 73 | no_o_id integer not null 74 | ); 75 | 76 | create table bmsql_oorder ( 77 | o_w_id integer not null, 78 | o_d_id integer not null, 79 | o_id integer not null, 80 | o_c_id integer, 81 | o_carrier_id integer, 82 | o_ol_cnt integer, 83 | o_all_local integer, 84 | o_entry_d timestamp 85 | ); 86 | 87 | create table bmsql_order_line ( 88 | ol_w_id integer not null, 89 | ol_d_id integer not null, 90 | ol_o_id integer not null, 91 | ol_number integer not null, 92 | ol_i_id integer not null, 93 | ol_delivery_d timestamp, 94 | ol_amount decimal(6,2), 95 | ol_supply_w_id integer, 96 | ol_quantity integer, 97 | ol_dist_info char(24) 98 | ); 99 | 
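The row counts behind these tables all scale with the warehouse count. Per the TPC-C initial-load rules that BenchmarkSQL follows (10 districts per warehouse, 3,000 customers and 3,000 orders per district, on average 10 order lines per order, the newest 900 orders per district pending in new_order, 100,000 stock rows per warehouse, and a fixed 100,000 items), the expected sizes can be sketched as follows (illustrative Python, not repository code):

    # Expected initial row counts per the TPC-C load rules.
    def initial_rows(warehouses):
        districts = warehouses * 10
        customers = districts * 3000
        return {
            "bmsql_warehouse":  warehouses,
            "bmsql_district":   districts,
            "bmsql_customer":   customers,
            "bmsql_history":    customers,              # one row per customer
            "bmsql_oorder":     districts * 3000,
            "bmsql_order_line": districts * 3000 * 10,  # 5..15 lines, avg 10
            "bmsql_new_order":  districts * 900,        # newest 900 orders
            "bmsql_stock":      warehouses * 100000,
            "bmsql_item":       100000,                 # fixed size
        }

    # e.g. a 30 warehouse load creates about 9,000,000 order lines:
    # initial_rows(30)["bmsql_order_line"] == 9000000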
100 | create table bmsql_item ( 101 | i_id integer not null, 102 | i_name varchar(24), 103 | i_price decimal(5,2), 104 | i_data varchar(50), 105 | i_im_id integer 106 | ); 107 | 108 | create table bmsql_stock ( 109 | s_w_id integer not null, 110 | s_i_id integer not null, 111 | s_quantity integer, 112 | s_ytd integer, 113 | s_order_cnt integer, 114 | s_remote_cnt integer, 115 | s_data varchar(50), 116 | s_dist_01 char(24), 117 | s_dist_02 char(24), 118 | s_dist_03 char(24), 119 | s_dist_04 char(24), 120 | s_dist_05 char(24), 121 | s_dist_06 char(24), 122 | s_dist_07 char(24), 123 | s_dist_08 char(24), 124 | s_dist_09 char(24), 125 | s_dist_10 char(24) 126 | ); 127 | 128 | 129 | -------------------------------------------------------------------------------- /run/sql.common/tableDrops.sql: -------------------------------------------------------------------------------- 1 | drop table bmsql_config; 2 | 3 | drop table bmsql_new_order; 4 | 5 | drop table bmsql_order_line; 6 | 7 | drop table bmsql_oorder; 8 | 9 | drop table bmsql_history; 10 | 11 | drop table bmsql_customer; 12 | 13 | drop table bmsql_stock; 14 | 15 | drop table bmsql_item; 16 | 17 | drop table bmsql_district; 18 | 19 | drop table bmsql_warehouse; 20 | 21 | drop sequence bmsql_hist_id_seq; 22 | 23 | -------------------------------------------------------------------------------- /run/sql.common/tableTruncates.sql: -------------------------------------------------------------------------------- 1 | 2 | truncate table bmsql_warehouse; 3 | 4 | truncate table bmsql_item; 5 | 6 | truncate table bmsql_stock; 7 | 8 | truncate table bmsql_district; 9 | 10 | truncate table bmsql_customer; 11 | 12 | truncate table bmsql_history; 13 | 14 | truncate table bmsql_oorder; 15 | 16 | truncate table bmsql_order_line; 17 | 18 | truncate table bmsql_new_order; 19 | -------------------------------------------------------------------------------- /run/sql.common/test.sql: -------------------------------------------------------------------------------- 1 | -- SET search_path TO TPCC; 2 | -- Condition 1: W_YTD = sum(D_YTD) 3 | SELECT * FROM (SELECT w.w_id, w.w_ytd, d.sum_d_ytd 4 | FROM bmsql_warehouse w, 5 | (SELECT d_w_id, SUM(d_ytd) sum_d_ytd 6 | FROM bmsql_district 7 | GROUP BY d_w_id) d 8 | WHERE w.w_id = d.d_w_id) as x 9 | WHERE w_ytd != sum_d_ytd; 10 | 11 | -- Condition 2: D_NEXT_O_ID - 1 = max(O_ID) = max(NO_O_ID) 12 | SELECT * FROM (SELECT d.d_w_id, d.d_id, d.d_next_o_id, o.max_o_id, no.max_no_o_id 13 | FROM bmsql_district d, 14 | (SELECT o_w_id, o_d_id, MAX(o_id) max_o_id 15 | FROM bmsql_oorder 16 | GROUP BY o_w_id, o_d_id) o, 17 | (SELECT no_w_id, no_d_id, MAX(no_o_id) max_no_o_id 18 | FROM bmsql_new_order 19 | GROUP BY no_w_id, no_d_id) no 20 | WHERE d.d_w_id = o.o_w_id AND d.d_w_id = no.no_w_id AND 21 | d.d_id = o.o_d_id AND d.d_id = no.no_d_id) as x 22 | WHERE d_next_o_id - 1 != max_o_id OR d_next_o_id - 1 != max_no_o_id; 23 | 24 | -- Condition 3: max(NO_O_ID) - min(NO_O_ID) + 1 25 | -- = [number of rows in the NEW-ORDER table for this bmsql_district] 26 | SELECT * FROM (SELECT no_w_id, no_d_id, MAX(no_o_id) max_no_o_id, 27 | MIN(no_o_id) min_no_o_id, COUNT(*) count_no 28 | FROM bmsql_new_order 29 | GROUP BY no_w_id, no_d_Id) as x 30 | WHERE max_no_o_id - min_no_o_id + 1 != count_no; 31 | 32 | -- Condition 4: sum(O_OL_CNT) 33 | -- = [number of rows in the ORDER-LINE table for this bmsql_district] 34 | SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.sum_o_ol_cnt, ol.count_ol 35 | FROM (SELECT o_w_id, o_d_id, SUM(o_ol_cnt) sum_o_ol_cnt 36 | 
FROM bmsql_oorder 37 | GROUP BY o_w_id, o_d_id) o, 38 | (SELECT ol_w_id, ol_d_id, COUNT(*) count_ol 39 | FROM bmsql_order_line 40 | GROUP BY ol_w_id, ol_d_id) ol 41 | WHERE o.o_w_id = ol.ol_w_id AND 42 | o.o_d_id = ol.ol_d_id) as x 43 | WHERE sum_o_ol_cnt != count_ol; 44 | 45 | -- Condition 5: For any row in the ORDER table, O_CARRIER_ID is set to a null 46 | -- value if and only if there is a corresponding row in the 47 | -- NEW-ORDER table 48 | SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.o_id, o.o_carrier_id, no.count_no 49 | FROM bmsql_oorder o, 50 | (SELECT no_w_id, no_d_id, no_o_id, COUNT(*) count_no 51 | FROM bmsql_new_order 52 | GROUP BY no_w_id, no_d_id, no_o_id) no 53 | WHERE o.o_w_id = no.no_w_id AND 54 | o.o_d_id = no.no_d_id AND 55 | o.o_id = no.no_o_id) as x 56 | WHERE (o_carrier_id IS NULL AND count_no = 0) OR 57 | (o_carrier_id IS NOT NULL AND count_no != 0); 58 | 59 | -- Condition 6: For any row in the ORDER table, O_OL_CNT must equal the number 60 | -- of rows in the ORDER-LINE table for the corresponding order 61 | SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.o_id, o.o_ol_cnt, ol.count_ol 62 | FROM bmsql_oorder o, 63 | (SELECT ol_w_id, ol_d_id, ol_o_id, COUNT(*) count_ol 64 | FROM bmsql_order_line 65 | GROUP BY ol_w_id, ol_d_id, ol_o_id) ol 66 | WHERE o.o_w_id = ol.ol_w_id AND 67 | o.o_d_id = ol.ol_d_id AND 68 | o.o_id = ol.ol_o_id) as x 69 | WHERE o_ol_cnt != count_ol; 70 | 71 | -- Condition 7: For any row in the ORDER-LINE table, OL_DELIVERY_D is set to 72 | -- a null date/time if and only if the corresponding row in the 73 | -- ORDER table has O_CARRIER_ID set to a null value 74 | SELECT * FROM (SELECT ol.ol_w_id, ol.ol_d_id, ol.ol_o_id, ol.ol_delivery_d, 75 | o.o_carrier_id 76 | FROM bmsql_order_line ol, 77 | bmsql_oorder o 78 | WHERE ol.ol_w_id = o.o_w_id AND 79 | ol.ol_d_id = o.o_d_id AND 80 | ol.ol_o_id = o.o_id) as x 81 | WHERE (ol_delivery_d IS NULL AND o_carrier_id IS NOT NULL) OR 82 | (ol_delivery_d IS NOT NULL AND o_carrier_id IS NULL); 83 | 84 | -- Condition 8: W_YTD = sum(H_AMOUNT) 85 | SELECT * 86 | FROM (SELECT w.w_id, w.w_ytd, h.sum_h_amount 87 | FROM bmsql_warehouse w, 88 | (SELECT h_w_id, SUM(h_amount) sum_h_amount FROM bmsql_history GROUP BY h_w_id) h 89 | WHERE w.w_id = h.h_w_id) as x 90 | WHERE w_ytd != sum_h_amount; 91 | 92 | -- Condition 9: D_YTD = sum(H_AMOUNT) 93 | SELECT * 94 | FROM (SELECT d.d_w_id, d.d_id, d.d_ytd, h.sum_h_amount 95 | FROM bmsql_district d, 96 | (SELECT h_w_id, h_d_id, SUM(h_amount) sum_h_amount 97 | FROM bmsql_history 98 | GROUP BY h_w_id, h_d_id) h 99 | WHERE d.d_w_id = h.h_w_id 100 | AND d.d_id = h.h_d_id) as x 101 | WHERE d_ytd != sum_h_amount; 102 | -------------------------------------------------------------------------------- /run/sql.firebird/extraHistID.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra Schema objects/definitions for history.hist_id in Firebird 3 | -- ---- 4 | 5 | -- ---- 6 | -- This is an extra column not present in the TPC-C 7 | -- specs. It is useful for replication systems like 8 | -- Bucardo and Slony-I, which like to have a primary 9 | -- key on a table. It is an auto-increment or serial 10 | -- column type. The definition below is compatible 11 | -- with Firebird, using the sequence in a trigger. 
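-- (Throughout this file, semicolons inside the blocks below are
-- written as \; so that the SQL runner, ExecJDBC, does not split the
-- block at every ';'; only the unescaped trailing semicolon ends a
-- statement.)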
12 | -- ---- 13 | -- Adjust the sequence above the current max(hist_id) 14 | execute block 15 | as 16 | declare max_hist_id integer\; 17 | declare dummy integer\; 18 | begin 19 | max_hist_id = (select max(hist_id) + 1 from bmsql_history)\; 20 | dummy = GEN_ID(bmsql_hist_id_seq, -GEN_ID(bmsql_hist_id_seq, 0))\; 21 | dummy = GEN_ID(bmsql_hist_id_seq, max_hist_id)\; 22 | end; 23 | 24 | -- Create a trigger that forces hist_id to be hist_id_seq.nextval 25 | create trigger bmsql_hist_id_gen for bmsql_history 26 | active before insert 27 | as 28 | begin 29 | if (new.hist_id is null) then 30 | new.hist_id = GEN_ID(bmsql_hist_id_seq, 1)\; 31 | end; 32 | 33 | -- Add a primary key history(hist_id) 34 | -- Firebird lacks the capacity to declare an existing column NOT NULL. 35 | -- In order to not impose overhead due to CHECK constraints or other 36 | -- constructs, we leave the column nullable because the above trigger 37 | -- makes sure it isn't (at least on insert, which is all we ever do). 38 | create unique index bmsql_history_idx1 on bmsql_history (hist_id); 39 | -------------------------------------------------------------------------------- /run/sql.mysql/buildFinish.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra commands to run after the tables are created, loaded, 3 | -- indexes built and extra's created. 4 | -- ---- 5 | 6 | analyze table bmsql_warehouse; 7 | analyze table bmsql_district; 8 | analyze table bmsql_customer; 9 | analyze table bmsql_history; 10 | analyze table bmsql_oorder; 11 | analyze table bmsql_new_order; 12 | analyze table bmsql_order_line; 13 | analyze table bmsql_stock; 14 | analyze table bmsql_item; 15 | analyze table bmsql_config; 16 | -------------------------------------------------------------------------------- /run/sql.mysql/indexCreates.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pingcap/benchmarksql/815fdb24b53c523c2883278f3c5d038c6df915ce/run/sql.mysql/indexCreates.sql -------------------------------------------------------------------------------- /run/sql.mysql/indexDrops.sql: -------------------------------------------------------------------------------- 1 | 2 | alter table bmsql_warehouse drop constraint bmsql_warehouse_pkey; 3 | 4 | alter table bmsql_district drop constraint bmsql_district_pkey; 5 | 6 | alter table bmsql_customer drop constraint bmsql_customer_pkey; 7 | drop index bmsql_customer_idx1; 8 | 9 | -- history table has no primary key 10 | -- commit; 11 | 12 | alter table bmsql_oorder drop constraint bmsql_oorder_pkey; 13 | drop index bmsql_oorder_idx1; 14 | 15 | alter table bmsql_new_order drop constraint bmsql_new_order_pkey; 16 | 17 | alter table bmsql_order_line drop constraint bmsql_order_line_pkey; 18 | 19 | alter table bmsql_stock drop constraint bmsql_stock_pkey; 20 | 21 | alter table bmsql_item drop constraint bmsql_item_pkey; 22 | -------------------------------------------------------------------------------- /run/sql.mysql/tableCreates.sql: -------------------------------------------------------------------------------- 1 | create table bmsql_config ( 2 | cfg_name varchar(30) primary key, 3 | cfg_value varchar(50) 4 | ); 5 | 6 | create table bmsql_warehouse ( 7 | w_id integer not null, 8 | w_ytd decimal(12,2), 9 | w_tax decimal(4,4), 10 | w_name varchar(10), 11 | w_street_1 varchar(20), 12 | w_street_2 varchar(20), 13 | w_city varchar(20), 14 | w_state char(2), 15 | w_zip char(9), 16 | constraint 
pk_warehouse primary key (w_id) 17 | ); 18 | 19 | create table bmsql_district ( 20 | d_w_id integer not null, 21 | d_id integer not null, 22 | d_ytd decimal(12,2), 23 | d_tax decimal(4,4), 24 | d_next_o_id integer, 25 | d_name varchar(10), 26 | d_street_1 varchar(20), 27 | d_street_2 varchar(20), 28 | d_city varchar(20), 29 | d_state char(2), 30 | d_zip char(9), 31 | constraint pk_district primary key (d_w_id, d_id) 32 | ); 33 | 34 | create table bmsql_customer ( 35 | c_w_id integer not null, 36 | c_d_id integer not null, 37 | c_id integer not null, 38 | c_discount decimal(4,4), 39 | c_credit char(2), 40 | c_last varchar(16), 41 | c_first varchar(16), 42 | c_credit_lim decimal(12,2), 43 | c_balance decimal(12,2), 44 | c_ytd_payment decimal(12,2), 45 | c_payment_cnt integer, 46 | c_delivery_cnt integer, 47 | c_street_1 varchar(20), 48 | c_street_2 varchar(20), 49 | c_city varchar(20), 50 | c_state char(2), 51 | c_zip char(9), 52 | c_phone char(16), 53 | c_since timestamp, 54 | c_middle char(2), 55 | c_data varchar(500), 56 | constraint pk_customer primary key (c_w_id, c_d_id, c_id), 57 | key bmsql_customer_idx1 (c_w_id, c_d_id, c_last, c_first) 58 | ); 59 | 60 | -- create sequence bmsql_hist_id_seq; 61 | 62 | create table bmsql_history ( 63 | hist_id integer not null auto_increment primary key, 64 | h_c_id integer, 65 | h_c_d_id integer, 66 | h_c_w_id integer, 67 | h_d_id integer, 68 | h_w_id integer, 69 | h_date timestamp, 70 | h_amount decimal(6,2), 71 | h_data varchar(24) 72 | ); 73 | 74 | create table bmsql_new_order ( 75 | no_w_id integer not null, 76 | no_d_id integer not null, 77 | no_o_id integer not null, 78 | constraint pk_new_order primary key (no_w_id, no_d_id, no_o_id) 79 | ); 80 | 81 | create table bmsql_oorder ( 82 | o_w_id integer not null, 83 | o_d_id integer not null, 84 | o_id integer not null, 85 | o_c_id integer, 86 | o_carrier_id integer, 87 | o_ol_cnt integer, 88 | o_all_local integer, 89 | o_entry_d timestamp, 90 | constraint pk_oorder primary key (o_w_id, o_d_id, o_id), 91 | constraint bmsql_oorder_idx1 unique key (o_w_id, o_d_id, o_c_id, o_id) 92 | ); 93 | 94 | create table bmsql_order_line ( 95 | ol_w_id integer not null, 96 | ol_d_id integer not null, 97 | ol_o_id integer not null, 98 | ol_number integer not null, 99 | ol_i_id integer not null, 100 | ol_delivery_d timestamp, 101 | ol_amount decimal(6,2), 102 | ol_supply_w_id integer, 103 | ol_quantity integer, 104 | ol_dist_info char(24), 105 | constraint pk_order_line primary key (ol_w_id, ol_d_id, ol_o_id, ol_number) 106 | ); 107 | 108 | create table bmsql_item ( 109 | i_id integer not null, 110 | i_name varchar(24), 111 | i_price decimal(5,2), 112 | i_data varchar(50), 113 | i_im_id integer, 114 | constraint pk_item primary key (i_id) 115 | ); 116 | 117 | create table bmsql_stock ( 118 | s_w_id integer not null, 119 | s_i_id integer not null, 120 | s_quantity integer, 121 | s_ytd integer, 122 | s_order_cnt integer, 123 | s_remote_cnt integer, 124 | s_data varchar(50), 125 | s_dist_01 char(24), 126 | s_dist_02 char(24), 127 | s_dist_03 char(24), 128 | s_dist_04 char(24), 129 | s_dist_05 char(24), 130 | s_dist_06 char(24), 131 | s_dist_07 char(24), 132 | s_dist_08 char(24), 133 | s_dist_09 char(24), 134 | s_dist_10 char(24), 135 | constraint pk_stock primary key (s_w_id, s_i_id) 136 | ); 137 | 138 | -------------------------------------------------------------------------------- /run/sql.mysql/tableCreates_partition.sql: -------------------------------------------------------------------------------- 1 | create 
table bmsql_config ( 2 | cfg_name varchar(30) primary key, 3 | cfg_value varchar(50) 4 | ); 5 | 6 | create table bmsql_warehouse ( 7 | w_id integer not null, 8 | w_ytd decimal(12,2), 9 | w_tax decimal(4,4), 10 | w_name varchar(10), 11 | w_street_1 varchar(20), 12 | w_street_2 varchar(20), 13 | w_city varchar(20), 14 | w_state char(2), 15 | w_zip char(9), 16 | constraint pk_warehouse primary key (w_id) 17 | ) partition by hash(w_id) partitions 1024; 18 | 19 | create table bmsql_district ( 20 | d_w_id integer not null, 21 | d_id integer not null, 22 | d_ytd decimal(12,2), 23 | d_tax decimal(4,4), 24 | d_next_o_id integer, 25 | d_name varchar(10), 26 | d_street_1 varchar(20), 27 | d_street_2 varchar(20), 28 | d_city varchar(20), 29 | d_state char(2), 30 | d_zip char(9), 31 | constraint pk_district primary key (d_w_id, d_id) 32 | ) partition by hash(d_w_id) partitions 1024; 33 | 34 | create table bmsql_customer ( 35 | c_w_id integer not null, 36 | c_d_id integer not null, 37 | c_id integer not null, 38 | c_discount decimal(4,4), 39 | c_credit char(2), 40 | c_last varchar(16), 41 | c_first varchar(16), 42 | c_credit_lim decimal(12,2), 43 | c_balance decimal(12,2), 44 | c_ytd_payment decimal(12,2), 45 | c_payment_cnt integer, 46 | c_delivery_cnt integer, 47 | c_street_1 varchar(20), 48 | c_street_2 varchar(20), 49 | c_city varchar(20), 50 | c_state char(2), 51 | c_zip char(9), 52 | c_phone char(16), 53 | c_since timestamp, 54 | c_middle char(2), 55 | c_data varchar(500), 56 | constraint pk_customer primary key (c_w_id, c_d_id, c_id), 57 | key bmsql_customer_idx1 (c_w_id, c_d_id, c_last, c_first) 58 | ); 59 | 60 | -- create sequence bmsql_hist_id_seq; 61 | 62 | create table bmsql_history ( 63 | hist_id integer not null auto_increment primary key, 64 | h_c_id integer, 65 | h_c_d_id integer, 66 | h_c_w_id integer, 67 | h_d_id integer, 68 | h_w_id integer, 69 | h_date timestamp, 70 | h_amount decimal(6,2), 71 | h_data varchar(24) 72 | ); 73 | 74 | create table bmsql_new_order ( 75 | no_w_id integer not null, 76 | no_d_id integer not null, 77 | no_o_id integer not null, 78 | constraint pk_new_order primary key (no_w_id, no_d_id, no_o_id) 79 | ) partition by hash(no_w_id) partitions 128; 80 | 81 | create table bmsql_oorder ( 82 | o_w_id integer not null, 83 | o_d_id integer not null, 84 | o_id integer not null, 85 | o_c_id integer, 86 | o_carrier_id integer, 87 | o_ol_cnt integer, 88 | o_all_local integer, 89 | o_entry_d timestamp, 90 | constraint pk_oorder primary key (o_w_id, o_d_id, o_id), 91 | constraint bmsql_oorder_idx1 unique key (o_w_id, o_d_id, o_carrier_id, o_id) 92 | ) partition by hash(o_w_id) partitions 128; 93 | 94 | create table bmsql_order_line ( 95 | ol_w_id integer not null, 96 | ol_d_id integer not null, 97 | ol_o_id integer not null, 98 | ol_number integer not null, 99 | ol_i_id integer not null, 100 | ol_delivery_d timestamp, 101 | ol_amount decimal(6,2), 102 | ol_supply_w_id integer, 103 | ol_quantity integer, 104 | ol_dist_info char(24), 105 | constraint pk_order_line primary key (ol_w_id, ol_d_id, ol_o_id, ol_number) 106 | ) partition by hash(ol_w_id) partitions 1024; 107 | 108 | create table bmsql_item ( 109 | i_id integer not null, 110 | i_name varchar(24), 111 | i_price decimal(5,2), 112 | i_data varchar(50), 113 | i_im_id integer, 114 | constraint pk_item primary key (i_id) 115 | ); 116 | 117 | create table bmsql_stock ( 118 | s_w_id integer not null, 119 | s_i_id integer not null, 120 | s_quantity integer, 121 | s_ytd integer, 122 | s_order_cnt integer, 123 | s_remote_cnt 
integer, 124 | s_data varchar(50), 125 | s_dist_01 char(24), 126 | s_dist_02 char(24), 127 | s_dist_03 char(24), 128 | s_dist_04 char(24), 129 | s_dist_05 char(24), 130 | s_dist_06 char(24), 131 | s_dist_07 char(24), 132 | s_dist_08 char(24), 133 | s_dist_09 char(24), 134 | s_dist_10 char(24), 135 | constraint pk_stock primary key (s_w_id, s_i_id) 136 | ) partition by hash(s_w_id) partitions 1024; 137 | 138 | -------------------------------------------------------------------------------- /run/sql.mysql/tableDrops.sql: -------------------------------------------------------------------------------- 1 | drop table bmsql_config; 2 | drop table bmsql_new_order; 3 | drop table bmsql_order_line; 4 | drop table bmsql_oorder; 5 | drop table bmsql_history; 6 | drop table bmsql_customer; 7 | drop table bmsql_stock; 8 | drop table bmsql_item; 9 | drop table bmsql_district; 10 | drop table bmsql_warehouse; 11 | 12 | -------------------------------------------------------------------------------- /run/sql.mysql/tableTruncates.sql: -------------------------------------------------------------------------------- 1 | truncate table bmsql_warehouse; 2 | truncate table bmsql_item; 3 | truncate table bmsql_stock; 4 | truncate table bmsql_district; 5 | truncate table bmsql_customer; 6 | truncate table bmsql_history; 7 | truncate table bmsql_oorder; 8 | truncate table bmsql_order_line; 9 | truncate table bmsql_new_order; 10 | truncate table bmsql_config; 11 | -------------------------------------------------------------------------------- /run/sql.oracle/extraHistID.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra Schema objects/definitions for history.hist_id in Oracle 3 | -- ---- 4 | 5 | -- ---- 6 | -- This is an extra column not present in the TPC-C 7 | -- specs. It is useful for replication systems like 8 | -- Bucardo and Slony-I, which like to have a primary 9 | -- key on a table. It is an auto-increment or serial 10 | -- column type. The definition below is compatible 11 | -- with Oracle 11g, using the sequence in a trigger. 12 | -- ---- 13 | -- Adjust the sequence above the current max(hist_id) 14 | alter sequence bmsql_hist_id_seq increment by 30000; 15 | declare 16 | n integer\; 17 | i integer\; 18 | dummy integer\; 19 | begin 20 | select max(hist_id) into n from bmsql_history\; 21 | i := 0\; 22 | while i <= n loop 23 | select bmsql_hist_id_seq.nextval into dummy from dual\; 24 | i := i + 30000\; 25 | end loop\; 26 | end\; 27 | ; 28 | alter sequence bmsql_hist_id_seq increment by 1; 29 | 30 | -- Create a trigger that forces hist_id to be hist_id_seq.nextval 31 | create trigger bmsql_history_before_insert 32 | before insert on bmsql_history 33 | for each row 34 | begin 35 | if :new.hist_id is null then 36 | select bmsql_hist_id_seq.nextval into :new.hist_id from dual\; 37 | end if\; 38 | end\; 39 | ; 40 | 41 | -- Add a primary key history(hist_id) 42 | alter table bmsql_history add primary key (hist_id); 43 | -------------------------------------------------------------------------------- /run/sql.postgres/buildFinish.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra commands to run after the tables are created, loaded, 3 | -- indexes built and extras created. 4 | -- PostgreSQL version.
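-- ('vacuum analyze' after the initial load both sets hint bits and
-- collects planner statistics, so the first measured transactions
-- do not pay those costs.)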
5 | -- ---- 6 | 7 | vacuum analyze; 8 | -------------------------------------------------------------------------------- /run/sql.postgres/extraHistID.sql: -------------------------------------------------------------------------------- 1 | -- ---- 2 | -- Extra Schema objects/definitions for history.hist_id in PostgreSQL 3 | -- ---- 4 | 5 | -- ---- 6 | -- This is an extra column not present in the TPC-C 7 | -- specs. It is useful for replication systems like 8 | -- Bucardo and Slony-I, which like to have a primary 9 | -- key on a table. It is an auto-increment or serial 10 | -- column type. The definition below is compatible 11 | -- with PostgreSQL, using the sequence as the column default. 12 | -- ---- 13 | -- Adjust the sequence above the current max(hist_id) 14 | select setval('bmsql_hist_id_seq', (select max(hist_id) from bmsql_history)); 15 | 16 | -- Make nextval(seq) the default value of the hist_id column. 17 | alter table bmsql_history 18 | alter column hist_id set default nextval('bmsql_hist_id_seq'); 19 | 20 | -- Add a primary key history(hist_id) 21 | alter table bmsql_history add primary key (hist_id); 22 | -------------------------------------------------------------------------------- /run/sql.postgres/tableCopies.sql: -------------------------------------------------------------------------------- 1 | 2 | copy bmsql_config 3 | (cfg_name, cfg_value) 4 | from '/tmp/csv/bmsql_config.csv' WITH CSV; 5 | 6 | copy bmsql_warehouse 7 | (w_id, w_ytd, w_tax, w_name, w_street_1, w_street_2, w_city, w_state, w_zip) 8 | from '/tmp/csv/bmsql_warehouse.csv' WITH CSV; 9 | 10 | copy bmsql_item 11 | (i_id, i_name, i_price, i_data, i_im_id) 12 | from '/tmp/csv/bmsql_item.csv' WITH CSV; 13 | 14 | copy bmsql_stock 15 | (s_i_id, s_w_id, s_quantity, s_ytd, s_order_cnt, s_remote_cnt, s_data, 16 | s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05, 17 | s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10) 18 | from '/tmp/csv/bmsql_stock.csv' WITH CSV; 19 | 20 | copy bmsql_district 21 | (d_id, d_w_id, d_ytd, d_tax, d_next_o_id, d_name, d_street_1, 22 | d_street_2, d_city, d_state, d_zip) 23 | from '/tmp/csv/bmsql_district.csv' WITH CSV; 24 | 25 | copy bmsql_customer 26 | (c_id, c_d_id, c_w_id, c_discount, c_credit, c_last, c_first, c_credit_lim, 27 | c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_street_1, 28 | c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_middle, c_data) 29 | from '/tmp/csv/bmsql_customer.csv' WITH CSV; 30 | 31 | copy bmsql_history 32 | (hist_id, h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, h_date, h_amount, h_data) 33 | from '/tmp/csv/bmsql_history.csv' WITH CSV; 34 | 35 | copy bmsql_oorder 36 | (o_id, o_w_id, o_d_id, o_c_id, o_carrier_id, o_ol_cnt, o_all_local, o_entry_d) 37 | from '/tmp/csv/bmsql_oorder.csv' WITH CSV NULL AS 'NULL'; 38 | 39 | copy bmsql_order_line 40 | (ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_delivery_d, 41 | ol_amount, ol_supply_w_id, ol_quantity, ol_dist_info) 42 | from '/tmp/csv/bmsql_order_line.csv' WITH CSV NULL AS 'NULL'; 43 | 44 | copy bmsql_new_order 45 | (no_w_id, no_d_id, no_o_id) 46 | from '/tmp/csv/bmsql_new_order.csv' WITH CSV; 47 | -------------------------------------------------------------------------------- /src/LoadData/LoadData.java: -------------------------------------------------------------------------------- 1 | /* 2 | * LoadData - Load Sample Data directly into database tables or into 3 | * CSV files using multiple parallel workers.
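 * (The two modes correspond to the two LoadDataWorker constructors:
 * with fileLocation configured, rows are written to CSV files for a
 * later bulk load, e.g. via sql.postgres/tableCopies.sql; otherwise
 * each worker inserts directly over its own JDBC connection.)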
4 | * 5 | * Copyright (C) 2016, Denis Lussier 6 | * Copyright (C) 2016, Jan Wieck 7 | * 8 | */ 9 | 10 | import java.sql.*; 11 | import java.util.*; 12 | import java.io.*; 13 | import java.lang.Integer; 14 | 15 | public class LoadData 16 | { 17 | private static Properties ini = new Properties(); 18 | private static String db; 19 | private static Properties dbProps; 20 | private static jTPCCRandom rnd; 21 | private static String fileLocation = null; 22 | private static String csvNullValue = null; 23 | 24 | private static int numWarehouses; 25 | private static int numWorkers; 26 | private static int nextJob = 0; 27 | private static Object nextJobLock = new Object(); 28 | 29 | private static LoadDataWorker[] workers; 30 | private static Thread[] workerThreads; 31 | 32 | private static String[] argv; 33 | 34 | private static boolean writeCSV = false; 35 | private static BufferedWriter configCSV = null; 36 | private static BufferedWriter itemCSV = null; 37 | private static BufferedWriter warehouseCSV = null; 38 | private static BufferedWriter districtCSV = null; 39 | private static BufferedWriter stockCSV = null; 40 | private static BufferedWriter customerCSV = null; 41 | private static BufferedWriter historyCSV = null; 42 | private static BufferedWriter orderCSV = null; 43 | private static BufferedWriter orderLineCSV = null; 44 | private static BufferedWriter newOrderCSV = null; 45 | 46 | public static void main(String[] args) { 47 | int i; 48 | 49 | System.out.println("Starting BenchmarkSQL LoadData"); 50 | System.out.println(""); 51 | 52 | /* 53 | * Load the Benchmark properties file. 54 | */ 55 | try 56 | { 57 | ini.load(new FileInputStream(System.getProperty("prop"))); 58 | } 59 | catch (IOException e) 60 | { 61 | System.err.println("ERROR: " + e.getMessage()); 62 | System.exit(1); 63 | } 64 | argv = args; 65 | 66 | /* 67 | * Initialize the global Random generator that picks the 68 | * C values for the load. 69 | */ 70 | rnd = new jTPCCRandom(); 71 | 72 | /* 73 | * Load the JDBC driver and prepare the db and dbProps. 74 | */ 75 | try { 76 | Class.forName(iniGetString("driver")); 77 | } 78 | catch (Exception e) 79 | { 80 | System.err.println("ERROR: cannot load JDBC driver - " + 81 | e.getMessage()); 82 | System.exit(1); 83 | } 84 | db = iniGetString("conn"); 85 | dbProps = new Properties(); 86 | dbProps.setProperty("user", iniGetString("user")); 87 | dbProps.setProperty("password", iniGetString("password")); 88 | 89 | /* 90 | * Parse other vital information from the props file. 91 | */ 92 | numWarehouses = iniGetInt("warehouses"); 93 | numWorkers = iniGetInt("loadWorkers", 4); 94 | fileLocation = iniGetString("fileLocation"); 95 | csvNullValue = iniGetString("csvNullValue", "NULL"); 96 | 97 | /* 98 | * If CSV files are requested, open them all. 
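 * (Note that fileLocation is used as a plain string prefix, e.g.
 * fileLocation + "bmsql_config.csv" below, so it normally needs a
 * trailing path separator to place the files inside a directory.)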
99 | */ 100 | if (fileLocation != null) 101 | { 102 | writeCSV = true; 103 | 104 | try 105 | { 106 | configCSV = new BufferedWriter(new FileWriter(fileLocation + 107 | "bmsql_config.csv")); 108 | itemCSV = new BufferedWriter(new FileWriter(fileLocation + 109 | "bmsql_item.csv")); 110 | warehouseCSV = new BufferedWriter(new FileWriter(fileLocation + 111 | "bmsql_warehouse.csv")); 112 | districtCSV = new BufferedWriter(new FileWriter(fileLocation + 113 | "bmsql_district.csv")); 114 | stockCSV = new BufferedWriter(new FileWriter(fileLocation + 115 | "bmsql_stock.csv")); 116 | customerCSV = new BufferedWriter(new FileWriter(fileLocation + 117 | "bmsql_customer.csv")); 118 | historyCSV = new BufferedWriter(new FileWriter(fileLocation + 119 | "bmsql_history.csv")); 120 | orderCSV = new BufferedWriter(new FileWriter(fileLocation + 121 | "bmsql_oorder.csv")); 122 | orderLineCSV = new BufferedWriter(new FileWriter(fileLocation + 123 | "bmsql_order_line.csv")); 124 | newOrderCSV = new BufferedWriter(new FileWriter(fileLocation + 125 | "bmsql_new_order.csv")); 126 | } 127 | catch (IOException ie) 128 | { 129 | System.err.println(ie.getMessage()); 130 | System.exit(3); 131 | } 132 | } 133 | 134 | System.out.println(""); 135 | 136 | /* 137 | * Create the number of requested workers and start them. 138 | */ 139 | workers = new LoadDataWorker[numWorkers]; 140 | workerThreads = new Thread[numWorkers]; 141 | for (i = 0; i < numWorkers; i++) 142 | { 143 | Connection dbConn; 144 | 145 | try 146 | { 147 | dbConn = DriverManager.getConnection(db, dbProps); 148 | dbConn.setAutoCommit(false); 149 | if (writeCSV) 150 | workers[i] = new LoadDataWorker(i, csvNullValue, 151 | rnd.newRandom()); 152 | else 153 | workers[i] = new LoadDataWorker(i, dbConn, 154 | rnd.newRandom()); 155 | workerThreads[i] = new Thread(workers[i]); 156 | workerThreads[i].start(); 157 | } 158 | catch (SQLException se) 159 | { 160 | System.err.println("ERROR: " + se.getMessage()); 161 | System.exit(3); 162 | return; 163 | } 164 | 165 | } 166 | 167 | for (i = 0; i < numWorkers; i++) 168 | { 169 | try { 170 | workerThreads[i].join(); 171 | } 172 | catch (InterruptedException ie) 173 | { 174 | System.err.println("ERROR: worker " + i + " - " + 175 | ie.getMessage()); 176 | System.exit(4); 177 | } 178 | } 179 | 180 | /* 181 | * Close the CSV files if we are writing them. 
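 * (Closing the BufferedWriters also flushes any still-buffered rows;
 * skipping this step could silently truncate the CSV files.)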
182 | */ 183 | if (writeCSV) 184 | { 185 | try 186 | { 187 | configCSV.close(); 188 | itemCSV.close(); 189 | warehouseCSV.close(); 190 | districtCSV.close(); 191 | stockCSV.close(); 192 | customerCSV.close(); 193 | historyCSV.close(); 194 | orderCSV.close(); 195 | orderLineCSV.close(); 196 | newOrderCSV.close(); 197 | } 198 | catch (IOException ie) 199 | { 200 | System.err.println(ie.getMessage()); 201 | System.exit(3); 202 | } 203 | } 204 | } // End of main() 205 | 206 | public static void configAppend(StringBuffer buf) 207 | throws IOException 208 | { 209 | synchronized(configCSV) 210 | { 211 | configCSV.write(buf.toString()); 212 | } 213 | buf.setLength(0); 214 | } 215 | 216 | public static void itemAppend(StringBuffer buf) 217 | throws IOException 218 | { 219 | synchronized(itemCSV) 220 | { 221 | itemCSV.write(buf.toString()); 222 | } 223 | buf.setLength(0); 224 | } 225 | 226 | public static void warehouseAppend(StringBuffer buf) 227 | throws IOException 228 | { 229 | synchronized(warehouseCSV) 230 | { 231 | warehouseCSV.write(buf.toString()); 232 | } 233 | buf.setLength(0); 234 | } 235 | 236 | public static void districtAppend(StringBuffer buf) 237 | throws IOException 238 | { 239 | synchronized(districtCSV) 240 | { 241 | districtCSV.write(buf.toString()); 242 | } 243 | buf.setLength(0); 244 | } 245 | 246 | public static void stockAppend(StringBuffer buf) 247 | throws IOException 248 | { 249 | synchronized(stockCSV) 250 | { 251 | stockCSV.write(buf.toString()); 252 | } 253 | buf.setLength(0); 254 | } 255 | 256 | public static void customerAppend(StringBuffer buf) 257 | throws IOException 258 | { 259 | synchronized(customerCSV) 260 | { 261 | customerCSV.write(buf.toString()); 262 | } 263 | buf.setLength(0); 264 | } 265 | 266 | public static void historyAppend(StringBuffer buf) 267 | throws IOException 268 | { 269 | synchronized(historyCSV) 270 | { 271 | historyCSV.write(buf.toString()); 272 | } 273 | buf.setLength(0); 274 | } 275 | 276 | public static void orderAppend(StringBuffer buf) 277 | throws IOException 278 | { 279 | synchronized(orderCSV) 280 | { 281 | orderCSV.write(buf.toString()); 282 | } 283 | buf.setLength(0); 284 | } 285 | 286 | public static void orderLineAppend(StringBuffer buf) 287 | throws IOException 288 | { 289 | synchronized(orderLineCSV) 290 | { 291 | orderLineCSV.write(buf.toString()); 292 | } 293 | buf.setLength(0); 294 | } 295 | 296 | public static void newOrderAppend(StringBuffer buf) 297 | throws IOException 298 | { 299 | synchronized(newOrderCSV) 300 | { 301 | newOrderCSV.write(buf.toString()); 302 | } 303 | buf.setLength(0); 304 | } 305 | 306 | public static int getNextJob() 307 | { 308 | int job; 309 | 310 | synchronized(nextJobLock) 311 | { 312 | if (nextJob > numWarehouses) 313 | job = -1; 314 | else 315 | job = nextJob++; 316 | } 317 | 318 | return job; 319 | } 320 | 321 | public static int getNumWarehouses() 322 | { 323 | return numWarehouses; 324 | } 325 | 326 | private static String iniGetString(String name) 327 | { 328 | String strVal = null; 329 | 330 | for (int i = 0; i < argv.length - 1; i += 2) 331 | { 332 | if (name.toLowerCase().equals(argv[i].toLowerCase())) 333 | { 334 | strVal = argv[i + 1]; 335 | break; 336 | } 337 | } 338 | 339 | if (strVal == null) 340 | strVal = ini.getProperty(name); 341 | 342 | if (strVal == null) 343 | System.out.println(name + " (not defined)"); 344 | else 345 | if (name.equals("password")) 346 | System.out.println(name + "=***********"); 347 | else 348 | System.out.println(name + "=" + strVal); 349 | return strVal; 
350 | } 351 | 352 | private static String iniGetString(String name, String defVal) 353 | { 354 | String strVal = null; 355 | 356 | for (int i = 0; i < argv.length - 1; i += 2) 357 | { 358 | if (name.toLowerCase().equals(argv[i].toLowerCase())) 359 | { 360 | strVal = argv[i + 1]; 361 | break; 362 | } 363 | } 364 | 365 | if (strVal == null) 366 | strVal = ini.getProperty(name); 367 | 368 | if (strVal == null) 369 | { 370 | System.out.println(name + " (not defined - using default '" + 371 | defVal + "')"); 372 | return defVal; 373 | } 374 | else 375 | if (name.equals("password")) 376 | System.out.println(name + "=***********"); 377 | else 378 | System.out.println(name + "=" + strVal); 379 | return strVal; 380 | } 381 | 382 | private static int iniGetInt(String name) 383 | { 384 | String strVal = iniGetString(name); 385 | 386 | if (strVal == null) 387 | return 0; 388 | return Integer.parseInt(strVal); 389 | } 390 | 391 | private static int iniGetInt(String name, int defVal) 392 | { 393 | String strVal = iniGetString(name); 394 | 395 | if (strVal == null) 396 | return defVal; 397 | return Integer.parseInt(strVal); 398 | } 399 | } 400 | -------------------------------------------------------------------------------- /src/LoadData/LoadDataWorker.java: -------------------------------------------------------------------------------- 1 | /* 2 | * LoadDataWorker - Class to load one Warehouse (or in a special case 3 | * the ITEM table). 4 | * 5 | * Copyright (C) 2016, Denis Lussier 6 | * Copyright (C) 2016, Jan Wieck 7 | * 8 | */ 9 | 10 | import java.sql.*; 11 | import java.util.*; 12 | import java.io.*; 13 | 14 | public class LoadDataWorker implements Runnable 15 | { 16 | private int worker; 17 | private Connection dbConn; 18 | private jTPCCRandom rnd; 19 | 20 | private StringBuffer sb; 21 | private Formatter fmt; 22 | 23 | private boolean writeCSV = false; 24 | private String csvNull = null; 25 | 26 | private PreparedStatement stmtConfig = null; 27 | private PreparedStatement stmtItem = null; 28 | private PreparedStatement stmtWarehouse = null; 29 | private PreparedStatement stmtDistrict = null; 30 | private PreparedStatement stmtStock = null; 31 | private PreparedStatement stmtCustomer = null; 32 | private PreparedStatement stmtHistory = null; 33 | private PreparedStatement stmtOrder = null; 34 | private PreparedStatement stmtOrderLine = null; 35 | private PreparedStatement stmtNewOrder = null; 36 | 37 | private StringBuffer sbConfig = null; 38 | private Formatter fmtConfig = null; 39 | private StringBuffer sbItem = null; 40 | private Formatter fmtItem = null; 41 | private StringBuffer sbWarehouse = null; 42 | private Formatter fmtWarehouse = null; 43 | private StringBuffer sbDistrict = null; 44 | private Formatter fmtDistrict = null; 45 | private StringBuffer sbStock = null; 46 | private Formatter fmtStock = null; 47 | private StringBuffer sbCustomer = null; 48 | private Formatter fmtCustomer = null; 49 | private StringBuffer sbHistory = null; 50 | private Formatter fmtHistory = null; 51 | private StringBuffer sbOrder = null; 52 | private Formatter fmtOrder = null; 53 | private StringBuffer sbOrderLine = null; 54 | private Formatter fmtOrderLine = null; 55 | private StringBuffer sbNewOrder = null; 56 | private Formatter fmtNewOrder = null; 57 | 58 | LoadDataWorker(int worker, String csvNull, jTPCCRandom rnd) 59 | { 60 | this.worker = worker; 61 | this.csvNull = csvNull; 62 | this.rnd = rnd; 63 | 64 | this.sb = new StringBuffer(); 65 | this.fmt = new Formatter(sb); 66 | this.writeCSV = true; 67 | 
68 | this.sbConfig = new StringBuffer(); 69 | this.fmtConfig = new Formatter(sbConfig); 70 | this.sbItem = new StringBuffer(); 71 | this.fmtItem = new Formatter(sbItem); 72 | this.sbWarehouse = new StringBuffer(); 73 | this.fmtWarehouse = new Formatter(sbWarehouse); 74 | this.sbDistrict = new StringBuffer(); 75 | this.fmtDistrict = new Formatter(sbDistrict); 76 | this.sbStock = new StringBuffer(); 77 | this.fmtStock = new Formatter(sbStock); 78 | this.sbCustomer = new StringBuffer(); 79 | this.fmtCustomer = new Formatter(sbCustomer); 80 | this.sbHistory = new StringBuffer(); 81 | this.fmtHistory = new Formatter(sbHistory); 82 | this.sbOrder = new StringBuffer(); 83 | this.fmtOrder = new Formatter(sbOrder); 84 | this.sbOrderLine = new StringBuffer(); 85 | this.fmtOrderLine = new Formatter(sbOrderLine); 86 | this.sbNewOrder = new StringBuffer(); 87 | this.fmtNewOrder = new Formatter(sbNewOrder); 88 | } 89 | 90 | LoadDataWorker(int worker, Connection dbConn, jTPCCRandom rnd) 91 | throws SQLException 92 | { 93 | this.worker = worker; 94 | this.dbConn = dbConn; 95 | this.rnd = rnd; 96 | 97 | this.sb = new StringBuffer(); 98 | this.fmt = new Formatter(sb); 99 | 100 | stmtConfig = dbConn.prepareStatement( 101 | "INSERT INTO bmsql_config (" + 102 | " cfg_name, cfg_value) " + 103 | "VALUES (?, ?)" 104 | ); 105 | stmtItem = dbConn.prepareStatement( 106 | "INSERT INTO bmsql_item (" + 107 | " i_id, i_name, i_price, i_data, i_im_id) " + 108 | "VALUES (?, ?, ?, ?, ?)" 109 | ); 110 | stmtWarehouse = dbConn.prepareStatement( 111 | "INSERT INTO bmsql_warehouse (" + 112 | " w_id, w_ytd, w_tax, w_name, w_street_1, w_street_2, w_city, " + 113 | " w_state, w_zip) " + 114 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" 115 | ); 116 | stmtStock = dbConn.prepareStatement( 117 | "INSERT INTO bmsql_stock ("+ 118 | " s_w_id, s_i_id, s_quantity, s_ytd, s_order_cnt, s_remote_cnt, s_data, s_dist_01, s_dist_02, " + 119 | " s_dist_03, s_dist_04, s_dist_05, s_dist_06, " + 120 | " s_dist_07, s_dist_08, s_dist_09, s_dist_10) " + 121 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" 122 | ); 123 | stmtDistrict = dbConn.prepareStatement( 124 | "INSERT INTO bmsql_district ("+ 125 | " d_w_id, d_id, d_ytd, d_tax, d_next_o_id, d_name, d_street_1, d_street_2, " + 126 | " d_city, d_state, d_zip) " + 127 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" 128 | ); 129 | stmtCustomer = dbConn.prepareStatement( 130 | "INSERT INTO bmsql_customer (" + 131 | " c_w_id, c_d_id, c_id, c_discount, c_credit, c_last, c_first, c_credit_lim, " + 132 | " c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, " + 133 | " c_street_1, c_street_2, c_city, c_state, c_zip, " + 134 | " c_phone, c_since, c_middle, c_data) " + 135 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " + 136 | " ?, ?, ?, ?, ?, ?)" 137 | ); 138 | stmtHistory = dbConn.prepareStatement( 139 | "INSERT INTO bmsql_history (" + 140 | " hist_id, h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, " + 141 | " h_date, h_amount, h_data) " + 142 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" 143 | ); 144 | stmtOrder = dbConn.prepareStatement( 145 | "INSERT INTO bmsql_oorder (" + 146 | " o_w_id, o_d_id, o_id, o_c_id, " + 147 | " o_carrier_id, o_ol_cnt, o_all_local, o_entry_d) " + 148 | "VALUES (?, ?, ?, ?, ?, ?, ?, ?)" 149 | ); 150 | stmtOrderLine = dbConn.prepareStatement( 151 | "INSERT INTO bmsql_order_line (" + 152 | " ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, " + 153 | " ol_delivery_d, ol_amount, ol_supply_w_id, ol_quantity, " + 154 | " ol_dist_info) " + 155 | "VALUES (?, ?, ?, ?, ?, ?, ?, 
?, ?, ?)" 156 | ); 157 | stmtNewOrder = dbConn.prepareStatement( 158 | "INSERT INTO bmsql_new_order (" + 159 | " no_w_id, no_d_id, no_o_id) " + 160 | "VALUES (?, ?, ?)" 161 | ); 162 | } 163 | 164 | /* 165 | * run() 166 | */ 167 | public void run() 168 | { 169 | int job; 170 | 171 | try 172 | { 173 | while ((job = LoadData.getNextJob()) >= 0) 174 | { 175 | if (job == 0) 176 | { 177 | fmt.format("Worker %03d: Loading ITEM", worker); 178 | System.out.println(sb.toString()); 179 | sb.setLength(0); 180 | 181 | loadItem(); 182 | 183 | fmt.format("Worker %03d: Loading ITEM done", worker); 184 | System.out.println(sb.toString()); 185 | sb.setLength(0); 186 | } 187 | else 188 | { 189 | fmt.format("Worker %03d: Loading Warehouse %6d", 190 | worker, job); 191 | System.out.println(sb.toString()); 192 | sb.setLength(0); 193 | 194 | loadWarehouse(job); 195 | 196 | fmt.format("Worker %03d: Loading Warehouse %6d done", 197 | worker, job); 198 | System.out.println(sb.toString()); 199 | sb.setLength(0); 200 | } 201 | } 202 | 203 | /* 204 | * Close the DB connection if in direct DB mode. 205 | */ 206 | if (!writeCSV) 207 | dbConn.close(); 208 | } 209 | catch (SQLException se) 210 | { 211 | while (se != null) 212 | { 213 | fmt.format("Worker %03d: ERROR: %s", worker, se.getMessage()); 214 | System.err.println(sb.toString()); 215 | sb.setLength(0); 216 | se = se.getNextException(); 217 | } 218 | } 219 | catch (Exception e) 220 | { 221 | fmt.format("Worker %03d: ERROR: %s", worker, e.getMessage()); 222 | System.err.println(sb.toString()); 223 | sb.setLength(0); 224 | e.printStackTrace(); 225 | return; 226 | } 227 | } // End run() 228 | 229 | /* ---- 230 | * loadItem() 231 | * 232 | * Load the content of the ITEM table. 233 | * ---- 234 | */ 235 | private void loadItem() 236 | throws SQLException, IOException 237 | { 238 | int i_id; 239 | 240 | if (writeCSV) 241 | { 242 | /* 243 | * Saving CONFIG information in CSV mode. 244 | */ 245 | fmtConfig.format("warehouses,%d\n", LoadData.getNumWarehouses()); 246 | fmtConfig.format("nURandCLast,%d\n", rnd.getNURandCLast()); 247 | fmtConfig.format("nURandCC_ID,%d\n", rnd.getNURandCC_ID()); 248 | fmtConfig.format("nURandCI_ID,%d\n", rnd.getNURandCI_ID()); 249 | 250 | LoadData.configAppend(sbConfig); 251 | } 252 | else 253 | { 254 | /* 255 | * Saving CONFIG information in DB mode. 
256 | */ 257 | stmtConfig.setString(1, "warehouses"); 258 | stmtConfig.setString(2, "" + LoadData.getNumWarehouses()); 259 | stmtConfig.execute(); 260 | 261 | stmtConfig.setString(1, "nURandCLast"); 262 | stmtConfig.setString(2, "" + rnd.getNURandCLast()); 263 | stmtConfig.execute(); 264 | 265 | stmtConfig.setString(1, "nURandCC_ID"); 266 | stmtConfig.setString(2, "" + rnd.getNURandCC_ID()); 267 | stmtConfig.execute(); 268 | 269 | stmtConfig.setString(1, "nURandCI_ID"); 270 | stmtConfig.setString(2, "" + rnd.getNURandCI_ID()); 271 | stmtConfig.execute(); 272 | } 273 | 274 | for (i_id = 1; i_id <= 100000; i_id++) 275 | { 276 | String iData; 277 | 278 | if (i_id != 1 && (i_id - 1) % 100 == 0) 279 | { 280 | if (writeCSV) 281 | { 282 | LoadData.itemAppend(sbItem); 283 | } 284 | else 285 | { 286 | stmtItem.executeBatch(); 287 | stmtItem.clearBatch(); 288 | dbConn.commit(); 289 | } 290 | } 291 | 292 | // Clause 4.3.3.1 for ITEM 293 | if (rnd.nextInt(1, 100) <= 10) 294 | { 295 | int len = rnd.nextInt(26, 50); 296 | int off = rnd.nextInt(0, len - 8); 297 | 298 | iData = rnd.getAString(off, off) + 299 | "ORIGINAL" + 300 | rnd.getAString(len - off - 8, len - off - 8); 301 | } 302 | else 303 | { 304 | iData = rnd.getAString(26, 50); 305 | } 306 | 307 | if (writeCSV) 308 | { 309 | fmtItem.format("%d,%s,%.2f,%s,%d\n", 310 | i_id, 311 | rnd.getAString(14, 24), 312 | ((double)rnd.nextLong(100, 10000)) / 100.0, 313 | iData, 314 | rnd.nextInt(1, 10000)); 315 | 316 | } 317 | else 318 | { 319 | stmtItem.setInt(1, i_id); 320 | stmtItem.setString(2, rnd.getAString(14, 24)); 321 | stmtItem.setDouble(3, ((double)rnd.nextLong(100, 10000)) / 100.0); 322 | stmtItem.setString(4, iData); 323 | stmtItem.setInt(5, rnd.nextInt(1, 10000)); 324 | 325 | stmtItem.addBatch(); 326 | } 327 | } 328 | 329 | if (writeCSV) 330 | { 331 | LoadData.itemAppend(sbItem); 332 | } 333 | else 334 | { 335 | stmtItem.executeBatch(); 336 | stmtItem.clearBatch(); 337 | stmtItem.close(); 338 | 339 | dbConn.commit(); 340 | } 341 | 342 | } // End loadItem() 343 | 344 | /* ---- 345 | * loadWarehouse() 346 | * 347 | * Load the content of one warehouse. 348 | * ---- 349 | */ 350 | private void loadWarehouse(int w_id) 351 | throws SQLException, IOException 352 | { 353 | /* 354 | * Load the WAREHOUSE row. 355 | */ 356 | if (writeCSV) 357 | { 358 | fmtWarehouse.format("%d,%.2f,%.4f,%s,%s,%s,%s,%s,%s\n", 359 | w_id, 360 | 300000.0, 361 | ((double)rnd.nextLong(0, 2000)) / 10000.0, 362 | rnd.getAString(6, 10), 363 | rnd.getAString(10, 20), 364 | rnd.getAString(10, 20), 365 | rnd.getAString(10, 20), 366 | rnd.getState(), 367 | rnd.getNString(4, 4) + "11111"); 368 | 369 | LoadData.warehouseAppend(sbWarehouse); 370 | } 371 | else 372 | { 373 | stmtWarehouse.setInt(1, w_id); 374 | stmtWarehouse.setDouble(2, 300000.0); 375 | stmtWarehouse.setDouble(3, ((double)rnd.nextLong(0, 2000)) / 10000.0); 376 | stmtWarehouse.setString(4, rnd.getAString(6, 10)); 377 | stmtWarehouse.setString(5, rnd.getAString(10, 20)); 378 | stmtWarehouse.setString(6, rnd.getAString(10, 20)); 379 | stmtWarehouse.setString(7, rnd.getAString(10, 20)); 380 | stmtWarehouse.setString(8, rnd.getState()); 381 | stmtWarehouse.setString(9, rnd.getNString(4, 4) + "11111"); 382 | 383 | stmtWarehouse.execute(); 384 | } 385 | 386 | /* 387 | * For each WAREHOUSE there are 100,000 STOCK rows. 388 | */ 389 | for (int s_i_id = 1; s_i_id <= 100000; s_i_id++) 390 | { 391 | String sData; 392 | /* 393 | * Load the data in batches of 500 rows. 
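 *
 * This is the flush idiom used for every table in the loader; as a
 * stand-alone sketch (row, BATCH and stmt are placeholders; BATCH is
 * 500 here, 100 or 200 for the other tables):
 *
 *     if (row != 1 && (row - 1) % BATCH == 0)
 *     {
 *         stmt.executeBatch();    // ship the queued INSERTs
 *         stmt.clearBatch();
 *         dbConn.commit();        // keep each transaction small
 *     }
 *
 * In CSV mode the same trigger instead hands the worker's private
 * StringBuffer to the synchronized writer in LoadData.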
394 |              */
395 |             if (s_i_id != 1 && (s_i_id - 1) % 500 == 0)
396 |             {
397 |                 if (writeCSV)
398 |                     LoadData.stockAppend(sbStock);
399 |                 else
400 |                 {
401 |                     stmtStock.executeBatch();
402 |                     stmtStock.clearBatch();
403 |                     dbConn.commit();
404 |                 }
405 |             }
406 | 
407 |             // Clause 4.3.3.1 for STOCK
408 |             if (rnd.nextInt(1, 100) <= 10)
409 |             {
410 |                 int len = rnd.nextInt(26, 50);
411 |                 int off = rnd.nextInt(0, len - 8);
412 | 
413 |                 sData = rnd.getAString(off, off) +
414 |                         "ORIGINAL" +
415 |                         rnd.getAString(len - off - 8, len - off - 8);
416 |             }
417 |             else
418 |             {
419 |                 sData = rnd.getAString(26, 50);
420 |             }
421 | 
422 |             if (writeCSV)
423 |             {
424 |                 fmtStock.format("%d,%d,%d,%d,%d,%d,%s," +
425 |                                 "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n",
426 |                                 w_id,
427 |                                 s_i_id,
428 |                                 rnd.nextInt(10, 100),
429 |                                 0,
430 |                                 0,
431 |                                 0,
432 |                                 sData,
433 |                                 rnd.getAString(24, 24),
434 |                                 rnd.getAString(24, 24),
435 |                                 rnd.getAString(24, 24),
436 |                                 rnd.getAString(24, 24),
437 |                                 rnd.getAString(24, 24),
438 |                                 rnd.getAString(24, 24),
439 |                                 rnd.getAString(24, 24),
440 |                                 rnd.getAString(24, 24),
441 |                                 rnd.getAString(24, 24),
442 |                                 rnd.getAString(24, 24));
443 |             }
444 |             else
445 |             {
446 |                 stmtStock.setInt(1, w_id);
447 |                 stmtStock.setInt(2, s_i_id);
448 |                 stmtStock.setInt(3, rnd.nextInt(10, 100));
449 |                 stmtStock.setInt(4, 0);
450 |                 stmtStock.setInt(5, 0);
451 |                 stmtStock.setInt(6, 0);
452 |                 stmtStock.setString(7, sData);
453 |                 stmtStock.setString(8, rnd.getAString(24, 24));
454 |                 stmtStock.setString(9, rnd.getAString(24, 24));
455 |                 stmtStock.setString(10, rnd.getAString(24, 24));
456 |                 stmtStock.setString(11, rnd.getAString(24, 24));
457 |                 stmtStock.setString(12, rnd.getAString(24, 24));
458 |                 stmtStock.setString(13, rnd.getAString(24, 24));
459 |                 stmtStock.setString(14, rnd.getAString(24, 24));
460 |                 stmtStock.setString(15, rnd.getAString(24, 24));
461 |                 stmtStock.setString(16, rnd.getAString(24, 24));
462 |                 stmtStock.setString(17, rnd.getAString(24, 24));
463 | 
464 |                 stmtStock.addBatch();
465 |             }
466 | 
467 |         }
468 |         if (writeCSV)
469 |         {
470 |             LoadData.stockAppend(sbStock);
471 |         }
472 |         else
473 |         {
474 |             stmtStock.executeBatch();
475 |             stmtStock.clearBatch();
476 |             dbConn.commit();
477 |         }
478 | 
479 |         /*
480 |          * For each WAREHOUSE there are 10 DISTRICT rows.
481 |          */
482 |         for (int d_id = 1; d_id <= 10; d_id++)
483 |         {
484 |             if (writeCSV)
485 |             {
486 |                 fmtDistrict.format("%d,%d,%.2f,%.4f,%d,%s,%s,%s,%s,%s,%s\n",
487 |                                    w_id,
488 |                                    d_id,
489 |                                    30000.0,
490 |                                    ((double)rnd.nextLong(0, 2000)) / 10000.0,
491 |                                    3001,
492 |                                    rnd.getAString(6, 10),
493 |                                    rnd.getAString(10, 20),
494 |                                    rnd.getAString(10, 20),
495 |                                    rnd.getAString(10, 20),
496 |                                    rnd.getState(),
497 |                                    rnd.getNString(4, 4) + "11111");
498 | 
499 |                 LoadData.districtAppend(sbDistrict);
500 |             }
501 |             else
502 |             {
503 |                 stmtDistrict.setInt(1, w_id);
504 |                 stmtDistrict.setInt(2, d_id);
505 |                 stmtDistrict.setDouble(3, 30000.0);
506 |                 stmtDistrict.setDouble(4, ((double)rnd.nextLong(0, 2000)) / 10000.0);
507 |                 stmtDistrict.setInt(5, 3001);
508 |                 stmtDistrict.setString(6, rnd.getAString(6, 10));
509 |                 stmtDistrict.setString(7, rnd.getAString(10, 20));
510 |                 stmtDistrict.setString(8, rnd.getAString(10, 20));
511 |                 stmtDistrict.setString(9, rnd.getAString(10, 20));
512 |                 stmtDistrict.setString(10, rnd.getState());
513 |                 stmtDistrict.setString(11, rnd.getNString(4, 4) + "11111");
514 | 
515 |                 stmtDistrict.execute();
516 |             }
517 | 
518 |             /*
519 |              * Within each DISTRICT there are 3,000 CUSTOMERs.
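 *
 * Per-warehouse row counts produced by this method, for sizing
 * estimates:
 *
 *     STOCK        100,000
 *     DISTRICT     10
 *     CUSTOMER     10 x 3,000        =  30,000
 *     HISTORY      one per customer  =  30,000
 *     ORDER        10 x 3,000        =  30,000
 *     ORDER_LINE   5..15 per order   ~ 300,000
 *     NEW_ORDER    10 x 900          =   9,000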
520 |              */
521 |             for (int c_id = 1; c_id <= 3000; c_id++)
522 |             {
523 |                 // Flush the CUSTOMER and HISTORY batches every 200 rows.
524 |                 if (c_id != 1 && (c_id - 1) % 200 == 0)
525 |                 {
526 |                     if (writeCSV) {
527 |                         LoadData.customerAppend(sbCustomer);
528 |                         LoadData.historyAppend(sbHistory);
529 |                     }
530 |                     else
531 |                     {
532 |                         stmtCustomer.executeBatch();
533 |                         stmtCustomer.clearBatch();
534 |                         dbConn.commit();
535 | 
536 |                         stmtHistory.executeBatch();
537 |                         stmtHistory.clearBatch();
538 |                         dbConn.commit();
539 |                     }
540 |                 }
541 | 
542 |                 if (writeCSV)
543 |                 {
544 |                     fmtCustomer.format("%d,%d,%d,%.4f,%s,%s,%s," +
545 |                                        "%.2f,%.2f,%.2f,%d,%d," +
546 |                                        "%s,%s,%s,%s,%s,%s,%s,%s,%s\n",
547 |                                        w_id,
548 |                                        d_id,
549 |                                        c_id,
550 |                                        ((double)rnd.nextLong(0, 5000)) / 10000.0,
551 |                                        (rnd.nextInt(1, 100) <= 90) ? "GC" : "BC",
552 |                                        (c_id <= 1000) ? rnd.getCLast(c_id - 1) : rnd.getCLast(),
553 |                                        rnd.getAString(8, 16),
554 |                                        50000.00,
555 |                                        -10.00,
556 |                                        10.00,
557 |                                        1,
558 |                                        1,
559 |                                        rnd.getAString(10, 20),
560 |                                        rnd.getAString(10, 20),
561 |                                        rnd.getAString(10, 20),
562 |                                        rnd.getState(),
563 |                                        rnd.getNString(4, 4) + "11111",
564 |                                        rnd.getNString(16, 16),
565 |                                        new java.sql.Timestamp(System.currentTimeMillis()).toString(),
566 |                                        "OE",
567 |                                        rnd.getAString(300, 500));
568 |                 }
569 |                 else
570 |                 {
571 |                     stmtCustomer.setInt(1, w_id);
572 |                     stmtCustomer.setInt(2, d_id);
573 |                     stmtCustomer.setInt(3, c_id);
574 |                     stmtCustomer.setDouble(4, ((double)rnd.nextLong(0, 5000)) / 10000.0);
575 |                     if (rnd.nextInt(1, 100) <= 90)
576 |                         stmtCustomer.setString(5, "GC");
577 |                     else
578 |                         stmtCustomer.setString(5, "BC");
579 |                     if (c_id <= 1000)
580 |                         stmtCustomer.setString(6, rnd.getCLast(c_id - 1));
581 |                     else
582 |                         stmtCustomer.setString(6, rnd.getCLast());
583 |                     stmtCustomer.setString(7, rnd.getAString(8, 16));
584 |                     stmtCustomer.setDouble(8, 50000.00);
585 |                     stmtCustomer.setDouble(9, -10.00);
586 |                     stmtCustomer.setDouble(10, 10.00);
587 |                     stmtCustomer.setInt(11, 1);
588 |                     stmtCustomer.setInt(12, 1);
589 |                     stmtCustomer.setString(13, rnd.getAString(10, 20));
590 |                     stmtCustomer.setString(14, rnd.getAString(10, 20));
591 |                     stmtCustomer.setString(15, rnd.getAString(10, 20));
592 |                     stmtCustomer.setString(16, rnd.getState());
593 |                     stmtCustomer.setString(17, rnd.getNString(4, 4) + "11111");
594 |                     stmtCustomer.setString(18, rnd.getNString(16, 16));
595 |                     stmtCustomer.setTimestamp(19, new java.sql.Timestamp(System.currentTimeMillis()));
596 |                     stmtCustomer.setString(20, "OE");
597 |                     stmtCustomer.setString(21, rnd.getAString(300, 500));
598 | 
599 |                     stmtCustomer.addBatch();
600 |                 }
601 | 
602 |                 /*
603 |                  * For each CUSTOMER there is one row in HISTORY.
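 *
 * The synthetic hist_id below packs (w_id, d_id, c_id) into one
 * dense, unique integer:
 *
 *     hist_id = (w_id - 1) * 30000 + (d_id - 1) * 3000 + c_id
 *
 * e.g. w_id = 2, d_id = 3, c_id = 10 gives 30000 + 6000 + 10 = 36010.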
604 |                  */
605 |                 if (writeCSV)
606 |                 {
607 |                     fmtHistory.format("%d,%d,%d,%d,%d,%d,%s,%.2f,%s\n",
608 |                                       (w_id - 1) * 30000 + (d_id - 1) * 3000 + c_id,
609 |                                       c_id,
610 |                                       d_id,
611 |                                       w_id,
612 |                                       d_id,
613 |                                       w_id,
614 |                                       new java.sql.Timestamp(System.currentTimeMillis()).toString(),
615 |                                       10.00,
616 |                                       rnd.getAString(12, 24));
617 |                 }
618 |                 else
619 |                 {
620 |                     stmtHistory.setInt(1, (w_id - 1) * 30000 + (d_id - 1) * 3000 + c_id);
621 |                     stmtHistory.setInt(2, c_id);
622 |                     stmtHistory.setInt(3, d_id);
623 |                     stmtHistory.setInt(4, w_id);
624 |                     stmtHistory.setInt(5, d_id);
625 |                     stmtHistory.setInt(6, w_id);
626 |                     stmtHistory.setTimestamp(7, new java.sql.Timestamp(System.currentTimeMillis()));
627 |                     stmtHistory.setDouble(8, 10.00);
628 |                     stmtHistory.setString(9, rnd.getAString(12, 24));
629 | 
630 |                     stmtHistory.addBatch();
631 |                 }
632 |             }
633 | 
634 |             if (writeCSV)
635 |             {
636 |                 LoadData.customerAppend(sbCustomer);
637 |                 LoadData.historyAppend(sbHistory);
638 |             }
639 |             else
640 |             {
641 |                 stmtCustomer.executeBatch();
642 |                 stmtCustomer.clearBatch();
643 |                 dbConn.commit();
644 |                 stmtHistory.executeBatch();
645 |                 stmtHistory.clearBatch();
646 |                 dbConn.commit();
647 |             }
648 | 
649 |             /*
650 |              * For the ORDER rows the TPC-C specification demands that they
651 |              * are generated using a random permutation of all 3,000
652 |              * customers. To do that we set up an array with all C_IDs
653 |              * and then randomly shuffle it.
654 |              */
655 |             int randomCID[] = new int[3000];
656 |             for (int i = 0; i < 3000; i++)
657 |                 randomCID[i] = i + 1;
658 |             for (int i = 0; i < 3000; i++)
659 |             {
660 |                 int x = rnd.nextInt(0, 2999);
661 |                 int y = rnd.nextInt(0, 2999);
662 |                 int tmp = randomCID[x];
663 |                 randomCID[x] = randomCID[y];
664 |                 randomCID[y] = tmp;
665 |             }
666 | 
667 |             for (int o_id = 1; o_id <= 3000; o_id++)
668 |             {
669 |                 int o_ol_cnt = rnd.nextInt(5, 15);
670 | 
671 |                 // Flush the ORDER, ORDER_LINE and NEW_ORDER batches every 100 orders.
672 |                 if (o_id != 1 && (o_id - 1) % 100 == 0)
673 |                 {
674 |                     if (writeCSV)
675 |                     {
676 |                         LoadData.orderAppend(sbOrder);
677 |                         LoadData.orderLineAppend(sbOrderLine);
678 |                         LoadData.newOrderAppend(sbNewOrder);
679 |                     }
680 |                     else
681 |                     {
682 |                         stmtOrder.executeBatch();
683 |                         stmtOrder.clearBatch();
684 |                         dbConn.commit();
685 | 
686 |                         stmtOrderLine.executeBatch();
687 |                         stmtOrderLine.clearBatch();
688 |                         dbConn.commit();
689 | 
690 |                         stmtNewOrder.executeBatch();
691 |                         stmtNewOrder.clearBatch();
692 |                         dbConn.commit();
693 |                     }
694 |                 }
695 | 
696 |                 if (writeCSV)
697 |                 {
698 |                     fmtOrder.format("%d,%d,%d,%d,%s,%d,%d,%s\n",
699 |                                     w_id,
700 |                                     d_id,
701 |                                     o_id,
702 |                                     randomCID[o_id - 1],
703 |                                     (o_id < 2101) ? rnd.nextInt(1, 10) : csvNull,
704 |                                     o_ol_cnt,
705 |                                     1,
706 |                                     new java.sql.Timestamp(System.currentTimeMillis()).toString());
707 |                 }
708 |                 else
709 |                 {
710 |                     stmtOrder.setInt(1, w_id);
711 |                     stmtOrder.setInt(2, d_id);
712 |                     stmtOrder.setInt(3, o_id);
713 |                     stmtOrder.setInt(4, randomCID[o_id - 1]);
714 |                     if (o_id < 2101)
715 |                         stmtOrder.setInt(5, rnd.nextInt(1, 10));
716 |                     else
717 |                         stmtOrder.setNull(5, java.sql.Types.INTEGER);
718 |                     stmtOrder.setInt(6, o_ol_cnt);
719 |                     stmtOrder.setInt(7, 1);
720 |                     stmtOrder.setTimestamp(8, new java.sql.Timestamp(System.currentTimeMillis()));
721 | 
722 |                     stmtOrder.addBatch();
723 |                 }
724 | 
725 |                 /*
726 |                  * Create the ORDER_LINE rows for this ORDER.
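 *
 * The o_id < 2101 tests in this loop and below implement the spec's
 * split of each district's 3,000 orders: the first 2,100 are already
 * delivered (carrier assigned, delivery date set, ol_amount 0.00),
 * while the remaining 900 are open - NULL carrier and delivery date,
 * a random ol_amount, and a matching NEW_ORDER row.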
727 |                  */
728 |                 for (int ol_number = 1; ol_number <= o_ol_cnt; ol_number++)
729 |                 {
730 |                     long now = System.currentTimeMillis();
731 | 
732 |                     if (writeCSV)
733 |                     {
734 |                         fmtOrderLine.format("%d,%d,%d,%d,%d,%s,%.2f,%d,%d,%s\n",
735 |                                             w_id,
736 |                                             d_id,
737 |                                             o_id,
738 |                                             ol_number,
739 |                                             rnd.nextInt(1, 100000),
740 |                                             (o_id < 2101) ? new java.sql.Timestamp(now).toString() : csvNull,
741 |                                             (o_id < 2101) ? 0.00 : ((double)rnd.nextLong(1, 999999)) / 100.0,
742 |                                             w_id,
743 |                                             5,
744 |                                             rnd.getAString(24, 24));
745 |                     }
746 |                     else
747 |                     {
748 |                         stmtOrderLine.setInt(1, w_id);
749 |                         stmtOrderLine.setInt(2, d_id);
750 |                         stmtOrderLine.setInt(3, o_id);
751 |                         stmtOrderLine.setInt(4, ol_number);
752 |                         stmtOrderLine.setInt(5, rnd.nextInt(1, 100000));
753 |                         if (o_id < 2101)
754 |                             stmtOrderLine.setTimestamp(6, new java.sql.Timestamp(now));
755 |                         else
756 |                             stmtOrderLine.setNull(6, java.sql.Types.TIMESTAMP);
757 |                         if (o_id < 2101)
758 |                             stmtOrderLine.setDouble(7, 0.00);
759 |                         else
760 |                             stmtOrderLine.setDouble(7, ((double)rnd.nextLong(1, 999999)) / 100.0);
761 |                         stmtOrderLine.setInt(8, w_id);
762 |                         stmtOrderLine.setInt(9, 5);
763 |                         stmtOrderLine.setString(10, rnd.getAString(24, 24));
764 | 
765 |                         stmtOrderLine.addBatch();
766 |                     }
767 |                 }
768 | 
769 |                 /*
770 |                  * The last 900 ORDERs are not yet delivered and have a
771 |                  * row in NEW_ORDER.
772 |                  */
773 |                 if (o_id >= 2101)
774 |                 {
775 |                     if (writeCSV)
776 |                     {
777 |                         fmtNewOrder.format("%d,%d,%d\n",
778 |                                            w_id,
779 |                                            d_id,
780 |                                            o_id);
781 |                     }
782 |                     else
783 |                     {
784 |                         stmtNewOrder.setInt(1, w_id);
785 |                         stmtNewOrder.setInt(2, d_id);
786 |                         stmtNewOrder.setInt(3, o_id);
787 | 
788 |                         stmtNewOrder.addBatch();
789 |                     }
790 |                 }
791 |             }
792 | 
793 |             if (writeCSV)
794 |             {
795 |                 LoadData.orderAppend(sbOrder);
796 |                 LoadData.orderLineAppend(sbOrderLine);
797 |                 LoadData.newOrderAppend(sbNewOrder);
798 |             }
799 |             else
800 |             {
801 |                 stmtOrder.executeBatch();
802 |                 stmtOrder.clearBatch();
803 |                 dbConn.commit();
804 |                 stmtOrderLine.executeBatch();
805 |                 stmtOrderLine.clearBatch();
806 |                 dbConn.commit();
807 |                 stmtNewOrder.executeBatch();
808 |                 stmtNewOrder.clearBatch();
809 |                 dbConn.commit();
810 |             }
811 |         }
812 | 
813 |         if (!writeCSV)
814 |             dbConn.commit();
815 |     } // End loadWarehouse()
816 | }
817 | 
--------------------------------------------------------------------------------
/src/OSCollector/OSCollector.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * OSCollector.java
3 |  *
4 |  * Copyright (C) 2016, Denis Lussier
5 |  * Copyright (C) 2016, Jan Wieck
6 |  *
7 |  */
8 | 
9 | import org.apache.log4j.*;
10 | 
11 | import java.lang.*;
12 | import java.io.*;
13 | import java.util.*;
14 | 
15 | public class OSCollector
16 | {
17 |     private String script;
18 |     private int interval;
19 |     private String sshAddress;
20 |     private String devices;
21 |     private File outputDir;
22 |     private Logger log;
23 | 
24 |     private CollectData collector = null;
25 |     private Thread collectorThread = null;
26 |     private boolean endCollection = false;
27 |     private Process collProc;
28 | 
29 |     private BufferedWriter resultCSVs[];
30 | 
31 |     public OSCollector(String script, int runID, int interval,
32 |                        String sshAddress, String devices, File outputDir,
33 |                        Logger log)
34 |     {
35 |         List<String> cmdLine = new ArrayList<String>();
36 |         String deviceNames[];
37 | 
38 |         this.script = script;
39 |         this.interval = interval;
40 |         this.sshAddress = sshAddress;
41 |         this.devices = devices;
42 |         this.outputDir = outputDir;
43 |         this.log = log;
44 | 
45 |         if (sshAddress != null)
46 |         {
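            /*
             * The command line assembled here and below amounts to
             *
             *     [ssh <sshAddress>] python - <runID> <interval> [device ...]
             *
             * "python -" makes the interpreter read the collector script
             * from stdin; the script is streamed to the child after
             * startup, so it never has to exist on a remote database
             * host. The helper then emits one line per output file per
             * interval, which CollectData.run() demultiplexes round-robin
             * into sys_info.csv and one CSV per device.
             */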
cmdLine.add("ssh"); 48 | // cmdLine.add("-t"); 49 | cmdLine.add(sshAddress); 50 | } 51 | cmdLine.add("python"); 52 | cmdLine.add("-"); 53 | cmdLine.add(Integer.toString(runID)); 54 | cmdLine.add(Integer.toString(interval)); 55 | if (devices != null) 56 | deviceNames = devices.split("[ \t]+"); 57 | else 58 | deviceNames = new String[0]; 59 | 60 | try 61 | { 62 | resultCSVs = new BufferedWriter[deviceNames.length + 1]; 63 | resultCSVs[0] = new BufferedWriter(new FileWriter( 64 | new File(outputDir, "sys_info.csv"))); 65 | for (int i = 0; i < deviceNames.length; i++) 66 | { 67 | cmdLine.add(deviceNames[i]); 68 | resultCSVs[i + 1] = new BufferedWriter(new FileWriter( 69 | new File(outputDir, deviceNames[i] + ".csv"))); 70 | } 71 | } 72 | catch (Exception e) 73 | { 74 | log.error("OSCollector, " + e.getMessage()); 75 | System.exit(1); 76 | } 77 | 78 | try 79 | { 80 | ProcessBuilder pb = new ProcessBuilder(cmdLine); 81 | pb.redirectError(ProcessBuilder.Redirect.INHERIT); 82 | 83 | collProc = pb.start(); 84 | 85 | BufferedReader scriptReader = new BufferedReader(new FileReader(script)); 86 | BufferedWriter scriptWriter = new BufferedWriter( 87 | new OutputStreamWriter(collProc.getOutputStream())); 88 | String line; 89 | while ((line = scriptReader.readLine()) != null) 90 | { 91 | scriptWriter.write(line); 92 | scriptWriter.newLine(); 93 | } 94 | scriptWriter.close(); 95 | scriptReader.close(); 96 | } 97 | catch (Exception e) 98 | { 99 | log.error("OSCollector " + e.getMessage()); 100 | e.printStackTrace(); 101 | System.exit(1); 102 | } 103 | 104 | collector = new CollectData(this); 105 | collectorThread = new Thread(this.collector); 106 | collectorThread.start(); 107 | } 108 | 109 | public void stop() 110 | { 111 | endCollection = true; 112 | try 113 | { 114 | collectorThread.join(); 115 | } 116 | catch (InterruptedException ie) 117 | { 118 | log.error("OSCollector, " + ie.getMessage()); 119 | return; 120 | } 121 | } 122 | 123 | private class CollectData implements Runnable 124 | { 125 | private OSCollector parent; 126 | 127 | public CollectData(OSCollector parent) 128 | { 129 | this.parent = parent; 130 | } 131 | 132 | public void run() 133 | { 134 | BufferedReader osData; 135 | String line; 136 | int resultIdx = 0; 137 | 138 | osData = new BufferedReader(new InputStreamReader( 139 | parent.collProc.getInputStream())); 140 | 141 | while (!endCollection || resultIdx != 0) 142 | { 143 | try 144 | { 145 | line = osData.readLine(); 146 | if (line == null) 147 | { 148 | log.error("OSCollector, unexpected EOF " + 149 | "while reading from external " + 150 | "helper process"); 151 | break; 152 | } 153 | parent.resultCSVs[resultIdx].write(line); 154 | parent.resultCSVs[resultIdx].newLine(); 155 | parent.resultCSVs[resultIdx].flush(); 156 | if (++resultIdx >= parent.resultCSVs.length) 157 | resultIdx = 0; 158 | } 159 | catch (Exception e) 160 | { 161 | log.error("OSCollector, " + e.getMessage()); 162 | break; 163 | } 164 | } 165 | 166 | try 167 | { 168 | osData.close(); 169 | for (int i = 0; i < parent.resultCSVs.length; i++) 170 | parent.resultCSVs[i].close(); 171 | } 172 | catch (Exception e) 173 | { 174 | log.error("OSCollector, " + e.getMessage()); 175 | } 176 | } 177 | } 178 | } 179 | 180 | 181 | -------------------------------------------------------------------------------- /src/client/CommitException.java: -------------------------------------------------------------------------------- 1 | public class CommitException extends RuntimeException { 2 | 3 | private static final long serialVersionUID = 
2135244094396431474L; 4 | 5 | @Override 6 | public synchronized Throwable fillInStackTrace() { 7 | return this; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/client/jTPCC.java: -------------------------------------------------------------------------------- 1 | /* 2 | * jTPCC - Open Source Java implementation of a TPC-C like benchmark 3 | * 4 | * Copyright (C) 2003, Raul Barbosa 5 | * Copyright (C) 2004-2016, Denis Lussier 6 | * Copyright (C) 2016, Jan Wieck 7 | * 8 | */ 9 | 10 | import org.apache.log4j.*; 11 | 12 | import java.io.*; 13 | import java.nio.file.*; 14 | import java.sql.*; 15 | import java.util.*; 16 | import java.util.regex.Pattern; 17 | import java.text.*; 18 | 19 | 20 | public class jTPCC implements jTPCCConfig 21 | { 22 | private static org.apache.log4j.Logger log = Logger.getLogger(jTPCC.class); 23 | private static String resultDirName = null; 24 | private static BufferedWriter resultCSV = null; 25 | private static BufferedWriter runInfoCSV = null; 26 | private static int runID = 0; 27 | 28 | private int dbType = DB_UNKNOWN; 29 | private int currentlyDisplayedTerminal; 30 | 31 | private jTPCCTerminal[] terminals; 32 | private String[] terminalNames; 33 | private boolean terminalsBlockingExit = false; 34 | private long terminalsStarted = 0, sessionCount = 0, transactionCount = 0; 35 | private Object counterLock = new Object(); 36 | 37 | private long newOrderCounter = 0, sessionStartTimestamp, sessionEndTimestamp, sessionNextTimestamp=0, sessionNextKounter=0; 38 | private long sessionEndTargetTime = -1, fastNewOrderCounter, recentTpmC=0, recentTpmTotal=0; 39 | private boolean signalTerminalsRequestEndSent = false, databaseDriverLoaded = false; 40 | 41 | private FileOutputStream fileOutputStream; 42 | private PrintStream printStreamReport; 43 | private String sessionStart, sessionEnd; 44 | private int limPerMin_Terminal; 45 | 46 | private double tpmC; 47 | private jTPCCRandom rnd; 48 | private OSCollector osCollector = null; 49 | private HashMap costPerWorkerload; 50 | 51 | public static void main(String args[]) 52 | { 53 | PropertyConfigurator.configure("log4j.properties"); 54 | new jTPCC(); 55 | } 56 | 57 | private String getProp (Properties p, String pName) 58 | { 59 | String prop = p.getProperty(pName); 60 | costPerWorkerload = new HashMap(); 61 | log.info("Term-00, " + pName + "=" + prop); 62 | return(prop); 63 | } 64 | 65 | public jTPCC() 66 | { 67 | 68 | // load the ini file 69 | Properties ini = new Properties(); 70 | try { 71 | ini.load( new FileInputStream(System.getProperty("prop"))); 72 | } catch (IOException e) { 73 | errorMessage("Term-00, could not load properties file"); 74 | } 75 | 76 | log.info("Term-00, "); 77 | log.info("Term-00, +-------------------------------------------------------------+"); 78 | log.info("Term-00, BenchmarkSQL v" + JTPCCVERSION); 79 | log.info("Term-00, +-------------------------------------------------------------+"); 80 | log.info("Term-00, (c) 2003, Raul Barbosa"); 81 | log.info("Term-00, (c) 2004-2016, Denis Lussier"); 82 | log.info("Term-00, (c) 2016, Jan Wieck"); 83 | log.info("Term-00, +-------------------------------------------------------------+"); 84 | log.info("Term-00, "); 85 | String iDB = getProp(ini,"db"); 86 | String iDriver = getProp(ini,"driver"); 87 | String iConn = getProp(ini,"conn"); 88 | String iUser = getProp(ini,"user"); 89 | String iPassword = ini.getProperty("password"); 90 | 91 | log.info("Term-00, "); 92 | String iWarehouses = 
getProp(ini,"warehouses"); 93 | String iTerminals = getProp(ini,"terminals"); 94 | 95 | String iRunTxnsPerTerminal = ini.getProperty("runTxnsPerTerminal"); 96 | String iRunMins = ini.getProperty("runMins"); 97 | if (Integer.parseInt(iRunTxnsPerTerminal) ==0 && Integer.parseInt(iRunMins)!=0){ 98 | log.info("Term-00, runMins" + "=" + iRunMins); 99 | }else if(Integer.parseInt(iRunTxnsPerTerminal) !=0 && Integer.parseInt(iRunMins)==0){ 100 | log.info("Term-00, runTxnsPerTerminal" + "=" + iRunTxnsPerTerminal); 101 | }else{ 102 | errorMessage("Term-00, Must indicate either transactions per terminal or number of run minutes!"); 103 | }; 104 | String limPerMin = getProp(ini,"limitTxnsPerMin"); 105 | String iTermWhseFixed = getProp(ini,"terminalWarehouseFixed"); 106 | log.info("Term-00, "); 107 | String iNewOrderWeight = getProp(ini,"newOrderWeight"); 108 | String iPaymentWeight = getProp(ini,"paymentWeight"); 109 | String iOrderStatusWeight = getProp(ini,"orderStatusWeight"); 110 | String iDeliveryWeight = getProp(ini,"deliveryWeight"); 111 | String iStockLevelWeight = getProp(ini,"stockLevelWeight"); 112 | 113 | log.info("Term-00, "); 114 | String resultDirectory = getProp(ini, "resultDirectory"); 115 | String osCollectorScript = getProp(ini, "osCollectorScript"); 116 | 117 | log.info("Term-00, "); 118 | 119 | if (iDB.equals("firebird")) 120 | dbType = DB_FIREBIRD; 121 | else if (iDB.equals("oracle")) 122 | dbType = DB_ORACLE; 123 | else if (iDB.equals("postgres")) 124 | dbType = DB_POSTGRES; 125 | else if (iDB.equals("mysql")) 126 | dbType = DB_MYSQL; 127 | else 128 | { 129 | log.error("unknown database type '" + iDB + "'"); 130 | return; 131 | } 132 | 133 | if(Integer.parseInt(limPerMin) !=0){ 134 | limPerMin_Terminal = Integer.parseInt(limPerMin)/Integer.parseInt(iTerminals); 135 | } 136 | else{ 137 | limPerMin_Terminal = -1; 138 | } 139 | 140 | 141 | boolean iRunMinsBool=false; 142 | 143 | try 144 | { 145 | String driver = iDriver; 146 | printMessage("Loading database driver: \'" + driver + "\'..."); 147 | Class.forName(iDriver); 148 | databaseDriverLoaded = true; 149 | } 150 | catch(Exception ex) 151 | { 152 | errorMessage("Unable to load the database driver!"); 153 | databaseDriverLoaded = false; 154 | } 155 | 156 | if (databaseDriverLoaded && resultDirectory != null) 157 | { 158 | StringBuffer sb = new StringBuffer(); 159 | Formatter fmt = new Formatter(sb); 160 | Pattern p = Pattern.compile("%t"); 161 | Calendar cal = Calendar.getInstance(); 162 | 163 | String iRunID; 164 | 165 | iRunID = System.getProperty("runID"); 166 | if (iRunID != null) 167 | { 168 | runID = Integer.parseInt(iRunID); 169 | } 170 | 171 | /* 172 | * Split the resultDirectory into strings around 173 | * patterns of %t and then insert date/time formatting 174 | * based on the current time. That way the resultDirectory 175 | * in the properties file can have date/time format 176 | * elements like in result_%tY-%tm-%td to embed the current 177 | * date in the directory name. 178 | */ 179 | String[] parts = p.split(resultDirectory, -1); 180 | sb.append(parts[0]); 181 | for (int i = 1; i < parts.length; i++) 182 | { 183 | fmt.format("%t" + parts[i].substring(0, 1), cal); 184 | sb.append(parts[i].substring(1)); 185 | } 186 | resultDirName = sb.toString(); 187 | File resultDir = new File(resultDirName); 188 | File resultDataDir = new File(resultDir, "data"); 189 | 190 | // Create the output directory structure. 
191 |             if (!resultDir.mkdir())
192 |             {
193 |                 log.error("Failed to create directory '" +
194 |                           resultDir.getPath() + "'");
195 |                 System.exit(1);
196 |             }
197 |             if (!resultDataDir.mkdir())
198 |             {
199 |                 log.error("Failed to create directory '" +
200 |                           resultDataDir.getPath() + "'");
201 |                 System.exit(1);
202 |             }
203 | 
204 |             // Copy the used properties file into the resultDirectory.
205 |             try
206 |             {
207 |                 Files.copy(new File(System.getProperty("prop")).toPath(),
208 |                            new File(resultDir, "run.properties").toPath());
209 |             }
210 |             catch (IOException e)
211 |             {
212 |                 log.error(e.getMessage());
213 |                 System.exit(1);
214 |             }
215 |             log.info("Term-00, copied " + System.getProperty("prop") +
216 |                      " to " + new File(resultDir, "run.properties").toPath());
217 | 
218 |             // Create the runInfo.csv file.
219 |             String runInfoCSVName = new File(resultDataDir, "runInfo.csv").getPath();
220 |             try
221 |             {
222 |                 runInfoCSV = new BufferedWriter(
223 |                     new FileWriter(runInfoCSVName));
224 |                 runInfoCSV.write("run,driver,driverVersion,db,sessionStart," +
225 |                                  "runMins," +
226 |                                  "loadWarehouses,runWarehouses,numSUTThreads," +
227 |                                  "limitTxnsPerMin," +
228 |                                  "thinkTimeMultiplier,keyingTimeMultiplier\n");
229 |             }
230 |             catch (IOException e)
231 |             {
232 |                 log.error(e.getMessage());
233 |                 System.exit(1);
234 |             }
235 |             log.info("Term-00, created " + runInfoCSVName + " for runID " +
236 |                      runID);
237 | 
238 |             // Open the per transaction result.csv file.
239 |             String resultCSVName = new File(resultDataDir, "result.csv").getPath();
240 |             try
241 |             {
242 |                 resultCSV = new BufferedWriter(new FileWriter(resultCSVName));
243 |                 resultCSV.write("run,elapsed,latency,dblatency," +
244 |                                 "ttype,rbk,dskipped,error\n");
245 |             }
246 |             catch (IOException e)
247 |             {
248 |                 log.error(e.getMessage());
249 |                 System.exit(1);
250 |             }
251 |             log.info("Term-00, writing per transaction results to " +
252 |                      resultCSVName);
253 | 
254 |             if (osCollectorScript != null)
255 |             {
256 |                 osCollector = new OSCollector(getProp(ini, "osCollectorScript"),
257 |                                               runID,
258 |                                               Integer.parseInt(getProp(ini, "osCollectorInterval")),
259 |                                               getProp(ini, "osCollectorSSHAddr"),
260 |                                               getProp(ini, "osCollectorDevices"),
261 |                                               resultDataDir, log);
262 |             }
263 | 
264 |             log.info("Term-00,");
265 |         }
266 | 
267 |         if(databaseDriverLoaded)
268 |         {
269 |             try
270 |             {
271 |                 boolean limitIsTime = iRunMinsBool;
272 |                 int numTerminals = -1;
273 |                 int transactionsPerTerminal = -1;
274 |                 int numWarehouses = -1;
275 |                 int loadWarehouses = -1;
276 |                 int newOrderWeightValue = -1, paymentWeightValue = -1, orderStatusWeightValue = -1, deliveryWeightValue = -1, stockLevelWeightValue = -1;
277 |                 long executionTimeMillis = -1;
278 |                 boolean terminalWarehouseFixed = true;
279 |                 long CLoad;
280 | 
281 |                 Properties dbProps = new Properties();
282 |                 dbProps.setProperty("user", iUser);
283 |                 dbProps.setProperty("password", iPassword);
284 | 
285 |                 /*
286 |                  * Fine tuning of database connection parameters if needed.
287 |                  */
288 |                 switch (dbType)
289 |                 {
290 |                     case DB_FIREBIRD:
291 |                         /*
292 |                          * Firebird needs no_rec_version for our load
293 |                          * to work. Even with that some "deadlocks"
294 |                          * occur. Note that the message "deadlock" in
295 |                          * Firebird can mean something completely different,
296 |                          * namely that there was a conflicting write to
297 |                          * a row that could not be resolved.
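 *
 * Other database types could be tuned the same way by adding a
 * case here before the connections are opened; a hypothetical
 * example for PostgreSQL (not part of the shipped code) would be:
 *
 *     case DB_POSTGRES:
 *         // ask the JDBC driver for server-side prepares right away
 *         dbProps.setProperty("prepareThreshold", "1");
 *         break;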
298 | */ 299 | dbProps.setProperty("TRANSACTION_READ_COMMITTED", 300 | "isc_tpb_read_committed," + 301 | "isc_tpb_no_rec_version," + 302 | "isc_tpb_write," + 303 | "isc_tpb_wait"); 304 | break; 305 | 306 | default: 307 | break; 308 | } 309 | 310 | try { 311 | loadWarehouses = Integer.parseInt(jTPCCUtil.getConfig(iConn, 312 | dbProps, "warehouses")); 313 | CLoad = Long.parseLong(jTPCCUtil.getConfig(iConn, 314 | dbProps, "nURandCLast")); 315 | } catch (Exception e) { 316 | errorMessage(e.getMessage()); 317 | throw e; 318 | } 319 | this.rnd = new jTPCCRandom(CLoad); 320 | log.info("Term-00, C value for C_LAST during load: " + CLoad); 321 | log.info("Term-00, C value for C_LAST this run: " + rnd.getNURandCLast()); 322 | log.info("Term-00, "); 323 | 324 | fastNewOrderCounter = 0; 325 | updateStatusLine(); 326 | 327 | try 328 | { 329 | if (Integer.parseInt(iRunMins) != 0 && Integer.parseInt(iRunTxnsPerTerminal) ==0) 330 | { 331 | iRunMinsBool = true; 332 | } 333 | else if (Integer.parseInt(iRunMins) == 0 && Integer.parseInt(iRunTxnsPerTerminal) !=0) 334 | { 335 | iRunMinsBool = false; 336 | } 337 | else 338 | { 339 | throw new NumberFormatException(); 340 | } 341 | } 342 | catch(NumberFormatException e1) 343 | { 344 | errorMessage("Must indicate either transactions per terminal or number of run minutes!"); 345 | throw new Exception(); 346 | } 347 | 348 | try 349 | { 350 | numWarehouses = Integer.parseInt(iWarehouses); 351 | if(numWarehouses <= 0) 352 | throw new NumberFormatException(); 353 | } 354 | catch(NumberFormatException e1) 355 | { 356 | errorMessage("Invalid number of warehouses!"); 357 | throw new Exception(); 358 | } 359 | if(numWarehouses > loadWarehouses) 360 | { 361 | errorMessage("numWarehouses cannot be greater " + 362 | "than the warehouses loaded in the database"); 363 | throw new Exception(); 364 | } 365 | 366 | try 367 | { 368 | numTerminals = Integer.parseInt(iTerminals); 369 | if(numTerminals <= 0 || numTerminals > 10*numWarehouses) 370 | throw new NumberFormatException(); 371 | } 372 | catch(NumberFormatException e1) 373 | { 374 | errorMessage("Invalid number of terminals!"); 375 | throw new Exception(); 376 | } 377 | 378 | 379 | 380 | if(Long.parseLong(iRunMins) != 0 && Integer.parseInt(iRunTxnsPerTerminal) == 0) 381 | { 382 | try 383 | { 384 | executionTimeMillis = Long.parseLong(iRunMins) * 60000; 385 | if(executionTimeMillis <= 0) 386 | throw new NumberFormatException(); 387 | } 388 | catch(NumberFormatException e1) 389 | { 390 | errorMessage("Invalid number of minutes!"); 391 | throw new Exception(); 392 | } 393 | } 394 | else 395 | { 396 | try 397 | { 398 | transactionsPerTerminal = Integer.parseInt(iRunTxnsPerTerminal); 399 | if(transactionsPerTerminal <= 0) 400 | throw new NumberFormatException(); 401 | } 402 | catch(NumberFormatException e1) 403 | { 404 | errorMessage("Invalid number of transactions per terminal!"); 405 | throw new Exception(); 406 | } 407 | } 408 | 409 | terminalWarehouseFixed = Boolean.parseBoolean(iTermWhseFixed); 410 | 411 | try 412 | { 413 | newOrderWeightValue = Integer.parseInt(iNewOrderWeight); 414 | paymentWeightValue = Integer.parseInt(iPaymentWeight); 415 | orderStatusWeightValue = Integer.parseInt(iOrderStatusWeight); 416 | deliveryWeightValue = Integer.parseInt(iDeliveryWeight); 417 | stockLevelWeightValue = Integer.parseInt(iStockLevelWeight); 418 | 419 | if(newOrderWeightValue < 0 ||paymentWeightValue < 0 || orderStatusWeightValue < 0 || deliveryWeightValue < 0 || stockLevelWeightValue < 0) 420 | throw new NumberFormatException(); 
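                    /*
                     * Note that no newOrderWeight is passed on to the
                     * terminals below; New-Order implicitly receives the
                     * remainder of the mix, e.g. (hypothetical numbers)
                     * payment 43 + orderStatus 4 + delivery 4 +
                     * stockLevel 4 = 55 leaves 45% New-Order.
                     */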
421 | else if(newOrderWeightValue == 0 && paymentWeightValue == 0 && orderStatusWeightValue == 0 && deliveryWeightValue == 0 && stockLevelWeightValue == 0) 422 | throw new NumberFormatException(); 423 | } 424 | catch(NumberFormatException e1) 425 | { 426 | errorMessage("Invalid number in mix percentage!"); 427 | throw new Exception(); 428 | } 429 | 430 | if(newOrderWeightValue + paymentWeightValue + orderStatusWeightValue + deliveryWeightValue + stockLevelWeightValue > 100) 431 | { 432 | errorMessage("Sum of mix percentage parameters exceeds 100%!"); 433 | throw new Exception(); 434 | } 435 | 436 | newOrderCounter = 0; 437 | printMessage("Session started!"); 438 | if(!limitIsTime) 439 | printMessage("Creating " + numTerminals + " terminal(s) with " + transactionsPerTerminal + " transaction(s) per terminal..."); 440 | else 441 | printMessage("Creating " + numTerminals + " terminal(s) with " + (executionTimeMillis/60000) + " minute(s) of execution..."); 442 | if (terminalWarehouseFixed) 443 | printMessage("Terminal Warehouse is fixed"); 444 | else 445 | printMessage("Terminal Warehouse is NOT fixed"); 446 | printMessage("Transaction Weights: " + newOrderWeightValue + "% New-Order, " + paymentWeightValue + "% Payment, " + orderStatusWeightValue + "% Order-Status, " + deliveryWeightValue + "% Delivery, " + stockLevelWeightValue + "% Stock-Level"); 447 | 448 | printMessage("Number of Terminals\t" + numTerminals); 449 | 450 | terminals = new jTPCCTerminal[numTerminals]; 451 | terminalNames = new String[numTerminals]; 452 | terminalsStarted = numTerminals; 453 | try 454 | { 455 | String database = iConn; 456 | String username = iUser; 457 | String password = iPassword; 458 | 459 | int[][] usedTerminals = new int[numWarehouses][10]; 460 | for(int i = 0; i < numWarehouses; i++) 461 | for(int j = 0; j < 10; j++) 462 | usedTerminals[i][j] = 0; 463 | 464 | for(int i = 0; i < numTerminals; i++) 465 | { 466 | int terminalWarehouseID; 467 | int terminalDistrictID; 468 | do 469 | { 470 | terminalWarehouseID = rnd.nextInt(1, numWarehouses); 471 | terminalDistrictID = rnd.nextInt(1, 10); 472 | } 473 | while(usedTerminals[terminalWarehouseID-1][terminalDistrictID-1] == 1); 474 | usedTerminals[terminalWarehouseID-1][terminalDistrictID-1] = 1; 475 | 476 | String terminalName = "Term-" + (i>=9 ? 
""+(i+1) : "0"+(i+1)); 477 | Connection conn = null; 478 | printMessage("Creating database connection for " + terminalName + "..."); 479 | conn = DriverManager.getConnection(database, dbProps); 480 | conn.setAutoCommit(false); 481 | 482 | jTPCCTerminal terminal = new jTPCCTerminal 483 | (terminalName, terminalWarehouseID, terminalDistrictID, 484 | conn, dbType, 485 | transactionsPerTerminal, terminalWarehouseFixed, 486 | paymentWeightValue, orderStatusWeightValue, 487 | deliveryWeightValue, stockLevelWeightValue, numWarehouses, limPerMin_Terminal, this); 488 | 489 | terminals[i] = terminal; 490 | terminalNames[i] = terminalName; 491 | printMessage(terminalName + "\t" + terminalWarehouseID); 492 | } 493 | 494 | sessionEndTargetTime = executionTimeMillis; 495 | signalTerminalsRequestEndSent = false; 496 | 497 | 498 | printMessage("Transaction\tWeight"); 499 | printMessage("% New-Order\t" + newOrderWeightValue); 500 | printMessage("% Payment\t" + paymentWeightValue); 501 | printMessage("% Order-Status\t" + orderStatusWeightValue); 502 | printMessage("% Delivery\t" + deliveryWeightValue); 503 | printMessage("% Stock-Level\t" + stockLevelWeightValue); 504 | 505 | printMessage("Transaction Number\tTerminal\tType\tExecution Time (ms)\t\tComment"); 506 | 507 | printMessage("Created " + numTerminals + " terminal(s) successfully!"); 508 | boolean dummvar = true; 509 | 510 | 511 | 512 | // Create Terminals, Start Transactions 513 | sessionStart = getCurrentTime(); 514 | sessionStartTimestamp = System.currentTimeMillis(); 515 | sessionNextTimestamp = sessionStartTimestamp; 516 | if(sessionEndTargetTime != -1) 517 | sessionEndTargetTime += sessionStartTimestamp; 518 | 519 | // Record run parameters in runInfo.csv 520 | if (runInfoCSV != null) 521 | { 522 | try 523 | { 524 | StringBuffer infoSB = new StringBuffer(); 525 | Formatter infoFmt = new Formatter(infoSB); 526 | infoFmt.format("%d,simple,%s,%s,%s,%s,%d,%d,%d,%d,1.0,1.0\n", 527 | runID, JTPCCVERSION, iDB, 528 | new java.sql.Timestamp(sessionStartTimestamp).toString(), 529 | iRunMins, 530 | loadWarehouses, 531 | numWarehouses, 532 | numTerminals, 533 | Integer.parseInt(limPerMin)); 534 | runInfoCSV.write(infoSB.toString()); 535 | runInfoCSV.close(); 536 | } 537 | catch (Exception e) 538 | { 539 | log.error(e.getMessage()); 540 | System.exit(1); 541 | } 542 | } 543 | 544 | synchronized(terminals) 545 | { 546 | printMessage("Starting all terminals..."); 547 | transactionCount = 1; 548 | for(int i = 0; i < terminals.length; i++) 549 | (new Thread(terminals[i])).start(); 550 | 551 | } 552 | 553 | printMessage("All terminals started executing " + sessionStart); 554 | } 555 | 556 | catch(Exception e1) 557 | { 558 | errorMessage("This session ended with errors!"); 559 | printStreamReport.close(); 560 | fileOutputStream.close(); 561 | 562 | throw new Exception(); 563 | } 564 | 565 | } 566 | catch(Exception ex) 567 | { 568 | } 569 | } 570 | updateStatusLine(); 571 | } 572 | 573 | private void signalTerminalsRequestEnd(boolean timeTriggered) 574 | { 575 | synchronized(terminals) 576 | { 577 | if(!signalTerminalsRequestEndSent) 578 | { 579 | if(timeTriggered) 580 | printMessage("The time limit has been reached."); 581 | printMessage("Signalling all terminals to stop..."); 582 | signalTerminalsRequestEndSent = true; 583 | 584 | for(int i = 0; i < terminals.length; i++) 585 | if(terminals[i] != null) 586 | terminals[i].stopRunningWhenPossible(); 587 | 588 | printMessage("Waiting for all active transactions to end..."); 589 | } 590 | } 591 | } 592 | 593 | 
public void signalTerminalEnded(jTPCCTerminal terminal, long countNewOrdersExecuted) 594 | { 595 | synchronized(terminals) 596 | { 597 | boolean found = false; 598 | terminalsStarted--; 599 | for(int i = 0; i < terminals.length && !found; i++) 600 | { 601 | if(terminals[i] == terminal) 602 | { 603 | terminals[i] = null; 604 | terminalNames[i] = "(" + terminalNames[i] + ")"; 605 | newOrderCounter += countNewOrdersExecuted; 606 | found = true; 607 | } 608 | } 609 | } 610 | 611 | if(terminalsStarted == 0) 612 | { 613 | sessionEnd = getCurrentTime(); 614 | sessionEndTimestamp = System.currentTimeMillis(); 615 | sessionEndTargetTime = -1; 616 | printMessage("All terminals finished executing " + sessionEnd); 617 | endReport(); 618 | terminalsBlockingExit = false; 619 | printMessage("Session finished!"); 620 | 621 | // If we opened a per transaction result file, close it. 622 | if (resultCSV != null) 623 | { 624 | try { 625 | resultCSV.close(); 626 | } catch (IOException e) { 627 | log.error(e.getMessage()); 628 | }; 629 | } 630 | 631 | // Stop the OSCollector, if it is active. 632 | if (osCollector != null) 633 | { 634 | osCollector.stop(); 635 | osCollector = null; 636 | } 637 | } 638 | } 639 | 640 | public void signalTerminalEndedTransaction(String terminalName, String transactionType, long executionTime, String comment, int newOrder) 641 | { 642 | synchronized (counterLock) 643 | { 644 | transactionCount++; 645 | fastNewOrderCounter += newOrder; 646 | Long counter = costPerWorkerload.get(transactionType); 647 | if (counter == null) { 648 | costPerWorkerload.put(transactionType, Long.valueOf(executionTime)); 649 | } else { 650 | costPerWorkerload.put(transactionType, counter + executionTime); 651 | } 652 | } 653 | 654 | if(sessionEndTargetTime != -1 && System.currentTimeMillis() > sessionEndTargetTime) 655 | { 656 | signalTerminalsRequestEnd(true); 657 | } 658 | 659 | updateStatusLine(); 660 | 661 | } 662 | 663 | public jTPCCRandom getRnd() 664 | { 665 | return rnd; 666 | } 667 | 668 | public void resultAppend(jTPCCTData term) 669 | { 670 | if (resultCSV != null) 671 | { 672 | try 673 | { 674 | resultCSV.write(runID + "," + 675 | term.resultLine(sessionStartTimestamp)); 676 | } 677 | catch (IOException e) 678 | { 679 | log.error("Term-00, " + e.getMessage()); 680 | } 681 | } 682 | } 683 | 684 | private void endReport() 685 | { 686 | long currTimeMillis = System.currentTimeMillis(); 687 | long freeMem = Runtime.getRuntime().freeMemory() / (1024*1024); 688 | long totalMem = Runtime.getRuntime().totalMemory() / (1024*1024); 689 | double tpmC = (6000000*fastNewOrderCounter/(currTimeMillis - sessionStartTimestamp))/100.0; 690 | double tpmTotal = (6000000*transactionCount/(currTimeMillis - sessionStartTimestamp))/100.0; 691 | 692 | System.out.println(""); 693 | log.info("Term-00, "); 694 | log.info("Term-00, "); 695 | log.info("Term-00, Measured tpmC (NewOrders) = " + tpmC); 696 | log.info("Term-00, Measured tpmTOTAL = " + tpmTotal); 697 | log.info("Term-00, Session Start = " + sessionStart ); 698 | log.info("Term-00, Session End = " + sessionEnd); 699 | log.info("Term-00, Transaction Count = " + (transactionCount-1)); 700 | for (String key : costPerWorkerload.keySet()) { 701 | Long value = costPerWorkerload.get(key); 702 | log.info("executeTime[" + key + "]=" + value.toString()); 703 | } 704 | } 705 | 706 | private void printMessage(String message) 707 | { 708 | log.trace("Term-00, " + message); 709 | } 710 | 711 | private void errorMessage(String message) 712 | { 713 | log.error("Term-00, "+ 
message); 714 | } 715 | 716 | private void exit() 717 | { 718 | System.exit(0); 719 | } 720 | 721 | private String getCurrentTime() 722 | { 723 | return dateFormat.format(new java.util.Date()); 724 | } 725 | 726 | private String getFileNameSuffix() 727 | { 728 | SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMddHHmmss"); 729 | return dateFormat.format(new java.util.Date()); 730 | } 731 | 732 | synchronized private void updateStatusLine() 733 | { 734 | long currTimeMillis = System.currentTimeMillis(); 735 | 736 | if(currTimeMillis > sessionNextTimestamp) 737 | { 738 | StringBuilder informativeText = new StringBuilder(""); 739 | Formatter fmt = new Formatter(informativeText); 740 | double tpmC = (6000000*fastNewOrderCounter/(currTimeMillis - sessionStartTimestamp))/100.0; 741 | double tpmTotal = (6000000*transactionCount/(currTimeMillis - sessionStartTimestamp))/100.0; 742 | 743 | sessionNextTimestamp += 1000; /* update this every seconds */ 744 | 745 | fmt.format("Term-00, Running Average tpmTOTAL: %.2f", tpmTotal); 746 | 747 | /* XXX What is the meaning of these numbers? */ 748 | recentTpmC = (fastNewOrderCounter - sessionNextKounter) * 12; 749 | recentTpmTotal= (transactionCount-sessionNextKounter)*12; 750 | sessionNextKounter = fastNewOrderCounter; 751 | fmt.format(" Current tpmTOTAL: %d", recentTpmTotal); 752 | 753 | long freeMem = Runtime.getRuntime().freeMemory() / (1024*1024); 754 | long totalMem = Runtime.getRuntime().totalMemory() / (1024*1024); 755 | fmt.format(" Memory Usage: %dMB / %dMB ", (totalMem - freeMem), totalMem); 756 | 757 | System.out.print(informativeText); 758 | for (int count = 0; count < 1+informativeText.length(); count++) 759 | System.out.print("\b"); 760 | } 761 | } 762 | } 763 | -------------------------------------------------------------------------------- /src/client/jTPCCConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * jTPCCConfig - Basic configuration parameters for jTPCC 3 | * 4 | * Copyright (C) 2003, Raul Barbosa 5 | * Copyright (C) 2004-2016, Denis Lussier 6 | * Copyright (C) 2016, Jan Wieck 7 | * 8 | */ 9 | 10 | import java.text.*; 11 | 12 | public interface jTPCCConfig 13 | { 14 | public final static String JTPCCVERSION = "5.0"; 15 | 16 | public final static int DB_UNKNOWN = 0, 17 | DB_FIREBIRD = 1, 18 | DB_ORACLE = 2, 19 | DB_POSTGRES = 3, 20 | DB_MYSQL = 4; 21 | 22 | public final static int NEW_ORDER = 1, 23 | PAYMENT = 2, 24 | ORDER_STATUS = 3, 25 | DELIVERY = 4, 26 | STOCK_LEVEL = 5; 27 | 28 | public final static String[] nameTokens = {"BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING"}; 29 | 30 | public final static SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); 31 | 32 | public final static int configCommitCount = 10000; // commit every n records in LoadData 33 | 34 | public final static int configWhseCount = 10; 35 | public final static int configItemCount = 100000; // tpc-c std = 100,000 36 | public final static int configDistPerWhse = 10; // tpc-c std = 10 37 | public final static int configCustPerDist = 3000; // tpc-c std = 3,000 38 | } 39 | -------------------------------------------------------------------------------- /src/client/jTPCCConnection.java: -------------------------------------------------------------------------------- 1 | /* 2 | * jTPCCConnection 3 | * 4 | * One connection to the database. Used by either the old style 5 | * Terminal or the new TimedSUT. 
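 *
 * All statements a transaction can need are prepared once per
 * connection, so the hot path only binds parameters and executes.
 * Typical use (a sketch, not taken from the callers):
 *
 *     jTPCCConnection db = new jTPCCConnection(connURL, connProps, dbType);
 *     db.stmtNewOrderSelectDist.setInt(1, w_id);
 *     db.stmtNewOrderSelectDist.setInt(2, d_id);
 *     ResultSet rs = db.stmtNewOrderSelectDist.executeQuery();
 *     ...
 *     db.commit();    // wraps the SQLException into a CommitException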
6 |  *
7 |  * Copyright (C) 2004-2016, Denis Lussier
8 |  * Copyright (C) 2016, Jan Wieck
9 |  *
10 |  */
11 |
12 | import java.util.*;
13 | import java.sql.*;
14 |
15 | public class jTPCCConnection
16 | {
17 |     private Connection dbConn = null;
18 |     private int dbType = 0;
19 |
20 |     public PreparedStatement stmtNewOrderSelectWhseCust;
21 |     public PreparedStatement stmtNewOrderSelectDist;
22 |     public PreparedStatement stmtNewOrderUpdateDist;
23 |     public PreparedStatement stmtNewOrderInsertOrder;
24 |     public PreparedStatement stmtNewOrderInsertNewOrder;
25 |     public PreparedStatement stmtNewOrderSelectStock;
26 |     public PreparedStatement stmtNewOrderSelectStockBatch[];
27 |     public PreparedStatement stmtNewOrderSelectItem;
28 |     public PreparedStatement stmtNewOrderSelectItemBatch[];
29 |     public PreparedStatement stmtNewOrderUpdateStock;
30 |     public PreparedStatement stmtNewOrderInsertOrderLine;
31 |
32 |     public PreparedStatement stmtPaymentSelectWarehouse;
33 |     public PreparedStatement stmtPaymentSelectDistrict;
34 |     public PreparedStatement stmtPaymentSelectCustomerListByLast;
35 |     public PreparedStatement stmtPaymentSelectCustomer;
36 |     public PreparedStatement stmtPaymentSelectCustomerData;
37 |     public PreparedStatement stmtPaymentUpdateWarehouse;
38 |     public PreparedStatement stmtPaymentUpdateDistrict;
39 |     public PreparedStatement stmtPaymentUpdateCustomer;
40 |     public PreparedStatement stmtPaymentUpdateCustomerWithData;
41 |     public PreparedStatement stmtPaymentInsertHistory;
42 |
43 |     public PreparedStatement stmtOrderStatusSelectCustomerListByLast;
44 |     public PreparedStatement stmtOrderStatusSelectCustomer;
45 |     public PreparedStatement stmtOrderStatusSelectLastOrder;
46 |     public PreparedStatement stmtOrderStatusSelectOrderLine;
47 |
48 |     public PreparedStatement stmtStockLevelSelectLow;
49 |
50 |     public PreparedStatement stmtDeliveryBGSelectOldestNewOrder;
51 |     public PreparedStatement stmtDeliveryBGDeleteOldestNewOrder;
52 |     public PreparedStatement stmtDeliveryBGSelectOrder;
53 |     public PreparedStatement stmtDeliveryBGUpdateOrder;
54 |     public PreparedStatement stmtDeliveryBGSelectSumOLAmount;
55 |     public PreparedStatement stmtDeliveryBGUpdateOrderLine;
56 |     public PreparedStatement stmtDeliveryBGUpdateCustomer;
57 |
58 |     public jTPCCConnection(Connection dbConn, int dbType)
59 |     throws SQLException
60 |     {
61 |         this.dbConn = dbConn;
62 |         this.dbType = dbType;
63 |         stmtNewOrderSelectStockBatch = new PreparedStatement[16];
64 |         String st = "SELECT s_i_id, s_w_id, s_quantity, s_data, " +
65 |                     "    s_dist_01, s_dist_02, s_dist_03, s_dist_04, " +
66 |                     "    s_dist_05, s_dist_06, s_dist_07, s_dist_08, " +
67 |                     "    s_dist_09, s_dist_10 " +
68 |                     "    FROM bmsql_stock " +
69 |                     "    WHERE (s_w_id, s_i_id) in ((?,?)";
70 |         for (int i = 1; i <= 15; i++) {
71 |             String stmtStr = st + ") FOR UPDATE";
72 |             stmtNewOrderSelectStockBatch[i] = dbConn.prepareStatement(stmtStr);
73 |             st += ",(?,?)";
74 |         }
75 |         stmtNewOrderSelectItemBatch = new PreparedStatement[16];
76 |         st = "SELECT i_id, i_price, i_name, i_data " +
77 |              "    FROM bmsql_item WHERE i_id in (?";
78 |         for (int i = 1; i <= 15; i++) {
79 |             String stmtStr = st + ")";
80 |             stmtNewOrderSelectItemBatch[i] = dbConn.prepareStatement(stmtStr);
81 |             st += ",?";
82 |         }
83 |
84 |         // PreparedStatements for NEW_ORDER
85 |         stmtNewOrderSelectWhseCust = dbConn.prepareStatement(
86 |             "SELECT c_discount, c_last, c_credit, w_tax " +
87 |             "    FROM bmsql_customer " +
88 |             "    JOIN bmsql_warehouse ON (w_id = c_w_id) " +
89 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
90 |         stmtNewOrderSelectDist = dbConn.prepareStatement(
91 |             "SELECT d_tax, d_next_o_id " +
92 |             "    FROM bmsql_district " +
93 |             "    WHERE d_w_id = ? AND d_id = ? " +
94 |             "    FOR UPDATE");
95 |         stmtNewOrderUpdateDist = dbConn.prepareStatement(
96 |             "UPDATE bmsql_district " +
97 |             "    SET d_next_o_id = d_next_o_id + 1 " +
98 |             "    WHERE d_w_id = ? AND d_id = ?");
99 |         stmtNewOrderInsertOrder = dbConn.prepareStatement(
100 |             "INSERT INTO bmsql_oorder (" +
101 |             "    o_id, o_d_id, o_w_id, o_c_id, o_entry_d, " +
102 |             "    o_ol_cnt, o_all_local) " +
103 |             "VALUES (?, ?, ?, ?, ?, ?, ?)");
104 |         stmtNewOrderInsertNewOrder = dbConn.prepareStatement(
105 |             "INSERT INTO bmsql_new_order (" +
106 |             "    no_o_id, no_d_id, no_w_id) " +
107 |             "VALUES (?, ?, ?)");
108 |         stmtNewOrderSelectStock = dbConn.prepareStatement(
109 |             "SELECT s_quantity, s_data, " +
110 |             "    s_dist_01, s_dist_02, s_dist_03, s_dist_04, " +
111 |             "    s_dist_05, s_dist_06, s_dist_07, s_dist_08, " +
112 |             "    s_dist_09, s_dist_10 " +
113 |             "    FROM bmsql_stock " +
114 |             "    WHERE s_w_id = ? AND s_i_id = ? " +
115 |             "    FOR UPDATE");
116 |         stmtNewOrderSelectItem = dbConn.prepareStatement(
117 |             "SELECT i_price, i_name, i_data " +
118 |             "    FROM bmsql_item " +
119 |             "    WHERE i_id = ?");
120 |         stmtNewOrderUpdateStock = dbConn.prepareStatement(
121 |             "UPDATE bmsql_stock " +
122 |             "    SET s_quantity = ?, s_ytd = s_ytd + ?, " +
123 |             "        s_order_cnt = s_order_cnt + 1, " +
124 |             "        s_remote_cnt = s_remote_cnt + ? " +
125 |             "    WHERE s_w_id = ? AND s_i_id = ?");
126 |         stmtNewOrderInsertOrderLine = dbConn.prepareStatement(
127 |             "INSERT INTO bmsql_order_line (" +
128 |             "    ol_o_id, ol_d_id, ol_w_id, ol_number, " +
129 |             "    ol_i_id, ol_supply_w_id, ol_quantity, " +
130 |             "    ol_amount, ol_dist_info) " +
131 |             "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)");
132 |
133 |         // PreparedStatements for PAYMENT
134 |         stmtPaymentSelectWarehouse = dbConn.prepareStatement(
135 |             "SELECT w_name, w_street_1, w_street_2, w_city, " +
136 |             "    w_state, w_zip " +
137 |             "    FROM bmsql_warehouse " +
138 |             "    WHERE w_id = ? ");
139 |         stmtPaymentSelectDistrict = dbConn.prepareStatement(
140 |             "SELECT d_name, d_street_1, d_street_2, d_city, " +
141 |             "    d_state, d_zip " +
142 |             "    FROM bmsql_district " +
143 |             "    WHERE d_w_id = ? AND d_id = ?");
144 |         stmtPaymentSelectCustomerListByLast = dbConn.prepareStatement(
145 |             "SELECT c_id " +
146 |             "    FROM bmsql_customer " +
147 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? " +
148 |             "    ORDER BY c_first");
149 |         stmtPaymentSelectCustomer = dbConn.prepareStatement(
150 |             "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, " +
151 |             "    c_city, c_state, c_zip, c_phone, c_since, c_credit, " +
152 |             "    c_credit_lim, c_discount, c_balance " +
153 |             "    FROM bmsql_customer " +
154 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ? " +
155 |             "    FOR UPDATE");
156 |         stmtPaymentSelectCustomerData = dbConn.prepareStatement(
157 |             "SELECT c_data " +
158 |             "    FROM bmsql_customer " +
159 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
160 |         stmtPaymentUpdateWarehouse = dbConn.prepareStatement(
161 |             "UPDATE bmsql_warehouse " +
162 |             "    SET w_ytd = w_ytd + ? " +
163 |             "    WHERE w_id = ?");
164 |         stmtPaymentUpdateDistrict = dbConn.prepareStatement(
165 |             "UPDATE bmsql_district " +
166 |             "    SET d_ytd = d_ytd + ? " +
167 |             "    WHERE d_w_id = ? AND d_id = ?");
168 |         stmtPaymentUpdateCustomer = dbConn.prepareStatement(
169 |             "UPDATE bmsql_customer " +
170 |             "    SET c_balance = c_balance - ?, " +
171 |             "        c_ytd_payment = c_ytd_payment + ?, " +
172 |             "        c_payment_cnt = c_payment_cnt + 1 " +
173 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
174 |         stmtPaymentUpdateCustomerWithData = dbConn.prepareStatement(
175 |             "UPDATE bmsql_customer " +
176 |             "    SET c_balance = c_balance - ?, " +
177 |             "        c_ytd_payment = c_ytd_payment + ?, " +
178 |             "        c_payment_cnt = c_payment_cnt + 1, " +
179 |             "        c_data = ? " +
180 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
181 |         stmtPaymentInsertHistory = dbConn.prepareStatement(
182 |             "INSERT INTO bmsql_history (" +
183 |             "    h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, " +
184 |             "    h_date, h_amount, h_data) " +
185 |             "VALUES (?, ?, ?, ?, ?, ?, ?, ?)");
186 |
187 |         // PreparedStatements for ORDER_STATUS
188 |         stmtOrderStatusSelectCustomerListByLast = dbConn.prepareStatement(
189 |             "SELECT c_id " +
190 |             "    FROM bmsql_customer " +
191 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? " +
192 |             "    ORDER BY c_first");
193 |         stmtOrderStatusSelectCustomer = dbConn.prepareStatement(
194 |             "SELECT c_first, c_middle, c_last, c_balance " +
195 |             "    FROM bmsql_customer " +
196 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
197 |         stmtOrderStatusSelectLastOrder = dbConn.prepareStatement(
198 |             "SELECT o_id, o_entry_d, o_carrier_id " +
199 |             "    FROM bmsql_oorder " +
200 |             "    WHERE o_w_id = ? AND o_d_id = ? AND o_c_id = ? " +
201 |             "    ORDER BY o_id DESC LIMIT 1");
202 |         stmtOrderStatusSelectOrderLine = dbConn.prepareStatement(
203 |             "SELECT ol_i_id, ol_supply_w_id, ol_quantity, " +
204 |             "    ol_amount, ol_delivery_d " +
205 |             "    FROM bmsql_order_line " +
206 |             "    WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ? " +
207 |             "    ORDER BY ol_w_id, ol_d_id, ol_o_id, ol_number");
208 |
209 |         // PreparedStatements for STOCK_LEVEL
210 |         switch (dbType)
211 |         {
212 |             case jTPCCConfig.DB_POSTGRES:
213 |             case jTPCCConfig.DB_MYSQL:
214 |                 stmtStockLevelSelectLow = dbConn.prepareStatement(
215 |                     "SELECT count(*) AS low_stock FROM (" +
216 |                     "    SELECT s_w_id, s_i_id, s_quantity " +
217 |                     "        FROM bmsql_stock " +
218 |                     "        WHERE s_w_id = ? AND s_quantity < ? AND s_i_id IN (" +
219 |                     "            SELECT /*+ TIDB_INLJ(bmsql_order_line) */ ol_i_id " +
220 |                     "                FROM bmsql_district " +
221 |                     "                JOIN bmsql_order_line ON ol_w_id = d_w_id " +
222 |                     "                AND ol_d_id = d_id " +
223 |                     "                AND ol_o_id >= d_next_o_id - 20 " +
224 |                     "                AND ol_o_id < d_next_o_id " +
225 |                     "                WHERE d_w_id = ? AND d_id = ? " +
226 |                     "    ) " +
227 |                     "    ) AS L");
228 |                 break;
229 |
230 |             default:
231 |                 stmtStockLevelSelectLow = dbConn.prepareStatement(
232 |                     "SELECT count(*) AS low_stock FROM (" +
233 |                     "    SELECT s_w_id, s_i_id, s_quantity " +
234 |                     "        FROM bmsql_stock " +
235 |                     "        WHERE s_w_id = ? AND s_quantity < ? AND s_i_id IN (" +
236 |                     "            SELECT ol_i_id " +
237 |                     "                FROM bmsql_district " +
238 |                     "                JOIN bmsql_order_line ON ol_w_id = d_w_id " +
239 |                     "                AND ol_d_id = d_id " +
240 |                     "                AND ol_o_id >= d_next_o_id - 20 " +
241 |                     "                AND ol_o_id < d_next_o_id " +
242 |                     "                WHERE d_w_id = ? AND d_id = ? " +
243 |                     "    ) " +
244 |                     "    )");
245 |                 break;
246 |         }
247 |
248 |
249 |         // PreparedStatements for DELIVERY_BG
250 |         stmtDeliveryBGSelectOldestNewOrder = dbConn.prepareStatement(
251 |             "SELECT no_o_id " +
252 |             "    FROM bmsql_new_order " +
253 |             "    WHERE no_w_id = ? AND no_d_id = ? " +
254 |             "    ORDER BY no_o_id ASC" +
255 |             "    LIMIT 1" +
256 |             "    FOR UPDATE");
257 |         stmtDeliveryBGDeleteOldestNewOrder = dbConn.prepareStatement(
258 |             "DELETE FROM bmsql_new_order " +
259 |             "    WHERE (no_w_id,no_d_id,no_o_id) IN (" +
260 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)," +
261 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?))");
262 |
263 |         stmtDeliveryBGSelectOrder = dbConn.prepareStatement(
264 |             "SELECT o_c_id, o_d_id" +
265 |             "    FROM bmsql_oorder " +
266 |             "    WHERE (o_w_id,o_d_id,o_id) IN (" +
267 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)," +
268 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?))");
269 |
270 |         stmtDeliveryBGUpdateOrder = dbConn.prepareStatement(
271 |             "UPDATE bmsql_oorder " +
272 |             "    SET o_carrier_id = ? " +
273 |             "    WHERE (o_w_id,o_d_id,o_id) IN (" +
274 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)," +
275 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?))");
276 |
277 |         stmtDeliveryBGSelectSumOLAmount = dbConn.prepareStatement(
278 |             "SELECT sum(ol_amount) AS sum_ol_amount, ol_d_id" +
279 |             "    FROM bmsql_order_line " +
280 |             "    WHERE (ol_w_id,ol_d_id,ol_o_id) IN (" +
281 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)," +
282 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)" +
283 |             ") GROUP BY ol_d_id");
284 |
285 |
286 |         stmtDeliveryBGUpdateOrderLine = dbConn.prepareStatement(
287 |             "UPDATE bmsql_order_line " +
288 |             "    SET ol_delivery_d = ? " +
289 |             "    WHERE (ol_w_id,ol_d_id,ol_o_id) IN (" +
290 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)," +
291 |             "(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?))");
292 |
293 |         stmtDeliveryBGUpdateCustomer = dbConn.prepareStatement(
294 |             "UPDATE bmsql_customer " +
295 |             "    SET c_balance = c_balance + ?, " +
296 |             "        c_delivery_cnt = c_delivery_cnt + 1 " +
297 |             "    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
298 |     }
299 |
300 |     public jTPCCConnection(String connURL, Properties connProps, int dbType)
301 |     throws SQLException
302 |     {
303 |         this(DriverManager.getConnection(connURL, connProps), dbType);
304 |     }
305 |
306 |     public void commit()
307 |     throws SQLException
308 |     {
309 |         try {
310 |             dbConn.commit();
311 |         } catch(SQLException e) {
312 |             throw new CommitException();
313 |         }
314 |     }
315 |
316 |     public void rollback()
317 |     throws SQLException
318 |     {
319 |         dbConn.rollback();
320 |     }
321 | }
322 |
--------------------------------------------------------------------------------
/src/client/jTPCCRandom.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * jTPCCRandom - utility functions for the Open Source Java implementation of
3 |  * the TPC-C benchmark
4 |  *
5 |  * Copyright (C) 2003, Raul Barbosa
6 |  * Copyright (C) 2004-2016, Denis Lussier
7 |  * Copyright (C) 2016, Jan Wieck
8 |  *
9 |  */
10 |
11 |
12 | import java.io.*;
13 | import java.sql.*;
14 | import java.util.*;
15 | import java.text.*;
16 |
17 | public class jTPCCRandom
18 | {
19 |     private static final char[] aStringChars = {
20 |         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
21 |         'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
22 |         'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
23 |         'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
24 |         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
25 |     private static final String[] cLastTokens = {
26 |         "BAR", "OUGHT", "ABLE", "PRI", "PRES",
27 |         "ESE", "ANTI", "CALLY", "ATION", "EING"};
28 |
29 |     private static long nURandCLast;
30 |     private static long nURandCC_ID;
31 |     private static long nURandCI_ID;
32 |     private static boolean initialized = false;
33 |
34 |     private Random random;
35 |
36 |     /*
37 |      * jTPCCRandom()
38 |      *
39 |      * Used to create the master jTPCCRandom() instance for loading
40 |      * the database. See below.
41 |      */
42 |     jTPCCRandom()
43 |     {
44 |         if (initialized)
45 |             throw new IllegalStateException("Global instance exists");
46 |
47 |         this.random = new Random(System.nanoTime());
48 |         jTPCCRandom.nURandCLast = nextLong(0, 255);
49 |         jTPCCRandom.nURandCC_ID = nextLong(0, 1023);
50 |         jTPCCRandom.nURandCI_ID = nextLong(0, 8191);
51 |
52 |         initialized = true;
53 |     }
54 |
55 |     /*
56 |      * jTPCCRandom(CLoad)
57 |      *
58 |      * Used to create the master jTPCCRandom instance for running
59 |      * a benchmark against a previously loaded database.
60 |      *
61 |      * TPC-C 2.1.6 defines the rules for picking the C values of
62 |      * the non-uniform random number generator. In particular
63 |      * 2.1.6.1 defines what numbers for the C value for generating
64 |      * C_LAST must be excluded from the possible range during run
65 |      * time, based on the number used during the load.
66 |      */
67 |     jTPCCRandom(long CLoad)
68 |     {
69 |         long delta;
70 |
71 |         if (initialized)
72 |             throw new IllegalStateException("Global instance exists");
73 |
74 |         this.random = new Random(System.nanoTime());
75 |         jTPCCRandom.nURandCC_ID = nextLong(0, 1023);
76 |         jTPCCRandom.nURandCI_ID = nextLong(0, 8191);
77 |
78 |         do
79 |         {
80 |             jTPCCRandom.nURandCLast = nextLong(0, 255);
81 |
82 |             delta = Math.abs(jTPCCRandom.nURandCLast - CLoad);
83 |             if (delta == 96 || delta == 112)
84 |                 continue;
85 |             if (delta < 65 || delta > 119)
86 |                 continue;
87 |             break;
88 |         } while(true);
89 |
90 |         initialized = true;
91 |     }
92 |
93 |     private jTPCCRandom(jTPCCRandom parent)
94 |     {
95 |         this.random = new Random(System.nanoTime());
96 |     }
97 |
98 |     /*
99 |      * newRandom()
100 |      *
101 |      * Creates a derived random data generator to be used in another
102 |      * thread of the current benchmark load or run process. As per
103 |      * TPC-C 2.1.6 all terminals during a run must use the same C
104 |      * values per field. The jTPCCRandom class therefore cannot
105 |      * generate them per instance, but each thread's instance must
106 |      * inherit those numbers from a global instance.
107 |      */
108 |     jTPCCRandom newRandom()
109 |     {
110 |         return new jTPCCRandom(this);
111 |     }
112 |
113 |
114 |     /*
115 |      * nextLong(x, y)
116 |      *
117 |      * Produce a random number uniformly distributed in [x .. y]
118 |      */
119 |     public long nextLong(long x, long y)
120 |     {
121 |         return (long)(random.nextDouble() * (y - x + 1) + x);
122 |     }
123 |
124 |     /*
125 |      * nextInt(x, y)
126 |      *
127 |      * Produce a random number uniformly distributed in [x .. y]
128 |      */
129 |     public int nextInt(int x, int y)
130 |     {
131 |         return (int)(random.nextDouble() * (y - x + 1) + x);
132 |     }
133 |
134 |     /*
135 |      * getAString(x, y)
136 |      *
137 |      * Produce a random alphanumeric string of length [x .. y].
138 |      *
139 |      * Note: TPC-C 4.3.2.2 asks for an "alphanumeric" string.
140 |      * Comment 1 about the character set does NOT mean that this
141 |      * function must eventually produce 128 different characters,
142 |      * only that the "character set" used to store this data must
143 |      * be able to represent 128 different characters. '#@!%%ÄÖß'
144 |      * is not an alphanumeric string. We can save ourselves a lot
145 |      * of UTF8 related trouble by producing alphanumeric only
146 |      * instead of cartoon style curse-bubbles.
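     * For example, getAString(8, 10) may yield something like
     * "hK3mZq0Rx": the first character is always a letter, the
     * remaining characters letters or digits (see the code below).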
147 |      */
148 |     public String getAString(long x, long y)
149 |     {
150 |         String result = new String();
151 |         long len = nextLong(x, y);
152 |         long have = 1;
153 |
154 |         if (len <= 0)
155 |             return result;
156 |
157 |         result += aStringChars[(int)nextLong(0, 51)];
158 |         while (have < len)
159 |         {
160 |             result += aStringChars[(int)nextLong(0, 61)];
161 |             have++;
162 |         }
163 |
164 |         return result;
165 |     }
166 |
167 |     /*
168 |      * getNString(x, y)
169 |      *
170 |      * Produce a random numeric string of length [x .. y].
171 |      */
172 |     public String getNString(long x, long y)
173 |     {
174 |         String result = new String();
175 |         long len = nextLong(x, y);
176 |         long have = 0;
177 |
178 |         while (have < len)
179 |         {
180 |             result += (char)(nextLong((long)'0', (long)'9'));
181 |             have++;
182 |         }
183 |
184 |         return result;
185 |     }
186 |
187 |     /*
188 |      * getItemID()
189 |      *
190 |      * Produce a non-uniform random Item ID.
191 |      */
192 |     public int getItemID()
193 |     {
194 |         return (int)((((nextLong(0, 8191) | nextLong(1, 100000)) + nURandCI_ID)
195 |                 % 100000) + 1);
196 |     }
197 |
198 |     /*
199 |      * getCustomerID()
200 |      *
201 |      * Produce a non-uniform random Customer ID.
202 |      */
203 |     public int getCustomerID()
204 |     {
205 |         return (int)((((nextLong(0, 1023) | nextLong(1, 3000)) + nURandCC_ID)
206 |                 % 3000) + 1);
207 |     }
208 |
209 |     /*
210 |      * getCLast(num)
211 |      *
212 |      * Produce the syllable representation for C_LAST of [0 .. 999]
213 |      */
214 |     public String getCLast(int num)
215 |     {
216 |         String result = new String();
217 |
218 |         for (int i = 0; i < 3; i++)
219 |         {
220 |             result = cLastTokens[num % 10] + result;
221 |             num /= 10;
222 |         }
223 |
224 |         return result;
225 |     }
226 |
227 |     /*
228 |      * getCLast()
229 |      *
230 |      * Produce a non-uniform random Customer Last Name.
231 |      */
232 |     public String getCLast()
233 |     {
234 |         long num;
235 |         num = (((nextLong(0, 255) | nextLong(0, 999)) + nURandCLast) % 1000);
236 |         return getCLast((int)num);
237 |     }
238 |
239 |     public String getState()
240 |     {
241 |         String result = new String();
242 |
243 |         result += (char)nextInt((int)'A', (int)'Z');
244 |         result += (char)nextInt((int)'A', (int)'Z');
245 |
246 |         return result;
247 |     }
248 |
249 |     /*
250 |      * Methods to retrieve the C values used.
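     * The loader records these values (e.g. in bmsql_config, read back
     * through jTPCCUtil.getConfig() below) so that a later benchmark
     * run can validate its C_LAST C value against the TPC-C 2.1.6.1
     * delta rule.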
251 |      */
252 |     public long getNURandCLast()
253 |     {
254 |         return nURandCLast;
255 |     }
256 |
257 |     public long getNURandCC_ID()
258 |     {
259 |         return nURandCC_ID;
260 |     }
261 |
262 |     public long getNURandCI_ID()
263 |     {
264 |         return nURandCI_ID;
265 |     }
266 | } // end jTPCCRandom
267 |
--------------------------------------------------------------------------------
/src/client/jTPCCTerminal.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * jTPCCTerminal - Terminal emulator code for jTPCC (transactions)
3 |  *
4 |  * Copyright (C) 2003, Raul Barbosa
5 |  * Copyright (C) 2004-2016, Denis Lussier
6 |  * Copyright (C) 2016, Jan Wieck
7 |  *
8 |  */
9 | import org.apache.log4j.*;
10 |
11 | import java.io.*;
12 | import java.sql.*;
13 | import java.sql.Date;
14 | import java.util.*;
15 | import javax.swing.*;
16 |
17 |
18 | public class jTPCCTerminal implements jTPCCConfig, Runnable
19 | {
20 |     private static org.apache.log4j.Logger log = Logger.getLogger(jTPCCTerminal.class);
21 |
22 |     private String terminalName;
23 |     private Connection conn = null;
24 |     private Statement stmt = null;
25 |     private Statement stmt1 = null;
26 |     private ResultSet rs = null;
27 |     private int terminalWarehouseID, terminalDistrictID;
28 |     private boolean terminalWarehouseFixed;
29 |     private int paymentWeight, orderStatusWeight, deliveryWeight, stockLevelWeight, limPerMin_Terminal;
30 |     private jTPCC parent;
31 |     private jTPCCRandom rnd;
32 |
33 |     private int transactionCount = 1;
34 |     private int numTransactions;
35 |     private int numWarehouses;
36 |     private int newOrderCounter;
37 |     private long totalTnxs = 1;
38 |     private StringBuffer query = null;
39 |     private int result = 0;
40 |     private boolean stopRunningSignal = false;
41 |
42 |     long terminalStartTime = 0;
43 |     long transactionEnd = 0;
44 |
45 |     jTPCCConnection db = null;
46 |     int dbType = 0;
47 |
48 |     public jTPCCTerminal
49 |         (String terminalName, int terminalWarehouseID, int terminalDistrictID,
50 |          Connection conn, int dbType,
51 |          int numTransactions, boolean terminalWarehouseFixed,
52 |          int paymentWeight, int orderStatusWeight,
53 |          int deliveryWeight, int stockLevelWeight, int numWarehouses, int limPerMin_Terminal, jTPCC parent) throws SQLException
54 |     {
55 |         this.terminalName = terminalName;
56 |         this.conn = conn;
57 |         this.dbType = dbType;
58 |         this.stmt = conn.createStatement();
59 |         this.stmt.setMaxRows(200);
60 |         this.stmt.setFetchSize(100);
61 |
62 |         this.stmt1 = conn.createStatement();
63 |         this.stmt1.setMaxRows(1);
64 |
65 |         this.terminalWarehouseID = terminalWarehouseID;
66 |         this.terminalDistrictID = terminalDistrictID;
67 |         this.terminalWarehouseFixed = terminalWarehouseFixed;
68 |         this.parent = parent;
69 |         this.rnd = parent.getRnd().newRandom();
70 |         this.numTransactions = numTransactions;
71 |         this.paymentWeight = paymentWeight;
72 |         this.orderStatusWeight = orderStatusWeight;
73 |         this.deliveryWeight = deliveryWeight;
74 |         this.stockLevelWeight = stockLevelWeight;
75 |         this.numWarehouses = numWarehouses;
76 |         this.newOrderCounter = 0;
77 |         this.limPerMin_Terminal = limPerMin_Terminal;
78 |
79 |         this.db = new jTPCCConnection(conn, dbType);
80 |
81 |         terminalMessage("");
82 |         terminalMessage("Terminal \'" + terminalName + "\' has WarehouseID=" + terminalWarehouseID + " and DistrictID=" + terminalDistrictID + ".");
83 |         terminalStartTime = System.currentTimeMillis();
84 |     }
85 |
86 |     public void run()
87 |     {
88 |         executeTransactions(numTransactions);
89 |         try
90 |         {
91 |             printMessage("");
92 |             printMessage("Closing statement and connection...");
93 |
94 |             stmt.close();
95 |             conn.close();
96 |         }
97 |         catch(Exception e)
98 |         {
99 |             printMessage("");
100 |             printMessage("An error occurred!");
101 |             logException(e);
102 |         }
103 |
104 |         printMessage("");
105 |         printMessage("Terminal \'" + terminalName + "\' finished after " + (transactionCount-1) + " transaction(s).");
106 |
107 |         parent.signalTerminalEnded(this, newOrderCounter);
108 |     }
109 |
110 |     public void stopRunningWhenPossible()
111 |     {
112 |         stopRunningSignal = true;
113 |         printMessage("");
114 |         printMessage("Terminal received stop signal!");
115 |         printMessage("Finishing current transaction before exit...");
116 |     }
117 |
118 |     private void executeTransactions(int numTransactions)
119 |     {
120 |         boolean stopRunning = false;
121 |
122 |         if(numTransactions != -1)
123 |             printMessage("Executing " + numTransactions + " transactions...");
124 |         else
125 |             printMessage("Executing for a limited time...");
126 |
127 |         for(int i = 0; (i < numTransactions || numTransactions == -1) && !stopRunning; i++)
128 |         {
129 |
130 |             long transactionType = rnd.nextLong(1, 100);
131 |             int skippedDeliveries = 0, newOrder = 0;
132 |             String transactionTypeName;
133 |
134 |             long transactionStart = System.currentTimeMillis();
135 |
136 |             /*
137 |              * TPC-C specifies that each terminal has a fixed
138 |              * "home" warehouse. However, since this implementation
139 |              * does not simulate "terminals", but rather simulates
140 |              * "application threads", that association is no longer
141 |              * valid. In the case of having fewer clients than
142 |              * warehouses (which should be the normal case), it
143 |              * would leave the warehouses that have no client without
144 |              * any significant traffic, changing the overall database
145 |              * access pattern significantly.
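             * For example, with 10 terminals against 100 warehouses,
             * a fixed assignment would leave 90 warehouses nearly
             * idle; re-rolling the warehouse per transaction (below)
             * spreads the load over the whole database.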
146 |              */
147 |             if(!terminalWarehouseFixed)
148 |                 terminalWarehouseID = rnd.nextInt(1, numWarehouses);
149 |
150 |             if(transactionType <= paymentWeight)
151 |             {
152 |                 jTPCCTData term = new jTPCCTData();
153 |                 term.setNumWarehouses(numWarehouses);
154 |                 term.setWarehouse(terminalWarehouseID);
155 |                 term.setDistrict(terminalDistrictID);
156 |                 try
157 |                 {
158 |                     term.generatePayment(log, rnd, 0);
159 |                     term.traceScreen(log);
160 |                     term.execute(log, db);
161 |                     parent.resultAppend(term);
162 |                     term.traceScreen(log);
163 |                 }
164 |                 catch (CommitException e)
165 |                 {
166 |                     continue;
167 |                 }
168 |                 catch (Exception e)
169 |                 {
170 |                     log.fatal(e.getMessage());
171 |                     e.printStackTrace();
172 |                     System.exit(4);
173 |                 }
174 |                 transactionTypeName = "Payment";
175 |             }
176 |             else if(transactionType <= paymentWeight + stockLevelWeight)
177 |             {
178 |                 jTPCCTData term = new jTPCCTData();
179 |                 term.setNumWarehouses(numWarehouses);
180 |                 term.setWarehouse(terminalWarehouseID);
181 |                 term.setDistrict(terminalDistrictID);
182 |                 try
183 |                 {
184 |                     term.generateStockLevel(log, rnd, 0);
185 |                     term.traceScreen(log);
186 |                     term.execute(log, db);
187 |                     parent.resultAppend(term);
188 |                     term.traceScreen(log);
189 |                 }
190 |                 catch (CommitException e)
191 |                 {
192 |                     continue;
193 |                 }
194 |                 catch (Exception e)
195 |                 {
196 |                     log.fatal(e.getMessage());
197 |                     e.printStackTrace();
198 |                     System.exit(4);
199 |                 }
200 |                 transactionTypeName = "Stock-Level";
201 |             }
202 |             else if(transactionType <= paymentWeight + stockLevelWeight + orderStatusWeight)
203 |             {
204 |                 jTPCCTData term = new jTPCCTData();
205 |                 term.setNumWarehouses(numWarehouses);
206 |                 term.setWarehouse(terminalWarehouseID);
207 |                 term.setDistrict(terminalDistrictID);
208 |                 try
209 |                 {
210 |                     term.generateOrderStatus(log, rnd, 0);
211 |                     term.traceScreen(log);
212 |                     term.execute(log, db);
213 |                     parent.resultAppend(term);
214 |                     term.traceScreen(log);
215 |                 }
216 |                 catch (CommitException e)
217 |                 {
218 |                     continue;
219 |                 }
220 |                 catch (Exception e)
221 |                 {
222 |                     log.fatal(e.getMessage());
223 |                     e.printStackTrace();
224 |                     System.exit(4);
225 |                 }
226 |                 transactionTypeName = "Order-Status";
227 |             }
228 |             else if(transactionType <= paymentWeight + stockLevelWeight + orderStatusWeight + deliveryWeight)
229 |             {
230 |                 jTPCCTData term = new jTPCCTData();
231 |                 term.setNumWarehouses(numWarehouses);
232 |                 term.setWarehouse(terminalWarehouseID);
233 |                 term.setDistrict(terminalDistrictID);
234 |                 try
235 |                 {
236 |                     term.generateDelivery(log, rnd, 0);
237 |                     term.traceScreen(log);
238 |                     term.execute(log, db);
239 |                     parent.resultAppend(term);
240 |                     term.traceScreen(log);
241 |
242 |                     /*
243 |                      * The old style driver does not have a delivery
244 |                      * background queue, so we have to execute that
245 |                      * part here as well.
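                     * Executing it inline also means the background
                     * part's response time is included in the Delivery
                     * timing measured below.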
246 |                      */
247 |                     jTPCCTData bg = term.getDeliveryBG();
248 |                     bg.traceScreen(log);
249 |                     bg.execute(log, db);
250 |                     parent.resultAppend(bg);
251 |                     bg.traceScreen(log);
252 |
253 |                     skippedDeliveries = bg.getSkippedDeliveries();
254 |                 }
255 |                 catch (CommitException e)
256 |                 {
257 |                     continue;
258 |                 }
259 |                 catch (Exception e)
260 |                 {
261 |                     log.fatal(e.getMessage());
262 |                     e.printStackTrace();
263 |                     System.exit(4);
264 |                 }
265 |                 transactionTypeName = "Delivery";
266 |             }
267 |             else
268 |             {
269 |                 jTPCCTData term = new jTPCCTData();
270 |                 term.setNumWarehouses(numWarehouses);
271 |                 term.setWarehouse(terminalWarehouseID);
272 |                 term.setDistrict(terminalDistrictID);
273 |                 try
274 |                 {
275 |                     term.generateNewOrder(log, rnd, 0);
276 |                     term.traceScreen(log);
277 |                     term.execute(log, db);
278 |                     parent.resultAppend(term);
279 |                     term.traceScreen(log);
280 |                 }
281 |                 catch (CommitException e)
282 |                 {
283 |                     continue;
284 |                 }
285 |                 catch (Exception e)
286 |                 {
287 |                     log.fatal(e.getMessage());
288 |                     e.printStackTrace();
289 |                     System.exit(4);
290 |                 }
291 |                 transactionTypeName = "New-Order";
292 |                 newOrderCounter++;
293 |                 newOrder = 1;
294 |             }
295 |
296 |             long transactionEnd = System.currentTimeMillis();
297 |
298 |             if(!transactionTypeName.equals("Delivery"))
299 |             {
300 |                 parent.signalTerminalEndedTransaction(this.terminalName, transactionTypeName, transactionEnd - transactionStart, null, newOrder);
301 |             }
302 |             else
303 |             {
304 |                 parent.signalTerminalEndedTransaction(this.terminalName, transactionTypeName, transactionEnd - transactionStart, (skippedDeliveries == 0 ? "None" : "" + skippedDeliveries + " delivery(ies) skipped."), newOrder);
305 |             }
306 |
307 |             if(limPerMin_Terminal>0){
308 |                 long elapse = transactionEnd-transactionStart;
309 |                 long timePerTx = 60000/limPerMin_Terminal;
310 |
311 |                 if(elapse
 6 ? dS.substring(0, 6) : dS;
65 |     }
66 |
67 |     public static String getConfig(String db, Properties dbProps, String option)
68 |     throws Exception
69 |     {
70 |         ResultSet rs;
71 |         String value;
72 |
73 |         if (dbConn == null)
74 |         {
75 |             dbConn = DriverManager.getConnection(db, dbProps);
76 |             stmtGetConfig = dbConn.prepareStatement(
77 |                 "SELECT cfg_value FROM bmsql_config " +
78 |                 "    WHERE cfg_name = ?");
79 |         }
80 |         stmtGetConfig.setString(1, option);
81 |         rs = stmtGetConfig.executeQuery();
82 |         if (!rs.next())
83 |             throw new Exception("DB Load configuration parameter '" +
84 |                 option + "' not found");
85 |         value = rs.getString("cfg_value");
86 |         rs.close();
87 |
88 |         return value;
89 |     }
90 |
91 | } // end jTPCCUtil
92 |
--------------------------------------------------------------------------------
/src/jdbc/ExecJDBC.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * ExecJDBC - Command line program to process SQL DDL statements, from
3 |  * a text input file, to any JDBC Data Source
4 |  *
5 |  * Copyright (C) 2004-2016, Denis Lussier
6 |  * Copyright (C) 2016, Jan Wieck
7 |  *
8 |  */
9 |
10 | import java.io.*;
11 | import java.sql.*;
12 | import java.util.*;
13 |
14 |
15 | public class ExecJDBC {
16 |
17 |
18 |     public static void main(String[] args) {
19 |
20 |         Connection conn = null;
21 |         Statement stmt = null;
22 |         String rLine = null;
23 |         StringBuffer sql = new StringBuffer();
24 |
25 |         try {
26 |
27 |             Properties ini = new Properties();
28 |             ini.load( new FileInputStream(System.getProperty("prop")));
29 |
30 |             // Register the JDBC driver
31 |             Class.forName(ini.getProperty( "driver" ));
32 |
33 |             // Make the connection
34 |             conn = DriverManager.getConnection(ini.getProperty("conn"),
35 |                 ini.getProperty("user"),ini.getProperty("password"));
36 |             conn.setAutoCommit(true);
37 |
38 |             // Create a Statement
39 |             stmt = conn.createStatement();
40 |
41 |             // Open the input file
42 |             BufferedReader in = new BufferedReader
43 |                 (new FileReader(jTPCCUtil.getSysProp("commandFile",null)));
44 |
45 |             // Loop through the input file and concatenate SQL statement fragments
46 |             while((rLine = in.readLine()) != null) {
47 |
48 |                 String line = rLine.trim();
49 |
50 |                 if (line.length() != 0) {
51 |                     if (line.startsWith("--")) {
52 |                         System.out.println(line);    // print comment line
53 |                     } else {
54 |                         if (line.endsWith("\\;"))
55 |                         {
56 |                             sql.append(line.replaceAll("\\\\;", ";"));
57 |                             sql.append("\n");
58 |                         }
59 |                         else
60 |                         {
61 |                             sql.append(line.replaceAll("\\\\;", ";"));
62 |                             if (line.endsWith(";")) {
63 |                                 String query = sql.toString();
64 |
65 |                                 execJDBC(stmt, query.substring(0, query.length() - 1));
66 |                                 sql = new StringBuffer();
67 |                             } else {
68 |                                 sql.append("\n");
69 |                             }
70 |                         }
71 |                     }
72 |
73 |                 } //end if
74 |
75 |             } //end while
76 |
77 |             in.close();
78 |
79 |         } catch(IOException ie) {
80 |             System.out.println(ie.getMessage());
81 |
82 |         } catch(SQLException se) {
83 |             System.out.println(se.getMessage());
84 |
85 |         } catch(Exception e) {
86 |             e.printStackTrace();
87 |
88 |         // fall through to finally and exit cleanly
89 |         } finally {
90 |             try {
91 |                 if (conn != null)
92 |                     conn.close();
93 |             } catch(SQLException se) {
94 |                 se.printStackTrace();
95 |             } // end finally
96 |
97 |         } // end try
98 |
99 |     } // end main
100 |
101 |
102 |     static void execJDBC(Statement stmt, String query) {
103 |
104 |         System.out.println(query + ";");
105 |
106 |         try {
107 |             stmt.execute(query);
108 |         }catch(SQLException se) {
109 |             System.out.println(se.getMessage());
110 |         } // end try
111 |
112 |     } // end execJDBC
113 |
114 | } // end ExecJDBC Class
115 |
--------------------------------------------------------------------------------
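
A minimal, self-contained sketch of the non-uniform (NURand) C_LAST
generation that jTPCCRandom implements above, including the TPC-C 2.1.6.1
run-time C value check. This is illustrative only and not part of the
BenchmarkSQL sources; the class name NURandDemo and its main() driver are
invented for this example.

    import java.util.Random;

    // Illustrative demo of TPC-C 2.1.6 NURand C_LAST -- not part of
    // BenchmarkSQL.
    public class NURandDemo
    {
        private static final String[] cLastTokens = {
            "BAR", "OUGHT", "ABLE", "PRI", "PRES",
            "ESE", "ANTI", "CALLY", "ATION", "EING"};

        private static final Random rnd = new Random();

        // Uniform random long in [x .. y], like jTPCCRandom.nextLong().
        static long nextLong(long x, long y)
        {
            return (long)(rnd.nextDouble() * (y - x + 1) + x);
        }

        // Map [0 .. 999] to the three-syllable C_LAST representation.
        static String getCLast(int num)
        {
            String result = "";
            for (int i = 0; i < 3; i++)
            {
                result = cLastTokens[num % 10] + result;
                num /= 10;
            }
            return result;
        }

        public static void main(String[] args)
        {
            // C value chosen while the database was loaded.
            long cLoad = nextLong(0, 255);
            long cRun;
            long delta;

            // Pick a run-time C that satisfies TPC-C 2.1.6.1: the
            // distance to cLoad must be in [65 .. 119], excluding
            // 96 and 112 -- the same rule as jTPCCRandom(CLoad).
            do {
                cRun = nextLong(0, 255);
                delta = Math.abs(cRun - cLoad);
            } while (delta < 65 || delta > 119 || delta == 96 || delta == 112);

            // NURand(255, 0, 999): OR two uniform draws, add C, wrap.
            long num = ((nextLong(0, 255) | nextLong(0, 999)) + cRun) % 1000;
            System.out.println("C_load=" + cLoad + " C_run=" + cRun +
                               " C_LAST=" + getCLast((int)num));
        }
    }

Compiling and running this class by itself (javac NURandDemo.java &&
java NURandDemo) prints one skewed last name, e.g. "BARPRESESE"; the OR
of the two uniform draws is what concentrates the distribution on a
subset of the 1000 possible names.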