├── .gitignore
├── .pydevproject
├── .readthedocs.yaml
├── Dockerfile
├── README.md
├── benchmarkctl.py
├── docs
│   ├── BUILDING-AWS-EC2.md
│   ├── BUILDING-ROCKY8.md
│   ├── BUILDING.md
│   ├── BenchmarkSQL.png
│   ├── CHANGE-LOG.md
│   ├── CONTRIBUTING.md
│   ├── DOCKER.md
│   ├── HOW-TO-RUN-Oracle.md
│   ├── HOW-TO-RUN-Postgres.md
│   ├── HOW-TO-RUN.md
│   ├── LICENSE.txt
│   ├── PROPERTIES.md
│   ├── RELEASE.md
│   ├── TPC-C_ERD.svg
│   ├── TPCC.md
│   ├── TUTORIAL-1.md
│   ├── TimedDriver-1.svg
│   ├── TimedDriver.md
│   ├── index.md
│   ├── screenshots
│   │   └── tut1-bmsql-ui-1.png
│   └── tutorial-1
│       └── screen-001.png
├── mkdocs.yml
├── podman-build.sh
├── podman-run.sh
├── pom.xml
└── src
    ├── .gitignore
    └── main
        ├── java
        │   └── com
        │       └── github
        │           └── pgsqlio
        │               └── benchmarksql
        │                   ├── application
        │                   │   ├── AppGeneric.java
        │                   │   ├── dummy
        │                   │   │   └── AppOracleStoredProc.java
        │                   │   ├── oracle
        │                   │   │   └── AppOracleStoredProc.java
        │                   │   └── postgres
        │                   │       └── AppPostgreSQLStoredProc.java
        │                   ├── jdbc
        │                   │   └── ExecJDBC.java
        │                   ├── jtpcc
        │                   │   ├── jTPCC.java
        │                   │   ├── jTPCCApplication.java
        │                   │   ├── jTPCCConfig.java
        │                   │   ├── jTPCCMonkey.java
        │                   │   ├── jTPCCRandom.java
        │                   │   ├── jTPCCResult.java
        │                   │   ├── jTPCCSUT.java
        │                   │   ├── jTPCCScheduler.java
        │                   │   ├── jTPCCTData.java
        │                   │   └── jTPCCTDataList.java
        │                   ├── loader
        │                   │   ├── LoadData.java
        │                   │   ├── LoadDataWorker.java
        │                   │   └── LoadJob.java
        │                   └── oscollector
        │                       └── OSCollector.java
        └── resources
            ├── .gitignore
            ├── FlaskService
            │   ├── benchmarksql.py
            │   ├── main.py
            │   ├── sample.last.properties
            │   └── templates
            │       └── main.html
            ├── benchmarkctl.py
            ├── checks
            │   ├── check_details.sql
            │   └── checks.sql
            ├── funcs.sh
            ├── generateReport.py
            ├── generateReport
            │   ├── __init__.py
            │   ├── bmsqlPlot.py
            │   ├── bmsqlResult.py
            │   ├── main.py
            │   └── templates
            │       ├── bmsql.css
            │       ├── img_cpu.html
            │       ├── img_disk_octets.html
            │       ├── img_disk_ops.html
            │       ├── img_interface_octets.html
            │       ├── img_interface_packets.html
            │       ├── img_latency.html
            │       ├── img_memory.html
            │       ├── img_nopm.html
            │       ├── inc_latency_explain.html
            │       ├── inc_nopm_explain.html
            │       ├── inc_summary_explain.html
            │       ├── inc_summary_table.html
            │       ├── report_extended.html
            │       └── report_simple.html
            ├── log4j2.xml
            ├── mcCollectdGraphite.py
            ├── mcCollectdMqtt.py
            ├── mcPrometheus.py
            ├── requirements.txt
            ├── runBenchmark.sh
            ├── runDatabaseBuild.sh
            ├── runDatabaseDestroy.sh
            ├── runLoader.sh
            ├── runSQL.sh
            ├── sample.firebird.properties
            ├── sample.mariadb.properties
            ├── sample.oracle.properties
            ├── sample.postgresql.properties
            ├── sample.transact-sql.properties
            ├── sql.babelfish
            │   └── tableCreates.sql
            ├── sql.common
            │   ├── buildFinish.sql
            │   ├── extraCommandsBeforeLoad.sql
            │   ├── foreignKeys.sql
            │   ├── indexCreates.sql
            │   ├── indexDrops.sql
            │   ├── storedProcedureCreates.sql
            │   ├── storedProcedureDrops.sql
            │   ├── tableCreates.sql
            │   ├── tableDrops.sql
            │   └── tableTruncates.sql
            ├── sql.mariadb
            │   ├── tableCreates.sql
            │   └── tableDrops.sql
            ├── sql.oracle
            │   ├── storedProcedureCreates.sql
            │   ├── storedProcedureDrops.sql
            │   └── tableCreates.sql
            ├── sql.postgres
            │   ├── buildFinish.sql
            │   ├── extraCommandsBeforeLoad.sql
            │   ├── storedProcedureCreates.sql
            │   ├── storedProcedureDrops.sql
            │   └── tableCopies.sql
            └── sql.transact-sql
                └── tableCreates.sql
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | dist
3 | results
4 | service_data
5 | /bin/
6 | /target/
7 | .classpath
8 | .project
9 | .settings
10 | __pycache__
11 |
--------------------------------------------------------------------------------
/.pydevproject:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
2 | <?eclipse-pydev version="1.0"?><pydev_project>
3 | 
4 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
5 | 
6 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python interpreter</pydev_property>
7 | 
8 | </pydev_project>
9 | 
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | mkdocs:
9 | configuration: mkdocs.yml
10 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Note: Build this image only after Maven has successfully built the target directory.
2 |
3 | FROM rockylinux:9
4 |
5 | RUN dnf -y update
6 |
7 | RUN dnf -y install epel-release \
8 | java-17-openjdk-headless \
9 | dnf-plugins-core \
10 | python3
11 | RUN dnf config-manager --set-enabled crb
12 | RUN dnf install python3 python3-pip -y
13 | RUN pip3 install pip --upgrade
14 |
15 | ENV JAVA_TOOL_OPTIONS=-Dfile.encoding=UTF8
16 | ENV FLASK_ENV=development
17 |
18 | COPY ./target/BenchmarkSQL.jar /benchmarksql/BenchmarkSQL.jar
19 | COPY ./target/lib/ /benchmarksql/lib
20 | COPY ./target/run/ /benchmarksql/run
21 |
22 | RUN mkdir -p /service_data && \
23 | rm -f /benchmarksql/run/.jTPCC_run_seq.dat && \
24 | rm -f /benchmarksql/run/benchmark.log && \
25 | rm -f /benchmarksql/run/terminalio.log && \
26 | mkdir -p /benchmarksql/.config/matplotlib && \
27 | chmod 777 /benchmarksql/.config/matplotlib && \
28 | mkdir -p /benchmarksql/.cache/matplotlib && \
29 | chmod 777 /benchmarksql/.cache/matplotlib && \
30 | ln -s /service_data/run_seq.dat /benchmarksql/run/.jTPCC_run_seq.dat && \
31 | ln -s /service_data/benchmark.log /benchmarksql/run/benchmark.log && \
32 | ln -s /service_data/terminalio.log /benchmarksql/run/terminalio.log && \
33 | pip3 install -r /benchmarksql/run/requirements.txt
34 |
35 | CMD ["python3", "/benchmarksql/run/FlaskService/main.py"]
36 |
37 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BenchmarkSQL
2 |
3 | BenchmarkSQL is a [GPLv2](docs/LICENSE.txt) fair-use TPC-C like testing
4 | tool.
5 |
6 | ## Overview
7 |
8 | BenchmarkSQL is implemented in Java, using JDBC to stress test SQL databases.
9 | The overall architecture is a series of data structures, queues and thread
10 | groups that handle the simulated terminals, users and application threads.
11 |
12 | Its architecture allows BenchmarkSQL to drive TPC-C configurations up to many
13 | thousands of warehouses (known as the scaling factor) without overwhelming the
14 | job scheduler of the test driver itself.
15 | Yet it is capable of doing so without sacrificing one of the most important
16 | measurements in a TPC-C, the end-user experienced response time at the terminal.
17 |
18 | ![TimedDriver](docs/TimedDriver-1.svg)
19 |
20 | Please read the [Full Architecture Description](docs/TimedDriver.md)
21 | for a detailed explanation of the above diagram.
22 |
23 | ## Building
24 |
25 | BenchmarkSQL V6 is meant to be built into a [Docker](https://www.docker.com/)
26 | container and controlled via its Flask based WEB UI and/or API. This allows
27 | for easy deployment of the benchmark driver on servers and cloud systems
28 | while controlling it through a browser or via scripts.
29 |
30 | See the [build instructions](docs/BUILDING.md) for details.
31 |
32 |
33 | # Configuring and Running a Benchmark
34 |
35 | BenchmarkSQL is configured with files in Java properties format.
36 | A detailed description of all parameters in that file can be found
37 | [here](docs/PROPERTIES.md).
38 |
39 | [comment]: # (TODO: ##Automation. Point to a tutorial walking through using the (yet to be written) command line interface.)
40 |
41 |
42 |
--------------------------------------------------------------------------------
/benchmarkctl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | import requests
4 | import json
5 | import re
6 | import sys
7 | 
8 | def main():
9 |     if len(sys.argv) < 5:
10 |         usage()
11 |         sys.exit(2)
12 | 
13 |     with open(sys.argv[1], 'r') as fd:
14 |         config = json.load(fd)
15 | 
16 |     command = sys.argv[2]
17 |     appnode = sys.argv[3]
18 |     dbnode = sys.argv[4]
19 | 
20 |     # Only KEY=VALUE arguments are property overrides; other trailing
21 |     # arguments (like the run_id of txsummary) are handled per command.
22 |     extra_opts = {}
23 |     for opt in sys.argv[5:]:
24 |         if '=' not in opt:
25 |             continue
26 |         key, val = opt.split('=', 1)
27 |         extra_opts[key] = val
28 | 
29 |     # TODO: sanity checks for all args
30 | 
31 |     bmsql = BenchmarkSQL(config, appnode, dbnode, extra_opts)
32 | 
33 |     if command == 'build':
34 |         result = bmsql.build()
35 |         print(result['current_job_type'])
36 |     elif command == 'destroy':
37 |         result = bmsql.destroy()
38 |         print(result['current_job_type'])
39 |     elif command == 'run':
40 |         result = bmsql.run()
41 |         print(result['current_job_type'])
42 |         print(result['current_job_id'])
43 |     elif command == 'cancel':
44 |         result = bmsql.cancel()
45 |         print(result['current_job_type'])
46 |     elif command == 'status':
47 |         result = bmsql.status()
48 |         print(result['current_job_type'])
49 |         print(result['current_job_id'])
50 |     elif command == 'txsummary':
51 |         result = bmsql.txsummary(sys.argv[5])
52 |         print(json.dumps(result['txsummary'], indent=2))
53 |     else:
54 |         print("unknown command '%s'"%(command,))
55 |         sys.exit(2)
56 | 
57 | def usage():
58 |     print("""usage: benchmarkctl CONFIG.json COMMAND APPNODE DBNODE
59 | """, file = sys.stderr)
60 | 
61 | 
62 | class BenchmarkSQL:
63 |     def __init__(self, config, appnode, dbnode, extra_opts = None):
64 |         # Read the properties template and apply the overrides from the
65 |         # config file (global, then dbnode, then appnode, then command line).
66 |         with open(config['properties_template'], 'r') as fd:
67 |             properties = fd.read()
68 |         overrides = config['properties']
69 |         overrides.update(config['dbnodes'][dbnode]['properties'])
70 |         overrides.update(config['appnodes'][appnode]['properties'])
71 |         if extra_opts is not None:
72 |             overrides.update(extra_opts)
73 |         for key in overrides:
74 |             properties, n = re.subn('^%s=.*$'%(key),
75 |                                     '%s=%s'%(key, overrides[key]),
76 |                                     properties,
77 |                                     flags = re.MULTILINE)
78 |             if n == 0:
79 |                 properties += "\n\n" + key + "=" + overrides[key] + "\n"
80 | 
81 |         self.config = config
82 |         self.appnode = appnode
83 |         self.appconf = config['appnodes'][appnode]
84 |         self.dbnode = dbnode
85 |         self.dbconf = config['dbnodes'][dbnode]
86 |         self.properties = properties
87 | 
88 |     def status(self):
89 |         url = self.appconf['api_url']
90 |         req = {
91 |             'command': 'status'
92 |         }
93 |         res = requests.post(url, data = {'request': json.dumps(req)})
94 |         return json.loads(res.text)
95 | 
96 |     def txsummary(self, run_id):
97 |         url = self.appconf['api_url']
98 |         req = {
99 |             'command': 'txsummary',
100 |             'run_id': int(run_id)
101 |         }
102 |         res = requests.post(url, data = {'request': json.dumps(req)})
103 |         return json.loads(res.text)
104 | 
105 |     def build(self):
106 |         url = self.appconf['api_url']
107 |         req = {
108 |             'command': 'build',
109 |             'properties': self.properties
110 |         }
111 |         res = requests.post(url, data = {'request': json.dumps(req)})
112 |         return json.loads(res.text)
113 | 
114 |     def destroy(self):
115 |         url = self.appconf['api_url']
116 |         req = {
117 |             'command': 'destroy',
118 |             'properties': self.properties
119 |         }
120 |         res = requests.post(url, data = {'request': json.dumps(req)})
121 |         return json.loads(res.text)
122 | 
123 |     def run(self):
124 |         url = self.appconf['api_url']
125 |         req = {
126 |             'command': 'run',
127 |             'properties': self.properties
128 |         }
129 |         res = requests.post(url, data = {'request': json.dumps(req)})
130 |         return json.loads(res.text)
131 | 
132 |     def cancel(self):
133 |         url = self.appconf['api_url']
134 |         req = {
135 |             'command': 'cancel',
136 |             'properties': self.properties
137 |         }
138 |         res = requests.post(url, data = {'request': json.dumps(req)})
139 |         return json.loads(res.text)
140 | 
141 | if __name__ == '__main__':
142 |     main()
143 | 
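The JSON configuration file this script reads is not included here. A hypothetical minimal sketch, using only the keys the script actually accesses (`properties_template`, `properties`, `appnodes.*.api_url`, `appnodes.*.properties`, `dbnodes.*.properties`); the node names, host addresses, and API path are made up:

```
{
  "properties_template": "sample.postgresql.properties",
  "properties": {
    "warehouses": "100"
  },
  "appnodes": {
    "driver1": {
      "api_url": "http://172.21.8.128:5000/api",
      "properties": {}
    }
  },
  "dbnodes": {
    "pg1": {
      "properties": {
        "conn": "jdbc:postgresql://172.21.8.33:5432/benchmarksql1"
      }
    }
  }
}
```

With such a file, an invocation would look like `./benchmarkctl.py config.json status driver1 pg1`.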
--------------------------------------------------------------------------------
/docs/BUILDING.md:
--------------------------------------------------------------------------------
1 | # Building BenchmarkSQL
2 |
3 | BenchmarkSQL V6 is meant to be built into a [Docker](https://www.docker.com/)
4 | container and controlled via its Flask based WEB UI and/or API.
5 | This allows for easy deployment of the benchmark driver on servers and cloud
6 | systems while controlling it through a browser or via scripts.
7 |
8 | ## Requirements
9 |
10 | The requirements to run BenchmarkSQL are:
11 |
12 | [comment]: # (TODO update java version, and package to install.)
13 |
14 | * Java development environment (java-1.8.0-openjdk-devel or newer).
15 | * Maven build tool for Java.
16 | * Docker and a user account authorized to use it. This depends on your OS.
17 |   On RedHat based systems the usual way is to install Docker via
18 |   `sudo yum install -y docker` and to make the users who are allowed to use it
19 |   members of the group **docker** by running the command
20 |   `sudo usermod -a -G docker <username>`.
21 |
22 | ## Building process
23 |
24 | The Java development environment and Maven are required on the build machine
25 | because the Docker container will only have the Java runtime installed. So
26 | the `BenchmarkSQL.jar` file needs to be built outside the container.
27 |
28 | After installing the above requirements and cloning the BenchmarkSQL
29 | git repository (assuming username **wieck** and cloned into ~/benchmarksql):
30 |
31 | ```
32 | $ cd ~/benchmarksql
33 | $ mvn
34 | ```
35 |
36 | This will create a lot of output:
37 |
38 | ```
39 | [INFO] Scanning for projects...
40 | [INFO]
41 | [INFO] ------------------< com.github.pgsql-io:benchmarksql >------------------
42 | [INFO] Building A TPC-C like test tool 6.0.0-SNAPSHOT
43 | [INFO] --------------------------------[ jar ]---------------------------------
44 | [INFO]
45 | [INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ benchmarksql ---
46 | [INFO] Deleting /Users/wieck/git/benchmarksql-6/target
47 | [INFO]
48 | [INFO] --- maven-resources-plugin:3.2.0:resources (default-resources) @ benchmarksql ---
49 | [INFO] Using 'UTF-8' encoding to copy filtered resources.
50 | [INFO] Using 'UTF-8' encoding to copy filtered properties files.
51 | [INFO] Copying 49 resources
52 | [INFO]
53 | [INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ benchmarksql ---
54 | [INFO] Changes detected - recompiling the module!
55 | [INFO] Compiling 18 source files to /Users/wieck/git/benchmarksql-6/target/classes
56 | [INFO]
57 | [INFO] --- maven-resources-plugin:3.2.0:testResources (default-testResources) @ benchmarksql ---
58 | [INFO] Using 'UTF-8' encoding to copy filtered resources.
59 | [INFO] Using 'UTF-8' encoding to copy filtered properties files.
60 | [INFO] skip non existing resourceDirectory /Users/wieck/git/benchmarksql-6/src/test/resources
61 | [INFO]
62 | [INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ benchmarksql ---
63 | [INFO] No sources to compile
64 | [INFO]
65 | [INFO] --- maven-surefire-plugin:2.12.4:test (default-test) @ benchmarksql ---
66 | [INFO] No tests to run.
67 | [INFO]
68 | [INFO] --- maven-jar-plugin:2.4:jar (default-jar) @ benchmarksql ---
69 | [INFO] Building jar: /Users/wieck/git/benchmarksql-6/target/BenchmarkSQL.jar
70 | [INFO]
71 | [INFO] --- maven-dependency-plugin:3.0.0:copy-dependencies (copy-dependencies) @ benchmarksql ---
72 | [INFO] Copying postgresql-42.2.19.jar to /Users/wieck/git/benchmarksql-6/target/lib/postgresql-42.2.19.jar
73 | [INFO] Copying jcc-11.5.5.0.jar to /Users/wieck/git/benchmarksql-6/target/lib/jcc-11.5.5.0.jar
74 | [INFO] Copying mysql-connector-java-8.0.23.jar to /Users/wieck/git/benchmarksql-6/target/lib/mysql-connector-java-8.0.23.jar
75 | [INFO] Copying protobuf-java-3.11.4.jar to /Users/wieck/git/benchmarksql-6/target/lib/protobuf-java-3.11.4.jar
76 | [INFO] Copying jaybird-4.0.3.java11.jar to /Users/wieck/git/benchmarksql-6/target/lib/jaybird-4.0.3.java11.jar
77 | [INFO] Copying mssql-jdbc-9.2.1.jre8.jar to /Users/wieck/git/benchmarksql-6/target/lib/mssql-jdbc-9.2.1.jre8.jar
78 | [INFO] Copying antlr4-runtime-4.7.2.jar to /Users/wieck/git/benchmarksql-6/target/lib/antlr4-runtime-4.7.2.jar
79 | [INFO] Copying log4j-api-2.14.1.jar to /Users/wieck/git/benchmarksql-6/target/lib/log4j-api-2.14.1.jar
80 | [INFO] Copying ojdbc8-21.1.0.0.jar to /Users/wieck/git/benchmarksql-6/target/lib/ojdbc8-21.1.0.0.jar
81 | [INFO] Copying connector-api-1.5.jar to /Users/wieck/git/benchmarksql-6/target/lib/connector-api-1.5.jar
82 | [INFO] Copying checker-qual-3.5.0.jar to /Users/wieck/git/benchmarksql-6/target/lib/checker-qual-3.5.0.jar
83 | [INFO] Copying log4j-core-2.14.1.jar to /Users/wieck/git/benchmarksql-6/target/lib/log4j-core-2.14.1.jar
84 | [INFO] Copying mariadb-java-client-2.7.2.jar to /Users/wieck/git/benchmarksql-6/target/lib/mariadb-java-client-2.7.2.jar
85 | [INFO] ------------------------------------------------------------------------
86 | [INFO] BUILD SUCCESS
87 | [INFO] ------------------------------------------------------------------------
88 | [INFO] Total time: 5.360 s
89 | [INFO] Finished at: 2021-04-13T17:56:05-05:00
90 | [INFO] ------------------------------------------------------------------------
91 | ```
92 |
93 | Your "Total time" will most likely be longer than the 5 seconds shown
94 | above on the first run.
95 |
96 | [comment]: # (TODO To include the docker creation. docker-maven-plugin can be used.)
97 |
98 | Expect it to run for a few minutes as the resulting Docker
99 | image is about 1.7GB in size and a lot of that will be pulled in
100 | over your Internet connection.
101 |
102 |
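Creating the Docker image is not yet wired into the Maven build (see the TODO above). A minimal sketch of the manual step, assuming the image name **benchmarksql-v6.0** that [DOCKER.md](DOCKER.md) expects:

```
$ cd ~/benchmarksql
$ mvn
$ docker build -t benchmarksql-v6.0 .
```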
--------------------------------------------------------------------------------
/docs/BenchmarkSQL.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wieck/benchmarksql/98f5cb4b446f4296ca5e85ad27303482a6bbd624/docs/BenchmarkSQL.png
--------------------------------------------------------------------------------
/docs/CHANGE-LOG.md:
--------------------------------------------------------------------------------
1 | # BenchmarkSQL change log
2 |
3 |
4 | ## Version 6.0
5 |
6 | TODO. jannicash:
7 |
8 | * Complete rewrite of the benchmark driver into a scheduler based system that
9 | can measure end user (terminal) experienced response times.
10 | * Refactoring
11 | * Project lifecycle manager with Maven instead of Ant.
12 | * Classes in packages.
13 | * Updated to log4j v2, and all output is managed by this framework.
14 | * TODO
15 |
16 | ## Version 5.1
17 |
18 | 2018-12-13. lussman & jannicash:
19 |
20 | * Clarify License.
21 | * Remove deprecated Oracle proprietary features (allows building without the
22 |   ORA JDBC driver).
23 | * Change result graphs to inlined SVGs.
24 | * Add an option to skip rampup time in the report.
25 | * Add CPU utilization summary to report.
26 | * Numerous bug fixes.
27 |
28 | ## Version 5.0
29 |
30 | 2016-05-25. lussman & jannicash:
31 |
32 | * Upgrade to PostgreSQL 9.3 JDBC 4.1 version 1102 driver.
33 | * Improve support for Oracle.
34 | * Re-implement the non-uniform random generator in TPC-C style.
35 | * Conform to clause 4.3.3.1 and enable lookup by last name.
36 | * Add a switch to disable terminal-warehouse association, spreading the data
37 | access over all configured warehouses.
38 | * Re-worked the run shell scripts and the location of SQL files to make support
39 | of more database types easier.
40 | * Add support for Firebirdsql.
41 | * Add FOREIGN KEYS as defined by TPC-C 1.3.
42 | * Major code overhaul. The per transaction type terminal data generation,
43 | execution and terminal trace code is moved into a module jTPCCTData. The
44 | database connection with all prepared statements has moved into a module
45 | jTPCCConnection.
46 | * Add collecting per transaction result data and OS Level resource usage
47 | collection. The R statistics package is used to graph detailed information
48 | and a complete report in HTML can be generated from the data.
49 |
50 | ## Version 4.1.2
51 |
52 | 2016-05-16. jannicash:
53 |
54 | * Fixed one more preparedStatement() leak. Hopefully with the help of Oracle's
55 | V$OPEN_CURSOR view we got them all now.
56 | * Fixed a possible deadlock problem in the NEW_ORDER transaction. Multiple
57 | parallel transactions could attempt to lock the same STOCK rows in reverse
58 | order. Sorting the order lines by item ID avoids this problem.
59 |
60 | ## Version 4.1.1
61 |
62 | 2016-01-31. jannicash:
63 |
64 | * Changed the status line to update only once per second. The previous
65 | implementation was getting rather noisy at high throughput.
66 | * Fixed two preparedStatement() leaks that could cause ORA-01000 errors on
67 | longer runs with high throughput.
68 | * Fixed a problem in the calculation of sleep time between transactions when
69 | using limitTxnsPerMin that could cause the test to hang at the end.
70 | * Added support for escaping ; as \; in SQL files to be able to load functions
71 | and execute anonymous PL blocks (needed for next item).
72 | * Changed the definition of history.hist_id into a plain integer with no
73 | special functionality. Two new database vendor specific SQL scripts allow
74 | enabling the column after data load as an auto incrementing primary key. See
75 | HOW-TO-RUN.txt for details.
76 |
77 | ## Version 4.1.0
78 |
79 | 2014-03-13. lussman:
80 |
81 | * Upgrade to using JDK 7.
82 | * Upgrade to PostgreSQL JDBC 4.1 version 1101 driver.
83 | * Stop claiming to support DB2 (only Postgres & Oracle are well tested).
84 |
85 | ## Version 4.0.9
86 |
87 | 2013-11-04. cadym:
88 |
89 | * Incorporate new PostgreSQL JDBC 4 version 1100 driver.
90 | * Changed default user from postgres to benchmarksql.
91 | * Added id column as primary key to history table.
92 | * Renamed schema to benchmarksql.
93 | * Changed log4j format to be more readable.
94 | * Created the "benchmark" schema to contain all tables.
95 | * Incorporate new PostgreSQL JDBC4 version 1003 driver.
96 | * Transaction rate pacing mechanism.
97 | * Correct error with loading customer table from csv file.
98 | * Status line report dynamically shown on terminal.
99 | * Fix lookup by name in PaymentStatus and Delivery Transactions (in order to be
100 | more compatible with the TPC-C spec).
101 | * Rationalized the variable naming in the input parameter files (now that the
102 | GUI is gone, variable names still make sense).
103 | * Default log4j settings only writes to file (not terminal).
104 |
105 | ## Version 4.0.2
106 |
107 | 2013-06-06. lussman & cadym:
108 |
109 | * Removed Swing & AWT GUI so that this program is runnable from the command
110 | line.
111 | * Remove log4j usage from runSQL & runLoader (only used now for the actual
112 | running of the Benchmark).
113 | * Fix truncation problem with customer.csv file.
114 | * Comment out "BadCredit" business logic that was not working and throwing
115 | stack traces.
116 | * Fix log4j messages to always show the terminal name.
117 | * Remove bogus log4j messages.
118 |
119 | ## Version 3.0.9
120 |
121 | 2013-03-21. lussman:
122 |
123 | * Config log4j for rotating log files once per minute.
124 | * Default flat file location to '/tmp/csv/' in table copies script.
125 | * Drop incomplete & untested Windoze '.bat' scripts.
126 | * Standardize logging with log4j.
127 | * Improve Logging with meaningful DEBUG and INFO levels.
128 | * Simplify "build.xml" to eliminate nbproject dependency.
129 | * Defaults read in from properties.
130 | * Groundwork laid to eliminate the GUI.
131 | * Default GUI console to PostgreSQL and 10 Warehouses.
132 |
133 | ## Version 2.3.5
134 |
135 | 2013-01-29. lussman:
136 |
137 | * Default build is now with JDK 1.6 and JDBC 4 Postgres 9.2 driver.
138 | * Remove outdated JDBC 3 drivers (for JDK 1.5). You can run as before by
139 | supplying a JDBC 4 driver from any supported vendor.
140 | * Remove ExecJDBC warning about trying to rollback when in autocommit mode.
141 | * Remove the extraneous COMMIT statements from the DDL scripts since ExecJDBC
142 | runs in autocommit mode.
143 | * Fix the version number displayed in the console.
144 |
145 | ## Versions 1.0 thru 2.2
146 |
147 | 2004 - 2012. lussman:
148 |
149 | * Dare to Compare.
150 | * Forked from the [jTPCC project](http://jtpcc.sourceforge.net/) -
151 | [Changes](http://jtpcc.sourceforge.net/CHANGES.TXT).
152 |
153 |
--------------------------------------------------------------------------------
/docs/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to BenchmarkSQL
2 |
3 | ## Code
4 |
5 | The code is formatted with Google Java code style.
6 | The details of this format can be found at:
7 | [https://google.github.io/styleguide/javaguide.html](https://google.github.io/styleguide/javaguide.html)
8 |
9 | If you use Eclipse IDE, you can configure this editor with this file:
10 | [https://github.com/google/styleguide/blob/gh-pages/eclipse-java-google-style.xml](https://github.com/google/styleguide/blob/gh-pages/eclipse-java-google-style.xml)
11 |
12 | To configure the formatter, you need to download the xml file and then import it
13 | into Eclipse.
14 |
15 | Preferences > Java > Code style > Formatter > Active profile > Import > Select the downloaded xml file.
16 |
17 | Format the code:
18 |
19 | Right click on the source directory (src/main/java) > Source > Format
20 |
21 | Alternatively, in the class being edited, you can press:
22 |
23 | Ctrl + Shift + F (Linux/Windows)
24 | Cmd + Shift + F (macOS)
25 |
--------------------------------------------------------------------------------
/docs/DOCKER.md:
--------------------------------------------------------------------------------
1 | # Launching the Service Container
2 |
3 | Once the Docker image is built, a container can be started with the
4 | `service-start.sh` script.
5 |
6 | ```
7 | #!/bin/sh
8 |
9 | mkdir -p ./service_data
10 |
11 | docker run --rm -it --name benchmarksql-service \
12 | --publish 5000:5000 \
13 | --volume "`pwd`/service_data:/service_data" \
14 | --user `id -u`:`id -g` \
15 | benchmarksql-v6.0
16 | ```
17 |
18 | * It creates a local directory to preserve configuration and result data.
19 | This directory is mounted into the container.
20 | * It runs the docker image **benchmarksql-v6.0** as a container
21 |   named **benchmarksql-service**. This container runs the
22 |   service under the current user (not root) and forwards port 5000/tcp
23 |   into the container for the Flask UI and API.
24 |
25 | This container will run in the foreground and show the Flask log for debugging
26 | purposes.
27 | To run it in the background simply replace the flags `-it` with `-d`.
28 |
29 | At this point the BenchmarkSQL service is running and you can connect to it with
30 | your browser on [http://localhost:5000](http://localhost:5000).
31 |
32 | If you created this service on a remote machine, don't simply open port 5000/tcp
33 | in the firewall.
34 | **NOTE:** Keep in mind that the configuration file, controlling the benchmark
35 | run settings, contains all the connection credentials for your database in clear
36 | text!
37 | The plan is to substantially enhance the Flask GUI and API with user and
38 | configuration management.
39 | Then, provide instructions on how to secure the container behind an
40 | [nginx](https://www.nginx.com/) reverse proxy for encryption.
41 | In the meantime, please use ssh to tunnel port 5000/tcp securely to the
42 | benchmark driver machine.
43 | Since that tunnel is only for the WEB UI and API traffic, it won't affect the
44 | benchmark results at all.
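For example, a minimal sketch of such a tunnel (with a hypothetical remote host name); afterwards the UI is reachable as [http://localhost:5000](http://localhost:5000) on the local machine:

```
$ ssh -L 5000:localhost:5000 user@benchmark-driver-host
```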
45 |
46 | [comment]: # (TODO: Tutorial of how to use the WEB UI.)
47 |
--------------------------------------------------------------------------------
/docs/HOW-TO-RUN-Oracle.md:
--------------------------------------------------------------------------------
1 | # Instructions for running BenchmarkSQL on Oracle
2 |
3 | Please follow the general instructions for any RDBMS in the
4 | [How to run section](HOW-TO-RUN.md).
5 |
6 | ## Create a user and a database
7 |
8 | The following assumes a default installation of Oracle.
9 |
10 | To create the `benchmarksql` user, run the following commands in `sqlplus`
11 | under the `sysdba` account:
12 |
13 | ```
14 | sqlplus / as sysdba <<_EOF_
15 |
16 | CREATE USER benchmarksql
17 | IDENTIFIED BY "password"
18 | DEFAULT TABLESPACE users
19 | TEMPORARY TABLESPACE temp;
20 |
21 | GRANT CONNECT TO benchmarksql;
22 | GRANT CREATE PROCEDURE TO benchmarksql;
23 | GRANT CREATE SEQUENCE TO benchmarksql;
24 | GRANT CREATE SESSION TO benchmarksql;
25 | GRANT CREATE TABLE TO benchmarksql;
26 | GRANT CREATE TRIGGER TO benchmarksql;
27 | GRANT CREATE TYPE TO benchmarksql;
28 | GRANT UNLIMITED TABLESPACE TO benchmarksql;
29 |
30 | _EOF_
31 | ```
32 |
33 |
--------------------------------------------------------------------------------
/docs/HOW-TO-RUN-Postgres.md:
--------------------------------------------------------------------------------
1 | # Instructions for running BenchmarkSQL on PostgreSQL
2 |
3 | Please follow the general instructions for any RDBMS in the
4 | [How to run section](HOW-TO-RUN.md).
5 |
6 | ## Create a user and a database
7 |
8 | As Unix user `postgres` use the `psql` shell to connect to the `postgres`
9 | database and issue the `CREATE USER` and `CREATE DATABASE` commands.
10 |
11 | ```
12 | $ psql postgres
13 | psql (9.5.2)
14 | Type "help" for help.
15 |
16 | postgres=# CREATE USER benchmarksql WITH ENCRYPTED PASSWORD 'changeme';
17 | postgres=# CREATE DATABASE benchmarksql OWNER benchmarksql;
18 | postgres=# \q
19 | ```
20 |
21 |
--------------------------------------------------------------------------------
/docs/HOW-TO-RUN.md:
--------------------------------------------------------------------------------
1 | # Instructions for running BenchmarkSQL
2 |
3 |
4 | ## Requirements
5 |
6 | Use of JDK8 is required.
7 |
8 | ## Create a user and a database
9 |
10 | Depending on the RDBMS, you need to create the database and a user to access
11 | the database.
12 |
13 | In the HOW-TO-RUN for each RDBMS, you can find the details about this process.
14 |
15 | ## Compile the BenchmarkSQL source code
16 |
17 | For details about how to build the jar file from sources, you can visit the
18 | [Building section](BUILDING.md).
19 |
20 | ## Create the benchmark configuration file
21 |
22 | Under the `target` directory created by Maven, change to the `run` directory,
23 | copy the properties file of your RDBMS and edit the copy to match your system
24 | setup and desired scaling.
25 |
26 | ```
27 | $ cd target/run
28 | $ cp sample.RDBMS.properties my.properties
29 | $ vi my.properties
30 | ```
31 |
32 | Note that the provided example configuration is meant to test the functionality
33 | of your setup, i.e. that BenchmarkSQL can connect to the database and execute
34 | transactions.
35 | That configuration is NOT a benchmark run.
36 | To make it into one you need to have a configuration that matches your database
37 | server size and workload.
38 | Leave the sizing for now and perform a first functional test.
39 |
40 | The BenchmarkSQL database has an initial size of approximately 100MB per
41 | configured warehouse.
42 | A typical setup would be a database of 2-5 times the physical RAM of the server.
43 |
44 | Likewise, the number of concurrent database connections (configuration parameter
45 | `terminals`) should be roughly 2-6 times the number of CPU threads.
46 |
47 | Last but not least, benchmark runs are normally done for hours, if not days.
48 | This is because on the database sizes above, it will take that long to reach a
49 | steady state and make sure that all performance relevant functionality of the
50 | database, like checkpointing and vacuuming, is included in the measurement.
51 |
52 | So with a modern server that has 32-256 CPU threads and 64-512GiB of RAM,
53 | we are talking about thousands of warehouses and hundreds of
54 | concurrent database connections.
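As a worked example of these rules of thumb (hypothetical numbers): a server with 64 CPU threads and 128GiB of RAM suggests a 256-640GB database, i.e. roughly 2,500-6,500 warehouses at ~100MB each, and somewhere between 128 and 384 concurrent connections:

```
# hypothetical sizing for a 64-thread, 128GiB-RAM server
warehouses=4000
terminals=256
```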
55 |
56 | ## Build the schema and initial database load
57 |
58 | Execute the `runDatabaseBuild.sh` script with your configuration file.
59 |
60 | ```
61 | $ ./runDatabaseBuild.sh my.properties
62 | # ------------------------------------------------------------
63 | # Loading SQL file ./sql.common/tableCreates.sql
64 | # ------------------------------------------------------------
65 | create table bmsql_config (
66 | cfg_name varchar(30) primary key,
67 | cfg_value varchar(50)
68 | );
69 | create table bmsql_warehouse (
70 | w_id integer not null,
71 | w_ytd decimal(12,2),
72 | [...]
73 | Starting BenchmarkSQL LoadData
74 |
75 | driver=org.postgresql.Driver
76 | conn=jdbc:postgresql://localhost:5432/benchmarksql
77 | user=benchmarksql
78 | password=***********
79 | warehouses=30
80 | loadWorkers=10
81 | fileLocation (not defined)
82 | csvNullValue (not defined - using default 'NULL')
83 |
84 | Worker 000: Loading ITEM
85 | Worker 001: Loading Warehouse 1
86 | Worker 002: Loading Warehouse 2
87 | Worker 003: Loading Warehouse 3
88 | [...]
89 | Worker 000: Loading Warehouse 30 done
90 | Worker 008: Loading Warehouse 29 done
91 | # ------------------------------------------------------------
92 | # Loading SQL file ./sql.common/indexCreates.sql
93 | # ------------------------------------------------------------
94 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey
95 | primary key (w_id);
96 | alter table bmsql_district add constraint bmsql_district_pkey
97 | primary key (d_w_id, d_id);
98 | [...]
99 | vacuum analyze;
100 | ```
101 |
102 | ## Run the configured benchmark
103 |
104 | Once the tables have the necessary data, you can run the benchmark.
105 |
106 | $ ./runBenchmark.sh my.properties
107 |
108 | The benchmark runs with the configured number of concurrent connections
109 | (`terminals`) for the configured duration or number of transactions.
110 |
111 | The end result of the benchmark will be reported like this:
112 |
113 | ```
114 | 01:58:09,081 [Thread-1] INFO jTPCC : Term-00,
115 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmC (NewOrders) = 179.55
116 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmTOTAL = 329.17
117 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session Start = 2016-05-25 01:58:07
118 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session End = 2016-05-25 01:58:09
119 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Transaction Count = 10
120 | ```
121 |
122 | At this point you have a working setup.
123 |
124 | ## Scale the benchmark configuration
125 |
126 | Change the `my.properties` file to the correct scaling (number of warehouses and
127 | concurrent connections/terminals).
128 | Switch from using a transaction count to a time based run:
129 |
130 | runTxnsPerTerminal=0
131 | runMins=180
132 |
133 | Rebuild the database (if needed) by running:
134 |
135 | $ ./runDatabaseDestroy.sh my.properties
136 | $ ./runDatabaseBuild.sh my.properties
137 |
138 | Then run the benchmark again.
139 |
140 | Rinse and repeat.
141 |
142 | ## Result report
143 |
144 | BenchmarkSQL collects detailed performance statistics and (if configured) OS
145 | performance data.
146 | The example configuration file defaults to a result directory name starting with `my_result_`.
147 |
148 | [//]: # (R files should be replaced with Python ones.)
149 | 
150 | Use the `generateReport.py` script to create an HTML file with graphs.
151 | This requires the Python modules listed in `requirements.txt` to be installed.
152 |
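Assuming `generateReport.py` takes the result directory as its argument, the invocation would look something like this (the actual directory name will differ):

```
$ ./generateReport.py my_result_2021-04-28_1214
```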
153 |
--------------------------------------------------------------------------------
/docs/PROPERTIES.md:
--------------------------------------------------------------------------------
1 | # BenchmarkSQL Properties Files
2 |
3 |
4 | BenchmarkSQL is configured with files in the Java properties format.
5 | The parameters in these files control which JDBC driver to load, what the
6 | database type is, the connection credentials, scaling and so on.
7 |
8 | ## Driver and Connection Parameters
9 |
10 | | Parameter | Description | Example |
11 | | ------------- | ------------------------------- | -------------------------------------- |
12 | | `db` | Database type. This is a string of *firebird*, *mariadb*, *oracle*, *postgres*, *transact-sql* or *babelfish*. There are some differences in SQL dialects that the **Generic** application will handle based on this parameter. An example of this is the handling of **FOR UPDATE**. MSSQL only allows the SQL Standard syntax for that in cursors, but supports a hint **WITH(UPDLOCK)** in a regular **SELECT**. | postgres |
13 | | `driver` | JDBC driver class name to load. | org.postgresql.Driver |
14 | | `application` | The client side application type. BenchmarkSQL supports all database types in the *Generic* application type, which uses JDBC.PreparedStatement() and business logic implemented in the client only. The two alternatives are *PostgreSQLStoredProc* and *OracleStoredProc*, which implement the business logic in stored procedures written in PL/pgSQL or PL/SQL respectively. The BenchmarkSQL client still generates all the screen input, transmits it to the database and receives all the screen output back. The main benefit of these implementations is that they greatly reduce the number of network round trips between application and database. | Generic |
15 | | `conn` | JDBC Connection URI | jdbc:postgresql://dbhost/benchmarksql1 |
16 | | `user` | Database User Name | benchmarksql |
17 | | `password` | Database User Password | ChangeOn1nstall |
18 |
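As an example, the driver and connection section of a properties file for PostgreSQL, assembled from the example column above (host and credentials are placeholders):

```
db=postgres
driver=org.postgresql.Driver
application=Generic
conn=jdbc:postgresql://dbhost/benchmarksql1
user=benchmarksql
password=ChangeOn1nstall
```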
19 | ## Scaling Parameters
20 |
21 | | Parameter | Description | Example |
22 | | --------------------------- | ------------ | -------- |
23 | | `warehouses` | Overall Database Size Scaling Parameter. This not only affects the size of the initial database, but also how many Terminals BenchmarkSQL will simulate. | 2000 |
24 | | `loadWorkers` | The number of parallel threads used to create the initial database content. Should be based on the CPU and IO capacity of the database server. | 8 |
25 | | `monkeys` | The number of monkey threads used to process Terminal input and output. Please refer to the [architecture documentation](./TimedDriver.md) for details. | 8 |
26 | | `sutThreads` | The number of application threads to launch. BenchmarkSQL will create this many parallel database connections to process the incoming requests. | 80 |
27 | | `maxDeliveryBGThreads` | The maximum number of sutThreads that at any time are allowed to process the background part of a Delivery transaction. | 40 |
28 | | `maxDeliveryBGPerWarehouse` | The maximum number of sutThreads that are allowed to process background part of a Delivery transaction "for the same warehouse". | 1 |
29 |
30 | Notes on Delivery Background transactions: The TPC-C has this "bulk"
31 | transaction, called Delivery.
32 | It picks the oldest, not yet delivered `Order` of each `District` of one
33 | `Warehouse` and "delivers" it.
34 | This involves selecting 10 rows from the `New-Order` table `FOR UPDATE`, updating
35 | the corresponding 10 Order rows as well as the on average 100 `Order-Line` rows
36 | and more.
37 |
38 | This background transaction has a very relaxed 80 seconds to complete.
39 | Obviously the selection of those `New-Order` rows, all belonging to the same
40 | `Warehouse`, already creates a locking conflict, so limiting the concurrent
41 | number of Delivery transactions per Warehouse to 1 is a natural choice.
42 | The `maxDeliveryBGThreads` parameter is meant as a control mechanism to prevent
43 | all SUT threads from being busy with this transaction type, while transactions
44 | with tighter response time constraints are waiting in the SUT FiFo queue.
45 |
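Put together with the example values from the tables above, the scaling section of a properties file would read:

```
warehouses=2000
loadWorkers=8
monkeys=8
sutThreads=80
maxDeliveryBGThreads=40
maxDeliveryBGPerWarehouse=1
```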
46 | ## Timing and Test Duration Parameters
47 |
48 | | Parameter | Description | Example |
49 | | ----------------------------- | ------------ | -------- |
50 | | `rampupMins` | Delay in minutes before the actual Benchmark measurement begins. Any transactions executed before this delay are ignored in the statistics and reports. This should be greater than the `rampupTerminalMins` parameter to give the database some time to level out at the full load. | 30 |
51 | | `rampupSUTMins` | Duration in minutes over which the SUT threads (database connections) are launched. This should be less than or equal to `rampupTerminalMins`. | 15 |
52 | | `rampupTerminalMins` | Duration in minutes over which the simulated Terminals are launched. The Terminals are doing all the keying and thinking delays, so spreading the launch of terminals over some time will cause the transaction load to gradually increase, instead of coming as a 0-100 in zero seconds onslaught. This will give the database time to keep up with the load while warming up caches, instead of building up a large backlog of requests in the client application. | 20 |
53 | | `runMins` | Duration of the actual benchmark measurement in minutes. This should be long enough to cover at least one checkpoint. To get a real picture of how the database behaves several hours or even days are recommended. | 300 |
54 | | `reportIntervalSecs` | Interval in seconds at which the test driver reports the current average number of transactions per minute. | 60 |
55 | | `restartSUTThreadProbability` | Probability that the SUT threads will schedule the launch of a replacement and terminate after finishing a request. | 0.001 |
56 |
57 | ## Throughput Tuning Parameters
58 |
59 | **Warning: Changing these parameters from their 1.0 default values
60 | will cause the benchmark result to violate the TPC-C timing requirements.**
61 |
62 | That said, the TPC-C benchmark specification was created at a time when block
63 | terminals and mainframe systems were state of the art.
64 | The ratio of CPU and memory requirements to storage size is
65 | outdated.
66 | Today's databases see a lot more transactions per GB than databases back then.
67 |
68 | The following parameters are an attempt to allow users control over how far
69 | they want to scale up the transaction frequency per warehouse.
70 | The examples show speeding up the attempted rate of transactions 10 times.
71 |
72 | | Parameter | Description | Example |
73 | | ---------------------- | ------------ | ------- |
74 | | `keyingTimeMultiplier` | Keying is the simulated time the user needs to fill in the input screen. This is a fixed number of seconds depending on the transaction type. | 0.1 |
75 | | `thinkTimeMultiplier` | Similar to keying, the think time is the simulated time the user needs to process the transaction outcome by reading the output screen. This is a random time with a defined mean per transaction type. | 0.1 |
76 |
77 | ## Transaction Mix Parameters
78 |
79 | The TPC-C specification requires a minimum percentage for transaction types:
80 |
81 | * 43.0% Payment.
82 | * 4.0% Order-Status.
83 | * 4.0% Stock-Level.
84 | * 4.0% Delivery.
85 |
86 | What is missing from this is the percentage of `New-Order` transactions, which
87 | is simply the remainder. The specification only requires a MINIMUM of 43.0% Payment.
88 | So the benchmark result is invalid should it end up with only 42.98% of them.
89 |
90 | BenchmarkSQL uses a random number generator to pick the next transaction
91 | per terminal.
92 | In order to avoid the above problem it is recommended to specify the required
93 | percentages a little bit higher.
94 |
95 | | Parameter | Description | Example |
96 | | ------------------- | ---------------------------------------------------- | ------- |
97 | | `paymentWeight` | Probability of Payment transactions in percent. | 43.1 |
98 | | `orderStatusWeight` | Probability of Order-Status transactions in percent. | 4.1 |
99 | | `stockLevelWeight` | Probability of Stock-Level transactions in percent. | 4.1 |
100 | | `deliveryWeight` | Probability of Delivery transactions in percent. | 4.1 |
101 |
--------------------------------------------------------------------------------
/docs/RELEASE.md:
--------------------------------------------------------------------------------
1 | # Release process
2 |
3 | These are the tasks to do when performing a release:
4 |
5 | * Update the `JTPCCVERSION` constant with the new version in the
6 | `com.github.pgsqlio.benchmarksql.jtpcc.jTPCCConfig` class.
7 | * Update the `BUILDING.md` file, to update the version.
8 | * Update the `CHANGE-LOG.md` file with the modifications made in this release.
9 | * Format the Java code, according to Google Java code style.
10 | For more details, please check the [Contributing section](CONTRIBUTING.md).
11 | * Execute the `release` plugin in Maven:
12 |
13 | ```
14 | mvn release:prepare
15 | mvn release:perform
16 | ```
17 | * Verify the new version in `pom.xml` file.
18 | * Generate the jar file, via `mvn`.
19 | * Publish the `BenchmarkSQL.jar` in GitHub releases.
20 | The file can be obtained from `target` directory.
21 |
--------------------------------------------------------------------------------
/docs/TPCC.md:
--------------------------------------------------------------------------------
1 | The TPC-C is an OLTP benchmark defined by the
2 | [Transaction Processing Council](http://tpc.org). It consists of 9 tables
3 | that are connected with 10 Foreign Key Relationships. Except for the **Item**
4 | table, everything is scaled in cardinality by the number of warehouses (**W**)
5 | that are generated during the initial load of the database.
6 |
7 | ![TPC-C ERD](TPC-C_ERD.svg)
8 |
9 | This schema is used by 5 different transactions that produce a variety of
10 | different access patterns on the tables.
11 |
12 | * **Item** is read only.
13 | * **Warehouse**, **District**, **Customer** and **Stock** are read/write.
14 | * **New-Order** is insert, read and delete, like a queue that at any given
15 | time has approximately W * 9000 rows in it.
16 | * **Order** and **Order-Line** receive inserts and every row inserted will
17 | have a time delayed update to it, after which the row becomes stale and
18 | may be read infrequently in the future.
19 | * **History** is insert only.
20 |
21 | This is an impressive level of complexity and variety of access patterns for
22 | such a small schema and number of transaction profiles. It is one of the
23 | reasons why **TPC-C** is still one of the most important database benchmarks
24 | today.
25 |
26 | For more information, you can visit the
27 | [Standard specification from TPC](http://tpc.org/tpc_documents_current_versions/pdf/tpc-c_v5.11.0.pdf).
--------------------------------------------------------------------------------
/docs/TUTORIAL-1.md:
--------------------------------------------------------------------------------
1 | [comment]: # (TODO create alternative tutorials for other distros)
2 |
3 | # Building and running BenchmarkSQL on CentOS 8
4 |
5 | BenchmarkSQL is controlled through a Flask based WEB UI. This can
6 | also be packaged into a Docker container for easy cloud deployment.
7 | The build process itself is done via Maven. Building the container
8 | is optional and requires Podman.
9 |
10 | This tutorial will guide you step by step from an empty Linux system
11 | to running a benchmark via the Flask UI. The parts omitted here are
12 | setting up the database server itself. For this tutorial a CentOS 8
13 | "minimal" install is used as the starting point.
14 |
15 | The entire process is
16 |
17 | * [Installing required packages](#installing-required-packages)
18 | * [Cloning the BenchmarkSQL git repository](#cloning-the-benchmarksql-git-repository)
19 | * [Installing Python requirements](#installing-python-requirements)
20 | * [Building the target directory](#building-the-target-directory)
21 | * [Preparing the test database](#preparing-the-test-database)
22 | * [Performing the initial database load](#performing-the-initial-database-load)
23 | * [Launching the BenchmarkSQL UI](#launching-the-benchmarksql-ui)
24 | * [Next Steps](#next-steps)
25 |
26 | ## Installing required packages
27 |
28 | This tutorial assumes that you have a [PostgreSQL](http://postgresql.org)
29 | server available over the network or on your local Linux machine
30 | and that it can be used with password
31 | authentication. It is beyond the scope of this tutorial to show how
32 | to create a PostgreSQL server, or any other database server instance.
33 |
34 | First we need to install and configure a few required packages like
35 | git and Maven.
36 | ```
37 | [user@localhost ~]$ sudo dnf -y install epel-release java-11-openjdk-devel maven git python3
38 | [user@localhost ~]$ sudo dnf config-manager --set-enabled powertools
39 | ```
40 |
41 | Second we make Java-11 the system wide default version via alternatives.
42 | `alternatives(8) --config` is interactive (which is why the output is
43 | included below). Make sure you select the
44 | option for java-11 in the following two commands.
45 | ```
46 | [user@localhost ~]$ sudo alternatives --config java
47 |
48 | There are 2 programs which provide 'java'.
49 |
50 | Selection Command
51 | -----------------------------------------------
52 | *+ 1 java-1.8.0-openjdk.x86_64 (/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-0.el8_3.x86_64/jre/bin/java)
53 | 2 java-11-openjdk.x86_64 (/usr/lib/jvm/java-11-openjdk-11.0.11.0.9-0.el8_3.x86_64/bin/java)
54 |
55 | Enter to keep the current selection[+], or type selection number: 2
56 | [user@localhost ~]$ sudo alternatives --config javac
57 |
58 | There are 2 programs which provide 'javac'.
59 |
60 | Selection Command
61 | -----------------------------------------------
62 | *+ 1 java-1.8.0-openjdk.x86_64 (/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-0.el8_3.x86_64/bin/javac)
63 | 2 java-11-openjdk.x86_64 (/usr/lib/jvm/java-11-openjdk-11.0.11.0.9-0.el8_3.x86_64/bin/javac)
64 |
65 | Enter to keep the current selection[+], or type selection number: 2
66 | ```
67 |
68 | Maven also requires the **JAVA_HOME** environment variable to be
69 | set correctly, so we add the following line to our .bash_profile and
70 | make sure it is set in our current shell.
71 |
72 | ```
73 | export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac)))))
74 | ```
75 |
76 | **Note**: There are other ways to make Java version 11 the default on a
77 | per-user basis. But that is outside the scope of this tutorial.
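A quick way to verify the toolchain before building (your version strings will differ):

```
[user@localhost ~]$ javac -version
[user@localhost ~]$ echo $JAVA_HOME
```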
78 |
79 | ## Cloning the BenchmarkSQL git repository
80 |
81 | We are now ready to clone the BenchmarkSQL git repository with
82 | ```
83 | [user@localhost ~]$ git clone https://github.com/pgsql-io/benchmarksql.git
84 | ```
85 |
86 | ## Installing Python Requirements
87 |
88 | The remaining dependencies are all Python modules that we will
89 | install using pip3. They are listed in a requirements.txt file in
90 | the repository.
91 | ```
92 | [user@localhost ~]$ cd benchmarksql/
93 | [user@localhost benchmarksql]$ sudo pip3 install -r src/main/resources/requirements.txt
94 | ```
95 |
96 | ## Building the target directory
97 |
98 | We are now ready to build the actual benchmark driver. Assuming we
99 | are still in the *~/benchmarksql* directory
100 | ```
101 | [user@localhost benchmarksql]$ mvn
102 | ```
103 | Yup, that's it. We should now have a directory *target* that contains a
104 | file *BenchmarkSQL.jar* and a bunch of other stuff.
105 | ```
106 | [user@localhost benchmarksql]$ ll target
107 | total 104
108 | -rw-rw-r--. 1 user user 97254 Apr 28 12:14 BenchmarkSQL.jar
109 | drwxrwxr-x. 3 user user 17 Apr 28 12:14 classes
110 | drwxrwxr-x. 3 user user 25 Apr 28 12:14 generated-sources
111 | drwxrwxr-x. 2 user user 4096 Apr 28 12:14 lib
112 | drwxrwxr-x. 2 user user 28 Apr 28 12:14 maven-archiver
113 | drwxrwxr-x. 3 user user 35 Apr 28 12:14 maven-status
114 | drwxrwxr-x. 10 user user 4096 Apr 28 12:14 run
115 | ```
116 |
117 | ## Preparing the test database
118 |
119 | For the remainder of this tutorial it is assumed that the PostgreSQL
120 | database is installed on a remote computer with IP address *172.21.8.33*.
121 | If you are using a local database that is installed on your Linux
122 | desktop, use *127.0.0.1* or *localhost* instead.
123 |
124 | Connect to the PostgreSQL server as user *postgres* and create the user
125 | *benchmarksql* as well as a database *benchmarksql1* owned by that user.
126 | ```
127 | -bash-4.2$ psql -U postgres
128 | psql (14devel)
129 | Type "help" for help.
130 |
131 | postgres=# create user benchmarksql with password 'bmsql1';
132 | CREATE ROLE
133 | postgres=# create database benchmarksql1 owner benchmarksql;
134 | CREATE DATABASE
135 | ```
136 |
137 | ## Performing the initial database load
138 |
139 | If you did everything I did and actually worked on a minimal CentOS 8
140 | install, then the machine you are running the benchmark on will not have
141 | a graphical console. That is actually how BenchmarkSQL is intended to
142 | be run: on a remote server that is in close network proximity to the
143 | actual database server, controlled with a browser through its
144 | Flask UI.
145 |
146 | In my example the machine where I built and will be running BenchmarkSQL
147 | is on IP address *172.21.8.128*. Flask is by default going to listen on
148 | port *5000*, so I need to open up that TCP port in the firewall in order
149 | to connect with my browser to it. Again, if you are running everything
150 | on your local machine, just use *127.0.0.1* or *localhost* and forget
151 | about the firewall.
152 | ```
153 | [user@localhost benchmarksql]$ sudo firewall-cmd --zone public --add-port 5000/tcp
154 | success
155 | [user@localhost benchmarksql]$ sudo firewall-cmd --zone public --add-port 5000/tcp --permanent
156 | success
157 | ```
158 |
159 | ## Launching the BenchmarkSQL UI
160 |
161 | **NOTE: The BenchmarkSQL UI shown here is a Proof of Concept Prototype.
162 | The final UI will be entirely different.**
163 |
164 | Time to launch the actual BenchmarkSQL user interface and populate
165 | the test database created above with the BenchmarkSQL tables, initial
166 | data and other objects. Assuming we are still in the directory
167 | *~/benchmarksql* (where we cloned the git repository):
168 | ```
169 | [user@localhost benchmarksql]$ ./target/run/FlaskService/main.py
170 | created empty directory /home/user/benchmarksql/target/run/service_data
171 | * Serving Flask app "main" (lazy loading)
172 | * Environment: production
173 | WARNING: This is a development server. Do not use it in a production deployment.
174 | Use a production WSGI server instead.
175 | * Debug mode: off
176 | * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
177 | ```
178 | Now we can direct our browser to
179 | [http://172.21.8.128:5000](http://172.21.8.128:5000)
180 | (substitute with the proper IP address where you launched the UI) and
181 | should see something like this:
182 |
183 | ![BenchmarkSQL UI](screenshots/tut1-bmsql-ui-1.png)
184 |
185 | In the **Properties** textbox adjust the URI for the JDBC connection
186 | to the IP address of the DB server, then click `BUILD`.
187 |
188 | For the first ever test you may also want to adjust the number of
189 | minutes for the rampup and run times before clicking `RUN`.
190 | Be patient, the benchmark is only going to generate a status line
191 | reporting the current throughput every 30 seconds (there is a config
192 | option in the Properties for that).
193 |
194 | If the run is successful it will appear in the result table at
195 | the bottom of the page and you can view the full HTML report by
196 | clicking on **Show Report**.
197 |
198 | ## Next Steps
199 |
200 | If you got this far you successfully built and ran BenchmarkSQL.
201 | Congratulations!
202 |
203 | [comment]: # (TODO add links to the follow up tutorials)
204 |
205 | ### Scaling the database size and transaction rate
206 |
207 | The defaults in the sample Properties are not representing any
208 | modern database server. How to properly scale the database and
209 | get close to the maximum throughput (without completely violating
210 | all TPC-C constraints at once) will fill an entire tutorial by
211 | itself.
212 |
213 | (TODO: link to tutorial)
214 |
215 | ### Collecting OS level metrics and extended reporting
216 |
217 | Using collectd and an MQTT broker it is possible to collect OS
218 | performance metrics, like CPU usage, disk and network IO. These can
219 | be included in the BenchmarkSQL report.
220 |
221 | (TODO: link to tutorial)
222 |
223 | ### Building the Docker/Podman container
224 |
225 | To be covered in separate tutorial including how to deploy it
226 | in the cloud.
227 |
228 | (TODO: link to tutorial)
229 |
230 |
231 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Welcome to BenchmarkSQL
2 |
3 | BenchmarkSQL is implemented in Java, using JDBC to stress test SQL databases.
4 | The overall architecture is a series of data structures, queues and thread
5 | groups that handle the simulated terminals, users and application threads.
6 |
7 | Its architecture allows BenchmarkSQL to drive TPC-C configurations up to many
8 | thousands of warehouses (known as the scaling factor) without overwhelming the
9 | job scheduler of the test driver itself.
10 | Yet it is capable of doing so without sacrificing one of the most important
11 | measurements in a TPC-C, the end-user experienced response time at the terminal.
12 |
13 | ![TimedDriver](TimedDriver-1.svg)
14 |
--------------------------------------------------------------------------------
/docs/screenshots/tut1-bmsql-ui-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wieck/benchmarksql/98f5cb4b446f4296ca5e85ad27303482a6bbd624/docs/screenshots/tut1-bmsql-ui-1.png
--------------------------------------------------------------------------------
/docs/tutorial-1/screen-001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wieck/benchmarksql/98f5cb4b446f4296ca5e85ad27303482a6bbd624/docs/tutorial-1/screen-001.png
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: BenchmarkSQL
2 | nav:
3 | - Home: index.md
4 | - 'How to run':
5 | - 'How to run in any RDBMS': HOW-TO-RUN.md
6 | - Oracle: HOW-TO-RUN-Oracle.md
7 | - Postgres: HOW-TO-RUN-Postgres.md
8 | - 'Properties file': PROPERTIES.md
9 | - 'Docker and Flask': DOCKER.md
10 | - Building: BUILDING.md
11 | - 'Timed driver': TimedDriver.md
12 | - 'TPC-C description': TPCC.md
13 | - About:
14 | - Contributing: CONTRIBUTING.md
15 | - 'Change log': CHANGE-LOG.md
16 | - 'Release process': RELEASE.md
17 | - License: LICENSE.txt
18 |
19 | theme: readthedocs
20 |
--------------------------------------------------------------------------------
/podman-build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | mvn || exit 1
4 | podman build -t benchmarksql:6.0-rc2 --http-proxy .
5 |
--------------------------------------------------------------------------------
/podman-run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | mkdir -p ./service_data || exit 1
4 |
5 | podman run --rm \
6 | --network="host" \
7 | --user=`id -u`:`id -g` \
8 | --userns=keep-id \
9 | --volume="./service_data:/service_data" \
10 | -w "/benchmarksql" \
11 | localhost/benchmarksql:6.0-rc2
12 |
13 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 | xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 | xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 | <modelVersion>4.0.0</modelVersion>
6 | <groupId>com.github.pgsql-io</groupId>
7 | <artifactId>benchmarksql</artifactId>
8 | <version>6.0.0</version>
9 | <name>A TPC-C like test tool</name>
10 | <description>BenchmarkSQL is a TPC-C fair-use implementation.</description>
11 |
12 | <url>https://github.com/pgsql-io/benchmarksql</url>
13 |
14 | <properties>
15 | <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
16 | <maven.compiler.source>1.8</maven.compiler.source>
17 | <maven.compiler.target>1.8</maven.compiler.target>
18 | </properties>
19 |
20 | <dependencies>
21 | <dependency>
22 | <groupId>org.apache.logging.log4j</groupId>
23 | <artifactId>log4j-core</artifactId>
24 | <version>2.14.1</version>
25 | </dependency>
26 | <dependency>
27 | <groupId>org.postgresql</groupId>
28 | <artifactId>postgresql</artifactId>
29 | <version>42.7.1</version>
30 | </dependency>
31 | <dependency>
32 | <groupId>com.ongres.scram</groupId>
33 | <artifactId>client</artifactId>
34 | <version>2.0</version>
35 | </dependency>
36 | <dependency>
37 | <groupId>com.oracle.database.jdbc</groupId>
38 | <artifactId>ojdbc8</artifactId>
39 | <version>21.1.0.0</version>
40 | </dependency>
41 | <dependency>
42 | <groupId>com.microsoft.sqlserver</groupId>
43 | <artifactId>mssql-jdbc</artifactId>
44 | <version>9.2.1.jre8</version>
45 | </dependency>
46 | <dependency>
47 | <groupId>com.ibm.db2</groupId>
48 | <artifactId>jcc</artifactId>
49 | <version>11.5.5.0</version>
50 | </dependency>
51 | <dependency>
52 | <groupId>mysql</groupId>
53 | <artifactId>mysql-connector-java</artifactId>
54 | <version>8.0.23</version>
55 | </dependency>
56 | <dependency>
57 | <groupId>org.mariadb.jdbc</groupId>
58 | <artifactId>mariadb-java-client</artifactId>
59 | <version>2.7.2</version>
60 | </dependency>
61 | <dependency>
62 | <groupId>org.firebirdsql.jdbc</groupId>
63 | <artifactId>jaybird</artifactId>
64 | <version>4.0.3.java11</version>
65 | </dependency>
66 | <dependency>
67 | <groupId>org.apache.ant</groupId>
68 | <artifactId>ant</artifactId>
69 | <version>1.8.2</version>
70 | </dependency>
71 | </dependencies>
72 |
73 | <build>
74 | <defaultGoal>package</defaultGoal>
75 | <plugins>
76 | <plugin>
77 | <groupId>org.apache.maven.plugins</groupId>
78 | <artifactId>maven-compiler-plugin</artifactId>
79 | <version>3.8.0</version>
80 | </plugin>
81 | <plugin>
82 | <artifactId>maven-jar-plugin</artifactId>
83 | <version>2.4</version>
84 | <configuration>
85 | <finalName>BenchmarkSQL</finalName>
86 | </configuration>
87 | </plugin>
88 | <plugin>
89 | <artifactId>maven-dependency-plugin</artifactId>
90 | <version>3.0.0</version>
91 | <executions>
92 | <execution>
93 | <id>copy-dependencies</id>
94 | <phase>package</phase>
95 | <goals>
96 | <goal>copy-dependencies</goal>
97 | </goals>
98 | <configuration>
99 | <outputDirectory>${project.build.directory}/lib</outputDirectory>
100 | <overWriteReleases>false</overWriteReleases>
101 | <overWriteSnapshots>false</overWriteSnapshots>
102 | <overWriteIfNewer>true</overWriteIfNewer>
103 | </configuration>
104 | </execution>
105 | </executions>
106 | </plugin>
107 | <plugin>
108 | <artifactId>maven-resources-plugin</artifactId>
109 | <version>3.2.0</version>
110 | <configuration>
111 | <outputDirectory>${basedir}/target/run</outputDirectory>
112 | </configuration>
113 | </plugin>
114 | </plugins>
115 | </build>
116 |
117 | <organization>
118 | <name>PGSQL</name>
119 | <url>https://pgsql.io/</url>
120 | </organization>
121 |
122 | <issueManagement>
123 | <url>https://github.com/pgsql-io/benchmarksql/issues</url>
124 | <system>GitHub</system>
125 | </issueManagement>
126 |
127 | <scm>
128 | <url>https://github.com/pgsql-io/benchmarksql</url>
129 | </scm>
130 |
131 | <developers>
132 | <developer>
133 | <id>jannicash</id>
134 | <name>Jan Wieck</name>
135 | <url>https://github.com/wieck</url>
136 | <roles>
137 | <role>architect</role>
138 | <role>developer</role>
139 | </roles>
140 | <timezone>America/New_York</timezone>
141 | <properties>
142 | <picUrl>https://avatars.githubusercontent.com/u/362674?v=4</picUrl>
143 | </properties>
144 | </developer>
145 | <developer>
146 | <id>lussman</id>
147 | <name>Denis Lussier</name>
148 | <url>https://github.com/luss</url>
149 | <roles>
150 | <role>architect</role>
151 | <role>developer</role>
152 | </roles>
153 | <properties>
154 | <picUrl>https://avatars.githubusercontent.com/u/1664798?v=4</picUrl>
155 | </properties>
156 | </developer>
157 | <developer>
158 | <id>cadym</id>
159 | <name>Cady</name>
160 | </developer>
161 | <developer>
162 | <id>smartin3</id>
163 | <name>Scott Martin</name>
164 | </developer>
165 | </developers>
166 |
167 | <contributors>
168 | <contributor>
169 | <name>Andres Gomez - angoca</name>
170 | <url>https://github.com/angoca</url>
171 | <roles>
172 | <role>developer</role>
173 | </roles>
174 | <timezone>America/Bogota</timezone>
175 | <properties>
176 | <picUrl>https://avatars.githubusercontent.com/u/294317?v=4</picUrl>
177 | </properties>
178 | </contributor>
179 | <contributor>
180 | <name>Roman Simakov - romansimakov</name>
181 | <url>https://github.com/romansimakov</url>
182 | <properties>
183 | <picUrl>https://avatars.githubusercontent.com/u/1625002?v=4</picUrl>
184 | </properties>
185 | </contributor>
186 | </contributors>
187 | </project>
188 |
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | appstemp
2 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/application/dummy/AppOracleStoredProc.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.application.dummy;
2 |
3 | import org.apache.logging.log4j.LogManager;
4 | import org.apache.logging.log4j.Logger;
5 |
6 | import com.github.pgsqlio.benchmarksql.jtpcc.jTPCC;
7 | import com.github.pgsqlio.benchmarksql.jtpcc.jTPCCApplication;
8 | import com.github.pgsqlio.benchmarksql.jtpcc.jTPCCTData;
9 |
10 | /**
11 | * AppOracleStoredProc - TPC-C Transaction Implementation for using Stored Procedures on Oracle
12 | */
13 | public class AppOracleStoredProc extends jTPCCApplication {
14 | private static Logger log = LogManager.getLogger(AppOracleStoredProc.class);
15 | private jTPCC gdata;
16 | private int sut_id;
17 |
18 | public void init(jTPCC gdata, int sut_id) throws Exception {
19 | throw new Exception(
20 | "Oracle support was not compiled in - please rebuild BenchmarkSQL with -DOracleSupport=true");
21 | }
22 |
23 | public void finish() throws Exception {}
24 |
25 | public void executeNewOrder(jTPCCTData.NewOrderData newOrder, boolean trans_rbk)
26 | throws Exception {}
27 |
28 | public void executePayment(jTPCCTData.PaymentData payment) throws Exception {}
29 |
30 | public void executeOrderStatus(jTPCCTData.OrderStatusData orderStatus) throws Exception {}
31 |
32 | public void executeStockLevel(jTPCCTData.StockLevelData stockLevel) throws Exception {}
33 |
34 | public void executeDeliveryBG(jTPCCTData.DeliveryBGData deliveryBG) throws Exception {}
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jdbc/ExecJDBC.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jdbc;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.FileInputStream;
5 | import java.io.FileReader;
6 | import java.io.IOException;
7 | import java.sql.Connection;
8 | import java.sql.DriverManager;
9 | import java.sql.SQLException;
10 | import java.sql.Statement;
11 | import java.util.Properties;
12 |
13 | import org.apache.logging.log4j.LogManager;
14 | import org.apache.logging.log4j.Logger;
15 |
16 | /**
17 | * ExecJDBC - Command line program to process SQL DDL statements, from a text input file, to any
18 | * JDBC Data Source
19 | *
20 | * Copyright (C) 2004-2016, Denis Lussier Copyright (C) 2016, Jan Wieck
21 | *
22 | */
23 | public class ExecJDBC {
24 |
25 | private static Logger log = LogManager.getLogger(ExecJDBC.class);
26 |
27 | public static void main(String[] args) {
28 |
29 | Connection conn = null;
30 | Statement stmt = null;
31 | String rLine = null;
32 | String sLine = null;
33 | StringBuffer sql = new StringBuffer();
34 |
35 | try {
36 |
37 | Properties ini = new Properties();
38 | ini.load(new FileInputStream(System.getProperty("prop")));
39 |
40 | // Register jdbcDriver
41 | Class.forName(ini.getProperty("driver"));
42 |
43 | // make connection
44 | conn = DriverManager.getConnection(ini.getProperty("conn"), ini.getProperty("user"),
45 | ini.getProperty("password"));
46 | conn.setAutoCommit(true);
47 |
48 | // Retrieve database type
49 | String dbType = ini.getProperty("db");
50 |
51 | // For Oracle: flag that indicates whether a complete statement
52 | // block is ready to be executed.
53 | boolean ora_ready_to_execute = false;
54 |
55 | // Create Statement
56 | stmt = conn.createStatement();
57 |
58 | // Open inputFile
59 | BufferedReader in = new BufferedReader(new FileReader(getSysProp("commandFile", null)));
60 |
61 | // loop through the input file and concatenate SQL statement fragments
62 | while ((rLine = in.readLine()) != null) {
63 |
64 | if (ora_ready_to_execute) {
65 | String query = sql.toString();
66 |
67 | execJDBC(stmt, query);
68 | sql = new StringBuffer();
69 | ora_ready_to_execute = false;
70 | }
71 |
72 | String line = rLine.trim();
73 |
74 | if (line.length() != 0) {
75 | if (line.startsWith("--") && !line.startsWith("-- {")) {
76 | log.info(rLine); // print comment line
77 | } else {
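// A line consisting of "$$" starts a dollar-quoted block (e.g. a
// PostgreSQL function body); copy it verbatim until the closing "$$".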
78 | if (line.equals("$$")) {
79 | sql.append(rLine);
80 | sql.append("\n");
81 | while ((rLine = in.readLine()) != null) {
82 | line = rLine.trim();
83 | sql.append(rLine);
84 | sql.append("\n");
85 | if (line.equals("$$")) {
86 | break;
87 | }
88 | }
89 | continue;
90 | }
91 |
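// "-- {" ... "-- }" brackets a statement block (used for Oracle
// PL/SQL); it is executed as a whole once the closing marker is seen.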
92 | if (line.startsWith("-- {")) {
93 | sql.append(rLine);
94 | sql.append("\n");
95 | while ((rLine = in.readLine()) != null) {
96 | line = rLine.trim();
97 | sql.append(rLine);
98 | sql.append("\n");
99 | if (line.startsWith("-- }")) {
100 | ora_ready_to_execute = true;
101 | break;
102 | }
103 | }
104 | continue;
105 | }
106 |
107 | if (line.endsWith("\\;")) {
108 | sql.append(rLine.replaceAll("\\\\;", ";"));
109 | sql.append("\n");
110 | } else {
111 | sql.append(line.replaceAll("\\\\;", ";"));
112 | if (line.endsWith(";")) {
113 | String query = sql.toString();
114 |
115 | execJDBC(stmt, query.substring(0, query.length() - 1));
116 | sql = new StringBuffer();
117 | } else {
118 | sql.append("\n");
119 | }
120 | }
121 | }
122 |
123 | } // end if
124 |
125 | } // end while
126 |
127 | in.close();
128 |
129 | } catch (IOException ie) {
130 | log.error(ie.getMessage());
131 | log.info(ie);
132 | System.exit(1);
133 | } catch (SQLException se) {
134 | log.error(se.getMessage());
135 | log.info(se);
136 | System.exit(1);
137 | } catch (Exception e) {
138 | log.error(e);
139 | System.exit(1);
140 | // exit Cleanly
141 | } finally {
142 | try {
143 | if (conn != null)
144 | conn.close();
145 | } catch (SQLException se) {
146 | log.error(se);
147 | } // end finally
148 |
149 | } // end try
150 |
151 | } // end main
152 |
153 |
154 | static void execJDBC(Statement stmt, String query) {
155 |
156 | log.info("{};", query);
157 |
158 | try {
159 | stmt.execute(query);
160 | } catch (SQLException se) {
161 | log.error(se.getMessage());
162 | log.info(se);
163 | } // end try
164 |
165 | } // end execJDBCCommand
166 |
167 | public static String getSysProp(String inSysProperty, String defaultValue) {
168 |
169 | String outPropertyValue = null;
170 |
171 | try {
172 | outPropertyValue = System.getProperty(inSysProperty, defaultValue);
173 | } catch (Exception e) {
174 | log.error("Error Reading Required System Property '{}'", inSysProperty);
175 | }
176 |
177 | return (outPropertyValue);
178 |
179 | } // end getSysProp
180 |
181 | } // end ExecJDBC Class
182 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jtpcc/jTPCCApplication.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jtpcc;
2 |
3 | /**
4 | * jTPCCApplication - Dummy base class for the DB-specific implementations of the TPC-C transactions
5 | */
6 | public class jTPCCApplication {
7 | public void init(jTPCC gdata, int sut_id) throws Exception {}
8 |
9 | public void finish() throws Exception {}
10 |
11 | public void executeNewOrder(jTPCCTData.NewOrderData screen, boolean trans_rbk) throws Exception {}
12 |
13 | public void executePayment(jTPCCTData.PaymentData screen) throws Exception {}
14 |
15 | public void executeOrderStatus(jTPCCTData.OrderStatusData screen) throws Exception {}
16 |
17 | public void executeStockLevel(jTPCCTData.StockLevelData screen) throws Exception {}
18 |
19 | public void executeDeliveryBG(jTPCCTData.DeliveryBGData screen) throws Exception {}
20 | }
21 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jtpcc/jTPCCConfig.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jtpcc;
2 |
3 | import java.text.SimpleDateFormat;
4 |
5 | /**
6 | * jTPCCConfig - Basic configuration parameters for jTPCC
7 | */
8 | public interface jTPCCConfig {
9 | public final static String JTPCCVERSION = "6.0";
10 |
11 | public final static int DB_UNKNOWN = 0, DB_ORACLE = 1, DB_POSTGRES = 2, DB_FIREBIRD = 3,
12 | DB_MARIADB = 4, DB_TSQL = 5, DB_BABELFISH = 6;
13 |
14 | public final static int NEW_ORDER = 1, PAYMENT = 2, ORDER_STATUS = 3, DELIVERY = 4,
15 | STOCK_LEVEL = 5;
16 |
17 | public final static String[] nameTokens =
18 | {"BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING"};
19 |
20 | public final static SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
21 |
22 | public final static int configCommitCount = 10000; // commit every n records in LoadData
23 |
24 | public final static int configWhseCount = 10;
25 | public final static int configItemCount = 100000; // tpc-c std = 100,000
26 | public final static int configDistPerWhse = 10; // tpc-c std = 10
27 | public final static int configCustPerDist = 3000; // tpc-c std = 3,000
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jtpcc/jTPCCResult.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jtpcc;
2 |
3 | /*
4 | * jTPCCResult - Collection of statistics and writing of results.
5 | */
6 | public class jTPCCResult {
7 | public HistCounter histCounter[];
8 | public ResCounter resCounter[];
9 | private Object lock;
10 | public double statsDivider;
11 | private long resultStartMS;
12 | private long resultNextDue;
13 |
14 | public static final int NUM_BUCKETS = 1000;
15 | public static final double STATS_CUTOFF = 600.0;
16 |
17 | public jTPCCResult() {
18 | histCounter = new HistCounter[jTPCCTData.TT_DELIVERY_BG + 1];
19 | for (int i = 0; i < jTPCCTData.TT_DELIVERY_BG + 1; i++)
20 | histCounter[i] = new HistCounter();
21 | resCounter = new ResCounter[jTPCCTData.TT_DELIVERY_BG + 1];
22 | for (int i = 0; i < jTPCCTData.TT_DELIVERY_BG + 1; i++)
23 | resCounter[i] = new ResCounter();
24 | lock = new Object();
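/*
* The latency histogram buckets are spaced logarithmically: collect()
* maps a latency of L milliseconds to bucket round(ln(L) / statsDivider),
* so the NUM_BUCKETS buckets cover latencies up to STATS_CUTOFF seconds.
*/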
25 | statsDivider = Math.log(STATS_CUTOFF * 1000.0) / (double) (NUM_BUCKETS);
26 |
27 | resultStartMS = jTPCC.csv_begin;
28 | resultNextDue = resultStartMS + (jTPCC.resultIntervalSecs * 1000);
29 | }
30 |
31 | public void collect(jTPCCTData tdata) {
32 | HistCounter hCounter;
33 | ResCounter rCounter;
34 | long latency;
35 | long delay;
36 | int bucket;
37 |
38 | if (tdata.trans_type < 0 || tdata.trans_type > jTPCCTData.TT_DELIVERY_BG)
39 | return;
40 |
41 | hCounter = histCounter[tdata.trans_type];
42 | rCounter = resCounter[tdata.trans_type];
43 |
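/*
* Latency is measured from the moment the transaction was due, not
* from when it actually started; delay is how late the scheduler was
* in launching it.
*/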
44 | latency = tdata.trans_end - tdata.trans_due;
45 | delay = tdata.trans_start - tdata.trans_due;
46 | if (latency < 1)
47 | bucket = 0;
48 | else
49 | bucket = (int) Math.round(Math.log((double) latency) / statsDivider);
50 | if (bucket >= NUM_BUCKETS)
51 | bucket = NUM_BUCKETS - 1;
52 |
53 | /* Only collect data within the defined measurement window */
54 | if (tdata.trans_end >= jTPCC.result_begin && tdata.trans_end < jTPCC.result_end) {
55 | synchronized (lock) {
56 | if (hCounter.numTrans == 0) {
57 | hCounter.minMS = latency;
58 | hCounter.maxMS = latency;
59 | } else {
60 | if (hCounter.minMS > latency)
61 | hCounter.minMS = latency;
62 | if (hCounter.maxMS < latency)
63 | hCounter.maxMS = latency;
64 | }
65 | hCounter.numTrans++;
66 | hCounter.sumMS += latency;
67 | if (tdata.trans_error)
68 | hCounter.numError++;
69 | if (tdata.trans_rbk)
70 | hCounter.numRbk++;
71 |
72 | hCounter.bucket[bucket]++;
73 | }
74 | }
75 |
76 | rCounter.numTrans++;
77 | rCounter.sumLatencyMS += latency;
78 | rCounter.sumDelayMS += delay;
79 | if (rCounter.numTrans == 1) {
80 | rCounter.minLatencyMS = latency;
81 | rCounter.maxLatencyMS = latency;
82 | rCounter.minDelayMS = delay;
83 | rCounter.maxDelayMS = delay;
84 | } else {
85 | if (latency < rCounter.minLatencyMS)
86 | rCounter.minLatencyMS = latency;
87 | if (latency > rCounter.maxLatencyMS)
88 | rCounter.maxLatencyMS = latency;
89 | if (delay < rCounter.minDelayMS)
90 | rCounter.minDelayMS = delay;
91 | if (delay > rCounter.maxDelayMS)
92 | rCounter.maxDelayMS = delay;
93 | }
94 |
95 | long now = System.currentTimeMillis();
96 | if (now >= resultNextDue) {
97 | this.emit(now);
98 | }
99 | }
100 |
101 | public void emit(long now) {
102 | long second = (resultNextDue - resultStartMS) / 1000;
103 |
104 | for (int tt = 0; tt <= jTPCCTData.TT_DELIVERY_BG; tt++) {
105 | jTPCC.csv_result_write(
106 | jTPCCTData.trans_type_names[tt] + "," + second + "," + resCounter[tt].numTrans + ","
107 | + resCounter[tt].sumLatencyMS + "," + resCounter[tt].minLatencyMS + ","
108 | + resCounter[tt].maxLatencyMS + "," + resCounter[tt].sumDelayMS + ","
109 | + resCounter[tt].minDelayMS + "," + resCounter[tt].maxDelayMS + "\n");
110 |
111 | resCounter[tt].numTrans = 0;
112 | resCounter[tt].sumLatencyMS = 0;
113 | resCounter[tt].minLatencyMS = 0;
114 | resCounter[tt].maxLatencyMS = 0;
115 | resCounter[tt].sumDelayMS = 0;
116 | resCounter[tt].minDelayMS = 0;
117 | resCounter[tt].maxDelayMS = 0;
118 | }
119 |
120 | while (resultNextDue <= now)
121 | resultNextDue += (jTPCC.resultIntervalSecs * 1000);
122 | }
123 |
124 | public void aggregate(jTPCCResult into) {
125 | synchronized (lock) {
126 | for (int tt = 0; tt <= jTPCCTData.TT_DELIVERY_BG; tt++) {
127 | if (into.histCounter[tt].numTrans == 0) {
128 | into.histCounter[tt].minMS = histCounter[tt].minMS;
129 | into.histCounter[tt].maxMS = histCounter[tt].maxMS;
130 | } else {
131 | if (into.histCounter[tt].minMS > histCounter[tt].minMS)
132 | into.histCounter[tt].minMS = histCounter[tt].minMS;
133 | if (into.histCounter[tt].maxMS < histCounter[tt].maxMS)
134 | into.histCounter[tt].maxMS = histCounter[tt].maxMS;
135 | }
136 | into.histCounter[tt].numTrans += histCounter[tt].numTrans;
137 | into.histCounter[tt].sumMS += histCounter[tt].sumMS;
138 | into.histCounter[tt].numError += histCounter[tt].numError;
139 | into.histCounter[tt].numRbk += histCounter[tt].numRbk;
140 | for (int i = 0; i < NUM_BUCKETS; i++)
141 | into.histCounter[tt].bucket[i] += histCounter[tt].bucket[i];
142 | }
143 | }
144 | }
145 |
146 | public class HistCounter {
147 | public long numTrans = 0;
148 | public long sumMS = 0;
149 | public long minMS = 0;
150 | public long maxMS = 0;
151 | public long numError = 0;
152 | public long numRbk = 0;
153 | public long bucket[] = new long[NUM_BUCKETS];
154 | }
155 |
156 | public class ResCounter {
157 | public long numTrans = 0;
158 | public long sumLatencyMS = 0;
159 | public long minLatencyMS = 0;
160 | public long maxLatencyMS = 0;
161 | public long sumDelayMS = 0;
162 | public long minDelayMS = 0;
163 | public long maxDelayMS = 0;
164 | }
165 | }
166 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jtpcc/jTPCCTData.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jtpcc;
2 |
3 | /*
4 | * jTPCCTData - The simulated terminal input/output data.
5 | */
6 | public class jTPCCTData {
7 | public final static int TT_NEW_ORDER = 0, TT_PAYMENT = 1, TT_ORDER_STATUS = 2, TT_STOCK_LEVEL = 3,
8 | TT_DELIVERY = 4, TT_DELIVERY_BG = 5, TT_NONE = 6, TT_DONE = 7;
9 |
10 | public final static String trans_type_names[] = {"NEW_ORDER", "PAYMENT", "ORDER_STATUS",
11 | "STOCK_LEVEL", "DELIVERY", "DELIVERY_BG", "NONE", "DONE"};
12 |
13 | public int sched_code;
14 | public long sched_fuzz;
15 | public jTPCCTData term_left;
16 | public jTPCCTData term_right;
17 | public int tree_height;
18 |
19 | public int trans_type;
20 | public long trans_due;
21 | public long trans_start;
22 | public long trans_end;
23 | public boolean trans_rbk;
24 | public boolean trans_error;
25 | public String trans_error_reason = null;
26 |
27 | public int term_w_id = 0;
28 | public int term_d_id = 0;
29 |
30 | public NewOrderData new_order = null;
31 | public PaymentData payment = null;
32 | public OrderStatusData order_status = null;
33 | public StockLevelData stock_level = null;
34 | public DeliveryData delivery = null;
35 | public DeliveryBGData delivery_bg = null;
36 |
37 | public String dumpHdr() {
38 | return new String("TData(" + "term_w_id=" + term_w_id + " term_d_id=" + term_d_id
39 | + " sched_code=" + sched_code + " trans_type=" + trans_type + " trans_due=" + trans_due
40 | + " trans_end=" + trans_end + " sched_fuzz=" + sched_fuzz + ")");
41 | }
42 |
43 | public NewOrderData NewOrderData() {
44 | return new NewOrderData();
45 | }
46 |
47 | public class NewOrderData {
48 | /* terminal input data */
49 | public int w_id;
50 | public int d_id;
51 | public int c_id;
52 |
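/* TPC-C orders contain between 5 and 15 order lines, hence the fixed 15 */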
53 | public int ol_supply_w_id[] = new int[15];
54 | public int ol_i_id[] = new int[15];
55 | public int ol_quantity[] = new int[15];
56 |
57 | /* terminal output data */
58 | public String c_last;
59 | public String c_credit;
60 | public double c_discount;
61 | public double w_tax;
62 | public double d_tax;
63 | public int o_ol_cnt;
64 | public int o_id;
65 | public String o_entry_d;
66 | public double total_amount;
67 | public String execution_status;
68 |
69 | public String i_name[] = new String[15];
70 | public int s_quantity[] = new int[15];
71 | public String brand_generic[] = new String[15];
72 | public double i_price[] = new double[15];
73 | public double ol_amount[] = new double[15];
74 | }
75 |
76 | public PaymentData PaymentData() {
77 | return new PaymentData();
78 | }
79 |
80 | public class PaymentData {
81 | /* terminal input data */
82 | public int w_id;
83 | public int d_id;
84 | public int c_id;
85 | public int c_d_id;
86 | public int c_w_id;
87 | public String c_last;
88 | public double h_amount;
89 |
90 | /* terminal output data */
91 | public String w_name;
92 | public String w_street_1;
93 | public String w_street_2;
94 | public String w_city;
95 | public String w_state;
96 | public String w_zip;
97 | public String d_name;
98 | public String d_street_1;
99 | public String d_street_2;
100 | public String d_city;
101 | public String d_state;
102 | public String d_zip;
103 | public String c_first;
104 | public String c_middle;
105 | public String c_street_1;
106 | public String c_street_2;
107 | public String c_city;
108 | public String c_state;
109 | public String c_zip;
110 | public String c_phone;
111 | public String c_since;
112 | public String c_credit;
113 | public double c_credit_lim;
114 | public double c_discount;
115 | public double c_balance;
116 | public String c_data;
117 | public String h_date;
118 | }
119 |
120 | public OrderStatusData OrderStatusData() {
121 | return new OrderStatusData();
122 | }
123 |
124 | public class OrderStatusData {
125 | /* terminal input data */
126 | public int w_id;
127 | public int d_id;
128 | public int c_id;
129 | public String c_last;
130 |
131 | /* terminal output data */
132 | public String c_first;
133 | public String c_middle;
134 | public double c_balance;
135 | public int o_id;
136 | public String o_entry_d;
137 | public int o_carrier_id;
138 |
139 | public int ol_supply_w_id[] = new int[15];
140 | public int ol_i_id[] = new int[15];
141 | public int ol_quantity[] = new int[15];
142 | public double ol_amount[] = new double[15];
143 | public String ol_delivery_d[] = new String[15];
144 | }
145 |
146 | public StockLevelData StockLevelData() {
147 | return new StockLevelData();
148 | }
149 |
150 | public class StockLevelData {
151 | /* terminal input data */
152 | public int w_id;
153 | public int d_id;
154 | public int threshold;
155 |
156 | /* terminal output data */
157 | public int low_stock;
158 | }
159 |
160 | public DeliveryData DeliveryData() {
161 | return new DeliveryData();
162 | }
163 |
164 | public class DeliveryData {
165 | /* terminal input data */
166 | public int w_id;
167 | public int o_carrier_id;
168 |
169 | /* terminal output data */
170 | public String execution_status;
171 | }
172 |
173 | public DeliveryBGData DeliveryBGData() {
174 | return new DeliveryBGData();
175 | }
176 |
177 | public class DeliveryBGData {
178 | /* DELIVERY_BG data */
179 | public int w_id;
180 | public int o_carrier_id;
181 | public String ol_delivery_d;
182 |
183 | public int delivered_o_id[];
184 | }
185 | }
186 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/jtpcc/jTPCCTDataList.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.jtpcc;
2 | /*
3 | * jTPCCTDataList - A doubly linked list of terminal data objects.
4 | *
5 | */
6 |
7 | public class jTPCCTDataList {
8 | private jTPCCTData head;
9 | private jTPCCTData tail;
10 |
11 | public jTPCCTDataList() {
12 | this.head = null;
13 | this.tail = null;
14 | }
15 |
16 | public void append(jTPCCTData tdata) {
17 | if (head == null) {
18 | head = tail = tdata;
19 | tdata.term_left = null;
20 | tdata.term_right = null;
21 | } else {
22 | tail.term_right = tdata;
23 | tdata.term_left = tail;
24 | tdata.term_right = null;
25 | tail = tdata;
26 | }
27 | }
28 |
29 | public void prepend(jTPCCTData tdata) {
30 | if (head == null) {
31 | head = tail = tdata;
32 | tdata.term_left = null;
33 | tdata.term_right = null;
34 | } else {
35 | head.term_left = tdata;
36 | tdata.term_left = null;
37 | tdata.term_right = head;
38 | head = tdata;
39 | }
40 | }
41 |
42 | public void remove(jTPCCTData tdata) {
43 | if (head == tdata)
44 | head = tdata.term_right;
45 | else
46 | tdata.term_left.term_right = tdata.term_right;
47 | if (tail == tdata)
48 | tail = tdata.term_left;
49 | else
50 | tdata.term_right.term_left = tdata.term_left;
51 |
52 | tdata.term_left = null;
53 | tdata.term_right = null;
54 | }
55 |
56 | public jTPCCTData first() {
57 | return head;
58 | }
59 |
60 | public jTPCCTData last() {
61 | return tail;
62 | }
63 |
64 | public jTPCCTData next(jTPCCTData tdata) {
65 | return tdata.term_right;
66 | }
67 |
68 | public jTPCCTData prev(jTPCCTData tdata) {
69 | return tdata.term_left;
70 | }
71 |
72 | public void truncate() {
73 | jTPCCTData next;
74 |
75 | while (head != null) {
76 | next = head.term_right;
77 | head.term_left = null;
78 | head.term_right = null;
79 | head = next;
80 | }
81 | tail = null;
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/loader/LoadJob.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.loader;
2 |
3 | public class LoadJob {
4 | public static final int LOAD_ITEM = 1;
5 | public static final int LOAD_WAREHOUSE = 2;
6 | public static final int LOAD_ORDER = 3;
7 |
8 | public int type;
9 |
10 | public int w_id;
11 | public int d_id;
12 | public int c_id;
13 | public int o_id;
14 | };
15 |
--------------------------------------------------------------------------------
/src/main/java/com/github/pgsqlio/benchmarksql/oscollector/OSCollector.java:
--------------------------------------------------------------------------------
1 | package com.github.pgsqlio.benchmarksql.oscollector;
2 |
3 | import java.io.File;
4 | import java.io.OutputStream;
5 | import java.io.InputStream;
6 | import java.io.IOException;
7 | import java.io.BufferedReader;
8 | import java.io.InputStreamReader;
9 | import java.util.Arrays;
10 | import java.util.List;
11 | import java.util.ArrayList;
12 | import java.lang.ProcessBuilder;
13 |
14 | import org.apache.logging.log4j.LogManager;
15 | import org.apache.logging.log4j.Logger;
16 | import org.apache.tools.ant.types.Commandline;
17 |
18 | import com.github.pgsqlio.benchmarksql.jtpcc.jTPCC;
19 |
20 | /**
21 | * OSCollector.java
22 | *
23 | * Copyright (C) 2016, Denis Lussier Copyright (C) 2016, Jan Wieck
24 | */
25 | public class OSCollector {
26 | private static Logger log = LogManager.getLogger(OSCollector.class);
27 |
28 | private Runtime runTime;
29 | private Process collector;
30 | private OutputStream stdin;
31 |
32 | private Thread stdoutThread;
33 | private Thread stderrThread;
34 |
35 | public OSCollector(String cmdLine, File outputDir)
36 | throws IOException
37 | {
38 | /*
39 | * Assemble the command line for the collector by splitting
40 | * the osCollectorScript property into strings (bash style),
41 | * then append the --startepoch and --resultdir options.
42 | */
43 | ArrayList<String> cmd = new ArrayList<String>();
44 | cmd.addAll(Arrays.asList(Commandline.translateCommandline(cmdLine)));
45 | cmd.add("--startepoch");
46 | cmd.add(String.valueOf(jTPCC.csv_begin / 1000));
47 | cmd.add("--resultdir");
48 | cmd.add(outputDir.getPath());
49 |
50 | /*
51 | * Create a child process executing that command
52 | */
53 | ProcessBuilder pb = new ProcessBuilder(cmd);
54 | collector = pb.start();
55 |
56 | /*
57 | * Create two helper threads that shovel stdout and stderr of
58 | * the child process into our logs.
59 | */
60 | stdin = collector.getOutputStream();
61 | stdoutThread = new Thread(new stdoutLogger(collector.getInputStream()));
62 | stderrThread = new Thread(new stderrLogger(collector.getErrorStream()));
63 | stdoutThread.start();
64 | stderrThread.start();
65 | }
66 |
67 | public void stop()
68 | throws IOException, InterruptedException
69 | {
70 | /*
71 | * We use closing stdin of the child process to signal it is
72 | * time to exit. So just close that stream and wait for it to
73 | * exit.
74 | */
75 | stdin.close();
76 | collector.waitFor();
77 |
78 | /*
79 | * Now wait until the stdout and stderr logger threads terminate,
80 | * which they will when the collector script child process exits.
81 | */
82 | try {
83 | stdoutThread.join();
84 | } catch (InterruptedException ie) {
85 | log.error("OSCollector, {}", ie.getMessage());
86 | }
87 | try {
88 | stderrThread.join();
89 | } catch (InterruptedException ie) {
90 | log.error("OSCollector, {}", ie.getMessage());
91 | }
92 | }
93 |
94 | private class stdoutLogger implements Runnable {
95 | private BufferedReader stdout;
96 |
97 | public stdoutLogger(InputStream stdout) {
98 | this.stdout = new BufferedReader(new InputStreamReader(stdout));
99 | }
100 |
101 | public void run() {
102 | String line;
103 |
104 | while (true) {
105 | try {
106 | line = stdout.readLine();
107 | } catch (IOException e) {
108 | log.error("OSCollector, {}", e.getMessage());
109 | break;
110 | }
111 | if (line == null)
112 | break;
113 | log.info(line);
114 | }
115 | }
116 | }
117 |
118 | private class stderrLogger implements Runnable {
119 | private BufferedReader stderr;
120 |
121 | public stderrLogger(InputStream stderr) {
122 | this.stderr = new BufferedReader(new InputStreamReader(stderr));
123 | }
124 |
125 | public void run() {
126 | String line;
127 |
128 | while (true) {
129 | try {
130 | line = stderr.readLine();
131 | } catch (IOException e) {
132 | log.error("OSCollector, {}", e.getMessage());
133 | break;
134 | }
135 | if (line == null)
136 | break;
137 | log.error(line);
138 | }
139 | }
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/src/main/resources/.gitignore:
--------------------------------------------------------------------------------
1 | my_*
2 | *.log
3 | .jTPCC_run_seq.dat
4 |
--------------------------------------------------------------------------------
/src/main/resources/FlaskService/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import flask
4 | import werkzeug
5 | import benchmarksql
6 | import os
7 | import json
8 | import sys
9 |
10 | app = flask.Flask(__name__)
11 | bench = benchmarksql.BenchmarkSQL()
12 |
13 | @app.route('/', methods = ['POST', 'GET'])
14 | def index():
15 | form = flask.request.form
16 |
17 | if 'action' not in form:
18 | if 'file' in flask.request.files:
19 | propf = flask.request.files['file']
20 | propf.seek(0)
21 | props = propf.read().decode('utf-8')
22 | if props != "":
23 | bench.save_properties(props)
24 | bench.status_data['filename'] = propf.filename
25 | bench.save_status()
26 | return flask.redirect(flask.url_for("index"))
27 |
28 | if 'action' in form:
29 | state = bench.get_job_type()
30 |
31 | if form['action'] == 'SAVE':
32 | bench.save_properties(form['properties'])
33 | headers = werkzeug.datastructures.Headers()
34 | headers.add('Content-Disposition', 'attachment', filename=bench.status_data['filename'])
35 | return flask.Response(form['properties'],
36 | headers = headers,
37 | mimetype = 'application/octet-stream')
38 |
39 | elif form['action'] == 'RUN' and state == 'IDLE':
40 | bench.save_properties(form['properties'])
41 | bench.run_benchmark()
42 | return flask.redirect(flask.url_for("index"))
43 |
44 | elif form['action'] == 'BUILD' and state == 'IDLE':
45 | bench.save_properties(form['properties'])
46 | bench.run_build()
47 |
48 | elif form['action'] == 'DESTROY' and state == 'IDLE':
49 | bench.save_properties(form['properties'])
50 | bench.run_destroy()
51 |
52 | elif form['action'] == 'CANCEL':
53 | bench.cancel_job()
54 |
55 | data = {}
56 | data['current_job_type'] = bench.get_job_type()
57 | data['current_job_runtime'] = bench.get_job_runtime()
58 | data['form'] = form
59 | data['properties'] = bench.get_properties()
60 |
61 | if data['current_job_type'] == 'IDLE':
62 | data['state_run'] = ''
63 | data['state_build'] = ''
64 | data['state_destroy'] = ''
65 | data['state_cancel'] = 'disabled'
66 | data['state_refresh'] = ''
67 | else:
68 | data['state_run'] = 'disabled'
69 | data['state_build'] = 'disabled'
70 | data['state_destroy'] = 'disabled'
71 | data['state_cancel'] = ''
72 | data['state_refresh'] = ''
73 |
74 | data['current_job_output'] = bench.get_job_output()
75 | data['url_job_status'] = flask.url_for('job_status')
76 |
77 | data['results'] = bench.get_results()
78 |
79 | return flask.render_template('main.html', **data)
80 |
81 | @app.route('/api_call', methods = ['POST', 'GET'])
82 | def api_call():
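# The request arrives as a form or query field named 'request' that
# holds a JSON document with a 'command' key (status, run, build,
# destroy, cancel, txsummary) plus optional 'properties' and 'run_id'.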
83 | if flask.request.method == 'POST':
84 | data = flask.request.form
85 | elif flask.request.method == 'GET':
86 | data = flask.request.args
87 | else:
88 | result = {
89 | 'rc': "ERROR",
90 | 'message': "Unsupported request method '{0}'".format(flask.request.method),
91 | }
92 | return flask.Response(json.dumps(result), mimetype = 'application/json')
93 |
94 | try:
95 | req = json.loads(data['request'])
96 | if req['command'].lower() == 'status':
97 | result = api_call_status()
98 | elif req['command'].lower() == 'run':
99 | status = bench.get_status()
100 | if status['current_job_type'] != 'IDLE':
101 | raise Exception("Current job type is {0}".format(status['current_job_type']))
102 | if 'properties' in req:
103 | bench.save_properties(req['properties'])
104 | bench.run_benchmark()
105 | result = api_call_status()
106 | elif req['command'].lower() == 'build':
107 | status = bench.get_status()
108 | if status['current_job_type'] != 'IDLE':
109 | raise Exception("Current job type is {0}".format(status['current_job_type']))
110 | if 'properties' in req:
111 | bench.save_properties(req['properties'])
112 | bench.run_build()
113 | result = api_call_status()
114 | elif req['command'].lower() == 'destroy':
115 | status = bench.get_status()
116 | if status['current_job_type'] != 'IDLE':
117 | raise Exception("Current job type is {0}".format(status['current_job_type']))
118 | if 'properties' in req:
119 | bench.save_properties(req['properties'])
120 | bench.run_destroy()
121 | result = api_call_status()
122 | elif req['command'].lower() == 'cancel':
123 | bench.cancel_job()
124 | result = api_call_status()
125 | elif req['command'].lower() == 'txsummary':
126 | if 'run_id' not in req:
127 | raise Exception("command txsummary requires run_id")
128 | txsummary = bench.get_job_txsummary(req['run_id'])
129 | result = api_call_status()
130 | result['txsummary'] = txsummary
131 | else:
132 | result = {
133 | 'rc': 'ERROR',
134 | 'message': "Unknown API command '{0}'".format(req['command']),
135 | }
136 | except Exception as e:
137 | result = {
138 | 'rc': 'ERROR',
139 | 'message': str(e),
140 | }
141 | return flask.Response(json.dumps(result), mimetype = 'application/json')
142 |
143 | def api_call_status():
144 | status = bench.get_status()
145 | return {
146 | 'rc': 'OK',
147 | 'message': 'Success',
148 | 'current_job_type': status['current_job_type'],
149 | 'current_job_id': status['current_job_id'],
150 | 'current_job_name': status['current_job_name'],
151 | 'current_job_output': status['current_job_output'],
152 | 'current_job_start': status['current_job_start'],
153 | 'current_job_properties': status['current_job_properties'],
154 | }
155 |
156 |
157 | @app.route('/job_status')
158 | def job_status():
159 | result = [
160 | bench.get_job_type(),
161 | bench.get_job_runtime(),
162 | bench.get_job_output(),
163 | ]
164 | return json.dumps(result)
165 |
166 | @app.route('/cancel_job')
167 | def cancel_job():
168 | result = [
169 | bench.cancel_job(),
170 | ]
171 | return json.dumps(result)
172 |
173 | @app.route('/result_log/')
174 | def result_log():
175 | args = flask.request.args
176 | return flask.Response(bench.get_log(args['run_id']), mimetype='text/plain')
177 |
178 | @app.route('/result_show/')
179 | def result_show():
180 | args = flask.request.args
181 | return bench.get_report(args['run_id'])
182 |
183 | @app.route('/result_delete/')
184 | def result_delete():
185 | args = flask.request.args
186 | bench.delete_result(args['run_id'])
187 | return flask.redirect(flask.url_for("index"))
188 |
189 | def upload_properties():
190 | print("files:", flask.request.files, file=sys.stderr)
191 | pass
192 |
193 | if __name__ == '__main__':
194 | app.run(host='0.0.0.0')
195 |
--------------------------------------------------------------------------------
/src/main/resources/FlaskService/sample.last.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=postgres
10 | driver=org.postgresql.Driver
11 | application=Generic
12 | conn=jdbc:postgresql://localhost:5432/postgres
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | loadWorkers=8
19 | monkeys=2
20 | sutThreads=16
21 | maxDeliveryBGThreads=12
22 | maxDeliveryBGPerWarehouse=1
23 | rampupMins=10
24 | rampupSUTMins=5
25 | rampupTerminalMins=5
26 | runMins=30
27 | reportIntervalSecs=30
28 | restartSUTThreadProbability=0.0
29 | keyingTimeMultiplier=1.0
30 | thinkTimeMultiplier=1.0
31 | traceTerminalIO=false
32 |
33 | # Below are the definitions for the "attempted" transaction mix.
34 | # The TPC-C specification requires minimum percentages for all but
35 | # the NEW_ORDER transaction. If a test run happens to have any of
36 | # those four types fall below those minimums, the entire test is
37 | # invalid. We don't want that to happen, so we specify values just
38 | # a tiny bit above the required minimum.
39 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
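# With the defaults below: 100.0 - (43.2 + 4.2 + 4.2 + 4.2) = 44.2% NEW_ORDER.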
40 | paymentWeight=43.2
41 | orderStatusWeight=4.2
42 | deliveryWeight=4.2
43 | stockLevelWeight=4.2
44 |
45 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
46 | # transactions to roll back due to a user entry error (non-existing
47 | # item number). Doing it with a strict 1/100th probability can lead to
48 | # undershooting this target, so we default to 1.01% to be sure.
49 | rollbackPercent=1.01
50 |
51 | # Directory name to create for collecting detailed result data.
52 | # Comment this out to suppress. Note that the Flask UI will define
53 | # this by itself, so don't specify it if you run through the UI.
54 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
55 |
56 | # BenchmarkSQL includes three OS metric collector scripts implemented
57 | # in Python3. Two require collectd to be installed on the server
58 | # systems that you want to include in the performance report. The data
59 | # will be saved in resultDirectory/data/os-metric.json. The third
60 | # is based on Prometheus and node_exporter.
61 |
62 | # mcCollectdMqtt.py is a metric collector that expects the collectd
63 | # instances on the servers to send the metric data to an MQTT broker.
64 |
65 | #osCollectorScript=./mcCollectdMqtt.py \
66 | # -h mymqttbroker.localdomain \
67 | # -t collectd/mydbserver.localdomain/# \
68 | # -t collectd/mybackrest.localdomain/#
69 |
70 | # mcCollectdGraphite.py is a metric collector that expects the
71 | # collectd instances on the servers to send the metric data to
72 | # a graphite/whisper database and be available through the /render
73 | # API.
74 |
75 | #osCollectorScript=./mcCollectdGraphite.py \
76 | # -u http://mygraphite.localdomain/render/ \
77 | # -t collectd.mydbserver_localdomain.*.* \
78 | # -t collectd.mydbserver_localdomain.*.*.* \
79 | # -t collectd.mybackrest_localdomain.*.* \
80 | # -t collectd.mybackrest_localdomain.*.*.*
81 |
82 | # mcPrometheus.py retrieves the metric data from a Prometheus
83 | # server through the API. It converts the output into the same
84 | # format as the former two produce. The instances listed are
85 | # the same names given in the "instance" label of the metric
86 | # data scraped by Prometheus. The port number will be removed
87 | # in the os-metric.json output.
88 |
89 | #osCollectorScript=./mcPrometheus.py \
90 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
91 | # -i mydbserver.localdomain:9100 \
92 | # -i mybackrest.localdomain:9100
93 |
94 | # The report script is what generates the detailed HTML report for
95 | # the benchmark run. It is a Jinja2 template based reporting system
96 | # that includes graphs of various metrics, captured during the benchmark.
97 |
98 | reportScript=./generateReport.py -t report_simple.html
99 |
100 | #reportScript=./generateReport.py \
101 | # -t report_extended.html \
102 | # -c 'mydbserver.localdomain:DB server' \
103 | # -d 'mydbserver.localdomain:DB server:hda2' \
104 | # -i 'mydbserver.localdomain:DB server:eth0' \
105 | # -c 'mybackrest.localdomain:pgbackrest server' \
106 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
107 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
108 |
--------------------------------------------------------------------------------
/src/main/resources/FlaskService/templates/main.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | BenchmarkSQL Status
5 |
6 |
41 |
42 |
43 | BenchmarkSQL Status is {{ current_job_type }}
44 | Current job is running for {{ current_job_runtime }}
45 |
77 | Current/Last Job Output
78 |
79 |
83 | Results
84 |
85 |
86 | Result Name |
87 | Start Time |
88 | State |
89 | Action |
90 |
91 | {% for run_id, res_name, res_start, res_state in results %}
92 |
93 | {{ res_name }} |
94 | {{ res_start }} |
95 | {{ res_state }} |
96 | {% if res_state == "RUN" %}
97 | In Progress |
98 | {% endif %}
99 | {% if res_state == "CANCELED" %}
100 | Show Log |
101 | Show Report |
102 | Delete Result |
103 | {% endif %}
104 | {% if res_state == "FINISHED" %}
105 | Show Log |
106 | Show Report |
107 | Delete Result |
108 | {% endif %}
109 |
110 | {% endfor %}
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/src/main/resources/benchmarkctl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import requests
4 | import json
5 | import re
6 | import sys
7 |
8 | def main():
9 | if len(sys.argv) < 5:
10 | usage()
11 | sys.exit(2)
12 |
13 | with open(sys.argv[1], 'r') as fd:
14 | config = json.load(fd)
15 |
16 | command = sys.argv[2]
17 | appnode = sys.argv[3]
18 | dbnode = sys.argv[4]
19 |
20 | extra_opts = {}
21 | for opt in sys.argv[5:]:
22 | key, val = opt.split('=')
23 | extra_opts[key] = val
24 |
25 | # TODO: sanity checks for all args
26 |
27 | bmsql = BenchmarkSQL(config, appnode, dbnode, extra_opts)
28 |
29 | if command == 'build':
30 | result = bmsql.build()
31 | print(result['current_job_type'])
32 | elif command == 'destroy':
33 | result = bmsql.destroy()
34 | print(result['current_job_type'])
35 | elif command == 'run':
36 | result = bmsql.run()
37 | print(result['current_job_type'])
38 | print(result['current_job_id'])
39 | elif command == 'cancel':
40 | result = bmsql.cancel()
41 | print(result['current_job_type'])
42 | elif command == 'status':
43 | result = bmsql.status()
44 | print(result['current_job_type'])
45 | print(result['current_job_id'])
46 | elif command == 'txsummary':
47 | result = bmsql.txsummary(sys.argv[5])
48 | print(json.dumps(result['txsummary'], indent=2))
49 | else:
50 | print("unknown command '%s'"%(command,))
51 | sys.exit(2)
52 |
53 | def usage():
54 | print("""usage: benchmarkctl CONFIG.json COMMAND APPNODE DBNODE [KEY=VALUE ...]
55 | COMMAND := build | destroy | run | cancel | status | txsummary""", file = sys.stderr)
56 |
57 |
58 | class BenchmarkSQL:
59 | def __init__(self, config, appnode, dbnode, extra_opts = None):
60 | with open(config['properties_template'], 'r') as fd:
61 | properties = fd.read()
62 | overrides = config['properties']
63 | overrides.update(config['dbnodes'][dbnode]['properties'])
64 | overrides.update(config['appnodes'][appnode]['properties'])
65 | if extra_opts is not None:
66 | overrides.update(extra_opts)
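# Apply each override by rewriting the matching "key=..." line of
# the properties template (anchored, multi-line regex substitution).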
67 | for key in overrides:
68 | properties = re.sub('^%s=.*$'%(key),
69 | '%s=%s'%(key, overrides[key]),
70 | properties,
71 | flags = re.MULTILINE)
72 |
73 | self.config = config
74 | self.appnode = appnode
75 | self.appconf = config['appnodes'][appnode]
76 | self.dbnode = dbnode
77 | self.dbconf = config['dbnodes'][dbnode]
78 | self.properties = properties
79 |
80 | def status(self):
81 | url = self.appconf['api_url']
82 | req = {
83 | 'command': 'status'
84 | }
85 | res = requests.post(url, data = {'request': json.dumps(req)})
86 | return json.loads(res.text)
87 |
88 | def txsummary(self, run_id):
89 | url = self.appconf['api_url']
90 | req = {
91 | 'command': 'txsummary',
92 | 'run_id': int(run_id)
93 | }
94 | res = requests.post(url, data = {'request': json.dumps(req)})
95 | return json.loads(res.text)
96 |
97 | def build(self):
98 | url = self.appconf['api_url']
99 | req = {
100 | 'command': 'build',
101 | 'properties': self.properties
102 | }
103 | res = requests.post(url, data = {'request': json.dumps(req)})
104 | return json.loads(res.text)
105 |
106 | def destroy(self):
107 | url = self.appconf['api_url']
108 | req = {
109 | 'command': 'destroy',
110 | 'properties': self.properties
111 | }
112 | res = requests.post(url, data = {'request': json.dumps(req)})
113 | return json.loads(res.text)
114 |
115 | def run(self):
116 | url = self.appconf['api_url']
117 | req = {
118 | 'command': 'run',
119 | 'properties': self.properties
120 | }
121 | res = requests.post(url, data = {'request': json.dumps(req)})
122 | return json.loads(res.text)
123 |
124 | def cancel(self):
125 | url = self.appconf['api_url']
126 | req = {
127 | 'command': 'cancel',
128 | 'properties': self.properties
129 | }
130 | res = requests.post(url, data = {'request': json.dumps(req)})
131 | return json.loads(res.text)
132 |
133 | if __name__ == '__main__':
134 | main()
135 |
--------------------------------------------------------------------------------
/src/main/resources/checks/check_details.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- Test 1
3 | --
4 | -- All ORDER rows where O_CARRIER_ID is NULL must have a matching
5 | -- row in NEW_ORDER.
6 | -- ----------------------------------------------------------------------
7 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
8 | count(*) AS "count", 'Undelivered ORDERs not found in NEW_ORDER' AS "Problem"
9 | FROM bmsql_oorder
10 | WHERE o_carrier_id IS NULL
11 | AND NOT EXISTS (
12 | SELECT 1 FROM bmsql_new_order
13 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
14 | );
15 |
16 | -- Detail information
17 | SELECT 'Undelivered ORDER' AS "_", O_W_ID, O_D_ID, O_ID,
18 | 'not found in NEW_ORDER' AS "__"
19 | FROM bmsql_oorder
20 | WHERE o_carrier_id IS NULL
21 | AND NOT EXISTS (
22 | SELECT 1 FROM bmsql_new_order
23 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
24 | );
25 |
26 |
27 | -- ----------------------------------------------------------------------
28 | -- Test 2
29 | --
30 | -- All ORDER rows where O_CARRIER_ID is NOT NULL must not have a matching
31 | -- row in NEW_ORDER.
32 | -- ----------------------------------------------------------------------
33 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
34 | count(*) AS "count", 'Delivered ORDERs still found in NEW_ORDER' AS "Problem"
35 | FROM bmsql_oorder
36 | WHERE o_carrier_id IS NOT NULL
37 | AND EXISTS (
38 | SELECT 1 FROM bmsql_new_order
39 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
40 | );
41 |
42 | -- Detail information
43 | SELECT 'Delivered ORDER' AS "_", O_W_ID, O_D_ID, O_ID,
44 | 'still found in NEW_ORDER' AS "__"
45 | FROM bmsql_oorder
46 | WHERE o_carrier_id IS NOT NULL
47 | AND EXISTS (
48 | SELECT 1 FROM bmsql_new_order
49 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
50 | );
51 |
52 |
53 | -- ----------------------------------------------------------------------
54 | -- Test 3
55 | --
56 | -- All NEW_ORDER rows must have a matching ORDER row.
57 | -- ----------------------------------------------------------------------
58 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
59 | count(*) AS "count", 'Orphaned NEW_ORDER rows' AS "Problem"
60 | FROM bmsql_new_order
61 | WHERE NOT EXISTS (
62 | SELECT 1 FROM bmsql_oorder
63 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
64 | );
65 |
66 | -- Detail information
67 | SELECT 'Orphaned NEW_ORDER row' AS "_", no_w_id, no_d_id, no_o_id
68 | FROM bmsql_new_order
69 | WHERE NOT EXISTS (
70 | SELECT 1 FROM bmsql_oorder
71 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
72 | );
73 |
74 |
75 | -- ----------------------------------------------------------------------
76 | -- Test 4
77 | --
78 | -- ORDER_LINES must have a matching ORDER
79 | -- ----------------------------------------------------------------------
80 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
81 | count(*) AS "count", 'Orphaned ORDER_LINE rows' AS "Problem"
82 | FROM bmsql_order_line
83 | WHERE NOT EXISTS (
84 | SELECT 1 FROM bmsql_oorder
85 | WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
86 | );
87 |
88 | -- Detail information
89 | SELECT 'Orphaned ORDER_LINE row' AS "_", ol_w_id, ol_d_id, ol_o_id
90 | FROM bmsql_order_line
91 | WHERE NOT EXISTS (
92 | SELECT 1 FROM bmsql_oorder
93 | WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
94 | );
95 |
96 |
97 | -- ----------------------------------------------------------------------
98 | -- Test 5
99 | --
100 | -- Check the ORDER.O_OL_CNT
101 | -- ----------------------------------------------------------------------
102 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
103 | count(*) AS "count", 'ORDERs with wrong O_OL_CNT' AS "Problem"
104 | FROM (
105 | SELECT o_w_id, o_d_id, o_id, o_ol_cnt, count(*) AS "actual"
106 | FROM bmsql_oorder
107 | LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
108 | AND ol_o_id = o_id
109 | GROUP BY o_w_id, o_d_id, o_id, o_ol_cnt
110 | HAVING o_ol_cnt <> count(*)
111 | ) AS X;
112 |
113 | -- Detail information
114 | SELECT 'Wrong O_OL_CNT' AS "Problem", o_w_id, o_d_id, o_id, o_ol_cnt, count(*) AS "actual"
115 | FROM bmsql_oorder
116 | LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
117 | AND ol_o_id = o_id
118 | GROUP BY "Problem", o_w_id, o_d_id, o_id, o_ol_cnt
119 | HAVING o_ol_cnt <> count(*);
120 |
121 |
--------------------------------------------------------------------------------
/src/main/resources/checks/checks.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- Test 1
3 | --
4 | -- All ORDER rows where O_CARRIER_ID is NULL must have a matching
5 | -- row in NEW_ORDER.
6 | -- ----------------------------------------------------------------------
7 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
8 | count(*) AS "count", 'Undelivered ORDERs not found in NEW_ORDER' AS "Description"
9 | FROM bmsql_oorder
10 | WHERE o_carrier_id IS NULL
11 | AND NOT EXISTS (
12 | SELECT 1 FROM bmsql_new_order
13 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
14 | );
15 |
16 | -- ----------------------------------------------------------------------
17 | -- Test 2
18 | --
19 | -- All ORDER rows where O_CARRIER_ID is NOT NULL must not have a matching
20 | -- row in NEW_ORDER.
21 | -- ----------------------------------------------------------------------
22 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
23 | count(*) AS "count", 'Delivered ORDERs still found in NEW_ORDER' AS "Description"
24 | FROM bmsql_oorder
25 | WHERE o_carrier_id IS NOT NULL
26 | AND EXISTS (
27 | SELECT 1 FROM bmsql_new_order
28 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
29 | );
30 |
31 | -- ----------------------------------------------------------------------
32 | -- Test 3
33 | --
34 | -- All NEW_ORDER rows must have a matching ORDER row.
35 | -- ----------------------------------------------------------------------
36 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
37 | count(*) AS "count", 'Orphaned NEW_ORDER rows' AS "Description"
38 | FROM bmsql_new_order
39 | WHERE NOT EXISTS (
40 | SELECT 1 FROM bmsql_oorder
41 | WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
42 | );
43 |
44 | -- ----------------------------------------------------------------------
45 | -- Test 4
46 | --
47 | -- ORDER_LINES must have a matching ORDER
48 | -- ----------------------------------------------------------------------
49 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
50 | count(*) AS "count", 'Orphaned ORDER_LINE rows' AS "Description"
51 | FROM bmsql_order_line
52 | WHERE NOT EXISTS (
53 | SELECT 1 FROM bmsql_oorder
54 | WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
55 | );
56 |
57 | -- ----------------------------------------------------------------------
58 | -- Test 5
59 | --
60 | -- Check the ORDER.O_OL_CNT
61 | -- ----------------------------------------------------------------------
62 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
63 | count(*) AS "count", 'ORDERs with wrong O_OL_CNT' AS "Description"
64 | FROM (
65 | SELECT o_w_id, o_d_id, o_id, o_ol_cnt, count(*) AS "actual"
66 | FROM bmsql_oorder
67 | LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
68 | AND ol_o_id = o_id
69 | GROUP BY o_w_id, o_d_id, o_id, o_ol_cnt
70 | HAVING o_ol_cnt <> count(*)
71 | ) AS X;
72 |
73 | -- ----------------------------------------------------------------------
74 | -- Test 6
75 | --
76 | -- The W_YTD must match the sum(D_YTD) for the 10 districts of the
77 | -- Warehouse.
78 | -- ----------------------------------------------------------------------
79 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
80 | count(*) AS "count", 'Warehouses where W_YTD <> sum(D_YTD)' AS "Description"
81 | FROM (
82 | SELECT w_id, w_ytd, sum(d_ytd) AS sum_d_ytd
83 | FROM bmsql_warehouse
84 | LEFT JOIN bmsql_district ON d_w_id = w_id
85 | GROUP BY w_id, w_ytd
86 | HAVING w_ytd <> sum(d_ytd)
87 | ) AS X;
88 |
89 | -- ----------------------------------------------------------------------
90 | -- Test 7
91 | --
92 | -- The sum of all W_YTD must match the sum of all C_YTD_PAYMENT.
93 | -- Because the PAYMENT can happen remote, we cannot match those
94 | -- up by DISTRICT.
95 | -- ----------------------------------------------------------------------
96 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
97 | CASE count(*) WHEN 0 THEN 'sum(w_ytd) = sum(c_ytd_payment)'
98 | ELSE 'sum(w_ytd) <> sum(c_ytd_payment)' END AS "Description"
99 | FROM (
100 | SELECT sum_w_ytd, sum_c_ytd_payment
101 | FROM (SELECT sum(w_ytd) AS sum_w_ytd FROM bmsql_warehouse) AS W,
102 | (SELECT sum(c_ytd_payment) AS sum_c_ytd_payment FROM bmsql_customer) AS C
103 | WHERE sum_w_ytd <> sum_c_ytd_payment
104 | ) AS X;
105 |
106 | -- ----------------------------------------------------------------------
107 | -- Test 8
108 | --
109 | -- The C_BALANCE of a CUSTOMER must be equal to the sum(OL_AMOUNT) of
110 | -- all delivered ORDER_LINES (where OL_DELIVERY_D IS NOT NULL) minus
111 | -- the sum(H_AMOUNT).
112 | -- ----------------------------------------------------------------------
113 | SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
114 | count(*) AS "count",
115 | 'Customers where C_BALANCE <> sum(OL_AMOUNT) of delivered orders minus sum(H_AMOUNT)' AS "Description"
116 | FROM (
117 | SELECT c_w_id, c_d_id, c_id, coalesce(sum_ol_amount, 0.0) AS sum_ol_amount,
118 | coalesce(sum_h_amount, 0.0) AS sum_h_amount
119 | FROM bmsql_customer
120 | LEFT JOIN (
121 | SELECT o_w_id, o_d_id, o_c_id, sum(ol_amount) as sum_ol_amount
122 | FROM bmsql_oorder
123 | JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
124 | WHERE o_carrier_id IS NOT NULL AND ol_delivery_d IS NOT NULL
125 | GROUP BY o_w_id, o_d_id, o_c_id
126 | ) AS OL ON o_w_id = c_w_id AND o_d_id = c_d_id AND o_c_id = c_id
127 | LEFT JOIN (
128 | SELECT h_c_w_id, h_c_d_id, h_c_id, sum(h_amount) AS sum_h_amount
129 | FROM bmsql_history
130 | GROUP BY h_c_w_id, h_c_d_id, h_c_id
131 | ) AS H ON h_c_w_id = c_w_id AND h_c_d_id = c_d_id AND h_c_id = c_id
132 | WHERE c_balance <> sum_ol_amount - sum_h_amount
133 | ) AS X;
134 |
135 |
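136 | -- ----------------------------------------------------------------------
137 | -- Example usage (illustrative; assumes a PostgreSQL database named
138 | -- benchmarksql1 that was built and run with BenchmarkSQL):
139 | --
140 | --     psql -d benchmarksql1 -f checks.sql
141 | --
142 | -- Every row of the output should show 'OK' in the "check" column.
143 | -- ----------------------------------------------------------------------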
--------------------------------------------------------------------------------
/src/main/resources/funcs.sh:
--------------------------------------------------------------------------------
1 | # ----
2 | # $1 is the properties file
3 | # ----
4 | PROPS="$1"
5 | if [ ! -f "${PROPS}" ] ; then
6 | echo "${PROPS}: no such file" >&2
7 | exit 1
8 | fi
9 |
10 | # ----
11 | # getProp()
12 | #
13 | # Get a config value from the properties file.
14 | # ----
15 | function getProp()
16 | {
17 | grep "^${1}=" ${PROPS} | sed -e "s/^${1}=//"
18 | # grep "^${1}=" ${PROPS} | sed -e "s/^${1}=//" -e 's/\s*$//'
19 | }
20 |
21 | # ----
22 | # setCP()
23 | #
24 | # Determine the CLASSPATH based on the database system.
25 | # ----
26 | function setCP()
27 | {
28 | case "$(getProp db)" in
29 | oracle)
30 | cp="../lib/*"
31 | if [ ! -z "${ORACLE_HOME}" -a -d ${ORACLE_HOME}/lib ] ; then
32 | cp="${cp}:${ORACLE_HOME}/lib/*"
33 | fi
35 | ;;
36 | postgres)
37 | cp="../lib/*"
38 | ;;
39 | firebird)
40 | cp="../lib/*"
41 | ;;
42 | mariadb)
43 | cp="../lib/*"
44 | ;;
45 | transact-sql)
46 | cp="../lib/*"
47 | ;;
48 | babelfish)
49 | cp="../lib/*"
50 | ;;
51 | esac
52 | myCP="./:../BenchmarkSQL.jar:${cp}"
53 | export myCP
54 | }
55 |
56 | # ----
57 | # Make sure that the properties file has db= and that the value
58 | # is a database we support.
59 | # ----
60 | db=$(getProp db)
61 | case "${db}" in
62 | oracle|postgres|firebird|mariadb|transact-sql|babelfish)
63 | ;;
64 | "") echo "ERROR: missing db= config option in ${PROPS}" >&2
65 | exit 1
66 | ;;
67 | *) echo "ERROR: unsupported database type db=${db} in ${PROPS}" >&2
68 | exit 1
69 | ;;
70 | esac
71 |
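72 | # ----
73 | # Example (illustrative): funcs.sh is not executed directly, it is
74 | # sourced by the run*.sh scripts, which then use getProp and setCP:
75 | #
76 | #     source ./funcs.sh sample.postgresql.properties
77 | #     setCP || exit 1
78 | #     java -cp "$myCP" -Dprop=sample.postgresql.properties ...
79 | # ----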
--------------------------------------------------------------------------------
/src/main/resources/generateReport.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import generateReport
4 | generateReport.main.main()
5 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/__init__.py:
--------------------------------------------------------------------------------
1 | from . import bmsqlResult
2 | from . import bmsqlPlot
3 | from . import main
4 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/bmsqlResult.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import csv
3 | import math
4 | import json
5 | import re
6 |
7 | class bmsqlResult:
8 | def __init__(self, resdir):
9 | """
10 | Create a new bmsqlResult instance and load all the data
11 | in the result directory.
12 | """
13 | self.ttypes = [
14 | 'NEW_ORDER',
15 | 'PAYMENT',
16 | 'ORDER_STATUS',
17 | 'STOCK_LEVEL',
18 | 'DELIVERY',
19 | 'DELIVERY_BG',
20 | ]
21 | self.resdir = resdir
22 | self.datadir = os.path.join(resdir, 'data')
23 |
24 | # ----
25 | # Load the run info into a dict
26 | # ----
27 | fname = os.path.join(self.datadir, 'runInfo.csv')
28 | with open(fname, newline = '') as fd:
29 | rdr = csv.DictReader(fd)
30 | self.runinfo = next(rdr)
31 |
32 | # ----
33 | # Load the other CSV files into dicts of arrays.
34 | #
35 | # result_ttype a dict of result_data slices by transaction type
36 | #
37 | # summary_ttype a dict of transaction summary info per type
38 | #
39 | # hist_ttype a dict of hist_data slices by transaction type
40 | #
41 | # hist_bins the number of bins in the histogram
42 | #
43 | # hist_cutoff the edge of the last bin in the histogram
44 | #
45 | # Loading of the summary will fail if the benchmark run is
46 | # still in progress or has been aborted. We then return with
47 |         # an incomplete result, which still allows drawing graphs.
48 | # ----
49 | self.result_ttype = self._load_ttype_csv_multiple('result.csv')
50 | try:
51 | self.summary_ttype = self._load_ttype_csv_single('summary.csv')
52 | except StopIteration:
53 | return
54 | self.hist_ttype = self._load_ttype_csv_multiple('histogram.csv')
55 | self.hist_bins = len(self.hist_ttype['NEW_ORDER'])
56 | self.hist_cutoff = self.hist_ttype['NEW_ORDER'][-1][0]
57 | self.hist_statsdiv = math.log(self.hist_cutoff * 1000.0) / self.hist_bins
58 |
59 | # ----
60 |         # The total number of "measured" transactions is the sum of the
61 |         # summary counts minus the delivery background transactions.
62 | # ----
63 | self.total_trans = (sum([self.summary_ttype[tt][0]
64 | for tt in self.ttypes])
65 | - self.summary_ttype['DELIVERY_BG'][0])
66 |
67 | # ----
68 | # If an OS metric collector was running, load its data.
69 | # ----
70 | os_metric_fname = os.path.join(self.datadir, 'os-metric.json')
71 | if os.path.exists(os_metric_fname):
72 | with open(os_metric_fname) as fd:
73 | self.os_metric = json.loads(fd.read())
74 | else:
75 | self.os_metric = {}
76 |
77 | # ----
78 | # Load the run.properties but remove the password
79 | # ----
80 | prop_fname = os.path.join(resdir, 'run.properties')
81 | with open(prop_fname, 'r') as fd:
82 | props = fd.read()
83 | self.properties = re.sub(r'(password\s*=\s*).*$', r'\1********',
84 | props, flags = re.M)
85 |
86 | def tpm_c(self):
87 | num_new_order = self.summary_ttype['NEW_ORDER'][0]
88 | return num_new_order / int(self.runinfo['runMins'])
89 |
90 | def tpm_total(self):
91 | return self.total_trans / int(self.runinfo['runMins'])
92 |
93 | def percentile(self, tt, nth):
94 | """
95 | Returns the nth percentile response time of transaction type tt
96 | """
97 | nth_have = 0
98 | nth_need = int(self.summary_ttype[tt][0] * nth)
99 | b = 0
100 | for b in range(0, self.hist_bins):
101 | if nth_have >= nth_need:
102 | break
103 | nth_have += int(self.hist_ttype[tt][b][1])
104 | return math.exp(float(b) * self.hist_statsdiv) / 1000.0
105 |
106 | def num_trans(self, tt):
107 | """
108 |         Returns the total number of transactions for the given type
109 | during the measurement cycle
110 | """
111 | return int(self.summary_ttype[tt][0])
112 |
113 | def trans_mix(self, tt):
114 | """
115 | Returns the percentage of the transaction type overall
116 | """
117 | return self.summary_ttype[tt][1]
118 |
119 | def avg_latency(self, tt):
120 | """
121 | Returns the average latency for the given transaction type
122 | during the measurement cycle
123 | """
124 | return self.summary_ttype[tt][2]
125 |
126 | def max_latency(self, tt):
127 | """
128 | Returns the maximum latency for the given transaction type
129 | during the measurement cycle
130 | """
131 | return self.summary_ttype[tt][3]
132 |
133 | def num_rollbacks(self, tt):
134 | """
135 | Returns the number of rollbacks that happened for the transaction
136 | type. This is only useful for NEW_ORDER.
137 | """
138 | return int(self.summary_ttype[tt][4])
139 |
140 | def num_errors(self, tt):
141 | """
142 | Returns the number of errors encountered for the transaction type
143 | during the measurement cycle
144 | """
145 | return int(self.summary_ttype[tt][5])
146 |
147 | def _load_ttype_csv_single(self, fname, skip_header = True):
148 | """
149 | Read a CSV file that has the transaction type as the first element.
150 | We expect a single row per transaction type.
151 | """
152 | ttdict = {}
153 | path = os.path.join(self.datadir, fname)
154 | with open(path, newline = '') as fd:
155 | rdr = csv.reader(fd)
156 | if skip_header:
157 | _ = next(rdr)
158 | for row in rdr:
159 | tt = row[0]
160 | ttdict[tt] = [float(d) for d in row[1:]]
161 |
162 | return ttdict
163 |
164 | def _load_ttype_csv_multiple(self, fname, skip_header = True):
165 | """
166 | Read a CSV file that has the transaction type as the first element.
167 |         Return a dict that maps each transaction type to the list of
168 |         value tuples read for that type.
169 | """
170 | ttdict = {}
171 | path = os.path.join(self.datadir, fname)
172 | with open(path, newline = '') as fd:
173 | rdr = csv.reader(fd)
174 | if skip_header:
175 | _ = next(rdr)
176 | data = [[row[0], [float(d) for d in row[1:]]] for row in rdr]
177 |
178 | for ttype in self.ttypes:
179 | tuples = filter(lambda x : x[0] == ttype, data)
180 | ttdict[ttype] = [tup[1] for tup in tuples]
181 |
182 | return ttdict
183 |
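184 |
185 | if __name__ == '__main__':
186 |     # ----
187 |     # Minimal sketch for ad-hoc inspection (not used by the report
188 |     # generator): load the result directory named on the command line
189 |     # and print a few key numbers. Assumes a completed run, so that
190 |     # summary.csv exists.
191 |     # ----
192 |     import sys
193 |     res = bmsqlResult(sys.argv[1])
194 |     print("tpmC (NEW_ORDER per minute) = {:.2f}".format(res.tpm_c()))
195 |     print("tpmTOTAL = {:.2f}".format(res.tpm_total()))
196 |     print("NEW_ORDER 90th percentile latency = {:.3f}s".format(
197 |         res.percentile('NEW_ORDER', 0.9)))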
--------------------------------------------------------------------------------
/src/main/resources/generateReport/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import sys
5 | import jinja2
6 | import base64
7 | import getopt
8 |
9 | from generateReport import *
10 |
11 | def main():
12 | opt_template = 'report_simple.html'
13 | opt_resultdir = None
14 | opt_os_metrics = []
15 | opt_tt_limit = {
16 | 'NEW_ORDER': 5.0,
17 | 'PAYMENT': 5.0,
18 | 'ORDER_STATUS': 5.0,
19 | 'STOCK_LEVEL': 20.0,
20 | 'DELIVERY': 5.0,
21 | 'DELIVERY_BG': 80.0
22 | }
23 | opt_help = False
24 | errors = False
25 |
26 | opts, args = getopt.getopt(sys.argv[1:], 't:r:l:c:m:d:i:h?',
27 | ['template=', 'resultdir=', 'limit=',
28 | 'cpu=', 'memory=', 'disk=', 'interface=',
29 | 'help'])
30 | for opt, val in opts:
31 | if opt in ['-t', '--template',]:
32 | opt_template = val
33 | elif opt in ['-r', '--resultdir',]:
34 | opt_resultdir = val
35 | elif opt in ['-l', '--limit',]:
36 | sval = val.split('=')
37 | if len(sval) != 2:
38 | print("invalid limit specification: {}".format(val),
39 | file = sys.stderr)
40 | errors = True
41 | continue
42 | tt = sval[0].upper()
43 | if tt not in opt_tt_limit:
44 | print("unknown transaction type: {}".format(tt),
45 | file = sys.stderr)
46 | errors = True
47 | continue
48 | try:
49 | opt_tt_limit[tt] = float(sval[1])
50 | except Exception as e:
51 | print("invalid limit specification: {}".format(str(e)),
52 | file = sys.stderr)
53 | errors = True
54 | continue
55 | elif opt in ['-c', '--cpu',]:
56 | sval = val.split(':')
57 | if len(sval) != 2:
58 | print("invalid host specification: {}".format(val),
59 | file = sys.stderr)
60 |                 print("use HOSTNAME:ALIAS",
61 | file = sys.stderr)
62 | errors = True
63 | continue
64 | opt_os_metrics.append(('cpu', sval[0], sval[1]))
65 | elif opt in ['-m', '--memory',]:
66 | sval = val.split(':')
67 | if len(sval) != 2:
68 | print("invalid specification: {}".format(val),
69 | file = sys.stderr)
70 |                 print("use HOSTNAME:ALIAS",
71 | file = sys.stderr)
72 | errors = True
73 | continue
74 | opt_os_metrics.append(('memory', sval[0], sval[1]))
75 | elif opt in ['-d', '--disk',]:
76 | sval = val.split(':')
77 | if len(sval) != 3:
78 | print("invalid disk device specification: {}".format(val),
79 | file = sys.stderr)
80 |                 print("use HOSTNAME:ALIAS:DEVICENAME",
81 | file = sys.stderr)
82 |                 errors = True
83 |                 continue
84 |
85 | opt_os_metrics.append(('disk', sval[0], sval[1], sval[2]))
86 | elif opt in ['-i', '--interface',]:
87 | sval = val.split(':')
88 | if len(sval) != 3:
89 | print("invalid interface device specification: {}".format(val),
90 | file = sys.stderr)
91 |                 print("use HOSTNAME:ALIAS:DEVICENAME",
92 | file = sys.stderr)
93 | errors = True
94 | continue
95 | opt_os_metrics.append(('interface', sval[0], sval[1], sval[2]))
96 | elif opt in ['-?', '-h', '--help']:
97 | opt_help = True
98 | break
99 |
100 | if errors:
101 | return 2
102 | if opt_help or opt_resultdir is None:
103 | usage()
104 | return 2
105 |
106 | result = bmsqlResult.bmsqlResult(opt_resultdir)
107 | result.tt_limit = opt_tt_limit
108 |     for tt in result.ttypes:
109 |         break  # the debug prints below are disabled; remove this break to enable them
110 | print("count {} = {}".format(tt, result.num_trans(tt)))
111 | print("mix {} = {:.3f}".format(tt, result.trans_mix(tt)))
112 | print("avg {} = {:.3f}".format(tt, result.avg_latency(tt)))
113 | print("max {} = {:.3f}".format(tt, result.max_latency(tt)))
114 | print("90th {} = {:.3f}".format(tt, result.percentile(tt, 0.9)))
115 | print("rbk {} = {} ({:.3f}%)".format(tt, result.num_rollbacks(tt),
116 | result.num_rollbacks(tt) / result.num_trans(tt) * 100))
117 | print("errors {} = {}".format(tt, result.num_errors(tt)))
118 | print("")
119 |
120 | reportFname = opt_resultdir.rstrip('/\\') + '.html'
121 | with open(reportFname, 'w') as fd:
122 | fd.write(generate_html(result, opt_template, opt_os_metrics))
123 | print("report generated as {}".format(reportFname))
124 |
125 | def generate_html(result, template, os_metrics):
126 | env = jinja2.Environment(
127 | loader = jinja2.PackageLoader('generateReport', 'templates')
128 | )
129 |
130 | plot = bmsqlPlot.bmsqlPlot(result)
131 |
132 | # ----
133 | # Collect all the data the template needs
134 | # ----
135 | data = {
136 | 'ttypes': result.ttypes,
137 | 'runinfo': result.runinfo,
138 | 'summary': summary_data(result),
139 | 'properties': result.properties,
140 | 'mix_warn': False,
141 | 'rbk_warn': False,
142 | 'tpm_c': '{:.2f}'.format(result.tpm_c()),
143 | 'tpm_total': '{:.2f}'.format(result.tpm_total()),
144 | 'tpm_percent': '{:.2f}'.format((result.tpm_c() * 100)
145 | / (12.86 * float(result.runinfo['runWarehouses']))),
146 | 'tpmc_svg': plot.tpmc_svg,
147 | 'delay_avg_svg': plot.delay_avg_svg,
148 | 'delay_max_svg': plot.delay_max_svg,
149 | 'metric_svg': plot.metric_svg,
150 | 'cpu_svg': plot.cpu_svg,
151 | 'memory_svg': plot.memory_svg,
152 | 'os_metrics': os_metrics,
153 | }
154 |
155 | # Propagate the mix_warn flag up to the toplevel
156 | for tt in result.ttypes:
157 | if data['summary'][tt]['mix_warn']:
158 | data['mix_warn'] = True
159 |
160 | template = env.get_template(template)
161 | return template.render(**data)
162 |
163 | def summary_data(result):
164 | color_ok = '#008000'
165 | color_warn = '#f08000'
166 | color_error = '#c00000'
167 |
168 | data = {}
169 | for tt in result.ttypes:
170 | # ----
171 | # Determine the percentiles and the latency limit
172 | # ----
173 | ninth = result.percentile(tt, 0.9)
174 | n5th = result.percentile(tt, 0.95)
175 | n9th = result.percentile(tt, 0.99)
176 | limit = result.tt_limit[tt]
177 |
178 | # ----
179 | # From that numbers we derive the color for the percentile numbers
180 | # ----
181 | color_limit = color_ok
182 | color_ninth = color_ok
183 | color_n5th = color_ok
184 | color_n9th = color_ok
185 | if ninth > limit:
186 | color_limit = color_error
187 | color_ninth = color_error
188 | if n5th > limit:
189 | if ninth <= limit:
190 | color_limit = color_warn
191 | color_n5th = color_warn
192 | if n9th > limit:
193 | if ninth <= limit:
194 | color_limit = color_warn
195 | color_n9th = color_warn
196 |
197 | # ----
198 | # Indicate if the transaction mix percentage is too low
199 | # by turning the number red.
200 | # ----
201 | mix = result.num_trans(tt) / result.total_trans * 100
202 | mix_warn = False
203 | color_mix = '#000000'
204 | if tt == 'NEW_ORDER' or tt == 'DELIVERY_BG':
205 | pass
206 | elif tt == 'PAYMENT':
207 | if mix < 43.0:
208 | color_mix = color_error
209 | mix_warn = True
210 | else:
211 | if mix < 4.0:
212 | color_mix = color_error
213 | mix_warn = True
214 | if tt == 'DELIVERY_BG':
215 | mix = 'N/A'
216 | else:
217 | mix = '{:.3f}%'.format(mix)
218 |
219 | # ----
220 | # Percentage of rollback is only relevant for NEW_ORDER
221 | # ----
222 | rbk = result.num_rollbacks(tt) / result.num_trans(tt) * 100
223 | color_rbk = '#000000'
224 | if tt == 'NEW_ORDER':
225 | if rbk < 1.0:
226 | color_rbk = color_error
227 | rbk = '{:.3f}%'.format(rbk)
228 | else:
229 | rbk = 'N/A'
230 |
231 | data[tt] = {
232 | 'count': result.num_trans(tt),
233 | 'mix': mix,
234 | 'mix_warn': mix_warn,
235 | 'style_mix': 'style="color:{};"'.format(color_mix),
236 | 'avg': "{:.3f}".format(result.avg_latency(tt)),
237 | 'max': "{:.3f}".format(result.max_latency(tt)),
238 | 'ninth': "{:.3f}".format(ninth),
239 | 'n5th': "{:.3f}".format(n5th),
240 | 'n9th': "{:.3f}".format(n9th),
241 | 'limit': "{:.3f}".format(limit),
242 | 'style_ninth': 'style="color:{};"'.format(color_ninth),
243 | 'style_n5th': 'style="color:{};"'.format(color_n5th),
244 | 'style_n9th': 'style="color:{};"'.format(color_n9th),
245 | 'style_limit': 'style="color:{};"'.format(color_limit),
246 | 'rbk': rbk,
247 | 'style_rbk': 'style="color:{};"'.format(color_rbk),
248 | 'errors': result.num_errors(tt),
249 | }
250 | return data
251 |
252 | def usage():
253 |     sys.stderr.write("""usage: {} -r RESULT_DIR [-t TEMPLATE] [OPTION ...]\n""".format(
254 |         os.path.basename(sys.argv[0])))
255 |
256 | if __name__ == '__main__':
257 | main()
258 |
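259 |
260 | # ----
261 | # Example invocation (illustrative; host, alias and device names are
262 | # placeholders that must match the collected OS metric data):
263 | #
264 | #   ./generateReport.py -r my_result_2023-01-01_120000 \
265 | #       -t report_extended.html \
266 | #       -c 'mydbserver.localdomain:DB server' \
267 | #       -d 'mydbserver.localdomain:DB server:hda2' \
268 | #       -i 'mydbserver.localdomain:DB server:eth0'
269 | #
270 | # The report is written next to the result directory, here as
271 | # my_result_2023-01-01_120000.html. As a worked example of the
272 | # tpm_percent computation above: a run with tpmC = 500.00 on
273 | # 50 warehouses reports 500 * 100 / (12.86 * 50) = 77.76% of the
274 | # theoretical maximum.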
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/bmsql.css:
--------------------------------------------------------------------------------
1 |
44 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_cpu.html:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_disk_octets.html:
--------------------------------------------------------------------------------
1 |
42 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_disk_ops.html:
--------------------------------------------------------------------------------
1 |
39 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_interface_octets.html:
--------------------------------------------------------------------------------
1 |
42 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_interface_packets.html:
--------------------------------------------------------------------------------
1 |
38 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_latency.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_memory.html:
--------------------------------------------------------------------------------
1 |
7 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/img_nopm.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/inc_latency_explain.html:
--------------------------------------------------------------------------------
1 |
2 | The total end-user experienced transaction latency is different from
3 | the database transaction latency. Since there is a limited number
4 | of database connections, a transaction submitted to the "System under Test"
5 | may not immediately start processing on the database because all
6 | connections are busy.
7 |
8 |
9 | BenchmarkSQL measures the total latency experienced by the end-user,
10 | as well as the time the
11 | request had to wait in the SUT for a database connection to become
12 | available. It is important to point out that increasing the number
13 | of database connections (parameter sutThreads) only helps to a certain
14 | point. That point is usually between two and eight times the number of
15 | CPU cores of the database server. After that point the concurrency in
16 | the database may increase the latency and cause the system to violate
17 | the response time constraints.
18 |
19 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/inc_nopm_explain.html:
--------------------------------------------------------------------------------
1 |
2 | tpmC is the number of NEW_ORDER transactions that were processed
3 | per minute. This is often referred to as New Orders Per Minute (NOPM).
4 | A good benchmark result is one that achieves a high tpmC without
5 | violating any of the TPC-C response time or transaction mix
6 | constraints.
7 |
8 |
9 | A steady NOPM over the entire measurement period
10 | (0 .. {{ runinfo['runMins'] }} minutes in this run) is most desirable.
11 | Any frequent or prolonged drops in tpmC are likely caused by spikes in
12 | the transaction latency.
13 |
14 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/inc_summary_explain.html:
--------------------------------------------------------------------------------
1 | {% if mix_warn %}
2 |
3 | Warning: The transaction mix did not meet the TPC-C
4 | specifications! The required minimum percentages are
5 | 43% for Payment and 4% for each of OrderStatus,
6 | StockLevel and Delivery. You may need to increase
7 | the probabilities in the configuration.
8 |
9 | {% endif %}
10 |
11 |
12 | The TPC-C specification has a theoretical maximum of 12.86 NEW_ORDER
13 | transactions per minute per warehouse. In reality this value cannot
14 | be reached because it would require a perfect mix with 45% of NEW_ORDER
15 | transactions and a ZERO response time from the System under Test
16 | including the database.
17 |
18 |
19 | The above tpmC of {{ tpm_c }} is {{ tpm_percent }}% of that theoretical
20 | maximum for a database with {{ runinfo['runWarehouses'] }} warehouses.
21 |
22 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/inc_summary_table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Transaction Type |
5 | Latency |
6 | Count |
7 | Percent |
8 | Rollback |
9 | Errors |
10 |
11 |
12 | 90th % |
13 | 95th % |
14 | 99th % |
15 | Avg |
16 | Max |
17 | Limit |
18 |
19 | {% for tt in ttypes %}
20 |
21 | {{ tt }} |
22 | {{ summary[tt]['ninth'] }}s |
23 | {{ summary[tt]['n5th'] }}s |
24 | {{ summary[tt]['n9th'] }}s |
25 | {{ summary[tt]['avg'] }}s |
26 | {{ summary[tt]['max'] }}s |
27 | {{ summary[tt]['limit'] }}s |
28 | {{ summary[tt]['count'] }} |
29 | {{ summary[tt]['mix'] }} |
30 | {{ summary[tt]['rbk'] }} |
31 | {{ summary[tt]['errors'] }} |
32 |
33 | {% endfor %}
34 |
35 |
36 |
37 |
38 |
39 | Overall tpmC: |
40 | {{ tpm_c }} |
41 |
42 |
43 | Overall tpmTotal: |
44 | {{ tpm_total }} |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/report_extended.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | BenchmarkSQL Run #{{ runinfo['runID'] }}
5 |
6 |
7 | {% include "bmsql.css" %}
8 |
9 |
10 |
11 |
12 | BenchmarkSQL Run #{{ runinfo['runID'] }}
13 |
14 |
15 |
16 | This TPC-C style benchmark run was performed using the
17 | {{ runinfo['application'] }} Application Module
18 | of BenchmarkSQL version
19 | {{ runinfo['jTPCCVersion'] }} and the
20 | {{ runinfo['dbType'] }} JDBC driver.
21 |
22 |
23 |
24 | Result Summary
25 |
26 | {% include "inc_summary_table.html" %}
27 | {% include "inc_summary_explain.html" %}
28 |
29 |
30 | NEW_ORDER Transactions per Minute
31 |
32 |
33 | {% include "img_nopm.html" %}
34 |
35 | {% include "inc_nopm_explain.html" %}
36 |
37 |
38 | NEW_ORDER Latency and Delay
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 | {% include "inc_latency_explain.html" %}
48 |
49 |
50 | PAYMENT Latency and Delay
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 | ORDER_STATUS Latency and Delay
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 | STOCK_LEVEL Latency and Delay
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 | DELIVERY (background part) Latency and Delay
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
93 | {% for metric in os_metrics %}
94 | {% if metric[0] == 'cpu' %}
95 |
96 | {{ metric[2] }} CPU usage
97 |
98 |
99 | {% include "img_cpu.html" %}
100 |
101 | {% endif %}
102 |
103 | {% if metric[0] == 'memory' %}
104 |
105 | {{ metric[2] }} Memory usage
106 |
107 |
108 | {% include "img_memory.html" %}
109 |
110 | {% endif %}
111 |
112 | {% if metric[0] == 'disk' %}
113 |
114 | {{ metric[2] }} Disk IO {{ metric[3] }}
115 |
116 |
117 | {% include "img_disk_octets.html" %}
118 |
119 | {% include "img_disk_ops.html" %}
120 |
121 | {% endif %}
122 |
123 | {% if metric[0] == 'interface' %}
124 |
125 | {{ metric[2] }} Network IO {{ metric[3] }}
126 |
127 |
128 | {% include "img_interface_octets.html" %}
129 |
130 | {% include "img_interface_packets.html" %}
131 |
132 | {% endif %}
133 | {% endfor %}
134 |
135 | Properties of this Run
136 |
137 | The configuration properties of this BenchmarkSQL run were:
138 |
139 |
140 |
141 |
142 | {{ properties }}
143 | |
144 |
145 |
146 |
147 |
148 |
149 |
--------------------------------------------------------------------------------
/src/main/resources/generateReport/templates/report_simple.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | BenchmarkSQL Run #{{ runinfo['runID'] }}
5 |
6 |
7 | {% include "bmsql.css" %}
8 |
9 |
10 |
11 |
12 | BenchmarkSQL Run #{{ runinfo['runID'] }}
13 |
14 |
15 |
16 | This TPC-C style benchmark run was performed using the
17 | {{ runinfo['application'] }} Application Module
18 | of BenchmarkSQL version
19 | {{ runinfo['jTPCCVersion'] }} and the
20 | {{ runinfo['dbType'] }} JDBC driver.
21 |
22 |
23 |
24 | Result Summary
25 |
26 | {% include "inc_summary_table.html" %}
27 | {% include "inc_summary_explain.html" %}
28 |
29 |
30 | NEW_ORDER Transactions per Minute
31 |
32 |
33 | {% include "img_nopm.html" %}
34 |
35 | {% include "inc_nopm_explain.html" %}
36 |
37 |
38 | NEW_ORDER Latency and Delay
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 | {% include "inc_latency_explain.html" %}
47 |
48 |
49 | PAYMENT Latency and Delay
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 | ORDER_STATUS Latency and Delay
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 | STOCK_LEVEL Latency and Delay
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 | DELIVERY (background part) Latency and Delay
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 | Properties of this Run
89 |
90 | The configuration properties of this BenchmarkSQL run were:
91 |
92 |
93 |
94 |
95 | {{ properties }}
96 | |
97 |
98 |
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | %d %p %c{1.} [%t] %m%n
11 |
12 |
13 |
14 |
15 | %m%n
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
25 |
26 |
27 |
28 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/src/main/resources/mcCollectdGraphite.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # ----------------------------------------------------------------------
3 | # mc_collectd_graphite.py
4 | #
5 | # OS Metric collector script for BenchmarkSQL that retrieves data
6 | # for selected hosts from a graphite-web instance and saves it in
7 | # the os-metric.json file.
8 | # ----------------------------------------------------------------------
9 | import os.path
10 | import sys
11 | import time
12 | import select
13 | import getopt
14 |
15 | import json
16 | import urllib.request
17 | import urllib.parse
18 |
19 | def main():
20 | cargs = {
21 | 'url': 'http://localhost:8080',
22 | 'targets': [
23 | ],
24 | 'startepoch': time.time(),
25 | }
26 |
27 | opts, args = getopt.getopt(sys.argv[1:], "u:t:r:S:", [
28 | "url=", "target=", "resultdir=", "startepoch=",
29 | ])
30 | for opt, val in opts:
31 | if opt in ['-u', '--url']:
32 | cargs['url'] = val
33 | elif opt in ['-t', '--target']:
34 | cargs['targets'].append(val)
35 | elif opt in ['-r', '--resultdir']:
36 | cargs['resultdir'] = val
37 | elif opt in ['-S', '--startepoch']:
38 | cargs['startepoch'] = float(val)
39 |
40 | coll = Collector(**cargs)
41 |
42 | try:
43 | coll.run()
44 | except KeyboardInterrupt:
45 | pass
46 |
47 | coll.shutdown()
48 |
49 | class Collector:
50 | def __init__(self, url = 'http://localhost:8080',
51 | targets = {}, resultdir = '.', startepoch = 0.0):
52 | self.url = url
53 | self.targets = targets
54 | self.resultdir = resultdir
55 | self.startepoch = float(startepoch)
56 | self.starttime = time.time()
57 |
58 | self.output = {}
59 |
60 | def run(self):
61 | # ----
62 | # Run until we receive anything on stdin (which is the way the
63 | # benchmark driver is signaling us to finish).
64 | # ----
65 | while True:
66 | r, w, x = select.select([sys.stdin], [], [], None)
67 | if len(r) > 0:
68 | break
69 |
70 | def shutdown(self):
71 | # ----
72 | # On shutdown we retrieve the metric data from the
73 | # graphite server by calling the /render API with
74 | # &format=json for all the targets specified.
75 | # ----
76 | minutes = int((time.time() - self.starttime) / 60) + 1
77 | params = urllib.parse.urlencode(
78 | [('target', t) for t in self.targets] +
79 | [('from', '-{}min'.format(minutes)), ('format', 'json')])
80 | url = self.url + "?" + params
81 |
82 | try:
83 | with urllib.request.urlopen(url) as fd:
84 | gdata = json.loads(fd.read().decode('utf-8'))
85 | except Exception as ex:
86 | print(str(ex), file = sys.stderr)
87 | print("url was:", url)
88 | return 1
89 |
90 | # ----
91 | # We need to reformat the data slightly since the hostnames
92 | # in the graphite metric paths have '_' instead of '.' as
93 | # domain name separators and the actual metric data is in
94 | # (value, timestamp) order, while we need that the other way
95 | # around.
96 | # ----
97 | result = {}
98 | for entry in gdata:
99 | esplit = entry['target'].split('.')
100 | host = esplit[1].replace('_', '.')
101 | metric = '.'.join(esplit[2:])
102 | if host not in result:
103 | result[host] = {}
104 | result[host][metric] = [(t - self.startepoch, v)
105 | for v, t in entry['datapoints']]
106 |
107 | with open(os.path.join(self.resultdir, 'os-metric.json'), 'w') as fd:
108 | fd.write(json.dumps(result))
109 |
110 | if __name__ == '__main__':
111 | main()
112 |
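113 |
114 | # ----
115 | # Example invocation (illustrative; the URL and target patterns are
116 | # placeholders mirroring the commented osCollectorScript examples in
117 | # the sample properties files). The script waits until it receives
118 | # input on stdin, then queries the /render API and writes
119 | # RESULTDIR/os-metric.json:
120 | #
121 | #   ./mcCollectdGraphite.py \
122 | #       -u http://mygraphite.localdomain/render/ \
123 | #       -t 'collectd.mydbserver_localdomain.*.*' \
124 | #       -t 'collectd.mydbserver_localdomain.*.*.*' \
125 | #       -r my_result/data -S 1672574400
126 | # ----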
--------------------------------------------------------------------------------
/src/main/resources/mcCollectdMqtt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # ----------------------------------------------------------------------
3 | # mc_collectd_mqtt.py
4 | #
5 | # OS metric collector script for BenchmarkSQL that retrieves data
6 | # sent to an MQTT broker by collectd on the server system(s)
7 | # ----------------------------------------------------------------------
8 | import os.path
9 | import sys
10 | import paho.mqtt.client as mqttc
11 | import csv
12 | import time
13 | import select
14 | import getopt
15 | import json
16 |
17 | def main():
18 | cargs = {
19 | 'host': 'localhost',
20 | 'topics': [
21 | ],
22 | 'startepoch': time.time(),
23 | }
24 |
25 | opts, args = getopt.getopt(sys.argv[1:], "h:p:U:P:t:r:i:S:", [
26 | "host=", "port=", "user=", "password=", "topic=",
27 | "resultdir=", "clientid=", "startepoch=",
28 | ])
29 | for opt, val in opts:
30 | if opt in ['-h', '--host']:
31 | cargs['host'] = val
32 | elif opt in ['-p', '--port']:
33 | cargs['port'] = val
34 | elif opt in ['-U', '--user']:
35 | cargs['user'] = val
36 | elif opt in ['-P', '--password']:
37 | cargs['password'] = val
38 | elif opt in ['-t', '--topic']:
39 | cargs['topics'].append(val)
40 | elif opt in ['-r', '--resultdir']:
41 | cargs['resultdir'] = val
42 | elif opt in ['-i', '--clientid']:
43 | cargs['clientId'] = val
44 |         elif opt in ['-S', '--startepoch']:
45 | cargs['startepoch'] = float(val)
46 |
47 | coll = Collector(**cargs)
48 |
49 | try:
50 | coll.run()
51 | except KeyboardInterrupt:
52 | pass
53 |
54 | coll.shutdown()
55 |
56 | class Collector:
57 | def __init__(self, host = 'localhost', port = '1883',
58 | clientId = None, user = None, password = None,
59 | topics = {}, resultdir = '.', startepoch = 0.0):
60 | self.host = host
61 | self.port = int(port)
62 | self.clientId = clientId
63 | self.user = user
64 | self.password = password
65 | self.topics = topics
66 | self.resultdir = resultdir
67 | self.startepoch = float(startepoch)
68 |
69 | self.result = {}
70 |
71 | self.mqttc = mqttc.Client(self.clientId)
72 | self.mqttc.on_connect = self.on_mqtt_connect
73 | self.mqttc.on_disconnect = self.on_mqtt_disconnect
74 | self.mqttc.on_message = self.on_mqtt_message
75 |
76 | if self.user is not None:
77 | self.mqttc.username_pw_set(self.user, self.password)
78 | self.mqttc.connect(host = self.host, port = self.port)
79 |
80 | def on_mqtt_connect(self, client, userdata, flags, rc):
81 | """
82 | On connect or reconnect we only need to subscribe to all
83 | the specified topics
84 | """
85 | for topic in self.topics:
86 | self.mqttc.subscribe(topic, qos = 0)
87 |
88 |     def on_mqtt_disconnect(self, client, userdata, rc):
89 | """
90 | Nothing to do on disconnect
91 | """
92 | pass
93 |
94 | def on_mqtt_message(self, client, userdata, msg):
95 | """
96 | Collect all metric data received for the specified topics
97 | in self.result
98 | """
99 | # ----
100 | # Extract the hostname and metric path from the topic
101 | # ----
102 | tl = msg.topic.split('/')
103 | host = tl[1]
104 | metrics = ['.'.join(tl[2:]),]
105 |
106 | # ----
107 | # Extract the timestamp adjusted by startepoch from the payload
108 | # ----
109 | pl = msg.payload.decode('utf-8').rstrip('\0').split(':')
110 | epoch = float(pl[0]) - self.startepoch
111 |
112 | # ----
113 | # Some metrics sent by collectd actually have two values in them.
114 |         # We handle this by splitting them into two metrics. Every disk-*
115 |         # metric with two values becomes METRIC.read and METRIC.write and
116 |         # every network interface-* one becomes METRIC.rx and METRIC.tx.
117 | # Having individual metric names simplifies the code in the
118 | # report generator.
119 | # ----
120 | if metrics[0].startswith('disk-') and len(pl) == 3:
121 | metrics = [
122 | '.'.join(tl[2:] + ['read']),
123 | '.'.join(tl[2:] + ['write']),
124 | ]
125 | elif metrics[0].startswith('interface-') and len(pl) == 3:
126 | metrics = [
127 | '.'.join(tl[2:] + ['rx']),
128 | '.'.join(tl[2:] + ['tx']),
129 | ]
130 |
131 | # ----
132 | # Add the data to self.result
133 | # ----
134 | if host not in self.result:
135 | self.result[host] = {}
136 | for i in range(0, len(metrics)):
137 | if metrics[i] not in self.result[host]:
138 | self.result[host][metrics[i]] = []
139 | self.result[host][metrics[i]].append([
140 | epoch, float(pl[i + 1])])
141 |
142 | def run(self):
143 | # ----
144 | # We call mqttc.loop() which will return after each event. We stop
145 | # running when we receive any input on stdin (which is the way the
146 | # benchmark driver is signaling us to finish).
147 | # ----
148 | while True:
149 | self.mqttc.loop()
150 | r, w, x = select.select([sys.stdin], [], [], 0.0)
151 | if len(r) > 0:
152 | break
153 |
154 | def shutdown(self):
155 | # ----
156 | # On shutdown we dump all the collected data into the output file.
157 | # ----
158 | self.mqttc.loop_stop()
159 | with open(os.path.join(self.resultdir, 'os-metric.json'), 'w') as fd:
160 | fd.write(json.dumps(self.result))
161 |
162 | if __name__ == '__main__':
163 | main()
164 |
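165 |
166 | # ----
167 | # Example invocation (illustrative; broker host and topics are
168 | # placeholders mirroring the commented osCollectorScript examples in
169 | # the sample properties files). The script subscribes to the topics,
170 | # collects metric messages until it receives input on stdin, then
171 | # writes RESULTDIR/os-metric.json:
172 | #
173 | #   ./mcCollectdMqtt.py \
174 | #       -h mymqttbroker.localdomain \
175 | #       -t 'collectd/mydbserver.localdomain/#' \
176 | #       -r my_result/data -S 1672574400
177 | # ----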
--------------------------------------------------------------------------------
/src/main/resources/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | ####### Python3 requirements for BenchmarkSQL
3 | #
4 | Flask
5 | numpy
6 | matplotlib
7 | paho-mqtt
8 | jproperties
9 | # jinja2 is imported directly by generateReport (Flask pulls it in too)
10 | Jinja2
11 |
--------------------------------------------------------------------------------
/src/main/resources/runBenchmark.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [ $# -ne 1 ] ; then
4 | echo "usage: $(basename $0) PROPS_FILE" >&2
5 | exit 2
6 | fi
7 |
8 | SEQ_FILE="./.jTPCC_run_seq.dat"
9 | if [ ! -f "${SEQ_FILE}" ] ; then
10 | echo "0" > "${SEQ_FILE}"
11 | fi
12 | SEQ=$(expr $(cat "${SEQ_FILE}") + 1) || exit 1
13 | echo "${SEQ}" > "${SEQ_FILE}"
14 |
15 | source ./funcs.sh $1
16 |
17 | setCP || exit 1
18 |
19 | myOPTS="-Dprop=$1 -DrunID=${SEQ}"
20 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
21 |
22 | java -cp "$myCP" $myOPTS com.github.pgsqlio.benchmarksql.jtpcc.jTPCC &
23 | PID=$!
24 | while true ; do
25 | kill -0 $PID 2>/dev/null || break
26 | sleep 1
27 | done
28 |
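29 | # ----
30 | # Example (illustrative):
31 | #
32 | #     ./runBenchmark.sh sample.postgresql.properties
33 | # ----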
--------------------------------------------------------------------------------
/src/main/resources/runDatabaseBuild.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [ $# -lt 1 ] ; then
4 | echo "usage: $(basename $0) PROPS [OPT VAL [...]]" >&2
5 | exit 2
6 | fi
7 |
8 | PROPS="$1"
9 | shift
10 | if [ ! -f "${PROPS}" ] ; then
11 | echo "${PROPS}: no such file or directory" >&2
12 | exit 1
13 | fi
14 |
15 | DB="$(grep '^db=' $PROPS | sed -e 's/^db=//')"
16 |
17 | BEFORE_LOAD="tableCreates extraCommandsBeforeLoad storedProcedureCreates"
18 |
19 | AFTER_LOAD="indexCreates foreignKeys buildFinish"
20 |
21 | for step in ${BEFORE_LOAD} ; do
22 | ./runSQL.sh "${PROPS}" $step &
23 | PID=$!
24 | while true ; do
25 | kill -0 $PID 2>/dev/null || break
26 | sleep 1
27 | done
28 | wait $PID
29 | rc=$?
30 | [ $rc -eq 0 ] || exit $rc
31 | done
32 |
33 | ./runLoader.sh "${PROPS}" $* &
34 | PID=$!
35 | while true ; do
36 | kill -0 $PID 2>/dev/null || break
37 | sleep 1
38 | done
39 | wait $PID
40 | rc=$?
41 | [ $rc -eq 0 ] || exit $rc
42 |
43 | for step in ${AFTER_LOAD} ; do
44 | ./runSQL.sh "${PROPS}" $step &
45 | PID=$!
46 | while true ; do
47 | kill -0 $PID 2>/dev/null || break
48 | sleep 1
49 | done
50 | wait $PID
51 | rc=$?
52 | [ $rc -eq 0 ] || exit $rc
53 | done
54 |
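55 | # ----
56 | # Example (illustrative): create the schema, load the data and build
57 | # the indexes as configured in the properties file. Any extra
58 | # "OPT VAL" arguments are passed through to the loader:
59 | #
60 | #     ./runDatabaseBuild.sh sample.postgresql.properties
61 | # ----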
--------------------------------------------------------------------------------
/src/main/resources/runDatabaseDestroy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [ $# -ne 1 ] ; then
4 | echo "usage: $(basename $0) PROPS" >&2
5 | exit 2
6 | fi
7 |
8 | PROPS="$1"
9 | if [ ! -f "${PROPS}" ] ; then
10 | echo "${PROPS}: no such file or directory" >&2
11 | exit 1
12 | fi
13 |
14 | DB="$(grep '^db=' $PROPS | sed -e 's/^db=//')"
15 | USER="$(grep '^user=' $PROPS | sed -e 's/^user=//' )"
16 | PASSWORD="$(grep '^password=' $PROPS | sed -e 's/^password=//' )"
17 |
18 | STEPS="tableDrops storedProcedureDrops"
19 |
20 | for step in ${STEPS} ; do
21 |     ./runSQL.sh "${PROPS}" $step &
22 | PID=$!
23 | while true ; do
24 | kill -0 $PID 2>/dev/null || break
25 | sleep 1
26 | done
27 |     wait $PID || exit 1
28 | done
29 |
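30 | # ----
31 | # Example (illustrative): drop the tables and stored procedures that
32 | # runDatabaseBuild.sh created:
33 | #
34 | #     ./runDatabaseDestroy.sh sample.postgresql.properties
35 | # ----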
--------------------------------------------------------------------------------
/src/main/resources/runLoader.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [ $# -lt 1 ] ; then
4 | echo "usage: $(basename $0) PROPS_FILE [ARGS]" >&2
5 | exit 2
6 | fi
7 |
8 | source funcs.sh $1
9 | shift
10 |
11 | setCP || exit 1
12 |
13 | myOPTS="-Dprop=${PROPS}"
14 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
15 |
16 | java -cp "$myCP" $myOPTS com.github.pgsqlio.benchmarksql.loader.LoadData $*
17 |
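18 | # ----
19 | # Example (illustrative): populate an already created schema using the
20 | # connection and scaling parameters from the properties file:
21 | #
22 | #     ./runLoader.sh sample.postgresql.properties
23 | # ----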
--------------------------------------------------------------------------------
/src/main/resources/runSQL.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # ----
4 | # Check command line usage
5 | # ----
6 | if [ $# -ne 2 ] ; then
7 | echo "usage: $(basename $0) PROPS_FILE SQL_FILE" >&2
8 | exit 2
9 | fi
10 |
11 | # ----
12 | # Load common functions
13 | # ----
14 | source funcs.sh $1
15 |
16 | # ----
17 | # Determine which SQL file to use.
18 | #
19 | # 1) If $2 specifies a file that ends in .sql, we use that.
20 | # 2) If a file ./sql.DBTYPE/$2.sql exists (DBTYPE = the db= property), we use that.
21 | # 3) If none of the above, use ./sql.common/$2.sql.
22 | # ----
23 | if echo "$2" | grep -q -e '\.sql$' ; then
24 | ENDS_WITH_SQL=1
25 | else
26 | ENDS_WITH_SQL=0
27 | fi
28 |
29 | if [ -f "${2}" -a $ENDS_WITH_SQL -eq 1 ] ; then
30 | SQL_FILE="$2"
31 | else
32 | if [ -f "./sql.$(getProp db)/${2}.sql" ] ; then
33 | SQL_FILE="./sql.$(getProp db)/${2}.sql"
34 | else
35 | SQL_FILE="./sql.common/${2}.sql"
36 | if [ ! -f "${SQL_FILE}" ] ; then
37 | echo "ERROR: Cannot locate SQL file for ${2}" >&2
38 | exit 1
39 | fi
40 | fi
41 | fi
42 |
43 | # ----
44 | # Set myCP according to the database type.
45 | # ----
46 | setCP || exit 1
47 |
48 | echo "# ------------------------------------------------------------"
49 | echo "# Loading SQL file ${SQL_FILE}"
50 | echo "# ------------------------------------------------------------"
51 | myOPTS="-Dprop=$1"
52 | myOPTS="${myOPTS} -DcommandFile=${SQL_FILE}"
53 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
54 |
55 | java -cp "$myCP" $myOPTS com.github.pgsqlio.benchmarksql.jdbc.ExecJDBC
56 |
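57 | # ----
58 | # Example (illustrative):
59 | #
60 | #     ./runSQL.sh sample.postgresql.properties tableCreates
61 | #
62 | # resolves to ./sql.postgres/tableCreates.sql if that file exists,
63 | # otherwise to ./sql.common/tableCreates.sql.
64 | # ----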
--------------------------------------------------------------------------------
/src/main/resources/sample.firebird.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=firebird
10 | driver=org.firebirdsql.jdbc.FBDriver
11 | application=Generic
12 | conn=jdbc:firebirdsql://localhost:3050//var/lib/firebird/data/benchmarksql1.fdb?charSet=UTF-8
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | useWarehouseFrom=-1
19 | useWarehouseTo=-1
20 | loadWorkers=8
21 | monkeys=2
22 | sutThreads=16
23 | maxDeliveryBGThreads=12
24 | maxDeliveryBGPerWarehouse=1
25 | rampupMins=10
26 | rampupSUTMins=5
27 | rampupTerminalMins=5
28 | runMins=30
29 | reportIntervalSecs=30
30 | restartSUTThreadProbability=0.0
31 | keyingTimeMultiplier=1.0
32 | thinkTimeMultiplier=1.0
33 | terminalMultiplier=1
34 | traceTerminalIO=false
35 |
36 | # Below are the definitions for the "attempted" transaction mix.
37 | # The TPC-C specification requires minimum percentages for all but
38 | # the NEW_ORDER transaction. If a test run happens to have any of
39 | # those four types fall below those minimums, the entire test is
40 | # invalid. We don't want that to happen, so we specify values just
41 | # a tiny bit above the required minimum.
42 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
43 | paymentWeight=43.2
44 | orderStatusWeight=4.2
45 | deliveryWeight=4.2
46 | stockLevelWeight=4.2
47 |
48 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
49 | # transactions to roll back due to a user entry error (non-existing
50 | # item number). Doing it with a strict 1/100th probability can lead
51 | # to undershooting this target, so we default to 1.01% to be sure.
52 | rollbackPercent=1.01
53 |
54 | # Directory name to create for collecting detailed result data.
55 | # Comment this out to suppress. Note that the Flask UI will define
56 | # this by itself, so don't specify it if you run through the UI.
57 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
58 |
59 | # BenchmarkSQL includes three OS metric collector scripts implemented
60 | # in Python3. Two require collectd to be installed on the server
61 | # systems you want to include in the performance report. The data
62 | # will be saved in resultDirectory/data/os-metric.json. The third
63 | # is based on Prometheus and node_exporter.
64 |
65 | # mcCollectdMqtt.py is a metric collector that expects the collectd
66 | # instances on the servers to send the metric data to an MQTT broker.
67 |
68 | #osCollectorScript=./mcCollectdMqtt.py \
69 | # -h mymqttbroker.localdomain \
70 | # -t collectd/mydbserver.localdomain/# \
71 | # -t collectd/mybackrest.localdomain/#
72 |
73 | # mcCollectdGraphite.py is a metric collector that expects the
74 | # collectd instances on the servers to send the metric data to
75 | # a graphite/whisper database and be available through the /render
76 | # API.
77 |
78 | #osCollectorScript=./mcCollectdGraphite.py \
79 | # -u http://mygraphite.localdomain/render/ \
80 | # -t collectd.mydbserver_localdomain.*.* \
81 | # -t collectd.mydbserver_localdomain.*.*.* \
82 | # -t collectd.mybackrest_localdomain.*.* \
83 | # -t collectd.mybackrest_localdomain.*.*.*
84 |
85 | # mcPrometheus.py retrieves the metric data from a Prometheus
86 | # server through the API. It converts the output into the same
87 | # format as the former two produce. The instances listed are
88 | # the same names given in the "instance" label of the metric
89 | # data scraped by Prometheus. The port number will be removed
90 | # in the os-metric.json output.
91 |
92 | #osCollectorScript=./mcPrometheus.py \
93 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
94 | # -i mydbserver.localdomain:9100 \
95 | # -i mybackrest.localdomain:9100
96 |
97 | # The report script is what generates the detailed HTML report for
98 | # the benchmark run. It is a Jinja2 template based reporting system
99 | # that includes graphs of various metrics captured during the benchmark.
100 |
101 | reportScript=./generateReport.py -t report_simple.html
102 |
103 | #reportScript=./generateReport.py \
104 | # -t report_extended.html \
105 | # -c 'mydbserver.localdomain:DB server' \
106 | # -d 'mydbserver.localdomain:DB server:hda2' \
107 | # -i 'mydbserver.localdomain:DB server:eth0' \
108 | # -c 'mybackrest.localdomain:pgbackrest server' \
109 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
110 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
111 |
--------------------------------------------------------------------------------
/src/main/resources/sample.mariadb.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=mariadb
10 | driver=org.mariadb.jdbc.Driver
11 | application=Generic
12 | conn=jdbc:mariadb://localhost:3306/benchmarksql1
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | useWarehouseFrom=-1
19 | useWarehouseTo=-1
20 | loadWorkers=8
21 | monkeys=2
22 | sutThreads=16
23 | maxDeliveryBGThreads=12
24 | maxDeliveryBGPerWarehouse=1
25 | rampupMins=10
26 | rampupSUTMins=5
27 | rampupTerminalMins=5
28 | runMins=30
29 | reportIntervalSecs=30
30 | restartSUTThreadProbability=0.0
31 | keyingTimeMultiplier=1.0
32 | thinkTimeMultiplier=1.0
33 | terminalMultiplier=1
34 | traceTerminalIO=false
35 |
36 | # Below are the definitions for the "attempted" transaction mix.
37 | # The TPC-C specification requires minimum percentages for all but
38 | # the NEW_ORDER transaction. If a test run happens to have any of
39 | # those four types fall below those minimums, the entire test is
40 | # invalid. We don't want that to happen, so we specify values just
41 | # a tiny bit above the required minimum.
42 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
43 | paymentWeight=43.2
44 | orderStatusWeight=4.2
45 | deliveryWeight=4.2
46 | stockLevelWeight=4.2
47 |
48 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
49 | # transactions to roll back due to a user entry error (non-existing
50 | # item number). Doing it with a strict 1/100th probability can lead
51 | # to undershooting this target, so we default to 1.01% to be sure.
52 | rollbackPercent=1.01
53 |
54 | # Directory name to create for collecting detailed result data.
55 | # Comment this out to suppress. Note that the Flask UI will define
56 | # this by itself, so don't specify it if you run through the UI.
57 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
58 |
59 | # BenchmarkSQL includes three OS metric collector scripts implemented
60 | # in Python3. Two require collectd to be installed on the server
61 | # systems you want to include in the performance report. The data
62 | # will be saved in resultDirectory/data/os-metric.json. The third
63 | # is based on Prometheus and node_exporter.
64 |
65 | # mcCollectdMqtt.py is a metric collector that expects the collectd
66 | # instances on the servers to send the metric data to an MQTT broker.
67 |
68 | #osCollectorScript=./mcCollectdMqtt.py \
69 | # -h mymqttbroker.localdomain \
70 | # -t collectd/mydbserver.localdomain/# \
71 | # -t collectd/mybackrest.localdomain/#
72 |
73 | # mcCollectdGraphite.py is a metric collector that expects the
74 | # collectd instances on the servers to send the metric data to
75 | # a graphite/whisper database and be available through the /render
76 | # API.
77 |
78 | #osCollectorScript=./mcCollectdGraphite.py \
79 | # -u http://mygraphite.localdomain/render/ \
80 | # -t collectd.mydbserver_localdomain.*.* \
81 | # -t collectd.mydbserver_localdomain.*.*.* \
82 | # -t collectd.mybackrest_localdomain.*.* \
83 | # -t collectd.mybackrest_localdomain.*.*.*
84 |
85 | # mcPrometheus.py retrieves the metric data from a Prometheus
86 | # server through the API. It converts the output into the same
87 | # format as the former two produce. The instances listed are
88 | # the same names given in the "instance" label of the metric
89 | # data scraped by Prometheus. The port number will be removed
90 | # in the os-metric.json output.
91 |
92 | #osCollectorScript=./mcPrometheus.py \
93 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
94 | # -i mydbserver.localdomain:9100 \
95 | # -i mybackrest.localdomain:9100
96 |
97 | # The report script is what generates the detailed HTML report for
98 | # the benchmark run. It is a Jinja2 template based reporting system
99 | # that includes graphs of various metrics captured during the benchmark.
100 |
101 | reportScript=./generateReport.py -t report_simple.html
102 |
103 | #reportScript=./generateReport.py \
104 | # -t report_extended.html \
105 | # -c 'mydbserver.localdomain:DB server' \
106 | # -d 'mydbserver.localdomain:DB server:hda2' \
107 | # -i 'mydbserver.localdomain:DB server:eth0' \
108 | # -c 'mybackrest.localdomain:pgbackrest server' \
109 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
110 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
111 |
--------------------------------------------------------------------------------
/src/main/resources/sample.oracle.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=oracle
10 | driver=oracle.jdbc.driver.OracleDriver
11 | application=Generic
12 | conn=jdbc:oracle:thin:@localhost:1521:XE
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | useWarehouseFrom=-1
19 | useWarehouseTo=-1
20 | loadWorkers=8
21 | monkeys=2
22 | sutThreads=16
23 | maxDeliveryBGThreads=12
24 | maxDeliveryBGPerWarehouse=1
25 | rampupMins=10
26 | rampupSUTMins=5
27 | rampupTerminalMins=5
28 | runMins=30
29 | reportIntervalSecs=30
30 | restartSUTThreadProbability=0.0
31 | keyingTimeMultiplier=1.0
32 | thinkTimeMultiplier=1.0
33 | terminalMultiplier=1
34 | traceTerminalIO=false
35 |
36 | # Below are the definitions for the "attempted" transaction mix.
37 | # The TPC-C specification requires minimum percentages for all but
38 | # the NEW_ORDER transaction. If a test run happens to have any of
39 | # those four types fall below those minimums, the entire test is
40 | # invalid. We don't want that to happen, so we specify values just
41 | # a tiny bit above the required minimum.
42 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
43 | paymentWeight=43.2
44 | orderStatusWeight=4.2
45 | deliveryWeight=4.2
46 | stockLevelWeight=4.2
47 |
48 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
49 | # transactions to roll back due to a user entry error (non-existing
50 | # item number). Doing it with a strict 1/100th probability can lead
51 | # to undershooting this target, so we default to 1.01% to be sure.
52 | rollbackPercent=1.01
53 |
54 | # Directory name to create for collecting detailed result data.
55 | # Comment this out to suppress. Note that the Flask UI will define
56 | # this by itself, so don't specify it if you run through the UI.
57 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
58 |
59 | # BenchmarkSQL includes three OS metric collector scripts implemented
60 | # in Python3. Two require collectd to be installed on the server
61 | # systems you want to include in the performance report. The data
62 | # will be saved in resultDirectory/data/os-metric.json. The third
63 | # is based on Prometheus and node_exporter.
64 |
65 | # mcCollectdMqtt.py is a metric collector that expects the collectd
66 | # instances on the servers to send the metric data to an MQTT broker.
67 |
68 | #osCollectorScript=./mcCollectdMqtt.py \
69 | # -h mymqttbroker.localdomain \
70 | # -t collectd/mydbserver.localdomain/# \
71 | # -t collectd/mybackrest.localdomain/#
72 |
73 | # mcCollectdGraphite.py is a metric collector that expects the
74 | # collectd instances on the servers to send the metric data to
75 | # a graphite/whisper database and be available through the /render
76 | # API.
77 |
78 | #osCollectorScript=./mcCollectdGraphite.py \
79 | # -u http://mygraphite.localdomain/render/ \
80 | # -t collectd.mydbserver_localdomain.*.* \
81 | # -t collectd.mydbserver_localdomain.*.*.* \
82 | # -t collectd.mybackrest_localdomain.*.* \
83 | # -t collectd.mybackrest_localdomain.*.*.*
84 |
85 | # mcPrometheus.py retrieves the metric data from a Prometheus
86 | # server through the API. It converts the output into the same
87 | # format as the former two produce. The instances listed are
88 | # the same names given in the "instance" label of the metric
89 | # data scraped by Prometheus. The port number will be removed
90 | # in the os-metric.json output.
91 |
92 | #osCollectorScript=./mcPrometheus.py \
93 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
94 | # -i mydbserver.localdomain:9100 \
95 | # -i mybackrest.localdomain:9100
96 |
97 | # The report script generates the detailed HTML report for the
98 | # benchmark run. It is a Jinja2-template-based reporting system
99 | # that includes graphs of various metrics captured during the benchmark.
100 |
101 | reportScript=./generateReport.py -t report_simple.html
102 |
103 | #reportScript=./generateReport.py \
104 | # -t report_extended.html \
105 | # -c 'mydbserver.localdomain:DB server' \
106 | # -d 'mydbserver.localdomain:DB server:hda2' \
107 | # -i 'mydbserver.localdomain:DB server:eth0' \
108 | # -c 'mybackrest.localdomain:pgbackrest server' \
109 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
110 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
111 |
--------------------------------------------------------------------------------
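Note on the transaction mix defined in these sample properties files: the
weights are chosen so that each of the four non-NEW_ORDER types sits just
above the TPC-C minimums (43% Payment, 4% each for Order-Status, Delivery
and Stock-Level). A quick illustrative calculation of the resulting
NEW_ORDER share with the default weights:

    newOrderWeight = 100.0 - (43.2 + 4.2 + 4.2 + 4.2)
                   = 100.0 - 55.8
                   = 44.2

So roughly 44.2% of all attempted transactions are NEW_ORDER.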
/src/main/resources/sample.postgresql.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=postgres
10 | driver=org.postgresql.Driver
11 | application=Generic
12 | conn=jdbc:postgresql://localhost:5432/postgres
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | useWarehouseFrom=-1
19 | useWarehouseTo=-1
20 | loadWorkers=8
21 | monkeys=2
22 | sutThreads=16
23 | maxDeliveryBGThreads=12
24 | maxDeliveryBGPerWarehouse=1
25 | rampupMins=10
26 | rampupSUTMins=5
27 | rampupTerminalMins=5
28 | runMins=30
29 | reportIntervalSecs=30
30 | restartSUTThreadProbability=0.0
31 | keyingTimeMultiplier=1.0
32 | thinkTimeMultiplier=1.0
33 | terminalMultiplier=1
34 | traceTerminalIO=false
35 |
36 | # Below are the definitions for the "attempted" transaction mix.
37 | # The TPC-C specification requires minimum percentages for all but
38 | # the NEW_ORDER transaction. If a test run happens to have any of
39 | # those four types fall below those minimums, the entire test is
40 | # invalid. We don't want that to happen, so we specify values just
41 | # a tiny bit above the required minimum.
42 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
43 | paymentWeight=43.2
44 | orderStatusWeight=4.2
45 | deliveryWeight=4.2
46 | stockLevelWeight=4.2
47 |
48 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
49 | # transactions to roll back due to a user entry error (non-existing
50 | # item number). Doing it with a strict 1/100th probability can lead
51 | # to undershooting this target, so we default to 1.01% to be sure.
52 | rollbackPercent=1.01
53 |
54 | # Directory name to create for collecting detailed result data.
55 | # Comment this out to suppress. Note that the Flask UI will define
56 | # this by itself, so don't specify it if you run through the UI.
57 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
58 |
59 | # BenchmarkSQL includes three OS metric collector scripts implemented
60 | # in Python3. Two require collectd to be installed on the server
61 | # systems you want to include in the performance report. The data
62 | # will be saved in resultDirectory/data/os-metric.json. The third
63 | # is based on Prometheus and node_exporter.
64 |
65 | # mcCollectdMqtt.py is a metric collector that expects the collectd
66 | # instances on the servers to send the metric data to an MQTT broker.
67 |
68 | #osCollectorScript=./mcCollectdMqtt.py \
69 | # -h mymqttbroker.localdomain \
70 | # -t collectd/mydbserver.localdomain/# \
71 | # -t collectd/mybackrest.localdomain/#
72 |
73 | # mcCollectdGraphite.py is a metric collector that expects the
74 | # collectd instances on the servers to send the metric data to
75 | # a graphite/whisper database, where it is available through
76 | # the /render API.
77 |
78 | #osCollectorScript=./mcCollectdGraphite.py \
79 | # -u http://mygraphite.localdomain/render/ \
80 | # -t collectd.mydbserver_localdomain.*.* \
81 | # -t collectd.mydbserver_localdomain.*.*.* \
82 | # -t collectd.mybackrest_localdomain.*.* \
83 | # -t collectd.mybackrest_localdomain.*.*.*
84 |
85 | # mcPrometheus.py retrieves the metric data from a Prometheus
86 | # server through the API. It converts the output into the same
87 | # format as the former two produce. The instances listed are
88 | # the same names given in the "instance" label of the metric
89 | # data scraped by Prometheus. The port number will be removed
90 | # in the os-metric.json output.
91 |
92 | #osCollectorScript=./mcPrometheus.py \
93 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
94 | # -i mydbserver.localdomain:9100 \
95 | # -i mybackrest.localdomain:9100
96 |
97 | # The report script generates the detailed HTML report for the
98 | # benchmark run. It is a Jinja2-template-based reporting system
99 | # that includes graphs of various metrics captured during the benchmark.
100 |
101 | reportScript=./generateReport.py -t report_simple.html
102 |
103 | #reportScript=./generateReport.py \
104 | # -t report_extended.html \
105 | # -c 'mydbserver.localdomain:DB server' \
106 | # -d 'mydbserver.localdomain:DB server:hda2' \
107 | # -i 'mydbserver.localdomain:DB server:eth0' \
108 | # -c 'mybackrest.localdomain:pgbackrest server' \
109 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
110 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
111 |
--------------------------------------------------------------------------------
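The resultDirectory value in these files uses Java Formatter date/time
conversions (%tY = year, %tm = month, %td = day, %tH%tM%tS = time of day),
which the driver presumably expands with the benchmark's start time. A
purely illustrative example of the resulting directory name:

    resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
    # for a run started 2024-05-31 14:30:00 this would become:
    #     my_result_2024-05-31_143000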
/src/main/resources/sample.transact-sql.properties:
--------------------------------------------------------------------------------
1 | # General Driver and connection parameters
2 | #
3 | # db={ postgres | oracle | firebird | mariadb | transact-sql }
4 | # driver=
5 | # application={ Generic | PostgreSQLStoredProc | OracleStoredProc }
6 | # conn=
7 | # user=
8 | # password=
9 | db=transact-sql
10 | driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
11 | application=Generic
12 | conn=jdbc:sqlserver://localhost;database=benchmarksql1
13 | user=benchmarksql
14 | password=PWbmsql
15 |
16 | # Scaling and timing configuration
17 | warehouses=50
18 | useWarehouseFrom=-1
19 | useWarehouseTo=-1
20 | loadWorkers=8
21 | monkeys=2
22 | sutThreads=16
23 | maxDeliveryBGThreads=12
24 | maxDeliveryBGPerWarehouse=1
25 | rampupMins=10
26 | rampupSUTMins=5
27 | rampupTerminalMins=5
28 | runMins=30
29 | reportIntervalSecs=30
30 | restartSUTThreadProbability=0.0
31 | keyingTimeMultiplier=1.0
32 | thinkTimeMultiplier=1.0
33 | terminalMultiplier=1
34 | traceTerminalIO=false
35 |
36 | # Below are the definitions for the "attempted" transaction mix.
37 | # The TPC-C specification requires minimum percentages for all but
38 | # the NEW_ORDER transaction. If a test run happens to have any of
39 | # those four types fall below those minimums, the entire test is
40 | # invalid. We don't want that to happen, so we specify values just
41 | # a tiny bit above the required minimum.
42 | # The newOrderWeight is calculated as 100.0 - sum(all_other_types).
43 | paymentWeight=43.2
44 | orderStatusWeight=4.2
45 | deliveryWeight=4.2
46 | stockLevelWeight=4.2
47 |
48 | # The TPC-C specification requires a minimum of 1% of the NEW_ORDER
49 | # transactions to roll back due to a user entry error (non-existing
50 | # item number). Doing it with a strict 1/100th probability can lead
51 | # to undershooting this target, so we default to 1.01% to be sure.
52 | rollbackPercent=1.01
53 |
54 | # Directory name to create for collecting detailed result data.
55 | # Comment this out to suppress. Note that the Flask UI will define
56 | # this by itself, so don't specify it if you run through the UI.
57 | resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
58 |
59 | # BenchmarkSQL includes three OS metric collector scripts implemented
60 | # in Python3. Two require collectd to be installed on the server
61 | # systems you want to include in the performance report. The data
62 | # will be saved in resultDirectory/data/os-metric.json. The third
63 | # is based on Prometheus and node_exporter.
64 |
65 | # mcCollectdMqtt.py is a metric collector that expects the collectd
66 | # instances on the servers to send the metric data to an MQTT broker.
67 |
68 | #osCollectorScript=./mcCollectdMqtt.py \
69 | # -h mymqttbroker.localdomain \
70 | # -t collectd/mydbserver.localdomain/# \
71 | # -t collectd/mybackrest.localdomain/#
72 |
73 | # mcCollectdGraphite.py is a metric collector that expects the
74 | # collectd instances on the servers to send the metric data to
75 | # a graphite/whisper database, where it is available through
76 | # the /render API.
77 |
78 | #osCollectorScript=./mcCollectdGraphite.py \
79 | # -u http://mygraphite.localdomain/render/ \
80 | # -t collectd.mydbserver_localdomain.*.* \
81 | # -t collectd.mydbserver_localdomain.*.*.* \
82 | # -t collectd.mybackrest_localdomain.*.* \
83 | # -t collectd.mybackrest_localdomain.*.*.*
84 |
85 | # mcPrometheus.py retrieves the metric data from a Prometheus
86 | # server through the API. It converts the output into the same
87 | # format as the former two produce. The instances listed are
88 | # the same names given in the "instance" label of the metric
89 | # data scraped by Prometheus. The port number will be removed
90 | # in the os-metric.json output.
91 |
92 | #osCollectorScript=./mcPrometheus.py \
93 | # -u http://myprometheus.localdomain:9090/api/v1/query_range \
94 | # -i mydbserver.localdomain:9100 \
95 | # -i mybackrest.localdomain:9100
96 |
97 | # The report script generates the detailed HTML report for the
98 | # benchmark run. It is a Jinja2-template-based reporting system
99 | # that includes graphs of various metrics captured during the benchmark.
100 |
101 | reportScript=./generateReport.py -t report_simple.html
102 |
103 | #reportScript=./generateReport.py \
104 | # -t report_extended.html \
105 | # -c 'mydbserver.localdomain:DB server' \
106 | # -d 'mydbserver.localdomain:DB server:hda2' \
107 | # -i 'mydbserver.localdomain:DB server:eth0' \
108 | # -c 'mybackrest.localdomain:pgbackrest server' \
109 | # -d 'mybackrest.localdomain:pgbackrest server:hda2' \
110 | # -i 'mybackrest.localdomain:pgbackrest server:eth0'
111 |
--------------------------------------------------------------------------------
/src/main/resources/sql.babelfish/tableCreates.sql:
--------------------------------------------------------------------------------
1 | create table bmsql_config (
2 | cfg_name nvarchar(30) primary key,
3 | cfg_value nvarchar(50)
4 | );
5 |
6 | create table bmsql_warehouse (
7 | w_id int not null,
8 | w_ytd decimal(12,2),
9 | w_tax decimal(4,4),
10 | w_name nvarchar(10),
11 | w_street_1 nvarchar(20),
12 | w_street_2 nvarchar(20),
13 | w_city nvarchar(20),
14 | w_state nchar(2),
15 | w_zip nchar(9)
16 | );
17 |
18 | create table bmsql_district (
19 | d_w_id int not null,
20 | d_id int not null,
21 | d_ytd decimal(12,2),
22 | d_tax decimal(4,4),
23 | d_next_o_id int,
24 | d_name nvarchar(10),
25 | d_street_1 nvarchar(20),
26 | d_street_2 nvarchar(20),
27 | d_city nvarchar(20),
28 | d_state nchar(2),
29 | d_zip nchar(9)
30 | );
31 |
32 | create table bmsql_customer (
33 | c_w_id int not null,
34 | c_d_id int not null,
35 | c_id int not null,
36 | c_discount decimal(4,4),
37 | c_credit nchar(2),
38 | c_last nvarchar(16),
39 | c_first nvarchar(16),
40 | c_credit_lim decimal(12,2),
41 | c_balance decimal(12,2),
42 | c_ytd_payment decimal(12,2),
43 | c_payment_cnt int,
44 | c_delivery_cnt int,
45 | c_street_1 nvarchar(20),
46 | c_street_2 nvarchar(20),
47 | c_city nvarchar(20),
48 | c_state nchar(2),
49 | c_zip nchar(9),
50 | c_phone nchar(16),
51 | c_since datetime,
52 | c_middle nchar(2),
53 | c_data nvarchar(500)
54 | );
55 |
56 | create table bmsql_history (
57 | h_c_id int,
58 | h_c_d_id int,
59 | h_c_w_id int,
60 | h_d_id int,
61 | h_w_id int,
62 | h_date datetime,
63 | h_amount decimal(8,2),
64 | h_data nvarchar(24)
65 | );
66 |
67 | create table bmsql_new_order (
68 | no_w_id int not null,
69 | no_d_id int not null,
70 | no_o_id int not null
71 | );
72 |
73 | create table bmsql_oorder (
74 | o_w_id int not null,
75 | o_d_id int not null,
76 | o_id int not null,
77 | o_c_id int,
78 | o_carrier_id int,
79 | o_ol_cnt int,
80 | o_all_local int,
81 | o_entry_d datetime
82 | );
83 |
84 | create table bmsql_order_line (
85 | ol_w_id int not null,
86 | ol_d_id int not null,
87 | ol_o_id int not null,
88 | ol_number int not null,
89 | ol_i_id int not null,
90 | ol_delivery_d datetime,
91 | ol_amount decimal(8,2),
92 | ol_supply_w_id int,
93 | ol_quantity int,
94 | ol_dist_info nchar(24)
95 | );
96 |
97 | create table bmsql_item (
98 | i_id int not null,
99 | i_name nvarchar(24),
100 | i_price decimal(5,2),
101 | i_data nvarchar(50),
102 | i_im_id int
103 | );
104 |
105 | create table bmsql_stock (
106 | s_w_id int not null,
107 | s_i_id int not null,
108 | s_quantity int,
109 | s_ytd int,
110 | s_order_cnt int,
111 | s_remote_cnt int,
112 | s_data nvarchar(50),
113 | s_dist_01 nchar(24),
114 | s_dist_02 nchar(24),
115 | s_dist_03 nchar(24),
116 | s_dist_04 nchar(24),
117 | s_dist_05 nchar(24),
118 | s_dist_06 nchar(24),
119 | s_dist_07 nchar(24),
120 | s_dist_08 nchar(24),
121 | s_dist_09 nchar(24),
122 | s_dist_10 nchar(24)
123 | );
124 |
125 |
126 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/buildFinish.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra commands to run after the tables are created, loaded,
3 | -- indexes built and extras created.
4 | -- ----
5 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/extraCommandsBeforeLoad.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/extraCommandsBeforeLoad.sql
3 | --
4 | -- Empty placeholder file. Some databases might require
5 | -- extra commands to be run between creating the schema
6 | -- and loading the data. PostgreSQL, for example, uses this
7 | -- hook to set the FILLFACTOR on some tables.
8 | -- ----
9 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/foreignKeys.sql:
--------------------------------------------------------------------------------
1 |
2 | alter table bmsql_district add constraint d_warehouse_fkey
3 | foreign key (d_w_id)
4 | references bmsql_warehouse (w_id);
5 |
6 | alter table bmsql_customer add constraint c_district_fkey
7 | foreign key (c_w_id, c_d_id)
8 | references bmsql_district (d_w_id, d_id);
9 |
10 | alter table bmsql_history add constraint h_customer_fkey
11 | foreign key (h_c_w_id, h_c_d_id, h_c_id)
12 | references bmsql_customer (c_w_id, c_d_id, c_id);
13 | alter table bmsql_history add constraint h_district_fkey
14 | foreign key (h_w_id, h_d_id)
15 | references bmsql_district (d_w_id, d_id);
16 |
17 | alter table bmsql_new_order add constraint no_order_fkey
18 | foreign key (no_w_id, no_d_id, no_o_id)
19 | references bmsql_oorder (o_w_id, o_d_id, o_id);
20 |
21 | alter table bmsql_oorder add constraint o_customer_fkey
22 | foreign key (o_w_id, o_d_id, o_c_id)
23 | references bmsql_customer (c_w_id, c_d_id, c_id);
24 |
25 | alter table bmsql_order_line add constraint ol_order_fkey
26 | foreign key (ol_w_id, ol_d_id, ol_o_id)
27 | references bmsql_oorder (o_w_id, o_d_id, o_id);
28 | alter table bmsql_order_line add constraint ol_stock_fkey
29 | foreign key (ol_supply_w_id, ol_i_id)
30 | references bmsql_stock (s_w_id, s_i_id);
31 |
32 | alter table bmsql_stock add constraint s_warehouse_fkey
33 | foreign key (s_w_id)
34 | references bmsql_warehouse (w_id);
35 | alter table bmsql_stock add constraint s_item_fkey
36 | foreign key (s_i_id)
37 | references bmsql_item (i_id);
38 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/indexCreates.sql:
--------------------------------------------------------------------------------
1 |
2 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey
3 | primary key (w_id);
4 |
5 | alter table bmsql_district add constraint bmsql_district_pkey
6 | primary key (d_w_id, d_id);
7 |
8 | alter table bmsql_customer add constraint bmsql_customer_pkey
9 | primary key (c_w_id, c_d_id, c_id);
10 |
11 | create index bmsql_customer_idx1
12 | on bmsql_customer (c_w_id, c_d_id, c_last, c_first);
13 |
14 | alter table bmsql_oorder add constraint bmsql_oorder_pkey
15 | primary key (o_w_id, o_d_id, o_id);
16 |
17 | create unique index bmsql_oorder_idx1
18 | on bmsql_oorder (o_w_id, o_d_id, o_carrier_id, o_id);
19 |
20 | alter table bmsql_new_order add constraint bmsql_new_order_pkey
21 | primary key (no_w_id, no_d_id, no_o_id);
22 |
23 | alter table bmsql_order_line add constraint bmsql_order_line_pkey
24 | primary key (ol_w_id, ol_d_id, ol_o_id, ol_number);
25 |
26 | alter table bmsql_stock add constraint bmsql_stock_pkey
27 | primary key (s_w_id, s_i_id);
28 |
29 | alter table bmsql_item add constraint bmsql_item_pkey
30 | primary key (i_id);
31 |
32 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/indexDrops.sql:
--------------------------------------------------------------------------------
1 |
2 | alter table bmsql_warehouse drop constraint bmsql_warehouse_pkey;
3 |
4 | alter table bmsql_district drop constraint bmsql_district_pkey;
5 |
6 | alter table bmsql_customer drop constraint bmsql_customer_pkey;
7 | drop index bmsql_customer_idx1;
8 |
9 | -- history table has no primary key
10 | -- commit;
11 |
12 | alter table bmsql_oorder drop constraint bmsql_oorder_pkey;
13 | drop index bmsql_oorder_idx1;
14 |
15 | alter table bmsql_new_order drop constraint bmsql_new_order_pkey;
16 |
17 | alter table bmsql_order_line drop constraint bmsql_order_line_pkey;
18 |
19 | alter table bmsql_stock drop constraint bmsql_stock_pkey;
20 |
21 | alter table bmsql_item drop constraint bmsql_item_pkey;
22 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/storedProcedureCreates.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- This is a placeholder for databases that don't support
3 | -- stored procedures or where the support is too limited to
4 | -- reasonably implement the TPC-C transaction profiles.
5 | -- ----------------------------------------------------------------------
6 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- This is a placeholder for databases that don't support
3 | -- stored procedures or where the support is too limited to
4 | -- reasonably implement the TPC-C transaction profiles.
5 | -- ----------------------------------------------------------------------
6 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/tableCreates.sql:
--------------------------------------------------------------------------------
1 | create table bmsql_config (
2 | cfg_name varchar(30) primary key,
3 | cfg_value varchar(50)
4 | );
5 |
6 | create table bmsql_warehouse (
7 | w_id integer not null,
8 | w_ytd decimal(12,2),
9 | w_tax decimal(4,4),
10 | w_name varchar(10),
11 | w_street_1 varchar(20),
12 | w_street_2 varchar(20),
13 | w_city varchar(20),
14 | w_state char(2),
15 | w_zip char(9)
16 | );
17 |
18 | create table bmsql_district (
19 | d_w_id integer not null,
20 | d_id integer not null,
21 | d_ytd decimal(12,2),
22 | d_tax decimal(4,4),
23 | d_next_o_id integer,
24 | d_name varchar(10),
25 | d_street_1 varchar(20),
26 | d_street_2 varchar(20),
27 | d_city varchar(20),
28 | d_state char(2),
29 | d_zip char(9)
30 | );
31 |
32 | create table bmsql_customer (
33 | c_w_id integer not null,
34 | c_d_id integer not null,
35 | c_id integer not null,
36 | c_discount decimal(4,4),
37 | c_credit char(2),
38 | c_last varchar(16),
39 | c_first varchar(16),
40 | c_credit_lim decimal(12,2),
41 | c_balance decimal(12,2),
42 | c_ytd_payment decimal(12,2),
43 | c_payment_cnt integer,
44 | c_delivery_cnt integer,
45 | c_street_1 varchar(20),
46 | c_street_2 varchar(20),
47 | c_city varchar(20),
48 | c_state char(2),
49 | c_zip char(9),
50 | c_phone char(16),
51 | c_since timestamp,
52 | c_middle char(2),
53 | c_data varchar(500)
54 | );
55 |
56 | create table bmsql_history (
57 | h_c_id integer,
58 | h_c_d_id integer,
59 | h_c_w_id integer,
60 | h_d_id integer,
61 | h_w_id integer,
62 | h_date timestamp,
63 | h_amount decimal(6,2),
64 | h_data varchar(24)
65 | );
66 |
67 | create table bmsql_new_order (
68 | no_w_id integer not null,
69 | no_d_id integer not null,
70 | no_o_id integer not null
71 | );
72 |
73 | create table bmsql_oorder (
74 | o_w_id integer not null,
75 | o_d_id integer not null,
76 | o_id integer not null,
77 | o_c_id integer,
78 | o_carrier_id integer,
79 | o_ol_cnt integer,
80 | o_all_local integer,
81 | o_entry_d timestamp
82 | );
83 |
84 | create table bmsql_order_line (
85 | ol_w_id integer not null,
86 | ol_d_id integer not null,
87 | ol_o_id integer not null,
88 | ol_number integer not null,
89 | ol_i_id integer not null,
90 | ol_delivery_d timestamp,
91 | ol_amount decimal(6,2),
92 | ol_supply_w_id integer,
93 | ol_quantity integer,
94 | ol_dist_info char(24)
95 | );
96 |
97 | create table bmsql_item (
98 | i_id integer not null,
99 | i_name varchar(24),
100 | i_price decimal(5,2),
101 | i_data varchar(50),
102 | i_im_id integer
103 | );
104 |
105 | create table bmsql_stock (
106 | s_w_id integer not null,
107 | s_i_id integer not null,
108 | s_quantity integer,
109 | s_ytd integer,
110 | s_order_cnt integer,
111 | s_remote_cnt integer,
112 | s_data varchar(50),
113 | s_dist_01 char(24),
114 | s_dist_02 char(24),
115 | s_dist_03 char(24),
116 | s_dist_04 char(24),
117 | s_dist_05 char(24),
118 | s_dist_06 char(24),
119 | s_dist_07 char(24),
120 | s_dist_08 char(24),
121 | s_dist_09 char(24),
122 | s_dist_10 char(24)
123 | );
124 |
125 |
126 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/tableDrops.sql:
--------------------------------------------------------------------------------
1 | drop table bmsql_config;
2 |
3 | drop table bmsql_new_order;
4 |
5 | drop table bmsql_order_line;
6 |
7 | drop table bmsql_oorder;
8 |
9 | drop table bmsql_history;
10 |
11 | drop table bmsql_customer;
12 |
13 | drop table bmsql_stock;
14 |
15 | drop table bmsql_item;
16 |
17 | drop table bmsql_district;
18 |
19 | drop table bmsql_warehouse;
20 |
21 |
--------------------------------------------------------------------------------
/src/main/resources/sql.common/tableTruncates.sql:
--------------------------------------------------------------------------------
1 |
2 | truncate table bmsql_warehouse;
3 |
4 | truncate table bmsql_item;
5 |
6 | truncate table bmsql_stock;
7 |
8 | truncate table bmsql_district;
9 |
10 | truncate table bmsql_customer;
11 |
12 | truncate table bmsql_history;
13 |
14 | truncate table bmsql_oorder;
15 |
16 | truncate table bmsql_order_line;
17 |
18 | truncate table bmsql_new_order;
19 |
--------------------------------------------------------------------------------
/src/main/resources/sql.mariadb/tableCreates.sql:
--------------------------------------------------------------------------------
1 | create table bmsql_config (
2 | cfg_name varchar(30) primary key,
3 | cfg_value varchar(50)
4 | );
5 |
6 | create table bmsql_warehouse (
7 | w_id integer not null,
8 | w_ytd decimal(12,2),
9 | w_tax decimal(4,4),
10 | w_name varchar(10),
11 | w_street_1 varchar(20),
12 | w_street_2 varchar(20),
13 | w_city varchar(20),
14 | w_state char(2),
15 | w_zip char(9)
16 | );
17 |
18 | create table bmsql_district (
19 | d_w_id integer not null,
20 | d_id integer not null,
21 | d_ytd decimal(12,2),
22 | d_tax decimal(4,4),
23 | d_next_o_id integer,
24 | d_name varchar(10),
25 | d_street_1 varchar(20),
26 | d_street_2 varchar(20),
27 | d_city varchar(20),
28 | d_state char(2),
29 | d_zip char(9)
30 | );
31 |
32 | create table bmsql_customer (
33 | c_w_id integer not null,
34 | c_d_id integer not null,
35 | c_id integer not null,
36 | c_discount decimal(4,4),
37 | c_credit char(2),
38 | c_last varchar(16),
39 | c_first varchar(16),
40 | c_credit_lim decimal(12,2),
41 | c_balance decimal(12,2),
42 | c_ytd_payment decimal(12,2),
43 | c_payment_cnt integer,
44 | c_delivery_cnt integer,
45 | c_street_1 varchar(20),
46 | c_street_2 varchar(20),
47 | c_city varchar(20),
48 | c_state char(2),
49 | c_zip char(9),
50 | c_phone char(16),
51 | c_since timestamp,
52 | c_middle char(2),
53 | c_data varchar(500)
54 | );
55 |
56 | create table bmsql_history (
57 | h_c_id integer,
58 | h_c_d_id integer,
59 | h_c_w_id integer,
60 | h_d_id integer,
61 | h_w_id integer,
62 | h_date timestamp,
63 | h_amount decimal(6,2),
64 | h_data varchar(24)
65 | );
66 |
67 | create table bmsql_new_order (
68 | no_w_id integer not null,
69 | no_d_id integer not null,
70 | no_o_id integer not null
71 | );
72 |
73 | create table bmsql_oorder (
74 | o_w_id integer not null,
75 | o_d_id integer not null,
76 | o_id integer not null,
77 | o_c_id integer,
78 | o_carrier_id integer,
79 | o_ol_cnt integer,
80 | o_all_local integer,
81 | o_entry_d timestamp
82 | );
83 |
84 | create table bmsql_order_line (
85 | ol_w_id integer not null,
86 | ol_d_id integer not null,
87 | ol_o_id integer not null,
88 | ol_number integer not null,
89 | ol_i_id integer not null,
90 | ol_delivery_d timestamp,
91 | ol_amount decimal(6,2),
92 | ol_supply_w_id integer,
93 | ol_quantity integer,
94 | ol_dist_info char(24)
95 | );
96 |
97 | create table bmsql_item (
98 | i_id integer not null,
99 | i_name varchar(24),
100 | i_price decimal(5,2),
101 | i_data varchar(50),
102 | i_im_id integer
103 | );
104 |
105 | create table bmsql_stock (
106 | s_w_id integer not null,
107 | s_i_id integer not null,
108 | s_quantity integer,
109 | s_ytd integer,
110 | s_order_cnt integer,
111 | s_remote_cnt integer,
112 | s_data varchar(50),
113 | s_dist_01 char(24),
114 | s_dist_02 char(24),
115 | s_dist_03 char(24),
116 | s_dist_04 char(24),
117 | s_dist_05 char(24),
118 | s_dist_06 char(24),
119 | s_dist_07 char(24),
120 | s_dist_08 char(24),
121 | s_dist_09 char(24),
122 | s_dist_10 char(24)
123 | );
124 |
125 |
126 |
--------------------------------------------------------------------------------
/src/main/resources/sql.mariadb/tableDrops.sql:
--------------------------------------------------------------------------------
1 | drop table bmsql_config;
2 |
3 | drop table bmsql_new_order;
4 |
5 | drop table bmsql_order_line;
6 |
7 | drop table bmsql_oorder;
8 |
9 | drop table bmsql_history;
10 |
11 | drop table bmsql_customer;
12 |
13 | drop table bmsql_stock;
14 |
15 | drop table bmsql_item;
16 |
17 | drop table bmsql_district;
18 |
19 | drop table bmsql_warehouse;
20 |
21 |
--------------------------------------------------------------------------------
/src/main/resources/sql.oracle/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
1 | drop package tpccc_oracle;
2 | drop type num_array;
3 | drop type char_array;
4 | drop type varchar24_array;
5 | drop type int_array;
6 | drop type varchar16_array;
7 | drop type timestamp_array;
8 |
--------------------------------------------------------------------------------
/src/main/resources/sql.oracle/tableCreates.sql:
--------------------------------------------------------------------------------
1 | create table bmsql_config (
2 | cfg_name varchar2(30) primary key,
3 | cfg_value varchar2(50)
4 | );
5 |
6 | create table bmsql_warehouse (
7 | w_id integer not null,
8 | w_ytd number(12,2),
9 | w_tax number(4,4),
10 | w_name varchar2(10),
11 | w_street_1 varchar2(20),
12 | w_street_2 varchar2(20),
13 | w_city varchar2(20),
14 | w_state char(2),
15 | w_zip char(9)
16 | );
17 |
18 | create table bmsql_district (
19 | d_w_id integer not null,
20 | d_id integer not null,
21 | d_ytd number(12,2),
22 | d_tax number(4,4),
23 | d_next_o_id integer,
24 | d_name varchar2(10),
25 | d_street_1 varchar2(20),
26 | d_street_2 varchar2(20),
27 | d_city varchar2(20),
28 | d_state char(2),
29 | d_zip char(9)
30 | );
31 |
32 | create table bmsql_customer (
33 | c_w_id integer not null,
34 | c_d_id integer not null,
35 | c_id integer not null,
36 | c_discount number(4,4),
37 | c_credit char(2),
38 | c_last varchar2(16),
39 | c_first varchar2(16),
40 | c_credit_lim number(12,2),
41 | c_balance number(12,2),
42 | c_ytd_payment number(12,2),
43 | c_payment_cnt integer,
44 | c_delivery_cnt integer,
45 | c_street_1 varchar2(20),
46 | c_street_2 varchar2(20),
47 | c_city varchar2(20),
48 | c_state char(2),
49 | c_zip char(9),
50 | c_phone char(16),
51 | c_since timestamp,
52 | c_middle char(2),
53 | c_data varchar2(500)
54 | );
55 |
56 | create table bmsql_history (
57 | h_c_id integer,
58 | h_c_d_id integer,
59 | h_c_w_id integer,
60 | h_d_id integer,
61 | h_w_id integer,
62 | h_date timestamp,
63 | h_amount number(6,2),
64 | h_data varchar2(24)
65 | );
66 |
67 | create table bmsql_new_order (
68 | no_w_id integer not null,
69 | no_d_id integer not null,
70 | no_o_id integer not null
71 | );
72 |
73 | create table bmsql_oorder (
74 | o_w_id integer not null,
75 | o_d_id integer not null,
76 | o_id integer not null,
77 | o_c_id integer,
78 | o_carrier_id integer,
79 | o_ol_cnt integer,
80 | o_all_local integer,
81 | o_entry_d timestamp
82 | );
83 |
84 | create table bmsql_order_line (
85 | ol_w_id integer not null,
86 | ol_d_id integer not null,
87 | ol_o_id integer not null,
88 | ol_number integer not null,
89 | ol_i_id integer not null,
90 | ol_delivery_d timestamp,
91 | ol_amount number(6,2),
92 | ol_supply_w_id integer,
93 | ol_quantity integer,
94 | ol_dist_info char(24)
95 | );
96 |
97 | create table bmsql_item (
98 | i_id integer not null,
99 | i_name varchar2(24),
100 | i_price number(5,2),
101 | i_data varchar2(50),
102 | i_im_id integer
103 | );
104 |
105 | create table bmsql_stock (
106 | s_w_id integer not null,
107 | s_i_id integer not null,
108 | s_quantity integer,
109 | s_ytd integer,
110 | s_order_cnt integer,
111 | s_remote_cnt integer,
112 | s_data varchar2(50),
113 | s_dist_01 char(24),
114 | s_dist_02 char(24),
115 | s_dist_03 char(24),
116 | s_dist_04 char(24),
117 | s_dist_05 char(24),
118 | s_dist_06 char(24),
119 | s_dist_07 char(24),
120 | s_dist_08 char(24),
121 | s_dist_09 char(24),
122 | s_dist_10 char(24)
123 | );
124 |
125 |
126 |
--------------------------------------------------------------------------------
/src/main/resources/sql.postgres/buildFinish.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra commands to run after the tables are created, loaded,
3 | -- indexes built and extras created.
4 | -- PostgreSQL version.
5 | -- ----
6 |
7 | vacuum freeze analyze;
8 |
--------------------------------------------------------------------------------
/src/main/resources/sql.postgres/extraCommandsBeforeLoad.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE bmsql_oorder SET (FILLFACTOR = 85);
2 | ALTER TABLE bmsql_order_line SET (FILLFACTOR = 85);
3 | ALTER TABLE bmsql_warehouse SET (FILLFACTOR = 50);
4 | ALTER TABLE bmsql_district SET (FILLFACTOR = 79);
5 | ALTER TABLE bmsql_customer SET (FILLFACTOR = 90);
6 | ALTER TABLE bmsql_stock SET (FILLFACTOR = 95);
7 |
--------------------------------------------------------------------------------
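A brief note on the FILLFACTOR settings above: they make PostgreSQL leave
the given percentage of each heap page unfilled at load time, so that later
UPDATEs of the heavily modified TPC-C rows can place the new row version on
the same page (enabling HOT updates) instead of migrating it elsewhere. A
hypothetical way to verify that the options took effect, not part of the
repository scripts:

    -- list the bmsql tables that carry non-default storage options
    select relname, reloptions
      from pg_class
     where relname like 'bmsql_%'
       and reloptions is not null;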
/src/main/resources/sql.postgres/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
1 | drop function if exists bmsql_proc_new_order (integer, integer, integer, integer[], integer[], integer[]);
2 | drop function if exists bmsql_proc_stock_level(integer, integer, integer);
3 | drop function if exists bmsql_proc_payment(integer, integer, integer, integer, integer, varchar(16), decimal(6,2));
4 | drop function if exists bmsql_proc_order_status (integer, integer, integer, varchar(16));
5 | drop function if exists bmsql_cid_from_clast(integer, integer, varchar(16));
6 | drop function if exists bmsql_proc_delivery_bg (integer, integer, integer);
7 |
--------------------------------------------------------------------------------
/src/main/resources/sql.postgres/tableCopies.sql:
--------------------------------------------------------------------------------
1 |
2 | copy bmsql_config
3 | (cfg_name, cfg_value)
4 | from '/tmp/csv/config.csv' WITH CSV;
5 |
6 | copy bmsql_warehouse
7 | (w_id, w_ytd, w_tax, w_name, w_street_1, w_street_2, w_city, w_state, w_zip)
8 | from '/tmp/csv/warehouse.csv' WITH CSV;
9 |
10 | copy bmsql_item
11 | (i_id, i_name, i_price, i_data, i_im_id)
12 | from '/tmp/csv/item.csv' WITH CSV;
13 |
14 | copy bmsql_stock
15 | (s_i_id, s_w_id, s_quantity, s_ytd, s_order_cnt, s_remote_cnt, s_data,
16 | s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05,
17 | s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10)
18 | from '/tmp/csv/stock.csv' WITH CSV;
19 |
20 | copy bmsql_district
21 | (d_id, d_w_id, d_ytd, d_tax, d_next_o_id, d_name, d_street_1,
22 | d_street_2, d_city, d_state, d_zip)
23 | from '/tmp/csv/district.csv' WITH CSV;
24 |
25 | copy bmsql_customer
26 | (c_id, c_d_id, c_w_id, c_discount, c_credit, c_last, c_first, c_credit_lim,
27 | c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_street_1,
28 | c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_middle, c_data)
29 | from '/tmp/csv/customer.csv' WITH CSV;
30 |
31 | copy bmsql_history
32 | (h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, h_date, h_amount, h_data)
33 | from '/tmp/csv/cust-hist.csv' WITH CSV;
34 |
35 | copy bmsql_oorder
36 | (o_id, o_w_id, o_d_id, o_c_id, o_carrier_id, o_ol_cnt, o_all_local, o_entry_d)
37 | from '/tmp/csv/order.csv' WITH CSV NULL AS 'NULL';
38 |
39 | copy bmsql_order_line
40 | (ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_delivery_d,
41 | ol_amount, ol_supply_w_id, ol_quantity, ol_dist_info)
42 | from '/tmp/csv/order-line.csv' WITH CSV NULL AS 'NULL';
43 |
44 | copy bmsql_new_order
45 | (no_w_id, no_d_id, no_o_id)
46 | from '/tmp/csv/new-order.csv' WITH CSV;
47 |
--------------------------------------------------------------------------------
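Worth noting about the COPY commands above: they run server-side, so the
/tmp/csv directory must be readable on the database server itself, which
requires superuser or (on PostgreSQL 11 and later) membership in the
pg_read_server_files role. If the CSV files instead live on the machine
running the loader, a client-side sketch of the first command using psql's
\copy meta-command would be:

    \copy bmsql_config (cfg_name, cfg_value) from '/tmp/csv/config.csv' with csv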
/src/main/resources/sql.transact-sql/tableCreates.sql:
--------------------------------------------------------------------------------
1 | create table bmsql_config (
2 | cfg_name nvarchar(30) primary key,
3 | cfg_value nvarchar(50)
4 | );
5 |
6 | create table bmsql_warehouse (
7 | w_id int not null,
8 | w_ytd decimal(12,2),
9 | w_tax decimal(4,4),
10 | w_name nvarchar(10),
11 | w_street_1 nvarchar(20),
12 | w_street_2 nvarchar(20),
13 | w_city nvarchar(20),
14 | w_state nchar(2),
15 | w_zip nchar(9)
16 | );
17 |
18 | create table bmsql_district (
19 | d_w_id int not null,
20 | d_id int not null,
21 | d_ytd decimal(12,2),
22 | d_tax decimal(4,4),
23 | d_next_o_id int,
24 | d_name nvarchar(10),
25 | d_street_1 nvarchar(20),
26 | d_street_2 nvarchar(20),
27 | d_city nvarchar(20),
28 | d_state nchar(2),
29 | d_zip nchar(9)
30 | );
31 |
32 | create table bmsql_customer (
33 | c_w_id int not null,
34 | c_d_id int not null,
35 | c_id int not null,
36 | c_discount decimal(4,4),
37 | c_credit nchar(2),
38 | c_last nvarchar(16),
39 | c_first nvarchar(16),
40 | c_credit_lim decimal(12,2),
41 | c_balance decimal(12,2),
42 | c_ytd_payment decimal(12,2),
43 | c_payment_cnt int,
44 | c_delivery_cnt int,
45 | c_street_1 nvarchar(20),
46 | c_street_2 nvarchar(20),
47 | c_city nvarchar(20),
48 | c_state nchar(2),
49 | c_zip nchar(9),
50 | c_phone nchar(16),
51 | c_since datetime,
52 | c_middle nchar(2),
53 | c_data nvarchar(500)
54 | );
55 |
56 | create table bmsql_history (
57 | h_c_id int,
58 | h_c_d_id int,
59 | h_c_w_id int,
60 | h_d_id int,
61 | h_w_id int,
62 | h_date datetime,
63 | h_amount decimal(6,2),
64 | h_data nvarchar(24)
65 | );
66 |
67 | create table bmsql_new_order (
68 | no_w_id int not null,
69 | no_d_id int not null,
70 | no_o_id int not null
71 | );
72 |
73 | create table bmsql_oorder (
74 | o_w_id int not null,
75 | o_d_id int not null,
76 | o_id int not null,
77 | o_c_id int,
78 | o_carrier_id int,
79 | o_ol_cnt int,
80 | o_all_local int,
81 | o_entry_d datetime
82 | );
83 |
84 | create table bmsql_order_line (
85 | ol_w_id int not null,
86 | ol_d_id int not null,
87 | ol_o_id int not null,
88 | ol_number int not null,
89 | ol_i_id int not null,
90 | ol_delivery_d datetime,
91 | ol_amount decimal(6,2),
92 | ol_supply_w_id int,
93 | ol_quantity int,
94 | ol_dist_info nchar(24)
95 | );
96 |
97 | create table bmsql_item (
98 | i_id int not null,
99 | i_name nvarchar(24),
100 | i_price decimal(5,2),
101 | i_data nvarchar(50),
102 | i_im_id int
103 | );
104 |
105 | create table bmsql_stock (
106 | s_w_id int not null,
107 | s_i_id int not null,
108 | s_quantity int,
109 | s_ytd int,
110 | s_order_cnt int,
111 | s_remote_cnt int,
112 | s_data nvarchar(50),
113 | s_dist_01 nchar(24),
114 | s_dist_02 nchar(24),
115 | s_dist_03 nchar(24),
116 | s_dist_04 nchar(24),
117 | s_dist_05 nchar(24),
118 | s_dist_06 nchar(24),
119 | s_dist_07 nchar(24),
120 | s_dist_08 nchar(24),
121 | s_dist_09 nchar(24),
122 | s_dist_10 nchar(24)
123 | );
124 |
125 |
126 |
--------------------------------------------------------------------------------