├── .gitignore
├── HOW-TO-RUN.txt
├── LICENSE.gpl-2.0.txt
├── README.Oracle.txt
├── README.md
├── build.xml
├── doc
└── src
│ └── TimedDriver.odt
├── lib
├── .gitignore
├── apache-log4j-extras-1.1.jar
├── firebird
│ ├── connector-api-1.5.jar
│ └── jaybird-2.2.9.jar
├── log4j-1.2.17.jar
├── oracle
│ ├── .gitignore
│ └── README.txt
└── postgres
│ └── postgresql-42.2.5.jar
├── run
├── .gitignore
├── checks
│ ├── check_details.sql
│ └── checks.sql
├── funcs.sh
├── generateGraphs.sh
├── generateReport.sh
├── log4j.properties
├── misc
│ ├── blk_device_iops.R
│ ├── blk_device_kbps.R
│ ├── cpu_utilization.R
│ ├── dirty_buffers.R
│ ├── latency.R
│ ├── net_device_iops.R
│ ├── net_device_kbps.R
│ ├── os_collector_linux.py
│ └── tpm_nopm.R
├── runBenchmark.sh
├── runDatabaseBuild.sh
├── runDatabaseDestroy.sh
├── runLoader.sh
├── runSQL.sh
├── sample.firebird.properties
├── sample.oracle.properties
├── sample.postgresql.properties
├── sql.common
│ ├── buildFinish.sql
│ ├── extraCommandsBeforeLoad.sql
│ ├── foreignKeys.sql
│ ├── indexCreates.sql
│ ├── indexDrops.sql
│ ├── storedProcedureCreates.sql
│ ├── storedProcedureDrops.sql
│ ├── tableCreates.sql
│ ├── tableDrops.sql
│ └── tableTruncates.sql
├── sql.firebird
│ └── extraHistID.sql
├── sql.oracle
│ ├── extraHistID.sql
│ ├── storedProcedureCreates.sql
│ ├── storedProcedureDrops.sql
│ └── tableCreates.sql
└── sql.postgres
│ ├── buildFinish.sql
│ ├── extraCommandsBeforeLoad.sql
│ ├── extraHistID.sql
│ ├── storedProcedureCreates.sql
│ ├── storedProcedureDrops.sql
│ └── tableCopies.sql
└── src
├── LoadData
├── LoadData.java
└── LoadDataWorker.java
├── OSCollector
└── OSCollector.java
├── client
├── jTPCC.java
├── jTPCCConfig.java
├── jTPCCConnection.java
├── jTPCCRandom.java
├── jTPCCTData.java
├── jTPCCTerminal.java
└── jTPCCUtil.java
└── jdbc
└── ExecJDBC.java
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | dist
3 |
--------------------------------------------------------------------------------
/HOW-TO-RUN.txt:
--------------------------------------------------------------------------------
1 |
2 | Instructions for running BenchmarkSQL on PostgreSQL
3 | ---------------------------------------------------
4 |
5 | 0. Requirements
6 |
7 | Use of JDK8 is required.
8 |
9 | 1. Create the benchmarksql user and a database
10 |
11 | As Unix user postgres use the psql shell to connect to the postgres
12 | database and issue the CREATE USER and CREATE DATABASE commands.
13 |
14 | [postgres@localhost ~] $ psql postgres
15 | psql (9.5.2)
16 | Type "help" for help.
17 |
18 | postgres=# CREATE USER benchmarksql WITH ENCRYPTED PASSWORD 'changeme';
19 | postgres=# CREATE DATABASE benchmarksql OWNER benchmarksql;
20 | postgres=# \q
21 | [postgres@localhost ~] $
22 |
23 | 2. Compile the BenchmarkSQL source code
24 |
25 | As your own UNIX user change into the toplevel directory of the
26 | benchmarksql git repository checkout or the directory that was
27 | created by unpacking the release tarball/zipfile. Use the ant
28 | command to compile the code.
29 |
30 | [wieck@localhost ~] $ cd benchmarksql
31 | [wieck@localhost benchmarksql] $ ant
32 | Buildfile: /nas1/home/wieck/benchmarksql.git/build.xml
33 |
34 | init:
35 | [mkdir] Created dir: /home/wieck/benchmarksql/build
36 |
37 | compile:
38 | [javac] Compiling 11 source files to /home/wieck/benchmarksql/build
39 |
40 | dist:
41 | [mkdir] Created dir: /home/wieck/benchmarksql/dist
42 | [jar] Building jar: /home/wieck/benchmarksql/dist/BenchmarkSQL-6.devel.jar
43 | BUILD SUCCESSFUL
44 | Total time: 1 second
45 | [wieck@localhost benchmarksql] $
46 |
47 | 3. Create the benchmark configuration file
48 |
49 | Change into the run directory, copy the sample.postgresql.properties file and edit
50 | the copy to match your system setup and desired scaling.
51 |
52 | [wieck@localhost benchmarksql] $ cd run
53 | [wieck@localhost run] $ cp sample.postgresql.properties my_postgres.properties
54 | [wieck@localhost run] $ vi my_postgres.properties
55 | [wieck@localhost run] $
56 |
57 | Note that the provided example configuration is meant to test
58 | the functionality of your setup. That benchmarksql can connect
59 | to the database and execute transactions. That configuration
60 | is NOT a benchmark run. To make it into one you need to have a
61 | configuration that matches your database server size and
62 | workload. Leave the sizing for now and perform a first functional
63 | test.
64 |
65 | The BenchmarkSQL database has an initial size of approximately
66 | 100MB per configured warehouse. A typical setup would be
67 | a database of 2-5 times the physical RAM of the server.
68 |
69 | Likewise the number of concurrent database connections (config
70 | parameter terminals) should be about 2-6 times the
71 | number of CPU threads.
72 |
73 | Last but not least benchmark runs are normally done for hours,
74 | if not days. This is because on the database sizes above it
75 | will take that long to reach a steady state and make sure that
76 | all performance relevant functionality of the database, like
77 | checkpointing and vacuuming, is included in the measurement.
78 |
79 | So you can see that with a modern server, that has 32-256 CPU
80 | threads and 64-512GiB of RAM, we are talking about thousands of
81 | warehouses and hundreds of concurrent database connections.
82 |
83 | 4. Build the schema and initial database load
84 |
85 | Execute the runDatabaseBuild.sh script with your configuration file.
86 |
87 | [wieck@localhost run]$ ./runDatabaseBuild.sh my_postgres.properties
88 | # ------------------------------------------------------------
89 | # Loading SQL file ./sql.common/tableCreates.sql
90 | # ------------------------------------------------------------
91 | create table bmsql_config (
92 | cfg_name varchar(30) primary key,
93 | cfg_value varchar(50)
94 | );
95 | create table bmsql_warehouse (
96 | w_id integer not null,
97 | w_ytd decimal(12,2),
98 | [...]
99 | Starting BenchmarkSQL LoadData
100 |
101 | driver=org.postgresql.Driver
102 | conn=jdbc:postgresql://localhost:5432/benchmarksql
103 | user=benchmarksql
104 | password=***********
105 | warehouses=30
106 | loadWorkers=10
107 | fileLocation (not defined)
108 | csvNullValue (not defined - using default 'NULL')
109 |
110 | Worker 000: Loading ITEM
111 | Worker 001: Loading Warehouse 1
112 | Worker 002: Loading Warehouse 2
113 | Worker 003: Loading Warehouse 3
114 | [...]
115 | Worker 000: Loading Warehouse 30 done
116 | Worker 008: Loading Warehouse 29 done
117 | # ------------------------------------------------------------
118 | # Loading SQL file ./sql.common/indexCreates.sql
119 | # ------------------------------------------------------------
120 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey
121 | primary key (w_id);
122 | alter table bmsql_district add constraint bmsql_district_pkey
123 | primary key (d_w_id, d_id);
124 | [...]
125 | vacuum analyze;
126 | [wieck@localhost run]$
127 |
128 | 5. Run the configured benchmark
129 |
130 | [wieck@localhost run]$ ./runBenchmark.sh my_postgres.properties
131 |
132 | The benchmark should run with the configured number of concurrent
133 | connections (terminals) for the configured duration or number of transactions.
134 |
135 | The end result of the benchmark will be reported like this:
136 |
137 | 01:58:09,081 [Thread-1] INFO jTPCC : Term-00,
138 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmC (NewOrders) = 179.55
139 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Measured tpmTOTAL = 329.17
140 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session Start = 2016-05-25 01:58:07
141 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Session End = 2016-05-25 01:58:09
142 | 01:58:09,082 [Thread-1] INFO jTPCC : Term-00, Transaction Count = 10
143 |
144 | At this point you have a working setup.
145 |
146 | 6. Scale the benchmark configuration.
147 |
148 | Change the my_postgres.properties file to the correct scaling
149 | (number of warehouses and concurrent connections/terminals). Switch
150 | from using a transaction count to time based:
151 |
152 | runTxnsPerTerminal=0
153 | runMins=180
154 |
155 | Rebuild the database (if needed) by running
156 |
157 | [wieck@localhost run]$ ./runDatabaseDestroy.sh my_postgres.properties
158 | [wieck@localhost run]$ ./runDatabaseBuild.sh my_postgres.properties
159 |
160 | Then run the benchmark again.
161 |
162 | Rinse and repeat.
163 |
164 | 7. Result report
165 |
166 | BenchmarkSQL collects detailed performance statistics and (if
167 | configured) OS performance data. The example configuration file
168 | defaults to a directory starting with my_result_.
169 |
170 | Use the generateReport.sh DIRECTORY script to create an HTML file
171 | with graphs. This requires R to be installed, which is beyond the
172 | scope of this HOW-TO.
173 |
174 |
--------------------------------------------------------------------------------
/LICENSE.gpl-2.0.txt:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 |
294 | Copyright (C) <year>  <name of author>
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | <signature of Ty Coon>, 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
--------------------------------------------------------------------------------
/README.Oracle.txt:
--------------------------------------------------------------------------------
1 |
2 | The following assumes a default installation of oracle-xe-11.2.0-1.0.
3 |
4 | Creating the benchmarksql user run the following commands in sqlplus
5 | under the sysdba account:
6 |
7 | <<_EOF_
8 |
9 | CREATE USER benchmarksql
10 | IDENTIFIED BY "bmsql1"
11 | DEFAULT TABLESPACE users
12 | TEMPORARY TABLESPACE temp;
13 |
14 | GRANT CONNECT TO benchmarksql;
15 | GRANT CREATE PROCEDURE TO benchmarksql;
16 | GRANT CREATE SEQUENCE TO benchmarksql;
17 | GRANT CREATE SESSION TO benchmarksql;
18 | GRANT CREATE TABLE TO benchmarksql;
19 | GRANT CREATE TRIGGER TO benchmarksql;
20 | GRANT CREATE TYPE TO benchmarksql;
21 | GRANT UNLIMITED TABLESPACE TO benchmarksql;
22 |
23 | _EOF_
24 |
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | BENCHMARKSQL README
2 | ===================
3 |
4 | BenchmarkSQL runs a TPC-C like test against relational databases to compare performance across versions, settings, and vendors. PostgreSQL and other databases are supported.
5 |
6 | BenchmarkSQL is Open Source, distributed under the GNU General Public License version 2.0 (GPLv2) license.
7 |
8 | CHANGE LOG:
9 | -----------
10 |
11 | Version 5.1 lussman & jannicash:
12 | --------------------------------
13 | + Clarify License
14 | + Remove deprecated Oracle proprietary features (allows to build without the ORA JDBC driver)
15 | + Change result graphs to inlined SVGs
16 | + Add an option to skip rampup time in the report
17 | + Add CPU utilization summary to report
18 | + Numerous bug fixes
19 |
20 | Version 5.0 lussman & jannicash:
21 | --------------------------------------
22 | + Upgrade to PostgreSQL 9.3 JDBC 4.1 version 1102 driver
23 | + Improve support for Oracle
24 | + Re-implement the non-uniform random generator in TPC-C style
25 | + Conform to clause 4.3.3.1 and enable lookup by last name
26 | + Add a switch to disable terminal-warehouse association, spreading
27 | the data access over all configured warehouses.
28 | + Re-worked the run shell scripts and the location of SQL files to
29 | make support of more database types easier
30 | + Add support for Firebirdsql
31 | + Add FOREIGN KEYS as defined by TPC-C 1.3
32 | + Major code overhaul. The per transaction type terminal data
33 | generation, execution and terminal trace code is moved into a
34 | module jTPCCTData. The database connection with all prepared
35 | statements has moved into a module jTPCCConnection
36 | + Add collecting per transaction result data and OS Level
37 | resource usage collection. The R statistics package is used
38 | to graph detailed information and a complete report in HTML
39 | can be generated from the data
40 |
41 | Version 4.1.2 jannicash:
42 | ------------------------
43 | + Fixed one more preparedStatement() leak. Hopefully with the help
44 | of Oracle's V$OPEN_CURSOR view we got them all now.
45 | + Fixed a possible deadlock problem in the NEW_ORDER transaction.
46 | Multiple parallel transactions could attempt to lock the same
47 | STOCK rows in reverse order. Sorting the order lines by item ID
48 | avoids this problem.
49 |
50 | Version 4.1.1 2016-01-31 jannicash:
51 | -----------------------------------
52 | + Changed the status line to update only once per second. The previous
53 | implementation was getting rather noisy at high throughput.
54 | + Fixed two preparedStatement() leaks that could cause ORA-01000 errors
55 | on longer runs with high throughput.
56 | + Fixed a problem in the calculation of sleep time between
57 | transactions when using limitTxnsPerMin that could cause the test
58 | to hang at the end.
59 | + Added support for escaping ; as \; in SQL files to be able to load
60 | functions and execute anonymous PL blocks (needed for next item).
61 | + Changed the definition of history.hist_id into a plain integer with
62 | no special functionality. Two new database vendor specific SQL
63 | scripts allow to enable the column after data load as an auto
64 | incrementing primary key. See HOW-TO-RUN.txt for details.
65 |
66 | Version 4.1.0 2014-03-13 lussman:
67 | ---------------------------------
68 | + Upgrade to using JDK 7
69 | + Upgrade to PostgreSQL JDBC 4.1 version 1101 driver
70 | + Stop claiming to support DB2 (only Postgres & Oracle are well tested)
71 |
72 | Version 4.0.9 2013-11-04 cadym:
73 | -------------------------------
74 | + Incorporate new PostgreSQL JDBC 4 version 1100 driver
75 | + Changed default user from postgres to benchmarksql
76 | + Added id column as primary key to history table
77 | + Renamed schema to benchmarksql
78 | + Changed log4j format to be more readable
79 | + Created the "benchmark" schema to contain all tables
80 | + Incorporate new PostgreSQL JDBC4 version 1003 driver
81 | + Transaction rate pacing mechanism
82 | + Correct error with loading customer table from csv file
83 | + Status line report dynamically shown on terminal
84 | + Fix lookup by name in PaymentStatus and Delivery Transactions
85 | (in order to be more compatible with the TPC-C spec)
86 | + Rationalized the variable naming in the input parameter files
87 | (now that the GUI is gone, variable names still make sense)
88 | + Default log4j settings only writes to file (not terminal)
89 |
90 | Version 4.0.2 2013-06-06 lussman & cadym:
91 | --------------------------------------------
92 | + Removed Swing & AWT GUI so that this program is runnable from
93 | the command line
94 | + Remove log4j usage from runSQL & runLoader (only used now for
95 | the actual running of the Benchmark)
96 | + Fix truncation problem with customer.csv file
97 | + Comment out "BadCredit" business logic that was not working
98 | and throwing stack traces
99 | + Fix log4j messages to always show the terminal name
100 | + Remove bogus log4j messages
101 |
102 | Version 3.0.9 2013-03-21 lussman:
103 | ----------------------------------
104 | + Config log4j for rotating log files once per minute
105 | + Default flat file location to '/tmp/csv/' in
106 | table copies script
107 | + Drop incomplete & untested Windoze '.bat' scripts
108 | + Standardize logging with log4j
109 | + Improve Logging with meaningful DEBUG and INFO levels
110 | + Simplify "build.xml" to eliminate nbproject dependency
+ Defaults read in from properties
+ Groundwork laid to eliminate the GUI
113 | + Default GUI console to PostgreSQL and 10 Warehouses
114 |
115 | Version 2.3.5 2013-01-29 lussman:
116 | -----------------------------------
117 | + Default build is now with JDK 1.6 and JDBC 4 Postgres 9.2 driver
118 | + Remove outdated JDBC 3 drivers (for JDK 1.5). You can run as
119 | before by a JDBC4 driver from any supported vendor.
120 | + Remove ExecJDBC warning about trying to rollback when in
121 | autocommit mode
122 | + Remove the extraneous COMMIT statements from the DDL scripts
123 | since ExecJDBC runs in autocommit mode
124 | + Fix the version number displayed in the console
125 |
126 | Versions 1.0 thru 2.2 2004 - 2012 lussman:
127 | -------------------------------------------
128 | + Dare to Compare
129 | + Forked from the jTPCC project
130 |
--------------------------------------------------------------------------------
/build.xml:
--------------------------------------------------------------------------------
1 |
2 | BenchmarkSQL Build File
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/doc/src/TimedDriver.odt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/doc/src/TimedDriver.odt
--------------------------------------------------------------------------------
/lib/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/.gitignore
--------------------------------------------------------------------------------
/lib/apache-log4j-extras-1.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/apache-log4j-extras-1.1.jar
--------------------------------------------------------------------------------
/lib/firebird/connector-api-1.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/firebird/connector-api-1.5.jar
--------------------------------------------------------------------------------
/lib/firebird/jaybird-2.2.9.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/firebird/jaybird-2.2.9.jar
--------------------------------------------------------------------------------
/lib/log4j-1.2.17.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/log4j-1.2.17.jar
--------------------------------------------------------------------------------
/lib/oracle/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
3 | !README.txt
4 |
--------------------------------------------------------------------------------
/lib/oracle/README.txt:
--------------------------------------------------------------------------------
1 | Copy the ojdbc.jar to use with Oracle here, or make
2 | sure that the environment variable ORACLE_HOME is set properly
3 | and the JDBC driver is found at $ORACLE_HOME/lib.
4 |
5 | You can download the Oracle 12c JDBC driver (ojdbc7.jar) here:
6 | http://www.oracle.com/technetwork/database/features/jdbc/jdbc-drivers-12c-download-1958347.html
7 |
--------------------------------------------------------------------------------
/lib/postgres/postgresql-42.2.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petergeoghegan/benchmarksql/f2f39cf42216a7b9f912eed07ccfb38522d02a21/lib/postgres/postgresql-42.2.5.jar
--------------------------------------------------------------------------------
/run/.gitignore:
--------------------------------------------------------------------------------
1 | my_*
2 | *.log
3 | .jTPCC_run_seq.dat
4 |
--------------------------------------------------------------------------------
/run/checks/check_details.sql:
--------------------------------------------------------------------------------
-- ----------------------------------------------------------------------
-- Consistency checks with per-row detail output. Each test first
-- prints a one line OK/ERROR summary and then lists one row per
-- offending ORDER / NEW_ORDER / ORDER_LINE entry.
-- ----------------------------------------------------------------------

-- ----------------------------------------------------------------------
-- Test 1
--
-- All ORDER rows where O_CARRIER_ID is NULL must have a matching
-- row in NEW_ORDER.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Undelivered ORDERs not found in NEW_ORDER' AS "Problem"
FROM bmsql_oorder
WHERE o_carrier_id IS NULL
AND NOT EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- Detail information (one row per undelivered ORDER without NEW_ORDER)
SELECT 'Undelivered ORDER' AS "_", O_W_ID, O_D_ID, O_ID,
    'not found in NEW_ORDER' AS "__"
FROM bmsql_oorder
WHERE o_carrier_id IS NULL
AND NOT EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);


-- ----------------------------------------------------------------------
-- Test 2
--
-- All ORDER rows where O_CARRIER_ID is NOT NULL must not have a matching
-- row in NEW_ORDER.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Delivered ORDERs still found in NEW_ORDER' AS "Problem"
FROM bmsql_oorder
WHERE o_carrier_id IS NOT NULL
AND EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- Detail information (one row per delivered ORDER still in NEW_ORDER)
SELECT 'Delivered ORDER' AS "_", O_W_ID, O_D_ID, O_ID,
    'still found in NEW_ORDER' AS "__"
FROM bmsql_oorder
WHERE o_carrier_id IS NOT NULL
AND EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);


-- ----------------------------------------------------------------------
-- Test 3
--
-- All NEW_ORDER rows must have a matching ORDER row.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Orphaned NEW_ORDER rows' AS "Problem"
FROM bmsql_new_order
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- Detail information (one row per NEW_ORDER without matching ORDER)
SELECT 'Orphaned NEW_ORDER row' AS "_", no_w_id, no_d_id, no_o_id
FROM bmsql_new_order
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);


-- ----------------------------------------------------------------------
-- Test 4
--
-- ORDER_LINES must have a matching ORDER
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Orphaned ORDER_LINE rows' AS "Problem"
FROM bmsql_order_line
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
);

-- Detail information (one row per ORDER_LINE without matching ORDER)
SELECT 'Orphaned ORDER_LINE row' AS "_", ol_w_id, ol_d_id, ol_o_id
FROM bmsql_order_line
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
);
95 |
96 |
-- ----------------------------------------------------------------------
-- Test 5
--
-- Check the ORDER.O_OL_CNT
--
-- Note: count(ol_o_id) (not count(*)) is used so that an ORDER with
-- zero ORDER_LINE rows reports an actual count of 0; with count(*)
-- the LEFT JOIN would produce one all-NULL row and report 1.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'ORDERs with wrong O_OL_CNT' AS "Problem"
FROM (
    SELECT o_w_id, o_d_id, o_id, o_ol_cnt, count(ol_o_id) AS "actual"
    FROM bmsql_oorder
    LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
                              AND ol_o_id = o_id
    GROUP BY o_w_id, o_d_id, o_id, o_ol_cnt
    HAVING o_ol_cnt <> count(ol_o_id)
) AS X;

-- Detail information (one row per ORDER with wrong O_OL_CNT)
SELECT 'Wrong O_OL_CNT' AS "Problem", o_w_id, o_d_id, o_id, o_ol_cnt, count(ol_o_id) AS "actual"
FROM bmsql_oorder
LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
                          AND ol_o_id = o_id
GROUP BY "Problem", o_w_id, o_d_id, o_id, o_ol_cnt
HAVING o_ol_cnt <> count(ol_o_id);
120 |
121 |
--------------------------------------------------------------------------------
/run/checks/checks.sql:
--------------------------------------------------------------------------------
-- ----------------------------------------------------------------------
-- Consistency checks, summary output only. Every query prints a
-- single OK/ERROR line; check_details.sql additionally lists the
-- offending rows.
-- ----------------------------------------------------------------------

-- ----------------------------------------------------------------------
-- Test 1
--
-- All ORDER rows where O_CARRIER_ID is NULL must have a matching
-- row in NEW_ORDER.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Undelivered ORDERs not found in NEW_ORDER' AS "Description"
FROM bmsql_oorder
WHERE o_carrier_id IS NULL
AND NOT EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- ----------------------------------------------------------------------
-- Test 2
--
-- All ORDER rows where O_CARRIER_ID is NOT NULL must not have a matching
-- row in NEW_ORDER.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Delivered ORDERs still found in NEW_ORDER' AS "Description"
FROM bmsql_oorder
WHERE o_carrier_id IS NOT NULL
AND EXISTS (
    SELECT 1 FROM bmsql_new_order
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- ----------------------------------------------------------------------
-- Test 3
--
-- All NEW_ORDER rows must have a matching ORDER row.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Orphaned NEW_ORDER rows' AS "Description"
FROM bmsql_new_order
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE no_w_id = o_w_id AND no_d_id = o_d_id AND no_o_id = o_id
);

-- ----------------------------------------------------------------------
-- Test 4
--
-- ORDER_LINES must have a matching ORDER
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Orphaned ORDER_LINE rows' AS "Description"
FROM bmsql_order_line
WHERE NOT EXISTS (
    SELECT 1 FROM bmsql_oorder
    WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
);
56 |
-- ----------------------------------------------------------------------
-- Test 5
--
-- Check the ORDER.O_OL_CNT
--
-- Note: count(ol_o_id) (not count(*)) is used so that an ORDER with
-- zero ORDER_LINE rows yields an actual line count of 0; with count(*)
-- the LEFT JOIN would produce one all-NULL row and count 1.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'ORDERs with wrong O_OL_CNT' AS "Description"
FROM (
    SELECT o_w_id, o_d_id, o_id, o_ol_cnt, count(ol_o_id) AS "actual"
    FROM bmsql_oorder
    LEFT JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id
                              AND ol_o_id = o_id
    GROUP BY o_w_id, o_d_id, o_id, o_ol_cnt
    HAVING o_ol_cnt <> count(ol_o_id)
) AS X;
72 |
-- ----------------------------------------------------------------------
-- Test 6
--
-- The W_YTD must match the sum(D_YTD) for the 10 districts of the
-- Warehouse.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count", 'Warehouses where W_YTD <> sum(D_YTD)' AS "Description"
FROM (
    SELECT w_id, w_ytd, sum(d_ytd) AS sum_d_ytd
    FROM bmsql_warehouse
    LEFT JOIN bmsql_district ON d_w_id = w_id
    GROUP BY w_id, w_ytd
    HAVING w_ytd <> sum(d_ytd)
) AS X;

-- ----------------------------------------------------------------------
-- Test 7
--
-- The sum of all W_YTD must match the sum of all C_YTD_PAYMENT.
-- Because the PAYMENT can happen remote, we cannot match those
-- up by DISTRICT.
--
-- NOTE(review): unlike the other checks this query returns no "count"
-- column -- confirm that nothing parsing this output expects one.
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    CASE count(*) WHEN 0 THEN 'sum(w_ytd) = sum(c_ytd_payment)'
                  ELSE 'sum(w_ytd) <> sum(c_ytd_payment)' END AS "Description"
FROM (
    SELECT sum_w_ytd, sum_c_ytd_payment
    FROM (SELECT sum(w_ytd) AS sum_w_ytd FROM bmsql_warehouse) AS W,
         (SELECT sum(c_ytd_payment) AS sum_c_ytd_payment FROM bmsql_customer) AS C
    WHERE sum_w_ytd <> sum_c_ytd_payment
) AS X;
105 |
-- ----------------------------------------------------------------------
-- Test 8
--
-- The C_BALANCE of a CUSTOMER must be equal to the sum(OL_AMOUNT) of
-- all delivered ORDER_LINES (where OL_DELIVERY_D IS NOT NULL) minus
-- the sum(H_AMOUNT).
--
-- The Description previously said "undelivered orders"; the query (and
-- the comment above) sums delivered order lines, so the text is fixed
-- to say "delivered".
-- ----------------------------------------------------------------------
SELECT CASE count(*) WHEN 0 THEN 'OK ' ELSE 'ERROR' END AS "check",
    count(*) AS "count",
    'Customers where C_BALANCE <> sum(OL_AMOUNT) of delivered orders minus sum(H_AMOUNT)' AS "Description"
FROM (
    SELECT c_w_id, c_d_id, c_id, coalesce(sum_ol_amount, 0.0) AS sum_ol_amount,
           coalesce(sum_h_amount, 0.0) AS sum_h_amount
    FROM bmsql_customer
    LEFT JOIN (
        SELECT o_w_id, o_d_id, o_c_id, sum(ol_amount) as sum_ol_amount
        FROM bmsql_oorder
        JOIN bmsql_order_line ON ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
        WHERE o_carrier_id IS NOT NULL AND ol_delivery_d IS NOT NULL
        GROUP BY o_w_id, o_d_id, o_c_id
    ) AS OL ON o_w_id = c_w_id AND o_d_id = c_d_id AND o_c_id = c_id
    LEFT JOIN (
        SELECT h_c_w_id, h_c_d_id, h_c_id, sum(h_amount) AS sum_h_amount
        FROM bmsql_history
        GROUP BY h_c_w_id, h_c_d_id, h_c_id
    ) AS H ON h_c_w_id = c_w_id AND h_c_d_id = c_d_id AND h_c_id = c_id
    -- NOTE(review): this WHERE references the raw OL/H join columns,
    -- not the coalesced values above, so a customer with no delivered
    -- order lines or no history rows compares against NULL and is
    -- never flagged -- confirm this is intended.
    WHERE c_balance <> sum_ol_amount - sum_h_amount
) AS X;
134 |
135 |
--------------------------------------------------------------------------------
/run/funcs.sh:
--------------------------------------------------------------------------------
# ----
# $1 is the properties file
# ----
PROPS="$1"
# Quoted: with an unquoted empty ${PROPS} the test degenerates to
# [ ! -f ], which is a non-empty-string test on "-f", not a file check.
if [ ! -f "${PROPS}" ] ; then
    echo "${PROPS}: no such file" >&2
    exit 1
fi
9 |
# ----
# getProp()
#
# Get a config value from the properties file.
# Usage: getProp NAME - prints the value of every NAME= line in ${PROPS}.
# ----
function getProp()
{
    grep "^${1}=" "${PROPS}" | sed -e "s/^${1}=//"
}
19 |
# ----
# setCP()
#
# Determine the CLASSPATH based on the database system and export
# it as myCP. (Comment previously named this function getCP().)
# ----
function setCP()
{
    case "$(getProp db)" in
	firebird)
	    cp="../lib/firebird/*:../lib/*"
	    ;;
	oracle)
	    cp="../lib/oracle/*"
	    # Quoted in case ORACLE_HOME contains spaces.
	    if [ ! -z "${ORACLE_HOME}" -a -d "${ORACLE_HOME}/lib" ] ; then
		cp="${cp}:${ORACLE_HOME}/lib/*"
	    fi
	    cp="${cp}:../lib/*"
	    ;;
	postgres)
	    cp="../lib/postgres/*:../lib/*"
	    ;;
    esac
    myCP=".:${cp}:../dist/*"
    export myCP
}
45 |
# ----
# Make sure that the properties file does have db= and the value
# is a database, we support.
# ----
dbtype="$(getProp db)"
case "${dbtype}" in
    firebird|oracle|postgres)
	;;
    "")
	echo "ERROR: missing db= config option in ${PROPS}" >&2
	exit 1
	;;
    *)
	echo "ERROR: unsupported database type 'db=${dbtype}' in ${PROPS}" >&2
	exit 1
	;;
esac
60 |
61 |
--------------------------------------------------------------------------------
/run/generateGraphs.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# ----
# Script to generate the detail graphs of a BenchmarkSQL run.
#
# Copyright (C) 2016, Denis Lussier
# Copyright (C) 2016, Jan Wieck
# ----

if [ $# -lt 1 ] ; then
    echo "usage: $(basename $0) RESULT_DIR [SKIP_MINUTES]" >&2
    exit 2
fi

if [ $# -gt 1 ] ; then
    SKIP=$2
else
    SKIP=0
fi

WIDTH=12
HEIGHT=6
POINTSIZE=12

SIMPLE_GRAPHS="tpm_nopm latency cpu_utilization dirty_buffers"

resdir="$1"
cd "${resdir}" || exit 1

# ----
# generateGraph RSCRIPT OUTNAME DEVNAME
#
# Substitute the @...@ placeholders in ../misc/RSCRIPT.R and feed the
# result to R, which renders OUTNAME into the result directory.
# DEVNAME may be empty for the per-run graphs (their scripts are not
# expected to contain @DEVICE@). Exits with status 3 on R error.
#
# Note: the previous inline versions of this code announced three of
# the outputs as "*.svn"; the R scripts write *.svg.
# ----
function generateGraph()
{
    local rscript="$1"
    local outname="$2"
    local devname="$3"

    echo -n "Generating ${resdir}/${outname} ... "
    out=$(sed -e "s/@WIDTH@/${WIDTH}/g" \
	      -e "s/@HEIGHT@/${HEIGHT}/g" \
	      -e "s/@POINTSIZE@/${POINTSIZE}/g" \
	      -e "s/@SKIP@/${SKIP}/g" \
	      -e "s/@DEVICE@/${devname}/g" \
	      <../misc/${rscript}.R | R --no-save)
    if [ $? -ne 0 ] ; then
	echo "ERROR"
	echo "$out" >&2
	exit 3
    fi
    echo "OK"
}

# Graphs that exist once per run.
for graph in $SIMPLE_GRAPHS ; do
    generateGraph ${graph} ${graph}.svg ""
done

# IOPS and KBPS graphs for each recorded block device.
for fname in ./data/blk_*.csv ; do
    if [ ! -f "${fname}" ] ; then
	continue
    fi
    devname=$(basename ${fname} .csv)

    generateGraph blk_device_iops ${devname}_iops.svg ${devname}
    generateGraph blk_device_kbps ${devname}_kbps.svg ${devname}
done

# IOPS and KBPS graphs for each recorded network device.
for fname in ./data/net_*.csv ; do
    if [ ! -f "${fname}" ] ; then
	continue
    fi
    devname=$(basename ${fname} .csv)

    generateGraph net_device_iops ${devname}_iops.svg ${devname}
    generateGraph net_device_kbps ${devname}_kbps.svg ${devname}
done

cd ..
--------------------------------------------------------------------------------
/run/generateReport.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# ----
# Generate the HTML report for a BenchmarkSQL run.
# Usage: generateReport.sh RESULT_DIR [SKIP_MINUTES]
# ----

if [ $# -lt 1 ] ; then
    echo "usage: $(basename $0) RESULT_DIR [SKIP_MINUTES]" >&2
    exit 2
fi

# Width used for the HTML tables in the report.
TABLE_WIDTH="1100px"
# Minutes to skip at the start of the collected data (default 0).
if [ $# -gt 1 ] ; then
    SKIP=$2
else
    SKIP=0
fi
14 |
# ----
# getRunInfo COLUMN
#
# Print the value of the named column from the second line (the data
# row) of data/runInfo.csv.
# ----
function getRunInfo()
{
    # Open the CSV on fd 3 and read the header line.
    exec 3< data/runInfo.csv
    read hdrs <&3
    # Turn the comma separated header into a list of shell variable names.
    hdrs=$(echo ${hdrs} | tr ',' ' ')
    # Read the data row, assigning each field to its header variable.
    IFS=, read $hdrs <&3
    # NOTE(review): this moves fd 3 onto stdin rather than just closing
    # fd 3 (that would be "exec 3<&-") -- presumably meant as a close;
    # confirm.
    exec <&3-

    # Expand the variable whose name was passed as $1.
    eval echo "\$$1"
}
25 |
# ----
# getRunInfoColumns
#
# Print the column names from the header line of data/runInfo.csv,
# separated by spaces.
# ----
function getRunInfoColumns()
{
    local cols

    exec 3< data/runInfo.csv
    read cols <&3
    exec <&3-

    echo ${cols} | tr ',' ' '
}
35 |
# ----
# getProp NAME
#
# Print the value of every NAME= setting found in run.properties.
# ----
function getProp()
{
    sed -n -e "s/^${1}=//p" run.properties
}
40 |
41 | ./generateGraphs.sh "${1}" $SKIP
42 | cd "${1}"
43 | echo -n "Generating ${1}/report.html ... "
44 |
45 | # ----
46 | # Start the report.
47 | # ----
48 | cat >report.html <<_EOF_
49 |
50 |
51 |
52 | BenchmarkSQL Run #$(getRunInfo run) started $(getRunInfo sessionStart)
53 |
54 |
95 |
96 |
97 |
98 | BenchmarkSQL Run #$(getRunInfo run) started $(getRunInfo sessionStart)
99 |
100 |
101 |
102 | This TPC-C style benchmark run was performed by the "$(getRunInfo driver)"
103 | driver of BenchmarkSQL version $(getRunInfo driverVersion).
104 |
105 | _EOF_
106 |
107 | # ----
108 | # Show the run properties.
109 | # ----
110 | cat >>report.html <<_EOF_
111 |
112 | Run Properties
113 |
114 |
115 |
116 |
117 |
118 | _EOF_
119 | sed -e 's/^password=.*/password=\*\*\*\*\*\*/' >report.html
120 | cat >>report.html <<_EOF_
121 |
122 | |
123 |
124 |
125 |
126 | _EOF_
127 |
128 | # ----
129 | # Show the result summary.
130 | # ----
131 | cat >>report.html <<_EOF_
132 |
133 | Result Summary
134 |
135 | _EOF_
136 |
137 | if [ $(getRunInfo driver) == "simple" ] ; then
138 | cat >> report.html <<_EOF_
139 |
140 | Note that the "simple" driver is not a true TPC-C implementation.
141 | This driver only measures the database response time, not the
142 | response time of a System under Test as it would be experienced
143 | by an end-user in a 3-tier test implementation.
144 |
145 | _EOF_
146 | fi
147 |
148 | cat >> report.html <<_EOF_
149 |
150 |
151 |
152 | Transaction Type |
153 | Latency |
154 | Count |
155 | Percent |
156 | Rollback |
157 | Errors |
158 | Skipped Deliveries |
159 |
160 |
161 | 90th % |
162 | Avg |
163 | Max |
164 |
165 | _EOF_
166 |
167 | tr ',' ' ' "
174 | echo " ${name} | "
175 | echo " ${ninth} | "
176 | echo " ${avg} | "
177 | echo " ${max} | "
178 | echo " ${count} | "
179 | echo " ${percent} | "
180 | echo " ${rbk} | "
181 | echo " ${error} | "
182 | echo " ${dskipped} | "
183 | echo " "
184 | done >>report.html
185 |
186 | tpmC=$(grep "^tpmC," data/tx_summary.csv | sed -e 's/[^,]*,//' -e 's/,.*//')
187 | tpmCpct=$(grep "^tpmC," data/tx_summary.csv | sed -e 's/[^,]*,[^,]*,//' -e 's/,.*//')
188 | tpmTotal=$(grep "^tpmTotal," data/tx_summary.csv | sed -e 's/[^,]*,//' -e 's/,.*//')
189 | cat >>report.html <<_EOF_
190 |
191 |
192 |
193 |
194 |
195 |
196 | Overall tpmC: |
197 | ${tpmC} |
198 |
199 |
200 | Overall tpmTotal: |
201 | ${tpmTotal} |
202 |
203 |
204 |
205 |
206 | The TPC-C specification has an theoretical maximum of 12.86 NEW_ORDER
207 | transactions per minute per warehouse. In reality this value cannot
208 | be reached because it would require a perfect mix with 45% of NEW_ORDER
209 | transactions and a ZERO response time from the System under Test
210 | including the database.
211 |
212 |
213 | The above tpmC of ${tpmC} is ${tpmCpct} of that theoretical maximum for a
214 | database with $(getRunInfo runWarehouses) warehouses.
215 |
216 |
217 | _EOF_
218 |
219 | # ----
220 | # Show the graphs for tpmC/tpmTOTAL and latency.
221 | # ----
222 | cat >>report.html <<_EOF_
223 |
224 | Transactions per Minute and Transaction Latency
225 |
226 |
227 | tpmC is the number of NEW_ORDER Transactions, that where processed
228 | per minute. tpmTOTAL is the number of Transactions processed per
229 | minute for all transaction types, but without the background part
230 | of the DELIVERY transaction.
231 |
232 |
233 |
234 |
235 |
236 |
237 | _EOF_
238 |
239 | # ----
240 | # Add all the System Resource graphs. First the CPU and dirty buffers.
241 | # ----
242 | cat >>report.html <<_EOF_
243 |
244 | System Resource Usage
245 |
246 |
247 | CPU Utilization
248 |
249 |
250 |
251 |
252 | Overall Average CPU Utilization |
253 |
254 | _EOF_
255 |
256 | tr ',' ' ' "
260 | echo " ${category} | "
261 | echo " ${value} | "
262 | echo " "
263 | done >>report.html
264 |
265 | cat >>report.html <<_EOF_
266 |
267 |
268 |
269 |
270 |
271 | Note:In the graph below the percentages for User, System and IOWait CPU time are stacked
272 | on top of each other.
273 |
274 |
275 |
276 |
277 |
278 |
279 | Dirty Kernel Buffers
280 |
281 |
282 | We track the number of dirty kernel buffers, as measured by
283 | the "nr_dirty" line in /proc/vmstat, to be able to correlate
284 | IO problems with when the kernel's IO schedulers are flushing
285 | writes to disk. A write(2) system call does not immediately
286 | cause real IO on a storage device. The data written is just
287 | copied into a kernel buffer. Several tuning parameters control
288 | when the OS is actually transferring these dirty buffers to
289 | the IO controller(s) in order to eventually get written to
290 | real disks (or similar).
291 |
292 |
293 |
294 |
295 | _EOF_
296 |
297 | # ----
298 | # Add all the block device IOPS and KBPS
299 | # ---
300 | for devdata in data/blk_*.csv ; do
301 | if [ ! -f "$devdata" ] ; then
302 | break
303 | fi
304 |
305 | dev=$(basename ${devdata} .csv)
306 | cat >>report.html <<_EOF_
307 |
308 | Block Device ${dev}
309 |
310 |
311 |
312 |
313 |
314 |
315 | _EOF_
316 | done
317 |
318 | # ----
319 | # Add all the network device IOPS and KBPS
320 | # ---
321 | for devdata in data/net_*.csv ; do
322 | if [ ! -f "$devdata" ] ; then
323 | break
324 | fi
325 |
326 | dev=$(basename ${devdata} .csv)
327 | cat >>report.html <<_EOF_
328 |
329 | Network Device ${dev}
330 |
331 |
332 |
333 |
334 |
335 |
336 | _EOF_
337 | done
338 |
339 | # ----
340 | # Finish the document.
341 | # ----
342 | cat >>report.html <<_EOF_
343 |
344 |
345 |
346 | _EOF_
347 |
348 | # ----
349 | # Copy the report to the name of the result directory as .html
350 | # We do the dirname-trick to strip any trailing / that might
351 | # have resulted from tab-completion.
352 | # ----
353 | cp report.html ../$(dirname ${1}/.dummy).html
354 |
355 | echo "OK"
356 |
--------------------------------------------------------------------------------
/run/log4j.properties:
--------------------------------------------------------------------------------
1 | # log4j.rootLogger=TRACE, CONSOLE, E, T
2 | log4j.rootLogger=INFO, CONSOLE, E
3 |
4 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
5 | log4j.appender.CONSOLE.Threshold=INFO
6 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.CONSOLE.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n
8 |
9 | log4j.appender.E=org.apache.log4j.RollingFileAppender
10 | log4j.appender.E.Threshold=WARN
11 | log4j.appender.E.File=benchmarksql-error.log
12 | log4j.appender.E.MaxFileSize=100MB
13 | log4j.appender.E.MaxBackupIndex=1
14 | log4j.appender.E.layout=org.apache.log4j.PatternLayout
15 | log4j.appender.E.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n
16 |
17 | log4j.appender.T=org.apache.log4j.FileAppender
18 | log4j.appender.T.Threshold=TRACE
19 | log4j.appender.T.File=benchmarksql-trace.log
20 | log4j.appender.T.append=false
21 | log4j.appender.T.layout=org.apache.log4j.PatternLayout
22 | log4j.appender.T.layout.ConversionPattern= %d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n
23 |
24 |
--------------------------------------------------------------------------------
/run/misc/blk_device_iops.R:
--------------------------------------------------------------------------------
# ----
# R graph to show IOPS of a block device.
#
# The @NAME@ tokens are placeholders that generateGraphs.sh replaces
# with the actual run parameters before feeding the script to R.
# ----

# ----
# Read the runInfo.csv file.
# ----
runInfo <- read.csv("data/runInfo.csv", head=TRUE)

# ----
# Determine the grouping interval in seconds based on the
# run duration. Pick the smallest interval that keeps the
# number of plotted points at or below 1000.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
# Bucket size and skip offset converted to milliseconds, the unit of
# the "elapsed" column used below.
idiv <- interval * 1000.0
skip <- xmin * 60000
23 |
# ----
# Read the recorded IO data for the block device, drop the skipped
# rampup samples and aggregate (mean) per grouping interval.
#
# The previous version also evaluated rawData/aggReads/aggWrites as
# bare expressions, dumping the full data frames into the R output
# captured by generateGraphs.sh -- debug leftovers, removed.
# ----
rawData <- read.csv("data/@DEVICE@.csv", head=TRUE)
rawData <- rawData[rawData$elapsed >= skip, ]
aggReads <- setNames(aggregate(rawData$rdiops,
	    list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
	    c('elapsed', 'rdiops'))
aggWrites <- setNames(aggregate(rawData$wriops,
	    list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
	    c('elapsed', 'wriops'))
39 |
# ----
# Determine the ymax by increasing in sqrt(2) steps until the
# maximum of both IOPS fits. Then multiply that with 1.2 to
# give a little head room for the legend.
# ----
ymax_rd <- max(aggReads$rdiops)
ymax_wr <- max(aggWrites$wriops)
ymax <- 1
sqrt2 <- sqrt(2.0)
while (ymax < ymax_rd || ymax < ymax_wr) {
    ymax <- ymax * sqrt2
}
# Only add the 20% head room if the sqrt(2) rounding did not already
# provide at least that much slack.
if (ymax < (ymax_rd * 1.2) || ymax < (ymax_wr * 1.2)) {
    ymax <- ymax * 1.2
}
55 |
56 |
57 |
58 | # ----
59 | # Start the output image.
60 | # ----
61 | svg("@DEVICE@_iops.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
62 | par(mar=c(4,4,4,4), xaxp=c(10,200,19))
63 |
64 | # ----
65 | # Plot the RDIOPS
66 | # ----
67 | plot (
68 | aggReads$elapsed / 60000.0, aggReads$rdiops,
69 | type='l', col="blue3", lwd=2,
70 | axes=TRUE,
71 | xlab="Elapsed Minutes",
72 | ylab="IO Operations per Second",
73 | xlim=c(xmin, xmax),
74 | ylim=c(0, ymax)
75 | )
76 |
77 | # ----
78 | # Plot the WRIOPS
79 | # ----
80 | par (new=T)
81 | plot (
82 | aggWrites$elapsed / 60000.0, aggWrites$wriops,
83 | type='l', col="red3", lwd=2,
84 | axes=FALSE,
85 | xlab="",
86 | ylab="",
87 | xlim=c(xmin, xmax),
88 | ylim=c(0, ymax)
89 | )
90 |
91 | # ----
92 | # Add legend, title and other decorations.
93 | # ----
94 | legend ("topleft",
95 | c("Read Operations on @DEVICE@", "Write Operations on @DEVICE@"),
96 | fill=c("blue3", "red3"))
97 | title (main=c(
98 | paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
99 | "Block Device @DEVICE@ IOPS"
100 | ))
101 | grid()
102 | box()
103 |
--------------------------------------------------------------------------------
/run/misc/blk_device_kbps.R:
--------------------------------------------------------------------------------
# ----
# R graph showing read/write kilobytes per second for one block device.
# ----

# ----
# Load the run information.
# ----
runInfo <- read.csv("data/runInfo.csv", header=TRUE)

# ----
# Pick an aggregation interval (in seconds) so that the whole
# run yields at most 1000 data points.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Load the collected block device statistics, drop the warmup
# rows and average the read/write Kb/s per interval bucket.
# ----
ioData <- read.csv("data/@DEVICE@.csv", header=TRUE)
ioData <- ioData[ioData$elapsed >= skip, ]
bucket <- trunc(ioData$elapsed / idiv) * idiv
readAgg <- setNames(aggregate(ioData$rdkbps, list(elapsed=bucket), mean),
                    c('elapsed', 'rdkbps'))
writeAgg <- setNames(aggregate(ioData$wrkbps, list(elapsed=bucket), mean),
                     c('elapsed', 'wrkbps'))

# ----
# Grow ymax in sqrt(2) steps until the larger of the two peak
# rates fits, then add 20% head room for the legend.
# ----
peak <- max(max(readAgg$rdkbps), max(writeAgg$wrkbps))
ymax <- 1
while (ymax < peak) {
    ymax <- ymax * sqrt(2.0)
}
if (ymax < peak * 1.2) {
    ymax <- ymax * 1.2
}

# ----
# Open the SVG output device.
# ----
svg("@DEVICE@_kbps.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Draw the read rate (this plot also draws the axes).
# ----
plot (
    readAgg$elapsed / 60000.0, readAgg$rdkbps,
    type='l', col="blue3", lwd=2,
    axes=TRUE,
    xlab="Elapsed Minutes",
    ylab="Kilobytes per Second",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Overlay the write rate in the same coordinate system.
# ----
par (new=T)
plot (
    writeAgg$elapsed / 60000.0, writeAgg$wrkbps,
    type='l', col="red3", lwd=2,
    axes=FALSE,
    xlab="",
    ylab="",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Legend, title and decorations.
# ----
legend ("topleft",
        c("Read Kb/s on @DEVICE@", "Write Kb/s on @DEVICE@"),
        fill=c("blue3", "red3"))
title (main=c(
    paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
    "Block Device @DEVICE@ Kb/s"
))
grid()
box()
--------------------------------------------------------------------------------
/run/misc/cpu_utilization.R:
--------------------------------------------------------------------------------
# ----
# R graph to show CPU utilization
# ----

# ----
# Read the runInfo.csv file.
# ----
runInfo <- read.csv("data/runInfo.csv", head=TRUE)

# ----
# Determine the grouping interval in seconds based on the
# run duration. The smallest interval from the list that
# produces at most 1000 data points is used.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Read the recorded CPU data, drop the warmup rows and
# aggregate it for the desired interval.
# ----
rawData <- read.csv("data/sys_info.csv", head=TRUE)
rawData <- rawData[rawData$elapsed >= skip, ]
aggUser <- setNames(aggregate(rawData$cpu_user,
		list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
		c('elapsed', 'cpu_user'))
aggSystem <- setNames(aggregate(rawData$cpu_system,
		list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
		c('elapsed', 'cpu_system'))
aggWait <- setNames(aggregate(rawData$cpu_iowait,
		list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
		c('elapsed', 'cpu_wait'))

# ----
# ymax is 100%
# ----
ymax <- 100

# ----
# Start the output image.
# ----
svg("cpu_utilization.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Plot USER+SYSTEM+WAIT as the bottom layer. The two plots below
# draw on top of it, so the visible bands appear stacked.
# ----
plot (
	aggUser$elapsed / 60000.0, (aggUser$cpu_user + aggSystem$cpu_system + aggWait$cpu_wait) * 100.0,
	type='l', col="red3", lwd=2,
	axes=TRUE,
	xlab="Elapsed Minutes",
	ylab="CPU Utilization in Percent",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the USER+SYSTEM
# ----
par (new=T)
plot (
	aggUser$elapsed / 60000.0, (aggUser$cpu_user + aggSystem$cpu_system) * 100.0,
	type='l', col="cyan3", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the USER
# ----
par (new=T)
plot (
	aggUser$elapsed / 60000.0, aggUser$cpu_user * 100.0,
	type='l', col="blue3", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Add legend, title and other decorations.
# ----
legend ("topleft",
	c("% User", "% System", "% IOWait"),
	fill=c("blue3", "cyan3", "red3"))
title (main=c(
	paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
	"CPU Utilization"
	))
grid()
box()

# ----
# Generate the CPU utilization summary and write it to data/cpu_summary.csv
# ----
cpu_category <- c(
    'cpu_user',
    'cpu_system',
    'cpu_iowait',
    'cpu_idle',
    'cpu_nice',
    'cpu_irq',
    'cpu_softirq',
    'cpu_steal',
    'cpu_guest',
    'cpu_guest_nice'
)
cpu_usage <- c(
    sprintf("%.3f%%", mean(rawData$cpu_user) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_system) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_iowait) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_idle) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_nice) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_irq) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_softirq) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_steal) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_guest) * 100.0),
    sprintf("%.3f%%", mean(rawData$cpu_guest_nice) * 100.0)
)
cpu_info <- data.frame(
    cpu_category,
    cpu_usage
)
write.csv(cpu_info, file = "data/cpu_summary.csv", quote = FALSE, na = "N/A",
          row.names = FALSE)
--------------------------------------------------------------------------------
/run/misc/dirty_buffers.R:
--------------------------------------------------------------------------------
# ----
# R graph to show number of dirty kernel buffers
# ----

# ----
# Read the runInfo.csv file.
# ----
runInfo <- read.csv("data/runInfo.csv", head=TRUE)

# ----
# Determine the grouping interval in seconds based on the
# run duration. The smallest interval from the list that
# produces at most 1000 data points is used.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Read the recorded vmstat data (dirty buffer counts), drop the
# warmup rows and aggregate it for the desired interval.
# ----
rawData <- read.csv("data/sys_info.csv", head=TRUE)
rawData <- rawData[rawData$elapsed >= skip, ]
aggDirty <- setNames(aggregate(rawData$vm_nr_dirty,
		list(elapsed=trunc(rawData$elapsed / idiv) * idiv), mean),
		c('elapsed', 'vm_nr_dirty'))

# ----
# Determine ymax by increasing in sqrt(2) steps until the peak
# fits. Then multiply with 1.2 for legend head room.
# ----
ymax_dirty <- max(aggDirty$vm_nr_dirty)
sqrt2 <- sqrt(2.0)
ymax <- 1
while (ymax < ymax_dirty) {
    ymax <- ymax * sqrt2
}
if (ymax < (ymax_dirty * 1.2)) {
    ymax <- ymax * 1.2
}

# ----
# Start the output image.
# ----
svg("dirty_buffers.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Plot dirty buffers
# ----
plot (
	aggDirty$elapsed / 60000.0, aggDirty$vm_nr_dirty,
	type='l', col="red3", lwd=2,
	axes=TRUE,
	xlab="Elapsed Minutes",
	ylab="Number dirty kernel buffers",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Add legend, title and other decorations.
# ----
legend ("topleft",
	c("vmstat nr_dirty"),
	fill=c("red3"))
title (main=c(
	paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
	"Dirty Kernel Buffers"
	))
grid()
box()
--------------------------------------------------------------------------------
/run/misc/latency.R:
--------------------------------------------------------------------------------
# ----
# R graph to show latency of all transaction types.
# Also generates the per-transaction-type summary CSV.
# ----

# ----
# Read the runInfo.csv file.
# ----
runInfo <- read.csv("data/runInfo.csv", head=TRUE)

# ----
# Determine the grouping interval in seconds based on the
# run duration.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Read the result.csv and then filter the raw data
# by transaction type
# ----
rawData <- read.csv("data/result.csv", head=TRUE)
rawData <- rawData[rawData$elapsed >= skip, ]
noBGData <- rawData[rawData$ttype != 'DELIVERY_BG', ]
newOrder <- rawData[rawData$ttype == 'NEW_ORDER', ]
payment <- rawData[rawData$ttype == 'PAYMENT', ]
orderStatus <- rawData[rawData$ttype == 'ORDER_STATUS', ]
stockLevel <- rawData[rawData$ttype == 'STOCK_LEVEL', ]
delivery <- rawData[rawData$ttype == 'DELIVERY', ]
deliveryBG <- rawData[rawData$ttype == 'DELIVERY_BG', ]

# ----
# Aggregate the latency grouped by interval.
# ----
aggNewOrder <- setNames(aggregate(newOrder$latency, list(elapsed=trunc(newOrder$elapsed / idiv) * idiv), mean),
			c('elapsed', 'latency'));
aggPayment <- setNames(aggregate(payment$latency, list(elapsed=trunc(payment$elapsed / idiv) * idiv), mean),
			c('elapsed', 'latency'));
aggOrderStatus <- setNames(aggregate(orderStatus$latency, list(elapsed=trunc(orderStatus$elapsed / idiv) * idiv), mean),
			c('elapsed', 'latency'));
aggStockLevel <- setNames(aggregate(stockLevel$latency, list(elapsed=trunc(stockLevel$elapsed / idiv) * idiv), mean),
			c('elapsed', 'latency'));
aggDelivery <- setNames(aggregate(delivery$latency, list(elapsed=trunc(delivery$elapsed / idiv) * idiv), mean),
			c('elapsed', 'latency'));

# ----
# Determine the ymax by increasing in sqrt(2) steps until 98%
# of ALL latencies fit into the graph. Then multiply with 1.2
# to give some headroom for the legend.
# ----
ymax_total <- quantile(noBGData$latency, probs = 0.98)

ymax <- 1
sqrt2 <- sqrt(2.0)
while (ymax < ymax_total) {
    ymax <- ymax * sqrt2
}
if (ymax < (ymax_total * 1.2)) {
    ymax <- ymax * 1.2
}

# ----
# Start the output image.
# ----
svg("latency.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Plot the Delivery latency graph (this plot also draws the axes;
# the following overlays draw into the same coordinate system).
# ----
plot (
	aggDelivery$elapsed / 60000.0, aggDelivery$latency,
	type='l', col="blue3", lwd=2,
	axes=TRUE,
	xlab="Elapsed Minutes",
	ylab="Latency in Milliseconds",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the StockLevel latency graph.
# ----
par(new=T)
plot (
	aggStockLevel$elapsed / 60000.0, aggStockLevel$latency,
	type='l', col="gray70", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the OrderStatus latency graph.
# ----
par(new=T)
plot (
	aggOrderStatus$elapsed / 60000.0, aggOrderStatus$latency,
	type='l', col="green3", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the Payment latency graph.
# ----
par(new=T)
plot (
	aggPayment$elapsed / 60000.0, aggPayment$latency,
	type='l', col="magenta3", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Plot the NewOrder latency graph (drawn last so it sits on top).
# ----
par(new=T)
plot (
	aggNewOrder$elapsed / 60000.0, aggNewOrder$latency,
	type='l', col="red3", lwd=2,
	axes=FALSE,
	xlab="",
	ylab="",
	xlim=c(xmin, xmax),
	ylim=c(0, ymax)
)

# ----
# Add legend, title and other decorations.
# ----
legend ("topleft",
	c("NEW_ORDER", "PAYMENT", "ORDER_STATUS", "STOCK_LEVEL", "DELIVERY"),
	fill=c("red3", "magenta3", "green3", "gray70", "blue3"))
title (main=c(
	paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
	"Transaction Latency"
	))
grid()
box()

# ----
# Generate the transaction summary and write it to
# data/tx_summary.csv
# ----
tx_total <- NROW(noBGData)

tx_name <- c(
	'NEW_ORDER',
	'PAYMENT',
	'ORDER_STATUS',
	'STOCK_LEVEL',
	'DELIVERY',
	'DELIVERY_BG',
	'tpmC',
	'tpmTotal')
# NOTE(review): tpmC and tpmTotal divide by the full runMins even though
# transactions from the first @SKIP@ minutes were filtered out above.
# With a non-zero skip this under-reports throughput — confirm intended.
tx_count <- c(
	NROW(newOrder),
	NROW(payment),
	NROW(orderStatus),
	NROW(stockLevel),
	NROW(delivery),
	NROW(deliveryBG),
	sprintf("%.2f", NROW(newOrder) / runInfo$runMins),
	sprintf("%.2f", NROW(noBGData) / runInfo$runMins))
# The last percentage is tpmC relative to warehouses; 0.1286 is
# presumably the theoretical max NEW_ORDER per warehouse-minute
# from the TPC-C specification — confirm against the spec.
tx_percent <- c(
	sprintf("%.3f%%", NROW(newOrder) / tx_total * 100.0),
	sprintf("%.3f%%", NROW(payment) / tx_total * 100.0),
	sprintf("%.3f%%", NROW(orderStatus) / tx_total * 100.0),
	sprintf("%.3f%%", NROW(stockLevel) / tx_total * 100.0),
	sprintf("%.3f%%", NROW(delivery) / tx_total * 100.0),
	NA,
	sprintf("%.3f%%", NROW(newOrder) / runInfo$runMins /
		runInfo$runWarehouses / 0.1286),
	NA)
# 90th percentile latency per transaction type, in seconds.
tx_90th <- c(
	sprintf("%.3fs", quantile(newOrder$latency, probs=0.90) / 1000.0),
	sprintf("%.3fs", quantile(payment$latency, probs=0.90) / 1000.0),
	sprintf("%.3fs", quantile(orderStatus$latency, probs=0.90) / 1000.0),
	sprintf("%.3fs", quantile(stockLevel$latency, probs=0.90) / 1000.0),
	sprintf("%.3fs", quantile(delivery$latency, probs=0.90) / 1000.0),
	sprintf("%.3fs", quantile(deliveryBG$latency, probs=0.90) / 1000.0),
	NA, NA)
tx_avg <- c(
	sprintf("%.3fs", mean(newOrder$latency) / 1000.0),
	sprintf("%.3fs", mean(payment$latency) / 1000.0),
	sprintf("%.3fs", mean(orderStatus$latency) / 1000.0),
	sprintf("%.3fs", mean(stockLevel$latency) / 1000.0),
	sprintf("%.3fs", mean(delivery$latency) / 1000.0),
	sprintf("%.3fs", mean(deliveryBG$latency) / 1000.0),
	NA, NA)
tx_max <- c(
	sprintf("%.3fs", max(newOrder$latency) / 1000.0),
	sprintf("%.3fs", max(payment$latency) / 1000.0),
	sprintf("%.3fs", max(orderStatus$latency) / 1000.0),
	sprintf("%.3fs", max(stockLevel$latency) / 1000.0),
	sprintf("%.3fs", max(delivery$latency) / 1000.0),
	sprintf("%.3fs", max(deliveryBG$latency) / 1000.0),
	NA, NA)
# Presumably the per-type response time limits in seconds from the
# TPC-C specification — confirm.
tx_limit <- c("5.0", "5.0", "5.0", "20.0", "5.0", "80.0", NA, NA)
# Rollback percentage only applies to NEW_ORDER.
tx_rbk <- c(
	sprintf("%.3f%%", sum(newOrder$rbk) / NROW(newOrder) * 100.0),
	NA, NA, NA, NA, NA, NA, NA)
tx_error <- c(
	sum(newOrder$error),
	sum(payment$error),
	sum(orderStatus$error),
	sum(stockLevel$error),
	sum(delivery$error),
	sum(deliveryBG$error),
	NA, NA)
# Skipped deliveries only apply to the background delivery queue.
tx_dskipped <- c(
	NA, NA, NA, NA, NA,
	sum(deliveryBG$dskipped),
	NA, NA)
tx_info <- data.frame(
	tx_name,
	tx_count,
	tx_percent,
	tx_90th,
	tx_avg,
	tx_max,
	tx_limit,
	tx_rbk,
	tx_error,
	tx_dskipped)

write.csv(tx_info, file = "data/tx_summary.csv", quote = FALSE, na = "N/A",
	row.names = FALSE)
--------------------------------------------------------------------------------
/run/misc/net_device_iops.R:
--------------------------------------------------------------------------------
# ----
# R graph showing RX/TX packets per second for one network device.
# ----

# ----
# Load the run information.
# ----
runInfo <- read.csv("data/runInfo.csv", header=TRUE)

# ----
# Pick an aggregation interval (in seconds) so that the whole
# run yields at most 1000 data points.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Load the collected network statistics, drop the warmup rows
# and average the RX/TX packet rates per interval bucket.
# ----
netData <- read.csv("data/@DEVICE@.csv", header=TRUE)
netData <- netData[netData$elapsed >= skip, ]
bucket <- trunc(netData$elapsed / idiv) * idiv
recvAgg <- setNames(aggregate(netData$rxpktsps, list(elapsed=bucket), mean),
                    c('elapsed', 'rxpktsps'))
sendAgg <- setNames(aggregate(netData$txpktsps, list(elapsed=bucket), mean),
                    c('elapsed', 'txpktsps'))

# ----
# Grow ymax in sqrt(2) steps until the larger of the two peak
# rates fits, then add 20% head room for the legend.
# ----
peak <- max(max(recvAgg$rxpktsps), max(sendAgg$txpktsps))
ymax <- 1
while (ymax < peak) {
    ymax <- ymax * sqrt(2.0)
}
if (ymax < peak * 1.2) {
    ymax <- ymax * 1.2
}

# ----
# Open the SVG output device.
# ----
svg("@DEVICE@_iops.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Draw the RX packet rate (this plot also draws the axes).
# ----
plot (
    recvAgg$elapsed / 60000.0, recvAgg$rxpktsps,
    type='l', col="blue3", lwd=2,
    axes=TRUE,
    xlab="Elapsed Minutes",
    ylab="Packets per Second",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Overlay the TX packet rate in the same coordinate system.
# ----
par (new=T)
plot (
    sendAgg$elapsed / 60000.0, sendAgg$txpktsps,
    type='l', col="red3", lwd=2,
    axes=FALSE,
    xlab="",
    ylab="",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Legend, title and decorations.
# ----
legend ("topleft",
        c("RX Packets/s on @DEVICE@", "TX Packets/s on @DEVICE@"),
        fill=c("blue3", "red3"))
title (main=c(
    paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
    "Network Device @DEVICE@ Packets per Second"
))
grid()
box()
--------------------------------------------------------------------------------
/run/misc/net_device_kbps.R:
--------------------------------------------------------------------------------
# ----
# R graph showing RX/TX kilobytes per second for one network device.
# ----

# ----
# Load the run information.
# ----
runInfo <- read.csv("data/runInfo.csv", header=TRUE)

# ----
# Pick an aggregation interval (in seconds) so that the whole
# run yields at most 1000 data points.
# ----
xmin <- @SKIP@
xmax <- runInfo$runMins
for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
    if ((xmax * 60) / interval <= 1000) {
        break
    }
}
idiv <- interval * 1000.0
skip <- xmin * 60000

# ----
# Load the collected network statistics, drop the warmup rows
# and average the RX/TX Kb/s per interval bucket.
# ----
netData <- read.csv("data/@DEVICE@.csv", header=TRUE)
netData <- netData[netData$elapsed >= skip, ]
bucket <- trunc(netData$elapsed / idiv) * idiv
recvAgg <- setNames(aggregate(netData$rxkbps, list(elapsed=bucket), mean),
                    c('elapsed', 'rxkbps'))
sendAgg <- setNames(aggregate(netData$txkbps, list(elapsed=bucket), mean),
                    c('elapsed', 'txkbps'))

# ----
# Grow ymax in sqrt(2) steps until the larger of the two peak
# rates fits, then add 20% head room for the legend.
# ----
peak <- max(max(recvAgg$rxkbps), max(sendAgg$txkbps))
ymax <- 1
while (ymax < peak) {
    ymax <- ymax * sqrt(2.0)
}
if (ymax < peak * 1.2) {
    ymax <- ymax * 1.2
}

# ----
# Open the SVG output device.
# ----
svg("@DEVICE@_kbps.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
par(mar=c(4,4,4,4), xaxp=c(10,200,19))

# ----
# Draw the RX rate (this plot also draws the axes).
# ----
plot (
    recvAgg$elapsed / 60000.0, recvAgg$rxkbps,
    type='l', col="blue3", lwd=2,
    axes=TRUE,
    xlab="Elapsed Minutes",
    ylab="Kilobytes per Second",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Overlay the TX rate in the same coordinate system.
# ----
par (new=T)
plot (
    sendAgg$elapsed / 60000.0, sendAgg$txkbps,
    type='l', col="red3", lwd=2,
    axes=FALSE,
    xlab="",
    ylab="",
    xlim=c(xmin, xmax),
    ylim=c(0, ymax)
)

# ----
# Legend, title and decorations.
# ----
legend ("topleft",
        c("RX Kb/s on @DEVICE@", "TX Kb/s on @DEVICE@"),
        fill=c("blue3", "red3"))
title (main=c(
    paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
    "Network Device @DEVICE@ Kb per Second"
))
grid()
box()
--------------------------------------------------------------------------------
/run/misc/os_collector_linux.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # ----------------------------------------------------------------------
3 | # os_collector_linux.py -
4 | #
5 | # Script used to collect OS level resource utilization data like
6 | # CPU usage and disk IO.
7 | #
8 | # This code is used in the jTPCCOSCollect class. It is launched as
9 | # a separate process, possibly via ssh(1) on the remote database
10 | # server. The ability of Python to receive a script to execute on
11 | # stdin allows us to execute this script via ssh(1) on the database
12 | # server without installing any programs/scripts there.
13 | #
14 | # The command line arguments for this script are the runID, the
15 | # interval in seconds at which to collect information and a variable
# number of devices in the form "blk_<device>" or "net_<device>",
# for example "blk_sda" for the first SCSI disk or "net_eth0".
18 | #
19 | # The output on stdout is one line for CPU/VM info, followed by a
20 | # line for each of the specified devices in CSV format. The first
21 | # set of lines are the CSV headers. The output is prefixed with the
22 | # runID, elapsed_ms and for the devices the blk_ or net_ name that
23 | # was specified on the command line. This format makes it easy to
24 | # load the data into a result database where it can be analyzed
25 | # together with the BenchmarkSQL per transaction results and compared
26 | # to other benchmark runs.
27 | #
28 | # It is the caller's responsibility to split the output lines into
29 | # separate result CSV files.
30 | # ----------------------------------------------------------------------
31 |
32 | import errno
33 | import math
34 | import os
35 | import sys
36 | import time
37 |
38 | # ----
39 | # main
40 | # ----
def main(argv):
    """Collect OS level resource usage and emit it as CSV on stdout.

    argv[0] is the runID, argv[1] the collection interval in seconds,
    argv[2:] are device names of the form blk_<dev> or net_<dev>.
    One CSV header line is printed for the CPU/vmstat data and one per
    device; then, every interval, one data line for each of them.
    Returns 0 on normal termination (Ctrl-C or closed stdout).
    """
    global deviceFDs
    global lastDeviceData

    # ----
    # Get the runID and collection interval from the command line
    # ----
    runID = (int)(argv[0])
    interval = (float)(argv[1])

    # ----
    # Our start time is now. Since most of the information is deltas
    # we can only produce the first data after the first interval.
    # ----
    startTime = time.time();
    nextDue = startTime + interval

    # ----
    # Initialize CPU and vmstat collection and output the CSV header.
    # ----
    sysInfo = ['run', 'elapsed', ]
    sysInfo += initSystemUsage()
    print ",".join([str(x) for x in sysInfo])

    # ----
    # Get all the devices from the command line.
    # ----
    devices = []
    deviceFDs = {}
    lastDeviceData = {}
    for dev in argv[2:]:
        if dev.startswith('blk_'):
            devices.append(dev)
        elif dev.startswith('net_'):
            devices.append(dev)
        else:
            raise Exception("unknown device type '" + dev + "'")

    # ----
    # Initialize usage collection per device depending on the type.
    # Output all the headers in the order, the devices are given.
    # ----
    for dev in devices:
        if dev.startswith('blk_'):
            devInfo = ['run', 'elapsed', 'device', ]
            devInfo += initBlockDevice(dev)
            print ",".join([str(x) for x in devInfo])
        elif dev.startswith('net_'):
            devInfo = ['run', 'elapsed', 'device', ]
            devInfo += initNetDevice(dev)
            print ",".join([str(x) for x in devInfo])

    # ----
    # Flush all header lines.
    # ----
    sys.stdout.flush()

    try:
        while True:
            # ----
            # Wait until our next collection interval and calculate the
            # elapsed time in milliseconds.
            # ----
            now = time.time()
            if nextDue > now:
                time.sleep(nextDue - now)
            # elapsed is based on the scheduled (not actual) wakeup time,
            # so the reported timestamps stay on a fixed grid.
            elapsed = (int)((nextDue - startTime) * 1000.0)

            # ----
            # Collect CPU and vmstat information.
            # ----
            sysInfo = [runID, elapsed, ]
            sysInfo += getSystemUsage()
            print ",".join([str(x) for x in sysInfo])

            # ----
            # Collect all device utilization data.
            # ----
            for dev in devices:
                if dev.startswith('blk_'):
                    devInfo = [runID, elapsed, dev, ]
                    devInfo += getBlockUsage(dev, interval)
                    print ",".join([str(x) for x in devInfo])
                elif dev.startswith('net_'):
                    devInfo = [runID, elapsed, dev, ]
                    devInfo += getNetUsage(dev, interval)
                    print ",".join([str(x) for x in devInfo])

            # ----
            # Bump the time when we are next due.
            # ----
            nextDue += interval

            sys.stdout.flush()

    # ----
    # Running on the command line for test purposes?
    # ----
    except KeyboardInterrupt:
        print ""
        return 0

    # ----
    # The OSCollector class will just close our stdout on the other
    # side, so this is expected.
    # ----
    except IOError as e:
        if e.errno == errno.EPIPE:
            return 0
        else:
            raise e
152 |
def initSystemUsage():
    # Open /proc/stat and /proc/vmstat, record the first readings as
    # the baseline for later delta computation and return the list of
    # CSV column names that getSystemUsage() will emit.
    global procStatFD
    global procVMStatFD
    global lastStatData
    global lastVMStatData

    # Unbuffered handle; getSystemUsage() rewinds and re-reads it.
    procStatFD = open("/proc/stat", "r", buffering = 0)
    for line in procStatFD:
        line = line.split()
        if line[0] == "cpu":
            # Aggregate "cpu" line: tick counters per category since boot.
            lastStatData = [int(x) for x in line[1:]]
            break
    if len(lastStatData) != 10:
        raise Exception("cpu line in /proc/stat too short");

    procVMStatFD = open("/proc/vmstat", "r", buffering = 0)
    lastVMStatData = {}
    for line in procVMStatFD:
        line = line.split()
        if line[0] in ['nr_dirty', ]:
            lastVMStatData['vm_' + line[0]] = int(line[1])
    # Exactly one vmstat key (nr_dirty) is expected.
    if len(lastVMStatData.keys()) != 1:
        raise Exception("not all elements found in /proc/vmstat")

    return [
        'cpu_user', 'cpu_nice', 'cpu_system',
        'cpu_idle', 'cpu_iowait', 'cpu_irq',
        'cpu_softirq', 'cpu_steal',
        'cpu_guest', 'cpu_guest_nice',
        'vm_nr_dirty',
    ]
184 |
185 |
def getSystemUsage():
    # Return the CPU usage fractions per /proc/stat category (each
    # delta divided by the total ticks elapsed since the previous
    # call) followed by the current vmstat values, in the column
    # order announced by initSystemUsage().
    global procStatFD
    global procVMStatFD
    global lastStatData
    global lastVMStatData

    procStatFD.seek(0, 0);
    for line in procStatFD:
        line = line.split()
        if line[0] != "cpu":
            continue
        statData = [int(x) for x in line[1:]]
        # Total ticks elapsed across all categories since last reading.
        deltaTotal = (float)(sum(statData) - sum(lastStatData))
        if deltaTotal == 0:
            # No ticks elapsed at all; report zero usage.
            result = [0.0 for x in statData]
        else:
            result = []
            for old, new in zip(lastStatData, statData):
                result.append((float)(new - old) / deltaTotal)
        lastStatData = statData
        break

    procVMStatFD.seek(0, 0)
    newVMStatData = {}
    for line in procVMStatFD:
        line = line.split()
        if line[0] in ['nr_dirty', ]:
            newVMStatData['vm_' + line[0]] = int(line[1])

    # NOTE(review): 'result' is only bound if a "cpu" line was found
    # above; initSystemUsage() validated its presence, so this relies
    # on /proc/stat keeping that line between calls.
    for key in ['vm_nr_dirty', ]:
        result.append(newVMStatData[key])

    return result
219 |
220 |
def initBlockDevice(dev):
    # Open /sys/block/<device>/stat for a "blk_<device>" argument,
    # record the first reading as the delta baseline and return the
    # CSV column names that getBlockUsage() will emit.
    global deviceFDs
    global lastDeviceData

    # dev[4:] strips the "blk_" prefix.
    devPath = os.path.join("/sys/block", dev[4:], "stat")
    deviceFDs[dev] = open(devPath, "r", buffering = 0)
    line = deviceFDs[dev].readline().split()

    newData = []
    # The (idx, mult) pairs mirror the list in getBlockUsage(); mult is
    # unused here but kept so the two lists stay in sync.
    for idx, mult in [
        (0, 1.0), (1, 1.0), (2, 0.5),
        (4, 1.0), (5, 1.0), (6, 0.5),
    ]:
        newData.append((int)(line[idx]))
    lastDeviceData[dev] = newData

    return ['rdiops', 'rdmerges', 'rdkbps', 'wriops', 'wrmerges', 'wrkbps', ]
238 |
239 |
def getBlockUsage(dev, interval):
    # Re-read /sys/block/<device>/stat through the handle opened by
    # initBlockDevice() and return the per-second rates since the
    # previous call, in the column order announced there. The 0.5
    # multiplier converts 512-byte sectors to kilobytes.
    global deviceFDs

    deviceFDs[dev].seek(0, 0)
    fields = deviceFDs[dev].readline().split()

    columns = [
        (0, 1.0), (1, 1.0), (2, 0.5),
        (4, 1.0), (5, 1.0), (6, 0.5),
    ]
    previous = lastDeviceData[dev]
    current = [int(fields[idx]) for idx, _ in columns]
    rates = [float(new - old) * mult / interval
             for old, new, (_, mult) in zip(previous, current, columns)]
    lastDeviceData[dev] = current
    return rates
259 |
260 | def initNetDevice(dev):
261 | global deviceFDs
262 | global lastDeviceData
263 |
264 | devPath = os.path.join("/sys/class/net", dev[4:], "statistics")
265 | deviceData = []
266 | for fname in ['rx_packets', 'rx_bytes', 'tx_packets', 'tx_bytes', ]:
267 | key = dev + "." + fname
268 | deviceFDs[key] = open(os.path.join(devPath, fname),
269 | "r", buffering = 0)
270 | deviceData.append((int)(deviceFDs[key].read()))
271 |
272 | lastDeviceData[dev] = deviceData
273 |
274 | return ['rxpktsps', 'rxkbps', 'txpktsps', 'txkbps', ]
275 |
276 |
277 | def getNetUsage(dev, interval):
278 | global deviceFDs
279 | global lastDeviceData
280 |
281 | oldData = lastDeviceData[dev]
282 | newData = []
283 | for fname in ['rx_packets', 'rx_bytes', 'tx_packets', 'tx_bytes', ]:
284 | key = dev + "." + fname
285 | deviceFDs[key].seek(0, 0)
286 | newData.append((int)(deviceFDs[key].read()))
287 |
288 | result = [
289 | (float)(newData[0] - oldData[0]) / interval,
290 | (float)(newData[1] - oldData[1]) / interval / 1024.0,
291 | (float)(newData[2] - oldData[2]) / interval,
292 | (float)(newData[3] - oldData[3]) / interval / 1024.0,
293 | ]
294 | lastDeviceData[dev] = newData
295 | return result
296 |
297 |
298 | # Script entry point: run the collector with the command line arguments
299 | # (minus the program name) and exit with whatever status main() returns.
300 | if __name__ == '__main__':
301 |     sys.exit(main(sys.argv[1:]))
300 |
--------------------------------------------------------------------------------
/run/misc/tpm_nopm.R:
--------------------------------------------------------------------------------
1 | # ----
2 | # R graph to show tpmC and tpmTOTAL.
3 | #
4 | # NOTE(review): tokens like @SKIP@, @WIDTH@, @HEIGHT@ and @POINTSIZE@
5 | # are placeholders, presumably substituted by the report generator
6 | # scripts before this file is executed - confirm against
7 | # generateGraphs.sh / generateReport.sh.
8 | # ----
9 | 
10 | # ----
11 | # Read the runInfo.csv file.
12 | # ----
13 | runInfo <- read.csv("data/runInfo.csv", head=TRUE)
14 | 
15 | # ----
16 | # Determine the grouping interval in seconds based on the
17 | # run duration. R keeps the loop variable after "break", so
18 | # "interval" ends up as the smallest candidate that produces
19 | # at most 1000 groups.
20 | # ----
21 | xmin <- @SKIP@
22 | xmax <- runInfo$runMins
23 | for (interval in c(1, 2, 5, 10, 20, 60, 120, 300, 600)) {
24 |     if ((xmax * 60) / interval <= 1000) {
25 |         break
26 |     }
27 | }
28 | idiv <- interval * 1000.0
29 | skip <- xmin * 60000
30 | 
31 | # ----
32 | # Read the result.csv and then filter the raw data
33 | # for != DELIVERY_BG and == NEW_ORDER transactions.
34 | # ----
35 | data1 <- read.csv("data/result.csv", head=TRUE)
36 | data1 <- data1[data1$elapsed >= skip, ]
37 | total1 <- data1[data1$ttype != 'DELIVERY_BG', ]
38 | neworder1 <- data1[data1$ttype == 'NEW_ORDER', ]
39 | 
40 | # ----
41 | # Aggregate the counts of both data sets grouped by second.
42 | # ----
43 | countTotal <- setNames(aggregate(total1$latency, list(elapsed=trunc(total1$elapsed / idiv) * idiv), NROW),
44 | 		  c('elapsed', 'count'));
45 | countNewOrder <- setNames(aggregate(neworder1$latency, list(elapsed=trunc(neworder1$elapsed / idiv) * idiv), NROW),
46 | 		  c('elapsed', 'count'));
47 | 
48 | # ----
49 | # Determine the ymax by increasing in sqrt(2) steps until the
50 | # maximum of tpmTOTAL fits, then make sure that we have at least
51 | # 1.2 times that to give a little head room for the legend.
52 | # ----
53 | ymax_count <- max(countTotal$count) * 60.0 / interval
54 | ymax <- 1
55 | sqrt2 <- sqrt(2.0)
56 | while (ymax < ymax_count) {
57 |     ymax <- ymax * sqrt2
58 | }
59 | if (ymax < (ymax_count * 1.2)) {
60 |     ymax <- ymax * 1.2
61 | }
62 | 
63 | 
64 | 
65 | # ----
66 | # Start the output image.
67 | # ----
68 | svg("tpm_nopm.svg", width=@WIDTH@, height=@HEIGHT@, pointsize=@POINTSIZE@)
69 | par(mar=c(4,4,4,4), xaxp=c(10,200,19))
70 | 
71 | # ----
72 | # Plot the tpmTOTAL graph.
73 | # ----
74 | plot (
75 | 	countTotal$elapsed / 60000.0, countTotal$count * 60.0 / interval,
76 | 	type='l', col="blue3", lwd=2,
77 | 	axes=TRUE,
78 | 	xlab="Elapsed Minutes",
79 | 	ylab="Transactions per Minute",
80 | 	xlim=c(xmin, xmax),
81 | 	ylim=c(0, ymax)
82 | )
83 | 
84 | # ----
85 | # Plot the tpmC graph on top of it (same limits, axes suppressed).
86 | # ----
87 | par (new=T)
88 | plot (
89 | 	countNewOrder$elapsed / 60000.0, countNewOrder$count * 60.0 / interval,
90 | 	type='l', col="red3", lwd=2,
91 | 	axes=FALSE,
92 | 	xlab="",
93 | 	ylab="",
94 | 	xlim=c(xmin, xmax),
95 | 	ylim=c(0, ymax)
96 | )
97 | 
98 | # ----
99 | # Add legend, title and other decorations.
100 | # ----
101 | legend ("topleft",
102 | 	c("tpmTOTAL", "tpmC (NewOrder only)"),
103 | 	fill=c("blue3", "red3"))
104 | title (main=c(
105 | 	paste0("Run #", runInfo$run, " of BenchmarkSQL v", runInfo$driverVersion),
106 | 	"Transactions per Minute"
107 | ))
108 | grid()
109 | box()
110 | 
--------------------------------------------------------------------------------
/run/runBenchmark.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # ----
4 | # runBenchmark.sh PROPS_FILE
5 | #
6 | # Launch one jTPCC benchmark run. A run sequence number is kept in
7 | # .jTPCC_run_seq.dat and incremented on every invocation; it is
8 | # passed to the driver as -DrunID.
9 | # ----
10 | if [ $# -ne 1 ] ; then
11 |     echo "usage: $(basename "$0") PROPS_FILE" >&2
12 |     exit 2
13 | fi
14 | 
15 | SEQ_FILE="./.jTPCC_run_seq.dat"
16 | if [ ! -f "${SEQ_FILE}" ] ; then
17 |     echo "0" > "${SEQ_FILE}"
18 | fi
19 | # Shell arithmetic instead of the legacy expr(1); fail if the
20 | # sequence file does not contain a number.
21 | SEQ=$(($(cat "${SEQ_FILE}") + 1)) || exit 1
22 | echo "${SEQ}" > "${SEQ_FILE}"
23 | 
24 | # funcs.sh provides setCP (builds the classpath) for the given props.
25 | source funcs.sh "$1"
26 | 
27 | setCP || exit 1
28 | 
29 | myOPTS="-Dprop=$1 -DrunID=${SEQ}"
30 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
31 | 
32 | java -cp "$myCP" $myOPTS jTPCC
23 |
--------------------------------------------------------------------------------
/run/runDatabaseBuild.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # ----
4 | # runDatabaseBuild.sh PROPS [OPT VAL [...]]
5 | #
6 | # Create the benchmark schema, load the data and build indexes,
7 | # foreign keys and extras. Additional OPT VAL pairs are passed
8 | # through to the loader.
9 | # ----
10 | if [ $# -lt 1 ] ; then
11 |     echo "usage: $(basename "$0") PROPS [OPT VAL [...]]" >&2
12 |     exit 2
13 | fi
14 | 
15 | PROPS="$1"
16 | shift
17 | if [ ! -f "${PROPS}" ] ; then
18 |     echo "${PROPS}: no such file or directory" >&2
19 |     exit 1
20 | fi
21 | 
22 | # Database type from the properties file.
23 | # NOTE(review): DB is not used below - confirm before removing.
24 | DB="$(grep '^db=' "${PROPS}" | sed -e 's/^db=//')"
25 | 
26 | # SQL steps executed before and after loading the data (runSQL.sh
27 | # resolves each name to a sql.<db>/ or sql.common/ file).
28 | BEFORE_LOAD="tableCreates extraCommandsBeforeLoad storedProcedureCreates"
29 | 
30 | AFTER_LOAD="indexCreates foreignKeys extraHistID buildFinish"
31 | 
32 | for step in ${BEFORE_LOAD} ; do
33 |     ./runSQL.sh "${PROPS}" $step
34 | done
35 | 
36 | # "$@" (not $*) so loader arguments containing spaces stay intact.
37 | ./runLoader.sh "${PROPS}" "$@"
38 | 
39 | for step in ${AFTER_LOAD} ; do
40 |     ./runSQL.sh "${PROPS}" $step
41 | done
30 |
--------------------------------------------------------------------------------
/run/runDatabaseDestroy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # ----
4 | # runDatabaseDestroy.sh PROPS
5 | #
6 | # Remove the benchmark schema: drop all tables, the sequence and
7 | # any stored procedures.
8 | # ----
9 | if [ $# -ne 1 ] ; then
10 |     echo "usage: $(basename "$0") PROPS" >&2
11 |     exit 2
12 | fi
13 | 
14 | PROPS="$1"
15 | if [ ! -f "${PROPS}" ] ; then
16 |     echo "${PROPS}: no such file or directory" >&2
17 |     exit 1
18 | fi
19 | 
20 | # Connection info from the properties file.
21 | # NOTE(review): DB, USER and PASSWORD are not used below; kept for
22 | # parity with the other run scripts - confirm before removing.
23 | DB="$(grep '^db=' "${PROPS}" | sed -e 's/^db=//')"
24 | USER="$(grep '^user=' "${PROPS}" | sed -e 's/^user=//' )"
25 | PASSWORD="$(grep '^password=' "${PROPS}" | sed -e 's/^password=//' )"
26 | 
27 | STEPS="tableDrops storedProcedureDrops"
28 | 
29 | for step in ${STEPS} ; do
30 |     ./runSQL.sh "${PROPS}" $step
31 | done
23 |
--------------------------------------------------------------------------------
/run/runLoader.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # ----
4 | # runLoader.sh PROPS_FILE [ARGS]
5 | #
6 | # Run the LoadData program to populate the benchmark tables.
7 | # Extra ARGS are passed through to LoadData.
8 | # ----
9 | if [ $# -lt 1 ] ; then
10 |     echo "usage: $(basename "$0") PROPS_FILE [ARGS]" >&2
11 |     exit 2
12 | fi
13 | 
14 | # Remember the properties file explicitly instead of relying on
15 | # funcs.sh to set PROPS as a side effect.
16 | PROPS="$1"
17 | source funcs.sh "$1"
18 | shift
19 | 
20 | setCP || exit 1
21 | 
22 | myOPTS="-Dprop=${PROPS}"
23 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
24 | 
25 | # "$@" (not $*) so arguments containing spaces stay intact.
26 | java -cp "$myCP" $myOPTS LoadData "$@"
17 |
--------------------------------------------------------------------------------
/run/runSQL.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # ----
4 | # runSQL.sh PROPS_FILE SQL_FILE
5 | #
6 | # Execute one SQL script through ExecJDBC using the connection
7 | # parameters from the given properties file.
8 | # ----
9 | if [ $# -ne 2 ] ; then
10 |     echo "usage: $(basename "$0") PROPS_FILE SQL_FILE" >&2
11 |     exit 2
12 | fi
13 | 
14 | # ----
15 | # Load common functions
16 | # ----
17 | source funcs.sh "$1"
18 | 
19 | # ----
20 | # Determine which SQL file to use.
21 | #
22 | # 1) If $2 specifies a file that ends in .sql, we use that.
23 | # 2) If a file ./sql.<db>/$2.sql exists, we use that.
24 | # 3) If none of the above, use ./sql.common/$2.sql.
25 | # ----
26 | if echo "$2" | grep -q -e '\.sql$' ; then
27 |     ENDS_WITH_SQL=1
28 | else
29 |     ENDS_WITH_SQL=0
30 | fi
31 | 
32 | if [ -f "${2}" -a $ENDS_WITH_SQL -eq 1 ] ; then
33 |     SQL_FILE="$2"
34 | else
35 |     if [ -f "./sql.$(getProp db)/${2}.sql" ] ; then
36 |         SQL_FILE="./sql.$(getProp db)/${2}.sql"
37 |     else
38 |         SQL_FILE="./sql.common/${2}.sql"
39 |         if [ ! -f "${SQL_FILE}" ] ; then
40 |             echo "ERROR: Cannot locate SQL file for ${2}" >&2
41 |             exit 1
42 |         fi
43 |     fi
44 | fi
45 | 
46 | # ----
47 | # Set myCP according to the database type.
48 | # ----
49 | setCP || exit 1
50 | 
51 | echo "# ------------------------------------------------------------"
52 | echo "# Loading SQL file ${SQL_FILE}"
53 | echo "# ------------------------------------------------------------"
54 | myOPTS="-Dprop=$1"
55 | myOPTS="${myOPTS} -DcommandFile=${SQL_FILE}"
56 | myOPTS="${myOPTS} -Djava.security.egd=file:/dev/./urandom"
57 | 
58 | java -cp "$myCP" $myOPTS ExecJDBC
56 |
--------------------------------------------------------------------------------
/run/sample.firebird.properties:
--------------------------------------------------------------------------------
1 | db=firebird
2 | driver=org.firebirdsql.jdbc.FBDriver
3 | conn=jdbc:firebirdsql://localhost:3050//var/lib/firebird/data/benchmarksql1.fdb
4 | user=benchmarksql
5 | password=PWbmsql
6 |
7 | warehouses=10
8 | loadWorkers=4
9 |
10 | terminals=10
11 | // To run specified transactions per terminal- runMins must equal zero
12 | runTxnsPerTerminal=0
13 | // To run for specified minutes- runTxnsPerTerminal must equal zero
14 | runMins=2
15 | // Number of total transactions per minute
16 | limitTxnsPerMin=10000000
17 |
18 | // Set to true to run in 4.x compatible mode. Set to false to use the
19 | // entire configured database evenly.
20 | terminalWarehouseFixed=false
21 |
22 | // Set to true to use the stored procedure/function implementations. Not
23 | // all of them exist for all databases and the use of stored procedures
24 | // is strongly discouraged for comparing different database vendors as
25 | // they may not have been implemented ideally for all of them. This is
26 | // however useful to test how much network IO can be saved by using
27 | // stored procedures.
28 | useStoredProcedures=false
29 |
30 | // The following five values must add up to 100
31 | // The internal default percentages match the probabilities of a
32 | // 23 Card Deck implementation, as described in the TPC-C Specs.
33 | // The values below match the pre-5.1 defaults.
34 | //newOrderWeight=45
35 | //paymentWeight=43
36 | //orderStatusWeight=4
37 | //deliveryWeight=4
38 | //stockLevelWeight=4
39 |
40 | // Directory name to create for collecting detailed result data.
41 | // Comment this out to suppress.
42 | //resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
43 | //osCollectorScript=./misc/os_collector_linux.py
44 | //osCollectorInterval=1
45 | //osCollectorSSHAddr=user@dbhost
46 | //osCollectorDevices=net_eth0 blk_sda
47 |
--------------------------------------------------------------------------------
/run/sample.oracle.properties:
--------------------------------------------------------------------------------
1 | db=oracle
2 | driver=oracle.jdbc.driver.OracleDriver
3 | conn=jdbc:oracle:thin:@localhost:1521:XE
4 | user=scott
5 | password=tiger
6 |
7 | warehouses=10
8 | loadWorkers=4
9 |
10 | terminals=10
11 | // To run specified transactions per terminal- runMins must equal zero
12 | runTxnsPerTerminal=0
13 | // To run for specified minutes- runTxnsPerTerminal must equal zero
14 | runMins=2
15 | // Number of total transactions per minute
16 | limitTxnsPerMin=10000000
17 |
18 | // Set to true to run in 4.x compatible mode. Set to false to use the
19 | // entire configured database evenly.
20 | terminalWarehouseFixed=false
21 |
22 | // Set to true to use the stored procedure/function implementations. Not
23 | // all of them exist for all databases and the use of stored procedures
24 | // is strongly discouraged for comparing different database vendors as
25 | // they may not have been implemented ideally for all of them. This is
26 | // however useful to test how much network IO can be saved by using
27 | // stored procedures.
28 | useStoredProcedures=false
29 |
30 | // The following five values must add up to 100
31 | // The internal default percentages match the probabilities of a
32 | // 23 Card Deck implementation, as described in the TPC-C Specs.
33 | // The values below match the pre-5.1 defaults.
34 | //newOrderWeight=45
35 | //paymentWeight=43
36 | //orderStatusWeight=4
37 | //deliveryWeight=4
38 | //stockLevelWeight=4
39 |
40 | // Directory name to create for collecting detailed result data.
41 | // Comment this out to suppress.
42 | //resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
43 | //osCollectorScript=./misc/os_collector_linux.py
44 | //osCollectorInterval=1
45 | //osCollectorSSHAddr=user@dbhost
46 | //osCollectorDevices=net_eth0 blk_sda
47 |
--------------------------------------------------------------------------------
/run/sample.postgresql.properties:
--------------------------------------------------------------------------------
1 | db=postgres
2 | driver=org.postgresql.Driver
3 | conn=jdbc:postgresql://localhost:5432/postgres
4 | user=benchmarksql
5 | password=PWbmsql
6 |
7 | warehouses=10
8 | loadWorkers=4
9 |
10 | terminals=10
11 | // To run specified transactions per terminal- runMins must equal zero
12 | runTxnsPerTerminal=0
13 | // To run for specified minutes- runTxnsPerTerminal must equal zero
14 | runMins=2
15 | // Number of total transactions per minute
16 | limitTxnsPerMin=10000000
17 |
18 | // Set to true to run in 4.x compatible mode. Set to false to use the
19 | // entire configured database evenly.
20 | terminalWarehouseFixed=false
21 |
22 | // Set to true to use the stored procedure/function implementations. Not
23 | // all of them exist for all databases and the use of stored procedures
24 | // is strongly discouraged for comparing different database vendors as
25 | // they may not have been implemented ideally for all of them. This is
26 | // however useful to test how much network IO can be saved by using
27 | // stored procedures.
28 | useStoredProcedures=false
29 |
30 | // The following five values must add up to 100
31 | // The internal default percentages match the probabilities of a
32 | // 23 Card Deck implementation, as described in the TPC-C Specs.
33 | // The values below match the pre-5.1 defaults.
34 | //newOrderWeight=45
35 | //paymentWeight=43
36 | //orderStatusWeight=4
37 | //deliveryWeight=4
38 | //stockLevelWeight=4
39 |
40 | // Directory name to create for collecting detailed result data.
41 | // Comment this out to suppress.
42 | //resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
43 | //osCollectorScript=./misc/os_collector_linux.py
44 | //osCollectorInterval=1
45 | //osCollectorSSHAddr=user@dbhost
46 | //osCollectorDevices=net_eth0 blk_sda
47 |
--------------------------------------------------------------------------------
/run/sql.common/buildFinish.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra commands to run after the tables are created, loaded,
3 | -- indexes built and extras created.
4 | -- ----
5 |
--------------------------------------------------------------------------------
/run/sql.common/extraCommandsBeforeLoad.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/extraCommandsBeforeLoad.sql
3 | --
4 | -- Empty placeholder file. Some databases might require
5 | -- extra commands to be run between creating the schema
6 | -- and loading the data. For example PostgreSQL, where we
7 | -- set the FILLFACTOR on some tables.
8 | -- ----
9 |
--------------------------------------------------------------------------------
/run/sql.common/foreignKeys.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/foreignKeys.sql
3 | --
4 | -- Foreign key constraints between the TPC-C tables. These are
5 | -- created after the data load (AFTER_LOAD in runDatabaseBuild.sh)
6 | -- so the load itself is not slowed down by constraint checking.
7 | -- ----
8 | 
9 | alter table bmsql_district add constraint d_warehouse_fkey
10 |     foreign key (d_w_id)
11 |     references bmsql_warehouse (w_id);
12 | 
13 | alter table bmsql_customer add constraint c_district_fkey
14 |     foreign key (c_w_id, c_d_id)
15 |     references bmsql_district (d_w_id, d_id);
16 | 
17 | alter table bmsql_history add constraint h_customer_fkey
18 |     foreign key (h_c_w_id, h_c_d_id, h_c_id)
19 |     references bmsql_customer (c_w_id, c_d_id, c_id);
20 | alter table bmsql_history add constraint h_district_fkey
21 |     foreign key (h_w_id, h_d_id)
22 |     references bmsql_district (d_w_id, d_id);
23 | 
24 | alter table bmsql_new_order add constraint no_order_fkey
25 |     foreign key (no_w_id, no_d_id, no_o_id)
26 |     references bmsql_oorder (o_w_id, o_d_id, o_id);
27 | 
28 | alter table bmsql_oorder add constraint o_customer_fkey
29 |     foreign key (o_w_id, o_d_id, o_c_id)
30 |     references bmsql_customer (c_w_id, c_d_id, c_id);
31 | 
32 | alter table bmsql_order_line add constraint ol_order_fkey
33 |     foreign key (ol_w_id, ol_d_id, ol_o_id)
34 |     references bmsql_oorder (o_w_id, o_d_id, o_id);
35 | alter table bmsql_order_line add constraint ol_stock_fkey
36 |     foreign key (ol_supply_w_id, ol_i_id)
37 |     references bmsql_stock (s_w_id, s_i_id);
38 | 
39 | alter table bmsql_stock add constraint s_warehouse_fkey
40 |     foreign key (s_w_id)
41 |     references bmsql_warehouse (w_id);
42 | alter table bmsql_stock add constraint s_item_fkey
43 |     foreign key (s_i_id)
44 |     references bmsql_item (i_id);
45 | 
38 |
--------------------------------------------------------------------------------
/run/sql.common/indexCreates.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/indexCreates.sql
3 | --
4 | -- Primary keys and additional indexes, created after the initial
5 | -- load (AFTER_LOAD in runDatabaseBuild.sh). bmsql_history has no
6 | -- primary key here; the optional extraHistID.sql scripts add one.
7 | -- ----
8 | 
9 | alter table bmsql_warehouse add constraint bmsql_warehouse_pkey
10 |     primary key (w_id);
11 | 
12 | alter table bmsql_district add constraint bmsql_district_pkey
13 |     primary key (d_w_id, d_id);
14 | 
15 | alter table bmsql_customer add constraint bmsql_customer_pkey
16 |     primary key (c_w_id, c_d_id, c_id);
17 | 
18 | create index bmsql_customer_idx1
19 |     on  bmsql_customer (c_w_id, c_d_id, c_last, c_first);
20 | 
21 | alter table bmsql_oorder add constraint bmsql_oorder_pkey
22 |     primary key (o_w_id, o_d_id, o_id);
23 | 
24 | create unique index bmsql_oorder_idx1
25 |     on  bmsql_oorder (o_w_id, o_d_id, o_carrier_id, o_id);
26 | 
27 | alter table bmsql_new_order add constraint bmsql_new_order_pkey
28 |     primary key (no_w_id, no_d_id, no_o_id);
29 | 
30 | alter table bmsql_order_line add constraint bmsql_order_line_pkey
31 |     primary key (ol_w_id, ol_d_id, ol_o_id, ol_number);
32 | 
33 | alter table bmsql_stock add constraint bmsql_stock_pkey
34 |     primary key (s_w_id, s_i_id);
35 | 
36 | alter table bmsql_item add constraint bmsql_item_pkey
37 |     primary key (i_id);
38 | 
39 | 
--------------------------------------------------------------------------------
/run/sql.common/indexDrops.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/indexDrops.sql
3 | --
4 | -- Drop the primary keys and indexes created by indexCreates.sql
5 | -- (useful for reloading data without recreating the tables).
6 | -- ----
7 | 
8 | alter table bmsql_warehouse drop constraint bmsql_warehouse_pkey;
9 | 
10 | alter table bmsql_district drop constraint bmsql_district_pkey;
11 | 
12 | alter table bmsql_customer drop constraint bmsql_customer_pkey;
13 | drop index bmsql_customer_idx1;
14 | 
15 | -- history table has no primary key
16 | -- commit;
17 | 
18 | alter table bmsql_oorder drop constraint bmsql_oorder_pkey;
19 | drop index bmsql_oorder_idx1;
20 | 
21 | alter table bmsql_new_order drop constraint bmsql_new_order_pkey;
22 | 
23 | alter table bmsql_order_line drop constraint bmsql_order_line_pkey;
24 | 
25 | alter table bmsql_stock drop constraint bmsql_stock_pkey;
26 | 
27 | alter table bmsql_item drop constraint bmsql_item_pkey;
28 | 
--------------------------------------------------------------------------------
/run/sql.common/storedProcedureCreates.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- This is a placeholder for databases, that don't support
3 | -- stored procedures or where the support is too limited to
4 | -- reasonably implement the TPCC transaction profiles.
5 | -- ----------------------------------------------------------------------
6 |
--------------------------------------------------------------------------------
/run/sql.common/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
1 | -- ----------------------------------------------------------------------
2 | -- This is a placeholder for databases, that don't support
3 | -- stored procedures or where the support is too limited to
4 | -- reasonably implement the TPCC transaction profiles.
5 | -- ----------------------------------------------------------------------
6 |
--------------------------------------------------------------------------------
/run/sql.common/tableCreates.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/tableCreates.sql
3 | --
4 | -- Generic definitions of the nine TPC-C tables plus the bmsql_config
5 | -- table and the history-id sequence. Databases with incompatible
6 | -- type names have their own copy under sql.<db>/ (see runSQL.sh).
7 | -- Primary keys and indexes are added later by indexCreates.sql.
8 | -- ----
9 | create table bmsql_config (
10 |   cfg_name    varchar(30) primary key,
11 |   cfg_value   varchar(50)
12 | );
13 | 
14 | create table bmsql_warehouse (
15 |   w_id        integer   not null,
16 |   w_ytd       decimal(12,2),
17 |   w_tax       decimal(4,4),
18 |   w_name      varchar(10),
19 |   w_street_1  varchar(20),
20 |   w_street_2  varchar(20),
21 |   w_city      varchar(20),
22 |   w_state     char(2),
23 |   w_zip       char(9)
24 | );
25 | 
26 | create table bmsql_district (
27 |   d_w_id       integer       not null,
28 |   d_id         integer       not null,
29 |   d_ytd        decimal(12,2),
30 |   d_tax        decimal(4,4),
31 |   d_next_o_id  integer,
32 |   d_name       varchar(10),
33 |   d_street_1   varchar(20),
34 |   d_street_2   varchar(20),
35 |   d_city       varchar(20),
36 |   d_state      char(2),
37 |   d_zip        char(9)
38 | );
39 | 
40 | create table bmsql_customer (
41 |   c_w_id         integer        not null,
42 |   c_d_id         integer        not null,
43 |   c_id           integer        not null,
44 |   c_discount     decimal(4,4),
45 |   c_credit       char(2),
46 |   c_last         varchar(16),
47 |   c_first        varchar(16),
48 |   c_credit_lim   decimal(12,2),
49 |   c_balance      decimal(12,2),
50 |   c_ytd_payment  decimal(12,2),
51 |   c_payment_cnt  integer,
52 |   c_delivery_cnt integer,
53 |   c_street_1     varchar(20),
54 |   c_street_2     varchar(20),
55 |   c_city         varchar(20),
56 |   c_state        char(2),
57 |   c_zip          char(9),
58 |   c_phone        char(16),
59 |   c_since        timestamp,
60 |   c_middle       char(2),
61 |   c_data         varchar(500)
62 | );
63 | 
64 | -- Sequence for the extra hist_id column (used by sql.<db>/extraHistID.sql).
65 | create sequence bmsql_hist_id_seq;
66 | 
67 | create table bmsql_history (
68 |   hist_id  integer,
69 |   h_c_id   integer,
70 |   h_c_d_id integer,
71 |   h_c_w_id integer,
72 |   h_d_id   integer,
73 |   h_w_id   integer,
74 |   h_date   timestamp,
75 |   h_amount decimal(6,2),
76 |   h_data   varchar(24)
77 | );
78 | 
79 | create table bmsql_new_order (
80 |   no_w_id  integer   not null,
81 |   no_d_id  integer   not null,
82 |   no_o_id  integer   not null
83 | );
84 | 
85 | create table bmsql_oorder (
86 |   o_w_id       integer      not null,
87 |   o_d_id       integer      not null,
88 |   o_id         integer      not null,
89 |   o_c_id       integer,
90 |   o_carrier_id integer,
91 |   o_ol_cnt     integer,
92 |   o_all_local  integer,
93 |   o_entry_d    timestamp
94 | );
95 | 
96 | create table bmsql_order_line (
97 |   ol_w_id         integer   not null,
98 |   ol_d_id         integer   not null,
99 |   ol_o_id         integer   not null,
100 |   ol_number       integer   not null,
101 |   ol_i_id         integer   not null,
102 |   ol_delivery_d   timestamp,
103 |   ol_amount       decimal(6,2),
104 |   ol_supply_w_id  integer,
105 |   ol_quantity     integer,
106 |   ol_dist_info    char(24)
107 | );
108 | 
109 | create table bmsql_item (
110 |   i_id     integer      not null,
111 |   i_name   varchar(24),
112 |   i_price  decimal(5,2),
113 |   i_data   varchar(50),
114 |   i_im_id  integer
115 | );
116 | 
117 | create table bmsql_stock (
118 |   s_w_id       integer       not null,
119 |   s_i_id       integer       not null,
120 |   s_quantity   integer,
121 |   s_ytd        integer,
122 |   s_order_cnt  integer,
123 |   s_remote_cnt integer,
124 |   s_data       varchar(50),
125 |   s_dist_01    char(24),
126 |   s_dist_02    char(24),
127 |   s_dist_03    char(24),
128 |   s_dist_04    char(24),
129 |   s_dist_05    char(24),
130 |   s_dist_06    char(24),
131 |   s_dist_07    char(24),
132 |   s_dist_08    char(24),
133 |   s_dist_09    char(24),
134 |   s_dist_10    char(24)
135 | );
136 | 
137 | 
--------------------------------------------------------------------------------
/run/sql.common/tableDrops.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/tableDrops.sql
3 | --
4 | -- Drop all benchmark tables and the history-id sequence. Child
5 | -- tables are dropped before their parents so the script also works
6 | -- when the foreign keys from foreignKeys.sql are in place.
7 | -- ----
8 | drop table bmsql_config;
9 | 
10 | drop table bmsql_new_order;
11 | 
12 | drop table bmsql_order_line;
13 | 
14 | drop table bmsql_oorder;
15 | 
16 | drop table bmsql_history;
17 | 
18 | drop table bmsql_customer;
19 | 
20 | drop table bmsql_stock;
21 | 
22 | drop table bmsql_item;
23 | 
24 | drop table bmsql_district;
25 | 
26 | drop table bmsql_warehouse;
27 | 
28 | drop sequence bmsql_hist_id_seq;
29 | 
30 | 
--------------------------------------------------------------------------------
/run/sql.common/tableTruncates.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.common/tableTruncates.sql
3 | --
4 | -- Empty all benchmark tables without dropping them.
5 | -- ----
6 | 
7 | truncate table bmsql_warehouse;
8 | 
9 | truncate table bmsql_item;
10 | 
11 | truncate table bmsql_stock;
12 | 
13 | truncate table bmsql_district;
14 | 
15 | truncate table bmsql_customer;
16 | 
17 | truncate table bmsql_history;
18 | 
19 | truncate table bmsql_oorder;
20 | 
21 | truncate table bmsql_order_line;
22 | 
23 | truncate table bmsql_new_order;
24 | 
--------------------------------------------------------------------------------
/run/sql.firebird/extraHistID.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra Schema objects/definitions for history.hist_id in Firebird
3 | -- ----
4 | 
5 | -- ----
6 | -- This is an extra column not present in the TPC-C
7 | -- specs. It is useful for replication systems like
8 | -- Bucardo and Slony-I, which like to have a primary
9 | -- key on a table. It is an auto-increment or serial
10 | -- column type. The definition below is compatible
11 | -- with Firebird, using the sequence in a trigger.
12 | --
13 | -- NOTE(review): the backslash-escaped "\;" inside the blocks below
14 | -- presumably keeps the SQL runner from splitting on these inner
15 | -- semicolons - confirm against ExecJDBC.java before editing.
16 | -- ----
17 | -- Adjust the sequence above the current max(hist_id)
18 | execute block
19 | as
20 |     declare max_hist_id integer\;
21 |     declare dummy integer\;
22 | begin
23 |     max_hist_id = (select max(hist_id) + 1 from bmsql_history)\;
24 |     dummy = GEN_ID(bmsql_hist_id_seq, -GEN_ID(bmsql_hist_id_seq, 0))\;
25 |     dummy = GEN_ID(bmsql_hist_id_seq, max_hist_id)\;
26 | end;
27 | 
28 | -- Create a trigger that forces hist_id to be hist_id_seq.nextval
29 | create trigger bmsql_hist_id_gen for bmsql_history
30 |     active before insert
31 | as
32 | begin
33 |     if (new.hist_id is null) then
34 |         new.hist_id = GEN_ID(bmsql_hist_id_seq, 1)\;
35 | end;
36 | 
37 | -- Add a primary key history(hist_id)
38 | -- Firebird lacks the capacity to declare an existing column NOT NULL.
39 | -- In order to not impose overhead due to CHECK constraints or other
40 | -- constructs, we leave the column nullable because the above trigger
41 | -- makes sure it isn't (at least on insert, which is all we ever do).
42 | create unique index bmsql_history_idx1 on bmsql_history (hist_id);
--------------------------------------------------------------------------------
/run/sql.oracle/extraHistID.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra Schema objects/definitions for history.hist_id in Oracle
3 | -- ----
4 | 
5 | -- ----
6 | -- This is an extra column not present in the TPC-C
7 | -- specs. It is useful for replication systems like
8 | -- Bucardo and Slony-I, which like to have a primary
9 | -- key on a table. It is an auto-increment or serial
10 | -- column type. The definition below is compatible
11 | -- with Oracle 11g, using the sequence in a trigger.
12 | -- ----
13 | -- Adjust the sequence above the current max(hist_id)
14 | 
15 | alter sequence bmsql_hist_id_seq increment by 30000;
16 | 
17 | -- NOTE(review): the "-- {" / "-- }" markers below presumably make the
18 | -- SQL runner send the enclosed PL/SQL block as one statement -
19 | -- confirm against ExecJDBC.java before editing.
20 | -- Step the sequence past max(hist_id) in increments of 30000.
21 | -- {
22 | declare
23 |     n integer;
24 |     i integer;
25 |     dummy integer;
26 | begin
27 |     select max(hist_id) into n from bmsql_history;
28 |     i := 0;
29 |     while i <= n loop
30 |         select bmsql_hist_id_seq.nextval into dummy from dual;
31 |         i := i + 30000;
32 |     end loop;
33 | end;
34 | -- }
35 | 
36 | alter sequence bmsql_hist_id_seq increment by 1;
37 | 
38 | -- Create a trigger that forces hist_id to be hist_id_seq.nextval
39 | -- {
40 | create trigger bmsql_history_before_insert
41 | before insert on bmsql_history
42 | for each row
43 | begin
44 |     if :new.hist_id is null then
45 |         select bmsql_hist_id_seq.nextval into :new.hist_id from dual;
46 |     end if;
47 | end;
48 | -- }
49 | 
50 | -- Add a primary key history(hist_id)
51 | alter table bmsql_history add primary key (hist_id);
--------------------------------------------------------------------------------
/run/sql.oracle/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.oracle/storedProcedureDrops.sql
3 | --
4 | -- Drop the Oracle stored procedure package and its helper types.
5 | -- NOTE(review): "tpccc_oracle" (triple c) looks like a typo but must
6 | -- match the name used in sql.oracle/storedProcedureCreates.sql -
7 | -- confirm there before "fixing" it.
8 | -- ----
9 | drop package tpccc_oracle;
10 | drop type num_array;
11 | drop type char_array;
12 | drop type varchar24_array;
13 | drop type int_array;
14 | drop type varchar16_array;
15 | drop type timestamp_array;
16 | 
8 |
--------------------------------------------------------------------------------
/run/sql.oracle/tableCreates.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- sql.oracle/tableCreates.sql
3 | --
4 | -- Oracle version of the TPC-C table definitions; identical to
5 | -- sql.common/tableCreates.sql except for the Oracle type names
6 | -- (varchar2 instead of varchar, number instead of decimal).
7 | -- ----
8 | create table bmsql_config (
9 |   cfg_name    varchar2(30) primary key,
10 |   cfg_value   varchar2(50)
11 | );
12 | 
13 | create table bmsql_warehouse (
14 |   w_id        integer   not null,
15 |   w_ytd       number(12,2),
16 |   w_tax       number(4,4),
17 |   w_name      varchar2(10),
18 |   w_street_1  varchar2(20),
19 |   w_street_2  varchar2(20),
20 |   w_city      varchar2(20),
21 |   w_state     char(2),
22 |   w_zip       char(9)
23 | );
24 | 
25 | create table bmsql_district (
26 |   d_w_id       integer       not null,
27 |   d_id         integer       not null,
28 |   d_ytd        number(12,2),
29 |   d_tax        number(4,4),
30 |   d_next_o_id  integer,
31 |   d_name       varchar2(10),
32 |   d_street_1   varchar2(20),
33 |   d_street_2   varchar2(20),
34 |   d_city       varchar2(20),
35 |   d_state      char(2),
36 |   d_zip        char(9)
37 | );
38 | 
39 | create table bmsql_customer (
40 |   c_w_id         integer        not null,
41 |   c_d_id         integer        not null,
42 |   c_id           integer        not null,
43 |   c_discount     number(4,4),
44 |   c_credit       char(2),
45 |   c_last         varchar2(16),
46 |   c_first        varchar2(16),
47 |   c_credit_lim   number(12,2),
48 |   c_balance      number(12,2),
49 |   c_ytd_payment  number(12,2),
50 |   c_payment_cnt  integer,
51 |   c_delivery_cnt integer,
52 |   c_street_1     varchar2(20),
53 |   c_street_2     varchar2(20),
54 |   c_city         varchar2(20),
55 |   c_state        char(2),
56 |   c_zip          char(9),
57 |   c_phone        char(16),
58 |   c_since        timestamp,
59 |   c_middle       char(2),
60 |   c_data         varchar2(500)
61 | );
62 | 
63 | -- Sequence for the extra hist_id column (used by sql.oracle/extraHistID.sql).
64 | create sequence bmsql_hist_id_seq;
65 | 
66 | create table bmsql_history (
67 |   hist_id  integer,
68 |   h_c_id   integer,
69 |   h_c_d_id integer,
70 |   h_c_w_id integer,
71 |   h_d_id   integer,
72 |   h_w_id   integer,
73 |   h_date   timestamp,
74 |   h_amount number(6,2),
75 |   h_data   varchar2(24)
76 | );
77 | 
78 | create table bmsql_new_order (
79 |   no_w_id  integer   not null,
80 |   no_d_id  integer   not null,
81 |   no_o_id  integer   not null
82 | );
83 | 
84 | create table bmsql_oorder (
85 |   o_w_id       integer      not null,
86 |   o_d_id       integer      not null,
87 |   o_id         integer      not null,
88 |   o_c_id       integer,
89 |   o_carrier_id integer,
90 |   o_ol_cnt     integer,
91 |   o_all_local  integer,
92 |   o_entry_d    timestamp
93 | );
94 | 
95 | create table bmsql_order_line (
96 |   ol_w_id         integer   not null,
97 |   ol_d_id         integer   not null,
98 |   ol_o_id         integer   not null,
99 |   ol_number       integer   not null,
100 |   ol_i_id         integer   not null,
101 |   ol_delivery_d   timestamp,
102 |   ol_amount       number(6,2),
103 |   ol_supply_w_id  integer,
104 |   ol_quantity     integer,
105 |   ol_dist_info    char(24)
106 | );
107 | 
108 | create table bmsql_item (
109 |   i_id     integer      not null,
110 |   i_name   varchar2(24),
111 |   i_price  number(5,2),
112 |   i_data   varchar2(50),
113 |   i_im_id  integer
114 | );
115 | 
116 | create table bmsql_stock (
117 |   s_w_id       integer       not null,
118 |   s_i_id       integer       not null,
119 |   s_quantity   integer,
120 |   s_ytd        integer,
121 |   s_order_cnt  integer,
122 |   s_remote_cnt integer,
123 |   s_data       varchar2(50),
124 |   s_dist_01    char(24),
125 |   s_dist_02    char(24),
126 |   s_dist_03    char(24),
127 |   s_dist_04    char(24),
128 |   s_dist_05    char(24),
129 |   s_dist_06    char(24),
130 |   s_dist_07    char(24),
131 |   s_dist_08    char(24),
132 |   s_dist_09    char(24),
133 |   s_dist_10    char(24)
134 | );
135 | 
136 | 
--------------------------------------------------------------------------------
/run/sql.postgres/buildFinish.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra commands to run after the tables are created, loaded,
3 | -- indexes built and extra's created.
4 | -- PostgreSQL version.
5 | -- ----
6 |
7 | vacuum analyze;
8 |
--------------------------------------------------------------------------------
/run/sql.postgres/extraCommandsBeforeLoad.sql:
--------------------------------------------------------------------------------
-- ----
-- Lower the FILLFACTOR of the heavily updated tables before the
-- load, so pages are filled leaving free space for updates (allows
-- HOT updates to stay on the same page). PostgreSQL only.
-- ----
ALTER TABLE bmsql_oorder SET (FILLFACTOR = 80);
ALTER TABLE bmsql_order_line SET (FILLFACTOR = 80);
ALTER TABLE bmsql_warehouse SET (FILLFACTOR = 50);
-- NOTE(review): 79 stands out among the otherwise round values
-- (50/80/90/95) -- confirm it isn't a typo for 70 or 80.
ALTER TABLE bmsql_district SET (FILLFACTOR = 79);
ALTER TABLE bmsql_customer SET (FILLFACTOR = 90);
ALTER TABLE bmsql_stock SET (FILLFACTOR = 95);
7 |
--------------------------------------------------------------------------------
/run/sql.postgres/extraHistID.sql:
--------------------------------------------------------------------------------
1 | -- ----
2 | -- Extra Schema objects/definitions for history.hist_id in PostgreSQL
3 | -- ----
4 |
5 | -- ----
6 | -- This is an extra column not present in the TPC-C
7 | -- specs. It is useful for replication systems like
8 | -- Bucardo and Slony-I, which like to have a primary
9 | -- key on a table. It is an auto-increment or serial
10 | -- column type. The definition below is compatible
11 | -- with Oracle 11g, using a sequence and a trigger.
12 | -- ----
13 | -- Adjust the sequence above the current max(hist_id)
14 | select setval('bmsql_hist_id_seq', (select max(hist_id) from bmsql_history));
15 |
16 | -- Make nextval(seq) the default value of the hist_id column.
17 | alter table bmsql_history
18 | alter column hist_id set default nextval('bmsql_hist_id_seq');
19 |
20 | -- Add a primary key history(hist_id)
21 | alter table bmsql_history add primary key (hist_id);
22 |
--------------------------------------------------------------------------------
/run/sql.postgres/storedProcedureCreates.sql:
--------------------------------------------------------------------------------
-- ----
-- bmsql_cid_from_clast -
--
--  Resolve the customer ID for a (warehouse, district, c_last)
--  combination. Per TPC-C clause 2.5.2.2 case 2 the customer in
--  position n/2 (rounded up) of all matching customers, ordered
--  by c_first, is selected.
--
--  Raises an exception when no matching customer exists.
-- ----
CREATE OR REPLACE FUNCTION bmsql_cid_from_clast(
    in_c_w_id integer,
    in_c_d_id integer,
    in_c_last varchar(16))
RETURNS integer AS
$$
DECLARE
    cust_cursor CURSOR (
        p_w_id integer, p_d_id integer, p_c_last varchar(16))
    FOR
        SELECT c_id FROM bmsql_customer
        WHERE c_w_id = p_w_id
        AND c_d_id = p_d_id
        AND c_last = p_c_last
        ORDER BY c_first;
    num_cust integer;           -- number of matching customers
    idx_cust integer;           -- 0-based index of the row to pick
    ret_c_id integer;
BEGIN
    -- Clause 2.5.2.2 Case 2, customer selected based on c_last.
    SELECT INTO num_cust count(*)
        FROM bmsql_customer
        WHERE c_w_id = in_c_w_id
        AND c_d_id = in_c_d_id
        AND c_last = in_c_last;
    IF num_cust = 0 THEN
        RAISE EXCEPTION 'Customer(s) for C_W_ID=% C_D_ID=% C_LAST=% not found',
            in_c_w_id, in_c_d_id, in_c_last;
    END IF;
    idx_cust = (num_cust + 1) / 2 - 1;

    -- Skip idx_cust rows, then fetch the selected customer's c_id.
    OPEN cust_cursor(in_c_w_id, in_c_d_id, in_c_last);
    MOVE FORWARD idx_cust IN cust_cursor;
    FETCH FROM cust_cursor INTO ret_c_id;
    CLOSE cust_cursor;

    RETURN ret_c_id;
END;
$$
LANGUAGE plpgsql;
41 |
42 |
-- ----
-- bmsql_proc_new_order -
--
--  Process one TPC-C NEW_ORDER transaction entirely server side.
--  Reads DISTRICT (FOR UPDATE) to allocate the order id, reads
--  CUSTOMER/WAREHOUSE, inserts the ORDER and NEW_ORDER rows and
--  then processes each order line: ITEM lookup, STOCK FOR UPDATE,
--  STOCK update and ORDER_LINE insert. All per-line result data is
--  returned in arrays indexed by the original line position.
--
--  Raises 'Item number is not valid' for the intentional 1% rollback
--  case (unused item number), which the caller turns into a rollback.
-- ----
CREATE OR REPLACE FUNCTION bmsql_proc_new_order(
    IN in_w_id integer,
    IN in_d_id integer,
    IN in_c_id integer,
    IN in_ol_supply_w_id integer[],
    IN in_ol_i_id integer[],
    IN in_ol_quantity integer[],
    OUT out_w_tax decimal(4, 4),
    OUT out_d_tax decimal(4, 4),
    OUT out_o_id integer,
    OUT out_o_entry_d timestamp,
    OUT out_ol_cnt integer,
    OUT out_ol_amount decimal(12, 2)[],
    OUT out_total_amount decimal(12, 2),
    OUT out_c_last varchar(16),
    OUT out_c_credit char(2),
    OUT out_c_discount decimal(4, 4),
    OUT out_i_name varchar(24)[],
    OUT out_i_price decimal(5, 2)[],
    OUT out_s_quantity integer[],
    OUT out_brand_generic char[]
) AS
$$
DECLARE
    var_all_local integer := 1;     -- 1 while every line is supplied by in_w_id
    var_x integer;
    var_y integer;
    var_tmp integer;
    var_seq integer[15];            -- processing order of the order lines
    var_item_row record;
    var_stock_row record;
BEGIN
    -- The o_entry_d is now.
    out_o_entry_d := CURRENT_TIMESTAMP;
    out_total_amount := 0.00;

    -- When processing the order lines we must select the STOCK rows
    -- FOR UPDATE. This is because we must perform business logic
    -- (the juggling with the S_QUANTITY) here in the application
    -- and cannot do that in an atomic UPDATE statement while getting
    -- the original value back at the same time (UPDATE ... RETURNING
    -- may not be vendor neutral). This can lead to possible deadlocks
    -- if two transactions try to lock the same two stock rows in
    -- opposite order. To avoid that we process the order lines in
    -- the order of ol_supply_w_id, ol_i_id.
    out_ol_cnt := 0;
    FOR var_x IN 1 .. array_length(in_ol_i_id, 1) LOOP
        IF in_ol_i_id[var_x] IS NOT NULL AND in_ol_i_id[var_x] <> 0 THEN
            out_ol_cnt := out_ol_cnt + 1;
            -- NOTE(review): var_seq is indexed by var_x here, so a
            -- NULL/0 item in the middle of the input arrays would
            -- leave a gap that the sort below reads as NULL. Callers
            -- apparently always send the used lines contiguously --
            -- confirm.
            var_seq[var_x] = var_x;
            IF in_ol_supply_w_id[var_x] <> in_w_id THEN
                var_all_local := 0;
            END IF;
        END IF;
    END LOOP;
    -- Simple exchange sort of var_seq by (ol_supply_w_id, ol_i_id).
    FOR var_x IN 1 .. out_ol_cnt - 1 LOOP
        FOR var_y IN var_x + 1 .. out_ol_cnt LOOP
            IF in_ol_supply_w_id[var_seq[var_y]] < in_ol_supply_w_id[var_seq[var_x]] THEN
                var_tmp = var_seq[var_x];
                var_seq[var_x] = var_seq[var_y];
                var_seq[var_y] = var_tmp;
            ELSE
                IF in_ol_supply_w_id[var_seq[var_y]] = in_ol_supply_w_id[var_seq[var_x]]
                    AND in_ol_i_id[var_seq[var_y]] < in_ol_i_id[var_seq[var_x]] THEN
                    var_tmp = var_seq[var_x];
                    var_seq[var_x] = var_seq[var_y];
                    var_seq[var_y] = var_tmp;
                END IF;
            END IF;
        END LOOP;
    END LOOP;

    -- Retrieve the required data from DISTRICT; the row lock also
    -- serializes order id allocation within the district.
    SELECT INTO out_d_tax, out_o_id
        d_tax, d_next_o_id
        FROM bmsql_district
        WHERE d_w_id = in_w_id AND d_id = in_d_id
        FOR UPDATE;

    -- Retrieve the required data from CUSTOMER and WAREHOUSE
    SELECT INTO out_w_tax, out_c_last, out_c_credit, out_c_discount
        w_tax, c_last, c_credit, c_discount
        FROM bmsql_customer
        JOIN bmsql_warehouse ON (w_id = c_w_id)
        WHERE c_w_id = in_w_id AND c_d_id = in_d_id AND c_id = in_c_id;

    -- Update the DISTRICT bumping the D_NEXT_O_ID
    UPDATE bmsql_district
        SET d_next_o_id = d_next_o_id + 1
        WHERE d_w_id = in_w_id AND d_id = in_d_id;

    -- Insert the ORDER row
    INSERT INTO bmsql_oorder (
        o_id, o_d_id, o_w_id, o_c_id, o_entry_d,
        o_ol_cnt, o_all_local)
    VALUES (
        out_o_id, in_d_id, in_w_id, in_c_id, out_o_entry_d,
        out_ol_cnt, var_all_local);

    -- Insert the NEW_ORDER row
    INSERT INTO bmsql_new_order (
        no_o_id, no_d_id, no_w_id)
    VALUES (
        out_o_id, in_d_id, in_w_id);

    -- Per ORDER_LINE
    FOR var_x IN 1 .. out_ol_cnt LOOP
        -- We process the lines in the sequence ordered by warehouse, item.
        var_y = var_seq[var_x];
        SELECT INTO var_item_row
            i_name, i_price, i_data
            FROM bmsql_item
            WHERE i_id = in_ol_i_id[var_y];
        IF NOT FOUND THEN
            -- Unused item number: the 1% intentional rollback case.
            RAISE EXCEPTION 'Item number is not valid';
        END IF;
        -- Found ITEM
        out_i_name[var_y] = var_item_row.i_name;
        out_i_price[var_y] = var_item_row.i_price;

        SELECT INTO var_stock_row
            s_quantity, s_data,
            s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05,
            s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10
            FROM bmsql_stock
            WHERE s_w_id = in_ol_supply_w_id[var_y]
            AND s_i_id = in_ol_i_id[var_y]
            FOR UPDATE;
        IF NOT FOUND THEN
            RAISE EXCEPTION 'STOCK not found: %,%', in_ol_supply_w_id[var_y],
                in_ol_i_id[var_y];
        END IF;

        out_s_quantity[var_y] = var_stock_row.s_quantity;
        out_ol_amount[var_y] = out_i_price[var_y] * in_ol_quantity[var_y];
        -- Brand/generic flag: 'B' only if both item and stock data
        -- contain the string 'ORIGINAL'.
        IF var_item_row.i_data LIKE '%ORIGINAL%'
            AND var_stock_row.s_data LIKE '%ORIGINAL%' THEN
            out_brand_generic[var_y] := 'B';
        ELSE
            out_brand_generic[var_y] := 'G';
        END IF;
        out_total_amount = out_total_amount +
            out_ol_amount[var_y] * (1.0 - out_c_discount)
            * (1.0 + out_w_tax + out_d_tax);

        -- Update the STOCK row: decrement quantity, refilling by 91
        -- when it would fall below 10; maintain the counters.
        UPDATE bmsql_stock SET
            s_quantity = CASE
                WHEN var_stock_row.s_quantity >= in_ol_quantity[var_y] + 10 THEN
                    var_stock_row.s_quantity - in_ol_quantity[var_y]
                ELSE
                    var_stock_row.s_quantity + 91
                END,
            s_ytd = s_ytd + in_ol_quantity[var_y],
            s_order_cnt = s_order_cnt + 1,
            s_remote_cnt = s_remote_cnt + CASE
                WHEN in_w_id <> in_ol_supply_w_id[var_y] THEN
                    1
                ELSE
                    0
                END
            WHERE s_w_id = in_ol_supply_w_id[var_y]
            AND s_i_id = in_ol_i_id[var_y];

        -- Insert the ORDER_LINE row, copying the s_dist_NN column
        -- that matches the ordering district into ol_dist_info.
        INSERT INTO bmsql_order_line (
            ol_o_id, ol_d_id, ol_w_id, ol_number,
            ol_i_id, ol_supply_w_id, ol_quantity,
            ol_amount, ol_dist_info)
        VALUES (
            out_o_id, in_d_id, in_w_id, var_y,
            in_ol_i_id[var_y], in_ol_supply_w_id[var_y], in_ol_quantity[var_y],
            out_ol_amount[var_y],
            CASE
                WHEN in_d_id = 1 THEN var_stock_row.s_dist_01
                WHEN in_d_id = 2 THEN var_stock_row.s_dist_02
                WHEN in_d_id = 3 THEN var_stock_row.s_dist_03
                WHEN in_d_id = 4 THEN var_stock_row.s_dist_04
                WHEN in_d_id = 5 THEN var_stock_row.s_dist_05
                WHEN in_d_id = 6 THEN var_stock_row.s_dist_06
                WHEN in_d_id = 7 THEN var_stock_row.s_dist_07
                WHEN in_d_id = 8 THEN var_stock_row.s_dist_08
                WHEN in_d_id = 9 THEN var_stock_row.s_dist_09
                WHEN in_d_id = 10 THEN var_stock_row.s_dist_10
            END);

    END LOOP;

    RETURN;
END;
$$
LANGUAGE plpgsql;
235 |
236 |
-- ----
-- bmsql_proc_payment -
--
--  Process one TPC-C PAYMENT transaction server side. Updates the
--  DISTRICT and WAREHOUSE year-to-date totals, resolves the customer
--  by c_last when no c_id is given (in_c_id is INOUT for that
--  reason), updates the CUSTOMER balance/counters (including the
--  c_data work for bad-credit customers) and inserts a HISTORY row.
-- ----
CREATE OR REPLACE FUNCTION bmsql_proc_payment(
    IN in_w_id integer,
    IN in_d_id integer,
    INOUT in_c_id integer,
    IN in_c_d_id integer,
    IN in_c_w_id integer,
    IN in_c_last varchar(16),
    IN in_h_amount decimal(6,2),
    OUT out_w_name varchar(10),
    OUT out_w_street_1 varchar(20),
    OUT out_w_street_2 varchar(20),
    OUT out_w_city varchar(20),
    OUT out_w_state char(2),
    OUT out_w_zip char(9),
    OUT out_d_name varchar(10),
    OUT out_d_street_1 varchar(20),
    OUT out_d_street_2 varchar(20),
    OUT out_d_city varchar(20),
    OUT out_d_state char(2),
    OUT out_d_zip char(9),
    OUT out_c_first varchar(16),
    OUT out_c_middle char(2),
    OUT out_c_street_1 varchar(20),
    OUT out_c_street_2 varchar(20),
    OUT out_c_city varchar(20),
    OUT out_c_state char(2),
    OUT out_c_zip char(9),
    OUT out_c_phone char(16),
    OUT out_c_since timestamp,
    OUT out_c_credit char(2),
    OUT out_c_credit_lim decimal(12,2),
    OUT out_c_discount decimal(4,4),
    OUT out_c_balance decimal(12,2),
    OUT out_c_data varchar(500),
    OUT out_h_date timestamp
) AS
$$
BEGIN
    -- The h_date is now.
    out_h_date := CURRENT_TIMESTAMP;

    -- Update the DISTRICT (the UPDATE also locks the row for the
    -- following SELECT ... FOR UPDATE).
    UPDATE bmsql_district
        SET d_ytd = d_ytd + in_h_amount
        WHERE d_w_id = in_w_id AND d_id = in_d_id;

    -- Select the DISTRICT
    SELECT INTO out_d_name, out_d_street_1, out_d_street_2,
                out_d_city, out_d_state, out_d_zip
        d_name, d_street_1, d_street_2, d_city, d_state, d_zip
        FROM bmsql_district
        WHERE d_w_id = in_w_id AND d_id = in_d_id
        FOR UPDATE;

    -- Update the WAREHOUSE
    UPDATE bmsql_warehouse
        SET w_ytd = w_ytd + in_h_amount
        WHERE w_id = in_w_id;

    -- Select the WAREHOUSE
    SELECT INTO out_w_name, out_w_street_1, out_w_street_2,
                out_w_city, out_w_state, out_w_zip
        w_name, w_street_1, w_street_2, w_city, w_state, w_zip
        FROM bmsql_warehouse
        WHERE w_id = in_w_id
        FOR UPDATE;

    -- If C_LAST is given instead of C_ID (60%), determine the C_ID.
    IF in_c_last IS NOT NULL THEN
        in_c_id = bmsql_cid_from_clast(in_c_w_id, in_c_d_id, in_c_last);
    END IF;

    -- Select the CUSTOMER
    SELECT INTO out_c_first, out_c_middle, in_c_last, out_c_street_1,
                out_c_street_2, out_c_city, out_c_state, out_c_zip,
                out_c_phone, out_c_since, out_c_credit, out_c_credit_lim,
                out_c_discount, out_c_balance
        c_first, c_middle, c_last, c_street_1,
        c_street_2, c_city, c_state, c_zip,
        c_phone, c_since, c_credit, c_credit_lim,
        c_discount, c_balance
        FROM bmsql_customer
        WHERE c_w_id = in_c_w_id AND c_d_id = in_c_d_id AND c_id = in_c_id
        FOR UPDATE;

    -- Update the CUSTOMER
    out_c_balance = out_c_balance - in_h_amount;
    IF out_c_credit = 'GC' THEN
        -- Customer with good credit, don't update C_DATA
        UPDATE bmsql_customer
            SET c_balance = c_balance - in_h_amount,
                c_ytd_payment = c_ytd_payment + in_h_amount,
                c_payment_cnt = c_payment_cnt + 1
            WHERE c_w_id = in_c_w_id AND c_d_id=in_c_d_id AND c_id=in_c_id;
        out_c_data := '';
    ELSE
        -- Customer with bad credit: prepend the payment information
        -- to c_data, truncated to 500 characters.
        SELECT INTO out_c_data
            c_data
            FROM bmsql_customer
            WHERE c_w_id = in_c_w_id AND c_d_id = in_c_d_id
            AND c_id = in_c_id;
        out_c_data := substring('C_ID=' || in_c_id::text ||
            ' C_D_ID=' || in_c_d_id::text ||
            ' C_W_ID=' || in_c_w_id::text ||
            ' D_ID=' || in_d_id::text ||
            ' W_ID=' || in_w_id::text ||
            ' H_AMOUNT=' || round(in_h_amount,2)::text || ' ' ||
            out_c_data from 1 for 500);

        UPDATE bmsql_customer
            SET c_balance = c_balance - in_h_amount,
                c_ytd_payment = c_ytd_payment + in_h_amount,
                c_payment_cnt = c_payment_cnt + 1,
                c_data = out_c_data
            WHERE c_w_id = in_c_w_id AND c_d_id = in_c_d_id
            AND c_id = in_c_id;
    END IF;

    -- Insert the HISTORY row.
    -- NOTE(review): TPC-C clause 2.5.2.2 specifies W_NAME and D_NAME
    -- separated by 4 spaces in H_DATA; a single space is used here --
    -- confirm this is intended.
    INSERT INTO bmsql_history (
        h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id,
        h_date, h_amount, h_data)
    VALUES (
        in_c_id, in_c_d_id, in_c_w_id, in_d_id, in_w_id,
        out_h_date, in_h_amount, out_w_name||' '|| out_d_name
    );
END;
$$
LANGUAGE plpgsql;
366 |
367 |
-- ----
-- bmsql_proc_order_status -
--
--  Process one TPC-C ORDER_STATUS transaction server side. Resolves
--  the customer by c_last when no c_id is given (in_c_id is INOUT),
--  returns the customer data and the most recent order of that
--  customer together with all of its order lines. The per-line
--  output arrays are padded with zeros/NULLs up to 15 entries.
--
--  NOTE(review): out_o_entry_d is declared varchar(24) while
--  o_entry_d is a timestamp; this relies on PL/pgSQL's implicit
--  cast on assignment -- confirm the text format is what callers
--  expect. o_carrier_id is returned as -1 when still NULL.
-- ----
CREATE OR REPLACE FUNCTION bmsql_proc_order_status(
    IN in_w_id integer,
    IN in_d_id integer,
    INOUT in_c_id integer,
    IN in_c_last varchar(16),
    OUT out_c_first varchar(16),
    OUT out_c_middle char(2),
    OUT out_c_balance decimal(12,2),
    OUT out_o_id integer,
    OUT out_o_entry_d varchar(24),
    OUT out_o_carrier_id integer,
    OUT out_ol_supply_w_id integer[],
    OUT out_ol_i_id integer[],
    OUT out_ol_quantity integer[],
    OUT out_ol_amount decimal(12,2)[],
    OUT out_ol_delivery_d timestamp[]
) AS
$$
DECLARE
    v_order_line record;
    v_ol_idx integer := 1;      -- next slot in the output arrays
BEGIN
    -- If C_LAST is given instead of C_ID (60%), determine the C_ID.
    IF in_c_last IS NOT NULL THEN
        in_c_id = bmsql_cid_from_clast(in_w_id, in_d_id, in_c_last);
    END IF;

    -- Select the CUSTOMER
    SELECT INTO out_c_first, out_c_middle, in_c_last, out_c_balance
        c_first, c_middle, c_last, c_balance
        FROM bmsql_customer
        WHERE c_w_id=in_w_id AND c_d_id=in_d_id AND c_id = in_c_id;

    -- Select the last ORDER for this customer.
    SELECT INTO out_o_id, out_o_entry_d, out_o_carrier_id
        o_id, o_entry_d, coalesce(o_carrier_id, -1)
        FROM bmsql_oorder
        WHERE o_w_id = in_w_id AND o_d_id = in_d_id AND o_c_id = in_c_id
        AND o_id = (
            SELECT max(o_id)
                FROM bmsql_oorder
                WHERE o_w_id = in_w_id AND o_d_id = in_d_id AND o_c_id = in_c_id
            );

    -- Collect that order's lines into the output arrays.
    FOR v_order_line IN SELECT ol_i_id, ol_supply_w_id, ol_quantity,
        ol_amount, ol_delivery_d
        FROM bmsql_order_line
        WHERE ol_w_id = in_w_id AND ol_d_id = in_d_id AND ol_o_id = out_o_id
        ORDER BY ol_w_id, ol_d_id, ol_o_id, ol_number
    LOOP
        out_ol_i_id[v_ol_idx] = v_order_line.ol_i_id;
        out_ol_supply_w_id[v_ol_idx] = v_order_line.ol_supply_w_id;
        out_ol_quantity[v_ol_idx] = v_order_line.ol_quantity;
        out_ol_amount[v_ol_idx] = v_order_line.ol_amount;
        out_ol_delivery_d[v_ol_idx] = v_order_line.ol_delivery_d;
        v_ol_idx = v_ol_idx + 1;
    END LOOP;

    -- Pad the arrays to the maximum of 15 order lines.
    WHILE v_ol_idx < 16 LOOP
        out_ol_i_id[v_ol_idx] = 0;
        out_ol_supply_w_id[v_ol_idx] = 0;
        out_ol_quantity[v_ol_idx] = 0;
        out_ol_amount[v_ol_idx] = 0.0;
        out_ol_delivery_d[v_ol_idx] = NULL;
        v_ol_idx = v_ol_idx + 1;
    END LOOP;
END;
$$
LANGUAGE plpgsql;
437 |
438 |
-- ----
-- bmsql_proc_stock_level -
--
--  Process one TPC-C STOCK_LEVEL transaction server side. Counts
--  the STOCK rows of the given warehouse that are below the given
--  threshold and belong to items appearing in the district's last
--  20 orders (d_next_o_id - 20 .. d_next_o_id - 1). STOCK has one
--  row per (warehouse, item), so this is a count of distinct items.
-- ----
CREATE OR REPLACE FUNCTION bmsql_proc_stock_level(
    IN in_w_id integer,
    IN in_d_id integer,
    IN in_threshold integer,
    OUT out_low_stock integer
) AS
$$
BEGIN
    SELECT INTO out_low_stock
        count(*) AS low_stock
        FROM (
            SELECT s_w_id, s_i_id, s_quantity
                FROM bmsql_stock
                WHERE s_w_id = in_w_id AND s_quantity < in_threshold
                AND s_i_id IN (
                    SELECT ol_i_id
                        FROM bmsql_district
                        JOIN bmsql_order_line ON ol_w_id = d_w_id
                        AND ol_d_id = d_id
                        AND ol_o_id >= d_next_o_id - 20
                        AND ol_o_id < d_next_o_id
                        WHERE d_w_id = in_w_id AND d_id = in_d_id
                    )
            ) AS L;
END;
$$
LANGUAGE plpgsql;
466 |
467 |
-- ----
-- bmsql_proc_delivery_bg -
--
--  Background part of the TPC-C DELIVERY transaction. For each of
--  the 10 districts of the warehouse it picks the oldest
--  undelivered order (lowest no_o_id in NEW_ORDER), deletes the
--  NEW_ORDER row, stamps the ORDER with the carrier id, stamps all
--  ORDER_LINE rows with the delivery date and credits the customer
--  with the order total. Returns the delivered order id per
--  district in out_delivered_o_id (slot left NULL when a district
--  had no undelivered order).
-- ----
CREATE OR REPLACE FUNCTION bmsql_proc_delivery_bg(
    IN in_w_id integer,
    IN in_o_carrier_id integer,
    IN in_ol_delivery_d timestamp,
    OUT out_delivered_o_id integer[]
) AS
$$
DECLARE
    var_d_id integer;
    var_o_id integer;               -- order picked, -1 = none found yet
    var_c_id integer;
    var_sum_ol_amount decimal(12, 2);
BEGIN
    FOR var_d_id IN 1..10 LOOP
        var_o_id = -1;
        /*
         * Try to find the oldest undelivered order for this
         * DISTRICT. There may not be one, which is a case
         * that needs to be reported.
         */
        WHILE var_o_id < 0 LOOP
            -- SELECT INTO takes the first row of the ordered result.
            SELECT INTO var_o_id
                no_o_id
                FROM bmsql_new_order
                WHERE no_w_id = in_w_id AND no_d_id = var_d_id
                ORDER BY no_o_id ASC;
            IF NOT FOUND THEN
                var_o_id = -1;
                EXIT;
            END IF;

            -- Delete it; a concurrent delivery may have taken it
            -- first, in which case we retry with the next one.
            DELETE FROM bmsql_new_order
                WHERE no_w_id = in_w_id AND no_d_id = var_d_id
                AND no_o_id = var_o_id;
            IF NOT FOUND THEN
                var_o_id = -1;
            END IF;
        END LOOP;

        IF var_o_id < 0 THEN
            -- No undelivered NEW_ORDER found for this District.
            -- NOTE(review): this assignment does not affect the FOR
            -- loop counter in PL/pgSQL and appears to be a harmless
            -- leftover.
            var_d_id = var_d_id + 1;
            CONTINUE;
        END IF;

        /*
         * We found the oldest undelivered order for this DISTRICT
         * and the NEW_ORDER line has been deleted. Process the
         * rest of the DELIVERY_BG.
         */

        -- Update the ORDER setting the o_carrier_id.
        UPDATE bmsql_oorder
            SET o_carrier_id = in_o_carrier_id
            WHERE o_w_id = in_w_id AND o_d_id = var_d_id AND o_id = var_o_id;

        -- Get the o_c_id from the ORDER.
        SELECT INTO var_c_id
            o_c_id
            FROM bmsql_oorder
            WHERE o_w_id = in_w_id AND o_d_id = var_d_id AND o_id = var_o_id;

        -- Update ORDER_LINE setting the ol_delivery_d.
        UPDATE bmsql_order_line
            SET ol_delivery_d = in_ol_delivery_d
            WHERE ol_w_id = in_w_id AND ol_d_id = var_d_id
            AND ol_o_id = var_o_id;

        -- SELECT the sum(ol_amount) from ORDER_LINE.
        SELECT INTO var_sum_ol_amount
            sum(ol_amount) AS sum_ol_amount
            FROM bmsql_order_line
            WHERE ol_w_id = in_w_id AND ol_d_id = var_d_id
            AND ol_o_id = var_o_id;

        -- Update the CUSTOMER.
        UPDATE bmsql_customer
            SET c_balance = c_balance + var_sum_ol_amount,
                c_delivery_cnt = c_delivery_cnt + 1
            WHERE c_w_id = in_w_id AND c_d_id = var_d_id and c_id = var_c_id;

        out_delivered_o_id[var_d_id] = var_o_id;

        -- NOTE(review): see the note above -- has no effect on the
        -- FOR loop counter.
        var_d_id = var_d_id + 1;
    END LOOP;
END;
$$
LANGUAGE plpgsql;
556 |
--------------------------------------------------------------------------------
/run/sql.postgres/storedProcedureDrops.sql:
--------------------------------------------------------------------------------
-- Drop the server side stored functions. The argument lists must
-- name exactly the IN/INOUT parameter types of the corresponding
-- CREATE FUNCTION statements (OUT parameters are not part of the
-- signature used for lookup).
drop function if exists bmsql_proc_new_order (integer, integer, integer, integer[], integer[], integer[]);
drop function if exists bmsql_proc_stock_level(integer, integer, integer);
drop function if exists bmsql_proc_payment(integer, integer, integer, integer, integer, varchar(16), decimal(6,2));
-- Fixed: "var(16)" is not a valid type name; must be varchar(16).
drop function if exists bmsql_proc_order_status (integer, integer, integer, varchar(16));
drop function if exists bmsql_cid_from_clast(integer, integer, varchar(16));
-- Fixed: the third IN parameter of bmsql_proc_delivery_bg is a
-- timestamp (in_ol_delivery_d), not an integer; the old argument
-- list could never match the created function.
drop function if exists bmsql_proc_delivery_bg (integer, integer, timestamp);
7 |
--------------------------------------------------------------------------------
/run/sql.postgres/tableCopies.sql:
--------------------------------------------------------------------------------
1 |
-- ----
-- Bulk-load the CSV files generated by LoadData (fileLocation set
-- to /tmp/csv/) into the benchmark tables using COPY. The order
-- and order-line loads map the literal string 'NULL' to SQL NULL
-- because o_carrier_id and ol_delivery_d are unset for orders that
-- have not been delivered yet.
-- ----
copy bmsql_config
  (cfg_name, cfg_value)
  from '/tmp/csv/config.csv' WITH CSV;

copy bmsql_warehouse
  (w_id, w_ytd, w_tax, w_name, w_street_1, w_street_2, w_city, w_state, w_zip)
  from '/tmp/csv/warehouse.csv' WITH CSV;

copy bmsql_item
  (i_id, i_name, i_price, i_data, i_im_id)
  from '/tmp/csv/item.csv' WITH CSV;

copy bmsql_stock
  (s_i_id, s_w_id, s_quantity, s_ytd, s_order_cnt, s_remote_cnt, s_data,
   s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05,
   s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10)
  from '/tmp/csv/stock.csv' WITH CSV;

copy bmsql_district
  (d_id, d_w_id, d_ytd, d_tax, d_next_o_id, d_name, d_street_1,
   d_street_2, d_city, d_state, d_zip)
  from '/tmp/csv/district.csv' WITH CSV;

copy bmsql_customer
  (c_id, c_d_id, c_w_id, c_discount, c_credit, c_last, c_first, c_credit_lim,
   c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_street_1,
   c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_middle, c_data)
  from '/tmp/csv/customer.csv' WITH CSV;

copy bmsql_history
  (hist_id, h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, h_date, h_amount, h_data)
  from '/tmp/csv/cust-hist.csv' WITH CSV;

copy bmsql_oorder
  (o_id, o_w_id, o_d_id, o_c_id, o_carrier_id, o_ol_cnt, o_all_local, o_entry_d)
  from '/tmp/csv/order.csv' WITH CSV NULL AS 'NULL';

copy bmsql_order_line
  (ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_delivery_d,
   ol_amount, ol_supply_w_id, ol_quantity, ol_dist_info)
  from '/tmp/csv/order-line.csv' WITH CSV NULL AS 'NULL';

copy bmsql_new_order
  (no_w_id, no_d_id, no_o_id)
  from '/tmp/csv/new-order.csv' WITH CSV;
47 |
--------------------------------------------------------------------------------
/src/LoadData/LoadData.java:
--------------------------------------------------------------------------------
1 | /*
2 | * LoadData - Load Sample Data directly into database tables or into
3 | * CSV files using multiple parallel workers.
4 | *
5 | * Copyright (C) 2016, Denis Lussier
6 | * Copyright (C) 2016, Jan Wieck
7 | *
8 | */
9 |
10 | import java.sql.*;
11 | import java.util.*;
12 | import java.io.*;
13 | import java.lang.Integer;
14 |
15 | public class LoadData
16 | {
17 | private static Properties ini = new Properties();
18 | private static String db;
19 | private static Properties dbProps;
20 | private static jTPCCRandom rnd;
21 | private static String fileLocation = null;
22 | private static String csvNullValue = null;
23 |
24 | private static int numWarehouses;
25 | private static int numWorkers;
26 | private static int nextJob = 0;
27 | private static Object nextJobLock = new Object();
28 |
29 | private static LoadDataWorker[] workers;
30 | private static Thread[] workerThreads;
31 |
32 | private static String[] argv;
33 |
34 | private static boolean writeCSV = false;
35 | private static BufferedWriter configCSV = null;
36 | private static BufferedWriter itemCSV = null;
37 | private static BufferedWriter warehouseCSV = null;
38 | private static BufferedWriter districtCSV = null;
39 | private static BufferedWriter stockCSV = null;
40 | private static BufferedWriter customerCSV = null;
41 | private static BufferedWriter historyCSV = null;
42 | private static BufferedWriter orderCSV = null;
43 | private static BufferedWriter orderLineCSV = null;
44 | private static BufferedWriter newOrderCSV = null;
45 |
46 | public static void main(String[] args) {
47 | int i;
48 |
49 | System.out.println("Starting BenchmarkSQL LoadData");
50 | System.out.println("");
51 |
52 | /*
53 | * Load the Benchmark properties file.
54 | */
55 | try
56 | {
57 | ini.load(new FileInputStream(System.getProperty("prop")));
58 | }
59 | catch (IOException e)
60 | {
61 | System.err.println("ERROR: " + e.getMessage());
62 | System.exit(1);
63 | }
64 | argv = args;
65 |
66 | /*
67 | * Initialize the global Random generator that picks the
68 | * C values for the load.
69 | */
70 | rnd = new jTPCCRandom();
71 |
72 | /*
73 | * Load the JDBC driver and prepare the db and dbProps.
74 | */
75 | try {
76 | Class.forName(iniGetString("driver"));
77 | }
78 | catch (Exception e)
79 | {
80 | System.err.println("ERROR: cannot load JDBC driver - " +
81 | e.getMessage());
82 | System.exit(1);
83 | }
84 | db = iniGetString("conn");
85 | dbProps = new Properties();
86 | dbProps.setProperty("user", iniGetString("user"));
87 | dbProps.setProperty("password", iniGetString("password"));
88 |
89 | /*
90 | * Parse other vital information from the props file.
91 | */
92 | numWarehouses = iniGetInt("warehouses");
93 | numWorkers = iniGetInt("loadWorkers", 4);
94 | fileLocation = iniGetString("fileLocation");
95 | csvNullValue = iniGetString("csvNullValue", "NULL");
96 |
97 | /*
98 | * If CSV files are requested, open them all.
99 | */
100 | if (fileLocation != null)
101 | {
102 | writeCSV = true;
103 |
104 | try
105 | {
106 | configCSV = new BufferedWriter(new FileWriter(fileLocation +
107 | "config.csv"));
108 | itemCSV = new BufferedWriter(new FileWriter(fileLocation +
109 | "item.csv"));
110 | warehouseCSV = new BufferedWriter(new FileWriter(fileLocation +
111 | "warehouse.csv"));
112 | districtCSV = new BufferedWriter(new FileWriter(fileLocation +
113 | "district.csv"));
114 | stockCSV = new BufferedWriter(new FileWriter(fileLocation +
115 | "stock.csv"));
116 | customerCSV = new BufferedWriter(new FileWriter(fileLocation +
117 | "customer.csv"));
118 | historyCSV = new BufferedWriter(new FileWriter(fileLocation +
119 | "cust-hist.csv"));
120 | orderCSV = new BufferedWriter(new FileWriter(fileLocation +
121 | "order.csv"));
122 | orderLineCSV = new BufferedWriter(new FileWriter(fileLocation +
123 | "order-line.csv"));
124 | newOrderCSV = new BufferedWriter(new FileWriter(fileLocation +
125 | "new-order.csv"));
126 | }
127 | catch (IOException ie)
128 | {
129 | System.err.println(ie.getMessage());
130 | System.exit(3);
131 | }
132 | }
133 |
134 | System.out.println("");
135 |
136 | /*
137 | * Create the number of requested workers and start them.
138 | */
139 | workers = new LoadDataWorker[numWorkers];
140 | workerThreads = new Thread[numWorkers];
141 | for (i = 0; i < numWorkers; i++)
142 | {
143 | Connection dbConn;
144 |
145 | try
146 | {
147 | dbConn = DriverManager.getConnection(db, dbProps);
148 | dbConn.setAutoCommit(false);
149 | if (writeCSV)
150 | workers[i] = new LoadDataWorker(i, csvNullValue,
151 | rnd.newRandom());
152 | else
153 | workers[i] = new LoadDataWorker(i, dbConn,
154 | rnd.newRandom());
155 | workerThreads[i] = new Thread(workers[i]);
156 | workerThreads[i].start();
157 | }
158 | catch (SQLException se)
159 | {
160 | System.err.println("ERROR: " + se.getMessage());
161 | System.exit(3);
162 | return;
163 | }
164 |
165 | }
166 |
167 | for (i = 0; i < numWorkers; i++)
168 | {
169 | try {
170 | workerThreads[i].join();
171 | }
172 | catch (InterruptedException ie)
173 | {
174 | System.err.println("ERROR: worker " + i + " - " +
175 | ie.getMessage());
176 | System.exit(4);
177 | }
178 | }
179 |
180 | /*
181 | * Close the CSV files if we are writing them.
182 | */
183 | if (writeCSV)
184 | {
185 | try
186 | {
187 | configCSV.close();
188 | itemCSV.close();
189 | warehouseCSV.close();
190 | districtCSV.close();
191 | stockCSV.close();
192 | customerCSV.close();
193 | historyCSV.close();
194 | orderCSV.close();
195 | orderLineCSV.close();
196 | newOrderCSV.close();
197 | }
198 | catch (IOException ie)
199 | {
200 | System.err.println(ie.getMessage());
201 | System.exit(3);
202 | }
203 | }
204 | } // End of main()
205 |
206 | public static void configAppend(StringBuffer buf)
207 | throws IOException
208 | {
209 | synchronized(configCSV)
210 | {
211 | configCSV.write(buf.toString());
212 | }
213 | buf.setLength(0);
214 | }
215 |
216 | public static void itemAppend(StringBuffer buf)
217 | throws IOException
218 | {
219 | synchronized(itemCSV)
220 | {
221 | itemCSV.write(buf.toString());
222 | }
223 | buf.setLength(0);
224 | }
225 |
226 | public static void warehouseAppend(StringBuffer buf)
227 | throws IOException
228 | {
229 | synchronized(warehouseCSV)
230 | {
231 | warehouseCSV.write(buf.toString());
232 | }
233 | buf.setLength(0);
234 | }
235 |
236 | public static void districtAppend(StringBuffer buf)
237 | throws IOException
238 | {
239 | synchronized(districtCSV)
240 | {
241 | districtCSV.write(buf.toString());
242 | }
243 | buf.setLength(0);
244 | }
245 |
246 | public static void stockAppend(StringBuffer buf)
247 | throws IOException
248 | {
249 | synchronized(stockCSV)
250 | {
251 | stockCSV.write(buf.toString());
252 | }
253 | buf.setLength(0);
254 | }
255 |
256 | public static void customerAppend(StringBuffer buf)
257 | throws IOException
258 | {
259 | synchronized(customerCSV)
260 | {
261 | customerCSV.write(buf.toString());
262 | }
263 | buf.setLength(0);
264 | }
265 |
266 | public static void historyAppend(StringBuffer buf)
267 | throws IOException
268 | {
269 | synchronized(historyCSV)
270 | {
271 | historyCSV.write(buf.toString());
272 | }
273 | buf.setLength(0);
274 | }
275 |
276 | public static void orderAppend(StringBuffer buf)
277 | throws IOException
278 | {
279 | synchronized(orderCSV)
280 | {
281 | orderCSV.write(buf.toString());
282 | }
283 | buf.setLength(0);
284 | }
285 |
286 | public static void orderLineAppend(StringBuffer buf)
287 | throws IOException
288 | {
289 | synchronized(orderLineCSV)
290 | {
291 | orderLineCSV.write(buf.toString());
292 | }
293 | buf.setLength(0);
294 | }
295 |
296 | public static void newOrderAppend(StringBuffer buf)
297 | throws IOException
298 | {
299 | synchronized(newOrderCSV)
300 | {
301 | newOrderCSV.write(buf.toString());
302 | }
303 | buf.setLength(0);
304 | }
305 |
306 | public static int getNextJob()
307 | {
308 | int job;
309 |
310 | synchronized(nextJobLock)
311 | {
312 | if (nextJob > numWarehouses)
313 | job = -1;
314 | else
315 | job = nextJob++;
316 | }
317 |
318 | return job;
319 | }
320 |
321 | public static int getNumWarehouses()
322 | {
323 | return numWarehouses;
324 | }
325 |
326 | private static String iniGetString(String name)
327 | {
328 | String strVal = null;
329 |
330 | for (int i = 0; i < argv.length - 1; i += 2)
331 | {
332 | if (name.toLowerCase().equals(argv[i].toLowerCase()))
333 | {
334 | strVal = argv[i + 1];
335 | break;
336 | }
337 | }
338 |
339 | if (strVal == null)
340 | strVal = ini.getProperty(name);
341 |
342 | if (strVal == null)
343 | System.out.println(name + " (not defined)");
344 | else
345 | if (name.equals("password"))
346 | System.out.println(name + "=***********");
347 | else
348 | System.out.println(name + "=" + strVal);
349 | return strVal;
350 | }
351 |
352 | private static String iniGetString(String name, String defVal)
353 | {
354 | String strVal = null;
355 |
356 | for (int i = 0; i < argv.length - 1; i += 2)
357 | {
358 | if (name.toLowerCase().equals(argv[i].toLowerCase()))
359 | {
360 | strVal = argv[i + 1];
361 | break;
362 | }
363 | }
364 |
365 | if (strVal == null)
366 | strVal = ini.getProperty(name);
367 |
368 | if (strVal == null)
369 | {
370 | System.out.println(name + " (not defined - using default '" +
371 | defVal + "')");
372 | return defVal;
373 | }
374 | else
375 | if (name.equals("password"))
376 | System.out.println(name + "=***********");
377 | else
378 | System.out.println(name + "=" + strVal);
379 | return strVal;
380 | }
381 |
382 | private static int iniGetInt(String name)
383 | {
384 | String strVal = iniGetString(name);
385 |
386 | if (strVal == null)
387 | return 0;
388 | return Integer.parseInt(strVal);
389 | }
390 |
391 | private static int iniGetInt(String name, int defVal)
392 | {
393 | String strVal = iniGetString(name);
394 |
395 | if (strVal == null)
396 | return defVal;
397 | return Integer.parseInt(strVal);
398 | }
399 | }
400 |
--------------------------------------------------------------------------------
/src/OSCollector/OSCollector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * OSCollector.java
3 | *
4 | * Copyright (C) 2016, Denis Lussier
5 | * Copyright (C) 2016, Jan Wieck
6 | *
7 | */
8 |
9 | import org.apache.log4j.*;
10 |
11 | import java.lang.*;
12 | import java.io.*;
13 | import java.util.*;
14 |
15 | public class OSCollector
16 | {
17 | private String script;
18 | private int interval;
19 | private String sshAddress;
20 | private String devices;
21 | private File outputDir;
22 | private Logger log;
23 |
24 | private CollectData collector = null;
25 | private Thread collectorThread = null;
26 | private boolean endCollection = false;
27 | private Process collProc;
28 |
29 | private BufferedWriter resultCSVs[];
30 |
31 | public OSCollector(String script, int runID, int interval,
32 | String sshAddress, String devices, File outputDir,
33 | Logger log)
34 | {
35 | List cmdLine = new ArrayList();
36 | String deviceNames[];
37 |
38 | this.script = script;
39 | this.interval = interval;
40 | this.sshAddress = sshAddress;
41 | this.devices = devices;
42 | this.outputDir = outputDir;
43 | this.log = log;
44 |
45 | if (sshAddress != null)
46 | {
47 | cmdLine.add("ssh");
48 | // cmdLine.add("-t");
49 | cmdLine.add(sshAddress);
50 | }
51 | cmdLine.add("python2");
52 | cmdLine.add("-");
53 | cmdLine.add(Integer.toString(runID));
54 | cmdLine.add(Integer.toString(interval));
55 | if (devices != null)
56 | deviceNames = devices.split("[ \t]+");
57 | else
58 | deviceNames = new String[0];
59 |
60 | try
61 | {
62 | resultCSVs = new BufferedWriter[deviceNames.length + 1];
63 | resultCSVs[0] = new BufferedWriter(new FileWriter(
64 | new File(outputDir, "sys_info.csv")));
65 | for (int i = 0; i < deviceNames.length; i++)
66 | {
67 | cmdLine.add(deviceNames[i]);
68 | resultCSVs[i + 1] = new BufferedWriter(new FileWriter(
69 | new File(outputDir, deviceNames[i] + ".csv")));
70 | }
71 | }
72 | catch (Exception e)
73 | {
74 | log.error("OSCollector, " + e.getMessage());
75 | System.exit(1);
76 | }
77 |
78 | try
79 | {
80 | ProcessBuilder pb = new ProcessBuilder(cmdLine);
81 | pb.redirectErrorStream(true);
82 |
83 | collProc = pb.start();
84 |
85 | BufferedReader scriptReader = new BufferedReader(new FileReader(script));
86 | BufferedWriter scriptWriter = new BufferedWriter(
87 | new OutputStreamWriter(collProc.getOutputStream()));
88 | String line;
89 | while ((line = scriptReader.readLine()) != null)
90 | {
91 | scriptWriter.write(line);
92 | scriptWriter.newLine();
93 | }
94 | scriptWriter.close();
95 | scriptReader.close();
96 | }
97 | catch (Exception e)
98 | {
99 | log.error("OSCollector " + e.getMessage());
100 | e.printStackTrace();
101 | System.exit(1);
102 | }
103 |
104 | collector = new CollectData(this);
105 | collectorThread = new Thread(this.collector);
106 | collectorThread.start();
107 | }
108 |
109 | public void stop()
110 | {
111 | endCollection = true;
112 | try
113 | {
114 | collectorThread.join();
115 | }
116 | catch (InterruptedException ie)
117 | {
118 | log.error("OSCollector, " + ie.getMessage());
119 | return;
120 | }
121 | }
122 |
123 | private class CollectData implements Runnable
124 | {
125 | private OSCollector parent;
126 |
127 | public CollectData(OSCollector parent)
128 | {
129 | this.parent = parent;
130 | }
131 |
132 | public void run()
133 | {
134 | BufferedReader osData;
135 | String line;
136 | int resultIdx = 0;
137 |
138 | osData = new BufferedReader(new InputStreamReader(
139 | parent.collProc.getInputStream()));
140 |
141 | while (!endCollection || resultIdx != 0)
142 | {
143 | try
144 | {
145 | line = osData.readLine();
146 | if (line == null)
147 | {
148 | log.error("OSCollector, unexpected EOF " +
149 | "while reading from external " +
150 | "helper process");
151 | break;
152 | }
153 | parent.resultCSVs[resultIdx].write(line);
154 | parent.resultCSVs[resultIdx].newLine();
155 | parent.resultCSVs[resultIdx].flush();
156 | if (++resultIdx >= parent.resultCSVs.length)
157 | resultIdx = 0;
158 | }
159 | catch (Exception e)
160 | {
161 | log.error("OSCollector, " + e.getMessage());
162 | break;
163 | }
164 | }
165 |
166 | try
167 | {
168 | osData.close();
169 | for (int i = 0; i < parent.resultCSVs.length; i++)
170 | parent.resultCSVs[i].close();
171 | }
172 | catch (Exception e)
173 | {
174 | log.error("OSCollector, " + e.getMessage());
175 | }
176 | }
177 | }
178 | }
179 |
180 |
181 |
--------------------------------------------------------------------------------
/src/client/jTPCCConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * jTPCCConfig - Basic configuration parameters for jTPCC
3 | *
4 | * Copyright (C) 2003, Raul Barbosa
5 | * Copyright (C) 2004-2016, Denis Lussier
6 | * Copyright (C) 2016, Jan Wieck
7 | *
8 | */
9 |
10 | import java.text.*;
11 |
/*
 * jTPCCConfig
 *
 * Shared constants for the jTPCC benchmark driver and loader.
 * All fields of an interface are implicitly public, static and final.
 */
public interface jTPCCConfig
{
    /* Benchmark driver version string. */
    String JTPCCVERSION = "5.1devel";

    /* Supported database system types. */
    int DB_UNKNOWN  = 0,
	DB_FIREBIRD = 1,
	DB_ORACLE   = 2,
	DB_POSTGRES = 3;

    /* TPC-C transaction type identifiers. */
    int NEW_ORDER    = 1,
	PAYMENT      = 2,
	ORDER_STATUS = 3,
	DELIVERY     = 4,
	STOCK_LEVEL  = 5;

    /* Syllables used to construct customer last names. */
    String[] nameTokens = {"BAR", "OUGHT", "ABLE", "PRI", "PRES",
			   "ESE", "ANTI", "CALLY", "ATION", "EING"};

    /* NOTE(review): SimpleDateFormat is not thread-safe; this shared
       instance assumes callers do not format concurrently — confirm. */
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

    /* Commit every n records in LoadData. */
    int configCommitCount = 10000;

    int configWhseCount   = 10;
    int configItemCount   = 100000;	// tpc-c std = 100,000
    int configDistPerWhse = 10;		// tpc-c std = 10
    int configCustPerDist = 3000;	// tpc-c std = 3,000
}
38 |
--------------------------------------------------------------------------------
/src/client/jTPCCConnection.java:
--------------------------------------------------------------------------------
1 | /*
2 | * jTPCCConnection
3 | *
4 | * One connection to the database. Used by either the old style
5 | * Terminal or the new TimedSUT.
6 | *
7 | * Copyright (C) 2004-2016, Denis Lussier
8 | * Copyright (C) 2016, Jan Wieck
9 | *
10 | */
11 |
12 | import java.util.*;
13 | import java.sql.*;
14 |
/*
 * jTPCCConnection holds one JDBC connection together with all the
 * PreparedStatements (and, for Oracle, callable-statement text) that
 * the five TPC-C transaction types need on that connection.
 * Statements are prepared once in the constructor so the transaction
 * code only binds parameters and executes.
 */
public class jTPCCConnection
{
    private Connection dbConn = null;
    private int dbType = 0;

    /* Statements for the NEW_ORDER transaction. */
    public PreparedStatement stmtNewOrderSelectWhseCust;
    public PreparedStatement stmtNewOrderSelectDist;
    public PreparedStatement stmtNewOrderUpdateDist;
    public PreparedStatement stmtNewOrderInsertOrder;
    public PreparedStatement stmtNewOrderInsertNewOrder;
    public PreparedStatement stmtNewOrderSelectStock;
    public PreparedStatement stmtNewOrderSelectItem;
    public PreparedStatement stmtNewOrderUpdateStock;
    public PreparedStatement stmtNewOrderInsertOrderLine;
    public PreparedStatement stmtNewOrderStoredProc;
    public String stmtNewOrderStoredProcOracle;

    /* Statements for the PAYMENT transaction. */
    public PreparedStatement stmtPaymentSelectWarehouse;
    public PreparedStatement stmtPaymentSelectDistrict;
    public PreparedStatement stmtPaymentSelectCustomerListByLast;
    public PreparedStatement stmtPaymentSelectCustomer;
    public PreparedStatement stmtPaymentSelectCustomerData;
    public PreparedStatement stmtPaymentUpdateWarehouse;
    public PreparedStatement stmtPaymentUpdateDistrict;
    public PreparedStatement stmtPaymentUpdateCustomer;
    public PreparedStatement stmtPaymentUpdateCustomerWithData;
    public PreparedStatement stmtPaymentInsertHistory;
    public PreparedStatement stmtPaymentStoredProc;
    public String stmtPaymentStoredProcOracle;

    /* Statements for the ORDER_STATUS transaction. */
    public PreparedStatement stmtOrderStatusSelectCustomerListByLast;
    public PreparedStatement stmtOrderStatusSelectCustomer;
    public PreparedStatement stmtOrderStatusSelectLastOrder;
    public PreparedStatement stmtOrderStatusSelectOrderLine;
    public PreparedStatement stmtOrderStatusStoredProc;
    public String stmtOrderStatusStoredProcOracle;

    /* Statements for the STOCK_LEVEL transaction. */
    public PreparedStatement stmtStockLevelSelectLow;
    public PreparedStatement stmtStockLevelStoredProc;
    public String stmtStockLevelStoredProcOracle;

    /* Statements for the background part of the DELIVERY transaction. */
    public PreparedStatement stmtDeliveryBGSelectOldestNewOrder;
    public PreparedStatement stmtDeliveryBGDeleteOldestNewOrder;
    public PreparedStatement stmtDeliveryBGSelectOrder;
    public PreparedStatement stmtDeliveryBGUpdateOrder;
    public PreparedStatement stmtDeliveryBGSelectSumOLAmount;
    public PreparedStatement stmtDeliveryBGUpdateOrderLine;
    public PreparedStatement stmtDeliveryBGUpdateCustomer;
    public PreparedStatement stmtDeliveryBGStoredProc;
    public String stmtDeliveryBGStoredProcOracle;

    /*
     * jTPCCConnection(dbConn, dbType)
     *
     * Wraps an already-open JDBC connection and prepares all
     * statements on it. dbType selects the database-specific
     * stored procedure variants (see jTPCCConfig.DB_*).
     */
    public jTPCCConnection(Connection dbConn, int dbType)
	throws SQLException
    {
	this.dbConn = dbConn;
	this.dbType = dbType;

	// PreparedStatements for NEW_ORDER
	stmtNewOrderSelectWhseCust = dbConn.prepareStatement(
		"SELECT c_discount, c_last, c_credit, w_tax " +
		"    FROM bmsql_customer " +
		"    JOIN bmsql_warehouse ON (w_id = c_w_id) " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
	stmtNewOrderSelectDist = dbConn.prepareStatement(
		"SELECT d_tax, d_next_o_id " +
		"    FROM bmsql_district " +
		"    WHERE d_w_id = ? AND d_id = ? " +
		"    FOR UPDATE");
	stmtNewOrderUpdateDist = dbConn.prepareStatement(
		"UPDATE bmsql_district " +
		"    SET d_next_o_id = d_next_o_id + 1 " +
		"    WHERE d_w_id = ? AND d_id = ?");
	stmtNewOrderInsertOrder = dbConn.prepareStatement(
		"INSERT INTO bmsql_oorder (" +
		"    o_id, o_d_id, o_w_id, o_c_id, o_entry_d, " +
		"    o_ol_cnt, o_all_local) " +
		"VALUES (?, ?, ?, ?, ?, ?, ?)");
	stmtNewOrderInsertNewOrder = dbConn.prepareStatement(
		"INSERT INTO bmsql_new_order (" +
		"    no_o_id, no_d_id, no_w_id) " +
		"VALUES (?, ?, ?)");
	stmtNewOrderSelectStock = dbConn.prepareStatement(
		"SELECT s_quantity, s_data, " +
		"       s_dist_01, s_dist_02, s_dist_03, s_dist_04, " +
		"       s_dist_05, s_dist_06, s_dist_07, s_dist_08, " +
		"       s_dist_09, s_dist_10 " +
		"    FROM bmsql_stock " +
		"    WHERE s_w_id = ? AND s_i_id = ? " +
		"    FOR UPDATE");
	stmtNewOrderSelectItem = dbConn.prepareStatement(
		"SELECT i_price, i_name, i_data " +
		"    FROM bmsql_item " +
		"    WHERE i_id = ?");
	stmtNewOrderUpdateStock = dbConn.prepareStatement(
		"UPDATE bmsql_stock " +
		"    SET s_quantity = ?, s_ytd = s_ytd + ?, " +
		"        s_order_cnt = s_order_cnt + 1, " +
		"        s_remote_cnt = s_remote_cnt + ? " +
		"    WHERE s_w_id = ? AND s_i_id = ?");
	stmtNewOrderInsertOrderLine = dbConn.prepareStatement(
		"INSERT INTO bmsql_order_line (" +
		"    ol_o_id, ol_d_id, ol_w_id, ol_number, " +
		"    ol_i_id, ol_supply_w_id, ol_quantity, " +
		"    ol_amount, ol_dist_info) " +
		"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)");

	/*
	 * Database-specific stored procedure variant for NEW_ORDER.
	 * NOTE(review): the Oracle package prefix "tpccc_oracle"
	 * (triple 'c') is used consistently for all Oracle calls in
	 * this class — verify it matches
	 * run/sql.oracle/storedProcedureCreates.sql.
	 */
	switch (dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtNewOrderStoredProc = dbConn.prepareStatement(
			"SELECT * FROM bmsql_proc_new_order (?, ?, ?, ?, ?, ?)");
		break;

	    case jTPCCConfig.DB_ORACLE:
		stmtNewOrderStoredProcOracle =
			"{call tpccc_oracle.oracle_proc_new_order(?, ?, ?, ?, ?, ?, ?, ?, ?, ?" +
			",?, ?, ?, ?, ?, ?, ?, ?, ?, ?)}";
		break;
	}

	// PreparedStatements for PAYMENT
	stmtPaymentSelectWarehouse = dbConn.prepareStatement(
		"SELECT w_name, w_street_1, w_street_2, w_city, " +
		"       w_state, w_zip " +
		"    FROM bmsql_warehouse " +
		"    WHERE w_id = ? ");
	stmtPaymentSelectDistrict = dbConn.prepareStatement(
		"SELECT d_name, d_street_1, d_street_2, d_city, " +
		"       d_state, d_zip " +
		"    FROM bmsql_district " +
		"    WHERE d_w_id = ? AND d_id = ?");
	stmtPaymentSelectCustomerListByLast = dbConn.prepareStatement(
		"SELECT c_id " +
		"    FROM bmsql_customer " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? " +
		"    ORDER BY c_first");
	stmtPaymentSelectCustomer = dbConn.prepareStatement(
		"SELECT c_first, c_middle, c_last, c_street_1, c_street_2, " +
		"       c_city, c_state, c_zip, c_phone, c_since, c_credit, " +
		"       c_credit_lim, c_discount, c_balance " +
		"    FROM bmsql_customer " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ? " +
		"    FOR UPDATE");
	stmtPaymentSelectCustomerData = dbConn.prepareStatement(
		"SELECT c_data " +
		"    FROM bmsql_customer " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
	stmtPaymentUpdateWarehouse = dbConn.prepareStatement(
		"UPDATE bmsql_warehouse " +
		"    SET w_ytd = w_ytd + ? " +
		"    WHERE w_id = ?");
	stmtPaymentUpdateDistrict = dbConn.prepareStatement(
		"UPDATE bmsql_district " +
		"    SET d_ytd = d_ytd + ? " +
		"    WHERE d_w_id = ? AND d_id = ?");
	stmtPaymentUpdateCustomer = dbConn.prepareStatement(
		"UPDATE bmsql_customer " +
		"    SET c_balance = c_balance - ?, " +
		"        c_ytd_payment = c_ytd_payment + ?, " +
		"        c_payment_cnt = c_payment_cnt + 1 " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
	stmtPaymentUpdateCustomerWithData = dbConn.prepareStatement(
		"UPDATE bmsql_customer " +
		"    SET c_balance = c_balance - ?, " +
		"        c_ytd_payment = c_ytd_payment + ?, " +
		"        c_payment_cnt = c_payment_cnt + 1, " +
		"        c_data = ? " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
	stmtPaymentInsertHistory = dbConn.prepareStatement(
		"INSERT INTO bmsql_history (" +
		"    h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, " +
		"    h_date, h_amount, h_data) " +
		"VALUES (?, ?, ?, ?, ?, ?, ?, ?)");

	/* Database-specific stored procedure variant for PAYMENT. */
	switch (dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtPaymentStoredProc = dbConn.prepareStatement(
			"SELECT * FROM bmsql_proc_payment (?, ?, ?, ?, ?, ?, ?)");
		break;

	    case jTPCCConfig.DB_ORACLE:
		stmtPaymentStoredProcOracle =
			"{call tpccc_oracle.oracle_proc_payment(?, ?, ?, ?, ?, ?, ?, ?, ?, ?," +
			" ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?," +
			" ?, ?, ?, ?, ?)}";
		break;
	}

	// PreparedStatements for ORDER_STATUS
	stmtOrderStatusSelectCustomerListByLast = dbConn.prepareStatement(
		"SELECT c_id " +
		"    FROM bmsql_customer " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? " +
		"    ORDER BY c_first");
	stmtOrderStatusSelectCustomer = dbConn.prepareStatement(
		"SELECT c_first, c_middle, c_last, c_balance " +
		"    FROM bmsql_customer " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");
	stmtOrderStatusSelectLastOrder = dbConn.prepareStatement(
		"SELECT o_id, o_entry_d, o_carrier_id " +
		"    FROM bmsql_oorder " +
		"    WHERE o_w_id = ? AND o_d_id = ? AND o_c_id = ? " +
		"      AND o_id = (" +
		"          SELECT max(o_id) " +
		"              FROM bmsql_oorder " +
		"              WHERE o_w_id = ? AND o_d_id = ? AND o_c_id = ?" +
		"          )");
	stmtOrderStatusSelectOrderLine = dbConn.prepareStatement(
		"SELECT ol_i_id, ol_supply_w_id, ol_quantity, " +
		"       ol_amount, ol_delivery_d " +
		"    FROM bmsql_order_line " +
		"    WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ? " +
		"    ORDER BY ol_w_id, ol_d_id, ol_o_id, ol_number");

	/* Database-specific stored procedure variant for ORDER_STATUS. */
	switch(dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtOrderStatusStoredProc = dbConn.prepareStatement(
			"SELECT * FROM bmsql_proc_order_status (?, ?, ?, ?)");
		break;

	    case jTPCCConfig.DB_ORACLE:
		stmtOrderStatusStoredProcOracle =
			"{call tpccc_oracle.oracle_proc_order_status(?, ?, ?, " +
			"?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)}";
		break;
	}

	/*
	 * PreparedStatements for STOCK_LEVEL. PostgreSQL requires an
	 * alias for the derived table ("AS L"), the generic variant
	 * does not use one.
	 */
	switch (dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtStockLevelSelectLow = dbConn.prepareStatement(
			"SELECT count(*) AS low_stock FROM (" +
			"    SELECT s_w_id, s_i_id, s_quantity " +
			"        FROM bmsql_stock " +
			"        WHERE s_w_id = ? AND s_quantity < ? AND s_i_id IN (" +
			"            SELECT ol_i_id " +
			"                FROM bmsql_district " +
			"                JOIN bmsql_order_line ON ol_w_id = d_w_id " +
			"                                     AND ol_d_id = d_id " +
			"                                     AND ol_o_id >= d_next_o_id - 20 " +
			"                                     AND ol_o_id < d_next_o_id " +
			"                WHERE d_w_id = ? AND d_id = ? " +
			"        ) " +
			"    ) AS L");
		break;

	    default:
		stmtStockLevelSelectLow = dbConn.prepareStatement(
			"SELECT count(*) AS low_stock FROM (" +
			"    SELECT s_w_id, s_i_id, s_quantity " +
			"        FROM bmsql_stock " +
			"        WHERE s_w_id = ? AND s_quantity < ? AND s_i_id IN (" +
			"            SELECT ol_i_id " +
			"                FROM bmsql_district " +
			"                JOIN bmsql_order_line ON ol_w_id = d_w_id " +
			"                                     AND ol_d_id = d_id " +
			"                                     AND ol_o_id >= d_next_o_id - 20 " +
			"                                     AND ol_o_id < d_next_o_id " +
			"                WHERE d_w_id = ? AND d_id = ? " +
			"        ) " +
			"    )");
		break;
	}

	/* Database-specific stored procedure variant for STOCK_LEVEL. */
	switch (dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtStockLevelStoredProc = dbConn.prepareStatement(
			"SELECT * FROM bmsql_proc_stock_level (?, ?, ?)");
		break;

	    case jTPCCConfig.DB_ORACLE:
		stmtStockLevelStoredProcOracle =
			"{call tpccc_oracle.oracle_proc_stock_level(?, ?, ?, ?)}";
		break;

	}

	// PreparedStatements for DELIVERY_BG
	stmtDeliveryBGSelectOldestNewOrder = dbConn.prepareStatement(
		"SELECT no_o_id " +
		"    FROM bmsql_new_order " +
		"    WHERE no_w_id = ? AND no_d_id = ? " +
		"    ORDER BY no_o_id ASC");
	stmtDeliveryBGDeleteOldestNewOrder = dbConn.prepareStatement(
		"DELETE FROM bmsql_new_order " +
		"    WHERE no_w_id = ? AND no_d_id = ? AND no_o_id = ?");
	stmtDeliveryBGSelectOrder = dbConn.prepareStatement(
		"SELECT o_c_id " +
		"    FROM bmsql_oorder " +
		"    WHERE o_w_id = ? AND o_d_id = ? AND o_id = ?");
	stmtDeliveryBGUpdateOrder = dbConn.prepareStatement(
		"UPDATE bmsql_oorder " +
		"    SET o_carrier_id = ? " +
		"    WHERE o_w_id = ? AND o_d_id = ? AND o_id = ?");
	stmtDeliveryBGSelectSumOLAmount = dbConn.prepareStatement(
		"SELECT sum(ol_amount) AS sum_ol_amount " +
		"    FROM bmsql_order_line " +
		"    WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ?");
	stmtDeliveryBGUpdateOrderLine = dbConn.prepareStatement(
		"UPDATE bmsql_order_line " +
		"    SET ol_delivery_d = ? " +
		"    WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ?");
	stmtDeliveryBGUpdateCustomer = dbConn.prepareStatement(
		"UPDATE bmsql_customer " +
		"    SET c_balance = c_balance + ?, " +
		"        c_delivery_cnt = c_delivery_cnt + 1 " +
		"    WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?");

	/* Database-specific stored procedure variant for DELIVERY_BG. */
	switch (dbType)
	{
	    case jTPCCConfig.DB_POSTGRES:
		stmtDeliveryBGStoredProc = dbConn.prepareStatement(
			"SELECT * FROM bmsql_proc_delivery_bg(?, ?, ?)");
		break;

	    case jTPCCConfig.DB_ORACLE:
		stmtDeliveryBGStoredProcOracle =
			"call tpccc_oracle.oracle_proc_delivery_bg(?, ?, ?, ?)";
		break;
	}

    }


    /*
     * jTPCCConnection(connURL, connProps, dbType)
     *
     * Convenience constructor that opens a new JDBC connection
     * for the given URL and properties, then prepares all
     * statements on it.
     */
    public jTPCCConnection(String connURL, Properties connProps, int dbType)
	throws SQLException
    {
	this(DriverManager.getConnection(connURL, connProps), dbType);
    }

    /* Commits the current database transaction. */
    public void commit()
	throws SQLException
    {
	dbConn.commit();
    }

    /* Rolls back the current database transaction. */
    public void rollback()
	throws SQLException
    {
	dbConn.rollback();
    }

    /* Returns the underlying JDBC connection. */
    public Connection getConnection()
    {
	return this.dbConn;
    }
}
365 |
--------------------------------------------------------------------------------
/src/client/jTPCCRandom.java:
--------------------------------------------------------------------------------
1 | /*
2 | * jTPCCUtil - utility functions for the Open Source Java implementation of
3 | * the TPC-C benchmark
4 | *
5 | * Copyright (C) 2003, Raul Barbosa
6 | * Copyright (C) 2004-2016, Denis Lussier
7 | * Copyright (C) 2016, Jan Wieck
8 | *
9 | */
10 |
11 |
12 | import java.io.*;
13 | import java.sql.*;
14 | import java.util.*;
15 | import java.text.*;
16 |
17 | public class jTPCCRandom
18 | {
19 | private static final char[] aStringChars = {
20 | 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
21 | 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
22 | 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
23 | 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
24 | '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
25 | private static final String[] cLastTokens = {
26 | "BAR", "OUGHT", "ABLE", "PRI", "PRES",
27 | "ESE", "ANTI", "CALLY", "ATION", "EING"};
28 |
29 | private static long nURandCLast;
30 | private static long nURandCC_ID;
31 | private static long nURandCI_ID;
32 | private static boolean initialized = false;
33 |
34 | private Random random;
35 |
36 | private String string_A_6[];
37 | private String string_A_8[];
38 | private String string_A_10[];
39 | private String string_A_12[];
40 | private String string_A_14[];
41 | private String string_A_24[];
42 | private String string_A_26[];
43 | private String string_A_300[];
44 |
45 | private String string_B_4[];
46 | private String string_B_8[];
47 | private String string_B_10[];
48 | private String string_B_12[];
49 | private String string_B_24[];
50 | private String string_B_200[];
51 |
52 | /*
53 | * jTPCCRandom()
54 | *
55 | * Used to create the master jTPCCRandom() instance for loading
56 | * the database. See below.
57 | */
58 | jTPCCRandom()
59 | {
60 | if (initialized)
61 | throw new IllegalStateException("Global instance exists");
62 |
63 | this.random = new Random(System.nanoTime());
64 | jTPCCRandom.nURandCLast = nextLong(0, 255);
65 | jTPCCRandom.nURandCC_ID = nextLong(0, 1023);
66 | jTPCCRandom.nURandCI_ID = nextLong(0, 8191);
67 |
68 | initialized = true;
69 | System.out.println("random initialized");
70 | }
71 |
72 | /*
73 | * jTPCCRandom(CLoad)
74 | *
75 | * Used to create the master jTPCCRandom instance for running
76 | * a benchmark load.
77 | *
78 | * TPC-C 2.1.6 defines the rules for picking the C values of
79 | * the non-uniform random number generator. In particular
80 | * 2.1.6.1 defines what numbers for the C value for generating
81 | * C_LAST must be excluded from the possible range during run
82 | * time, based on the number used during the load.
83 | */
84 | jTPCCRandom(long CLoad)
85 | {
86 | long delta;
87 |
88 | if (initialized)
89 | throw new IllegalStateException("Global instance exists");
90 |
91 | this.random = new Random(System.nanoTime());
92 | jTPCCRandom.nURandCC_ID = nextLong(0, 1023);
93 | jTPCCRandom.nURandCI_ID = nextLong(0, 8191);
94 |
95 | do
96 | {
97 | jTPCCRandom.nURandCLast = nextLong(0, 255);
98 |
99 | delta = Math.abs(jTPCCRandom.nURandCLast - CLoad);
100 | if (delta == 96 || delta == 112)
101 | continue;
102 | if (delta < 65 || delta > 119)
103 | continue;
104 | break;
105 | } while(true);
106 |
107 | initialized = true;
108 | }
109 |
110 | private jTPCCRandom(jTPCCRandom parent)
111 | {
112 | this.random = new Random(System.nanoTime());
113 | this.generateStrings();
114 | }
115 |
116 | /*
117 | * newRandom()
118 | *
119 | * Creates a derived random data generator to be used in another
120 | * thread of the current benchmark load or run process. As per
121 | * TPC-C 2.1.6 all terminals during a run must use the same C
122 | * values per field. The jTPCCRandom Class therefore cannot
123 | * generate them per instance, but each thread's instance must
124 | * inherit those numbers from a global instance.
125 | */
126 | jTPCCRandom newRandom()
127 | {
128 | return new jTPCCRandom(this);
129 | }
130 |
131 | private void generateStrings()
132 | {
133 | string_A_6 = new String[100];
134 | for (int i = 0; i < 100; i++)
135 | string_A_6[i] = getAString(6, 6);
136 | string_A_8 = new String[100];
137 | for (int i = 0; i < 100; i++)
138 | string_A_8[i] = getAString(8, 8);
139 | string_A_10 = new String[100];
140 | for (int i = 0; i < 100; i++)
141 | string_A_10[i] = getAString(10, 10);
142 | string_A_12 = new String[100];
143 | for (int i = 0; i < 100; i++)
144 | string_A_12[i] = getAString(12, 12);
145 | string_A_14 = new String[100];
146 | for (int i = 0; i < 100; i++)
147 | string_A_14[i] = getAString(14, 14);
148 | string_A_24 = new String[100];
149 | for (int i = 0; i < 100; i++)
150 | string_A_24[i] = getAString(24, 24);
151 | string_A_26 = new String[100];
152 | for (int i = 0; i < 100; i++)
153 | string_A_26[i] = getAString(26, 26);
154 | string_A_300 = new String[100];
155 | for (int i = 0; i < 100; i++)
156 | string_A_300[i] = getAString(300, 300);
157 |
158 | string_B_4 = new String[50];
159 | for (int i = 0; i < 50; i++)
160 | string_B_4[i] = getAString(i / 10, i / 10);
161 | string_B_8 = new String[90];
162 | for (int i = 0; i < 90; i++)
163 | string_B_8[i] = getAString(i / 10, i / 10);
164 | string_B_10 = new String[110];
165 | for (int i = 0; i < 110; i++)
166 | string_B_10[i] = getAString(i / 10, i / 10);
167 | string_B_12 = new String[130];
168 | for (int i = 0; i < 130; i++)
169 | string_B_12[i] = getAString(i / 10, i / 10);
170 | string_B_24 = new String[250];
171 | for (int i = 0; i < 250; i++)
172 | string_B_24[i] = getAString(i / 10, i / 10);
173 | string_B_200 = new String[2010];
174 | for (int i = 0; i < 2010; i++)
175 | string_B_200[i] = getAString(i / 10, i / 10);
176 | }
177 |
178 | /*
179 | * nextDouble(x, y)
180 | *
181 | * Produce double random number uniformly distributed in [x .. y]
182 | */
183 | public double nextDouble(double x, double y)
184 | {
185 | return random.nextDouble() * (y - x) + x;
186 | }
187 |
188 | /*
189 | * nextLong(x, y)
190 | *
191 | * Produce a random number uniformly distributed in [x .. y]
192 | */
193 | public long nextLong(long x, long y)
194 | {
195 | return (long)(random.nextDouble() * (y - x + 1) + x);
196 | }
197 |
198 | /*
199 | * nextInt(x, y)
200 | *
201 | * Produce a random number uniformly distributed in [x .. y]
202 | */
203 | public int nextInt(int x, int y)
204 | {
205 | return (int)(random.nextDouble() * (y - x + 1) + x);
206 | }
207 |
208 | /*
209 | * getAString(x, y)
210 | *
211 | * Procude a random alphanumeric string of length [x .. y].
212 | *
213 | * Note: TPC-C 4.3.2.2 asks for an "alhpanumeric" string.
214 | * Comment 1 about the character set does NOT mean that this
215 | * function must eventually produce 128 different characters,
216 | * only that the "character set" used to store this data must
217 | * be able to represent 128 different characters. '#@!%%ÄÖß'
218 | * is not an alphanumeric string. We can save ourselves a lot
219 | * of UTF8 related trouble by producing alphanumeric only
220 | * instead of cartoon style curse-bubbles.
221 | */
222 | public String getAString(long x, long y)
223 | {
224 | String result = new String();
225 | long len = nextLong(x, y);
226 | long have = 1;
227 |
228 | if (y <= 0)
229 | return result;
230 |
231 | result += aStringChars[(int)nextLong(0, 51)];
232 | while (have < len)
233 | {
234 | result += aStringChars[(int)nextLong(0, 61)];
235 | have++;
236 | }
237 |
238 | return result;
239 | }
240 |
241 | public String getAString_6_10()
242 | {
243 | String result = new String();
244 |
245 | result += string_A_6[nextInt(0, 99)];
246 | result += string_B_4[nextInt(0, 49)];
247 |
248 | return result;
249 | }
250 |
251 | public String getAString_8_16()
252 | {
253 | String result = new String();
254 |
255 | result += string_A_8[nextInt(0, 99)];
256 | result += string_B_8[nextInt(0, 89)];
257 |
258 | return result;
259 | }
260 |
261 | public String getAString_10_20()
262 | {
263 | String result = new String();
264 |
265 | result += string_A_10[nextInt(0, 99)];
266 | result += string_B_10[nextInt(0, 109)];
267 |
268 | return result;
269 | }
270 |
271 | public String getAString_12_24()
272 | {
273 | String result = new String();
274 |
275 | result += string_A_12[nextInt(0, 99)];
276 | result += string_B_12[nextInt(0, 129)];
277 |
278 | return result;
279 | }
280 |
281 | public String getAString_14_24()
282 | {
283 | String result = new String();
284 |
285 | result += string_A_14[nextInt(0, 99)];
286 | result += string_B_10[nextInt(0, 109)];
287 |
288 | return result;
289 | }
290 |
291 | public String getAString_24()
292 | {
293 | String result = new String();
294 |
295 | result += string_A_24[nextInt(0, 99)];
296 |
297 | return result;
298 | }
299 |
300 | public String getAString_26_50()
301 | {
302 | String result = new String();
303 |
304 | result += string_A_26[nextInt(0, 99)];
305 | result += string_B_24[nextInt(0, 249)];
306 |
307 | return result;
308 | }
309 |
310 | public String getAString_300_500()
311 | {
312 | String result = new String();
313 |
314 | result += string_A_300[nextInt(0, 99)];
315 | result += string_B_200[nextInt(0, 2009)];
316 |
317 | return result;
318 | }
319 |
320 | /*
321 | * getNString(x, y)
322 | *
323 | * Produce a random numeric string of length [x .. y].
324 | */
325 | public String getNString(long x, long y)
326 | {
327 | String result = new String();
328 | long len = nextLong(x, y);
329 | long have = 0;
330 |
331 | while (have < len)
332 | {
333 | result += (char)(nextLong((long)'0', (long)'9'));
334 | have++;
335 | }
336 |
337 | return result;
338 | }
339 |
340 | /*
341 | * getItemID()
342 | *
343 | * Produce a non uniform random Item ID.
344 | */
345 | public int getItemID()
346 | {
347 | return (int)((((nextLong(0, 8191) | nextLong(1, 100000)) + nURandCI_ID)
348 | % 100000) + 1);
349 | }
350 |
351 | /*
352 | * getCustomerID()
353 | *
354 | * Produce a non uniform random Customer ID.
355 | */
356 | public int getCustomerID()
357 | {
358 | return (int)((((nextLong(0, 1023) | nextLong(1, 3000)) + nURandCC_ID)
359 | % 3000) + 1);
360 | }
361 |
362 | /*
363 | * getCLast(num)
364 | *
365 | * Produce the syllable representation for C_LAST of [0 .. 999]
366 | */
367 | public String getCLast(int num)
368 | {
369 | String result = new String();
370 |
371 | for (int i = 0; i < 3; i++)
372 | {
373 | result = cLastTokens[num % 10] + result;
374 | num /= 10;
375 | }
376 |
377 | return result;
378 | }
379 |
380 | /*
381 | * getCLast()
382 | *
383 | * Procude a non uniform random Customer Last Name.
384 | */
385 | public String getCLast()
386 | {
387 | long num;
388 | num = (((nextLong(0, 255) | nextLong(0, 999)) + nURandCLast) % 1000);
389 | return getCLast((int)num);
390 | }
391 |
392 | public String getState()
393 | {
394 | String result = new String();
395 |
396 | result += (char)nextInt((int)'A', (int)'Z');
397 | result += (char)nextInt((int)'A', (int)'Z');
398 |
399 | return result;
400 | }
401 |
    /*
     * Methods to retrieve the C values used.
     *
     * These expose the per-run NURand constants so that another
     * component can reproduce the same non uniform distributions.
     */

    /* C constant used by getCLast() for non uniform C_LAST names. */
    public long getNURandCLast()
    {
	return nURandCLast;
    }

    /* C constant used by getCustomerID() for non uniform C_IDs. */
    public long getNURandCC_ID()
    {
	return nURandCC_ID;
    }

    /* C constant used by getItemID() for non uniform I_IDs. */
    public long getNURandCI_ID()
    {
	return nURandCI_ID;
    }
419 | } // end jTPCCRandom
420 |
--------------------------------------------------------------------------------
/src/client/jTPCCTerminal.java:
--------------------------------------------------------------------------------
1 | /*
2 | * jTPCCTerminal - Terminal emulator code for jTPCC (transactions)
3 | *
4 | * Copyright (C) 2003, Raul Barbosa
5 | * Copyright (C) 2004-2016, Denis Lussier
6 | * Copyright (C) 2016, Jan Wieck
7 | *
8 | */
9 | import org.apache.log4j.*;
10 |
11 | import java.io.*;
12 | import java.sql.*;
13 | import java.sql.Date;
14 | import java.util.*;
15 | import javax.swing.*;
16 |
17 |
18 | public class jTPCCTerminal implements jTPCCConfig, Runnable
19 | {
20 | private static org.apache.log4j.Logger log = Logger.getLogger(jTPCCTerminal.class);
21 |
22 | private String terminalName;
23 | private Connection conn = null;
24 | private Statement stmt = null;
25 | private Statement stmt1 = null;
26 | private ResultSet rs = null;
27 | private int terminalWarehouseID, terminalDistrictID;
28 | private boolean terminalWarehouseFixed;
29 | private boolean useStoredProcedures;
30 | private double paymentWeight;
31 | private double orderStatusWeight;
32 | private double deliveryWeight;
33 | private double stockLevelWeight;
34 | private int limPerMin_Terminal;
35 | private jTPCC parent;
36 | private jTPCCRandom rnd;
37 |
38 | private int transactionCount = 1;
39 | private int numTransactions;
40 | private int numWarehouses;
41 | private int newOrderCounter;
42 | private long totalTnxs = 1;
43 | private StringBuffer query = null;
44 | private int result = 0;
45 | private boolean stopRunningSignal = false;
46 |
47 | long terminalStartTime = 0;
48 | long transactionEnd = 0;
49 |
50 | jTPCCConnection db = null;
51 | int dbType = 0;
52 |
    /*
     * jTPCCTerminal()
     *
     * Construct one terminal worker over an already-open Connection.
     * Creates the two Statements used by this terminal (a general one
     * capped at 200 rows with fetch size 100, and a single-row one),
     * copies the run configuration, and derives a private random
     * generator from the parent's so terminals do not share RNG state.
     *
     * Throws SQLException if statement creation or configuration fails.
     */
    public jTPCCTerminal
	(String terminalName, int terminalWarehouseID, int terminalDistrictID,
	 Connection conn, int dbType,
	 int numTransactions, boolean terminalWarehouseFixed,
	 boolean useStoredProcedures,
	 double paymentWeight, double orderStatusWeight,
	 double deliveryWeight, double stockLevelWeight,
	 int numWarehouses, int limPerMin_Terminal, jTPCC parent) throws SQLException
    {
	this.terminalName = terminalName;
	this.conn = conn;
	this.dbType = dbType;
	/* General purpose statement: bounded result sets, batched fetch. */
	this.stmt = conn.createStatement();
	this.stmt.setMaxRows(200);
	this.stmt.setFetchSize(100);

	/* Statement for queries expected to return a single row. */
	this.stmt1 = conn.createStatement();
	this.stmt1.setMaxRows(1);

	this.terminalWarehouseID = terminalWarehouseID;
	this.terminalDistrictID = terminalDistrictID;
	this.terminalWarehouseFixed = terminalWarehouseFixed;
	this.useStoredProcedures = useStoredProcedures;
	this.parent = parent;
	/* Derive an independent RNG stream from the parent generator. */
	this.rnd = parent.getRnd().newRandom();
	this.numTransactions = numTransactions;
	this.paymentWeight = paymentWeight;
	this.orderStatusWeight = orderStatusWeight;
	this.deliveryWeight = deliveryWeight;
	this.stockLevelWeight = stockLevelWeight;
	this.numWarehouses = numWarehouses;
	this.newOrderCounter = 0;
	this.limPerMin_Terminal = limPerMin_Terminal;

	this.db = new jTPCCConnection(conn, dbType);

	terminalMessage("");
	terminalMessage("Terminal \'" + terminalName + "\' has WarehouseID=" + terminalWarehouseID + " and DistrictID=" + terminalDistrictID + ".");
	terminalStartTime = System.currentTimeMillis();
    }
93 |
94 | public void run()
95 | {
96 | executeTransactions(numTransactions);
97 | try
98 | {
99 | printMessage("");
100 | printMessage("Closing statement and connection...");
101 |
102 | stmt.close();
103 | conn.close();
104 | }
105 | catch(Exception e)
106 | {
107 | printMessage("");
108 | printMessage("An error occurred!");
109 | logException(e);
110 | }
111 |
112 | printMessage("");
113 | printMessage("Terminal \'" + terminalName + "\' finished after " + (transactionCount-1) + " transaction(s).");
114 |
115 | parent.signalTerminalEnded(this, newOrderCounter);
116 | }
117 |
    /*
     * stopRunningWhenPossible()
     *
     * Called from another thread to request a graceful shutdown. Sets
     * the stop flag first so the transaction loop sees it as early as
     * possible; the terminal finishes its current transaction before
     * exiting.
     */
    public void stopRunningWhenPossible()
    {
	stopRunningSignal = true;
	printMessage("");
	printMessage("Terminal received stop signal!");
	printMessage("Finishing current transaction before exit...");
    }
125 |
126 | private void executeTransactions(int numTransactions)
127 | {
128 | boolean stopRunning = false;
129 |
130 | if(numTransactions != -1)
131 | printMessage("Executing " + numTransactions + " transactions...");
132 | else
133 | printMessage("Executing for a limited time...");
134 |
135 | for(int i = 0; (i < numTransactions || numTransactions == -1) && !stopRunning; i++)
136 | {
137 |
138 | double transactionType = rnd.nextDouble(0.0, 100.0);
139 | int skippedDeliveries = 0, newOrder = 0;
140 | String transactionTypeName;
141 |
142 | long transactionStart = System.currentTimeMillis();
143 |
144 | /*
145 | * TPC/C specifies that each terminal has a fixed
146 | * "home" warehouse. However, since this implementation
147 | * does not simulate "terminals", but rather simulates
148 | * "application threads", that association is no longer
149 | * valid. In the case of having less clients than
150 | * warehouses (which should be the normal case), it
151 | * leaves the warehouses without a client without any
152 | * significant traffic, changing the overall database
153 | * access pattern significantly.
154 | */
155 | if(!terminalWarehouseFixed)
156 | terminalWarehouseID = rnd.nextInt(1, numWarehouses);
157 |
158 | if(transactionType <= paymentWeight)
159 | {
160 | jTPCCTData term = new jTPCCTData();
161 | term.setNumWarehouses(numWarehouses);
162 | term.setWarehouse(terminalWarehouseID);
163 | term.setDistrict(terminalDistrictID);
164 | term.setUseStoredProcedures(useStoredProcedures);
165 | term.setDBType(dbType);
166 | try
167 | {
168 | term.generatePayment(log, rnd, 0);
169 | term.traceScreen(log);
170 | term.execute(log, db);
171 | parent.resultAppend(term);
172 | term.traceScreen(log);
173 | }
174 | catch (Exception e)
175 | {
176 | log.fatal(e.getMessage());
177 | e.printStackTrace();
178 | System.exit(4);
179 | }
180 | transactionTypeName = "Payment";
181 | }
182 | else if(transactionType <= paymentWeight + stockLevelWeight)
183 | {
184 | jTPCCTData term = new jTPCCTData();
185 | term.setNumWarehouses(numWarehouses);
186 | term.setWarehouse(terminalWarehouseID);
187 | term.setDistrict(terminalDistrictID);
188 | term.setUseStoredProcedures(useStoredProcedures);
189 | term.setDBType(dbType);
190 | try
191 | {
192 | term.generateStockLevel(log, rnd, 0);
193 | term.traceScreen(log);
194 | term.execute(log, db);
195 | parent.resultAppend(term);
196 | term.traceScreen(log);
197 | }
198 | catch (Exception e)
199 | {
200 | log.fatal(e.getMessage());
201 | e.printStackTrace();
202 | System.exit(4);
203 | }
204 | transactionTypeName = "Stock-Level";
205 | }
206 | else if(transactionType <= paymentWeight + stockLevelWeight + orderStatusWeight)
207 | {
208 | jTPCCTData term = new jTPCCTData();
209 | term.setNumWarehouses(numWarehouses);
210 | term.setWarehouse(terminalWarehouseID);
211 | term.setDistrict(terminalDistrictID);
212 | term.setUseStoredProcedures(useStoredProcedures);
213 | term.setDBType(dbType);
214 | try
215 | {
216 | term.generateOrderStatus(log, rnd, 0);
217 | term.traceScreen(log);
218 | term.execute(log, db);
219 | parent.resultAppend(term);
220 | term.traceScreen(log);
221 | }
222 | catch (Exception e)
223 | {
224 | log.fatal(e.getMessage());
225 | e.printStackTrace();
226 | System.exit(4);
227 | }
228 | transactionTypeName = "Order-Status";
229 | }
230 | else if(transactionType <= paymentWeight + stockLevelWeight + orderStatusWeight + deliveryWeight)
231 | {
232 | jTPCCTData term = new jTPCCTData();
233 | term.setNumWarehouses(numWarehouses);
234 | term.setWarehouse(terminalWarehouseID);
235 | term.setDistrict(terminalDistrictID);
236 | term.setUseStoredProcedures(useStoredProcedures);
237 | term.setDBType(dbType);
238 | try
239 | {
240 | term.generateDelivery(log, rnd, 0);
241 | term.traceScreen(log);
242 | term.execute(log, db);
243 | parent.resultAppend(term);
244 | term.traceScreen(log);
245 |
246 | /*
247 | * The old style driver does not have a delivery
248 | * background queue, so we have to execute that
249 | * part here as well.
250 | */
251 | jTPCCTData bg = term.getDeliveryBG();
252 | bg.traceScreen(log);
253 | bg.execute(log, db);
254 | parent.resultAppend(bg);
255 | bg.traceScreen(log);
256 |
257 | skippedDeliveries = bg.getSkippedDeliveries();
258 | }
259 | catch (Exception e)
260 | {
261 | log.fatal(e.getMessage());
262 | e.printStackTrace();
263 | System.exit(4);
264 | }
265 | transactionTypeName = "Delivery";
266 | }
267 | else
268 | {
269 | jTPCCTData term = new jTPCCTData();
270 | term.setNumWarehouses(numWarehouses);
271 | term.setWarehouse(terminalWarehouseID);
272 | term.setDistrict(terminalDistrictID);
273 | term.setUseStoredProcedures(useStoredProcedures);
274 | term.setDBType(dbType);
275 | try
276 | {
277 | term.generateNewOrder(log, rnd, 0);
278 | term.traceScreen(log);
279 | term.execute(log, db);
280 | parent.resultAppend(term);
281 | term.traceScreen(log);
282 | }
283 | catch (Exception e)
284 | {
285 | log.fatal(e.getMessage());
286 | e.printStackTrace();
287 | System.exit(4);
288 | }
289 | transactionTypeName = "New-Order";
290 | newOrderCounter++;
291 | newOrder = 1;
292 | }
293 |
294 | long transactionEnd = System.currentTimeMillis();
295 |
296 | if(!transactionTypeName.equals("Delivery"))
297 | {
298 | parent.signalTerminalEndedTransaction(this.terminalName, transactionTypeName, transactionEnd - transactionStart, null, newOrder);
299 | }
300 | else
301 | {
302 | parent.signalTerminalEndedTransaction(this.terminalName, transactionTypeName, transactionEnd - transactionStart, (skippedDeliveries == 0 ? "None" : "" + skippedDeliveries + " delivery(ies) skipped."), newOrder);
303 | }
304 |
305 | if(limPerMin_Terminal>0){
306 | long elapse = transactionEnd-transactionStart;
307 | long timePerTx = 60000/limPerMin_Terminal;
308 |
309 | if(elapse 6 ? dS.substring(0, 6) : dS;
65 | }
66 |
67 | public static String getConfig(String db, Properties dbProps, String option)
68 | throws Exception
69 | {
70 | ResultSet rs;
71 | String value;
72 |
73 | if (dbConn == null)
74 | {
75 | dbConn = DriverManager.getConnection(db, dbProps);
76 | stmtGetConfig = dbConn.prepareStatement(
77 | "SELECT cfg_value FROM bmsql_config " +
78 | " WHERE cfg_name = ?");
79 | }
80 | stmtGetConfig.setString(1, option);
81 | rs = stmtGetConfig.executeQuery();
82 | if (!rs.next())
83 | throw new Exception("DB Load configuration parameter '" +
84 | option + "' not found");
85 | value = rs.getString("cfg_value");
86 | rs.close();
87 |
88 | return value;
89 | }
90 |
91 | } // end jTPCCUtil
92 |
--------------------------------------------------------------------------------
/src/jdbc/ExecJDBC.java:
--------------------------------------------------------------------------------
1 | /*
2 | * ExecJDBC - Command line program to process SQL DDL statements, from
3 | * a text input file, to any JDBC Data Source
4 | *
5 | * Copyright (C) 2004-2016, Denis Lussier
6 | * Copyright (C) 2016, Jan Wieck
7 | *
8 | */
9 |
10 | import java.io.*;
11 | import java.sql.*;
12 | import java.util.*;
13 |
14 |
15 | public class ExecJDBC {
16 |
17 |
18 | public static void main(String[] args) {
19 |
20 | Connection conn = null;
21 | Statement stmt = null;
22 | String rLine = null;
23 | String sLine = null;
24 | StringBuffer sql = new StringBuffer();
25 |
26 | try {
27 |
28 | Properties ini = new Properties();
29 | ini.load( new FileInputStream(System.getProperty("prop")));
30 |
31 | // Register jdbcDriver
32 | Class.forName(ini.getProperty( "driver" ));
33 |
34 | // make connection
35 | conn = DriverManager.getConnection(ini.getProperty("conn"),
36 | ini.getProperty("user"),ini.getProperty("password"));
37 | conn.setAutoCommit(true);
38 |
39 | // Retrieve datbase type
40 | String dbType = ini.getProperty("db");
41 |
42 | // For oracle : Boolean that indicates whether or not there is a statement ready to be executed.
43 | Boolean ora_ready_to_execute = false;
44 |
45 | // Create Statement
46 | stmt = conn.createStatement();
47 |
48 | // Open inputFile
49 | BufferedReader in = new BufferedReader
50 | (new FileReader(jTPCCUtil.getSysProp("commandFile",null)));
51 |
52 | // loop thru input file and concatenate SQL statement fragments
53 | while((rLine = in.readLine()) != null) {
54 |
55 | if (ora_ready_to_execute == true)
56 | {
57 | String query = sql.toString();
58 |
59 | execJDBC(stmt, query);
60 | sql = new StringBuffer();
61 | ora_ready_to_execute = false;
62 | }
63 |
64 | String line = rLine.trim();
65 |
66 | if (line.length() != 0) {
67 | if (line.startsWith("--") && !line.startsWith("-- {")) {
68 | System.out.println(rLine); // print comment line
69 | } else {
70 | if (line.equals("$$"))
71 | {
72 | sql.append(rLine);
73 | sql.append("\n");
74 | while((rLine = in.readLine()) != null) {
75 | line = rLine.trim();
76 | sql.append(rLine);
77 | sql.append("\n");
78 | if (line.equals("$$"))
79 | {
80 | break;
81 | }
82 | }
83 | continue;
84 | }
85 |
86 | if (line.startsWith("-- {"))
87 | {
88 | sql.append(rLine);
89 | sql.append("\n");
90 | while((rLine = in.readLine()) != null) {
91 | line = rLine.trim();
92 | sql.append(rLine);
93 | sql.append("\n");
94 | if (line.startsWith("-- }"))
95 | {
96 | ora_ready_to_execute = true;
97 | break;
98 | }
99 | }
100 | continue;
101 | }
102 |
103 | if (line.endsWith("\\;"))
104 | {
105 | sql.append(rLine.replaceAll("\\\\;", ";"));
106 | sql.append("\n");
107 | }
108 | else
109 | {
110 | sql.append(line.replaceAll("\\\\;", ";"));
111 | if (line.endsWith(";")) {
112 | String query = sql.toString();
113 |
114 | execJDBC(stmt, query.substring(0, query.length() - 1));
115 | sql = new StringBuffer();
116 | } else {
117 | sql.append("\n");
118 | }
119 | }
120 | }
121 |
122 | } //end if
123 |
124 | } //end while
125 |
126 | in.close();
127 |
128 | } catch(IOException ie) {
129 | System.out.println(ie.getMessage());
130 |
131 | } catch(SQLException se) {
132 | System.out.println(se.getMessage());
133 |
134 | } catch(Exception e) {
135 | e.printStackTrace();
136 |
137 | //exit Cleanly
138 | } finally {
139 | try {
140 | if (conn !=null)
141 | conn.close();
142 | } catch(SQLException se) {
143 | se.printStackTrace();
144 | } // end finally
145 |
146 | } // end try
147 |
148 | } // end main
149 |
150 |
151 | static void execJDBC(Statement stmt, String query) {
152 |
153 | System.out.println(query + ";");
154 |
155 | try {
156 | stmt.execute(query);
157 | }catch(SQLException se) {
158 | System.out.println(se.getMessage());
159 | } // end try
160 |
161 | } // end execJDBCCommand
162 |
163 | } // end ExecJDBC Class
164 |
--------------------------------------------------------------------------------