├── .gitattributes ├── .gitignore ├── 9781484266519.jpg ├── Chapter_02 └── listing_2_8.txt ├── Chapter_13 └── figure_13_2_workbench_flush_lock.png ├── Chapter_15 └── figure_15_1_quan.png.png ├── Chapter_16 └── listing_16_2_deadlock.txt ├── Chapter_17 └── listing_17-1.txt ├── Chapter_18 └── listing_18-1.txt ├── Contributing.md ├── LICENSE.txt ├── README.md └── concurrency_book ├── __init__.py ├── generate.py ├── libs ├── __init__.py ├── innodb_buffer_pool.py ├── innodb_monitor.py ├── innodb_mutex.py ├── load.py ├── log.py ├── metrics.py ├── query.py ├── util.py └── workloads.py └── workloads ├── listing-6-10.yaml ├── listing_10-1.yaml ├── listing_10-11.yaml ├── listing_10-2.yaml ├── listing_10-3.yaml ├── listing_10-4.yaml ├── listing_10-5.yaml ├── listing_10-8.yaml ├── listing_12-1.yaml ├── listing_12-2.yaml ├── listing_12-3.yaml ├── listing_12-4.yaml ├── listing_12-5.yaml ├── listing_12-6.yaml ├── listing_12-7.yaml ├── listing_13-1.yaml ├── listing_14-1.yaml ├── listing_15-2.yaml ├── listing_16-1.yaml ├── listing_17-1.yaml ├── listing_17_1.py ├── listing_18-1.yaml ├── listing_18_1.py ├── listing_2-1.yaml ├── listing_2-2.yaml ├── listing_2-3.yaml ├── listing_2-4.yaml ├── listing_2-5.yaml ├── listing_2-6.yaml ├── listing_2-7.yaml ├── listing_3-1.yaml ├── listing_3-5.yaml ├── listing_3-6.yaml ├── listing_4-1.yaml ├── listing_4-5.yaml ├── listing_5-1.yaml ├── listing_5-2.yaml ├── listing_6-1.yaml ├── listing_6-3.yaml ├── listing_6-4.yaml ├── listing_6-7.yaml ├── listing_6-8.yaml ├── listing_6-9.yaml ├── listing_7-1.yaml ├── listing_7-2.yaml ├── listing_7-3.yaml ├── listing_7-4.yaml ├── listing_8-1.yaml ├── listing_8-2.yaml ├── listing_8-3.yaml ├── listing_8-4.yaml ├── listing_9-1.yaml ├── listing_9-2.yaml ├── listing_9-3.yaml ├── listing_9-4.yaml └── listing_B-5.yaml /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | __pycache__/ 3 | -------------------------------------------------------------------------------- /9781484266519.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/mysql-concurrency/1e89c82a94fbbcae086221adf3f1220f810bc8a6/9781484266519.jpg -------------------------------------------------------------------------------- /Chapter_02/listing_2_8.txt: -------------------------------------------------------------------------------- 1 | mysql> SET GLOBAL innodb_status_output_locks = ON; 2 | Query OK, 0 rows affected (0.0005 sec) 3 | 4 | mysql> SHOW ENGINE INNODB STATUS\G 5 | *************************** 1. 
row *************************** 6 | Type: InnoDB 7 | Name: 8 | Status: 9 | ===================================== 10 | 2020-06-27 12:54:29 0x7f00 INNODB MONITOR OUTPUT 11 | ===================================== 12 | Per second averages calculated from the last 50 seconds 13 | ----------------- 14 | BACKGROUND THREAD 15 | ----------------- 16 | srv_master_thread loops: 2532 srv_active, 0 srv_shutdown, 1224 srv_idle 17 | srv_master_thread log flush and writes: 0 18 | ---------- 19 | SEMAPHORES 20 | ---------- 21 | OS WAIT ARRAY INFO: reservation count 7750 22 | OS WAIT ARRAY INFO: signal count 6744 23 | RW-shared spins 3033, rounds 5292, OS waits 2261 24 | RW-excl spins 1600, rounds 25565, OS waits 1082 25 | RW-sx spins 2167, rounds 61634, OS waits 1874 26 | Spin rounds per wait: 1.74 RW-shared, 15.98 RW-excl, 28.44 RW-sx 27 | ------------------------ 28 | LATEST DETECTED DEADLOCK 29 | ------------------------ 30 | 2020-06-27 12:54:26 0x862c 31 | *** (1) TRANSACTION: 32 | TRANSACTION 296726, ACTIVE 0 sec starting index read 33 | mysql tables in use 1, locked 1 34 | LOCK WAIT 3 lock struct(s), heap size 1136, 2 row lock(s), undo log entries 1 35 | MySQL thread id 20, OS thread handle 29332, query id 56150 localhost ::1 root updating 36 | UPDATE world.city 37 | SET Population = Population + 1 38 | WHERE ID = 130 39 | 40 | *** (1) HOLDS THE LOCK(S): 41 | RECORD LOCKS space id 259 page no 34 n bits 248 index PRIMARY of table `world`.`city` trx id 296726 lock_mode X locks rec but not gap 42 | Record lock, heap no 66 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 43 | 0: len 4; hex 80000edd; asc ;; 44 | 1: len 6; hex 000000048716; asc ;; 45 | 2: len 7; hex 020000015f2949; asc _)I;; 46 | 3: len 30; hex 53616e204672616e636973636f2020202020202020202020202020202020; asc San Francisco ; (total 35 bytes); 47 | 4: len 3; hex 555341; asc USA;; 48 | 5: len 20; hex 43616c69666f726e696120202020202020202020; asc California ;; 49 | 6: len 4; hex 800bda1e; asc ;; 50 | 51 | 52 | *** (1) WAITING FOR THIS LOCK TO BE GRANTED: 53 | RECORD LOCKS space id 259 page no 7 n bits 248 index PRIMARY of table `world`.`city` trx id 296726 lock_mode X locks rec but not gap waiting 54 | Record lock, heap no 44 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 55 | 0: len 4; hex 80000082; asc ;; 56 | 1: len 6; hex 000000048715; asc ;; 57 | 2: len 7; hex 01000000d81fcd; asc ;; 58 | 3: len 30; hex 5379646e6579202020202020202020202020202020202020202020202020; asc Sydney ; (total 35 bytes); 59 | 4: len 3; hex 415553; asc AUS;; 60 | 5: len 20; hex 4e657720536f7574682057616c65732020202020; asc New South Wales ;; 61 | 6: len 4; hex 8031fdb0; asc 1 ;; 62 | 63 | 64 | *** (2) TRANSACTION: 65 | TRANSACTION 296725, ACTIVE 0 sec starting index read 66 | mysql tables in use 1, locked 1 67 | LOCK WAIT 3 lock struct(s), heap size 1136, 2 row lock(s), undo log entries 1 68 | MySQL thread id 19, OS thread handle 6576, query id 56151 localhost ::1 root updating 69 | UPDATE world.city 70 | SET Population = Population + 1 71 | WHERE ID = 3805 72 | 73 | *** (2) HOLDS THE LOCK(S): 74 | RECORD LOCKS space id 259 page no 7 n bits 248 index PRIMARY of table `world`.`city` trx id 296725 lock_mode X locks rec but not gap 75 | Record lock, heap no 44 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 76 | 0: len 4; hex 80000082; asc ;; 77 | 1: len 6; hex 000000048715; asc ;; 78 | 2: len 7; hex 01000000d81fcd; asc ;; 79 | 3: len 30; hex 5379646e6579202020202020202020202020202020202020202020202020; asc Sydney ; (total 35 bytes); 80 | 4: 
len 3; hex 415553; asc AUS;; 81 | 5: len 20; hex 4e657720536f7574682057616c65732020202020; asc New South Wales ;; 82 | 6: len 4; hex 8031fdb0; asc 1 ;; 83 | 84 | 85 | *** (2) WAITING FOR THIS LOCK TO BE GRANTED: 86 | RECORD LOCKS space id 259 page no 34 n bits 248 index PRIMARY of table `world`.`city` trx id 296725 lock_mode X locks rec but not gap waiting 87 | Record lock, heap no 66 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 88 | 0: len 4; hex 80000edd; asc ;; 89 | 1: len 6; hex 000000048716; asc ;; 90 | 2: len 7; hex 020000015f2949; asc _)I;; 91 | 3: len 30; hex 53616e204672616e636973636f2020202020202020202020202020202020; asc San Francisco ; (total 35 bytes); 92 | 4: len 3; hex 555341; asc USA;; 93 | 5: len 20; hex 43616c69666f726e696120202020202020202020; asc California ;; 94 | 6: len 4; hex 800bda1e; asc ;; 95 | 96 | *** WE ROLL BACK TRANSACTION (2) 97 | ------------ 98 | TRANSACTIONS 99 | ------------ 100 | Trx id counter 296728 101 | Purge done for trx's n:o < 296728 undo n:o < 0 state: running but idle 102 | History list length 1 103 | LIST OF TRANSACTIONS FOR EACH SESSION: 104 | ---TRANSACTION 283598406541472, not started 105 | 0 lock struct(s), heap size 1136, 0 row lock(s) 106 | ---TRANSACTION 283598406540640, not started 107 | 0 lock struct(s), heap size 1136, 0 row lock(s) 108 | ---TRANSACTION 283598406539808, not started 109 | 0 lock struct(s), heap size 1136, 0 row lock(s) 110 | ---TRANSACTION 283598406538976, not started 111 | 0 lock struct(s), heap size 1136, 0 row lock(s) 112 | ---TRANSACTION 296726, ACTIVE 3 sec 113 | 3 lock struct(s), heap size 1136, 2 row lock(s), undo log entries 2 114 | MySQL thread id 20, OS thread handle 29332, query id 56150 localhost ::1 root 115 | TABLE LOCK table `world`.`city` trx id 296726 lock mode IX 116 | RECORD LOCKS space id 259 page no 34 n bits 248 index PRIMARY of table `world`.`city` trx id 296726 lock_mode X locks rec but not gap 117 | Record lock, heap no 66 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 118 | 0: len 4; hex 80000edd; asc ;; 119 | 1: len 6; hex 000000048716; asc ;; 120 | 2: len 7; hex 020000015f2949; asc _)I;; 121 | 3: len 30; hex 53616e204672616e636973636f2020202020202020202020202020202020; asc San Francisco ; (total 35 bytes); 122 | 4: len 3; hex 555341; asc USA;; 123 | 5: len 20; hex 43616c69666f726e696120202020202020202020; asc California ;; 124 | 6: len 4; hex 800bda1e; asc ;; 125 | 126 | RECORD LOCKS space id 259 page no 7 n bits 248 index PRIMARY of table `world`.`city` trx id 296726 lock_mode X locks rec but not gap 127 | Record lock, heap no 44 PHYSICAL RECORD: n_fields 7; compact format; info bits 0 128 | 0: len 4; hex 80000082; asc ;; 129 | 1: len 6; hex 000000048716; asc ;; 130 | 2: len 7; hex 020000015f296c; asc _)l;; 131 | 3: len 30; hex 5379646e6579202020202020202020202020202020202020202020202020; asc Sydney ; (total 35 bytes); 132 | 4: len 3; hex 415553; asc AUS;; 133 | 5: len 20; hex 4e657720536f7574682057616c65732020202020; asc New South Wales ;; 134 | 6: len 4; hex 8031fdb0; asc 1 ;; 135 | 136 | -------- 137 | FILE I/O 138 | -------- 139 | I/O thread 0 state: wait Windows aio (insert buffer thread) 140 | I/O thread 1 state: wait Windows aio (log thread) 141 | I/O thread 2 state: wait Windows aio (read thread) 142 | I/O thread 3 state: wait Windows aio (read thread) 143 | I/O thread 4 state: wait Windows aio (read thread) 144 | I/O thread 5 state: wait Windows aio (read thread) 145 | I/O thread 6 state: wait Windows aio (write thread) 146 | I/O thread 7 state: wait Windows 
aio (write thread) 147 | I/O thread 8 state: wait Windows aio (write thread) 148 | I/O thread 9 state: wait Windows aio (write thread) 149 | Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , 150 | ibuf aio reads:, log i/o's:, sync i/o's: 151 | Pending flushes (fsync) log: 0; buffer pool: 14 152 | 39591 OS file reads, 229606 OS file writes, 43780 OS fsyncs 153 | 0.00 reads/s, 0 avg bytes/read, 0.30 writes/s, 0.16 fsyncs/s 154 | ------------------------------------- 155 | INSERT BUFFER AND ADAPTIVE HASH INDEX 156 | ------------------------------------- 157 | Ibuf: size 1, free list len 0, seg size 2, 6426 merges 158 | merged operations: 159 | insert 1272, delete mark 5171, delete 4 160 | discarded operations: 161 | insert 0, delete mark 0, delete 0 162 | Hash table size 553253, node heap has 944 buffer(s) 163 | Hash table size 553253, node heap has 0 buffer(s) 164 | Hash table size 553253, node heap has 0 buffer(s) 165 | Hash table size 553253, node heap has 0 buffer(s) 166 | Hash table size 553253, node heap has 0 buffer(s) 167 | Hash table size 553253, node heap has 0 buffer(s) 168 | Hash table size 553253, node heap has 1 buffer(s) 169 | Hash table size 553253, node heap has 4 buffer(s) 170 | 0.00 hash searches/s, 0.18 non-hash searches/s 171 | --- 172 | LOG 173 | --- 174 | Log sequence number 1910063545 175 | Log buffer assigned up to 1910063545 176 | Log buffer completed up to 1910063545 177 | Log written up to 1910063545 178 | Log flushed up to 1910063545 179 | Added dirty pages up to 1910063545 180 | Pages flushed up to 1910063545 181 | Last checkpoint at 1910063545 182 | 120548 log i/o's done, 0.10 log i/o's/second 183 | ---------------------- 184 | BUFFER POOL AND MEMORY 185 | ---------------------- 186 | Total large memory allocated 2197815296 187 | Dictionary memory allocated 409088 188 | Buffer pool size 131072 189 | Free buffers 86134 190 | Database pages 43989 191 | Old database pages 16391 192 | Modified db pages 0 193 | Pending reads 0 194 | Pending writes: LRU 0, flush list 0, single page 0 195 | Pages made young 2712, not young 0 196 | 0.00 youngs/s, 0.00 non-youngs/s 197 | Pages read 39564, created 4425, written 80463 198 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 199 | Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000 200 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 201 | LRU len: 43989, unzip_LRU len: 0 202 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 203 | ---------------------- 204 | INDIVIDUAL BUFFER POOL INFO 205 | ---------------------- 206 | ---BUFFER POOL 0 207 | Buffer pool size 16384 208 | Free buffers 10968 209 | Database pages 5296 210 | Old database pages 1974 211 | Modified db pages 0 212 | Pending reads 0 213 | Pending writes: LRU 0, flush list 0, single page 0 214 | Pages made young 291, not young 0 215 | 0.00 youngs/s, 0.00 non-youngs/s 216 | Pages read 4678, created 618, written 9709 217 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 218 | Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000 219 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 220 | LRU len: 5296, unzip_LRU len: 0 221 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 222 | ---BUFFER POOL 1 223 | Buffer pool size 16384 224 | Free buffers 10690 225 | Database pages 5574 226 | Old database pages 2077 227 | Modified db pages 0 228 | Pending reads 0 229 | Pending writes: LRU 0, flush list 0, single page 0 230 | Pages made young 377, not young 0 231 | 0.00 youngs/s, 0.00 
non-youngs/s 232 | Pages read 5067, created 507, written 10235 233 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 234 | No buffer pool page gets since the last printout 235 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 236 | LRU len: 5574, unzip_LRU len: 0 237 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 238 | ---BUFFER POOL 2 239 | Buffer pool size 16384 240 | Free buffers 10620 241 | Database pages 5645 242 | Old database pages 2103 243 | Modified db pages 0 244 | Pending reads 0 245 | Pending writes: LRU 0, flush list 0, single page 0 246 | Pages made young 332, not young 0 247 | 0.00 youngs/s, 0.00 non-youngs/s 248 | Pages read 4917, created 728, written 11162 249 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 250 | Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000 251 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 252 | LRU len: 5645, unzip_LRU len: 0 253 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 254 | ---BUFFER POOL 3 255 | Buffer pool size 16384 256 | Free buffers 10820 257 | Database pages 5445 258 | Old database pages 2029 259 | Modified db pages 0 260 | Pending reads 0 261 | Pending writes: LRU 0, flush list 0, single page 0 262 | Pages made young 355, not young 0 263 | 0.00 youngs/s, 0.00 non-youngs/s 264 | Pages read 4965, created 480, written 8689 265 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 266 | Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000 267 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 268 | LRU len: 5445, unzip_LRU len: 0 269 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 270 | ---BUFFER POOL 4 271 | Buffer pool size 16384 272 | Free buffers 10853 273 | Database pages 5412 274 | Old database pages 2016 275 | Modified db pages 0 276 | Pending reads 0 277 | Pending writes: LRU 0, flush list 0, single page 0 278 | Pages made young 396, not young 0 279 | 0.00 youngs/s, 0.00 non-youngs/s 280 | Pages read 4971, created 441, written 9979 281 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 282 | No buffer pool page gets since the last printout 283 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 284 | LRU len: 5412, unzip_LRU len: 0 285 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 286 | ---BUFFER POOL 5 287 | Buffer pool size 16384 288 | Free buffers 11025 289 | Database pages 5242 290 | Old database pages 1954 291 | Modified db pages 0 292 | Pending reads 0 293 | Pending writes: LRU 0, flush list 0, single page 0 294 | Pages made young 330, not young 0 295 | 0.00 youngs/s, 0.00 non-youngs/s 296 | Pages read 4693, created 549, written 9609 297 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 298 | No buffer pool page gets since the last printout 299 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 300 | LRU len: 5242, unzip_LRU len: 0 301 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 302 | ---BUFFER POOL 6 303 | Buffer pool size 16384 304 | Free buffers 10722 305 | Database pages 5544 306 | Old database pages 2066 307 | Modified db pages 0 308 | Pending reads 0 309 | Pending writes: LRU 0, flush list 0, single page 0 310 | Pages made young 251, not young 0 311 | 0.00 youngs/s, 0.00 non-youngs/s 312 | Pages read 4947, created 597, written 9224 313 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 314 | No buffer pool page gets since the last printout 315 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 316 | LRU len: 5544, unzip_LRU len: 0 317 | 
I/O sum[0]:cur[0], unzip sum[0]:cur[0] 318 | ---BUFFER POOL 7 319 | Buffer pool size 16384 320 | Free buffers 10436 321 | Database pages 5831 322 | Old database pages 2172 323 | Modified db pages 0 324 | Pending reads 0 325 | Pending writes: LRU 0, flush list 0, single page 0 326 | Pages made young 380, not young 0 327 | 0.00 youngs/s, 0.00 non-youngs/s 328 | Pages read 5326, created 505, written 11856 329 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 330 | No buffer pool page gets since the last printout 331 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 332 | LRU len: 5831, unzip_LRU len: 0 333 | I/O sum[0]:cur[0], unzip sum[0]:cur[0] 334 | -------------- 335 | ROW OPERATIONS 336 | -------------- 337 | 0 queries inside InnoDB, 0 queries in queue 338 | 0 read views open inside InnoDB 339 | Process ID=33936, Main thread ID=0000000000000B20 , state=sleeping 340 | Number of rows inserted 0, updated 16047, deleted 0, read 48017 341 | 0.00 inserts/s, 0.06 updates/s, 0.00 deletes/s, 0.06 reads/s 342 | Number of system rows inserted 0, updated 315, deleted 0, read 5058 343 | 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.08 reads/s 344 | ---------------------------- 345 | END OF INNODB MONITOR OUTPUT 346 | ============================ 347 | 348 | 1 row in set (0.0013 sec) 349 | 350 | mysql> SET GLOBAL innodb_status_output_locks = OFF; 351 | Query OK, 0 rows affected (0.0004 sec) -------------------------------------------------------------------------------- /Chapter_13/figure_13_2_workbench_flush_lock.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/mysql-concurrency/1e89c82a94fbbcae086221adf3f1220f810bc8a6/Chapter_13/figure_13_2_workbench_flush_lock.png -------------------------------------------------------------------------------- /Chapter_15/figure_15_1_quan.png.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/mysql-concurrency/1e89c82a94fbbcae086221adf3f1220f810bc8a6/Chapter_15/figure_15_1_quan.png.png -------------------------------------------------------------------------------- /Chapter_16/listing_16_2_deadlock.txt: -------------------------------------------------------------------------------- 1 | -- Connection 3 2 | Connection 3> SHOW ENGINE INNODB STATUS\G 3 | *************************** 1. 
row *************************** 4 | Type: InnoDB 5 | Name: 6 | Status: 7 | ===================================== 8 | 2020-08-07 20:09:36 0x8bf0 INNODB MONITOR OUTPUT 9 | ===================================== 10 | Per second averages calculated from the last 40 seconds 11 | ----------------- 12 | BACKGROUND THREAD 13 | ----------------- 14 | srv_master_thread loops: 1164 srv_active, 0 srv_shutdown, 200438 srv_idle 15 | srv_master_thread log flush and writes: 0 16 | ---------- 17 | SEMAPHORES 18 | ---------- 19 | OS WAIT ARRAY INFO: reservation count 1099 20 | OS WAIT ARRAY INFO: signal count 1057 21 | RW-shared spins 62, rounds 63, OS waits 1 22 | RW-excl spins 1024, rounds 12587, OS waits 373 23 | RW-sx spins 39, rounds 1089, OS waits 35 24 | Spin rounds per wait: 1.02 RW-shared, 12.29 RW-excl, 27.92 RW-sx 25 | ------------------------ 26 | LATEST DETECTED DEADLOCK 27 | ------------------------ 28 | 2020-08-07 20:08:55 0x9f0 29 | *** (1) TRANSACTION: 30 | TRANSACTION 537544, ACTIVE 0 sec starting index read 31 | mysql tables in use 1, locked 1 32 | LOCK WAIT 6 lock struct(s), heap size 1136, 30 row lock(s), undo log entries 14 33 | MySQL thread id 762, OS thread handle 10344, query id 3282590 localhost ::1 root updating 34 | UPDATE world.country SET Population = Population * 1.10 WHERE Code = 'AUS' 35 | 36 | *** (1) HOLDS THE LOCK(S): 37 | RECORD LOCKS space id 1923 page no 14 n bits 1272 index CountryCode of table `world`.`city` trx id 537544 lock_mode X locks gap before rec 38 | Record lock, heap no 603 PHYSICAL RECORD: n_fields 2; compact format; info bits 0 39 | 0: len 3; hex 415554; asc AUT;; 40 | 1: len 4; hex 800005f3; asc ;; 41 | 42 | 43 | *** (1) WAITING FOR THIS LOCK TO BE GRANTED: 44 | RECORD LOCKS space id 1924 page no 5 n bits 120 index PRIMARY of table `world`.`country` trx id 537544 lock_mode X locks rec but not gap waiting 45 | Record lock, heap no 16 PHYSICAL RECORD: n_fields 17; compact format; info bits 0 46 | 0: len 3; hex 415553; asc AUS;; 47 | 1: len 6; hex 0000000833c9; asc 3 ;; 48 | 2: len 7; hex 02000001750a3c; asc u <;; 49 | 3: len 30; hex 4175737472616c6961202020202020202020202020202020202020202020; asc Australia ; (total 52 bytes); 50 | 4: len 1; hex 05; asc ;; 51 | 5: len 26; hex 4175737472616c696120616e64204e6577205a65616c616e6420; asc Australia and New Zealand ;; 52 | 6: len 5; hex 80761f2400; asc v $ ;; 53 | 7: len 2; hex 876d; asc m;; 54 | 8: len 4; hex 812267c0; asc "g ;; 55 | 9: len 2; hex cf08; asc ;; 56 | 10: len 5; hex 80055bce00; asc [ ;; 57 | 11: len 5; hex 8005fecf00; asc ;; 58 | 12: len 30; hex 4175737472616c6961202020202020202020202020202020202020202020; asc Australia ; (total 45 bytes); 59 | 13: len 30; hex 436f6e737469747574696f6e616c204d6f6e61726368792c204665646572; asc Constitutional Monarchy, Feder; (total 45 bytes); 60 | 14: len 30; hex 456c69736162657468204949202020202020202020202020202020202020; asc Elisabeth II ; (total 60 bytes); 61 | 15: len 4; hex 80000087; asc ;; 62 | 16: len 2; hex 4155; asc AU;; 63 | 64 | 65 | *** (2) TRANSACTION: 66 | TRANSACTION 537545, ACTIVE 0 sec inserting 67 | mysql tables in use 1, locked 1 68 | LOCK WAIT 4 lock struct(s), heap size 1136, 2 row lock(s), undo log entries 2 69 | MySQL thread id 763, OS thread handle 37872, query id 3282591 localhost ::1 root update 70 | INSERT INTO world.city VALUES (4080, 'Darwin', 'AUS', 'Northern Territory', 146000) 71 | 72 | *** (2) HOLDS THE LOCK(S): 73 | RECORD LOCKS space id 1924 page no 5 n bits 120 index PRIMARY of table `world`.`country` trx id 537545 lock_mode X 
locks rec but not gap 74 | Record lock, heap no 16 PHYSICAL RECORD: n_fields 17; compact format; info bits 0 75 | 0: len 3; hex 415553; asc AUS;; 76 | 1: len 6; hex 0000000833c9; asc 3 ;; 77 | 2: len 7; hex 02000001750a3c; asc u <;; 78 | 3: len 30; hex 4175737472616c6961202020202020202020202020202020202020202020; asc Australia ; (total 52 bytes); 79 | 4: len 1; hex 05; asc ;; 80 | 5: len 26; hex 4175737472616c696120616e64204e6577205a65616c616e6420; asc Australia and New Zealand ;; 81 | 6: len 5; hex 80761f2400; asc v $ ;; 82 | 7: len 2; hex 876d; asc m;; 83 | 8: len 4; hex 812267c0; asc "g ;; 84 | 9: len 2; hex cf08; asc ;; 85 | 10: len 5; hex 80055bce00; asc [ ;; 86 | 11: len 5; hex 8005fecf00; asc ;; 87 | 12: len 30; hex 4175737472616c6961202020202020202020202020202020202020202020; asc Australia ; (total 45 bytes); 88 | 13: len 30; hex 436f6e737469747574696f6e616c204d6f6e61726368792c204665646572; asc Constitutional Monarchy, Feder; (total 45 bytes); 89 | 14: len 30; hex 456c69736162657468204949202020202020202020202020202020202020; asc Elisabeth II ; (total 60 bytes); 90 | 15: len 4; hex 80000087; asc ;; 91 | 16: len 2; hex 4155; asc AU;; 92 | 93 | 94 | *** (2) WAITING FOR THIS LOCK TO BE GRANTED: 95 | RECORD LOCKS space id 1923 page no 14 n bits 1272 index CountryCode of table `world`.`city` trx id 537545 lock_mode X locks gap before rec insert intention waiting 96 | Record lock, heap no 603 PHYSICAL RECORD: n_fields 2; compact format; info bits 0 97 | 0: len 3; hex 415554; asc AUT;; 98 | 1: len 4; hex 800005f3; asc ;; 99 | 100 | *** WE ROLL BACK TRANSACTION (2) 101 | ------------ 102 | TRANSACTIONS 103 | ------------ 104 | Trx id counter 537548 105 | Purge done for trx's n:o < 537548 undo n:o < 0 state: running but idle 106 | History list length 2 107 | LIST OF TRANSACTIONS FOR EACH SESSION: 108 | ---TRANSACTION 284186648312416, not started 109 | 0 lock struct(s), heap size 1136, 0 row lock(s) 110 | ---TRANSACTION 284186648311584, not started 111 | 0 lock struct(s), heap size 1136, 0 row lock(s) 112 | ---TRANSACTION 284186648310752, not started 113 | 0 lock struct(s), heap size 1136, 0 row lock(s) 114 | ---TRANSACTION 284186648309920, not started 115 | 0 lock struct(s), heap size 1136, 0 row lock(s) 116 | ---TRANSACTION 284186648315744, not started 117 | 0 lock struct(s), heap size 1136, 0 row lock(s) 118 | ---TRANSACTION 284186648314912, not started 119 | 0 lock struct(s), heap size 1136, 0 row lock(s) 120 | ---TRANSACTION 284186648314080, not started 121 | 0 lock struct(s), heap size 1136, 0 row lock(s) 122 | ---TRANSACTION 284186648313248, not started 123 | 0 lock struct(s), heap size 1136, 0 row lock(s) 124 | ---TRANSACTION 284186648309088, not started 125 | 0 lock struct(s), heap size 1136, 0 row lock(s) 126 | ---TRANSACTION 284186648308256, not started 127 | 0 lock struct(s), heap size 1136, 0 row lock(s) 128 | ---TRANSACTION 284186648307424, not started 129 | 0 lock struct(s), heap size 1136, 0 row lock(s) 130 | -------- 131 | FILE I/O 132 | -------- 133 | I/O thread 0 state: wait Windows aio (insert buffer thread) 134 | I/O thread 1 state: wait Windows aio (log thread) 135 | I/O thread 2 state: wait Windows aio (read thread) 136 | I/O thread 3 state: wait Windows aio (read thread) 137 | I/O thread 4 state: wait Windows aio (read thread) 138 | I/O thread 5 state: wait Windows aio (read thread) 139 | I/O thread 6 state: wait Windows aio (write thread) 140 | I/O thread 7 state: wait Windows aio (write thread) 141 | I/O thread 8 state: wait Windows aio (write thread) 142 | I/O 
thread 9 state: wait Windows aio (write thread) 143 | Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , 144 | ibuf aio reads:, log i/o's:, sync i/o's: 145 | Pending flushes (fsync) log: 0; buffer pool: 2 146 | 2272 OS file reads, 68722 OS file writes, 30901 OS fsyncs 147 | 0.00 reads/s, 0 avg bytes/read, 0.42 writes/s, 0.25 fsyncs/s 148 | ------------------------------------- 149 | INSERT BUFFER AND ADAPTIVE HASH INDEX 150 | ------------------------------------- 151 | Ibuf: size 1, free list len 13, seg size 15, 0 merges 152 | merged operations: 153 | insert 0, delete mark 0, delete 0 154 | discarded operations: 155 | insert 0, delete mark 0, delete 0 156 | Hash table size 34679, node heap has 2 buffer(s) 157 | Hash table size 34679, node heap has 1 buffer(s) 158 | Hash table size 34679, node heap has 2 buffer(s) 159 | Hash table size 34679, node heap has 2 buffer(s) 160 | Hash table size 34679, node heap has 1 buffer(s) 161 | Hash table size 34679, node heap has 1 buffer(s) 162 | Hash table size 34679, node heap has 2 buffer(s) 163 | Hash table size 34679, node heap has 6 buffer(s) 164 | 0.00 hash searches/s, 0.37 non-hash searches/s 165 | --- 166 | LOG 167 | --- 168 | Log sequence number 5774075682 169 | Log buffer assigned up to 5774075682 170 | Log buffer completed up to 5774075682 171 | Log written up to 5774075682 172 | Log flushed up to 5774075682 173 | Added dirty pages up to 5774075682 174 | Pages flushed up to 5774075682 175 | Last checkpoint at 5774075682 176 | 28546 log i/o's done, 0.10 log i/o's/second 177 | ---------------------- 178 | BUFFER POOL AND MEMORY 179 | ---------------------- 180 | Total large memory allocated 137363456 181 | Dictionary memory allocated 612044 182 | Buffer pool size 8192 183 | Free buffers 5466 184 | Database pages 2709 185 | Old database pages 982 186 | Modified db pages 0 187 | Pending reads 0 188 | Pending writes: LRU 0, flush list 0, single page 0 189 | Pages made young 3708, not young 2016 190 | 0.00 youngs/s, 0.00 non-youngs/s 191 | Pages read 2230, created 1599, written 29728 192 | 0.00 reads/s, 0.00 creates/s, 0.00 writes/s 193 | Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000 194 | Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s 195 | LRU len: 2709, unzip_LRU len: 0 196 | I/O sum[9]:cur[0], unzip sum[0]:cur[0] 197 | -------------- 198 | ROW OPERATIONS 199 | -------------- 200 | 0 queries inside InnoDB, 0 queries in queue 201 | 0 read views open inside InnoDB 202 | Process ID=13800, Main thread ID=0000000000004F24 , state=sleeping 203 | Number of rows inserted 5347, updated 4623, deleted 0, read 13191 204 | 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s 205 | Number of system rows inserted 5293, updated 3757, deleted 5191, read 30919 206 | 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s 207 | ---------------------------- 208 | END OF INNODB MONITOR OUTPUT 209 | ============================ 210 | 211 | 1 row in set (0.0009 sec) 212 | -------------------------------------------------------------------------------- /Chapter_17/listing_17-1.txt: -------------------------------------------------------------------------------- 1 | ******************************************** 2 | * * 3 | * Listing 17-1. 
Locks and foreign keys * 4 | * * 5 | ******************************************** 6 | 7 | 8 | Specify the number of seconds to run for (10-3600) [15]: 9 | Specify the sleep factor (0-30) [15]: 10 | 11 | -- Connection Processlist ID Thread ID Event ID 12 | -- -------------------------------------------------- 13 | -- 1 462 792 6 14 | -- 2 463 793 6 15 | -- 3 464 794 6 16 | -- 4 465 795 6 17 | -- 5 466 796 6 18 | 19 | 20 | 21 | mysql> SELECT error_number, error_name, sum_error_raised 22 | FROM performance_schema.events_errors_summary_global_by_error 23 | WHERE error_name IN ('ER_LOCK_WAIT_TIMEOUT', 'ER_LOCK_DEADLOCK') 24 | +--------------+----------------------+------------------+ 25 | | error_number | error_name | sum_error_raised | 26 | +--------------+----------------------+------------------+ 27 | | 1205 | ER_LOCK_WAIT_TIMEOUT | 310 | 28 | | 1213 | ER_LOCK_DEADLOCK | 12 | 29 | +--------------+----------------------+------------------+ 30 | 31 | mysql> SELECT event_name, count_star, sum_errors 32 | FROM performance_schema.events_statements_summary_global_by_event_name 33 | WHERE event_name IN ('statement/sql/alter_table', 34 | 'statement/sql/update') 35 | +---------------------------+------------+------------+ 36 | | event_name | count_star | sum_errors | 37 | +---------------------------+------------+------------+ 38 | | statement/sql/alter_table | 113 | 101 | 39 | | statement/sql/update | 4470 | 188 | 40 | +---------------------------+------------+------------+ 41 | 42 | mysql> SELECT Variable_name, Variable_value 43 | FROM sys.metrics 44 | WHERE Variable_name IN ( 45 | 'innodb_row_lock_current_waits', 46 | 'lock_row_lock_current_waits', 47 | 'innodb_row_lock_time', 48 | 'innodb_row_lock_waits', 49 | 'lock_deadlocks', 50 | 'lock_timeouts' 51 | ) 52 | +-------------------------------+----------------+ 53 | | Variable_name | Variable_value | 54 | +-------------------------------+----------------+ 55 | | innodb_row_lock_current_waits | 0 | 56 | | innodb_row_lock_time | 409555 | 57 | | innodb_row_lock_waits | 384 | 58 | | lock_deadlocks | 0 | 59 | | lock_row_lock_current_waits | 0 | 60 | | lock_timeouts | 188 | 61 | +-------------------------------+----------------+ 62 | 63 | mysql> SELECT object_name, lock_type, lock_status, 64 | owner_thread_id, owner_event_id 65 | FROM performance_schema.metadata_locks 66 | WHERE object_type = 'TABLE' 67 | AND object_schema = 'sakila' 68 | ORDER BY owner_thread_id, object_name, lock_type 69 | *************************** 1. row *************************** 70 | object_name: category 71 | lock_type: SHARED_READ 72 | lock_status: GRANTED 73 | owner_thread_id: 792 74 | owner_event_id: 9 75 | *************************** 2. row *************************** 76 | object_name: film 77 | lock_type: SHARED_READ 78 | lock_status: GRANTED 79 | owner_thread_id: 792 80 | owner_event_id: 9 81 | *************************** 3. row *************************** 82 | object_name: film_category 83 | lock_type: SHARED_WRITE 84 | lock_status: GRANTED 85 | owner_thread_id: 792 86 | owner_event_id: 9 87 | *************************** 4. row *************************** 88 | object_name: category 89 | lock_type: SHARED_WRITE 90 | lock_status: GRANTED 91 | owner_thread_id: 793 92 | owner_event_id: 9 93 | *************************** 5. row *************************** 94 | object_name: film 95 | lock_type: SHARED_READ 96 | lock_status: GRANTED 97 | owner_thread_id: 793 98 | owner_event_id: 9 99 | *************************** 6. 
row *************************** 100 | object_name: film_category 101 | lock_type: SHARED_WRITE 102 | lock_status: GRANTED 103 | owner_thread_id: 793 104 | owner_event_id: 9 105 | *************************** 7. row *************************** 106 | object_name: address 107 | lock_type: SHARED_READ 108 | lock_status: GRANTED 109 | owner_thread_id: 794 110 | owner_event_id: 10 111 | *************************** 8. row *************************** 112 | object_name: customer 113 | lock_type: SHARED_WRITE 114 | lock_status: GRANTED 115 | owner_thread_id: 794 116 | owner_event_id: 10 117 | *************************** 9. row *************************** 118 | object_name: inventory 119 | lock_type: SHARED_READ 120 | lock_status: GRANTED 121 | owner_thread_id: 794 122 | owner_event_id: 10 123 | *************************** 10. row *************************** 124 | object_name: payment 125 | lock_type: SHARED_WRITE 126 | lock_status: GRANTED 127 | owner_thread_id: 794 128 | owner_event_id: 10 129 | *************************** 11. row *************************** 130 | object_name: rental 131 | lock_type: SHARED_WRITE 132 | lock_status: GRANTED 133 | owner_thread_id: 794 134 | owner_event_id: 10 135 | *************************** 12. row *************************** 136 | object_name: staff 137 | lock_type: SHARED_READ 138 | lock_status: GRANTED 139 | owner_thread_id: 794 140 | owner_event_id: 10 141 | *************************** 13. row *************************** 142 | object_name: store 143 | lock_type: SHARED_READ 144 | lock_status: GRANTED 145 | owner_thread_id: 794 146 | owner_event_id: 10 147 | *************************** 14. row *************************** 148 | object_name: address 149 | lock_type: SHARED_READ 150 | lock_status: GRANTED 151 | owner_thread_id: 795 152 | owner_event_id: 10 153 | *************************** 15. row *************************** 154 | object_name: customer 155 | lock_type: SHARED_WRITE 156 | lock_status: GRANTED 157 | owner_thread_id: 795 158 | owner_event_id: 10 159 | *************************** 16. row *************************** 160 | object_name: inventory 161 | lock_type: SHARED_READ 162 | lock_status: PENDING 163 | owner_thread_id: 795 164 | owner_event_id: 10 165 | *************************** 17. row *************************** 166 | object_name: payment 167 | lock_type: SHARED_WRITE 168 | lock_status: GRANTED 169 | owner_thread_id: 795 170 | owner_event_id: 10 171 | *************************** 18. row *************************** 172 | object_name: rental 173 | lock_type: SHARED_WRITE 174 | lock_status: GRANTED 175 | owner_thread_id: 795 176 | owner_event_id: 10 177 | *************************** 19. row *************************** 178 | object_name: staff 179 | lock_type: SHARED_READ 180 | lock_status: GRANTED 181 | owner_thread_id: 795 182 | owner_event_id: 10 183 | *************************** 20. row *************************** 184 | object_name: store 185 | lock_type: SHARED_READ 186 | lock_status: GRANTED 187 | owner_thread_id: 795 188 | owner_event_id: 10 189 | *************************** 21. row *************************** 190 | object_name: #sql-35e8_1d2 191 | lock_type: EXCLUSIVE 192 | lock_status: GRANTED 193 | owner_thread_id: 796 194 | owner_event_id: 9 195 | *************************** 22. row *************************** 196 | object_name: film 197 | lock_type: SHARED_UPGRADABLE 198 | lock_status: GRANTED 199 | owner_thread_id: 796 200 | owner_event_id: 9 201 | *************************** 23. 
row *************************** 202 | object_name: inventory 203 | lock_type: EXCLUSIVE 204 | lock_status: PENDING 205 | owner_thread_id: 796 206 | owner_event_id: 9 207 | *************************** 24. row *************************** 208 | object_name: inventory 209 | lock_type: SHARED_UPGRADABLE 210 | lock_status: GRANTED 211 | owner_thread_id: 796 212 | owner_event_id: 9 213 | *************************** 25. row *************************** 214 | object_name: rental 215 | lock_type: SHARED_UPGRADABLE 216 | lock_status: GRANTED 217 | owner_thread_id: 796 218 | owner_event_id: 9 219 | *************************** 26. row *************************** 220 | object_name: store 221 | lock_type: SHARED_UPGRADABLE 222 | lock_status: GRANTED 223 | owner_thread_id: 796 224 | owner_event_id: 9 225 | 226 | mysql> SELECT object_name, COUNT(*) 227 | FROM performance_schema.metadata_locks 228 | WHERE object_type = 'TABLE' 229 | AND object_schema = 'sakila' 230 | GROUP BY object_name 231 | ORDER BY object_name 232 | +---------------+----------+ 233 | | object_name | COUNT(*) | 234 | +---------------+----------+ 235 | | #sql-35e8_1d2 | 1 | 236 | | address | 2 | 237 | | category | 2 | 238 | | customer | 2 | 239 | | film | 3 | 240 | | film_category | 2 | 241 | | inventory | 4 | 242 | | payment | 2 | 243 | | rental | 3 | 244 | | staff | 2 | 245 | | store | 3 | 246 | +---------------+----------+ 247 | 248 | mysql> SELECT * FROM sys.schema_table_lock_waits 249 | *************************** 1. row *************************** 250 | object_schema: sakila 251 | object_name: inventory 252 | waiting_thread_id: 796 253 | waiting_pid: 466 254 | waiting_account: root@localhost 255 | waiting_lock_type: EXCLUSIVE 256 | waiting_lock_duration: TRANSACTION 257 | waiting_query: ALTER TABLE sakila.inventory FORCE 258 | waiting_query_secs: 0 259 | waiting_query_rows_affected: 0 260 | waiting_query_rows_examined: 0 261 | blocking_thread_id: 794 262 | blocking_pid: 464 263 | blocking_account: root@localhost 264 | blocking_lock_type: SHARED_READ 265 | blocking_lock_duration: TRANSACTION 266 | sql_kill_blocking_query: KILL QUERY 464 267 | sql_kill_blocking_connection: KILL 464 268 | *************************** 2. row *************************** 269 | object_schema: sakila 270 | object_name: inventory 271 | waiting_thread_id: 795 272 | waiting_pid: 465 273 | waiting_account: root@localhost 274 | waiting_lock_type: SHARED_READ 275 | waiting_lock_duration: TRANSACTION 276 | waiting_query: UPDATE sakila.customer SET active = IF(active = 1, 0, 1) WHERE customer_id = 99 277 | waiting_query_secs: 0 278 | waiting_query_rows_affected: 0 279 | waiting_query_rows_examined: 0 280 | blocking_thread_id: 794 281 | blocking_pid: 464 282 | blocking_account: root@localhost 283 | blocking_lock_type: SHARED_READ 284 | blocking_lock_duration: TRANSACTION 285 | sql_kill_blocking_query: KILL QUERY 464 286 | sql_kill_blocking_connection: KILL 464 287 | *************************** 3. 
row *************************** 288 | object_schema: sakila 289 | object_name: inventory 290 | waiting_thread_id: 796 291 | waiting_pid: 466 292 | waiting_account: root@localhost 293 | waiting_lock_type: EXCLUSIVE 294 | waiting_lock_duration: TRANSACTION 295 | waiting_query: ALTER TABLE sakila.inventory FORCE 296 | waiting_query_secs: 0 297 | waiting_query_rows_affected: 0 298 | waiting_query_rows_examined: 0 299 | blocking_thread_id: 796 300 | blocking_pid: 466 301 | blocking_account: root@localhost 302 | blocking_lock_type: SHARED_UPGRADABLE 303 | blocking_lock_duration: TRANSACTION 304 | sql_kill_blocking_query: KILL QUERY 466 305 | sql_kill_blocking_connection: KILL 466 306 | *************************** 4. row *************************** 307 | object_schema: sakila 308 | object_name: inventory 309 | waiting_thread_id: 795 310 | waiting_pid: 465 311 | waiting_account: root@localhost 312 | waiting_lock_type: SHARED_READ 313 | waiting_lock_duration: TRANSACTION 314 | waiting_query: UPDATE sakila.customer SET active = IF(active = 1, 0, 1) WHERE customer_id = 99 315 | waiting_query_secs: 0 316 | waiting_query_rows_affected: 0 317 | waiting_query_rows_examined: 0 318 | blocking_thread_id: 796 319 | blocking_pid: 466 320 | blocking_account: root@localhost 321 | blocking_lock_type: SHARED_UPGRADABLE 322 | blocking_lock_duration: TRANSACTION 323 | sql_kill_blocking_query: KILL QUERY 466 324 | sql_kill_blocking_connection: KILL 466 325 | 326 | mysql> SELECT blocking_pid, COUNT(*) 327 | FROM sys.schema_table_lock_waits 328 | WHERE waiting_pid <> blocking_pid 329 | GROUP BY blocking_pid 330 | ORDER BY COUNT(*) DESC 331 | +--------------+----------+ 332 | | blocking_pid | COUNT(*) | 333 | +--------------+----------+ 334 | | 464 | 2 | 335 | | 466 | 1 | 336 | +--------------+----------+ 337 | 338 | mysql> SELECT thd_id, conn_id, command, state, 339 | current_statement, time, statement_latency, 340 | trx_latency, trx_state 341 | FROM sys.session 342 | WHERE thd_id IN (792, 793, 794, 795, 796) 343 | ORDER BY conn_id 344 | *************************** 1. row *************************** 345 | thd_id: 792 346 | conn_id: 462 347 | command: Sleep 348 | state: NULL 349 | current_statement: UPDATE sakila.film_category SET category_id = IF(category_id = 7, 16, 7) WHERE film_id = 64 350 | time: 2 351 | statement_latency: NULL 352 | trx_latency: 2.09 s 353 | trx_state: ACTIVE 354 | *************************** 2. row *************************** 355 | thd_id: 793 356 | conn_id: 463 357 | command: Query 358 | state: updating 359 | current_statement: UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 360 | time: 2 361 | statement_latency: 1.59 s 362 | trx_latency: 1.59 s 363 | trx_state: ACTIVE 364 | *************************** 3. row *************************** 365 | thd_id: 794 366 | conn_id: 464 367 | command: Sleep 368 | state: NULL 369 | current_statement: SELECT COUNT(*) FROM performance_schema.metadata_locks ... wner_thread_id IN (795) AND lock_status = 'PENDING' 370 | time: 0 371 | statement_latency: NULL 372 | trx_latency: 1.09 s 373 | trx_state: ACTIVE 374 | *************************** 4. row *************************** 375 | thd_id: 795 376 | conn_id: 465 377 | command: Query 378 | state: Waiting for table metadata lock 379 | current_statement: UPDATE sakila.customer SET active = IF(active = 1, 0, 1) WHERE customer_id = 99 380 | time: 0 381 | statement_latency: 88.18 ms 382 | trx_latency: 88.43 ms 383 | trx_state: ACTIVE 384 | *************************** 5. 
row *************************** 385 | thd_id: 796 386 | conn_id: 466 387 | command: Query 388 | state: Waiting for table metadata lock 389 | current_statement: ALTER TABLE sakila.inventory FORCE 390 | time: 0 391 | statement_latency: 89.88 ms 392 | trx_latency: 43.30 us 393 | trx_state: COMMITTED 394 | 395 | mysql> SELECT * FROM sys.innodb_lock_waits 396 | *************************** 1. row *************************** 397 | wait_started: 2020-08-02 14:17:13 398 | wait_age: 00:00:02 399 | wait_age_secs: 2 400 | locked_table: `sakila`.`category` 401 | locked_table_schema: sakila 402 | locked_table_name: category 403 | locked_table_partition: None 404 | locked_table_subpartition: None 405 | locked_index: PRIMARY 406 | locked_type: RECORD 407 | waiting_trx_id: 535860 408 | waiting_trx_started: 2020-08-02 14:17:13 409 | waiting_trx_age: 00:00:02 410 | waiting_trx_rows_locked: 1 411 | waiting_trx_rows_modified: 0 412 | waiting_pid: 463 413 | waiting_query: UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 414 | waiting_lock_id: 2711671600928:1795:4:282:2711634698920 415 | waiting_lock_mode: X,REC_NOT_GAP 416 | blocking_trx_id: 535859 417 | blocking_pid: 462 418 | blocking_query: None 419 | blocking_lock_id: 2711671600096:1795:4:282:2711634694976 420 | blocking_lock_mode: S,REC_NOT_GAP 421 | blocking_trx_started: 2020-08-02 14:17:13 422 | blocking_trx_age: 00:00:02 423 | blocking_trx_rows_locked: 5 424 | blocking_trx_rows_modified: 2 425 | sql_kill_blocking_query: KILL QUERY 462 426 | sql_kill_blocking_connection: KILL 462 427 | 1 row in set (0.0017 sec) 428 | 429 | mysql> UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 430 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 431 | 432 | mysql> ALTER TABLE sakila.inventory FORCE 433 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 434 | 435 | mysql> UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 436 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 437 | 438 | mysql> ALTER TABLE sakila.inventory FORCE 439 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 440 | 441 | mysql> UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 442 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 443 | 444 | mysql> UPDATE sakila.category SET name = IF(name = 'Travel', 'Exploring', 'Travel') WHERE category_id = 16 445 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 446 | 447 | mysql> ALTER TABLE sakila.inventory FORCE 448 | ERROR: 1205: Lock wait timeout exceeded; try restarting transaction 449 | 450 | 451 | -- Metrics reported by count collected during the test: 452 | NOW(),innodb_row_lock_current_waits,lock_row_lock_current_waits 453 | 2020-08-02 14:17:11.168000,0,0 454 | 2020-08-02 14:17:12.168000,0,0 455 | 2020-08-02 14:17:13.180000,0,0 456 | 2020-08-02 14:17:14.168000,1,1 457 | 2020-08-02 14:17:15.177000,1,1 458 | 2020-08-02 14:17:16.168000,1,1 459 | 2020-08-02 14:17:17.169000,1,1 460 | 2020-08-02 14:17:18.180000,0,0 461 | 2020-08-02 14:17:19.180000,0,0 462 | 2020-08-02 14:17:20.168000,0,0 463 | 2020-08-02 14:17:21.180000,0,0 464 | 2020-08-02 14:17:22.168000,1,1 465 | 2020-08-02 14:17:23.179000,1,1 466 | 2020-08-02 14:17:24.180000,1,1 467 | 2020-08-02 14:17:25.179000,1,1 468 | 2020-08-02 14:17:26.182000,0,0 469 | 2020-08-02 14:17:27.180000,0,0 
470 | 2020-08-02 14:17:28.168000,0,0 471 | 2020-08-02 14:17:29.180000,0,0 472 | 2020-08-02 14:17:30.168000,1,1 473 | 474 | -- Metrics reported by rate collected during the test: 475 | time,innodb_row_lock_time,innodb_row_lock_waits,lock_deadlocks,lock_timeouts 476 | 2020-08-02 14:17:12.168000,0.0,0.0,0.0,0.0 477 | 2020-08-02 14:17:13.180000,0.0,0.0,0.0,0.0 478 | 2020-08-02 14:17:14.168000,0.0,1.0121457489878543,0.0,0.0 479 | 2020-08-02 14:17:15.177000,0.0,0.0,0.0,0.0 480 | 2020-08-02 14:17:16.168000,2019.1725529767912,1.0090817356205852,0.0,1.0090817356205852 481 | 2020-08-02 14:17:17.169000,0.0,0.0,0.0,0.0 482 | 2020-08-02 14:17:18.180000,1541.0484668644908,0.0,0.0,0.9891196834817014 483 | 2020-08-02 14:17:19.180000,0.0,0.0,0.0,0.0 484 | 2020-08-02 14:17:20.168000,0.0,0.0,0.0,0.0 485 | 2020-08-02 14:17:21.180000,0.0,0.0,0.0,0.0 486 | 2020-08-02 14:17:22.168000,82.99595141700405,2.0242914979757085,0.0,0.0 487 | 2020-08-02 14:17:23.179000,0.0,0.0,0.0,0.0 488 | 2020-08-02 14:17:24.180000,1997.0029970029973,0.9990009990009991,0.0,0.9990009990009991 489 | 2020-08-02 14:17:25.179000,0.0,0.0,0.0,0.0 490 | 2020-08-02 14:17:26.182000,2115.6530408773683,0.9970089730807579,0.0,0.9970089730807579 491 | 2020-08-02 14:17:27.180000,0.0,0.0,0.0,0.0 492 | 2020-08-02 14:17:28.168000,0.0,0.0,0.0,0.0 493 | 2020-08-02 14:17:29.180000,0.0,0.0,0.0,0.0 494 | 2020-08-02 14:17:30.168000,66.80161943319838,2.0242914979757085,0.0,0.0 495 | 496 | mysql> SELECT error_number, error_name, sum_error_raised 497 | FROM performance_schema.events_errors_summary_global_by_error 498 | WHERE error_name IN ('ER_LOCK_WAIT_TIMEOUT', 'ER_LOCK_DEADLOCK') 499 | +--------------+----------------------+------------------+ 500 | | error_number | error_name | sum_error_raised | 501 | +--------------+----------------------+------------------+ 502 | | 1205 | ER_LOCK_WAIT_TIMEOUT | 317 | 503 | | 1213 | ER_LOCK_DEADLOCK | 12 | 504 | +--------------+----------------------+------------------+ 505 | 506 | mysql> SELECT event_name, count_star, sum_errors 507 | FROM performance_schema.events_statements_summary_global_by_event_name 508 | WHERE event_name IN ('statement/sql/alter_table', 509 | 'statement/sql/update') 510 | +---------------------------+------------+------------+ 511 | | event_name | count_star | sum_errors | 512 | +---------------------------+------------+------------+ 513 | | statement/sql/alter_table | 116 | 104 | 514 | | statement/sql/update | 4562 | 192 | 515 | +---------------------------+------------+------------+ 516 | 517 | mysql> SELECT Variable_name, Variable_value 518 | FROM sys.metrics 519 | WHERE Variable_name IN ( 520 | 'innodb_row_lock_current_waits', 521 | 'lock_row_lock_current_waits', 522 | 'innodb_row_lock_time', 523 | 'innodb_row_lock_waits', 524 | 'lock_deadlocks', 525 | 'lock_timeouts' 526 | ) 527 | +-------------------------------+----------------+ 528 | | Variable_name | Variable_value | 529 | +-------------------------------+----------------+ 530 | | innodb_row_lock_current_waits | 1 | 531 | | innodb_row_lock_time | 417383 | 532 | | innodb_row_lock_waits | 392 | 533 | | lock_deadlocks | 0 | 534 | | lock_row_lock_current_waits | 1 | 535 | | lock_timeouts | 192 | 536 | +-------------------------------+----------------+ 537 | 538 | 2020-08-02 14:17:30.664018 0 [INFO] Stopping the threads. 
539 | 540 | 2020-08-02 14:17:33.818122 0 [INFO] Completing the workload Listing 17-1 541 | 2020-08-02 14:17:33.820075 0 [INFO] Disconnecting for the workload Listing 17-1 542 | 2020-08-02 14:17:33.820075 0 [INFO] Completed the workload Listing 17-1 543 | -------------------------------------------------------------------------------- /Contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing to Apress Source Code 2 | 3 | Copyright for Apress source code belongs to the author(s). However, under fair use you are encouraged to fork and contribute minor corrections and updates for the benefit of the author(s) and other readers. 4 | 5 | ## How to Contribute 6 | 7 | 1. Make sure you have a GitHub account. 8 | 2. Fork the repository for the relevant book. 9 | 3. Create a new branch on which to make your change, e.g. 10 | `git checkout -b my_code_contribution` 11 | 4. Commit your change. Include a commit message describing the correction. Please note that if your commit message is not clear, the correction will not be accepted. 12 | 5. Submit a pull request. 13 | 14 | Thank you for your contribution! -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Freeware License, some rights reserved 2 | 3 | Copyright (c) 2021 Jesper Wisborg Krogh 4 | 5 | Permission is hereby granted, free of charge, to anyone obtaining a copy 6 | of this software and associated documentation files (the "Software"), 7 | to work with the Software within the limits of freeware distribution and fair use. 8 | This includes the rights to use, copy, and modify the Software for personal use. 9 | Users are also allowed and encouraged to submit corrections and modifications 10 | to the Software for the benefit of other users. 11 | 12 | It is not allowed to reuse, modify, or redistribute the Software for 13 | commercial use in any way, or for a user’s educational materials such as books 14 | or blog articles without prior permission from the copyright holder. 15 | 16 | The above copyright notice and this permission notice need to be included 17 | in all copies or substantial portions of the software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS OR APRESS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | 27 | 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Apress Source Code 2 | 3 | This repository accompanies [*MySQL Concurrency*](https://www.apress.com/9781484266519) by Jesper Wisborg Krogh (Apress, 2021). 4 | 5 | [comment]: #cover 6 | ![Cover image](9781484266519.jpg) 7 | 8 | Download the files as a zip using the green button, or clone the repository to your machine using Git. 9 | 10 | ## Releases 11 | 12 | Release v1.2 updates the URL for downloading the world database and the path within the ZIP file after it has been updated on downloads.mysql.com. 
Thanks to shalk (xiao kun) for the patch and black-lotus (Dondon) for a similar patch. 13 | 14 | Release v1.1 fixes the directory structure, adds missing files for chapters 2, 13, 15, 16, 17, 18, and adds an error message when using `concurrency_book.generate.load()` with the classic MySQL protocol. 15 | 16 | Release v1.0 corresponds to the code in the published book, without corrections or updates. 17 | 18 | ## Contributions 19 | 20 | See the file Contributing.md for more information on how you can contribute to this repository. 21 | -------------------------------------------------------------------------------- /concurrency_book/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/mysql-concurrency/1e89c82a94fbbcae086221adf3f1220f810bc8a6/concurrency_book/__init__.py -------------------------------------------------------------------------------- /concurrency_book/generate.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import queue 3 | import pathlib 4 | import sys 5 | from importlib import import_module 6 | 7 | import mysqlsh 8 | 9 | # In MySQL Shell, to be able to use the modules in libs (for example 10 | # libs.query), it is necessary both to import libs itself and to 11 | # import each module by its full path 12 | # noinspection PyUnresolvedReferences 13 | from concurrency_book import libs 14 | # noinspection PyUnresolvedReferences 15 | import concurrency_book.libs.load 16 | # noinspection PyUnresolvedReferences 17 | import concurrency_book.libs.log 18 | # noinspection PyUnresolvedReferences 19 | import concurrency_book.libs.query 20 | # noinspection PyUnresolvedReferences 21 | import concurrency_book.libs.util 22 | # noinspection PyUnresolvedReferences 23 | import concurrency_book.libs.workloads 24 | 25 | LOG = libs.log.Log(libs.log.INFO) 26 | WORKLOADS_PATH = pathlib.Path(__file__).parent.joinpath('workloads').resolve() 27 | WORKLOADS = libs.workloads.load(WORKLOADS_PATH) 28 | SCHEMA_LOADS = libs.load.get_jobs() 29 | 30 | 31 | def _run_connection(connection, session, query_queue, return_queue, 32 | sql_formatter): 33 | """Run the work done by a single connection.""" 34 | 35 | local = threading.local() 36 | try: 37 | session.set_fetch_warnings(True) 38 | except (IndexError, AttributeError): 39 | # Not available in the legacy protocol.
MySQL Shell 8.0.21 and earlier 40 | # return IndexError, 8.0.22 and later return AttributeError 41 | pass 42 | 43 | local.data = {} 44 | while True: 45 | local.query = libs.query.get_from_queue(query_queue) 46 | if local.query is None: 47 | # No more work 48 | query_queue.task_done() 49 | return None 50 | 51 | local.result = None 52 | local.error = None 53 | local.sql = sql_formatter.sql_global_sub(local.query.sql, connection) 54 | if local.sql.upper() in ('START TRANSACTION', 'BEGIN'): 55 | local.result = session.start_transaction() 56 | elif local.sql.upper() == 'COMMIT': 57 | local.result = session.commit() 58 | elif local.sql.upper() == 'ROLLBACK': 59 | local.result = session.rollback() 60 | else: 61 | local.params = [] 62 | for local.parameter in local.query.parameters: 63 | try: 64 | local.params.append(local.data[local.parameter]) 65 | except KeyError as e: 66 | local.error = f'KeyError: {e} not found in the stored ' + \ 67 | 'result' 68 | local.result = None 69 | 70 | if local.error is None: 71 | try: 72 | local.result = session.run_sql(local.sql, local.params) 73 | except mysqlsh.DBError as e: 74 | local.error = e 75 | else: 76 | if local.query.store: 77 | # Only the result of the first row is stored. 78 | # Ensure the whole result set is consumed 79 | try: 80 | local.columns = [col.column_label for col 81 | in local.result.columns] 82 | local.data = dict( 83 | zip(local.columns, local.result.fetch_one())) 84 | local.result.fetch_all() 85 | except AttributeError as e: 86 | print(f'sql .......: {local.sql}') 87 | print(f'result ....: {local.result}') 88 | print(f'dir .......: {dir(local.result)}') 89 | print(f'str .......: {local.result.__str__()}') 90 | print(f'repr ......: {local.result.__repr__()}') 91 | print(e) 92 | 93 | if not local.query.silent and local.query.show_result: 94 | return_queue.put( 95 | libs.query.RESULT(local.query, local.result, local.error) 96 | ) 97 | query_queue.task_done() 98 | 99 | 100 | def _run_load(schema): 101 | """Load a schema.""" 102 | loader = libs.load.Load(schema.name) 103 | loader.execute() 104 | 105 | 106 | def _run_implementation(workload, uri): 107 | """Execute the workload as defined in a class.""" 108 | implementation = workload.implementation 109 | module_name = f'concurrency_book.{implementation.module}' 110 | try: 111 | module = import_module(module_name) 112 | except ModuleNotFoundError: 113 | LOG.error(f'Unknown module: {implementation.module}. ' + 114 | 'Check the code for errors.') 115 | return False 116 | 117 | try: 118 | class_object = getattr(module, implementation.class_name) 119 | except AttributeError: 120 | LOG.error(f'Unknown class name - module: {implementation.module} ' + 121 | f'- class: {implementation.class_name}') 122 | return False 123 | 124 | try: 125 | task = class_object(workload=workload, 126 | session=mysqlsh.globals.session, 127 | log=LOG, 128 | **implementation.args) 129 | except (TypeError, ValueError) as e: 130 | LOG.error('Wrong arguments provided for the class - module: ' + 131 | f'{implementation.module} - class: ' + 132 | f'{implementation.class_name} - arguments: ' + 133 | f'{implementation.args} - error: {e}') 134 | return False 135 | 136 | task.execute(uri) 137 | 138 | return True 139 | 140 | 141 | def _run_main(workload, uri): 142 | """Run by the main thread. This is where the worker threads are 143 | created.
It is also here that the work among the worker threads is 144 | coordinated.""" 145 | 146 | LOG.workload = workload 147 | LOG.info(f'Starting the workload {workload.name}') 148 | LOG.caption() 149 | 150 | if workload.implementation.class_name is not None: 151 | success = _run_implementation(workload, uri) 152 | if not success: 153 | return None 154 | 155 | query_queue = [] 156 | connections = [] 157 | sessions = [] 158 | result_queue = queue.Queue() 159 | processlist_ids = [None] * workload.connections 160 | thread_ids = [None] * workload.connections 161 | event_ids = [None] * workload.connections 162 | 163 | # Set up the query queues and the sessions for the workload 164 | for i in range(workload.connections): 165 | query_queue.append(queue.Queue()) 166 | sessions.append(libs.util.get_session(workload, uri)) 167 | if sessions[i] is None: 168 | # Failed to get a connection 169 | return None 170 | (processlist_ids[i], 171 | thread_ids[i], 172 | event_ids[i]) = libs.query.get_connection_ids(sessions[i]) 173 | 174 | # Output the connection and thread ids to make it easier to 175 | # investigate the workload. 176 | LOG.ids(processlist_ids, thread_ids, event_ids) 177 | 178 | sql_formatter = libs.query.Formatter(processlist_ids, 179 | thread_ids, 180 | event_ids) 181 | LOG.sql_formatter = sql_formatter 182 | for i in range(workload.connections): 183 | connections.append(threading.Thread( 184 | target=_run_connection, daemon=True, 185 | args=(i + 1, sessions[i], query_queue[i], result_queue, 186 | sql_formatter) 187 | )) 188 | connections[i].start() 189 | 190 | query_handler = libs.query.Query( 191 | query_queue, result_queue, processlist_ids, thread_ids, LOG, 192 | workload.concurrent) 193 | for loop in range(workload.loops): 194 | for query in workload.queries: 195 | query_handler.exec(query) 196 | 197 | # Allow the user to run investigations 198 | if workload.investigations: 199 | session = mysqlsh.globals.session 200 | 201 | number, investigation, sql = libs.util.prompt_investigation( 202 | workload.investigations, sql_formatter, workload.connections) 203 | while investigation is not None: 204 | print(f'-- Investigation #{number}') 205 | LOG.sql(investigation, sql, 3) 206 | result = None 207 | error = None 208 | try: 209 | result = session.run_sql(sql) 210 | except mysqlsh.DBError as e: 211 | error = e 212 | 213 | LOG.result(libs.query.RESULT(investigation, result, error)) 214 | number, investigation, sql = libs.util.prompt_investigation( 215 | workload.investigations, sql_formatter, 216 | workload.connections) 217 | print('') 218 | 219 | LOG.info(f'Completing the workload {workload.name}') 220 | 221 | for query in workload.completions: 222 | query_handler.exec(query) 223 | 224 | # Tell the connection threads to stop and disconnect 225 | LOG.info(f'Disconnecting for the workload {workload.name}') 226 | for i in range(workload.connections): 227 | query_queue[i].put(None) 228 | for i in range(workload.connections): 229 | query_queue[i].join() 230 | connections[i].join() 231 | sessions[i].close() 232 | 233 | LOG.info(f'Completed the workload {workload.name}') 234 | LOG.workload = None 235 | 236 | 237 | def load(schema_name=None): 238 | """High level steps to load a schema.""" 239 | 240 | # An existing connection is required to load a schema 241 | has_connection = libs.util.verify_session() 242 | if not has_connection: 243 | return None 244 | 245 | # Loading a schema requires changing the default schema.
246 | # This is currently only supported using the set_current_schema() 247 | # method, which is not available for the classic protocol. So verify 248 | # the method is available before proceeding. 249 | try: 250 | getattr(mysqlsh.globals.session, 'set_current_schema') 251 | except AttributeError: 252 | print('Loading data requires the session.set_current_schema() method, ' + 253 | 'which is only available with the mysqlx protocol. Please exit MySQL ' + 254 | 'Shell and reconnect using the --mysqlx option.', file=sys.stderr) 255 | return None 256 | 257 | schema = None 258 | keep_asking = True 259 | if schema_name is not None: 260 | keep_asking = False 261 | try: 262 | schema = SCHEMA_LOADS[schema_name] 263 | except KeyError: 264 | print(f'Invalid schema name: "{schema_name}"') 265 | 266 | if schema is None: 267 | schema = libs.util.prompt_task('Schema load job', SCHEMA_LOADS) 268 | 269 | while schema is not None: 270 | _run_load(schema) 271 | print('') 272 | if keep_asking: 273 | schema = libs.util.prompt_task('Schema load job', SCHEMA_LOADS) 274 | else: 275 | schema = None 276 | 277 | 278 | def run(workload_name=None): 279 | """High level steps to execute a workload.""" 280 | 281 | # An existing connection is required to run a workload 282 | has_connection = libs.util.verify_session() 283 | if not has_connection: 284 | return None 285 | 286 | workload = None 287 | keep_asking = True 288 | if workload_name is not None: 289 | keep_asking = False 290 | try: 291 | workload_name = libs.util.normalize_task_name(workload_name) 292 | workload = WORKLOADS[workload_name] 293 | except KeyError: 294 | print(f'Invalid workload name: "{workload_name}"') 295 | 296 | if workload is None: 297 | workload = libs.util.prompt_task('workload', WORKLOADS) 298 | 299 | uri = None 300 | if workload is not None: 301 | uri = libs.util.get_uri() 302 | while workload is not None: 303 | _run_main(workload, uri) 304 | print('') 305 | if keep_asking: 306 | workload = libs.util.prompt_task('workload', WORKLOADS) 307 | else: 308 | workload = None 309 | 310 | 311 | def show(): 312 | """List the available workloads and load jobs.""" 313 | 314 | libs.util.list_tasks('workload', WORKLOADS) 315 | print('') 316 | libs.util.list_tasks('Schema load job', SCHEMA_LOADS) 317 | 318 | 319 | def help(): 320 | """Display help.""" 321 | 322 | print('The following actions are supported:') 323 | print('=' * 36) 324 | print('') 325 | print('* help()') 326 | print(' Display this help.') 327 | print('') 328 | print('* load(schema_name=None)') 329 | print(' Load a schema. Optionally takes the name of the schema to be') 330 | print(' loaded. If no schema name or an invalid one is given, you') 331 | print(' will be prompted to select one.') 332 | print('') 333 | print('* show()') 334 | print(' List the available workloads. The function takes no arguments.') 335 | print('') 336 | print('* run(workload_name=None)') 337 | print(' Execute a workload. Optionally the name of the workload can be') 338 | print(' specified. If no workload name or an invalid one is given, you') 339 | print(' will be prompted to select one.
You will also be required to') 340 | print(' enter the password.') 341 | print('') 342 | -------------------------------------------------------------------------------- /concurrency_book/libs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/mysql-concurrency/1e89c82a94fbbcae086221adf3f1220f810bc8a6/concurrency_book/libs/__init__.py -------------------------------------------------------------------------------- /concurrency_book/libs/innodb_buffer_pool.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from collections import namedtuple 3 | 4 | STAT = namedtuple('BufferPoolStat', ['value', 'rate']) 5 | 6 | SQL = """ 7 | SELECT NOW(6) AS 'time', 8 | INNODB_BUFFER_POOL_STATS.* 9 | FROM information_schema.INNODB_BUFFER_POOL_STATS 10 | ORDER BY pool_id 11 | """.strip() 12 | 13 | 14 | class Stats(object): 15 | """Class for collecting the InnoDB buffer pool statistics.""" 16 | _samples = [] 17 | _session = None 18 | _columns = None 19 | 20 | def __init__(self, session): 21 | """Initialize the statistics.""" 22 | self._session = session 23 | self._samples = [] 24 | self._columns = None 25 | 26 | def collect(self): 27 | """Collect and store the current statistics.""" 28 | result = self._session.run_sql(SQL) 29 | if self._columns is None: 30 | self._columns = [col.column_label.lower() for col 31 | in result.columns] 32 | 33 | sample = [] 34 | for row in result.fetch_all(): 35 | data = {self._columns[i]: row[i] 36 | for i in range(len(self._columns))} 37 | # Convert the datetime Shell value to a Python datetime object 38 | data['time'] = datetime.fromisoformat(str(data['time'])) 39 | sample.append(data) 40 | 41 | self._samples.append(sample) 42 | 43 | def delta(self, metric, first=0, last=-1): 44 | """Return the delta and rate for a buffer pool metric. 45 | Optionally specify which two measurements to use for 46 | the calculation. The default is to use the first and 47 | last measurements.""" 48 | try: 49 | sample_first = self._samples[first] 50 | sample_last = self._samples[last] 51 | except IndexError: 52 | # The requested samples do not exist 53 | return None 54 | 55 | pool_ids = [row['pool_id'] for row in sample_first] 56 | pool_ids.sort() 57 | time_first = sample_first[pool_ids[0]]['time'] 58 | time_last = sample_last[pool_ids[0]]['time'] 59 | metric_first = 0 60 | metric_last = 0 61 | for pool_id in pool_ids: 62 | try: 63 | metric_first += sample_first[pool_id][metric] 64 | metric_last += sample_last[pool_id][metric] 65 | except KeyError: 66 | # The metric does not exist 67 | return None 68 | 69 | delta = metric_last - metric_first 70 | interval = (time_last - time_first).total_seconds() 71 | return STAT(delta, delta/interval) 72 | -------------------------------------------------------------------------------- /concurrency_book/libs/innodb_monitor.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | 4 | RE_TIME = r'\d\d\d\d-\d\d-\d\d [\d ]\d:\d\d:\d\d' 5 | RE_HEX = r'(?:0x)?[0-9a-f]*' 6 | RE_BANNER = 'INNODB MONITOR OUTPUT' 7 | RE_START = re.compile(rf'^({RE_TIME}) {RE_HEX} {RE_BANNER}\s+=+\s*\n', 8 | re.MULTILINE | re.DOTALL) 9 | RE_END = re.compile(r'^-+\s*\nEND OF INNODB MONITOR OUTPUT($)', 10 | re.MULTILINE | re.DOTALL) 11 | RE_SECTION = re.compile(r'^-+\n(.+)\n-+(\n)', re.MULTILINE) 12 | RE_INT_OR_FLOAT = r'\d+(?:\.\d+)?'
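# A minimal illustration of what RE_SEMAPHORE (defined next) matches: a
# semaphore wait line from the SEMAPHORES section of the InnoDB monitor
# output. The thread id, file name, line number, and wait time below are
# made up for the example; the captured group is the wait time in seconds:
#
#   >>> m = RE_SEMAPHORE.search(
#   ...     '--Thread 140586927674112 has waited at buf0flu.cc line 1236 '
#   ...     'for 0.00 seconds the semaphore:')
#   >>> m[1]
#   '0.00'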
13 | RE_SEMAPHORE = re.compile( 14 | rf'^--Thread \d+ has waited at .+ line \d+ for ({RE_INT_OR_FLOAT}) ' + 15 | 'seconds the semaphore:$', 16 | re.MULTILINE) 17 | 18 | 19 | class InnodbMonitor(object): 20 | """Collect and return information from the InnoDB monitor.""" 21 | _session = None 22 | _output = None 23 | _sections = {} 24 | _output_time = None 25 | 26 | def __init__(self, session): 27 | """Initialize the data.""" 28 | self._session = session 29 | self._output = None 30 | self._reset_sections() 31 | 32 | def _reset_sections(self): 33 | """Reset the stored sections.""" 34 | self._sections = {} 35 | 36 | def _parse_output(self): 37 | """Parse the InnoDB monitor report.""" 38 | self._reset_sections() 39 | m_start = RE_START.search(self._output) 40 | m_end = RE_END.search(self._output) 41 | if m_start is None or m_end is None: 42 | # Invalid output 43 | self._output = None 44 | return False 45 | 46 | self._output_time = datetime.fromisoformat(m_start[1]) 47 | 48 | # Find the header and offsets for each section 49 | # Each section ends just before the next section, and the last 50 | # section ends just before the end banner. 51 | headers = [] 52 | start_offsets = [] 53 | end_offsets = [] 54 | count = 0 55 | sections = RE_SECTION.finditer(self._output) 56 | for section in sections: 57 | if count > 0: 58 | end_offsets.append(section.start() - 1) 59 | start_offsets.append(section.end()) 60 | headers.append(section[1]) 61 | count += 1 62 | end_offsets.append(m_end.start() - 1) 63 | 64 | # Create an object for each section with the section specific 65 | # logic. 66 | for i in range(len(headers)): 67 | name = headers[i] 68 | text = self._output[start_offsets[i]:end_offsets[i]] 69 | self._sections[name] = SECTIONS[name](text) 70 | 71 | return True 72 | 73 | def get_section(self, name): 74 | """Return a single section.""" 75 | try: 76 | section = self._sections[name.upper()] 77 | except KeyError: 78 | section = None 79 | 80 | return section 81 | 82 | def fetch(self): 83 | """Fetch the InnoDB monitor report.""" 84 | sql = "SHOW ENGINE INNODB STATUS" 85 | self._output = self._session.run_sql(sql).fetch_one()[2] 86 | self._parse_output() 87 | 88 | 89 | class StatusSection(object): 90 | """Class for handling individual InnoDB monitor sections.""" 91 | _name = None # Set in the individual classes 92 | _body = None 93 | 94 | def __init__(self, body): 95 | """Initialize the section body.""" 96 | self._body = body 97 | 98 | @property 99 | def body(self): 100 | """Return the content of the body.""" 101 | return self._body 102 | 103 | @property 104 | def name(self): 105 | """Return the name of the section.""" 106 | return self._name 107 | 108 | @property 109 | def header(self): 110 | """Returns the banner at the start of the section.""" 111 | header = '-' * len(self._name) + '\n' 112 | header += self._name + '\n' 113 | header += '-' * len(self._name) 114 | return header 115 | 116 | @property 117 | def content(self): 118 | """The section header and body""" 119 | return f'{self.header}\n{self.body}' 120 | 121 | 122 | class SectionBackgroundThread(StatusSection): 123 | """The class for the section with the background thread""" 124 | _name = 'BACKGROUND THREAD' 125 | 126 | 127 | class SectionBufferPoolAndMemory(StatusSection): 128 | """The class for the section for the buffer pool""" 129 | _name = 'BUFFER POOL AND MEMORY' 130 | 131 | 132 | class SectionFileIO(StatusSection): 133 | """The class for the section for file I/O""" 134 | _name = 'FILE I/O' 135 | 136 | 137 | class 
SectionIndividualBufferPoolInfo(StatusSection): 138 | """The class for the section for individual buffer pool 139 | instances""" 140 | _name = 'INDIVIDUAL BUFFER POOL INFO' 141 | 142 | 143 | class SectionInsertBufferAndAdaptiveHashIndex(StatusSection): 144 | """The class for the section for the change buffer and the 145 | adaptive hash index""" 146 | _name = 'INSERT BUFFER AND ADAPTIVE HASH INDEX' 147 | 148 | 149 | class SectionLatestDetectedDeadlock(StatusSection): 150 | """The class for the section with the latest detected deadlock""" 151 | _name = 'LATEST DETECTED DEADLOCK' 152 | 153 | 154 | class SectionLatestForeignKeyError(StatusSection): 155 | """The class for the section with the latest foreign key error""" 156 | _name = 'LATEST FOREIGN KEY ERROR' 157 | 158 | 159 | class SectionLog(StatusSection): 160 | """The class for the section with log information""" 161 | _name = 'LOG' 162 | 163 | 164 | class SectionRowOperations(StatusSection): 165 | """The class for the section with row operations information""" 166 | _name = 'ROW OPERATIONS' 167 | 168 | 169 | class SectionSemaphores(StatusSection): 170 | """The class for the section with semaphore information""" 171 | _name = 'SEMAPHORES' 172 | _max_wait = 0 173 | _num_waits = 0 174 | 175 | def __init__(self, body): 176 | super().__init__(body) 177 | self._max_wait = 0 178 | self._num_waits = 0 179 | self._parse() 180 | 181 | @property 182 | def num_waits(self): 183 | return self._num_waits 184 | 185 | @property 186 | def max_wait(self): 187 | return self._max_wait 188 | 189 | def _parse(self): 190 | waits = RE_SEMAPHORE.findall(self._body) 191 | self._num_waits = len(waits) 192 | try: 193 | self._max_wait = max([float(wait) for wait in waits]) 194 | except ValueError: 195 | # No waits 196 | self._max_wait = 0 197 | 198 | 199 | class SectionTransactions(StatusSection): 200 | """The class for the section with transaction information""" 201 | _name = 'TRANSACTIONS' 202 | 203 | 204 | # Map of sections names to class 205 | SECTIONS = { 206 | 'BACKGROUND THREAD': SectionBackgroundThread, 207 | 'SEMAPHORES': SectionSemaphores, 208 | 'LATEST FOREIGN KEY ERROR': SectionLatestForeignKeyError, 209 | 'LATEST DETECTED DEADLOCK': SectionLatestDetectedDeadlock, 210 | 'TRANSACTIONS': SectionTransactions, 211 | 'FILE I/O': SectionFileIO, 212 | 'INSERT BUFFER AND ADAPTIVE HASH INDEX': 213 | SectionInsertBufferAndAdaptiveHashIndex, 214 | 'LOG': SectionLog, 215 | 'BUFFER POOL AND MEMORY': SectionBufferPoolAndMemory, 216 | 'INDIVIDUAL BUFFER POOL INFO': SectionIndividualBufferPoolInfo, 217 | 'ROW OPERATIONS': SectionRowOperations, 218 | } 219 | -------------------------------------------------------------------------------- /concurrency_book/libs/innodb_mutex.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | from collections import namedtuple 4 | 5 | SQL = "SHOW ENGINE INNODB MUTEX" 6 | MUTEX_ROW = namedtuple('MutexRow', ['Type', 'Name', 'Status']) 7 | RE_STATUS_WAITS = re.compile(r'\bwaits=(\d+)\b') 8 | RE_NAME_FILE = re.compile(r'(\w+\.\w+):(\d+)$') 9 | 10 | 11 | def _get_delta_waits(last, prev): 12 | """Calculate the delta for all mutex waits.""" 13 | delta = {} 14 | min_width_key = 0 15 | min_width_waits = 0 16 | for key in last: 17 | total_waits = last[key] 18 | try: 19 | prev_waits = prev[key] 20 | except KeyError: 21 | prev_waits = 0 22 | delta_waits = total_waits - prev_waits 23 | if delta_waits > 0: 24 | delta[key] = delta_waits 25 | min_width_key = 
max(min_width_key, len(key)) 26 | min_width_waits = max(min_width_waits, len(str(delta_waits))) 27 | 28 | return delta 29 | 30 | 31 | def _gen_delta_report(delta, key_header): 32 | """Generate a report using the MySQL table format for a given 33 | set of delta values.""" 34 | min_width_key = len(key_header) 35 | min_width_waits = len('Waits') 36 | 37 | for key in delta: 38 | min_width_key = max(min_width_key, len(key)) 39 | min_width_waits = max(min_width_waits, len(str(delta[key]))) 40 | 41 | bar = '+-' + '-' * min_width_key + '-' 42 | bar += '+-' + '-' * min_width_waits + '-+' 43 | fmt_header = f'| {{0:{min_width_key}s}} ' 44 | fmt_header += f'| {{1:{min_width_waits}s}} |\n' 45 | fmt = f'| {{0:{min_width_key}s}} ' 46 | fmt += f'| {{1:{min_width_waits}d}} |\n' 47 | report = bar + '\n' 48 | report += fmt_header.format(key_header, 'Waits') 49 | report += bar + '\n' 50 | delta_sorted = sorted(delta.items(), key=lambda item: item[1]) 51 | for key, waits in delta_sorted: 52 | report += fmt.format(key, waits) 53 | report += bar 54 | return report 55 | 56 | 57 | class InnodbMutexMonitor(object): 58 | """Class for working with the InnoDB mutex monitor""" 59 | _session = None 60 | _rows = [] 61 | _total_waits = 0 62 | _prev_total_waits = 0 63 | _prev_waits_by_file = {} 64 | _prev_waits_by_file_line = {} 65 | _waits_by_name = {} 66 | _waits_by_file = {} 67 | _waits_by_file_line = {} 68 | _output_time = None 69 | 70 | def __init__(self, session): 71 | """Initialize the instance""" 72 | self._session = session 73 | self._prev_total_waits = 0 74 | self._prev_waits_by_file = {} 75 | self._prev_waits_by_file_line = {} 76 | self._reset() 77 | 78 | @property 79 | def output_time(self): 80 | """Return the time of the output.""" 81 | return self._output_time 82 | 83 | @property 84 | def total_waits(self): 85 | """The total number of waits.""" 86 | return self._total_waits 87 | 88 | @property 89 | def waits_increased(self): 90 | """Whether the number of waits has increased compared to the 91 | previous data collection.""" 92 | return self._total_waits > self._prev_total_waits 93 | 94 | @property 95 | def report(self): 96 | """Return the latest output using the MySQL table format.""" 97 | min_width_type = len('Type') 98 | min_width_name = len('Name') 99 | min_width_status = len('Status') 100 | for row in self._rows: 101 | min_width_type = max(min_width_type, len(row.Type)) 102 | min_width_name = max(min_width_name, len(row.Name)) 103 | min_width_status = max(min_width_status, len(row.Status)) 104 | bar = '+-' + '-' * min_width_type + '-' 105 | bar += '+-' + '-' * min_width_name + '-' 106 | bar += '+-' + '-' * min_width_status + '-+' 107 | fmt = f'| {{0:{min_width_type}s}} ' 108 | fmt += f'| {{1:{min_width_name}s}} ' 109 | fmt += f'| {{2:{min_width_status}s}} |\n' 110 | report = f'mysql> {SQL};\n' 111 | report += bar + '\n' 112 | report += fmt.format('Type', 'Name', 'Status') 113 | report += bar + '\n' 114 | for row in self._rows: 115 | report += fmt.format(row.Type, row.Name, row.Status) 116 | report += bar 117 | return report 118 | 119 | def delta_by_file(self, fmt='dict'): 120 | """Return the delta compared to the previous output grouped by 121 | file name.
The format can be 'dict' or 'table'.""" 122 | delta = _get_delta_waits(self._waits_by_file, self._prev_waits_by_file) 123 | 124 | if fmt == 'dict': 125 | return delta 126 | else: 127 | return _gen_delta_report(delta, 'File') 128 | 129 | def delta_by_file_line(self, fmt='dict'): 130 | """Return the delta compared to the previous output grouped by 131 | file name and line number. The format can be 'dict' or 132 | 'table'.""" 133 | delta = _get_delta_waits(self._waits_by_file_line, 134 | self._prev_waits_by_file_line) 135 | 136 | if fmt == 'dict': 137 | return delta 138 | else: 139 | return _gen_delta_report(delta, 'File:Line') 140 | 141 | def get_waits_by_name(self, name): 142 | """Get the number of waits by the full name.""" 143 | try: 144 | waits = self._waits_by_name[name] 145 | except KeyError: 146 | waits = 0 147 | return waits 148 | 149 | def get_waits_by_file(self, filename): 150 | """Get the number of waits grouped by the file name.""" 151 | try: 152 | waits = self._waits_by_file[filename] 153 | except KeyError: 154 | waits = 0 155 | return waits 156 | 157 | def get_waits_by_file_line(self, filename_line): 158 | """Get the number of waits grouped by file name and line 159 | number.""" 160 | try: 161 | waits = self._waits_by_file_line[filename_line] 162 | except KeyError: 163 | waits = 0 164 | return waits 165 | 166 | def _reset(self): 167 | """Reset the statistics, for example before collecting a new 168 | output.""" 169 | self._rows = [] 170 | self._output_time = None 171 | self._total_waits = 0 172 | self._waits_by_name = {} 173 | self._waits_by_file = {} 174 | self._waits_by_file_line = {} 175 | 176 | def _analyze(self): 177 | """Analyze the latest fetched output.""" 178 | self._total_waits = 0 179 | for row in self._rows: 180 | m_waits = RE_STATUS_WAITS.search(row.Status) 181 | if m_waits is not None: 182 | waits = int(m_waits[1]) 183 | self._total_waits += waits 184 | try: 185 | self._waits_by_name[row.Name] += waits 186 | except KeyError: 187 | self._waits_by_name[row.Name] = waits 188 | 189 | file = RE_NAME_FILE.search(row.Name) 190 | if file is not None: 191 | try: 192 | self._waits_by_file[file[1]] += waits 193 | except KeyError: 194 | self._waits_by_file[file[1]] = waits 195 | 196 | file_line = f'{file[1]}:{file[2]}' 197 | try: 198 | self._waits_by_file_line[file_line] += waits 199 | except KeyError: 200 | self._waits_by_file_line[file_line] = waits 201 | 202 | def fetch(self): 203 | """Fetch the mutex monitor output and store it line by line.""" 204 | self._prev_total_waits = self._total_waits 205 | self._prev_waits_by_file = self._waits_by_file 206 | self._prev_waits_by_file_line = self._waits_by_file_line 207 | self._reset() 208 | self._output_time = datetime.now() 209 | result = self._session.run_sql(SQL) 210 | for row in result.fetch_all(): 211 | self._rows.append(MUTEX_ROW(*row)) 212 | 213 | self._analyze() 214 | -------------------------------------------------------------------------------- /concurrency_book/libs/load.py: -------------------------------------------------------------------------------- 1 | """The functions and classes required to load schemas into the database. 
2 | The supported schemas are available in KNOWN_SCHEMAS and can also be 3 | retrieved using the get_jobs() function.""" 4 | 5 | import zipfile 6 | from urllib import request 7 | import re 8 | import platform 9 | import pathlib 10 | import os 11 | from collections import namedtuple 12 | 13 | import mysqlsh 14 | 15 | import concurrency_book.libs.log 16 | 17 | LOAD_JOB = namedtuple('LoadJob', ['name', 'description']) 18 | 19 | # The schemas that this module knows how to load. 20 | # The key and value are used when listing the schemas. 21 | KNOWN_SCHEMAS = { 22 | 'employees': 'The employee database', 23 | 'employees partitioned': 'The employee database with partitions', 24 | 'sakila': 'The sakila database', 25 | 'world': 'The world database', 26 | } 27 | 28 | # The URLs used to download each schema 29 | URLS = { 30 | 'employees': 'https://github.com/datacharmer/test_db/archive/master.zip', 31 | 'sakila': 'https://downloads.mysql.com/docs/sakila-db.zip', 32 | 'world': 'https://downloads.mysql.com/docs/world-db.zip', 33 | } 34 | URLS['employees partitioned'] = URLS['employees'] 35 | 36 | if platform.system() == 'Windows': 37 | SAVE_DIR = pathlib.Path(os.environ['APPDATA']).joinpath( 38 | 'mysql_concurrency_book', 'sample_data') 39 | else: 40 | SAVE_DIR = pathlib.Path(os.environ['HOME']).joinpath( 41 | '.mysql_concurrency_book', 'sample_data') 42 | 43 | RE_COMMENT = re.compile('--+( +(.+)?)?$', re.MULTILINE) 44 | RE_DELIMITER = re.compile(r'^DELIMITER\s+(.+)\s*$', 45 | re.MULTILINE | re.IGNORECASE) 46 | 47 | LOG = concurrency_book.libs.log.Log(concurrency_book.libs.log.INFO) 48 | 49 | 50 | def get_jobs(): 51 | """Return a dictionary with LOAD_JOB named tuples with the 52 | supported load jobs.""" 53 | 54 | jobs = {} 55 | for job_name in KNOWN_SCHEMAS: 56 | jobs[job_name] = LOAD_JOB(job_name, KNOWN_SCHEMAS[job_name]) 57 | 58 | return jobs 59 | 60 | 61 | class Load(object): 62 | """Class for loading a schema.""" 63 | _schema = None 64 | _session = None 65 | _delimiter = ';' 66 | 67 | def __init__(self, schema): 68 | """Initialize the Load class setting the name of the schema to 69 | be loaded.""" 70 | 71 | if schema in URLS: 72 | self._schema = schema 73 | else: 74 | raise ValueError(f'Unknown schema: {schema} - Supported schemas:' + 75 | f' {list(URLS.keys())}') 76 | self._delimiter = ';' 77 | 78 | def _download(self): 79 | """Provide the path to the downloaded file with the statements 80 | to load a schema.
If the file has not yet been downloaded, it 81 | will be downloaded first.""" 82 | 83 | try: 84 | SAVE_DIR.mkdir(parents=True, exist_ok=True) 85 | except FileExistsError: 86 | LOG.error(f'The path "{SAVE_DIR}" exists but is not a directory') 87 | 88 | path = SAVE_DIR.joinpath(pathlib.Path(URLS[self._schema]).name) 89 | if not path.is_file(): 90 | LOG.info(f'Downloading {URLS[self._schema]} to {path}') 91 | with open(path, mode='xb') as fs: 92 | with request.urlopen(URLS[self._schema]) as url: 93 | data = url.read(8 * 1024) 94 | LOG.debug(f'Downloaded data length: {len(data)} bytes') 95 | while len(data) > 0: 96 | fs.write(data) 97 | data = url.read(8 * 1024) 98 | LOG.debug(f'Downloaded data length: {len(data)} bytes') 99 | else: 100 | LOG.info(f'Using existing file in {path}') 101 | 102 | return path 103 | 104 | def _sql_execute(self, chunk_sql, delimiter, sql_file, zip_fs): 105 | """Execute a list of SQL statements.""" 106 | session = mysqlsh.globals.session 107 | 108 | # Split the file content up to the start of the chunk with 109 | # the delimiter into statements using the previous delimiter 110 | re_statement = re.compile(re.escape(delimiter) + r'\s*$', re.MULTILINE) 111 | statements = re_statement.split(chunk_sql) 112 | LOG.debug(f'Found {len(statements)} statements in the chunk.') 113 | for statement in statements: 114 | sql = RE_COMMENT.sub('', statement.strip()).strip() 115 | 116 | if sql.upper() in ('START TRANSACTION', 'BEGIN'): 117 | session.start_transaction() 118 | elif sql.upper() == 'COMMIT': 119 | session.commit() 120 | elif sql.upper() == 'ROLLBACK': 121 | session.rollback() 122 | elif sql.upper()[0:4] == 'USE ': 123 | _, schema = sql.split(' ', 1) 124 | session.set_current_schema(schema.strip('`')) 125 | elif sql.upper()[0:7] == 'SOURCE ': 126 | _, filename = sql.split(' ', 1) 127 | path = pathlib.PurePosixPath(sql_file.name) 128 | new_path = path.with_name(filename.strip()) 129 | with zip_fs.open(str(new_path)) as new_fs: 130 | self._sql_file(new_fs, zip_fs=zip_fs) 131 | elif sql != '': 132 | try: 133 | session.run_sql(sql) 134 | except mysqlsh.DBError as e: 135 | LOG.error(f'Error for sql: {sql}\n{e}') 136 | 137 | def _sql_file(self, sql_file, zip_fs): 138 | """Execute the SQL statements in a given file. Takes a 139 | file-like object and optionally the file descriptor to the 140 | zip file (allows handling SOURCE commands inside the 141 | sql file).""" 142 | 143 | delimiter = ';' 144 | content = sql_file.read().decode('utf-8') 145 | path = pathlib.PurePosixPath(sql_file.name) 146 | LOG.info(f'Processing statements in {path.name}') 147 | LOG.debug(f'Read {len(content)} characters from {path.name}') 148 | 149 | # Split the file into chunks separated by the DELIMITER 150 | # command. 151 | chunks = RE_DELIMITER.finditer(content) 152 | offset = 0 153 | LOG.debug('Starting to process chunks') 154 | for chunk in chunks: 155 | LOG.debug(f'Processing chunk starting at position {offset} to ' + 156 | f'{chunk.start()}') 157 | chunk_sql = content[offset:chunk.start()] 158 | self._sql_execute(chunk_sql, delimiter, sql_file, zip_fs) 159 | offset = chunk.end() + 1 160 | delimiter = chunk[1] 161 | LOG.debug(f'Setting the delimiter to {delimiter}') 162 | 163 | # Handle the part of the file after the last DELIMITER command. 164 | # This includes handling the whole file if there are no 165 | # DELIMITER commands.
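        # As a sketch of the chunk handling (hypothetical input): a file
        # containing
        #
        #   CREATE TABLE t1 (id INT);
        #   DELIMITER $$
        #   CREATE PROCEDURE p() BEGIN SELECT 1; END$$
        #   DELIMITER ;
        #
        # is executed as one chunk split on ';' (everything before the
        # first DELIMITER command), one chunk split on '$$', and the part
        # after the final DELIMITER command as the chunk handled below.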
166 | LOG.debug(f'Processing chunk starting at position {offset} to ' + 167 | f'{len(content)}') 168 | chunk_sql = content[offset:] 169 | self._sql_execute(chunk_sql, delimiter, sql_file, zip_fs) 170 | 171 | def _exec_employees(self, partitioned=False): 172 | """Execute the steps required to load the employees schema.""" 173 | file = self._download() 174 | with zipfile.ZipFile(file) as zip_fs: 175 | if partitioned: 176 | filename = 'employees_partitioned.sql' 177 | else: 178 | filename = 'employees.sql' 179 | self._delimiter = ';' 180 | with zip_fs.open(f'test_db-master/{filename}') as schema: 181 | self._sql_file(schema, zip_fs) 182 | 183 | with zip_fs.open('test_db-master/objects.sql') as objects: 184 | self._sql_file(objects, zip_fs) 185 | 186 | LOG.info('Load of the employees schema completed') 187 | return True 188 | 189 | def _exec_employees_partitioned(self): 190 | """Load the partitioned employees database.""" 191 | return self._exec_employees(True) 192 | 193 | def _exec_sakila(self): 194 | """Execute the steps required to load the sakila schema.""" 195 | file = self._download() 196 | with zipfile.ZipFile(file) as zip_fs: 197 | self._delimiter = ';' 198 | with zip_fs.open('sakila-db/sakila-schema.sql') as schema: 199 | self._sql_file(schema, zip_fs) 200 | 201 | self._delimiter = ';' 202 | with zip_fs.open('sakila-db/sakila-data.sql') as data: 203 | self._sql_file(data, zip_fs) 204 | 205 | LOG.info('Load of the sakila schema completed') 206 | return True 207 | 208 | def _exec_world(self): 209 | """Execute the steps required to load the world schema.""" 210 | file = self._download() 211 | with zipfile.ZipFile(file) as zip_fs: 212 | self._delimiter = ';' 213 | with zip_fs.open('world-db/world.sql') as world: 214 | self._sql_file(world, zip_fs) 215 | 216 | LOG.info('Load of the world schema completed') 217 | return True 218 | 219 | def execute(self): 220 | """Execute a job.""" 221 | method = f'_exec_{self._schema.replace(" ", "_")}' 222 | return getattr(self, method)() 223 | -------------------------------------------------------------------------------- /concurrency_book/libs/log.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import re 3 | 4 | import mysqlsh 5 | 6 | DEBUG = 0 7 | INFO = 1 8 | WARNING = 2 9 | ERROR = 3 10 | 11 | RE_ERROR_MSG = re.compile('^.+.run_sql: ') 12 | 13 | 14 | def _now(): 15 | now = datetime.now() 16 | return now.strftime("%Y-%m-%d %H:%M:%S.%f") 17 | 18 | 19 | class Log(object): 20 | """Provides logging for the concurrency_book module. This includes keeping 21 | track of the connection that the latest printed query and result 22 | were for, so a "-- Connection <n>" comment can be printed when the 23 | connection changes.""" 24 | # The last connection id for which a query or result was logged 25 | _last_connection = None 26 | # The current workload. When that changes, _last_connection is set to None 27 | _workload = None 28 | _level = 2 29 | # An optional lock object. If present, writes are only done when the 30 | # lock is held.
32 | _lock = None 33 | _sql_formatter = None 34 | 35 | def __init__(self, level=INFO): 36 | self._last_connection = None 37 | self._workload = None 38 | self._level = level 39 | self._lock = None 40 | self._sql_formatter = None 41 | 42 | @property 43 | def level(self): 44 | return self._level 45 | 46 | @level.setter 47 | def level(self, level): 48 | self._level = level 49 | 50 | @property 51 | def lock(self): 52 | return self._lock 53 | 54 | @lock.setter 55 | def lock(self, lock): 56 | self._lock = lock 57 | 58 | @property 59 | def sql_formatter(self): 60 | return self._sql_formatter 61 | 62 | @sql_formatter.setter 63 | def sql_formatter(self, formatter): 64 | self._sql_formatter = formatter 65 | 66 | def _lock_acquire(self): 67 | if self._lock is not None: 68 | self._lock.acquire() 69 | 70 | def _lock_release(self): 71 | if self._lock is not None: 72 | self._lock.release() 73 | 74 | def _write(self, msg): 75 | self._lock_acquire() 76 | print(msg) 77 | self._lock_release() 78 | 79 | def _write_result(self, result, dump_format): 80 | self._lock_acquire() 81 | num_rows = mysqlsh.globals.shell.dump_rows(result, dump_format) 82 | self._lock_release() 83 | return num_rows 84 | 85 | def _connection_comment(self, connection): 86 | if connection != self._last_connection: 87 | self._last_connection = connection 88 | self._write(f'-- Connection {connection}') 89 | 90 | def ids(self, processlist_ids, thread_ids, event_ids): 91 | """Write the processlist ids and thread ids for the connections 92 | involved in the workload. This makes it easier to investigate 93 | the result.""" 94 | 95 | if len(processlist_ids) > 0: 96 | msg = '-- Connection Processlist ID Thread ID Event ID\n' 97 | msg += '-- ' + '-' * 50 + '\n' 98 | for i in range(len(processlist_ids)): 99 | processlist_id = processlist_ids[i] 100 | thread_id = thread_ids[i] 101 | event_id = event_ids[i] 102 | msg += f'-- {i + 1:10d} {processlist_id:14d} ' 103 | msg += f'{thread_id:9d} {event_id:8d}' + '\n' 104 | self._write(msg) 105 | 106 | def _log(self, connection, severity, msg): 107 | now = _now() 108 | self._write(f'{now} {connection:2d} [{severity.upper()}] {msg}') 109 | 110 | def error(self, msg, connection=0): 111 | if self._level <= ERROR: 112 | self._log(connection, 'ERROR', msg) 113 | 114 | def warning(self, msg, connection=0): 115 | if self._level <= WARNING: 116 | self._log(connection, 'WARNING', msg) 117 | 118 | def info(self, msg, connection=0): 119 | if self._level <= INFO: 120 | self._log(connection, 'INFO', msg) 121 | 122 | def debug(self, msg, connection=0): 123 | if self._level <= DEBUG: 124 | self._log(connection, 'DEBUG', msg) 125 | 126 | def caption(self): 127 | msg = f'* {self.workload.name}. {self.workload.description} *' 128 | banner = '*' * len(msg) 129 | space = '*' + ' ' * (len(msg) - 2) + '*' 130 | caption = f""" 131 | {banner} 132 | {space} 133 | {msg} 134 | {space} 135 | {banner} 136 | 137 | """ 138 | self._write(caption) 139 | 140 | def sql(self, query, sql=None, indent=0): 141 | """Write a query with its prompt indicating which connection 142 | is executing the query. Each time the connection changes 143 | prefix with a comment line showing the new connection id. 144 | 145 | If sql is given, that is used; otherwise the sql attribute of 146 | the query is used.
The indent argument specifies by how much 147 | lines after the first are already indented.""" 148 | 149 | msg = '' 150 | self._connection_comment(query.connection) 151 | try: 152 | if query.comment != '': 153 | msg += f'-- {query.comment}\n' 154 | except AttributeError: 155 | pass 156 | 157 | # Indent the query with 14 characters except for the first line 158 | # This ensures the lines align nicely. 159 | if sql is None: 160 | sql = query.sql 161 | if self._sql_formatter is not None: 162 | sql = self._sql_formatter.sql_global_sub(sql, query.connection) 163 | sql = self._sql_formatter.indent_sql(sql, 14 - indent) 164 | if query.format == 'vertical': 165 | delimiter = '\\G' 166 | else: 167 | delimiter = ';' 168 | msg += f'Connection {query.connection}> {sql}{delimiter}' 169 | self._write(msg) 170 | 171 | def result(self, result): 172 | """Print the result in the desired format (same formats as for 173 | shell.dump_rows() are supported). Prefix with a comment line 174 | showing the new connection id if the connection id has changed 175 | since the last message.""" 176 | 177 | self._connection_comment(result.query.connection) 178 | if result.error: 179 | msg = RE_ERROR_MSG.sub('', result.error.msg) 180 | try: 181 | self._write(f'ERROR: {result.error.code}: {msg}') 182 | except AttributeError: 183 | self._write(f'ERROR: {result.error}') 184 | else: 185 | timing = result.result.execution_time 186 | has_data = result.result.has_data() 187 | if not has_data: 188 | # Query OK, 1 row affected (0.3618 sec) 189 | items = result.result.affected_items_count 190 | if items == 1: 191 | rows = 'row' 192 | else: 193 | rows = 'rows' 194 | self._write(f'Query OK, {items} {rows} affected ({timing})') 195 | 196 | # For result without data this will output information about 197 | # the number of matched and changed rows and number of 198 | # warnings 199 | num_rows = self._write_result(result.result, result.query.format) 200 | 201 | if has_data: 202 | # Empty set (0.0015 sec) 203 | # 2 rows in set (0.2705 sec) 204 | if num_rows == 0: 205 | rows = 'Empty' 206 | elif num_rows == 1: 207 | rows = f'{num_rows} row in' 208 | else: 209 | rows = f'{num_rows} rows in' 210 | self._write(f'{rows} set ({timing})') 211 | 212 | if result.result.warnings_count > 0: 213 | msg = '' 214 | for warning in result.result.warnings: 215 | msg += f'{warning[0]} (code {warning[1]}): {warning[2]}\n' 216 | self._write(msg) 217 | 218 | self._write('') 219 | 220 | @property 221 | def workload(self): 222 | return self._workload 223 | 224 | @workload.setter 225 | def workload(self, new_workload): 226 | self._workload = new_workload 227 | self._last_connection = None 228 | -------------------------------------------------------------------------------- /concurrency_book/libs/metrics.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import csv 3 | from datetime import datetime 4 | 5 | # The query used to collect metrics 6 | SQL = """ 7 | SELECT Variable_name, Variable_value 8 | FROM sys.metrics 9 | WHERE Enabled = 'YES' 10 | """.strip() 11 | 12 | 13 | class Metrics(object): 14 | """Monitor using the sys.metrics view.""" 15 | _samples = [] 16 | _session = None 17 | 18 | def __init__(self, session): 19 | """Initialize the class.""" 20 | self._session = session 21 | self._samples = [] 22 | 23 | def collect(self): 24 | """Collect a new set of metrics.""" 25 | result = self._session.run_sql(SQL) 26 | sample = {} 27 | for row in result.fetch_all(): 28 | if row[0] == 'NOW()': 29 | value =
datetime.fromisoformat(row[1]) 30 | else: 31 | value = row[1] 32 | sample[row[0]] = value 33 | 34 | self._samples.append(sample) 35 | 36 | def write_csv(self, metrics): 37 | """Output the metrics in CSV format. The order is the order 38 | the metrics were collected.""" 39 | headers = ['NOW()'] + list(metrics) 40 | writer = csv.DictWriter(sys.stdout, headers, extrasaction='ignore') 41 | writer.writeheader() 42 | for sample in self._samples: 43 | writer.writerow(sample) 44 | 45 | def write_rate_csv(self, metrics): 46 | """Write the difference (rate) between successive samples 47 | as a CSV file.""" 48 | headers = ['time'] + list(metrics) 49 | writer = csv.DictWriter(sys.stdout, headers, extrasaction='ignore') 50 | writer.writeheader() 51 | prev_sample = None 52 | for sample in self._samples: 53 | if prev_sample is not None: 54 | delta_seconds = (sample['NOW()'] - 55 | prev_sample['NOW()']).total_seconds() 56 | data = {'time': sample['NOW()']} 57 | for metric in list(metrics): 58 | try: 59 | delta = int(sample[metric]) - int(prev_sample[metric]) 60 | except TypeError: 61 | # Can't convert to integers 62 | delta = 0 63 | data[metric] = delta/delta_seconds 64 | writer.writerow(data) 65 | 66 | prev_sample = sample 67 | -------------------------------------------------------------------------------- /concurrency_book/libs/query.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import queue 4 | import re 5 | from collections import namedtuple 6 | 7 | RESULT = namedtuple('Result', ['query', 'result', 'error']) 8 | 9 | RE_INDENT = re.compile(r'^', re.MULTILINE) 10 | RE_SQL_PARAM = re.compile(r'\?') 11 | RE_SQL_PROCESSLIST_IDS = re.compile(r'{processlist_ids}') 12 | RE_SQL_THREAD_IDS = re.compile(r'{thread_ids}') 13 | RE_SQL_THREAD_IDS_NOT_SELF = re.compile(r'{thread_ids_not_self}') 14 | RE_SQL_ID_FOR_CONNECTION = re.compile( 15 | r'{(event|processlist|thread)_id_connection_(\d+)(?:\+(\d+))?}') 16 | RE_SQL_THREAD_ID_FOR_CONNECTION = re.compile( 17 | r'{thread_id_connection_(\d+)(?:\+(\d+))?}') 18 | RE_SQL_PROCESSLIST_ID_FOR_CONNECTION = re.compile( 19 | r'{processlist_id_connection_(\d+)(?:\+(\d+))?}') 20 | RE_SQL_EVENT_ID_FOR_CONNECTION = re.compile( 21 | r'{event_id_connection_(\d+)(?:\+(\d+))?}') 22 | 23 | 24 | def get_connection_ids(session): 25 | """Obtain the connection and thread id for a connection.""" 26 | 27 | sql = """ 28 | SELECT PROCESSLIST_ID, THREAD_ID, EVENT_ID 29 | FROM performance_schema.threads 30 | INNER JOIN performance_schema.events_statements_current 31 | USING (THREAD_ID) 32 | WHERE PROCESSLIST_ID = CONNECTION_ID()""".strip() 33 | result = session.run_sql(sql) 34 | row = result.fetch_one() 35 | return row[0], row[1], row[2] 36 | 37 | 38 | def get_from_queue(q, timeout=None, iterations=1, on_empty=None): 39 | """Get the next value from a queue with support for looping with 40 | short timeouts.""" 41 | for i in range(iterations): 42 | try: 43 | item = q.get(timeout=timeout) 44 | except queue.Empty: 45 | pass 46 | else: 47 | return item 48 | 49 | return on_empty 50 | 51 | 52 | class Formatter(object): 53 | """Adds support for replacing parameters in the queries and to 54 | indent lines other than the first to make the lines align.""" 55 | 56 | _processlist_ids = [] 57 | _thread_ids = [] 58 | _event_ids = [] 59 | 60 | def __init__(self, processlist_ids, thread_ids, event_ids): 61 | """Initialize the class.""" 62 | self._processlist_ids = processlist_ids 63 | self._thread_ids = thread_ids 64 | self._event_ids = 
event_ids 65 | 66 | @staticmethod 67 | def indent_sql(sql, spaces): 68 | """Indent all but the first line of an SQL statement to make the 69 | lines align nicely, e.g. depending on the width of the prompt""" 70 | 71 | return RE_INDENT.sub(' ' * spaces, sql).strip() 72 | 73 | @staticmethod 74 | def sql_param_sub(value, sql): 75 | """Substitute the next ? parameter in an SQL statement.""" 76 | return RE_SQL_PARAM.sub(str(value), sql, 1) 77 | 78 | def sql_global_sub(self, sql, connection): 79 | """Replace placeholders for "global" properties such as all 80 | processlist ids and thread ids.""" 81 | 82 | # Processlist ids 83 | ids = [str(pid) for pid in self._processlist_ids] 84 | sql = RE_SQL_PROCESSLIST_IDS.sub(', '.join(ids), sql) 85 | 86 | # Thread ids 87 | ids = [str(tid) for tid in self._thread_ids] 88 | sql = RE_SQL_THREAD_IDS.sub(', '.join(ids), sql) 89 | 90 | # Thread ids except for the connection itself 91 | ids = [str(self._thread_ids[i]) for i in range(len(self._thread_ids)) 92 | if i + 1 != connection] 93 | sql = RE_SQL_THREAD_IDS_NOT_SELF.sub(', '.join(ids), sql) 94 | 95 | # Individual thread ids 96 | for m in RE_SQL_ID_FOR_CONNECTION.finditer(sql): 97 | id_type = m[1] 98 | connection = int(m[2]) 99 | adjust = m[3] 100 | if id_type == 'event': 101 | value = self._event_ids[connection-1] 102 | sub_expression = RE_SQL_EVENT_ID_FOR_CONNECTION 103 | elif id_type == 'processlist': 104 | value = self._processlist_ids[connection-1] 105 | sub_expression = RE_SQL_PROCESSLIST_ID_FOR_CONNECTION 106 | else: 107 | value = self._thread_ids[connection-1] 108 | sub_expression = RE_SQL_THREAD_ID_FOR_CONNECTION 109 | 110 | if adjust is not None: 111 | value += int(adjust) 112 | sql = sub_expression.sub(str(value), sql) 113 | return sql 114 | 115 | def for_connection(self, sql, connection, parameters, indent): 116 | """Make parameter substitutions for a connection using 117 | the connection's process list, thread, event ids.""" 118 | for parameter in parameters: 119 | try: 120 | key, adjust = parameter.split('+') 121 | except ValueError: 122 | # No + sign in the parameter 123 | key = parameter 124 | adjust = None 125 | value = None 126 | if key == 'processlist_id': 127 | value = self._processlist_ids[connection] 128 | elif key == 'thread_id': 129 | value = self._thread_ids[connection] 130 | elif key == 'event_id': 131 | value = self._event_ids[connection] 132 | elif key == 'thread_ids': 133 | # The thread ids are integers, so convert before joining 134 | value = ', '.join(str(tid) for tid in self._thread_ids) 135 | 136 | if adjust is not None: 137 | value = int(value) + int(adjust) 138 | 139 | sql = self.sql_param_sub(value, sql) 140 | sql = self.indent_sql(sql, indent) 141 | 142 | return sql 143 | 144 | 145 | class Query(object): 146 | """Class for handling query execution.
This keeps track of, for 146 | example, the number of outstanding queries.""" 147 | _outstanding = 0 148 | _query_queue = None 149 | _result_queue = None 150 | _processlist_ids = None 151 | _thread_ids = None 152 | _log = None 153 | _concurrent = False 154 | 155 | def __init__(self, query_queue, result_queue, processlist_ids, thread_ids, 156 | log, concurrent): 157 | """Initialize the class.""" 158 | self._outstanding = 0 159 | self._query_queue = query_queue 160 | self._result_queue = result_queue 161 | self._processlist_ids = processlist_ids 162 | self._thread_ids = thread_ids 163 | self._log = log 164 | self._concurrent = concurrent 165 | 166 | def exec(self, query): 167 | """Execute a query.""" 168 | if not query.silent: 169 | self._log.sql(query) 170 | if query.show_result: 171 | self._outstanding += 1 172 | else: 173 | print('') 174 | self._log.debug(f'Adding query to queue: {query}') 175 | self._query_queue[query.connection - 1].put(query) 176 | if query.sleep > 0: 177 | time.sleep(query.sleep) 178 | 179 | if query.wait: 180 | # Wait for all outstanding results to be returned 181 | results = {} 182 | self._log.debug('Waiting for result, outstanding ' + 183 | f'{self._outstanding}') 184 | while self._outstanding > 0: 185 | result = get_from_queue( 186 | self._result_queue, timeout=10, iterations=1, 187 | on_empty=RESULT(None, None, None)) 188 | self._outstanding -= 1 189 | 190 | if result.query is None: 191 | self._log.error("Didn't receive a result. Abandoning.") 192 | sys.exit(1) 193 | elif result.query.show_result and not result.query.silent: 194 | results[result.query.connection] = result 195 | 196 | # Sort the results so the last query to be executed is 197 | # handled first (if possible) to reduce the number of times 198 | # the connection id changes. 199 | result_conns = list(results.keys()) 200 | result_conns.sort( 201 | key=lambda i: -1 if i == query.connection else i) 202 | for connection in result_conns: 203 | self._log.result(results[connection]) 204 | elif not query.silent and query.show_result: 205 | print('') 206 | 207 | if not query.wait and query.sleep == 0 and not self._concurrent: 208 | # Wait for a brief moment to allow the query to start 209 | time.sleep(0.1) 210 | -------------------------------------------------------------------------------- /concurrency_book/libs/util.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import re 3 | 4 | import mysqlsh 5 | 6 | RE_NUMBER = re.compile('([0-9]+)') 7 | RE_LISTING = re.compile(r'^Listing\s+(\d+)-(\d+)$') 8 | RE_SPACE = re.compile(' +') 9 | 10 | 11 | def _task_name_part_convert(name_part): 12 | """Helper function for sorting task names.""" 13 | if name_part.isdigit(): 14 | return int(name_part) 15 | else: 16 | return name_part.lower() 17 | 18 | 19 | def _task_name_split(name): 20 | """Used for sorting task names.""" 21 | return [_task_name_part_convert(p) for p in RE_NUMBER.split(name)] 22 | 23 | 24 | def normalize_task_name(name): 25 | """Normalize the task name (removing duplicate whitespace).""" 26 | return RE_SPACE.sub(' ', name) 27 | 28 | 29 | def list_tasks(task_name, tasks): 30 | """List the available tasks.
Returns a list of task names 31 | ordered by name.""" 32 | 33 | heading = f'Available {task_name}s:' 34 | print(heading) 35 | print('=' * len(heading)) 36 | print('') 37 | print('{0:>2s} {1:22s} {2}'.format('#', 'Name', 'Description')) 38 | print('-' * 100) 39 | names = list(tasks.keys()) 40 | names.sort(key=_task_name_split) 41 | count = 0 42 | for name in names: 43 | task = tasks[name] 44 | count += 1 45 | # For names that refer to a code listing from the book, make the 46 | # chapter numbers right align. 47 | m = RE_LISTING.match(name) 48 | if m is not None: 49 | name = f'Listing {int(m[1]):2d}-{m[2]}' 50 | print(f'{count:2d} {name:22s} {task.description}') 51 | print('') 52 | 53 | return names 54 | 55 | 56 | def prompt_task(task_name, tasks): 57 | """Ask which task to execute.""" 58 | 59 | names = list_tasks(task_name, tasks) 60 | task = None 61 | prompt = f'Choose {task_name} (# or name - empty to exit): ' 62 | while task is None: 63 | answer = mysqlsh.globals.shell.prompt(prompt) 64 | if answer == '': 65 | return None 66 | # Try to convert the answer to an int - if that does not work, 67 | # treat it as a name. 68 | try: 69 | name = names[int(answer) - 1] 70 | except ValueError: 71 | # Could not convert the answer to an integer 72 | # Normalise the name (e.g. remove duplicate spaces) 73 | answer = normalize_task_name(answer) 74 | try: 75 | task = tasks[answer] 76 | except KeyError: 77 | pass 78 | except IndexError: 79 | pass 80 | else: 81 | task = tasks[name] 82 | 83 | if task is None: 84 | print(f'Unknown workload: "{answer}"') 85 | 86 | return task 87 | 88 | 89 | def prompt_investigation(investigations, sql_formatter, num_connections): 90 | """Ask for an investigation to perform.""" 91 | 92 | print('Available investigations:') 93 | print('=' * 25) 94 | print('') 95 | print('{0:>2s} {1}'.format('#', 'Query')) 96 | print('-' * 50) 97 | count = 0 98 | statements = [] 99 | answer_to_investigation = [] 100 | for investigation in investigations: 101 | # Replace global parameters such as a list of all thread ids 102 | sql_template = sql_formatter.sql_global_sub(investigation.sql, 0) 103 | if investigation.parameters: 104 | for connection in range(num_connections): 105 | count += 1 106 | sql = sql_formatter.for_connection(sql_template, connection, 107 | investigation.parameters, 3) 108 | answer_to_investigation.append(investigation) 109 | statements.append(sql) 110 | print(f'{count:2d} {sql}') 111 | print('') 112 | else: 113 | count += 1 114 | sql = sql_formatter.indent_sql(sql_template, 3) 115 | answer_to_investigation.append(investigation) 116 | statements.append(sql) 117 | print(f'{count:2d} {sql}') 118 | print('') 119 | 120 | answer = None 121 | investigation = None 122 | sql = None 123 | prompt = 'Choose investigation (# - empty to exit): ' 124 | while investigation is None: 125 | answer = mysqlsh.globals.shell.prompt(prompt) 126 | if answer == '': 127 | return None, None, None 128 | # Try to convert the answer to an int - if that does not work, 129 | # treat it as a name.
130 | try: 131 | answer = int(answer) 132 | except ValueError: 133 | pass 134 | else: 135 | try: 136 | investigation = answer_to_investigation[answer - 1] 137 | sql = statements[answer - 1] 138 | except IndexError: 139 | investigation = None 140 | 141 | return answer, investigation, sql 142 | 143 | 144 | def get_uri(): 145 | """Obtain the URI for the global session.""" 146 | 147 | uri = mysqlsh.globals.session.uri 148 | args = mysqlsh.globals.shell.parse_uri(uri) 149 | 150 | prompt = 'Password for connections: ' 151 | options = {'type': 'password'} 152 | args['password'] = mysqlsh.globals.shell.prompt(prompt, options) 153 | 154 | uri = mysqlsh.globals.shell.unparse_uri(args) 155 | return uri 156 | 157 | 158 | def verify_session(): 159 | """Verify a session is open.""" 160 | session = mysqlsh.globals.session 161 | try: 162 | has_connection = session.is_open() 163 | except AttributeError: 164 | has_connection = False 165 | 166 | if not has_connection: 167 | print('MySQL Shell must be connected to MySQL Server.', 168 | file=sys.stderr) 169 | 170 | return has_connection 171 | 172 | 173 | def get_session(workload, uri=None): 174 | """Return a connection based on the workload settings. Optionally, 175 | an uri can be provided. If it is not provided, it will be determined 176 | using the get_uri() function.""" 177 | 178 | if uri is None: 179 | uri = get_uri() 180 | 181 | session = None 182 | if workload.protocol == 'mysql': 183 | try: 184 | session = mysqlsh.globals.mysql.get_classic_session(uri) 185 | except mysqlsh.DBError as e: 186 | if e.code == 2007: 187 | print('This workload requires the classic MySQL ' + 188 | 'protocol. Please exit MySQL Shell and reconnect ' + 189 | 'using the --mysql option.', 190 | file=sys.stderr) 191 | else: 192 | print('Error when trying to create a connection ' + 193 | f'using the classic MySQL protocol:\n{e}', 194 | file=sys.stderr) 195 | elif workload.protocol == 'mysqlx': 196 | try: 197 | session = mysqlsh.globals.mysqlx.get_session(uri) 198 | except mysqlsh.DBError as e: 199 | if e.code == 2027: 200 | print('This workload requires the MySQL X protocol. 
' + 201 | 'Please exit MySQL Shell and reconnect using ' + 202 | 'the --mysqlx option.', 203 | file=sys.stderr) 204 | else: 205 | print('Error when trying to create a connection ' + 206 | f'using the MySQL X protocol:\n{e}', 207 | file=sys.stderr) 208 | else: 209 | session = mysqlsh.globals.shell.open_session(uri) 210 | 211 | return session 212 | 213 | 214 | def prompt_int(min_val, max_val, default_val, prompt_base): 215 | """Ask for an integer in a specified interval with support for a 216 | default value.""" 217 | value = None 218 | prompt = f'{prompt_base} ({min_val}-{max_val}) [{default_val}]: ' 219 | while value is None: 220 | answer = mysqlsh.globals.shell.prompt(prompt) 221 | if answer == '': 222 | value = default_val 223 | else: 224 | try: 225 | value = int(answer) 226 | except ValueError: 227 | print('The value must be an integer.') 228 | value = None 229 | else: 230 | if value < min_val or value > max_val: 231 | print(f'The value must be an integer between {min_val} ' + 232 | f'and {max_val}.') 233 | value = None 234 | 235 | return value 236 | 237 | 238 | def prompt_bool(default_val, prompt_base): 239 | """Prompt for a boolean answer (Y/N/YES/NO).""" 240 | value = None 241 | prompt = f'{prompt_base} (Y|Yes|N|No) [{default_val}]: ' 242 | while value is None: 243 | answer = mysqlsh.globals.shell.prompt(prompt) 244 | if answer == '': 245 | answer = default_val 246 | if answer.upper() in ('Y', 'YES'): 247 | value = True 248 | elif answer.upper() in ('N', 'NO'): 249 | value = False 250 | else: 251 | print('The value must be one of Y, Yes, N, and No') 252 | 253 | return value 254 | -------------------------------------------------------------------------------- /concurrency_book/libs/workloads.py: -------------------------------------------------------------------------------- 1 | import re 2 | import pathlib 3 | from collections import namedtuple 4 | 5 | import concurrency_book.libs.log 6 | 7 | DEBUG = False 8 | 9 | # Known lists in the yaml definition 10 | KNOWN_LISTS = ['queries', 'completions', 'investigations'] 11 | 12 | # The workload named tuple that is returned after parsing the yaml file 13 | WORKLOAD = namedtuple( 14 | 'Workload', 15 | ['name', 'description', 'concurrent', 'connections', 'loops', 'queries', 16 | 'completions', 'investigations', 'implementation', 'protocol'] 17 | ) 18 | # The named tuple fields for a query. These are both used for a query and 19 | # a completion (query). 
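# For illustration, a minimal (hypothetical, not one of the bundled
# listing_*.yaml files) workload definition that these tuples are
# created from could look like:
#
#   name: Listing 0-0
#   description: Minimal example workload
#   connections: 2
#   queries:
#     - connection: 1
#       sql: SELECT COUNT(*) FROM world.city
#     - connection: 2
#       sql: SELECT * FROM performance_schema.threads
#       format: vertical
#   completions:
#     - connection: 1
#       sql: ROLLBACK
#
# The required and optional keys and their defaults are defined in the
# KEYS_* dictionaries below.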
20 | QUERY_FIELDS = ['connection', 'wait', 'sql', 'format', 'silent', 'show_result', 21 | 'store', 'parameters', 'comment', 'sleep'] 22 | QUERY = namedtuple('Query', QUERY_FIELDS) 23 | COMPLETION = namedtuple('Completion', QUERY_FIELDS) 24 | INVESTIGATION = namedtuple( 25 | 'Investigation', ['connection', 'sql', 'format', 'parameters']) 26 | IMPLEMENTATION = namedtuple('Implementation', ['module', 'class_name', 'args']) 27 | 28 | # The required keys for a workload 29 | KEYS_WORKLOAD_REQUIRED = { 30 | 'name': 'string', 31 | 'description': 'string', 32 | } 33 | # The value is (data type, default value) 34 | KEYS_WORKLOAD_OPTIONAL = { 35 | 'connections': ('integer', 0), 36 | 'concurrent': ('boolean', False), 37 | 'completions': ('list', []), 38 | 'loops': ('integer', 1), 39 | 'queries': ('list', []), 40 | 'implementation': ('implementation', (None, None, '{}')), 41 | 'investigations': ('list', []), 42 | 'protocol': ('string', None), 43 | } 44 | KEYS_QUERY_REQUIRED = { 45 | 'connection': 'integer', 46 | 'sql': 'string', 47 | } 48 | # The value is (data type, default value) 49 | KEYS_QUERY_OPTIONAL = { 50 | 'wait': ('boolean', True), 51 | 'format': ('string', 'table'), 52 | 'show_result': ('boolean', True), 53 | 'silent': ('boolean', False), 54 | 'store': ('boolean', False), 55 | 'parameters': ('list', []), 56 | 'comment': ('string', ''), 57 | 'sleep': ('integer', 0), 58 | } 59 | 60 | KEYS_COMPLETION_REQUIRED = KEYS_QUERY_REQUIRED 61 | KEYS_COMPLETION_OPTIONAL = KEYS_QUERY_OPTIONAL 62 | 63 | KEYS_INVESTIGATION_REQUIRED = { 64 | 'sql': 'string', 65 | } 66 | 67 | # The value is (data type, default value) 68 | KEYS_INVESTIGATION_OPTIONAL = { 69 | 'format': ('string', 'table'), 70 | 'parameters': ('list', []), 71 | } 72 | 73 | # Supported values per key (irrespective of the part of the definition) 74 | CHOICES = { 75 | 'format': ['table', 'tabbed', 'vertical', 'json', 'ndjson', 'json/raw', 76 | 'json/array', 'json/pretty'], 77 | 'protocol': [None, 'mysql', 'mysqlx'] 78 | } 79 | 80 | RE_KEYWORD_LINE = re.compile('^( *)(- )?(\\w+):(.*)$') 81 | RE_COMMENT = re.compile('#.*') 82 | RE_INT = re.compile('^-?[1-9][0-9]*$') 83 | 84 | LOG = concurrency_book.libs.log.Log() 85 | 86 | 87 | def _validate_datatype(file, key, value, required_datatype): 88 | """Determine the data type of the actual value and validate it 89 | against the required data type.""" 90 | if value == str(value): 91 | actual_datatype = 'string' 92 | elif RE_INT.match(str(value)): 93 | actual_datatype = 'integer' 94 | elif value is True or value is False: 95 | actual_datatype = 'boolean' 96 | elif type(value) is list: 97 | actual_datatype = 'list' 98 | else: 99 | actual_datatype = 'undetermined' 100 | 101 | if (required_datatype == 'implementation' and 102 | actual_datatype == 'list' and 103 | len(value) in [2, 3]): 104 | return True 105 | if actual_datatype == required_datatype: 106 | return True 107 | else: 108 | LOG.error(f'Workload {file}: Wrong data type for {key} - Expected' + 109 | f' {required_datatype} - Actual: {actual_datatype}') 110 | return False 111 | 112 | 113 | def _validate_sublist(file, name, items, required, optional): 114 | """Validate sublists (e.g. 
queries) of the workload.""" 115 | 116 |     all_known_keys = list(required.keys()) + list(optional.keys()) 117 |     # Validate that all required keys are present 118 |     for key in required: 119 |         required_datatype = required[key] 120 |         i = 0 121 | 122 |         for item in items: 123 |             item_keys = item.keys() 124 | 125 |             if key not in item_keys: 126 |                 LOG.error(f'Workload {file}: The key "{key}" was not ' + 127 |                           f'found for the {name} number {i}.') 128 |                 return None 129 | 130 |             if not _validate_datatype(file, key, item[key], required_datatype): 131 |                 return None 132 | 133 |             if key == 'implementation': 134 |                 item[key] = IMPLEMENTATION(*item[key]) 135 | 136 |             # Validate that no unknown keys are present 137 |             for item_key in item_keys: 138 |                 if item_key not in all_known_keys: 139 |                     LOG.error(f'Workload {file}: The key "{item_key}" is ' + 140 |                               f'given for {name} number {i} but is not a ' + 141 |                               'known key.') 142 |                     return None 143 |             if key in CHOICES and item[key] not in CHOICES[key]: 144 |                 LOG.error(f'Workload {file}: The {name} value for ' + 145 |                           f'"{key}" is not an allowed value. Allowed ' + 146 |                           f'values: {CHOICES[key]}') 147 |                 return None 148 |             i += 1 149 | 150 |     # Validate the optional keys for the sublist 151 |     # Copy items to avoid modifying the list that is being iterated over 152 |     items_copy = [val for val in items] 153 |     for key in optional: 154 |         required_datatype = optional[key][0] 155 |         i = 0 156 |         for item in items_copy: 157 |             try: 158 |                 value = item[key] 159 |             except KeyError: 160 |                 default_value = optional[key][1] 161 |                 items[i][key] = default_value 162 |             else: 163 |                 if not _validate_datatype(file, key, value, required_datatype): 164 |                     return None 165 | 166 |                 if key == 'implementation': 167 |                     items[i][key] = IMPLEMENTATION(*item[key]) 168 | 169 |             _parse_implementation(items[i]) 170 |             i += 1 171 | 172 |     return items 173 | 174 | 175 | def _parse_implementation(dictionary): 176 |     """Parse the implementation key of a workload.""" 177 |     try: 178 |         value = dictionary['implementation'] 179 |     except KeyError: 180 |         # Nothing to do 181 |         return 182 | 183 |     module = value[0] 184 |     class_name = value[1] 185 |     args = {} 186 |     try: 187 |         args_string = value[2] 188 |     except IndexError: 189 |         # No arguments given 190 |         pass 191 |     else: 192 |         # Remove curly braces 193 |         key_value_pairs = args_string.lstrip('{').rstrip('}').split(',') 194 |         for key_value in key_value_pairs: 195 |             try: 196 |                 key, value = key_value.split(':') 197 |             except ValueError: 198 |                 pass 199 |             else: 200 |                 args[key.strip()] = value.strip() 201 | 202 |     dictionary['implementation'] = IMPLEMENTATION(module, class_name, args) 203 | 204 | 205 | def _validate(path, workload_dict): 206 |     """Validate that a dictionary can be converted into a 207 |     workload. Optional elements are added with their 208 |     default value if they do not already exist. 209 | 210 |     Returns the updated dictionary upon success, otherwise None.
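    For example (an illustrative case, not taken from the original
    sources): a definition containing only the required name and
    description keys is returned with connections=0, concurrent=False,
    loops=1, and empty queries, completions, and investigations lists
    added.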
211 |     """ 212 | 213 |     file = path.name 214 |     workload_keys = workload_dict.keys() 215 |     # Verify all required workload keys are present 216 |     # and of the right data type 217 |     for key in KEYS_WORKLOAD_REQUIRED: 218 |         required_datatype = KEYS_WORKLOAD_REQUIRED[key] 219 |         if key not in workload_keys: 220 |             LOG.error(f'Workload {file}: The key "{key}" was not found') 221 |             return None 222 | 223 |         if not _validate_datatype( 224 |                 file, key, workload_dict[key], required_datatype): 225 |             return None 226 | 227 |     # Verify optional workload keys 228 |     # and assign values for those not present 229 |     for key in KEYS_WORKLOAD_OPTIONAL: 230 |         required_datatype = KEYS_WORKLOAD_OPTIONAL[key][0] 231 |         try: 232 |             value = workload_dict[key] 233 |         except KeyError: 234 |             default_value = KEYS_WORKLOAD_OPTIONAL[key][1] 235 |             workload_dict[key] = default_value 236 |         else: 237 |             if not _validate_datatype(file, key, value, required_datatype): 238 |                 return None 239 | 240 |     # Verify that only known keys are present and, where the supported 241 |     # values are limited, that the value is one of them. 242 |     for key in workload_keys: 243 |         if (key not in KEYS_WORKLOAD_REQUIRED and 244 |                 key not in KEYS_WORKLOAD_OPTIONAL): 245 |             LOG.error(f'Workload {file}: The key "{key}" is given but ' + 246 |                       'is not a known key.') 247 |         if key in CHOICES and workload_dict[key] not in CHOICES[key]: 248 |             LOG.error(f'Workload {file}: The value for "{key}" is not an ' + 249 |                       f'allowed value. Allowed values: {CHOICES[key]}') 250 | 251 |     _parse_implementation(workload_dict) 252 | 253 |     # Validate required keys for the queries, completions, and investigations 254 |     queries = _validate_sublist(file, 'query', workload_dict['queries'], 255 |                                 KEYS_QUERY_REQUIRED, KEYS_QUERY_OPTIONAL) 256 |     if queries is None: 257 |         return None 258 |     else: 259 |         workload_dict['queries'] = queries 260 | 261 |     completions = _validate_sublist(file, 'completion', 262 |                                     workload_dict['completions'], 263 |                                     KEYS_COMPLETION_REQUIRED, 264 |                                     KEYS_COMPLETION_OPTIONAL) 265 |     if completions is None: 266 |         workload_dict['completions'] = [] 267 |     else: 268 |         workload_dict['completions'] = completions 269 | 270 |     investigations = _validate_sublist(file, 'investigation', 271 |                                        workload_dict['investigations'], 272 |                                        KEYS_INVESTIGATION_REQUIRED, 273 |                                        KEYS_INVESTIGATION_OPTIONAL) 274 |     if investigations is None: 275 |         workload_dict['investigations'] = [] 276 |     else: 277 |         workload_dict['investigations'] = investigations 278 | 279 |     return workload_dict 280 | 281 | 282 | def _queries_to_tuple(queries_dict, connections): 283 |     """Convert a query dictionary to a QUERY tuple.""" 284 |     queries = [] 285 |     for query_dict in queries_dict: 286 |         if query_dict['connection'] == -1: 287 |             # Add the query for all connections 288 |             for connection in range(connections): 289 |                 conn_query = dict(query_dict) 290 |                 conn_query['connection'] = connection + 1 291 |                 queries.append(QUERY(**conn_query)) 292 |         else: 293 |             queries.append(QUERY(**query_dict)) 294 | 295 |     return queries 296 | 297 | 298 | def _dict_to_tuple(workload_dict): 299 |     """Convert the workload dictionary to a workload named tuple.
300 |     This includes converting the queries, completions, and 301 |     investigations as well.""" 302 | 303 |     connections = workload_dict['connections'] 304 |     queries = _queries_to_tuple(workload_dict['queries'], connections) 305 |     completions = _queries_to_tuple(workload_dict['completions'], connections) 306 | 307 |     investigations = [] 308 |     for investigation_dict in workload_dict['investigations']: 309 |         investigation_dict['connection'] = workload_dict['connections'] + 1 310 |         investigations.append(INVESTIGATION(**investigation_dict)) 311 | 312 |     workload = WORKLOAD( 313 |         workload_dict['name'], 314 |         workload_dict['description'], 315 |         workload_dict['concurrent'], 316 |         workload_dict['connections'], 317 |         workload_dict['loops'], 318 |         queries, 319 |         completions, 320 |         investigations, 321 |         workload_dict['implementation'], 322 |         workload_dict['protocol'] 323 |     ) 324 |     return workload 325 | 326 | 327 | def _parse_yaml(path): 328 |     """Parse a workload YAML definition file. 329 | 330 |     Note: This parser is by no means a full-blown YAML parser - it just 331 |     supports what is expected for the workload definition. 332 |     It would be better to use the yaml module (the PyYAML PyPI package) 333 |     for this, but it is not supported in MySQL Shell. 334 |     """ 335 |     with open(path, 'rt', encoding='utf-8') as yml: 336 |         definition = yml.readlines() 337 | 338 |     workload = {} 339 |     level = 'workflow' 340 |     sql_indent = 0  # To remove the right amount of prefix whitespace 341 |     current_list = {} 342 |     current_sql = '' 343 |     list_type = None 344 |     for line in definition: 345 |         line = line.rstrip() 346 |         if DEBUG: 347 |             print(f'line: {line}') 348 |         line = RE_COMMENT.sub('', line) 349 |         if line.strip() == '---': 350 |             continue 351 | 352 |         m = RE_KEYWORD_LINE.match(line) 353 |         if m: 354 |             (indent_str, list_item, keyword, value) = m.groups() 355 |             indent = len(indent_str) 356 |             value = value.strip() 357 | 358 |             # Handle data type conversion 359 |             if value.upper() == 'NO': 360 |                 value = False 361 |             elif value.upper() == 'YES': 362 |                 value = True 363 |             elif (len(value) > 2 and 364 |                   value[0] in ('"', "'") and 365 |                   value[-1] == value[0]): 366 |                 # The string is quoted - remove the quotes 367 |                 # Note: it is not checked whether the end quote is escaped!
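# Illustrative example (added commentary, not part of the original
# source): a definition line such as
#   comment: 'hold the locks'
# reaches this point with value holding the quoted string
# 'hold the locks', and the line below strips it to: hold the locks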
value = value[1:-1] 369 |             elif len(value) > 2 and value[0] == '[' and value[-1] == ']': 370 |                 # A list 371 |                 value = [val.strip() for val in value[1:-1].split(',')] 372 |             else: 373 |                 # Handle integer values 374 |                 try: 375 |                     value = int(value) 376 |                 except ValueError: 377 |                     pass 378 | 379 |             if DEBUG: 380 |                 print(f'   level     = {level}') 381 |                 print(f'   list_item = {list_item}') 382 |                 print(f'   keyword   = {keyword}') 383 |                 print(f'   value     = {value}') 384 |             if level == 'sql': 385 |                 # SQL statement has completed 386 |                 if DEBUG: 387 |                     print('   SQL statement has completed') 388 |                 current_list['sql'] = current_sql.strip() 389 |                 current_sql = '' 390 |                 level = 'list' 391 | 392 |             if keyword in KNOWN_LISTS: 393 |                 if len(current_list) > 0: 394 |                     workload[list_type].append(current_list) 395 | 396 |                 level = 'list' 397 |                 list_type = keyword 398 |                 current_list = {} 399 |                 workload[keyword] = [] 400 |             elif list_item is not None: 401 |                 level = 'list' 402 |                 if len(current_list) > 0: 403 |                     workload[list_type].append(current_list) 404 |                 if keyword == 'sql' and value == '|': 405 |                     if DEBUG: 406 |                         print('   Starting new SQL statement') 407 |                     level = 'sql' 408 |                     current_sql = '' 409 |                     sql_indent = 0 410 |                     current_list = {} 411 |                 else: 412 |                     current_list = {keyword: value} 413 |             elif keyword == 'sql': 414 |                 if value == '|': 415 |                     if DEBUG: 416 |                         print('   Starting new SQL statement') 417 |                     level = 'sql' 418 |                     current_sql = '' 419 |                     sql_indent = 0 420 |                 else: 421 |                     current_list['sql'] = value 422 |             elif indent == 0: 423 |                 level = 'workflow' 424 |                 if len(current_list) > 0: 425 |                     workload[list_type].append(current_list) 426 |                     current_list = {} 427 |                 workload[keyword] = value 428 |             elif level == 'list': 429 |                 current_list[keyword] = value 430 |         else: 431 |             if sql_indent == 0: 432 |                 sql_indent = len(line) - len(line.lstrip()) 433 |             current_sql += line[sql_indent:] + '\n' 434 | 435 |     if current_sql != '': 436 |         current_list['sql'] = current_sql.strip() 437 |     if len(current_list) > 0: 438 |         workload[list_type].append(current_list) 439 | 440 |     return workload 441 | 442 | 443 | def load(path): 444 |     """Look for all YAML files in the path provided and parse them as 445 |     workload definitions. Both .yaml and .yml files are parsed. 446 | 447 |     Returns the definitions as a dictionary with the workload name as 448 |     the key.""" 449 |     path = pathlib.Path(path) 450 |     workloads = {} 451 |     for glob in ['*.yaml', '*.yml']: 452 |         for yaml_path in path.glob(glob): 453 |             workload_dict = _parse_yaml(yaml_path) 454 |             workload_validated = _validate(yaml_path, workload_dict) 455 |             if workload_validated is not None: 456 |                 workload = _dict_to_tuple(workload_validated) 457 |                 workloads[workload.name] = workload 458 | 459 |     return workloads 460 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing-6-10.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-10. Example output of the log_status table 2 | name: Listing 6-10 3 | description: Example output of the log_status table 4 | connections: 1 5 | queries: 6 |   - connection: 1 7 |     sql: | 8 |       SELECT * 9 |         FROM performance_schema.log_status 10 |     format: vertical 11 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-1.
Updating row by non-unique secondary index 2 | name: Listing 10-1 3 | description: Updating row by non-unique secondary index 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: | 8 | ALTER TABLE world.city 9 | ADD INDEX (Name) 10 | - connection: 1 11 | sql: START TRANSACTION 12 | - connection: 1 13 | sql: | 14 | UPDATE world.city 15 | SET Population = 5000000 16 | WHERE Name = 'Sydney' 17 | - connection: 2 18 | sql: | 19 | SELECT index_name, lock_type, 20 | lock_mode, lock_data 21 | FROM performance_schema.data_locks 22 | WHERE object_schema = 'world' 23 | AND object_name = 'city' 24 | AND thread_id = {thread_ids_not_self} 25 | format: vertical 26 | - connection: 1 27 | sql: ROLLBACK 28 | - connection: 1 29 | sql: | 30 | ALTER TABLE world.city 31 | DROP INDEX Name 32 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-11.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-11. Performing DDL on a table with foreign key relations 2 | name: Listing 10-11 3 | description: Performing DDL on a table with foreign key relations 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: OPTIMIZE TABLE sakila.inventory 8 | wait: No 9 | show_result: No 10 | - connection: 2 11 | sql: | 12 | SELECT object_name, lock_type, lock_duration 13 | FROM performance_schema.metadata_locks 14 | WHERE owner_thread_id = {thread_id_connection_1} 15 | AND object_type = 'TABLE' 16 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-2. Updating row by the primary index 2 | name: Listing 10-2 3 | description: Updating row by the primary index 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: | 8 | ALTER TABLE world.city 9 | ADD INDEX (Name) 10 | - connection: 1 11 | sql: | 12 | SELECT ID 13 | FROM world.city 14 | WHERE Name = 'Sydney' 15 | - connection: 1 16 | sql: START TRANSACTION 17 | - connection: 1 18 | sql: | 19 | UPDATE world.city 20 | SET Population = 5000000 21 | WHERE ID = 130 22 | - connection: 2 23 | sql: | 24 | SELECT index_name, lock_type, 25 | lock_mode, lock_data 26 | FROM performance_schema.data_locks 27 | WHERE object_schema = 'world' 28 | AND object_name = 'city' 29 | AND thread_id = {thread_ids_not_self} 30 | format: vertical 31 | - connection: 1 32 | sql: ROLLBACK 33 | - connection: 1 34 | sql: | 35 | ALTER TABLE world.city 36 | DROP INDEX Name 37 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-3. 
Updating rows in descending order by ascending index 2 | name: Listing 10-3 3 | description: Updating rows in descending order by ascending index 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | ALTER TABLE world.city 9 | ADD INDEX (Population) 10 | - connection: 1 11 | sql: START TRANSACTION 12 | - connection: 1 13 | sql: | 14 | UPDATE world.city 15 | SET Population = Population * 1.10 16 | WHERE Population BETWEEN 1000000 AND 2000000 17 | ORDER BY Population DESC 18 | LIMIT 3 19 | completions: 20 | - connection: 1 21 | sql: ROLLBACK 22 | - connection: 1 23 | sql: | 24 | ALTER TABLE world.city 25 | DROP INDEX Population 26 | investigations: 27 | - sql: | 28 | SELECT index_name, lock_type, 29 | lock_mode, lock_data 30 | FROM performance_schema.data_locks 31 | WHERE object_schema = 'world' 32 | AND object_name = 'city' 33 | AND lock_type = 'RECORD' 34 | AND thread_id = ? 35 | ORDER BY index_name, lock_data DESC 36 | parameters: [thread_id] 37 | - sql: | 38 | SELECT index_name, lock_type, 39 | lock_mode, COUNT(*) 40 | FROM performance_schema.data_locks 41 | WHERE object_schema = 'world' 42 | AND object_name = 'city' 43 | AND thread_id = ? 44 | GROUP BY index_name, lock_type, lock_mode 45 | parameters: [thread_id] 46 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-4. Updating rows in descending order by descending index 2 | name: Listing 10-4 3 | description: Updating rows in descending order by descending index 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | ALTER TABLE world.city 9 | ADD INDEX (Population DESC) 10 | - connection: 1 11 | sql: START TRANSACTION 12 | - connection: 1 13 | sql: | 14 | UPDATE world.city 15 | SET Population = Population * 1.10 16 | WHERE Population BETWEEN 1000000 AND 2000000 17 | ORDER BY Population DESC 18 | LIMIT 3 19 | completions: 20 | - connection: 1 21 | sql: ROLLBACK 22 | - connection: 1 23 | sql: | 24 | ALTER TABLE world.city 25 | DROP INDEX Population 26 | investigations: 27 | - sql: | 28 | SELECT index_name, lock_type, 29 | lock_mode, lock_data 30 | FROM performance_schema.data_locks 31 | WHERE object_schema = 'world' 32 | AND object_name = 'city' 33 | AND lock_type = 'RECORD' 34 | AND thread_id = ? 35 | ORDER BY index_name, lock_data DESC 36 | parameters: [thread_id] 37 | - sql: | 38 | SELECT index_name, lock_type, 39 | lock_mode, COUNT(*) 40 | FROM performance_schema.data_locks 41 | WHERE object_schema = 'world' 42 | AND object_name = 'city' 43 | AND thread_id = ? 44 | GROUP BY index_name, lock_type, lock_mode 45 | parameters: [thread_id] 46 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-5. 
The difference between non-unique and unique secondary indexes 2 | name: Listing 10-5 3 | description: The difference between non-unique and unique secondary indexes 4 | connections: 2 5 | queries: 6 |   - connection: 1 7 |     sql: DROP TABLE IF EXISTS world._tmp_city1 8 |   - connection: 1 9 |     sql: | 10 |       CREATE TABLE world._tmp_city1 11 |       SELECT * 12 |         FROM world.city 13 |        WHERE CountryCode = 'AUS' 14 |   - connection: 1 15 |     sql: | 16 |       ALTER TABLE world._tmp_city1 17 |         ADD PRIMARY KEY (ID), 18 |         ADD INDEX (Name) 19 |   - connection: 1 20 |     sql: START TRANSACTION 21 |   - connection: 1 22 |     sql: | 23 |       UPDATE world._tmp_city1 24 |          SET Population = 5000000 25 |        WHERE Name = 'Sydney' 26 |   - connection: 2 27 |     sql: DROP TABLE IF EXISTS world._tmp_city2 28 |   - connection: 2 29 |     sql: | 30 |       CREATE TABLE world._tmp_city2 31 |       SELECT * 32 |         FROM world.city 33 |        WHERE CountryCode = 'AUS' 34 |   - connection: 2 35 |     sql: | 36 |       ALTER TABLE world._tmp_city2 37 |         ADD PRIMARY KEY (ID), 38 |         ADD UNIQUE INDEX (Name) 39 |   - connection: 2 40 |     sql: START TRANSACTION 41 |   - connection: 2 42 |     sql: | 43 |       UPDATE world._tmp_city2 44 |          SET Population = 5000000 45 |        WHERE Name = 'Sydney' 46 | completions: 47 |   - connection: 1 48 |     sql: ROLLBACK 49 |   - connection: 1 50 |     sql: DROP TABLE world._tmp_city1 51 |   - connection: 2 52 |     sql: ROLLBACK 53 |   - connection: 2 54 |     sql: DROP TABLE world._tmp_city2 55 | investigations: 56 |   - sql: | 57 |       SELECT index_name, lock_mode, lock_data 58 |         FROM performance_schema.data_locks 59 |        WHERE object_schema = 'world' 60 |              AND lock_type = 'RECORD' 61 |              AND thread_id = ? 62 |     parameters: [thread_id] 63 |     format: vertical 64 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_10-8.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 10-8. Updating a row in a table with foreign key relationships 2 | name: Listing 10-8 3 | description: Updating a row in a table with foreign key relationships 4 | connections: 1 5 | queries: 6 |   - connection: 1 7 |     sql: START TRANSACTION 8 |   - connection: 1 9 |     sql: | 10 |       UPDATE sakila.inventory 11 |          SET store_id = 1 12 |        WHERE inventory_id = 4090 13 | completions: 14 |   - connection: 1 15 |     sql: ROLLBACK 16 | investigations: 17 |   - sql: | 18 |       SELECT object_schema, object_name, lock_type, 19 |              index_name, lock_mode, lock_data 20 |         FROM performance_schema.data_locks 21 |        WHERE thread_id = ? 22 |     parameters: [thread_id] 23 |     format: vertical 24 |   - sql: | 25 |       SELECT object_type, object_schema, object_name, 26 |              column_name, lock_type, lock_duration 27 |         FROM performance_schema.metadata_locks 28 |        WHERE owner_thread_id = ? 29 |        ORDER BY object_type, object_schema, object_name, 30 |                 column_name, lock_type 31 |     parameters: [thread_id] 32 |     format: vertical 33 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-1.
Read locking in the SERIALIZABLE transaction isolation level 2 | name: Listing 12-1 3 | description: Read locking in the SERIALIZABLE transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET transaction_isolation = 'SERIALIZABLE' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | SELECT ID, Name, Population 13 | FROM world.city 14 | WHERE CountryCode = 'SVK' 15 | AND District = 'Bratislava' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, lock_data 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND lock_type = 'RECORD' 24 | AND thread_id = {thread_ids_not_self} 25 | ORDER BY index_name, lock_data DESC 26 | - connection: 1 27 | sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-2. Locking in the SERIALIZABLE transaction isolation level 2 | name: Listing 12-2 3 | description: Locking in the SERIALIZABLE transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | UPDATE world.city 13 | SET Population = Population * 1.10 14 | WHERE CountryCode = 'SVK' 15 | AND District = 'Bratislava' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, lock_data 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND lock_type = 'RECORD' 24 | AND thread_id = {thread_ids_not_self} 25 | ORDER BY index_name, lock_data DESC 26 | - connection: 1 27 | sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-3. Read locking in the REPEATABLE READ transaction isolation level 2 | name: Listing 12-3 3 | description: Read locking in the REPEATABLE READ transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET transaction_isolation = 'REPEATABLE-READ' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | SELECT ID, Name, Population 13 | FROM world.city 14 | WHERE CountryCode = 'SVK' 15 | AND District = 'Bratislava' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, lock_data 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND lock_type = 'RECORD' 24 | AND thread_id = {thread_ids_not_self} 25 | ORDER BY index_name, lock_data DESC 26 | - connection: 1 27 | sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-4. 
Locking in the REPEATABLE READ transaction isolation level 2 | name: Listing 12-4 3 | description: Locking in the REPEATABLE READ transaction isolation level 4 | connections: 2 5 | queries: 6 |   - connection: 1 7 |     sql: SET transaction_isolation = 'REPEATABLE-READ' 8 |   - connection: 1 9 |     sql: START TRANSACTION 10 |   - connection: 1 11 |     sql: | 12 |       UPDATE world.city 13 |          SET Population = Population * 1.10 14 |        WHERE CountryCode = 'SVK' 15 |              AND District = 'Bratislava' 16 |   - connection: 2 17 |     sql: | 18 |       SELECT index_name, lock_type, 19 |              lock_mode, lock_data 20 |         FROM performance_schema.data_locks 21 |        WHERE object_schema = 'world' 22 |              AND object_name = 'city' 23 |              AND lock_type = 'RECORD' 24 |              AND thread_id = {thread_ids_not_self} 25 |        ORDER BY index_name, lock_data DESC 26 |   - connection: 1 27 |     sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-5. Consistent reads mixed with DML 2 | name: Listing 12-5 3 | description: Consistent reads mixed with DML 4 | connections: 2 5 | queries: 6 |   - connection: 1 7 |     sql: SET transaction_isolation = 'REPEATABLE-READ' 8 |   - connection: 1 9 |     sql: START TRANSACTION 10 |   - connection: 1 11 |     sql: | 12 |       SELECT ID, Name, Population 13 |         FROM world.city 14 |        WHERE CountryCode = 'BHS' 15 |   - connection: 2 16 |     sql: START TRANSACTION 17 |   - connection: 2 18 |     sql: | 19 |       INSERT INTO world.city 20 |       VALUES (4080, 'Freeport', 'BHS', 21 |               'Grand Bahama', 50000) 22 |   - connection: 2 23 |     sql: COMMIT 24 |   - connection: 1 25 |     sql: | 26 |       SELECT ID, Name, Population 27 |         FROM world.city 28 |        WHERE CountryCode = 'BHS' 29 |   - connection: 1 30 |     sql: | 31 |       UPDATE world.city 32 |          SET Population = Population * 1.10 33 |        WHERE CountryCode = 'BHS' 34 |   - connection: 1 35 |     sql: | 36 |       SELECT ID, Name, Population 37 |         FROM world.city 38 |        WHERE CountryCode = 'BHS' 39 | completions: 40 |   - connection: 1 41 |     sql: ROLLBACK 42 |   - connection: 2 43 |     sql: DELETE FROM world.city WHERE ID = 4080 44 | investigations: 45 |   - sql: | 46 |       SELECT index_name, lock_type, 47 |              lock_mode, lock_data 48 |         FROM performance_schema.data_locks 49 |        WHERE object_schema = 'world' 50 |              AND object_name = 'city' 51 |              AND lock_type = 'RECORD' 52 |              AND thread_id = ? 53 |        ORDER BY index_name, lock_data DESC 54 |     parameters: [thread_id] 55 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-6.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-6.
Locking in the READ COMMITTED transaction isolation level 2 | name: Listing 12-6 3 | description: Locking in the READ COMMITTED transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET transaction_isolation = 'READ-COMMITTED' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | UPDATE world.city 13 | SET Population = Population * 1.10 14 | WHERE CountryCode = 'SVK' 15 | AND District = 'Bratislava' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, lock_data 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND lock_type = 'RECORD' 24 | AND thread_id = {thread_ids_not_self} 25 | ORDER BY index_name, lock_data DESC 26 | - connection: 1 27 | sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_12-7.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 12-7. READ COMMITTED Semi-consistent reads 2 | name: Listing 12-7 3 | description: READ COMMITTED Semi-consistent reads 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET transaction_isolation = 'READ-COMMITTED' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | UPDATE world.city 13 | SET Population = Population * 1.10 14 | WHERE Name = 'San Jose' 15 | AND District = 'Southern Tagalog' 16 | - connection: 2 17 | sql: SET transaction_isolation = 'READ-COMMITTED' 18 | - connection: 2 19 | sql: START TRANSACTION 20 | - connection: 2 21 | sql: | 22 | UPDATE world.city 23 | SET Population = Population * 1.10 24 | WHERE Name = 'San Jose' 25 | AND District = 'Central Luzon' 26 | completions: 27 | - connection: 1 28 | sql: ROLLBACK 29 | - connection: 2 30 | sql: ROLLBACK 31 | investigations: 32 | - sql: | 33 | SELECT index_name, lock_type, 34 | lock_mode, lock_data 35 | FROM performance_schema.data_locks 36 | WHERE object_schema = 'world' 37 | AND object_name = 'city' 38 | AND lock_type = 'RECORD' 39 | AND thread_id = ? 40 | ORDER BY index_name, lock_data DESC 41 | parameters: [thread_id] 42 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_13-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 13-1. 
Triggering flush lock contention 2 | name: Listing 13-1 3 | description: Triggering flush lock contention 4 | connections: 3 5 | queries: 6 | - connection: 1 7 | sql: SELECT city.*, SLEEP(3600) FROM world.city WHERE ID = 130 8 | wait: No 9 | sleep: 2 10 | - connection: 2 11 | sql: FLUSH TABLES WITH READ LOCK 12 | wait: No 13 | sleep: 2 14 | - connection: 3 15 | sql: SELECT * FROM world.city WHERE ID = 3805 16 | wait: No 17 | investigations: 18 | - sql: | 19 | SELECT thd_id, conn_id, state, 20 | current_statement, 21 | statement_latency 22 | FROM sys.session 23 | WHERE command = 'Query' 24 | format: vertical 25 | - sql: | 26 | SELECT thd_id, conn_id, state, 27 | current_statement, 28 | statement_latency 29 | FROM sys.session 30 | WHERE thd_id IN ({thread_ids}) 31 | - sql: | 32 | SELECT object_type, object_schema, object_name, 33 | lock_type, lock_duration, lock_status, 34 | owner_thread_id 35 | FROM performance_schema.metadata_locks 36 | WHERE owner_thread_id IN ({thread_ids}) 37 | - sql: | 38 | SELECT thread_id, event_id, 39 | object_schema, object_name, index_name, 40 | lock_type, lock_mode, lock_status, lock_data 41 | FROM performance_schema.data_locks 42 | WHERE thread_id IN ({thread_ids}) 43 | - sql: EXPLAIN FOR CONNECTION {processlist_id_connection_1} 44 | - sql: KILL ? 45 | parameters: [processlist_id] 46 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_14-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 14-1. Triggering metadata lock contention 2 | name: Listing 14-1 3 | description: Triggering metadata lock contention 4 | connections: 3 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: SELECT * FROM world.city WHERE ID = 3805 10 | format: vertical 11 | - connection: 1 12 | sql: SELECT Code, Name FROM world.country WHERE Code = 'USA' 13 | format: vertical 14 | sleep: 2 15 | - connection: 2 16 | sql: ALTER TABLE world.city ADD INDEX (Name) 17 | wait: No 18 | sleep: 2 19 | - connection: 3 20 | sql: SELECT * FROM world.city WHERE ID = 130 21 | wait: No 22 | completions: 23 | - connection: 1 24 | sql: ROLLBACK 25 | - connection: 2 26 | sql: ALTER TABLE world.city DROP INDEX Name 27 | investigations: 28 | - sql: | 29 | SELECT thd_id, conn_id, state, 30 | current_statement, 31 | statement_latency 32 | FROM sys.session 33 | WHERE command = 'Query' OR trx_state = 'ACTIVE' 34 | format: vertical 35 | - sql: | 36 | SELECT thd_id, conn_id, state, 37 | current_statement, 38 | statement_latency 39 | FROM sys.session 40 | WHERE thd_id IN ({thread_ids}) 41 | - sql: | 42 | SELECT * 43 | FROM sys.schema_table_lock_waits 44 | format: vertical 45 | - sql: | 46 | SELECT * 47 | FROM sys.schema_table_lock_waits 48 | WHERE waiting_lock_type = 'EXCLUSIVE' 49 | AND waiting_pid <> blocking_pid 50 | format: vertical 51 | - sql: | 52 | SELECT blocking_pid, COUNT(*) 53 | FROM sys.schema_table_lock_waits 54 | WHERE waiting_pid <> blocking_pid 55 | GROUP BY blocking_pid 56 | ORDER BY COUNT(*) DESC 57 | - sql: | 58 | SELECT * 59 | FROM information_schema.INNODB_TRX 60 | WHERE trx_mysql_thread_id = {processlist_id_connection_1} 61 | format: vertical 62 | - sql: | 63 | SELECT * 64 | FROM performance_schema.events_transactions_current 65 | WHERE thread_id = {thread_id_connection_1} 66 | format: vertical 67 | - sql: | 68 | SELECT event_id, current_schema, sql_text 69 | FROM performance_schema.events_statements_history 70 | WHERE thread_id = 
{thread_id_connection_1} 71 | AND nesting_event_id = {event_id_connection_1+2} 72 | AND nesting_event_type = 'TRANSACTION' 73 | format: vertical 74 | - sql: | 75 | SELECT attr_name, attr_value 76 | FROM performance_schema.session_connect_attrs 77 | WHERE processlist_id = {processlist_id_connection_1} 78 | ORDER BY attr_name 79 | - sql: | 80 | SELECT object_type, object_schema, object_name, 81 | lock_type, lock_duration, lock_status, 82 | owner_thread_id 83 | FROM performance_schema.metadata_locks 84 | WHERE owner_thread_id IN ({thread_ids}) 85 | - sql: | 86 | SELECT thread_id, event_id, 87 | object_schema, object_name, index_name, 88 | lock_type, lock_mode, lock_status, lock_data 89 | FROM performance_schema.data_locks 90 | WHERE thread_id IN ({thread_ids}) 91 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_15-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 15-2. Triggering InnoDB record lock contention 2 | name: Listing 15-2 3 | description: Triggering InnoDB record lock contention 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = 5000000 12 | WHERE ID = 130 13 | - connection: 2 14 | sql: SET SESSION innodb_lock_wait_timeout = 3600 15 | - connection: 2 16 | sql: START TRANSACTION 17 | - connection: 2 18 | sql: UPDATE world.city SET Population = Population * 1.10 WHERE CountryCode = 'AUS' 19 | wait: No 20 | completions: 21 | - connection: 1 22 | sql: ROLLBACK 23 | - connection: 2 24 | sql: ROLLBACK 25 | investigations: 26 | - sql: SELECT * FROM sys.innodb_lock_waits 27 | format: vertical 28 | - sql: | 29 | SELECT thd_id, conn_id, state, 30 | current_statement, 31 | statement_latency 32 | FROM sys.session 33 | WHERE thd_id IN ({thread_ids}) 34 | - sql: | 35 | SELECT * 36 | FROM performance_schema.data_locks 37 | WHERE thread_id IN ({thread_ids}) 38 | format: vertical 39 | - sql: | 40 | SELECT * 41 | FROM performance_schema.data_lock_waits 42 | WHERE requesting_thread_id IN ({thread_ids}) 43 | format: vertical 44 | - sql: | 45 | SELECT object_type, object_schema, object_name, 46 | lock_type, lock_duration, lock_status, 47 | owner_thread_id 48 | FROM performance_schema.metadata_locks 49 | WHERE owner_thread_id IN ({thread_ids}) 50 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_16-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 16-1. 
Triggering an InnoDB deadlock 2 | name: Listing 16-1 3 | description: Triggering an InnoDB deadlock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: UPDATE world.city SET Population = Population * 1.10 WHERE CountryCode = 'AUS' 10 | - connection: 2 11 | sql: START TRANSACTION 12 | - connection: 2 13 | sql: UPDATE world.country SET Population = Population + 146000 WHERE Code = 'AUS' 14 | - connection: 1 15 | sql: UPDATE world.country SET Population = Population * 1.10 WHERE Code = 'AUS' 16 | wait: No 17 | - connection: 2 18 | sql: INSERT INTO world.city VALUES (4080, 'Darwin', 'AUS', 'Northern Territory', 146000) 19 | - connection: 2 20 | sql: ROLLBACK 21 | - connection: 1 22 | sql: ROLLBACK 23 | investigations: 24 | - sql: SHOW ENGINE INNODB STATUS 25 | format: vertical 26 | - sql: SELECT * FROM sys.innodb_lock_waits 27 | format: vertical 28 | - sql: | 29 | SELECT * 30 | FROM world.city 31 | WHERE CountryCode = 'AUT' 32 | ORDER BY CountryCode, ID 33 | LIMIT 1 34 | - sql: | 35 | SELECT sql_text, nesting_event_id, 36 | nesting_event_type, mysql_errno, 37 | IFNULL(error_name, '') AS error, 38 | message_text 39 | FROM performance_schema.events_statements_history 40 | LEFT OUTER JOIN performance_schema.events_errors_summary_global_by_error 41 | ON error_number = mysql_errno 42 | WHERE thread_id = PS_THREAD_ID(?) 43 | AND event_id > ? 44 | ORDER BY event_id 45 | parameters: [processlist_id, event_id] 46 | format: vertical 47 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_17-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 17-1. Locks and foreign keys 2 | name: Listing 17-1 3 | description: Locks and foreign keys 4 | implementation: [workloads.listing_17_1, ForeignKeys, {}] 5 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_17_1.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from datetime import datetime 3 | from datetime import timedelta 4 | from time import sleep 5 | 6 | import mysqlsh 7 | 8 | # noinspection PyUnresolvedReferences 9 | from concurrency_book import libs 10 | # noinspection PyUnresolvedReferences 11 | import concurrency_book.libs.query 12 | # noinspection PyUnresolvedReferences 13 | import concurrency_book.libs.util 14 | # noinspection PyUnresolvedReferences 15 | import concurrency_book.libs.log 16 | # noinspection PyUnresolvedReferences 17 | import concurrency_book.libs.metrics 18 | 19 | DEFAULT_RUNTIME = 15 20 | DEFAULT_SLEEP_FACTOR = 15 21 | 22 | 23 | class ForeignKeys(object): 24 | """Implements the workload triggering lock contention due to 25 | foreign keys.""" 26 | _workload = None 27 | _session = None 28 | _log = None 29 | _done = None 30 | _mutex_payment_start = None 31 | _mutex_payment_commit = None 32 | _log_lock = None 33 | _processlist_ids = {} 34 | _thread_ids = {} 35 | _event_ids = {} 36 | _sql_formatter = None 37 | _runtime = None 38 | _sleep_factor = None 39 | 40 | def __init__(self, workload, session, log): 41 | """Initialize the instance.""" 42 | self._workload = workload 43 | self._session = session 44 | self._log = log 45 | self._done = None 46 | self._mutex_payment_start = None 47 | self._mutex_payment_commit = None 48 | self._log_lock = threading.Lock() 49 | self._processlist_ids = {} 50 | self._thread_ids = {} 51 | self._event_ids = {} 52 | 
self._sql_formatter = None 53 |         self._runtime = None 54 |         self._sleep_factor = None 55 |         self._prompt_settings() 56 | 57 |     def _prompt_settings(self): 58 |         """Prompt for the settings for the workload.""" 59 |         prompt = 'Specify the number of seconds to run for' 60 |         self._runtime = libs.util.prompt_int(10, 3600, DEFAULT_RUNTIME, prompt) 61 | 62 |         prompt = 'Specify the sleep factor' 63 |         self._sleep_factor = libs.util.prompt_int(0, 30, DEFAULT_SLEEP_FACTOR, 64 |                                                   prompt) 65 | 66 |         print('') 67 | 68 |     def _wait_metadata_lock(self, session, thread_ids, min_count=1): 69 |         """Monitor for a metadata lock wait situation.""" 70 |         local = threading.local() 71 |         try: 72 |             local.id_list = list(thread_ids) 73 |         except TypeError: 74 |             local.id_list = [thread_ids] 75 |         local.in_list = ', '.join([str(local.thd_id) 76 |                                    for local.thd_id in local.id_list]) 77 |         local.sql = f""" 78 | SELECT COUNT(*) 79 |   FROM performance_schema.metadata_locks 80 |  WHERE owner_thread_id IN ({local.in_list}) 81 |        AND lock_status = 'PENDING'""".strip() 82 | 83 |         local.count = 0 84 |         while local.count < min_count and not self._done.wait(0): 85 |             try: 86 |                 local.result = session.run_sql(local.sql) 87 |                 local.count = local.result.fetch_one()[0] 88 |             except (SystemError, IndexError, AttributeError): 89 |                 # MySQL Shell is not entirely thread safe 90 |                 pass 91 | 92 |     def _exec_and_print_sql(self, session, sql, dump_format='table'): 93 |         """Execute and print the result of a query. 94 |         This method assumes the log lock is taken ahead of time in 95 |         the calling routine.""" 96 |         local = threading.local() 97 |         local.result = session.run_sql(sql) 98 |         if dump_format == 'vertical': 99 |             local.delimiter = r'\G' 100 |         else: 101 |             local.delimiter = ';' 102 |         print('mysql> ' + self._sql_formatter.indent_sql(sql, 7) + 103 |               local.delimiter) 104 |         mysqlsh.globals.shell.dump_rows(local.result, dump_format) 105 |         print('') 106 | 107 |     def _exec_with_error_handling(self, session, sql, verbose=True): 108 |         """Execute a query. Exceptions raised by the execution are 109 |         caught and the error is printed. If there is no error, the 110 |         result is printed if verbose is set to true.""" 111 |         local = threading.local() 112 |         try: 113 |             local.result = session.run_sql(sql) 114 |         except mysqlsh.DBError as e: 115 |             self._log_lock.acquire() 116 |             print(f'mysql> {sql};') 117 |             local.msg = libs.log.RE_ERROR_MSG.sub('', e.msg) 118 |             print(f'ERROR: {e.code}: {local.msg}') 119 |             print('') 120 |             self._log_lock.release() 121 |         else: 122 |             if verbose: 123 |                 self._log_lock.acquire() 124 |                 print(f'mysql> {sql};') 125 |                 print(f'Query OK, 0 rows affected ' + 126 |                       f'({local.result.execution_time})') 127 |                 mysqlsh.globals.shell.dump_rows(local.result) 128 |                 print('') 129 |                 self._log_lock.release() 130 | 131 |     def _customer(self, session, customer_id, other_thread_id): 132 |         """Execute the queries on the customer table.""" 133 |         local = threading.local() 134 |         local.sql = "UPDATE sakila.customer " + \ 135 |                     "SET active = IF(active = 1, 0, 1) WHERE customer_id = ?" 136 |         local.done = False 137 |         # The timeout for the mutex acquire() calls ensures that if 138 |         # a mistake happens and the test locks up, the thread blocks 139 |         # for at most 20 seconds.
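        # Added commentary (not part of the original source) - a sketch of
        # the handshake between the two customer threads. Each thread:
        #   1. acquires the commit mutex (below) so the partner cannot
        #      commit yet
        #   2. in a loop: acquires the start mutex, starts a transaction,
        #      and releases the commit mutex
        #   3. runs its UPDATE and releases the start mutex so the partner
        #      can start its own transaction
        #   4. re-acquires the commit mutex and only then commits
        # The net effect is that at least one of the two customer
        # transactions is open at any point in time while no single
        # transaction is long running, matching the description in
        # execute() below.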
140 |         self._mutex_payment_commit.acquire(timeout=20) 141 |         self._log.debug('Acquired commit mutex', customer_id) 142 |         while not local.done: 143 |             self._mutex_payment_start.acquire(timeout=20) 144 |             self._log.debug('Acquired start mutex', customer_id) 145 |             session.start_transaction() 146 |             self._log.debug('Releasing commit mutex', customer_id) 147 |             try: 148 |                 self._mutex_payment_commit.release() 149 |             except RuntimeError: 150 |                 pass 151 |             session.run_sql(local.sql, (customer_id, )) 152 | 153 |             # Ensure the ALTER TABLE thread is waiting for the 154 |             # metadata lock before proceeding. 155 |             self._wait_metadata_lock(session, self._thread_ids['alter']) 156 |             self._log.debug('Metadata locks after run_sql()', customer_id) 157 |             self._log.debug('Releasing start mutex', customer_id) 158 |             try: 159 |                 self._mutex_payment_start.release() 160 |             except RuntimeError: 161 |                 pass 162 |             self._mutex_payment_commit.acquire(timeout=20) 163 |             self._log.debug('Acquired commit mutex', customer_id) 164 |             # Need to wait to ensure the other thread updating 165 |             # the customer table has started as they block each other 166 |             # due to a metadata lock on the inventory table. 167 |             self._wait_metadata_lock(session, other_thread_id) 168 |             self._log.debug('Metadata locks before commit()', customer_id) 169 |             sleep(0.1 * self._sleep_factor) 170 |             session.commit() 171 |             local.done = self._done.wait(0) 172 | 173 |         session.commit() 174 |         # Ensure the mutexes are released so the other thread is not blocked 175 |         try: 176 |             self._mutex_payment_start.release() 177 |         except RuntimeError: 178 |             pass 179 |         try: 180 |             self._mutex_payment_commit.release() 181 |         except RuntimeError: 182 |             pass 183 | 184 |     def _alter(self, session): 185 |         """Execute the ALTER TABLE statement on the inventory table.""" 186 |         local = threading.local() 187 |         local.sql = "ALTER TABLE sakila.inventory FORCE" 188 |         local.done = False 189 |         session.run_sql('SET SESSION lock_wait_timeout = 1') 190 |         while not local.done: 191 |             self._log.debug('Executing ALTER TABLE') 192 |             self._exec_with_error_handling(session, local.sql) 193 |             local.done = self._done.wait(5) 194 | 195 |     def _film_category(self, session): 196 |         """Execute the UPDATE on the film_category table.""" 197 |         local = threading.local() 198 |         local.sql = "UPDATE sakila.film_category " + \ 199 |                     "SET category_id = IF(category_id = 7, 16, 7) " + \ 200 |                     "WHERE film_id = 64" 201 |         local.done = False 202 |         session.run_sql("SET SESSION autocommit = 0") 203 |         while not local.done: 204 |             self._log.debug('Executing update on film_category') 205 |             session.run_sql(local.sql) 206 |             local.done = self._done.wait(4) 207 |             session.commit() 208 | 209 |     def _category(self, session): 210 |         """Execute the UPDATE on the category table.""" 211 |         local = threading.local() 212 |         local.sql = "UPDATE sakila.category " + \ 213 |                     "SET name = IF(name = 'Travel', 'Exploring', 'Travel')" + \ 214 |                     " WHERE category_id = 16" 215 |         session.run_sql("SET SESSION innodb_lock_wait_timeout = 1") 216 |         local.done = False 217 |         while not local.done: 218 |             self._log.debug('Executing update on category') 219 |             self._exec_with_error_handling(session, local.sql, False) 220 |             local.done = self._done.wait(0) 221 | 222 |     def _monitor_waits(self, session): 223 |         """Monitor for lock waits.""" 224 |         local = threading.local() 225 |         local.sql_mdl = """ 226 | SELECT object_name, lock_type, lock_status, 227 |        owner_thread_id, owner_event_id 228 |   FROM performance_schema.metadata_locks 229 |  WHERE object_type = 'TABLE' 230 |        AND object_schema = 
'sakila' 231 | ORDER BY owner_thread_id, object_name, lock_type""".strip() 232 | 233 | local.sql_mdl_summary = """ 234 | SELECT object_name, COUNT(*) 235 | FROM performance_schema.metadata_locks 236 | WHERE object_type = 'TABLE' 237 | AND object_schema = 'sakila' 238 | GROUP BY object_name 239 | ORDER BY object_name""".strip() 240 | 241 | local.sql_mdl_waits = "SELECT * FROM sys.schema_table_lock_waits" 242 | 243 | local.sql_mdl_waits_summary = """ 244 | SELECT blocking_pid, COUNT(*) 245 | FROM sys.schema_table_lock_waits 246 | WHERE waiting_pid <> blocking_pid 247 | GROUP BY blocking_pid 248 | ORDER BY COUNT(*) DESC""".strip() 249 | 250 | # {thread_ids} is a placeholder and will be replaced 251 | # when executing the query. 252 | local.sql_session_template = """ 253 | SELECT thd_id, conn_id, command, state, 254 | current_statement, time, statement_latency, 255 | trx_latency, trx_state 256 | FROM sys.session 257 | WHERE thd_id IN ({thread_ids}) 258 | ORDER BY conn_id""".strip() 259 | 260 | local.sql_innodb_waits = "SELECT * FROM sys.innodb_lock_waits" 261 | 262 | local.sql_session = self._sql_formatter.sql_global_sub( 263 | local.sql_session_template, 0) 264 | 265 | # Set statement_truncate_len high enough to include the whole 266 | # statement for all the queries. 267 | session.run_sql("SET @sys.statement_truncate_len = 128") 268 | 269 | # Wait for the ALTER thread to have a pending metadata lock 270 | local.thread_ids = [ 271 | self._thread_ids['customer_1'], 272 | self._thread_ids['customer_2'], 273 | self._thread_ids['alter'], 274 | ] 275 | self._wait_metadata_lock(session, local.thread_ids, 2) 276 | self._log.debug('Detected pending metadata lock for the alter ' + 277 | 'thread') 278 | 279 | self._log_lock.acquire() 280 | self._exec_and_print_sql(session, local.sql_mdl, 'vertical') 281 | self._exec_and_print_sql(session, local.sql_mdl_summary) 282 | self._exec_and_print_sql(session, local.sql_mdl_waits, 'vertical') 283 | self._exec_and_print_sql(session, local.sql_mdl_waits_summary) 284 | self._exec_and_print_sql(session, local.sql_session, 'vertical') 285 | self._log_lock.release() 286 | local.rows = [] 287 | while len(local.rows) == 0: 288 | local.result = session.run_sql(local.sql_innodb_waits) 289 | sleep(0.1) # Race condition in mysqlsh 290 | local.rows = local.result.fetch_all() 291 | 292 | self._log_lock.acquire() 293 | print('mysql> ' + 294 | self._sql_formatter.indent_sql(local.sql_innodb_waits, 7) + 295 | r'\G') 296 | local.i = 0 297 | for local.row in local.rows: 298 | local.i += 1 299 | print(f'*************************** {local.i}. 
row ' + 300 |                   '***************************') 301 |             local.j = 0 302 |             for local.column in local.result.columns: 303 |                 print(f'{local.column.column_label:>28s}: ' + 304 |                       f'{local.row[local.j]}') 305 |                 local.j += 1 306 |         if len(local.rows) == 1: 307 |             local.rows = f'{len(local.rows)} row in' 308 |         else: 309 |             local.rows = f'{len(local.rows)} rows in' 310 |         print(f'{local.rows} set ({local.result.execution_time})') 311 |         print('') 312 |         self._log_lock.release() 313 | 314 |     def _monitor_metrics(self, session, done): 315 |         """Collect monitoring information.""" 316 |         local = threading.local() 317 |         # Collect InnoDB and global metrics 318 |         local.metrics = libs.metrics.Metrics(session) 319 | 320 |         local.count_metrics = [ 321 |             'innodb_row_lock_current_waits', 322 |             'lock_row_lock_current_waits', 323 |         ] 324 |         local.delta_metrics = [ 325 |             'innodb_row_lock_time', 326 |             'innodb_row_lock_waits', 327 |             'lock_deadlocks', 328 |             'lock_timeouts', 329 |         ] 330 |         local.names = ',\n       '.join( 331 |             [f"'{name}'" for name in 332 |              local.count_metrics + local.delta_metrics] 333 |         ) 334 | 335 |         # Get the number of lock errors 336 |         local.sql_errors = """ 337 | SELECT error_number, error_name, sum_error_raised 338 |   FROM performance_schema.events_errors_summary_global_by_error 339 |  WHERE error_name IN ('ER_LOCK_WAIT_TIMEOUT', 'ER_LOCK_DEADLOCK')""".strip() 340 | 341 |         # Get statistics per statement type 342 |         local.sql_stmts_events = """ 343 | SELECT event_name, count_star, sum_errors 344 |   FROM performance_schema.events_statements_summary_global_by_event_name 345 |  WHERE event_name IN ('statement/sql/alter_table', 346 |                       'statement/sql/update') 347 | """ 348 |         # Get lock related metrics 349 |         local.sql_metrics = f""" 350 | SELECT Variable_name, Variable_value 351 |   FROM sys.metrics 352 |  WHERE Variable_name IN ( 353 |        {local.names} 354 |        )""".strip() 355 | 356 |         self._log_lock.acquire() 357 |         print('') 358 |         self._exec_and_print_sql(session, local.sql_errors) 359 |         self._exec_and_print_sql(session, local.sql_stmts_events) 360 |         self._exec_and_print_sql(session, local.sql_metrics) 361 |         self._log_lock.release() 362 | 363 |         local.interval = timedelta(seconds=1) 364 |         local.next_wakeup = datetime.now() 365 |         local.done = False 366 |         while not local.done: 367 |             local.metrics.collect() 368 |             local.next_wakeup += local.interval 369 |             local.sleep_time = (local.next_wakeup - 370 |                                 datetime.now()).total_seconds() 371 |             local.done = done.wait(local.sleep_time) 372 | 373 |         self._log_lock.acquire() 374 |         print('') 375 |         print('-- Metrics reported by count collected during the test:') 376 |         local.metrics.write_csv(local.count_metrics) 377 |         print('') 378 |         print('-- Metrics reported by rate collected during the test:') 379 |         local.metrics.write_rate_csv(local.delta_metrics) 380 |         print('') 381 |         self._exec_and_print_sql(session, local.sql_errors) 382 |         self._exec_and_print_sql(session, local.sql_stmts_events) 383 |         self._exec_and_print_sql(session, local.sql_metrics) 384 |         self._log_lock.release() 385 | 386 |     def execute(self, uri): 387 |         """This workload uses five connections in five threads: 388 |         * Two that keep updating the sakila.customer table in such a 389 |           way that there is always a transaction ongoing but none of 390 |           the transactions are long running. 391 |         * One that executes ALTER TABLE sakila.inventory. 392 |         * One that updates the sakila.film_category table. 393 |         * One that updates the sakila.category table.
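        In addition, two monitoring connections (created further down in
        this method) report metrics and lock wait information while the
        workload runs.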
394 | """ 395 | 396 | # Create the mutexes used to ensure the correct flow 397 | self._log.lock = self._log_lock 398 | # self._log.level = libs.log.DEBUG 399 | self._mutex_payment_start = threading.Lock() 400 | self._mutex_payment_commit = threading.Lock() 401 | self._done = threading.Event() 402 | 403 | # Create the five sessions 404 | sessions = { 405 | 'film_category': libs.util.get_session(self._workload, uri), 406 | 'category': libs.util.get_session(self._workload, uri), 407 | 'customer_1': libs.util.get_session(self._workload, uri), 408 | 'customer_2': libs.util.get_session(self._workload, uri), 409 | 'alter': libs.util.get_session(self._workload, uri), 410 | } 411 | 412 | # Get the processlist, thread, and event ids for the connections 413 | for name in sessions: 414 | (self._processlist_ids[name], 415 | self._thread_ids[name], 416 | self._event_ids[name] 417 | ) = libs.query.get_connection_ids(sessions[name]) 418 | 419 | # Output the connection and thread ids, so make it easier to 420 | # investigate the workload. 421 | self._log.ids( 422 | list(self._processlist_ids.values()), 423 | list(self._thread_ids.values()), 424 | list(self._event_ids.values()) 425 | ) 426 | self._sql_formatter = libs.query.Formatter( 427 | list(self._processlist_ids.values()), 428 | list(self._thread_ids.values()), 429 | list(self._event_ids.values()) 430 | ) 431 | 432 | # Start a monitoring thread which will output diagnostics 433 | # information. The monitoring thread is stopped by through 434 | # a threading event. 435 | monitor_done = threading.Event() 436 | monitor_metrics_session = libs.util.get_session(self._workload, uri) 437 | monitor_waits_session = libs.util.get_session(self._workload, uri) 438 | monitor_metrics = threading.Thread( 439 | target=self._monitor_metrics, 440 | daemon=True, 441 | args=(monitor_metrics_session, monitor_done) 442 | ) 443 | monitor_metrics.start() 444 | monitor_waits = threading.Thread( 445 | target=self._monitor_waits, 446 | daemon=True, 447 | args=(monitor_waits_session, ) 448 | ) 449 | monitor_waits.start() 450 | self._log.debug('Waiting 2 seconds to let the monitoring collect ' + 451 | 'some information before starting the test.') 452 | sleep(2) 453 | 454 | # Create the threads 455 | threads = [] 456 | for name in sessions: 457 | args = [sessions[name]] 458 | if name in ('customer_1', 'customer_2'): 459 | target = getattr(self, f'_customer') 460 | # Set the customer_id for each thread 461 | if name == 'customer_1': 462 | args.append(42) 463 | args.append(self._thread_ids['customer_2']) 464 | else: 465 | args.append(99) 466 | args.append(self._thread_ids['customer_1']) 467 | else: 468 | target = getattr(self, f'_{name}') 469 | 470 | threads.append( 471 | threading.Thread(target=target, daemon=True, name=name, 472 | args=args) 473 | ) 474 | 475 | # Start the threads - sleep a second between each thread to 476 | # ensure that they execute their first statement in the right 477 | # order. 
478 | self._log.debug('Starting the threads.') 479 | for thread in threads: 480 | thread.start() 481 | sleep(0.5) 482 | self._log.debug('All threads started.') 483 | 484 | # Let the test run for a while 485 | sleep(self._runtime) 486 | 487 | # Output the monitoring data 488 | monitor_done.set() 489 | monitor_metrics.join() 490 | monitor_metrics_session.close() 491 | 492 | # Clean up 493 | self._log.info('Stopping the threads.') 494 | self._done.set() 495 | monitor_waits.join() 496 | monitor_waits_session.close() 497 | # Wait for the threads to complete their work 498 | for thread in threads: 499 | thread.join() 500 | sessions[thread.name].close() 501 | 502 | self._log.lock = None 503 | print('') 504 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_18-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 18-1. Semaphore Waits 2 | name: Listing 18-1 3 | description: Semaphore Waits 4 | implementation: [workloads.listing_18_1, SemaphoreWaits, {}] 5 | investigations: 6 | - sql: | 7 | SELECT variable_name, variable_value AS value, enabled 8 | FROM sys.metrics 9 | WHERE type = 'InnoDB Metrics - adaptive_hash_index' 10 | ORDER BY variable_name 11 | - sql: SHOW ENGINE INNODB MUTEX 12 | - sql: SHOW ENGINE INNODB STATUS 13 | format: vertical 14 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-1. Example use of the metadata_locks table 2 | name: Listing 2-1 3 | description: Example use of the metadata_locks table 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: SELECT * FROM world.city WHERE ID = 130 10 | - connection: 1 11 | sql: | 12 | SELECT * 13 | FROM performance_schema.metadata_locks 14 | WHERE OBJECT_TYPE = 'TABLE' 15 | AND OBJECT_SCHEMA = 'world' 16 | AND OBJECT_NAME = 'city' 17 | AND OWNER_THREAD_ID = PS_CURRENT_THREAD_ID() 18 | format: vertical 19 | - connection: 1 20 | sql: ROLLBACK 21 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-2. Example of using the table_handles table 2 | name: Listing 2-2 3 | description: Example of using the table_handles table 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: LOCK TABLE world.city READ 8 | - connection: 1 9 | sql: | 10 | SELECT * 11 | FROM performance_schema.table_handles 12 | WHERE OBJECT_SCHEMA = 'world' 13 | AND OBJECT_NAME = 'city' 14 | AND OWNER_THREAD_ID = PS_CURRENT_THREAD_ID() 15 | format: vertical 16 | - connection: 1 17 | sql: UNLOCK TABLES 18 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-3. 
Using the data_locks table 2 | name: Listing 2-3 3 | description: Using the data_locks table 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | SELECT * 11 | FROM world.city 12 | WHERE ID = 130 13 | FOR SHARE 14 | - connection: 1 15 | sql: | 16 | SELECT * 17 | FROM performance_schema.data_locks 18 | WHERE THREAD_ID = PS_CURRENT_THREAD_ID() 19 | format: vertical 20 | - connection: 1 21 | sql: ROLLBACK 22 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-4. Example of a lock error in the statement tables 2 | name: Listing 2-4 3 | description: Example of a lock error in the statement tables 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE ID = 130 13 | - connection: 2 14 | sql: SET SESSION innodb_lock_wait_timeout = 1 15 | - connection: 2 16 | sql: START TRANSACTION 17 | - connection: 2 18 | sql: | 19 | UPDATE world.city 20 | SET Population = Population + 1 21 | WHERE ID = 130 22 | - connection: 2 23 | sql: | 24 | SELECT thread_id, event_id, 25 | FORMAT_PICO_TIME(lock_time) AS lock_time, 26 | sys.format_statement(SQL_TEXT) AS statement, 27 | digest, mysql_errno, 28 | returned_sqlstate, message_text, errors 29 | FROM performance_schema.events_statements_history 30 | WHERE thread_id = PS_CURRENT_THREAD_ID() 31 | AND mysql_errno > 0 32 | format: vertical 33 | - connection: 1 34 | sql: ROLLBACK 35 | - connection: 2 36 | sql: ROLLBACK 37 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-5. Lock metrics 2 | name: Listing 2-5 3 | description: Lock metrics 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | SELECT Variable_name, 9 | Variable_value AS Value, 10 | Enabled 11 | FROM sys.metrics 12 | WHERE Variable_name LIKE 'innodb_row_lock%' 13 | OR Variable_name LIKE 'Table_locks%' 14 | OR Variable_name LIKE 'innodb_rwlock_%' 15 | OR Type = 'InnoDB Metrics - lock' 16 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-6.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-6. Using the INNODB_METRICS view 2 | name: Listing 2-6 3 | description: Using the INNODB_METRICS view 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: SET GLOBAL innodb_monitor_enable = 'icp%' 8 | - connection: 1 9 | sql: | 10 | SELECT NAME, SUBSYSTEM, COUNT, MIN_COUNT, 11 | MAX_COUNT, AVG_COUNT, 12 | STATUS, COMMENT 13 | FROM information_schema.INNODB_METRICS 14 | WHERE SUBSYSTEM = 'icp' 15 | format: vertical 16 | - connection: 1 17 | sql: SET GLOBAL innodb_monitor_disable = 'module_icp' 18 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_2-7.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 2-7. 
An example of creating a deadlock 2 | name: Listing 2-7 3 | description: An example of creating a deadlock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE ID = 130 13 | - connection: 2 14 | sql: START TRANSACTION 15 | - connection: 2 16 | sql: | 17 | UPDATE world.city 18 | SET Population = Population + 1 19 | WHERE ID = 3805 20 | - connection: 2 21 | wait: No 22 | sql: | 23 | UPDATE world.city 24 | SET Population = Population + 1 25 | WHERE ID = 130 26 | - connection: 1 27 | sql: | 28 | UPDATE world.city 29 | SET Population = Population + 1 30 | WHERE ID = 3805 31 | investigations: 32 | - sql: SET GLOBAL innodb_status_output_locks = ON 33 | - sql: SET GLOBAL innodb_status_output_locks = OFF 34 | - sql: SHOW ENGINE INNODB STATUS 35 | format: vertical 36 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_3-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 3-1. Example transactions 2 | name: Listing 3-1 3 | description: Example transactions 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: UPDATE world.city SET Population = Population + MOD(ID, 2) + SLEEP(0.01) 10 | wait: No 11 | show_result: No 12 | - connection: 2 13 | sql: SET SESSION autocommit = ON 14 | - connection: 2 15 | sql: SELECT COUNT(*) FROM world.city WHERE ID > SLEEP(0.01) 16 | wait: No 17 | show_result: No 18 | investigations: 19 | - sql: | 20 | SELECT * 21 | FROM information_schema.INNODB_TRX 22 | WHERE trx_mysql_thread_id IN ({processlist_ids}) 23 | format: vertical 24 | - sql: | 25 | SELECT * 26 | FROM information_schema.INNODB_TRX 27 | WHERE trx_started < NOW() - INTERVAL 10 SECOND; 28 | format: vertical 29 | - sql: | 30 | SELECT thd.thread_id, thd.processlist_id, 31 | trx.trx_id, stmt.event_id, trx.trx_started, 32 | TO_SECONDS(NOW()) - 33 | TO_SECONDS(trx.trx_started 34 | ) AS age_seconds, 35 | trx.trx_rows_locked, trx.trx_rows_modified, 36 | FORMAT_PICO_TIME(stmt.timer_wait) AS latency, 37 | stmt.rows_examined, stmt.rows_affected, 38 | sys.format_statement(SQL_TEXT) as statement 39 | FROM information_schema.INNODB_TRX trx 40 | INNER JOIN performance_schema.threads thd 41 | ON thd.processlist_id = trx.trx_mysql_thread_id 42 | INNER JOIN performance_schema.events_statements_current stmt 43 | USING (thread_id) 44 | WHERE trx_started < NOW() - INTERVAL 10 SECOND 45 | format: vertical 46 | - sql: SHOW ENGINE INNODB STATUS 47 | format: vertical 48 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_3-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 3-5. InnoDB metrics related to transactions 2 | name: Listing 3-5 3 | description: InnoDB metrics related to transactions 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | SELECT NAME, COUNT, STATUS, COMMENT 9 | FROM information_schema.INNODB_METRICS 10 | WHERE SUBSYSTEM = 'transaction' 11 | format: vertical 12 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_3-6.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 3-6. 
Using the sys.metrics view to get the transaction metrics 2 | name: Listing 3-6 3 | description: Using the sys.metrics view to get the transaction metrics 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | SELECT Variable_name AS Name, 9 | Variable_value AS Value, 10 | Enabled 11 | FROM sys.metrics 12 | WHERE Type = 'InnoDB Metrics - transaction' 13 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_4-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 4-1. Example transactions 2 | name: Listing 4-1 3 | description: Example transactions 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: UPDATE world.city SET Population = 5200000 WHERE ID = 130 10 | wait: No 11 | show_result: No 12 | - connection: 1 13 | sql: UPDATE world.city SET Population = 4900000 WHERE ID = 131 14 | wait: No 15 | show_result: No 16 | - connection: 1 17 | sql: UPDATE world.city SET Population = 2400000 WHERE ID = 132 18 | wait: No 19 | show_result: No 20 | - connection: 1 21 | sql: UPDATE world.city SET Population = 2000000 WHERE ID = 133 22 | wait: No 23 | show_result: No 24 | - connection: 2 25 | sql: XA START 'abc', 'def', 1 26 | wait: No 27 | show_result: No 28 | - connection: 2 29 | sql: UPDATE world.city SET Population = 900000 WHERE ID = 3805 30 | wait: No 31 | show_result: No 32 | completions: 33 | - connection: 1 34 | sql: ROLLBACK 35 | - connection: 2 36 | sql: XA END 'abc', 'def', 1 37 | - connection: 2 38 | sql: XA ROLLBACK 'abc', 'def', 1 39 | investigations: 40 | - sql: | 41 | SELECT * 42 | FROM performance_schema.events_transactions_current 43 | WHERE state = 'ACTIVE' 44 | format: vertical 45 | - sql: | 46 | SELECT sql_text 47 | FROM performance_schema.events_statements_history 48 | WHERE thread_id = ? 49 | AND event_id = ? 50 | parameters: [thread_id, event_id+1] 51 | format: vertical 52 | - sql: | 53 | SET @thread_id = ?, 54 | @event_id = ?, 55 | @nesting_event_id = ? 56 | parameters: [thread_id, event_id+2, event_id+1] 57 | - sql: | 58 | SELECT event_id, sql_text, 59 | FORMAT_PICO_TIME(timer_wait) AS latency, 60 | IF(end_event_id IS NULL, 'YES', 'NO') AS current 61 | FROM ((SELECT event_id, end_event_id, 62 | timer_wait, 63 | sql_text, nesting_event_id, 64 | nesting_event_type 65 | FROM performance_schema.events_statements_current 66 | WHERE thread_id = @thread_id 67 | ) UNION ( 68 | SELECT event_id, end_event_id, 69 | timer_wait, 70 | sql_text, nesting_event_id, 71 | nesting_event_type 72 | FROM performance_schema.events_statements_history 73 | WHERE thread_id = @thread_id 74 | ) 75 | ) events 76 | WHERE (nesting_event_type = 'TRANSACTION' 77 | AND nesting_event_id = @event_id) 78 | OR event_id = @nesting_event_id 79 | ORDER BY event_id DESC 80 | format: vertical 81 | - sql: | 82 | SELECT * 83 | FROM sys.session 84 | WHERE trx_state = 'ACTIVE' 85 | AND conn_id <> CONNECTION_ID() 86 | format: vertical 87 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_4-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 4-5. 
The events_transactions_summary_global_by_event_name table 2 | name: Listing 4-5 3 | description: The events_transactions_summary_global_by_event_name table 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | SELECT * 9 | FROM performance_schema.events_transactions_summary_global_by_event_name 10 | format: vertical 11 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_5-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 5-1. Example of obtaining a shared lock 2 | name: Listing 5-1 3 | description: Example of obtaining a shared lock 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: SELECT * FROM world.city WHERE ID = 130 FOR SHARE 10 | - connection: 1 11 | sql: | 12 | SELECT object_type, object_schema, object_name, 13 | lock_type, lock_duration, lock_status 14 | FROM performance_schema.metadata_locks 15 | WHERE OWNER_THREAD_ID = PS_CURRENT_THREAD_ID() 16 | AND OBJECT_SCHEMA <> 'performance_schema' 17 | format: vertical 18 | - connection: 1 19 | sql: | 20 | SELECT engine, object_schema, object_name, 21 | lock_type, lock_mode, lock_status 22 | FROM performance_schema.data_locks 23 | WHERE THREAD_ID = PS_CURRENT_THREAD_ID() 24 | format: vertical 25 | - connection: 1 26 | sql: ROLLBACK 27 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_5-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 5-2. Example of obtaining exclusive locks 2 | name: Listing 5-2 3 | description: Example of obtaining exclusive locks 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE ID = 130 13 | - connection: 1 14 | sql: | 15 | SELECT object_type, object_schema, object_name, 16 | lock_type, lock_duration, lock_status 17 | FROM performance_schema.metadata_locks 18 | WHERE OWNER_THREAD_ID = PS_CURRENT_THREAD_ID() 19 | AND OBJECT_SCHEMA <> 'performance_schema' 20 | format: vertical 21 | - connection: 1 22 | sql: | 23 | SELECT engine, object_schema, object_name, 24 | lock_type, lock_mode, lock_status 25 | FROM performance_schema.data_locks 26 | WHERE THREAD_ID = PS_CURRENT_THREAD_ID() 27 | format: vertical 28 | - connection: 1 29 | sql: ROLLBACK 30 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-1. 
A deadlock for user-level locks 2 | name: Listing 6-1 3 | description: A deadlock for user-level locks 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SELECT GET_LOCK('my_lock_1', -1) 8 | - connection: 2 9 | sql: SELECT GET_LOCK('my_lock_2', -1) 10 | - connection: 2 11 | sql: SELECT GET_LOCK('my_lock_1', -1) 12 | wait: No 13 | show_result: No 14 | - connection: 1 15 | sql: SELECT GET_LOCK('my_lock_2', -1) 16 | completions: 17 | - connection: 1 18 | sql: SELECT RELEASE_ALL_LOCKS() 19 | - connection: 2 20 | sql: SELECT RELEASE_ALL_LOCKS() 21 | investigations: 22 | - sql: | 23 | SELECT * 24 | FROM performance_schema.metadata_locks 25 | WHERE object_type = 'USER LEVEL LOCK' 26 | AND owner_thread_id IN ({thread_ids}) 27 | format: vertical 28 | - sql: | 29 | SELECT thread_id, event_id, sql_text, 30 | mysql_errno, returned_sqlstate, message_text, 31 | errors, warnings 32 | FROM performance_schema.events_statements_history 33 | WHERE thread_id = ? AND event_id > ? 34 | ORDER BY event_id 35 | parameters: [thread_id, event_id] 36 | format: vertical 37 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-3. Example of waiting for a flush lock 2 | name: Listing 6-3 3 | description: Example of waiting for a flush lock 4 | connections: 4 5 | queries: 6 | - connection: 1 7 | sql: SELECT city.*, SLEEP(3) FROM world.city WHERE ID = 130 8 | wait: No 9 | show_result: No 10 | - connection: 2 11 | sql: FLUSH TABLES world.city 12 | wait: No 13 | show_result: No 14 | - connection: 3 15 | sql: SELECT * FROM world.city WHERE ID = 201 16 | wait: No 17 | show_result: No 18 | - connection: 4 19 | comment: Query sys.session for the three threads involved in the lock situation 20 | sql: | 21 | SELECT thd_id, conn_id, state, 22 | current_statement 23 | FROM sys.session 24 | WHERE current_statement IS NOT NULL 25 | AND thd_id IN ({thread_ids_not_self}) 26 | ORDER BY thd_id 27 | format: vertical 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-4. Example of waiting for table metadata lock 2 | name: Listing 6-4 3 | description: Example of waiting for table metadata lock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: SELECT * FROM world.city WHERE ID = 130 10 | format: vertical 11 | - connection: 2 12 | sql: OPTIMIZE TABLE world.city 13 | wait: No 14 | show_result: No 15 | completions: 16 | - connection: 1 17 | sql: ROLLBACK 18 | investigations: 19 | - sql: | 20 | SELECT thd_id, conn_id, state, 21 | current_statement, statement_latency, 22 | last_statement, trx_state 23 | FROM sys.session 24 | WHERE conn_id IN ({processlist_ids}) 25 | ORDER BY conn_id 26 | format: vertical 27 | - sql: | 28 | SELECT object_type, object_schema, object_name, 29 | lock_type, lock_duration, lock_status, 30 | owner_thread_id 31 | FROM performance_schema.metadata_locks 32 | WHERE owner_thread_id IN ({thread_ids}) 33 | AND object_type = 'TABLE' 34 | format: vertical 35 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-7.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-7. 
Using explicit table locks 2 | name: Listing 6-7 3 | description: Using explicit table locks 4 | connections: 1 5 | queries: 6 | - connection: 1 7 | sql: | 8 | LOCK TABLES world.country READ, 9 | world.countrylanguage READ, 10 | world.city WRITE 11 | - connection: 1 12 | sql: | 13 | UPDATE world.country 14 | SET Population = Population + 1 15 | WHERE Code = 'AUS' 16 | - connection: 1 17 | sql: | 18 | SELECT * 19 | FROM sakila.film 20 | WHERE film_id = 1 21 | - connection: 1 22 | sql: UNLOCK TABLES 23 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-8.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-8. Example of an InnoDB intention shared lock 2 | name: Listing 6-8 3 | description: Example of an InnoDB intention shared lock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | SELECT * 11 | FROM world.city 12 | WHERE ID = 130 13 | FOR SHARE 14 | format: vertical 15 | - connection: 2 16 | sql: | 17 | SELECT engine, thread_id, object_schema, 18 | object_name, lock_type, lock_mode, 19 | lock_status, lock_data 20 | FROM performance_schema.data_locks 21 | WHERE lock_type = 'TABLE' 22 | AND thread_id = {thread_id_connection_1} 23 | format: vertical 24 | - connection: 1 25 | sql: ROLLBACK 26 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_6-9.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 6-9. Example of a conflict for the backup lock 2 | name: Listing 6-9 3 | description: Example of a conflict for the backup lock 4 | connections: 3 5 | protocol: mysql 6 | queries: 7 | - connection: 1 8 | sql: LOCK INSTANCE FOR BACKUP 9 | - connection: 2 10 | sql: OPTIMIZE TABLE world.city 11 | wait: No 12 | show_result: No 13 | - connection: 3 14 | sql: | 15 | SELECT object_type, object_schema, object_name, 16 | lock_type, lock_duration, lock_status, 17 | owner_thread_id 18 | FROM performance_schema.metadata_locks 19 | WHERE object_type = 'BACKUP LOCK' 20 | AND owner_thread_id IN ({thread_ids_not_self}) 21 | format: vertical 22 | - connection: 1 23 | sql: UNLOCK INSTANCE 24 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_7-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 7-1. Example of InnoDB record locks 2 | name: Listing 7-1 3 | description: Example of InnoDB record locks 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE CountryCode = 'LUX' 13 | - connection: 2 14 | sql: | 15 | SELECT thread_id, event_id, 16 | object_schema, object_name, index_name, 17 | lock_type, lock_mode, lock_status, lock_data 18 | FROM performance_schema.data_locks 19 | WHERE thread_id = {thread_id_connection_1} 20 | format: vertical 21 | - connection: 1 22 | sql: ROLLBACK 23 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_7-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 7-2. 
Example of predicate/page locks 2 | name: Listing 7-2 3 | description: Example of predicate/page locks 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE sakila.address 11 | SET address = '42 Concurrency Boulevard', 12 | district = 'Punjab', 13 | city_id = 208, 14 | postal_code = 40509, 15 | location = ST_GeomFromText('POINT(75.91 31.53)', 0) 16 | WHERE address_id = 372 17 | - connection: 2 18 | sql: | 19 | SELECT engine_lock_id, thread_id, event_id, 20 | object_schema, object_name, index_name, 21 | lock_type, lock_mode, lock_status, lock_data 22 | FROM performance_schema.data_locks 23 | WHERE thread_id = {thread_ids_not_self} 24 | AND index_name = 'idx_location' 25 | format: vertical 26 | - connection: 1 27 | sql: ROLLBACK 28 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_7-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 7-3. Example of an insert intention lock 2 | name: Listing 7-3 3 | description: Example of an insert intention lock 4 | connections: 3 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | SELECT * 11 | FROM world.city 12 | WHERE ID > 4079 13 | FOR UPDATE 14 | format: vertical 15 | - connection: 2 16 | sql: START TRANSACTION 17 | - connection: 2 18 | sql: | 19 | INSERT INTO world.city 20 | VALUES (4080, 'Darwin', 'AUS', 21 | 'Northern Territory', 146000) 22 | wait: No 23 | show_result: No 24 | - connection: 3 25 | sql: | 26 | SELECT thread_id, event_id, 27 | object_schema, object_name, index_name, 28 | lock_type, lock_mode, lock_status, lock_data 29 | FROM performance_schema.data_locks 30 | WHERE thread_id IN ({thread_ids_not_self}) 31 | AND object_name = 'city' 32 | AND index_name = 'PRIMARY' 33 | format: vertical 34 | - connection: 1 35 | sql: ROLLBACK 36 | - connection: 2 37 | sql: ROLLBACK 38 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_7-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 7-4. 
Example of synchronization waits 2 | name: Listing 7-4 3 | description: Example of synchronization waits 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: | 8 | UPDATE performance_schema.setup_instruments 9 | SET ENABLED = 'YES', 10 | TIMED = 'YES' 11 | WHERE NAME LIKE 'wait/synch/%' 12 | - connection: 1 13 | sql: | 14 | UPDATE performance_schema.setup_consumers 15 | SET ENABLED = 'YES' 16 | WHERE NAME IN ('events_waits_current', 'events_waits_history_long') 17 | - connection: 2 18 | sql: | 19 | UPDATE world.city 20 | SET Population = Population + 1 21 | WHERE CountryCode = 'USA' 22 | - connection: 1 23 | sql: | 24 | SELECT REPLACE(event_name, 'wait/synch/', '') AS event, COUNT(*) 25 | FROM performance_schema.events_waits_history_long 26 | WHERE thread_id = {thread_id_connection_2} 27 | AND event_name LIKE 'wait/synch/%' 28 | GROUP BY event_name 29 | WITH ROLLUP 30 | ORDER BY COUNT(*) 31 | - connection: 1 32 | sql: | 33 | UPDATE performance_schema.setup_instruments 34 | SET ENABLED = 'NO', 35 | TIMED = 'NO' 36 | WHERE NAME LIKE 'wait/synch/%' 37 | - connection: 1 38 | sql: | 39 | UPDATE performance_schema.setup_consumers 40 | SET ENABLED = 'NO' 41 | WHERE NAME IN ('events_waits_current', 'events_waits_history_long') 42 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_8-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 8-1. Lock wait timeout for table lock request 2 | name: Listing 8-1 3 | description: Lock wait timeout for table lock request 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: LOCK TABLES world.city WRITE 8 | - connection: 2 9 | sql: SET SESSION lock_wait_timeout = 5 10 | - connection: 2 11 | sql: LOCK TABLES world.city WRITE 12 | - connection: 1 13 | sql: UNLOCK TABLES 14 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_8-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 8-2. Example of an InnoDB lock wait timeout 2 | name: Listing 8-2 3 | description: Example of an InnoDB lock wait timeout 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE ID = 130 13 | - connection: 2 14 | sql: SET SESSION innodb_lock_wait_timeout = 3 15 | - connection: 2 16 | sql: | 17 | UPDATE world.city 18 | SET Population = Population + 1 19 | WHERE ID = 130 20 | - connection: 1 21 | sql: ROLLBACK 22 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_8-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 8-3. 
Example of a deadlock 2 | name: Listing 8-3 3 | description: Example of a deadlock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | UPDATE world.city 11 | SET Population = Population + 1 12 | WHERE ID = 130 13 | - connection: 2 14 | sql: START TRANSACTION 15 | - connection: 2 16 | sql: | 17 | UPDATE world.city 18 | SET Population = Population + 1 19 | WHERE ID = 3805 20 | - connection: 2 21 | wait: No 22 | sql: | 23 | UPDATE world.city 24 | SET Population = Population + 1 25 | WHERE ID = 130 26 | - connection: 1 27 | sql: | 28 | UPDATE world.city 29 | SET Population = Population + 1 30 | WHERE ID = 3805 31 | - connection: 1 32 | sql: ROLLBACK 33 | - connection: 2 34 | sql: ROLLBACK 35 | investigations: 36 | - sql: SHOW ENGINE INNODB STATUS 37 | format: vertical 38 | - sql: | 39 | SELECT thread_id, event_id, sql_text, 40 | mysql_errno, returned_sqlstate, message_text, 41 | errors, warnings 42 | FROM performance_schema.events_statements_history 43 | WHERE thread_id = ? AND event_id > ? 44 | ORDER BY event_id 45 | parameters: [thread_id, event_id] 46 | format: vertical 47 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_8-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 8-4. A single row deadlock 2 | name: Listing 8-4 3 | description: A single row deadlock 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: SELECT * FROM world.city WHERE ID = 130 FOR SHARE 10 | - connection: 2 11 | sql: START TRANSACTION 12 | - connection: 2 13 | sql: | 14 | UPDATE world.city 15 | SET Population = Population + 1 16 | WHERE ID = 130 17 | wait: No 18 | - connection: 1 19 | sql: | 20 | UPDATE world.city 21 | SET Population = Population + 1 22 | WHERE ID = 130 23 | - connection: 1 24 | sql: ROLLBACK 25 | - connection: 2 26 | sql: ROLLBACK 27 | investigations: 28 | - sql: SHOW ENGINE INNODB STATUS 29 | format: vertical 30 | - sql: | 31 | SELECT thread_id, event_id, sql_text, 32 | mysql_errno, returned_sqlstate, message_text, 33 | errors, warnings 34 | FROM performance_schema.events_statements_history 35 | WHERE thread_id = ? AND event_id > ? 36 | ORDER BY event_id 37 | parameters: [thread_id, event_id] 38 | format: vertical 39 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_9-1.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 9-1. Record locks without an index on the Name column 2 | name: Listing 9-1 3 | description: Record locks without an index on the Name column 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: START TRANSACTION 8 | - connection: 1 9 | sql: | 10 | SELECT ID, Name, CountryCode, District 11 | FROM world.city 12 | WHERE Name = 'Sydney' 13 | FOR SHARE 14 | - connection: 2 15 | sql: | 16 | SELECT index_name, lock_type, 17 | lock_mode, COUNT(*) 18 | FROM performance_schema.data_locks 19 | WHERE object_schema = 'world' 20 | AND object_name = 'city' 21 | AND thread_id = {thread_ids_not_self} 22 | GROUP BY index_name, lock_type, lock_mode 23 | - connection: 1 24 | sql: ROLLBACK 25 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_9-2.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 9-2. 
Record locks with an index on the Name column 2 | name: Listing 9-2 3 | description: Record locks with an index on the Name column 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: | 8 | ALTER TABLE world.city 9 | ADD INDEX (Name) 10 | - connection: 1 11 | sql: START TRANSACTION 12 | - connection: 1 13 | sql: | 14 | SELECT ID, Name, CountryCode, District 15 | FROM world.city 16 | WHERE Name = 'Sydney' 17 | FOR SHARE 18 | - connection: 2 19 | sql: | 20 | SELECT index_name, lock_type, 21 | lock_mode, COUNT(*) 22 | FROM performance_schema.data_locks 23 | WHERE object_schema = 'world' 24 | AND object_name = 'city' 25 | AND thread_id = {thread_ids_not_self} 26 | GROUP BY index_name, lock_type, lock_mode 27 | - connection: 1 28 | sql: ROLLBACK 29 | - connection: 1 30 | sql: | 31 | ALTER TABLE world.city 32 | DROP INDEX Name 33 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_9-3.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 9-3. The locks held in the REPEATABLE READ transaction isolation level 2 | name: Listing 9-3 3 | description: The locks held in the REPEATABLE READ transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET SESSION transaction_isolation = 'REPEATABLE-READ' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | UPDATE world.city 13 | SET Population = 5000000 14 | WHERE Name = 'Sydney' 15 | AND CountryCode = 'AUS' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, COUNT(*) 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND thread_id = {thread_ids_not_self} 24 | GROUP BY index_name, lock_type, lock_mode 25 | - connection: 1 26 | sql: ROLLBACK 27 | - connection: 1 28 | sql: SET SESSION transaction_isolation = @@global.transaction_isolation 29 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_9-4.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing 9-4. The locks held in the READ-COMMITTED transaction isolation level 2 | name: Listing 9-4 3 | description: The locks held in the READ-COMMITTED transaction isolation level 4 | connections: 2 5 | queries: 6 | - connection: 1 7 | sql: SET SESSION transaction_isolation = 'READ-COMMITTED' 8 | - connection: 1 9 | sql: START TRANSACTION 10 | - connection: 1 11 | sql: | 12 | UPDATE world.city 13 | SET Population = 5000000 14 | WHERE Name = 'Sydney' 15 | AND CountryCode = 'AUS' 16 | - connection: 2 17 | sql: | 18 | SELECT index_name, lock_type, 19 | lock_mode, COUNT(*) 20 | FROM performance_schema.data_locks 21 | WHERE object_schema = 'world' 22 | AND object_name = 'city' 23 | AND thread_id = {thread_ids_not_self} 24 | GROUP BY index_name, lock_type, lock_mode 25 | - connection: 1 26 | sql: ROLLBACK 27 | - connection: 1 28 | sql: SET SESSION transaction_isolation = @@global.transaction_isolation 29 | -------------------------------------------------------------------------------- /concurrency_book/workloads/listing_B-5.yaml: -------------------------------------------------------------------------------- 1 | --- # Listing B-5. 
Example demonstrating the workload syntax 2 | name: Listing B-5 3 | description: Example demonstrating the workload syntax 4 | connections: 2 5 | concurrent: No 6 | loops: 1 7 | queries: 8 | - connection: 1 9 | sql: SET SESSION innodb_lock_wait_timeout = 1 10 | silent: No 11 | wait: Yes 12 | - connection: 1 13 | sql: START TRANSACTION 14 | - connection: 2 15 | sql: START TRANSACTION 16 | - connection: 1 17 | sql: SET @id = CEIL(RAND()*4079) 18 | - connection: 1 19 | sql: | 20 | SELECT * 21 | FROM world.city 22 | WHERE ID = @id 23 | format: json 24 | store: Yes 25 | - connection: 2 26 | sql: | 27 | UPDATE world.city 28 | SET Population = Population + 1 29 | WHERE ID = 130 30 | - connection: 1 31 | comment: This will cause a lock wait timeout 32 | sql: | 33 | UPDATE world.city 34 | SET Population = Population + 1 35 | WHERE Name = ? 36 | parameters: [Name] 37 | completions: 38 | - connection: 1 39 | sql: ROLLBACK 40 | - connection: 2 41 | sql: ROLLBACK 42 | investigations: 43 | - sql: | 44 | SELECT thread_id, event_id, sys.format_statement(sql_text) AS 'SQL', 45 | mysql_errno, message_text 46 | FROM performance_schema.events_statements_history 47 | WHERE thread_id IN ({thread_ids}) 48 | AND mysql_errno > 0 49 | ORDER BY thread_id, event_id 50 | format: vertical 51 | - sql: | 52 | SELECT THREAD_ID, EVENT_ID, SQL_TEXT 53 | FROM performance_schema.events_statements_history 54 | WHERE thread_id = ? AND event_id > ? 55 | ORDER BY thread_id, event_id 56 | parameters: [thread_id, event_id] 57 | format: vertical 58 | --------------------------------------------------------------------------------
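The deadlock workloads above (Listings 2-7 and 8-3) are normally replayed through the repository's generate.py runner. For readers who want to reproduce the same crossing-update scenario without the runner, the following is a minimal standalone sketch. It assumes mysql-connector-python is installed, the world sample database is loaded, and the connection settings are placeholders to adjust for your environment; it illustrates the pattern and is not part of the repository's code.

# A minimal sketch reproducing the crossing updates from Listing 8-3
# outside the workload runner. Assumptions: mysql-connector-python,
# a server with the world sample database, placeholder credentials.
import threading

import mysql.connector
from mysql.connector import errorcode

CONFIG = {'user': 'root', 'password': '', 'host': '127.0.0.1'}  # placeholders
UPDATE = 'UPDATE world.city SET Population = Population + 1 WHERE ID = %s'


def crossing_update(first_id, second_id, barrier):
    """Update two rows in the given order inside one transaction."""
    conn = mysql.connector.connect(**CONFIG)
    cursor = conn.cursor()
    try:
        conn.start_transaction()
        cursor.execute(UPDATE, (first_id,))
        # Wait until both transactions hold their first record lock,
        # so the second updates are guaranteed to cross.
        barrier.wait(timeout=10)
        cursor.execute(UPDATE, (second_id,))
        conn.commit()
    except mysql.connector.DatabaseError as err:
        if err.errno == errorcode.ER_LOCK_DEADLOCK:  # MySQL error 1213
            print(f'Deadlock detected, victim rolled back: {err}')
        conn.rollback()
    finally:
        cursor.close()
        conn.close()


if __name__ == '__main__':
    barrier = threading.Barrier(2)
    threads = [
        threading.Thread(target=crossing_update, args=(130, 3805, barrier)),
        threading.Thread(target=crossing_update, args=(3805, 130, barrier)),
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

InnoDB detects the cycle as soon as the second updates cross and rolls back one of the two transactions as the victim (typically the one that has modified the fewest rows), so rerunning the script can report the error 1213 on either side while the surviving transaction commits.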