├── .gitignore ├── BUGS ├── License ├── Makefile ├── README.md ├── pgcsvstat.c ├── pgdisplay.c ├── pgfe_cancel.c ├── pgfe_connect_utils.c ├── pgfe_query_utils.c ├── pgreport.c ├── pgreport_queries.h ├── pgstat.c └── pgwaitevent.c /.gitignore: -------------------------------------------------------------------------------- 1 | /pgcsvstat 2 | /pgdisplay 3 | /pgreport 4 | /pgstat 5 | /pgwaitevent 6 | *.o 7 | -------------------------------------------------------------------------------- /BUGS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gleu/pgstats/f62c689b5bf6ce0b2040b80419ae99a7b9d9aaad/BUGS -------------------------------------------------------------------------------- /License: -------------------------------------------------------------------------------- 1 | These softwares, pgcsvstat, pgdisplay, pgreport, and pgstat, are released 2 | under the terms of the PostgreSQL License. 3 | 4 | Copyright (c) 2011-2025, Guillaume Lelarge 5 | 6 | Permission to use, copy, modify, and distribute this software and its 7 | documentation for any purpose, without fee, and without a written agreement 8 | is hereby granted, provided that the above copyright notice and this paragraph 9 | and the following two paragraphs appear in all copies. 10 | 11 | IN NO EVENT SHALL Guillaume Lelarge BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, 12 | SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING 13 | OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Guillaume Lelarge 14 | HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 15 | 16 | Guillaume Lelarge SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 18 | PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, 19 | AND Guillaume Lelarge HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, 20 | UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 21 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PGFILEDESC = "Statistics utilities" 2 | PGAPPICON = win32 3 | 4 | PROGRAMS = pgcsvstat pgstat pgdisplay pgwaitevent pgreport 5 | PGFELIBS = pgfe_connect_utils.o pgfe_query_utils.o pgfe_cancel.o 6 | 7 | PG_CPPFLAGS = -I$(libpq_srcdir) 8 | PG_LIBS = $(libpq_pgport) 9 | SCRIPTS_built = pgcsvstat pgstat pgdisplay pgwaitevent pgreport 10 | EXTRA_CLEAN = $(addsuffix .o, $(PROGRAMS)) $(PGFELIBS) 11 | 12 | PG_CONFIG = pg_config 13 | PGXS := $(shell $(PG_CONFIG) --pgxs) 14 | include $(PGXS) 15 | 16 | all: $(PROGRAMS) 17 | 18 | %: %.o $(WIN32RES) 19 | $(CC) $(CFLAGS) $^ $(libpq_pgport) $(LDFLAGS) -L $(pkglibdir) -lpgcommon -lpgport -lm -o $@$(X) 20 | 21 | pgcsvstat: pgcsvstat.o $(PGFELIBS) 22 | pgdisplay: pgdisplay.o $(PGFELIBS) 23 | pgstat: pgstat.o $(PGFELIBS) 24 | pgwaitevent: pgwaitevent.o $(PGFELIBS) 25 | pgreport: pgreport.o $(PGFELIBS) 26 | pgreport.o: pgreport_queries.h 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | README 2 | ====== 3 | 4 | This repository contains the source of a collection of tools. 5 | 6 | pgstat is a vmstat-like tool for PostgreSQL. 7 | 8 | pgreport is a reporting tool for PostgreSQL. It tries to get a lot of 9 | informations from the metadata and statistics of PostgreSQL. 
10 | 11 | pgwaitevent gathers every wait event for a specific PID, grouping them by 12 | queries. 13 | 14 | pgcsvstat outputs PostgreSQL statistics views into CSV files. The idea is that 15 | you can load them into any spreadsheet to get the graphs you want. 16 | 17 | pgdisplay tries to display a table in an informative way. Still pretty much 18 | experimental. 19 | 20 | They all should be compatible with the latest PostgreSQL release (13 right 21 | now), and down to the oldest stable release (9.5 right now). They may also be 22 | compatible with much older releases (8.x for most of them). 23 | 24 | Requirements 25 | ------------ 26 | 27 | To compile these tools, you will need the libpq library (.so), the libpgcommon, 28 | and libpgport libraries (.a), the PostgreSQL 14+ header files, and 29 | the pg_config tool. The header files and the tool are usually available in a 30 | -dev package. 31 | 32 | To use them once compiled, you only need the libpq library. Any version should 33 | be fine. 34 | 35 | Compilation 36 | ----------- 37 | 38 | You only have to do: 39 | 40 | ``` 41 | make 42 | make install 43 | ``` 44 | 45 | Usage 46 | ----- 47 | 48 | Use --help to get information on all command line options for these 49 | tools. 50 | 51 | More information on pgstat 52 | -------------------------- 53 | 54 | pgstat is a command-line tool that connects to a database and grabs its 55 | activity statistics. As PostgreSQL has many statistics, you have a command 56 | switch to choose the one you want (-s): 57 | 58 | * archiver for pg_stat_archiver (9.4+) 59 | * bgwriter for pg_stat_bgwriter 60 | * checkpointer for pg_stat_checkpointer (17+) 61 | * connection for connections by type (9.2+) 62 | * database for pg_stat_database 63 | * table for pg_stat_all_tables 64 | * tableio for pg_statio_all_tables 65 | * index for pg_stat_all_indexes 66 | * function for pg_stat_user_functions 67 | * statement for pg_stat_statements 68 | * xlog for xlog writes (9.2+) 69 | * tempfile for temporary file usage 70 | * waitevent for wait event usage (9.6+) 71 | * progress_analyze to get the progress on an ANALYZE statement (13+) 72 | * progress_basebackup to get the progress on a BASE BACKUP (replication) statement (13+) 73 | * progress_cluster to get the progress on a CLUSTER/VACUUM FULL statement (12+) 74 | * progress_createindex to get the progress on a CREATE INDEX statement (12+) 75 | * progress_vacuum to get the progress on a VACUUM statement (9.6+) 76 | * pbpools for pgBouncer pools statistics 77 | * pbstats for pgBouncer general statistics 78 | 79 | It looks a lot like vmstat. You tell it which statistics you want, and how 80 | often to gather them. Just like this: 81 | 82 | ``` 83 | $ pgstat -s connection 84 | - total - active - lockwaiting - idle in transaction - idle - 85 | 1546 15 0 0 1531 86 | 1544 17 0 0 1527 87 | 1544 14 0 0 1530 88 | 1546 26 0 0 1520 89 | 1543 21 0 0 1522 90 | ``` 91 | 92 | Yeah, way too many idle connections. Actually, way too many connections. 93 | Definitely needs a pooler there.
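pgstat also knows about pgBouncer (the pbpools and pbstats reports above), so once a pooler is in place you can keep watching it with the same tool. A hypothetical session, assuming pgstat takes the usual libpq connection switches (-h, -p, -U, -d) like its sibling tools, and that pgBouncer's admin console listens on its default 6432 port:

```
$ pgstat -s pbpools -h 127.0.0.1 -p 6432 -U pgbouncer -d pgbouncer 5
```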
94 | 95 | This is what happens on a 10-seconds 10-clients pgbench test: 96 | 97 | ``` 98 | $ pgstat -s database 1 99 | - backends - ------ xacts ------ -------------- blocks -------------- -------------- tuples -------------- ------ temp ------ ------- misc -------- 100 | commit rollback read hit read_time write_time ret fet ins upd del files bytes conflicts deadlocks 101 | 1 224041 17 24768 2803774 0 0 4684398 234716 2105701 16615 113 1 14016512 0 0 102 | 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 103 | 1 3 0 0 205 0 0 92 92 0 0 0 0 0 0 0 104 | 11 20 0 0 500 0 0 1420 184 0 1 0 0 0 0 0 105 | 11 69 0 1 4438 0 0 1736 986 68 204 0 0 0 0 0 106 | 11 136 0 12 4406 0 0 1767 270 135 405 0 0 0 0 0 107 | 11 108 0 0 3434 0 0 1394 214 107 321 0 0 0 0 0 108 | 11 96 0 0 3290 0 0 1240 190 95 285 0 0 0 0 0 109 | 11 125 0 0 4045 0 0 1620 248 124 372 0 0 0 0 0 110 | 11 126 0 0 4222 0 0 1628 250 125 375 0 0 0 0 0 111 | 11 111 0 0 3644 0 0 1436 220 110 330 0 0 0 0 0 112 | 11 78 0 0 2549 0 0 1918 161 75 225 0 0 0 0 0 113 | 11 118 0 0 3933 0 0 1524 234 117 351 0 0 0 0 0 114 | 1 130 0 0 4276 0 0 1685 258 129 387 0 0 0 0 0 115 | 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 116 | 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 117 | 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 118 | ``` 119 | 120 | You clearly see when it starts, when it stops, and what it did during the 10 121 | seconds. You can filter on a specific database with the -f command line 122 | option. Here is what happens at the tables level: 123 | 124 | ``` 125 | $ pgstat -s table -d b1 1 126 | -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- 127 | scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze 128 | 68553 1467082 264957 266656 7919869 59312 113 57262 4611779 3782 5401 22 10 4 22 129 | 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 130 | 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 131 | 231 2351 1116 1222 61 184 0 180 61 124 245 2 0 0 0 132 | 431 1750 240 240 120 360 0 358 120 242 480 0 0 0 0 133 | 385 1640 220 220 110 330 0 327 110 11 440 0 0 0 0 134 | 340 1475 190 190 95 285 0 285 95 189 380 0 0 0 0 135 | 398 1651 222 222 111 333 0 331 111 -2 444 0 0 0 0 136 | 353 1519 198 198 99 297 0 293 99 200 396 0 0 0 0 137 | 335 1453 186 186 93 279 0 274 93 -210 372 0 0 0 0 138 | 446 1838 256 256 128 384 0 381 128 104 512 0 0 0 0 139 | 425 1739 238 238 119 357 0 354 119 241 476 0 0 0 0 140 | 360 1552 204 204 102 306 0 305 102 -10 408 0 0 0 0 141 | 386 1629 218 218 109 327 0 325 109 57 436 0 0 0 0 142 | 437 1761 242 242 121 363 0 363 121 -292 484 0 0 0 0 143 | 373 1563 206 206 103 309 0 305 103 -1 412 0 0 0 0 144 | 323 1442 184 184 92 276 0 273 92 188 368 0 0 0 0 145 | 412 1706 232 232 116 348 0 346 116 76 464 0 0 0 0 146 | 291 1332 164 164 82 246 0 245 82 -216 328 0 0 0 0 147 | 189 1013 106 106 53 159 0 158 53 106 212 0 0 0 0 148 | 346 1508 196 196 98 294 0 290 98 -18 392 0 0 0 0 149 | 304 1376 172 172 86 258 0 258 86 -156 344 0 0 0 0 150 | 442 1794 248 248 124 372 0 368 124 -260 496 0 0 0 0 151 | 9 1371 157 260 0 13 0 13 -11602 -329 -6053 0 2 0 3 152 | 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 153 | 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 154 | ``` 155 | 156 | You can also filter by table name with the -f command line switch: 157 | 158 | ``` 159 | $ pgstat -s table -d b1 -f pgbench_history 1 160 | -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- 161 | scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze 
autoanalyze 162 | 0 0 0 0 21750 0 0 0 2022 0 0 1 0 1 7 163 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 164 | 0 0 0 0 64 0 0 0 64 0 64 0 0 0 0 165 | 0 0 0 0 122 0 0 0 122 0 122 0 0 0 0 166 | 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 167 | 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 168 | 0 0 0 0 88 0 0 0 88 0 88 0 0 0 0 169 | 0 0 0 0 116 0 0 0 116 0 116 0 0 0 0 170 | 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 171 | 0 0 0 0 61 0 0 0 61 0 61 0 0 0 0 172 | 0 0 0 0 42 0 0 0 42 0 42 0 0 0 0 173 | 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 174 | 0 0 0 0 55 0 0 0 55 0 55 0 0 0 0 175 | 0 0 0 0 121 0 0 0 121 0 121 0 0 0 0 176 | 0 0 0 0 68 0 0 0 -1942 0 -1011 0 0 0 1 177 | 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 178 | 0 0 0 0 109 0 0 0 109 0 109 0 0 0 0 179 | 0 0 0 0 94 0 0 0 94 0 94 0 0 0 0 180 | 0 0 0 0 120 0 0 0 120 0 120 0 0 0 0 181 | 0 0 0 0 110 0 0 0 110 0 110 0 0 0 0 182 | 0 0 0 0 100 0 0 0 100 0 100 0 0 0 0 183 | 0 0 0 0 115 0 0 0 115 0 115 0 0 0 0 184 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 185 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 186 | ``` 187 | 188 | We see that the activity on this table is quite different from what happens to 189 | the other tables. 190 | 191 | There's also a report from the pg_stat_statements extension. It works pretty well: 192 | 193 | ``` 194 | $ pgstat -s statement -d b1 195 | --------- misc ---------- ----------- shared ----------- ----------- local ----------- ----- temp ----- -------- time -------- 196 | calls time rows hit read dirty written hit read dirty written read written read written 197 | 383843 1756456.50 13236523 9277049 38794 50915 1640 1008844 17703 8850 8850 1711 1711 0.00 0.00 198 | 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 199 | 1 0.50 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 200 | 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 201 | 310 2709.88 220 1527 10 63 0 0 0 0 0 0 0 0.00 0.00 202 | 797 8555.00 569 3736 10 109 0 0 0 0 0 0 0 0.00 0.00 203 | 725 9215.25 519 3610 23 115 0 0 0 0 0 0 0 0.00 0.00 204 | 266 7729.38 190 1257 2 43 0 0 0 0 0 0 0 0.00 0.00 205 | 831 10196.12 594 3988 11 112 0 0 0 0 0 0 0 0.00 0.00 206 | 788 8678.38 563 3803 8 92 0 0 0 0 0 0 0 0.00 0.00 207 | 736 9080.62 526 3616 7 89 0 0 0 0 0 0 0 0.00 0.00 208 | 792 8395.50 566 3742 11 96 0 0 0 0 0 0 0 0.00 0.00 209 | 814 9346.75 582 3985 9 84 0 0 0 0 0 0 0 0.00 0.00 210 | 763 8941.12 545 3799 9 84 0 0 0 0 0 0 0 0.00 0.00 211 | 728 8543.25 520 3549 8 62 0 0 0 0 0 0 0 0.00 0.00 212 | 589 9143.62 421 2812 7 45 0 0 0 0 0 0 0 0.00 0.00 213 | 785 8710.00 561 3788 4 60 0 0 0 0 0 0 0 0.00 0.00 214 | 785 9117.25 561 3885 4 60 0 0 0 0 0 0 0 0.00 0.00 215 | 785 8397.12 561 3788 1 52 0 0 0 0 0 0 0 0.00 0.00 216 | 799 9398.12 571 3925 7 60 0 0 0 0 0 0 0 0.00 0.00 217 | 765 9033.88 547 3757 3 43 0 0 0 0 0 0 0 0.00 0.00 218 | 805 8663.25 575 3886 6 57 0 0 0 0 0 0 0 0.00 0.00 219 | 765 8490.50 547 3661 7 39 0 0 0 0 0 0 0 0.00 0.00 220 | 764 8850.00 546 3698 4 41 0 0 0 0 0 0 0 0.00 0.00 221 | 396 6706.50 283 1992 1 14 0 0 0 0 0 0 0 0.00 0.00 222 | 1 0.38 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 223 | 1 0.62 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 224 | ``` 225 | 226 | You can filter a specific statement by its query id. 227 | 228 | Of course, it first searches for the extension, and complains if it isn't there: 229 | 230 | ``` 231 | $ pgstat -s statement -d b2 232 | pgstat: Cannot find the pg_stat_statements extension. 233 | ``` 234 | 235 | You can filter by group of columns. That only works for the biggest stats, meaning database and statement. 
For example : 236 | 237 | ``` 238 | $ ./pgstat -s database -S backends,tuples 239 | - backends - -------------- tuples -------------- 240 | ret fet ins upd del 241 | 1 32167008 7201488 24524575 433 1357 242 | 1 0 0 0 0 0 243 | 1 162 154 0 0 0 244 | 1 4 0 0 0 0 245 | 1 897 18 0 0 0 246 | 1 4 0 0 0 0 247 | 248 | $ ./pgstat -s statement -S exec,wal 249 | --------- exec ---------- -------------- wal -------------- 250 | calls time rows wal_records wal_fpi wal_bytes 251 | 96 11.37 96 2 2 6882 252 | 1 0.20 1 0 0 0 253 | 1 0.16 1 0 0 0 254 | 1 0.28 1 0 0 0 255 | ``` 256 | 257 | One of my customers had a lot of writes on their databases, and I wanted to 258 | know how much writes occured in the WAL files. vmstat would only tell me how 259 | much writes on all files, but I was only interested in WAL writes. So I added 260 | a new report that grabs the current XLOG position, and diff it with the 261 | previous XLOG position. It gives something like this with a pgbench test: 262 | 263 | ``` 264 | $ ./pgstat -s xlog 265 | -------- filename -------- -- location -- ---- bytes ---- 266 | 00000001000000000000003E 0/3EC49940 1053071680 267 | 00000001000000000000003E 0/3EC49940 0 268 | 00000001000000000000003E 0/3EC49940 0 269 | 00000001000000000000003E 0/3EC875F8 253112 270 | 00000001000000000000003E 0/3ED585C8 856016 271 | 00000001000000000000003E 0/3EE36C40 910968 272 | 00000001000000000000003E 0/3EEFCC58 811032 273 | 00000001000000000000003E 0/3EFAB9D0 716152 274 | 00000001000000000000003F 0/3F06A3C0 780784 275 | 00000001000000000000003F 0/3F0E79E0 513568 276 | 00000001000000000000003F 0/3F1354E0 318208 277 | 00000001000000000000003F 0/3F1F6218 789816 278 | 00000001000000000000003F 0/3F2BCE00 814056 279 | 00000001000000000000003F 0/3F323240 418880 280 | 00000001000000000000003F 0/3F323240 0 281 | 00000001000000000000003F 0/3F323240 0 282 | ``` 283 | 284 | That's not big numbers, so it's easy to find it writes at 253K/s, but if the 285 | number were bigger, it might get hard to read. One of my co-worker, Julien 286 | Rouhaud, added a human readable option: 287 | 288 | ``` 289 | $ ./pgstat -s xlog -H 290 | -------- filename -------- -- location -- ---- bytes ---- 291 | 00000001000000000000003F 0/3F32EDC0 1011 MB 292 | 00000001000000000000003F 0/3F32EDC0 0 bytes 293 | 00000001000000000000003F 0/3F32EDC0 0 bytes 294 | 00000001000000000000003F 0/3F3ABC78 500 kB 295 | 00000001000000000000003F 0/3F491C10 920 kB 296 | 00000001000000000000003F 0/3F568548 858 kB 297 | 00000001000000000000003F 0/3F634748 817 kB 298 | 00000001000000000000003F 0/3F6F4378 767 kB 299 | 00000001000000000000003F 0/3F7A56D8 709 kB 300 | 00000001000000000000003F 0/3F8413D0 623 kB 301 | 00000001000000000000003F 0/3F8D7590 600 kB 302 | 00000001000000000000003F 0/3F970160 611 kB 303 | 00000001000000000000003F 0/3F9F2840 522 kB 304 | 00000001000000000000003F 0/3FA1FD88 181 kB 305 | 00000001000000000000003F 0/3FA1FD88 0 bytes 306 | 00000001000000000000003F 0/3FA1FD88 0 bytes 307 | 00000001000000000000003F 0/3FA1FD88 0 bytes 308 | ``` 309 | 310 | That's indeed much more readable if you ask me. 311 | 312 | You can also add a timestamp to each line with the `-t` option. For example: 313 | 314 | ``` 315 | ----- timestamp ----- -------------- buffers ------------- 316 | clean alloc maxwritten 317 | 2025-03-26 16:48:32 0 14662 0 318 | 2025-03-26 16:48:33 0 0 0 319 | ``` 320 | 321 | Another customer wanted to know how many temporary files were written, and 322 | their sizes. 
Of course, you can get that with the pg_stat_database view, but 323 | it only gets added when the query is done. We wanted to know when the query is 324 | executed. So I added another report: 325 | 326 | ``` 327 | $ ./pgstat -s tempfile 328 | --- size --- --- count --- 329 | 0 0 330 | 0 0 331 | 13082624 1 332 | 34979840 1 333 | 56016896 1 334 | 56016896 1 335 | 56016896 1 336 | 0 0 337 | 0 0 338 | ``` 339 | 340 | You see the file being stored. 341 | 342 | Since release 9.6, there are some very interesting progress views. Here is an 343 | example that shows the VACUUM progress on a table. We can see the progress 344 | while it goes through different phases. 345 | 346 | ``` 347 | $ ./pgstat -s progressvacuum 348 | --------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- 349 | database relation size %scan %vacuum #index %dead tuple 350 | bdd_alfresco alf_node_properties 254 GB scanning heap 39.95 0.00 0 0.21 351 | bdd_alfresco alf_node_properties 254 GB scanning heap 39.98 0.00 0 0.21 352 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.01 0.00 0 0.21 353 | bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 354 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.02 0.00 0 0.21 355 | bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 356 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.07 0.00 0 0.21 357 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.10 0.00 0 0.21 358 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.13 0.00 0 0.21 359 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.15 0.00 0 0.21 360 | bdd_alfresco alf_node_properties 254 GB scanning heap 40.17 0.00 0 0.21 361 | ... 362 | bdd_alfresco alf_node_properties 254 GB scanning heap 47.10 0.00 0 0.47 363 | bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 364 | bdd_alfresco alf_node_properties 254 GB scanning heap 47.13 0.00 0 0.47 365 | bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 366 | bdd_alfresco alf_node_properties 254 GB scanning heap 47.16 0.00 0 0.47 367 | bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 368 | bdd_alfresco alf_node_properties 254 GB scanning heap 47.18 0.00 0 0.48 369 | bdd_alfresco alf_node_properties 254 GB scanning heap 47.21 0.00 0 0.48 370 | ... 371 | bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 372 | bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 373 | bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 374 | bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 375 | ... 376 | ``` 377 | 378 | Information shown depends on the progress views. 379 | 380 | More informations on pgwaitevent 381 | -------------------------------- 382 | 383 | The pgwaitevent tool waits the execution of a query on a specific PID backend. 384 | It then gathers all the waiting events, and sums them up. At the end of the 385 | query, it prints a table with the waiting events, their occurences, and 386 | percentage. 
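pgwaitevent needs the PID of the backend to trace. If you don't already know it, pg_stat_activity is the usual place to look; for instance, with plain psql (the b1 database is simply the one used in the examples above):

```
$ psql -d b1 -c "SELECT pid, usename, state, query FROM pg_stat_activity WHERE datname = 'b1' AND pid <> pg_backend_pid();"
```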
387 | 388 | Here is an exemple of a session with this tool: 389 | 390 | ``` 391 | $ ./pgwaitevent -i 0.1 548292 392 | Tracing wait events for PID 548292, sampling at 0.100s 393 | 394 | New query: truncate t1; 395 | Query duration: 00:00:00.324883 396 | Trace duration: 00:00:00.313353 397 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 398 | │ Wait event │ WE type │ Occurences │ Percent │ 399 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 400 | │ [Running] │ │ 3 │ 100.00 │ 401 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 402 | 403 | New query: insert into t1 select generate_series(1, 1000000); 404 | Query duration: 00:00:02.077609 405 | Trace duration: 00:00:02.038534 406 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 407 | │ Wait event │ WE type │ Occurences │ Percent │ 408 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 409 | │ WALSync │ IO │ 12 │ 60.00 │ 410 | │ [Running] │ │ 5 │ 25.00 │ 411 | │ WALWriteLock │ LWLock │ 3 │ 15.00 │ 412 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 413 | 414 | New query: select * from t1 where id<5000; 415 | Query duration: 00:00:00.207713 416 | Trace duration: 00:00:00.108132 417 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 418 | │ Wait event │ WE type │ Occurences │ Percent │ 419 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 420 | │ [Running] │ │ 1 │ 100.00 │ 421 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 422 | 423 | New query: select * from t1 where id<500000; 424 | Query duration: 00:00:00.357929 425 | Trace duration: 00:00:00.312559 426 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 427 | │ Wait event │ WE type │ Occurences │ Percent │ 428 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 429 | │ [Running] │ │ 3 │ 100.00 │ 430 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 431 | 432 | New query: insert into t1 select generate_series(1, 1000000); 433 | Query duration: 00:00:01.908082 434 | Trace duration: 00:00:01.8308 435 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 436 | │ Wait event │ WE type │ Occurences │ Percent │ 437 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 438 | │ WALWriteLock │ LWLock │ 6 │ 33.33 │ 439 | │ [Running] │ │ 5 │ 27.78 │ 440 | │ WALSync │ IO │ 4 │ 22.22 │ 441 | │ WALWrite │ IO │ 2 │ 11.11 │ 442 | │ DataFileExtend │ IO │ 1 │ 5.56 │ 443 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 444 | 445 | New query: insert into t1 select generate_series(1, 1000000); 446 | Query duration: 00:00:01.602976 447 | Trace duration: 00:00:01.524851 448 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 449 | │ Wait event │ WE type │ Occurences │ Percent │ 450 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 451 | │ WALSync │ IO │ 7 │ 46.67 │ 452 | │ [Running] │ │ 4 │ 26.67 │ 453 | │ WALWriteLock │ LWLock │ 3 │ 20.00 │ 454 | │ WALWrite │ IO │ 1 │ 6.67 │ 455 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 456 | 457 | New query: insert into t1 select generate_series(1, 1000000); 458 | Query duration: 00:00:01.638675 459 | Trace duration: 00:00:01.630696 460 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 461 | │ Wait event │ WE 
type │ Occurences │ Percent │ 462 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 463 | │ [Running] │ │ 8 │ 50.00 │ 464 | │ WALWriteLock │ LWLock │ 4 │ 25.00 │ 465 | │ WALSync │ IO │ 4 │ 25.00 │ 466 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 467 | 468 | New query: select * from t1 where id<500000; 469 | Query duration: 00:00:00.893073 470 | Trace duration: 00:00:00.819036 471 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 472 | │ Wait event │ WE type │ Occurences │ Percent │ 473 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 474 | │ [Running] │ │ 8 │ 100.00 │ 475 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 476 | 477 | New query: create index on t1(id); 478 | Query duration: 00:00:04.051142 479 | Trace duration: 00:00:03.955806 480 | ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ 481 | │ Wait event │ WE type │ Occurences │ Percent │ 482 | ├───────────────────────────────────┼───────────┼────────────┼─────────┤ 483 | │ [Running] │ │ 15 │ 38.46 │ 484 | │ WALSync │ IO │ 15 │ 38.46 │ 485 | │ DataFileImmediateSync │ IO │ 5 │ 12.82 │ 486 | │ WALWriteLock │ LWLock │ 4 │ 10.26 │ 487 | └───────────────────────────────────┴───────────┴────────────┴─────────┘ 488 | 489 | No more session with PID 548292, exiting... 490 | ``` 491 | 492 | It sleeps 100msec before checking if a new query is being executed. It checks 493 | waiting events on an interval set up with the `-i` command line option. By 494 | default, it's 1 second (which is a bit on the high end). 495 | 496 | Starting with PostgreSQL 13, pgwaitevent is able to 497 | include leader and workers. You need the -g command line option for this. 498 | 499 | Ideas 500 | ----- 501 | 502 | * pgstat 503 | * pg_stat_archiver: display the current wal and the last archived 504 | * pg_stat_archiver: display the duration since the last archived wal 505 | * pg_stat_X_tables: display the duration since the last vacuum and analyze 506 | * sum the number of archived wal files 507 | * add a report for pg_stat_database_conflicts 508 | * add a report for pg_stat_recovery_prefetch 509 | * add a report for pg_stat_replication 510 | * add a report for pg_stat_replication_slots 511 | * add a report for pg_stat_subscription 512 | * add a report for pg_stat_subscription_stats 513 | * add a report for pg_stat_wal_receiver 514 | * pgcsvstat 515 | * check for anything missing in this tool 516 | * pgreport 517 | * get data stats (idea from Christophe Courtois) 518 | * find FK without incompatible data type 519 | -------------------------------------------------------------------------------- /pgcsvstat.c: -------------------------------------------------------------------------------- 1 | /* 2 | * pgcsvstat, a PostgreSQL app to gather statistical informations 3 | * from a PostgreSQL database. 4 | * 5 | * This software is released under the PostgreSQL Licence. 6 | * 7 | * Guillaume Lelarge, guillaume@lelarge.info, 2011-2025. 
8 | * 9 | * pgstats/pgcsvstat.c 10 | */ 11 | 12 | 13 | /* 14 | * System headers 15 | */ 16 | #include 17 | 18 | /* 19 | * PostgreSQL headers 20 | */ 21 | 22 | #include "postgres_fe.h" 23 | #include "common/logging.h" 24 | #include "fe_utils/cancel.h" 25 | #include "fe_utils/connect_utils.h" 26 | 27 | extern char *optarg; 28 | 29 | /* 30 | * Defines 31 | */ 32 | #define PGCSVSTAT_VERSION "1.4.0" 33 | 34 | /* these are the opts structures for command line params */ 35 | struct options 36 | { 37 | bool quiet; 38 | bool nodb; 39 | char *directory; 40 | 41 | char *dbname; 42 | char *hostname; 43 | char *port; 44 | char *username; 45 | 46 | int major; 47 | int minor; 48 | }; 49 | 50 | /* global variables */ 51 | static struct options *opts; 52 | static PGconn *conn; 53 | 54 | /* function prototypes */ 55 | static void help(const char *progname); 56 | void get_opts(int, char **); 57 | void *myalloc(size_t size); 58 | char *mystrdup(const char *str); 59 | int sql_exec(const char *sql, const char *filename, bool quiet); 60 | void sql_exec_dump_pgstatactivity(void); 61 | void sql_exec_dump_pgstatarchiver(void); 62 | void sql_exec_dump_pgstatbgwriter(void); 63 | void sql_exec_dump_pgstatcheckpointer(void); 64 | void sql_exec_dump_pgstatdatabase(void); 65 | void sql_exec_dump_pgstatdatabaseconflicts(void); 66 | void sql_exec_dump_pgstatreplication(void); 67 | void sql_exec_dump_pgstatreplicationslots(void); 68 | void sql_exec_dump_pgstatslru(void); 69 | void sql_exec_dump_pgstatsubscription(void); 70 | void sql_exec_dump_pgstatwal(void); 71 | void sql_exec_dump_pgstatwalreceiver(void); 72 | void sql_exec_dump_pgstatalltables(void); 73 | void sql_exec_dump_pgstatallindexes(void); 74 | void sql_exec_dump_pgstatioalltables(void); 75 | void sql_exec_dump_pgstatioallindexes(void); 76 | void sql_exec_dump_pgstatioallsequences(void); 77 | void sql_exec_dump_pgstatuserfunctions(void); 78 | void sql_exec_dump_pgclass_size(void); 79 | void sql_exec_dump_pgstatstatements(void); 80 | void sql_exec_dump_xlog_stat(void); 81 | void sql_exec_dump_pgstatprogressanalyze(void); 82 | void sql_exec_dump_pgstatprogressbasebackup(void); 83 | void sql_exec_dump_pgstatprogresscluster(void); 84 | void sql_exec_dump_pgstatprogresscopy(void); 85 | void sql_exec_dump_pgstatprogresscreateindex(void); 86 | void sql_exec_dump_pgstatprogressvacuum(void); 87 | void fetch_version(void); 88 | bool check_superuser(void); 89 | bool backend_minimum_version(int major, int minor); 90 | bool backend_has_pgstatstatements(void); 91 | 92 | 93 | /* function to parse command line options and check for some usage errors. 
*/ 94 | void 95 | get_opts(int argc, char **argv) 96 | { 97 | int c; 98 | const char *progname; 99 | 100 | progname = get_progname(argv[0]); 101 | 102 | /* set the defaults */ 103 | opts->quiet = false; 104 | opts->nodb = false; 105 | opts->directory = NULL; 106 | opts->dbname = NULL; 107 | opts->hostname = NULL; 108 | opts->port = NULL; 109 | opts->username = NULL; 110 | 111 | if (argc > 1) 112 | { 113 | if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) 114 | { 115 | help(progname); 116 | exit(0); 117 | } 118 | if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) 119 | { 120 | puts("pgcsvstats " PGCSVSTAT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); 121 | exit(0); 122 | } 123 | } 124 | 125 | /* get opts */ 126 | while ((c = getopt(argc, argv, "h:p:U:d:D:q")) != -1) 127 | { 128 | switch (c) 129 | { 130 | /* specify the database */ 131 | case 'd': 132 | opts->dbname = mystrdup(optarg); 133 | break; 134 | 135 | /* specify the directory */ 136 | case 'D': 137 | opts->directory = mystrdup(optarg); 138 | break; 139 | 140 | /* don't show headers */ 141 | case 'q': 142 | opts->quiet = true; 143 | break; 144 | 145 | /* host to connect to */ 146 | case 'h': 147 | opts->hostname = mystrdup(optarg); 148 | break; 149 | 150 | /* port to connect to on remote host */ 151 | case 'p': 152 | opts->port = mystrdup(optarg); 153 | break; 154 | 155 | /* username */ 156 | case 'U': 157 | opts->username = mystrdup(optarg); 158 | break; 159 | 160 | default: 161 | pg_log_error("Try \"%s --help\" for more information.\n", progname); 162 | exit(EXIT_FAILURE); 163 | } 164 | } 165 | } 166 | 167 | 168 | static void 169 | help(const char *progname) 170 | { 171 | printf("%s gathers statistics from a PostgreSQL database.\n\n" 172 | "Usage:\n" 173 | " %s [OPTIONS]...\n" 174 | "\nGeneral options:\n" 175 | " -d DBNAME database to connect to\n" 176 | " -D DIRECTORY directory for stats files (defaults to current)\n" 177 | " -q quiet\n" 178 | " --help show this help, then exit\n" 179 | " --version output version information, then exit\n" 180 | "\nConnection options:\n" 181 | " -h HOSTNAME database server host or socket directory\n" 182 | " -p PORT database server port number\n" 183 | " -U USER connect as specified database user\n" 184 | "\nIt creates CSV files for each report.\n\n" 185 | "Report bugs to .\n", 186 | progname, progname); 187 | } 188 | 189 | void * 190 | myalloc(size_t size) 191 | { 192 | void *ptr = malloc(size); 193 | 194 | if (!ptr) 195 | { 196 | pg_log_error("out of memory (myalloc)"); 197 | exit(EXIT_FAILURE); 198 | } 199 | return ptr; 200 | } 201 | 202 | char * 203 | mystrdup(const char *str) 204 | { 205 | char *result = strdup(str); 206 | 207 | if (!result) 208 | { 209 | pg_log_error("out of memory (mystrdup)"); 210 | exit(EXIT_FAILURE); 211 | } 212 | return result; 213 | } 214 | 215 | /* 216 | * Actual code to extrac statistics from the database 217 | * and to store the output data in CSV files. 
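 * Results are appended to the given CSV file, using ';' as the separator;
 * a header line is only written when the file is still empty and quiet mode
 * is off.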
218 | */ 219 | int 220 | sql_exec(const char *query, const char* filename, bool quiet) 221 | { 222 | PGresult *res; 223 | FILE *fdcsv; 224 | struct stat st; 225 | int nfields; 226 | int nrows; 227 | int i, j; 228 | int size; 229 | 230 | /* open the csv file */ 231 | fdcsv = fopen(filename, "a"); 232 | if (!fdcsv) 233 | { 234 | pg_log_error("Cannot open file %s, errno %d\n", filename, errno); 235 | PQfinish(conn); 236 | exit(EXIT_FAILURE); 237 | } 238 | 239 | /* get size of file */ 240 | stat(filename, &st); 241 | size = st.st_size; 242 | 243 | /* make the call */ 244 | res = PQexec(conn, query); 245 | 246 | /* check and deal with errors */ 247 | if (!res || PQresultStatus(res) > 2) 248 | { 249 | pg_log_error("query failed: %s\n", PQerrorMessage(conn)); 250 | pg_log_info("query was: %s\n", query); 251 | 252 | PQclear(res); 253 | PQfinish(conn); 254 | exit(-1); 255 | } 256 | 257 | /* get the number of fields */ 258 | nrows = PQntuples(res); 259 | nfields = PQnfields(res); 260 | 261 | /* print a header */ 262 | if (!quiet && size == 0) 263 | { 264 | for (j = 0; j < nfields; j++) 265 | { 266 | fprintf(fdcsv, "%s", PQfname(res, j)); 267 | if (j < nfields - 1) 268 | fprintf(fdcsv, ";"); 269 | } 270 | fprintf(fdcsv, "\n"); 271 | } 272 | 273 | /* for each row, dump the information */ 274 | for (i = 0; i < nrows; i++) 275 | { 276 | for (j = 0; j < nfields; j++) 277 | { 278 | fprintf(fdcsv, "%s", PQgetvalue(res, i, j)); 279 | if (j < nfields - 1) 280 | fprintf(fdcsv, ";"); 281 | } 282 | fprintf(fdcsv, "\n"); 283 | } 284 | 285 | /* cleanup */ 286 | PQclear(res); 287 | 288 | /* close the csv file */ 289 | fclose(fdcsv); 290 | 291 | return 0; 292 | } 293 | 294 | /* 295 | * Dump all activities. 296 | */ 297 | void 298 | sql_exec_dump_pgstatactivity() 299 | { 300 | char query[1024]; 301 | char filename[1024]; 302 | 303 | snprintf(query, sizeof(query), 304 | "SELECT date_trunc('seconds', now()), datid, datname, %s, %s" 305 | "usesysid, usename, %s%s%s%s%s" 306 | "date_trunc('seconds', query_start) AS query_start, " 307 | "%s%s%s%s%s%s%s state " 308 | "FROM pg_stat_activity " 309 | "ORDER BY %s", 310 | backend_minimum_version(9, 2) ? "pid" : "procpid", 311 | backend_minimum_version(13, 0) ? "leader_pid, " : "", 312 | backend_minimum_version(9, 0) ? "application_name, " : "", 313 | backend_minimum_version(8, 1) ? "client_addr, " : "", 314 | backend_minimum_version(9, 1) ? "client_hostname, " : "", 315 | backend_minimum_version(8, 1) ? "client_port, date_trunc('seconds', backend_start) AS backend_start, " : "", 316 | backend_minimum_version(8, 3) ? "date_trunc('seconds', xact_start) AS xact_start, " : "", 317 | backend_minimum_version(9, 2) ? "state_change, " : "", 318 | backend_minimum_version(9, 6) ? "wait_event_type, wait_event, " : backend_minimum_version(8, 2) ? "waiting, " : "", 319 | backend_minimum_version(9, 4) ? "backend_xid, " : "", 320 | backend_minimum_version(9, 4) ? "backend_xmin, " : "", 321 | backend_minimum_version(14, 0) ? "query_id, " : "", 322 | backend_minimum_version(9, 2) ? "query, " : "current_query,", 323 | backend_minimum_version(10, 0) ? "backend_type, " : "", 324 | backend_minimum_version(9, 2) ? "pid" : "procpid"); // the last one is for the ORDER BY 325 | 326 | snprintf(filename, sizeof(filename), 327 | "%s/pg_stat_activity.csv", opts->directory); 328 | 329 | sql_exec(query, filename, opts->quiet); 330 | } 331 | 332 | /* 333 | * Dump all bgwriter stats. 
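 * (Since v17, the checkpoint-related counters live in pg_stat_checkpointer,
 * hence the two query variants below.)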
334 | */ 335 | void 336 | sql_exec_dump_pgstatbgwriter() 337 | { 338 | char query[1024]; 339 | char filename[1024]; 340 | 341 | if (backend_minimum_version(17, 0)) 342 | { 343 | snprintf(query, sizeof(query), 344 | "SELECT date_trunc('seconds', now()), buffers_clean, " 345 | "maxwritten_clean, buffers_alloc, " 346 | "date_trunc('seconds', stats_reset) AS stats_reset " 347 | "FROM pg_stat_bgwriter "); 348 | } 349 | else 350 | { 351 | snprintf(query, sizeof(query), 352 | "SELECT date_trunc('seconds', now()), checkpoints_timed, " 353 | "checkpoints_req, %sbuffers_checkpoint, buffers_clean, " 354 | "maxwritten_clean, buffers_backend, %sbuffers_alloc%s " 355 | "FROM pg_stat_bgwriter ", 356 | backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "", 357 | backend_minimum_version(9, 1) ? "buffers_backend_fsync, " : "", 358 | backend_minimum_version(9, 1) ? ", date_trunc('seconds', stats_reset) AS stats_reset " : ""); 359 | } 360 | 361 | snprintf(filename, sizeof(filename), 362 | "%s/pg_stat_bgwriter.csv", opts->directory); 363 | 364 | sql_exec(query, filename, opts->quiet); 365 | } 366 | 367 | /* 368 | * Dump all checkpointer stats. 369 | */ 370 | void 371 | sql_exec_dump_pgstatcheckpointer() 372 | { 373 | char query[1024]; 374 | char filename[1024]; 375 | 376 | snprintf(query, sizeof(query), 377 | "SELECT date_trunc('seconds', now()), num_timed, num_requested, " 378 | "restartpoints_timed, restartpoints_req, restartpoints_done, " 379 | "write_time, sync_time, buffers_written, " 380 | "date_trunc('seconds', stats_reset) AS stats_reset " 381 | "FROM pg_stat_checkpointer "); 382 | 383 | snprintf(filename, sizeof(filename), 384 | "%s/pg_stat_checkpointer.csv", opts->directory); 385 | 386 | sql_exec(query, filename, opts->quiet); 387 | } 388 | 389 | /* 390 | * Dump all archiver stats. 391 | */ 392 | void 393 | sql_exec_dump_pgstatarchiver() 394 | { 395 | char query[1024]; 396 | char filename[1024]; 397 | 398 | snprintf(query, sizeof(query), 399 | "SELECT date_trunc('seconds', now()), archived_count, " 400 | "last_archived_wal, date_trunc('seconds', last_archived_time) AS last_archived_time, " 401 | "failed_count, " 402 | "last_failed_wal, date_trunc('seconds', last_failed_time) AS last_failed_time, " 403 | "date_trunc('seconds', stats_reset) AS stats_reset " 404 | "FROM pg_stat_archiver "); 405 | 406 | snprintf(filename, sizeof(filename), 407 | "%s/pg_stat_archiver.csv", opts->directory); 408 | 409 | sql_exec(query, filename, opts->quiet); 410 | } 411 | 412 | /* 413 | * Dump all databases stats. 414 | */ 415 | void 416 | sql_exec_dump_pgstatdatabase() 417 | { 418 | char query[1024]; 419 | char filename[1024]; 420 | 421 | snprintf(query, sizeof(query), 422 | "SELECT date_trunc('seconds', now()), datid, datname, " 423 | "numbackends, xact_commit, xact_rollback, blks_read, blks_hit" 424 | "%s%s%s%s%s " 425 | "FROM pg_stat_database " 426 | "ORDER BY datname", 427 | backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "", 428 | backend_minimum_version(9, 1) ? ", conflicts, date_trunc('seconds', stats_reset) AS stats_reset" : "", 429 | backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "", 430 | backend_minimum_version(12, 0) ? ", checksum_failures, checksum_last_failure" : "", 431 | backend_minimum_version(14, 0) ? 
", session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed" : ""); 432 | 433 | snprintf(filename, sizeof(filename), 434 | "%s/pg_stat_database.csv", opts->directory); 435 | 436 | sql_exec(query, filename, opts->quiet); 437 | } 438 | 439 | /* 440 | * Dump all databases conflicts stats. 441 | */ 442 | void 443 | sql_exec_dump_pgstatdatabaseconflicts() 444 | { 445 | char query[1024]; 446 | char filename[1024]; 447 | 448 | snprintf(query, sizeof(query), 449 | "SELECT date_trunc('seconds', now()), * " 450 | "FROM pg_stat_database_conflicts " 451 | "ORDER BY datname"); 452 | 453 | snprintf(filename, sizeof(filename), 454 | "%s/pg_stat_database_conflicts.csv", opts->directory); 455 | 456 | sql_exec(query, filename, opts->quiet); 457 | } 458 | 459 | /* 460 | * Dump all replication stats. 461 | */ 462 | void 463 | sql_exec_dump_pgstatreplication() 464 | { 465 | char query[1024]; 466 | char filename[1024]; 467 | 468 | /* get the oid and database name from the system pg_database table */ 469 | snprintf(query, sizeof(query), 470 | "SELECT date_trunc('seconds', now()), %s, usesysid, usename, " 471 | "application_name, client_addr, client_hostname, client_port, " 472 | "date_trunc('seconds', backend_start) AS backend_start, %sstate, " 473 | "%s AS master_location, %s%s" 474 | "sync_priority, " 475 | "sync_state%s " 476 | "FROM pg_stat_replication " 477 | "ORDER BY application_name", 478 | backend_minimum_version(9, 2) ? "pid" : "procpid", 479 | backend_minimum_version(9, 4) ? "backend_xmin, " : "", 480 | backend_minimum_version(10, 0) ? "pg_current_wal_lsn()" : "pg_current_xlog_location()", 481 | backend_minimum_version(10, 0) ? "sent_lsn, write_lsn, flush_lsn, replay_lsn, " : 482 | "sent_location, write_location, flush_location, replay_location, ", 483 | backend_minimum_version(10, 0) ? "write_lag, flush_lag, replay_lag, " : "", 484 | backend_minimum_version(12, 0) ? ", reply_time" : ""); 485 | 486 | snprintf(filename, sizeof(filename), 487 | "%s/pg_stat_replication.csv", opts->directory); 488 | 489 | sql_exec(query, filename, opts->quiet); 490 | } 491 | 492 | /* 493 | * Dump all replication slots stats. 494 | */ 495 | void 496 | sql_exec_dump_pgstatreplicationslots() 497 | { 498 | char query[1024]; 499 | char filename[1024]; 500 | 501 | /* get the oid and database name from the system pg_database table */ 502 | snprintf(query, sizeof(query), 503 | "SELECT date_trunc('seconds', now()), slot_name, " 504 | "spill_txns, spill_count, spill_bytes, " 505 | "stream_txns, stream_count, stream_bytes, " 506 | "total_txns, total_bytes, " 507 | "date_trunc('seconds', stats_reset) AS stats_reset " 508 | "FROM pg_stat_replication_slots " 509 | "ORDER BY slot_name"); 510 | 511 | snprintf(filename, sizeof(filename), 512 | "%s/pg_stat_replication_slots.csv", opts->directory); 513 | 514 | sql_exec(query, filename, opts->quiet); 515 | } 516 | 517 | /* 518 | * Dump all SLRU stats. 
519 | */ 520 | void 521 | sql_exec_dump_pgstatslru() 522 | { 523 | char query[1024]; 524 | char filename[1024]; 525 | 526 | snprintf(query, sizeof(query), 527 | "SELECT date_trunc('seconds', now()), name, " 528 | "blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, " 529 | "flushes, truncates, " 530 | "date_trunc('seconds', stats_reset) AS stats_reset " 531 | "FROM pg_stat_slru " 532 | "ORDER BY name"); 533 | 534 | snprintf(filename, sizeof(filename), 535 | "%s/pg_stat_slru.csv", opts->directory); 536 | 537 | sql_exec(query, filename, opts->quiet); 538 | } 539 | 540 | /* 541 | * Dump all subscriptions stats. 542 | */ 543 | void 544 | sql_exec_dump_pgstatsubscription() 545 | { 546 | char query[1024]; 547 | char filename[1024]; 548 | 549 | snprintf(query, sizeof(query), 550 | "SELECT date_trunc('seconds', now()), subid, subname%s, " 551 | "pid%s, relid, relname, received_lsn, " 552 | "date_trunc('seconds', last_msg_send_time) AS last_msg_send_time, " 553 | "date_trunc('seconds', last_msg_receipt_time) AS last_msg_receipt_time, " 554 | "latest_end_lsn, date_trunc('seconds', latest_end_time) AS latest_end_time " 555 | "FROM pg_stat_subscription s " 556 | "LEFT JOIN pg_class c ON c.oid=s.relid " 557 | "ORDER BY subid", 558 | backend_minimum_version(17, 0) ? ", worker_type" : "", 559 | backend_minimum_version(16, 0) ? ", leader_pid" : ""); 560 | 561 | snprintf(filename, sizeof(filename), 562 | "%s/pg_stat_subscription.csv", opts->directory); 563 | 564 | sql_exec(query, filename, opts->quiet); 565 | } 566 | 567 | /* 568 | * Dump all WAL stats. 569 | */ 570 | void 571 | sql_exec_dump_pgstatwal() 572 | { 573 | char query[1024]; 574 | char filename[1024]; 575 | 576 | snprintf(query, sizeof(query), 577 | "SELECT date_trunc('seconds', now()), " 578 | "wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, " 579 | "wal_sync, wal_write_time, wal_sync_time, " 580 | "date_trunc('seconds', stats_reset) AS stats_reset " 581 | "FROM pg_stat_wal"); 582 | 583 | snprintf(filename, sizeof(filename), 584 | "%s/pg_stat_wal.csv", opts->directory); 585 | 586 | sql_exec(query, filename, opts->quiet); 587 | } 588 | 589 | /* 590 | * Dump all wal receiver stats. 591 | */ 592 | void 593 | sql_exec_dump_pgstatwalreceiver() 594 | { 595 | char query[1024]; 596 | char filename[1024]; 597 | 598 | snprintf(query, sizeof(query), 599 | "SELECT pid, status, receive_start_lsn, receive_start_tli, " 600 | "written_lsn, flushed_lsn, received_tli, " 601 | "date_trunc('seconds', last_msg_send_time) last_msg_send_time, " 602 | "date_trunc('seconds', last_msg_receipt_time) last_msg_receipt_time, " 603 | "latest_end_lsn, date_trunc('seconds', latest_end_time) latest_end_time, " 604 | "slot_name, sender_host, sender_port, conninfo " 605 | "FROM pg_stat_wal_receiver " 606 | "ORDER BY pid"); 607 | 608 | snprintf(filename, sizeof(filename), 609 | "%s/pg_stat_wal_receiver.csv", opts->directory); 610 | 611 | sql_exec(query, filename, opts->quiet); 612 | } 613 | 614 | /* 615 | * Dump all tables stats. 616 | */ 617 | void 618 | sql_exec_dump_pgstatalltables() 619 | { 620 | char query[1024]; 621 | char filename[1024]; 622 | 623 | snprintf(query, sizeof(query), 624 | "SELECT date_trunc('seconds', now()), relid, schemaname, relname, " 625 | "seq_scan%s, seq_tup_read, idx_scan%s, idx_tup_fetch, " 626 | "n_tup_ins, n_tup_upd, n_tup_del" 627 | "%s%s%s%s%s%s%s " 628 | "FROM pg_stat_all_tables " 629 | "WHERE schemaname <> 'information_schema' " 630 | "ORDER BY schemaname, relname", 631 | backend_minimum_version(16, 0) ? 
", date_trunc('seconds', last_seq_scan) AS last_seq_scan" : "", 632 | backend_minimum_version(16, 0) ? ", date_trunc('seconds', last_idx_scan) AS last_idx_scan" : "", 633 | backend_minimum_version(8, 3) ? ", n_tup_hot_upd" : "", 634 | backend_minimum_version(16, 0) ? ", n_tup_newpage_upd" : "", 635 | backend_minimum_version(8, 3) ? ", n_live_tup, n_dead_tup" : "", 636 | backend_minimum_version(9, 4) ? ", n_mod_since_analyze" : "", 637 | backend_minimum_version(13, 0) ? ", n_ins_since_vacuum" : "", 638 | backend_minimum_version(8, 2) ? ", date_trunc('seconds', last_vacuum) AS last_vacuum, date_trunc('seconds', last_autovacuum) AS last_autovacuum, date_trunc('seconds',last_analyze) AS last_analyze, date_trunc('seconds',last_autoanalyze) AS last_autoanalyze" : "", 639 | backend_minimum_version(9, 1) ? ", vacuum_count, autovacuum_count, analyze_count, autoanalyze_count" : ""); 640 | 641 | snprintf(filename, sizeof(filename), 642 | "%s/pg_stat_all_tables.csv", opts->directory); 643 | 644 | sql_exec(query, filename, opts->quiet); 645 | } 646 | 647 | /* 648 | * Dump all indexes stats. 649 | */ 650 | void 651 | sql_exec_dump_pgstatallindexes() 652 | { 653 | char query[1024]; 654 | char filename[1024]; 655 | 656 | snprintf(query, sizeof(query), 657 | "SELECT date_trunc('seconds', now()), " 658 | "relid, indexrelid, schemaname, relname, indexrelname, " 659 | "idx_scan%s, idx_tup_read, idx_tup_fetch " 660 | "FROM pg_stat_all_indexes " 661 | "WHERE schemaname <> 'information_schema' " 662 | "ORDER BY schemaname, relname", 663 | backend_minimum_version(16, 0) ? ", date_trunc('seconds', last_idx_scan) AS last_idx_scan" : "" 664 | ); 665 | 666 | snprintf(filename, sizeof(filename), 667 | "%s/pg_stat_all_indexes.csv", opts->directory); 668 | 669 | sql_exec(query, filename, opts->quiet); 670 | } 671 | 672 | /* 673 | * Dump all tables IO stats. 674 | */ 675 | void 676 | sql_exec_dump_pgstatioalltables() 677 | { 678 | char query[1024]; 679 | char filename[1024]; 680 | 681 | snprintf(query, sizeof(query), 682 | "SELECT date_trunc('seconds', now()), * " 683 | "FROM pg_statio_all_tables " 684 | "WHERE schemaname <> 'information_schema' " 685 | "ORDER BY schemaname, relname"); 686 | 687 | snprintf(filename, sizeof(filename), 688 | "%s/pg_statio_all_tables.csv", opts->directory); 689 | 690 | sql_exec(query, filename, opts->quiet); 691 | } 692 | 693 | /* 694 | * Dump all indexes IO stats. 695 | */ 696 | void 697 | sql_exec_dump_pgstatioallindexes() 698 | { 699 | char query[1024]; 700 | char filename[1024]; 701 | 702 | snprintf(query, sizeof(query), 703 | "SELECT date_trunc('seconds', now()), * " 704 | "FROM pg_statio_all_indexes " 705 | "WHERE schemaname <> 'information_schema' " 706 | "ORDER BY schemaname, relname"); 707 | 708 | snprintf(filename, sizeof(filename), 709 | "%s/pg_statio_all_indexes.csv", opts->directory); 710 | 711 | sql_exec(query, filename, opts->quiet); 712 | } 713 | 714 | /* 715 | * Dump all sequences IO stats. 716 | */ 717 | void 718 | sql_exec_dump_pgstatioallsequences() 719 | { 720 | char query[1024]; 721 | char filename[1024]; 722 | 723 | snprintf(query, sizeof(query), 724 | "SELECT date_trunc('seconds', now()), * " 725 | "FROM pg_statio_all_sequences " 726 | "WHERE schemaname <> 'information_schema' " 727 | "ORDER BY schemaname, relname"); 728 | 729 | snprintf(filename, sizeof(filename), 730 | "%s/pg_statio_all_sequences.csv", opts->directory); 731 | 732 | sql_exec(query, filename, opts->quiet); 733 | } 734 | 735 | /* 736 | * Dump all functions stats. 
737 | */ 738 | void 739 | sql_exec_dump_pgstatuserfunctions() 740 | { 741 | char query[1024]; 742 | char filename[1024]; 743 | 744 | snprintf(query, sizeof(query), 745 | "SELECT date_trunc('seconds', now()), * " 746 | "FROM pg_stat_user_functions " 747 | "WHERE schemaname <> 'information_schema' " 748 | "ORDER BY schemaname, funcname"); 749 | 750 | snprintf(filename, sizeof(filename), 751 | "%s/pg_stat_user_functions.csv", opts->directory); 752 | 753 | sql_exec(query, filename, opts->quiet); 754 | } 755 | 756 | /* 757 | * Dump all size class stats. 758 | */ 759 | void 760 | sql_exec_dump_pgclass_size() 761 | { 762 | char query[1024]; 763 | char filename[1024]; 764 | 765 | snprintf(query, sizeof(query), 766 | "SELECT date_trunc('seconds', now()), n.nspname, c.relname, c.relkind, " 767 | "c.reltuples, c.relpages%s%s " 768 | "FROM pg_class c " 769 | "JOIN pg_namespace n ON n.oid=c.relnamespace " 770 | "WHERE n.nspname <> 'information_schema' " 771 | "ORDER BY n.nspname, c.relname", 772 | backend_minimum_version(9, 2) ? ", c.relallvisible" : "", 773 | backend_minimum_version(8, 1) ? ", pg_relation_size(c.oid)" : ""); 774 | 775 | snprintf(filename, sizeof(filename), 776 | "%s/pg_class_size.csv", opts->directory); 777 | 778 | sql_exec(query, filename, opts->quiet); 779 | } 780 | 781 | /* 782 | * Dump all statements stats. 783 | * to be fixed wrt v14 784 | */ 785 | void 786 | sql_exec_dump_pgstatstatements() 787 | { 788 | char query[1024]; 789 | char filename[1024]; 790 | 791 | snprintf(query, sizeof(query), 792 | "SELECT date_trunc('seconds', now()), r.rolname, d.datname, " 793 | "%sregexp_replace(query, E'\n', ' ', 'g') as query, %scalls, %s, rows, " 794 | "shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, " 795 | "local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, " 796 | "temp_blks_read, temp_blks_written%s%s%s%s%s%s " 797 | "FROM pg_stat_statements q " 798 | "LEFT JOIN pg_database d ON q.dbid=d.oid " 799 | "LEFT JOIN pg_roles r ON q.userid=r.oid " 800 | "ORDER BY r.rolname, d.datname", 801 | backend_minimum_version(14, 0) ? "toplevel, queryid, " : "", 802 | backend_minimum_version(13, 0) ? "plans, total_plan_time, min_plan_time, max_plan_time, mean_plan_time, stddev_plan_time, " : "", 803 | backend_minimum_version(13, 0) ? "total_exec_time, min_exec_time, max_exec_time, mean_exec_time, stddev_exec_time" : "total_time", 804 | backend_minimum_version(17, 0) ? ", shared_blk_read_time, shared_blk_write_time, local_blk_read_time, local_blk_write_time" : ", blk_read_time, blk_write_time", 805 | backend_minimum_version(15, 0) ? ", temp_blk_read_time, temp_blk_write_time" : "", 806 | backend_minimum_version(13, 0) ? ", wal_records, wal_fpi, wal_bytes" : "", 807 | backend_minimum_version(15, 0) ? ", jit_functions, jit_generation_time, jit_inlining_count, jit_inlining_time, jit_optimization_count, jit_optimization_time, jit_emission_count, jit_emission_time" : "", 808 | backend_minimum_version(17, 0) ? ", date_trunc('seconds', stats_since) AS stats_since " : "", 809 | backend_minimum_version(17, 0) ? ", date_trunc('seconds', minmax_stats_since) AS minmax_stats_since " : ""); 810 | 811 | snprintf(filename, sizeof(filename), 812 | "%s/pg_stat_statements.csv", opts->directory); 813 | 814 | sql_exec(query, filename, opts->quiet); 815 | } 816 | 817 | /* 818 | * Dump all xlog stats. 
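 * (Relies on pg_ls_dir() and pg_stat_file(), which by default can only be
 * executed by superusers or roles granted EXECUTE on them.)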
819 | */ 820 | void 821 | sql_exec_dump_xlog_stat() 822 | { 823 | char query[1024]; 824 | char filename[1024]; 825 | 826 | snprintf(query, sizeof(query), 827 | backend_minimum_version(10, 0) 828 | ? 829 | "SELECT date_trunc('seconds', now()), pg_walfile_name(pg_current_wal_lsn())=pg_ls_dir AS current, pg_ls_dir AS filename, " 830 | "(SELECT modification FROM pg_stat_file('pg_wal/'||pg_ls_dir)) AS modification_timestamp " 831 | "FROM pg_ls_dir('pg_wal') " 832 | "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " 833 | "ORDER BY pg_ls_dir" 834 | : 835 | "SELECT date_trunc('seconds', now()), pg_xlogfile_name(pg_current_xlog_location())=pg_ls_dir AS current, pg_ls_dir AS filename, " 836 | "(SELECT modification FROM pg_stat_file('pg_xlog/'||pg_ls_dir)) AS modification_timestamp " 837 | "FROM pg_ls_dir('pg_xlog') " 838 | "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " 839 | "ORDER BY pg_ls_dir"); 840 | 841 | snprintf(filename, sizeof(filename), 842 | "%s/pg_xlog_stat.csv", opts->directory); 843 | 844 | sql_exec(query, filename, opts->quiet); 845 | } 846 | 847 | 848 | /* 849 | * Dump ANALYZE progress 850 | */ 851 | void 852 | sql_exec_dump_pgstatprogressanalyze() 853 | { 854 | char query[1024]; 855 | char filename[1024]; 856 | 857 | snprintf(query, sizeof(query), 858 | "SELECT date_trunc('seconds', now()), pid, datid, datname, " 859 | "relid, relid::regclass relname, phase, sample_blks_total, " 860 | "sample_blks_scanned, ext_stats_total, ext_stats_computed, " 861 | "child_tables_total, child_tables_done, current_child_table_relid, " 862 | "current_child_table_relid::regclass current_child_table_relname " 863 | "FROM pg_stat_progress_analyze " 864 | "ORDER BY pid"); 865 | 866 | snprintf(filename, sizeof(filename), 867 | "%s/pg_stat_progress_analyze.csv", opts->directory); 868 | 869 | sql_exec(query, filename, opts->quiet); 870 | } 871 | 872 | /* 873 | * Dump BASE BACKUP progress 874 | */ 875 | void 876 | sql_exec_dump_pgstatprogressbasebackup() 877 | { 878 | char query[1024]; 879 | char filename[1024]; 880 | 881 | snprintf(query, sizeof(query), 882 | "SELECT date_trunc('seconds', now()), pid, phase, " 883 | "backup_total, backup_streamed, " 884 | "tablespaces_total, tablespaces_streamed " 885 | "FROM pg_stat_progress_basebackup " 886 | "ORDER BY pid"); 887 | 888 | snprintf(filename, sizeof(filename), 889 | "%s/pg_stat_progress_basebackup.csv", opts->directory); 890 | 891 | sql_exec(query, filename, opts->quiet); 892 | } 893 | 894 | /* 895 | * Dump CLUSTER progress 896 | */ 897 | void 898 | sql_exec_dump_pgstatprogresscluster() 899 | { 900 | char query[1024]; 901 | char filename[1024]; 902 | 903 | snprintf(query, sizeof(query), 904 | "SELECT date_trunc('seconds', now()), pid, datid, datname, " 905 | "relid, relid::regclass relname, command, phase, " 906 | "cluster_index_relid, cluster_index_relid::regclass cluster_index_relname, " 907 | "heap_tuples_scanned, heap_tuples_written, heap_blks_total, " 908 | "heap_blks_scanned, index_rebuild_count " 909 | "FROM pg_stat_progress_cluster " 910 | "ORDER BY pid"); 911 | 912 | snprintf(filename, sizeof(filename), 913 | "%s/pg_stat_progress_cluster.csv", opts->directory); 914 | 915 | sql_exec(query, filename, opts->quiet); 916 | } 917 | 918 | /* 919 | * Dump COPY progress 920 | */ 921 | void 922 | sql_exec_dump_pgstatprogresscopy() 923 | { 924 | char query[1024]; 925 | char filename[1024]; 926 | 927 | snprintf(query, sizeof(query), 928 | "SELECT date_trunc('seconds', now()), pid, datid, datname, " 929 | "relid, relid::regclass relname, command, type, " 930 | "bytes_processed, 
bytes_total, " 931 | "tuples_processed, tuples_excluded%s " 932 | "FROM pg_stat_progress_copy " 933 | "ORDER BY pid", 934 | backend_minimum_version(17, 0) ? ", tuples_excluded" : ""); 935 | 936 | snprintf(filename, sizeof(filename), 937 | "%s/pg_stat_progress_copy.csv", opts->directory); 938 | 939 | sql_exec(query, filename, opts->quiet); 940 | } 941 | 942 | /* 943 | * Dump CREATE INDEX progress 944 | */ 945 | void 946 | sql_exec_dump_pgstatprogresscreateindex() 947 | { 948 | char query[1024]; 949 | char filename[1024]; 950 | 951 | snprintf(query, sizeof(query), 952 | "SELECT date_trunc('seconds', now()), pid, datid, datname, " 953 | "relid, relid::regclass relname, index_relid, index_relid::regclass index_relname, " 954 | "command, phase, lockers_total, lockers_done, current_locker_pid, " 955 | "blocks_total, blocks_done, tuples_total, tuples_done, " 956 | "partitions_total, partitions_done " 957 | "FROM pg_stat_progress_create_index " 958 | "ORDER BY pid"); 959 | 960 | snprintf(filename, sizeof(filename), 961 | "%s/pg_stat_progress_create_index.csv", opts->directory); 962 | 963 | sql_exec(query, filename, opts->quiet); 964 | } 965 | 966 | /* 967 | * Dump VACUUM progress 968 | */ 969 | void 970 | sql_exec_dump_pgstatprogressvacuum() 971 | { 972 | char query[1024]; 973 | char filename[1024]; 974 | 975 | /* get the oid and database name from the system pg_database table */ 976 | snprintf(query, sizeof(query), 977 | "SELECT date_trunc('seconds', now()), pid, datid, datname, " 978 | "relid, relid::regclass relname, phase, " 979 | "heap_blks_total, heap_blks_scanned, heap_blks_vacuumed, " 980 | "index_vacuum_count, %s, %s " 981 | "FROM pg_stat_progress_vacuum " 982 | "ORDER BY pid", 983 | backend_minimum_version(17, 0) ? "max_dead_tuple_bytes" : "max_dead_tuples", 984 | backend_minimum_version(17, 0) ? 
"dead_tuple_bytes" : "num_dead_tuples"); 985 | 986 | snprintf(filename, sizeof(filename), 987 | "%s/pg_stat_progress_vacuum.csv", opts->directory); 988 | 989 | sql_exec(query, filename, opts->quiet); 990 | } 991 | 992 | /* 993 | * Fetch PostgreSQL major and minor numbers 994 | */ 995 | void 996 | fetch_version() 997 | { 998 | char query[1024]; 999 | PGresult *res; 1000 | 1001 | /* get the oid and database name from the system pg_database table */ 1002 | snprintf(query, sizeof(query), "SELECT version()"); 1003 | 1004 | /* make the call */ 1005 | res = PQexec(conn, query); 1006 | 1007 | /* check and deal with errors */ 1008 | if (!res || PQresultStatus(res) > 2) 1009 | { 1010 | pg_log_error("query failed: %s\n", PQerrorMessage(conn)); 1011 | pg_log_info("query was: %s\n", query); 1012 | 1013 | PQclear(res); 1014 | PQfinish(conn); 1015 | exit(-1); 1016 | } 1017 | 1018 | /* get the only row, and parse it to get major and minor numbers */ 1019 | sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); 1020 | 1021 | /* print version */ 1022 | if (!opts->quiet) 1023 | printf("Detected release: %d.%d\n", opts->major, opts->minor); 1024 | 1025 | /* cleanup */ 1026 | PQclear(res); 1027 | } 1028 | 1029 | 1030 | /* 1031 | * Check if user has the superuser attribute 1032 | */ 1033 | bool 1034 | check_superuser() 1035 | { 1036 | PGresult *res; 1037 | char sql[1024]; 1038 | bool is_superuser = false; 1039 | 1040 | /* get the oid and database name from the system pg_database table */ 1041 | snprintf(sql, sizeof(sql), 1042 | "SELECT rolsuper FROM pg_roles WHERE rolname=current_user "); 1043 | 1044 | /* make the call */ 1045 | res = PQexec(conn, sql); 1046 | 1047 | /* check and deal with errors */ 1048 | if (!res || PQresultStatus(res) > 2) 1049 | { 1050 | pg_log_error("query failed: %s\n", PQerrorMessage(conn)); 1051 | pg_log_info("query was: %s\n", sql); 1052 | 1053 | PQclear(res); 1054 | PQfinish(conn); 1055 | exit(-1); 1056 | } 1057 | 1058 | /* get the information */ 1059 | is_superuser = strncmp(PQgetvalue(res, 0, 0), "t", 1) == 0; 1060 | 1061 | /* cleanup */ 1062 | PQclear(res); 1063 | 1064 | return is_superuser; 1065 | } 1066 | 1067 | 1068 | /* 1069 | * Compare given major and minor numbers to the one of the connected server 1070 | */ 1071 | bool 1072 | backend_minimum_version(int major, int minor) 1073 | { 1074 | return opts->major > major || (opts->major == major && opts->minor >= minor); 1075 | } 1076 | 1077 | 1078 | /* 1079 | * Check if backend has the pg_stat_statements view 1080 | */ 1081 | bool 1082 | backend_has_pgstatstatements() 1083 | { 1084 | PGresult *res; 1085 | char sql[1024]; 1086 | bool has_pgstatstatements = false; 1087 | 1088 | /* get the oid and database name from the system pg_database table */ 1089 | snprintf(sql, sizeof(sql), 1090 | "SELECT n.nspname, " 1091 | "has_schema_privilege(c.relnamespace, 'USAGE') AS schema_priv, " 1092 | "has_table_privilege(c.oid, 'SELECT') AS view_priv " 1093 | "FROM pg_class c " 1094 | "JOIN pg_namespace n ON c.relnamespace=n.oid " 1095 | "WHERE c.relname='pg_stat_statements' AND c.relkind='v'"); 1096 | 1097 | /* make the call */ 1098 | res = PQexec(conn, sql); 1099 | 1100 | /* check and deal with errors */ 1101 | if (!res || PQresultStatus(res) > 2) 1102 | { 1103 | pg_log_error("query failed: %s\n", PQerrorMessage(conn)); 1104 | pg_log_info("query was: %s\n", sql); 1105 | 1106 | PQclear(res); 1107 | PQfinish(conn); 1108 | exit(-1); 1109 | } 1110 | 1111 | /* get the information */ 1112 | has_pgstatstatements = PQntuples(res)>0; 
1113 | 1114 | /* if it's present, set search_path to access it */ 1115 | if (has_pgstatstatements) 1116 | { 1117 | // check if user has rights to use schema 1118 | if (!strcmp(PQgetvalue(res, 0, 1), "f")) 1119 | { 1120 | pg_log_warning("pg_stat_statements is available, but user has no right to use schema \"%s\"!", 1121 | PQgetvalue(res, 0, 0)); 1122 | has_pgstatstatements = false; 1123 | } 1124 | 1125 | // check if user has rights to select view 1126 | if (!strcmp(PQgetvalue(res, 0, 2), "f")) 1127 | { 1128 | pg_log_warning("pg_stat_statements is available, but user has no right to use view \"pg_stat_statements\"!"); 1129 | has_pgstatstatements = false; 1130 | } 1131 | 1132 | if (has_pgstatstatements) 1133 | { 1134 | snprintf(sql, sizeof(sql), 1135 | "SET search_path TO %s", 1136 | PQgetvalue(res, 0, 0)); 1137 | 1138 | /* cleanup */ 1139 | PQclear(res); 1140 | 1141 | /* make the call to set search_path */ 1142 | res = PQexec(conn, sql); 1143 | 1144 | /* check and deal with errors */ 1145 | if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) 1146 | { 1147 | pg_log_error("query failed: %s\n", PQerrorMessage(conn)); 1148 | pg_log_info("query was: %s\n", sql); 1149 | 1150 | PQclear(res); 1151 | PQfinish(conn); 1152 | exit(-1); 1153 | } 1154 | } 1155 | } 1156 | 1157 | /* cleanup */ 1158 | PQclear(res); 1159 | 1160 | return has_pgstatstatements; 1161 | } 1162 | 1163 | 1164 | int 1165 | main(int argc, char **argv) 1166 | { 1167 | const char *progname; 1168 | ConnParams cparams; 1169 | bool is_superuser = false; 1170 | 1171 | /* Initialize the logging interface */ 1172 | pg_logging_init(argv[0]); 1173 | 1174 | /* Get the program name */ 1175 | progname = get_progname(argv[0]); 1176 | 1177 | opts = (struct options *) myalloc(sizeof(struct options)); 1178 | 1179 | /* parse the opts */ 1180 | get_opts(argc, argv); 1181 | 1182 | if (opts->dbname == NULL) 1183 | { 1184 | opts->dbname = "postgres"; 1185 | opts->nodb = true; 1186 | } 1187 | 1188 | if (opts->directory == NULL) 1189 | { 1190 | opts->directory = "./"; 1191 | } 1192 | 1193 | /* Set the connection struct */ 1194 | cparams.pghost = opts->hostname; 1195 | cparams.pgport = opts->port; 1196 | cparams.pguser = opts->username; 1197 | cparams.dbname = opts->dbname; 1198 | cparams.prompt_password = TRI_DEFAULT; 1199 | cparams.override_dbname = NULL; 1200 | 1201 | /* Connect to the database */ 1202 | conn = connectDatabase(&cparams, progname, false, false, false); 1203 | 1204 | /* get version */ 1205 | fetch_version(); 1206 | 1207 | /* check superuser attribute */ 1208 | is_superuser = check_superuser(); 1209 | 1210 | /* grab cluster stats info */ 1211 | sql_exec_dump_pgstatactivity(); 1212 | if (backend_minimum_version(9, 4)) 1213 | sql_exec_dump_pgstatarchiver(); 1214 | if (backend_minimum_version(8, 3)) 1215 | sql_exec_dump_pgstatbgwriter(); 1216 | if (backend_minimum_version(17, 0)) 1217 | sql_exec_dump_pgstatcheckpointer(); 1218 | sql_exec_dump_pgstatdatabase(); 1219 | if (backend_minimum_version(9, 1)) 1220 | { 1221 | sql_exec_dump_pgstatdatabaseconflicts(); 1222 | sql_exec_dump_pgstatreplication(); 1223 | } 1224 | if (backend_minimum_version(14, 0)) 1225 | sql_exec_dump_pgstatreplicationslots(); 1226 | if (backend_minimum_version(13, 0)) 1227 | sql_exec_dump_pgstatslru(); 1228 | if (backend_minimum_version(10, 0)) 1229 | sql_exec_dump_pgstatsubscription(); 1230 | if (backend_minimum_version(14, 0)) 1231 | { 1232 | sql_exec_dump_pgstatwal(); 1233 | sql_exec_dump_pgstatwalreceiver(); 1234 | } 1235 | 1236 | /* grab database stats info */ 1237 | 
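	/* these statistics views are per-database: only the database we are
	 * connected to is covered by this dump */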
sql_exec_dump_pgstatalltables(); 1238 | sql_exec_dump_pgstatallindexes(); 1239 | sql_exec_dump_pgstatioalltables(); 1240 | sql_exec_dump_pgstatioallindexes(); 1241 | sql_exec_dump_pgstatioallsequences(); 1242 | if (backend_minimum_version(8, 4)) 1243 | sql_exec_dump_pgstatuserfunctions(); 1244 | 1245 | /* grab progress stats info */ 1246 | if (backend_minimum_version(13, 0)) 1247 | sql_exec_dump_pgstatprogressanalyze(); 1248 | if (backend_minimum_version(13, 0)) 1249 | sql_exec_dump_pgstatprogressbasebackup(); 1250 | if (backend_minimum_version(12, 0)) 1251 | sql_exec_dump_pgstatprogresscluster(); 1252 | if (backend_minimum_version(14, 0)) 1253 | sql_exec_dump_pgstatprogresscopy(); 1254 | if (backend_minimum_version(12, 0)) 1255 | sql_exec_dump_pgstatprogresscreateindex(); 1256 | if (backend_minimum_version(10, 0)) 1257 | sql_exec_dump_pgstatprogressvacuum(); 1258 | 1259 | /* grab other informations */ 1260 | sql_exec_dump_pgclass_size(); 1261 | if (backend_has_pgstatstatements()) 1262 | sql_exec_dump_pgstatstatements(); 1263 | if (backend_minimum_version(8, 2) && is_superuser) 1264 | sql_exec_dump_xlog_stat(); 1265 | 1266 | PQfinish(conn); 1267 | return 0; 1268 | } 1269 | -------------------------------------------------------------------------------- /pgdisplay.c: -------------------------------------------------------------------------------- 1 | /* 2 | * pgdisplay, a PostgreSQL app to display a table 3 | * in an informative way. 4 | * 5 | * This software is released under the PostgreSQL Licence. 6 | * 7 | * Guillaume Lelarge, guillaume@lelarge.info, 2015-2025. 8 | * 9 | * pgstat/pgdisplay.c 10 | */ 11 | 12 | 13 | /* 14 | * System headers 15 | */ 16 | #include 17 | #include 18 | 19 | /* 20 | * PostgreSQL headers 21 | */ 22 | #include "postgres_fe.h" 23 | #include "common/logging.h" 24 | #include "fe_utils/connect_utils.h" 25 | 26 | /* 27 | * Defines 28 | */ 29 | #define PGDISPLAY_VERSION "0.0.1" 30 | #define PGSTAT_DEFAULT_STRING_SIZE 1024 31 | 32 | #define couleur(param) printf("\033[48;2;255;%d;%dm",param,param) 33 | #define nocouleur() printf("\033[0m") 34 | 35 | /* these are the options structure for command line parameters */ 36 | struct options 37 | { 38 | /* misc */ 39 | bool verbose; 40 | char *table; 41 | int groups; 42 | int blocksize; 43 | 44 | /* connection parameters */ 45 | char *dbname; 46 | char *hostname; 47 | char *port; 48 | char *username; 49 | 50 | /* version number */ 51 | int major; 52 | int minor; 53 | }; 54 | 55 | /* 56 | * Global variables 57 | */ 58 | static PGconn *conn; 59 | static struct options *opts; 60 | extern char *optarg; 61 | 62 | /* 63 | * Function prototypes 64 | */ 65 | static void help(const char *progname); 66 | void get_opts(int, char **); 67 | #ifndef FE_MEMUTILS_H 68 | void *pg_malloc(size_t size); 69 | char *pg_strdup(const char *in); 70 | #endif 71 | void display_fsm(char *table); 72 | void fetch_version(void); 73 | void fetch_blocksize(void); 74 | bool backend_minimum_version(int major, int minor); 75 | void allocate_struct(void); 76 | static void quit_properly(SIGNAL_ARGS); 77 | 78 | /* 79 | * Print help message 80 | */ 81 | static void 82 | help(const char *progname) 83 | { 84 | printf("%s displays table in an informative way.\n\n" 85 | "Usage:\n" 86 | " %s [OPTIONS] [delay [count]]\n" 87 | "\nGeneral options:\n" 88 | " -G GROUPS # of groups of blocks\n" 89 | " -t TABLE table to display\n" 90 | " -v verbose\n" 91 | " -?|--help show this help, then exit\n" 92 | " -V|--version output version information, then exit\n" 93 | "\nConnection 
options:\n" 94 | " -h HOSTNAME database server host or socket directory\n" 95 | " -p PORT database server port number\n" 96 | " -U USER connect as specified database user\n" 97 | " -d DBNAME database to connect to\n" 98 | "Report bugs to .\n", 99 | progname, progname); 100 | } 101 | 102 | /* 103 | * Parse command line options and check for some usage errors 104 | */ 105 | void 106 | get_opts(int argc, char **argv) 107 | { 108 | int c; 109 | const char *progname; 110 | 111 | progname = get_progname(argv[0]); 112 | 113 | /* set the defaults */ 114 | opts->verbose = false; 115 | opts->groups = 20; 116 | opts->blocksize = 0; 117 | opts->table = NULL; 118 | opts->dbname = NULL; 119 | opts->hostname = NULL; 120 | opts->port = NULL; 121 | opts->username = NULL; 122 | 123 | if (argc > 1) 124 | { 125 | if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) 126 | { 127 | help(progname); 128 | exit(0); 129 | } 130 | if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) 131 | { 132 | puts("pgdisplay " PGDISPLAY_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); 133 | exit(0); 134 | } 135 | } 136 | 137 | /* get opts */ 138 | while ((c = getopt(argc, argv, "h:p:U:d:t:G:v")) != -1) 139 | { 140 | switch (c) 141 | { 142 | /* specify the # of groups */ 143 | case 'G': 144 | opts->groups = atoi(optarg); 145 | break; 146 | 147 | /* specify the table */ 148 | case 't': 149 | opts->table = pg_strdup(optarg); 150 | break; 151 | 152 | /* don't show headers */ 153 | case 'v': 154 | opts->verbose = true; 155 | break; 156 | 157 | /* specify the database */ 158 | case 'd': 159 | opts->dbname = pg_strdup(optarg); 160 | break; 161 | 162 | /* host to connect to */ 163 | case 'h': 164 | opts->hostname = pg_strdup(optarg); 165 | break; 166 | 167 | /* port to connect to on remote host */ 168 | case 'p': 169 | opts->port = pg_strdup(optarg); 170 | break; 171 | 172 | /* username */ 173 | case 'U': 174 | opts->username = pg_strdup(optarg); 175 | break; 176 | 177 | default: 178 | errx(1, "Try \"%s --help\" for more information.\n", progname); 179 | exit(EXIT_FAILURE); 180 | } 181 | } 182 | 183 | if (opts->table == NULL) 184 | { 185 | pg_log_error("missing table name\n"); 186 | exit(EXIT_FAILURE); 187 | } 188 | 189 | if (opts->dbname == NULL) 190 | { 191 | /* 192 | * We want to use dbname for possible error reports later, 193 | * and in case someone has set and is using PGDATABASE 194 | * in its environment preserve that name for later usage 195 | */ 196 | if (!getenv("PGDATABASE")) 197 | opts->dbname = "postgres"; 198 | else 199 | opts->dbname = getenv("PGDATABASE"); 200 | } 201 | } 202 | 203 | #ifndef FE_MEMUTILS_H 204 | /* 205 | * "Safe" wrapper around malloc(). 206 | */ 207 | void * 208 | pg_malloc(size_t size) 209 | { 210 | void *tmp; 211 | 212 | /* Avoid unportable behavior of malloc(0) */ 213 | if (size == 0) 214 | size = 1; 215 | tmp = malloc(size); 216 | if (!tmp) 217 | { 218 | pg_log_error("out of memory\n"); 219 | exit(EXIT_FAILURE); 220 | } 221 | return tmp; 222 | } 223 | 224 | /* 225 | * "Safe" wrapper around strdup(). 226 | */ 227 | char * 228 | pg_strdup(const char *in) 229 | { 230 | char *tmp; 231 | 232 | if (!in) 233 | { 234 | pg_log_error("cannot duplicate null pointer (internal error)\n"); 235 | exit(EXIT_FAILURE); 236 | } 237 | tmp = strdup(in); 238 | if (!tmp) 239 | { 240 | pg_log_error("out of memory\n"); 241 | exit(EXIT_FAILURE); 242 | } 243 | return tmp; 244 | } 245 | #endif 246 | 247 | /* 248 | * Dump all archiver stats. 
249 | */ 250 | void 251 | display_fsm(char *table) 252 | { 253 | char sql[PGSTAT_DEFAULT_STRING_SIZE]; 254 | PGresult *res; 255 | int nrows; 256 | int row; 257 | int color; 258 | int totalspace, freespace; 259 | int groupby, blocksize; 260 | int n; 261 | 262 | blocksize = 8192; 263 | 264 | /* grab the stats (this is the only stats on one line) */ 265 | /* 266 | snprintf(sql, sizeof(sql), 267 | "with fsm as (select blkno/443 as blockrange, sum(avail) as available, 8192*443 as total from pg_freespace('%s') group by 1)" 268 | "select blockrange, available, total, 100*available/total as ratio, 180*available/total as color from fsm order by 1", 269 | table); 270 | */ 271 | snprintf(sql, sizeof(sql), 272 | "select avail from pg_freespace('%s') order by blkno", 273 | table); 274 | 275 | /* make the call */ 276 | res = PQexec(conn, sql); 277 | 278 | /* check and deal with errors */ 279 | if (!res || PQresultStatus(res) > 2) 280 | { 281 | warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); 282 | PQclear(res); 283 | PQfinish(conn); 284 | errx(1, "pgdisplay: query was: %s", sql); 285 | } 286 | 287 | /* get the number of fields */ 288 | nrows = PQntuples(res); 289 | 290 | /* initialize some vars */ 291 | totalspace = nrows*blocksize; 292 | if (nrows <= opts->groups) 293 | groupby = 1; 294 | else 295 | groupby = nrows/opts->groups; 296 | freespace = 0; 297 | n = 0; 298 | 299 | printf("Pages #: %d\n", nrows); 300 | printf("Table size: %d\n", totalspace); 301 | printf("... group of %d\n", groupby); 302 | printf("\n\n"); 303 | 304 | /* for each row, dump the information */ 305 | for (row = 0; row < nrows; row++) 306 | { 307 | /* getting new values */ 308 | freespace += atol(PQgetvalue(res, row, 0)); 309 | 310 | if (++n >= groupby) 311 | { 312 | //printf("Free space [%d] : %d (on %d)\n", n, freespace, groupby*blocksize); 313 | /* printing the diff... 314 | * note that the first line will be the current value, rather than the diff */ 315 | color = 180*freespace/(8192*groupby); 316 | if (color<0) 317 | color = 0; 318 | couleur(color); 319 | printf(" "); 320 | nocouleur(); 321 | 322 | freespace = 0; 323 | n = 0; 324 | } 325 | } 326 | 327 | printf("\n\n"); 328 | /* cleanup */ 329 | PQclear(res); 330 | } 331 | 332 | /* 333 | * Fetch block size. 
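 * The value is stored in opts->blocksize; note that display_fsm() above still
 * assumes an 8192-byte block size.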
334 | */ 335 | void 336 | fetch_blocksize() 337 | { 338 | char sql[PGSTAT_DEFAULT_STRING_SIZE]; 339 | PGresult *res; 340 | 341 | /* get the cluster version */ 342 | snprintf(sql, sizeof(sql), "SELECT current_setting('block_size')"); 343 | 344 | /* make the call */ 345 | res = PQexec(conn, sql); 346 | 347 | /* check and deal with errors */ 348 | if (!res || PQresultStatus(res) > 2) 349 | { 350 | warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); 351 | PQclear(res); 352 | PQfinish(conn); 353 | errx(1, "pgdisplay: query was: %s", sql); 354 | } 355 | 356 | /* get the only row, and parse it to get major and minor numbers */ 357 | opts->blocksize = atoi(PQgetvalue(res, 0, 0)); 358 | 359 | /* print version */ 360 | if (opts->verbose) 361 | printf("Detected block size: %d\n", opts->blocksize); 362 | 363 | /* cleanup */ 364 | PQclear(res); 365 | } 366 | 367 | /* 368 | * Fetch PostgreSQL major and minor numbers 369 | */ 370 | void 371 | fetch_version() 372 | { 373 | char sql[PGSTAT_DEFAULT_STRING_SIZE]; 374 | PGresult *res; 375 | 376 | /* get the cluster version */ 377 | snprintf(sql, sizeof(sql), "SELECT version()"); 378 | 379 | /* make the call */ 380 | res = PQexec(conn, sql); 381 | 382 | /* check and deal with errors */ 383 | if (!res || PQresultStatus(res) > 2) 384 | { 385 | warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); 386 | PQclear(res); 387 | PQfinish(conn); 388 | errx(1, "pgdisplay: query was: %s", sql); 389 | } 390 | 391 | /* get the only row, and parse it to get major and minor numbers */ 392 | sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); 393 | 394 | /* print version */ 395 | if (opts->verbose) 396 | printf("Detected release: %d.%d\n", opts->major, opts->minor); 397 | 398 | /* cleanup */ 399 | PQclear(res); 400 | } 401 | 402 | /* 403 | * Compare given major and minor numbers to the one of the connected server 404 | */ 405 | bool 406 | backend_minimum_version(int major, int minor) 407 | { 408 | return opts->major > major || (opts->major == major && opts->minor >= minor); 409 | } 410 | 411 | /* 412 | * Close the PostgreSQL connection, and quit 413 | */ 414 | static void 415 | quit_properly(SIGNAL_ARGS) 416 | { 417 | PQfinish(conn); 418 | exit(EXIT_FAILURE); 419 | } 420 | 421 | /* 422 | * Main function 423 | */ 424 | int 425 | main(int argc, char **argv) 426 | { 427 | const char *progname; 428 | ConnParams cparams; 429 | 430 | /* 431 | * If the user stops the program (control-Z) and then resumes it, 432 | * print out the header again. 
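 * (In pgdisplay, the SIGINT handler quit_properly() simply closes the
 * connection and exits.)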
433 | */ 434 | pqsignal(SIGINT, quit_properly); 435 | 436 | /* Allocate the options struct */ 437 | opts = (struct options *) pg_malloc(sizeof(struct options)); 438 | 439 | /* Parse the options */ 440 | get_opts(argc, argv); 441 | 442 | /* Initialize the logging interface */ 443 | pg_logging_init(argv[0]); 444 | 445 | /* Get the program name */ 446 | progname = get_progname(argv[0]); 447 | 448 | /* Set the connection struct */ 449 | cparams.pghost = opts->hostname; 450 | cparams.pgport = opts->port; 451 | cparams.pguser = opts->username; 452 | cparams.dbname = opts->dbname; 453 | cparams.prompt_password = TRI_DEFAULT; 454 | cparams.override_dbname = NULL; 455 | 456 | /* Connect to the database */ 457 | conn = connectDatabase(&cparams, progname, false, false, false); 458 | 459 | // check last vacuum timestamp 460 | // fetch blocks count 461 | 462 | fetch_blocksize(); 463 | 464 | display_fsm(opts->table); 465 | 466 | PQfinish(conn); 467 | return 0; 468 | } 469 | -------------------------------------------------------------------------------- /pgfe_cancel.c: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------ 2 | * 3 | * Query cancellation support for frontend code 4 | * 5 | * Assorted utility functions to control query cancellation with signal 6 | * handler for SIGINT. 7 | * 8 | * 9 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 10 | * Portions Copyright (c) 1994, Regents of the University of California 11 | * 12 | * src/fe_utils/cancel.c 13 | * 14 | *------------------------------------------------------------------------ 15 | */ 16 | 17 | #include "postgres_fe.h" 18 | 19 | #include 20 | 21 | #include "common/connect.h" 22 | #include "fe_utils/cancel.h" 23 | #include "fe_utils/string_utils.h" 24 | 25 | 26 | /* 27 | * Write a simple string to stderr --- must be safe in a signal handler. 28 | * We ignore the write() result since there's not much we could do about it. 29 | * Certain compilers make that harder than it ought to be. 30 | */ 31 | #define write_stderr(str) \ 32 | do { \ 33 | const char *str_ = (str); \ 34 | int rc_; \ 35 | rc_ = write(fileno(stderr), str_, strlen(str_)); \ 36 | (void) rc_; \ 37 | } while (0) 38 | 39 | /* 40 | * Contains all the information needed to cancel a query issued from 41 | * a database connection to the backend. 42 | */ 43 | static PGcancel *volatile cancelConn = NULL; 44 | 45 | /* 46 | * Predetermined localized error strings --- needed to avoid trying 47 | * to call gettext() from a signal handler. 48 | */ 49 | static const char *cancel_sent_msg = NULL; 50 | static const char *cancel_not_sent_msg = NULL; 51 | 52 | /* 53 | * CancelRequested is set when we receive SIGINT (or local equivalent). 54 | * There is no provision in this module for resetting it; but applications 55 | * might choose to clear it after successfully recovering from a cancel. 56 | * Note that there is no guarantee that we successfully sent a Cancel request, 57 | * or that the request will have any effect if we did send it. 58 | */ 59 | volatile sig_atomic_t CancelRequested = false; 60 | 61 | #ifdef WIN32 62 | static CRITICAL_SECTION cancelConnLock; 63 | #endif 64 | 65 | /* 66 | * Additional callback for cancellations. 67 | */ 68 | static void (*cancel_callback) (void) = NULL; 69 | 70 | 71 | /* 72 | * SetCancelConn 73 | * 74 | * Set cancelConn to point to the current database connection. 
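 * Any previously stored cancel object is freed before the new one is installed.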
75 | */ 76 | void 77 | SetCancelConn(PGconn *conn) 78 | { 79 | PGcancel *oldCancelConn; 80 | 81 | #ifdef WIN32 82 | EnterCriticalSection(&cancelConnLock); 83 | #endif 84 | 85 | /* Free the old one if we have one */ 86 | oldCancelConn = cancelConn; 87 | 88 | /* be sure handle_sigint doesn't use pointer while freeing */ 89 | cancelConn = NULL; 90 | 91 | if (oldCancelConn != NULL) 92 | PQfreeCancel(oldCancelConn); 93 | 94 | cancelConn = PQgetCancel(conn); 95 | 96 | #ifdef WIN32 97 | LeaveCriticalSection(&cancelConnLock); 98 | #endif 99 | } 100 | 101 | /* 102 | * ResetCancelConn 103 | * 104 | * Free the current cancel connection, if any, and set to NULL. 105 | */ 106 | void 107 | ResetCancelConn(void) 108 | { 109 | PGcancel *oldCancelConn; 110 | 111 | #ifdef WIN32 112 | EnterCriticalSection(&cancelConnLock); 113 | #endif 114 | 115 | oldCancelConn = cancelConn; 116 | 117 | /* be sure handle_sigint doesn't use pointer while freeing */ 118 | cancelConn = NULL; 119 | 120 | if (oldCancelConn != NULL) 121 | PQfreeCancel(oldCancelConn); 122 | 123 | #ifdef WIN32 124 | LeaveCriticalSection(&cancelConnLock); 125 | #endif 126 | } 127 | 128 | 129 | /* 130 | * Code to support query cancellation 131 | * 132 | * Note that sending the cancel directly from the signal handler is safe 133 | * because PQcancel() is written to make it so. We use write() to report 134 | * to stderr because it's better to use simple facilities in a signal 135 | * handler. 136 | * 137 | * On Windows, the signal canceling happens on a separate thread, because 138 | * that's how SetConsoleCtrlHandler works. The PQcancel function is safe 139 | * for this (unlike PQrequestCancel). However, a CRITICAL_SECTION is required 140 | * to protect the PGcancel structure against being changed while the signal 141 | * thread is using it. 142 | */ 143 | 144 | #ifndef WIN32 145 | 146 | /* 147 | * handle_sigint 148 | * 149 | * Handle interrupt signals by canceling the current command, if cancelConn 150 | * is set. 151 | */ 152 | static void 153 | handle_sigint(SIGNAL_ARGS) 154 | { 155 | int save_errno = errno; 156 | char errbuf[256]; 157 | 158 | CancelRequested = true; 159 | 160 | if (cancel_callback != NULL) 161 | cancel_callback(); 162 | 163 | /* Send QueryCancel if we are processing a database query */ 164 | if (cancelConn != NULL) 165 | { 166 | if (PQcancel(cancelConn, errbuf, sizeof(errbuf))) 167 | { 168 | write_stderr(cancel_sent_msg); 169 | } 170 | else 171 | { 172 | write_stderr(cancel_not_sent_msg); 173 | write_stderr(errbuf); 174 | } 175 | } 176 | 177 | errno = save_errno; /* just in case the write changed it */ 178 | } 179 | 180 | /* 181 | * setup_cancel_handler 182 | * 183 | * Register query cancellation callback for SIGINT. 
184 | */ 185 | void 186 | setup_cancel_handler(void (*query_cancel_callback) (void)) 187 | { 188 | cancel_callback = query_cancel_callback; 189 | cancel_sent_msg = _("Cancel request sent\n"); 190 | cancel_not_sent_msg = _("Could not send cancel request: "); 191 | 192 | pqsignal(SIGINT, handle_sigint); 193 | } 194 | 195 | #else /* WIN32 */ 196 | 197 | static BOOL WINAPI 198 | consoleHandler(DWORD dwCtrlType) 199 | { 200 | char errbuf[256]; 201 | 202 | if (dwCtrlType == CTRL_C_EVENT || 203 | dwCtrlType == CTRL_BREAK_EVENT) 204 | { 205 | CancelRequested = true; 206 | 207 | if (cancel_callback != NULL) 208 | cancel_callback(); 209 | 210 | /* Send QueryCancel if we are processing a database query */ 211 | EnterCriticalSection(&cancelConnLock); 212 | if (cancelConn != NULL) 213 | { 214 | if (PQcancel(cancelConn, errbuf, sizeof(errbuf))) 215 | { 216 | write_stderr(cancel_sent_msg); 217 | } 218 | else 219 | { 220 | write_stderr(cancel_not_sent_msg); 221 | write_stderr(errbuf); 222 | } 223 | } 224 | 225 | LeaveCriticalSection(&cancelConnLock); 226 | 227 | return TRUE; 228 | } 229 | else 230 | /* Return FALSE for any signals not being handled */ 231 | return FALSE; 232 | } 233 | 234 | void 235 | setup_cancel_handler(void (*callback) (void)) 236 | { 237 | cancel_callback = callback; 238 | cancel_sent_msg = _("Cancel request sent\n"); 239 | cancel_not_sent_msg = _("Could not send cancel request: "); 240 | 241 | InitializeCriticalSection(&cancelConnLock); 242 | 243 | SetConsoleCtrlHandler(consoleHandler, TRUE); 244 | } 245 | 246 | #endif /* WIN32 */ 247 | -------------------------------------------------------------------------------- /pgfe_connect_utils.c: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * Facilities for frontend code to connect to and disconnect from databases. 4 | * 5 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 6 | * Portions Copyright (c) 1994, Regents of the University of California 7 | * 8 | * src/fe_utils/connect_utils.c 9 | * 10 | *------------------------------------------------------------------------- 11 | */ 12 | #include "postgres_fe.h" 13 | 14 | #include "common/connect.h" 15 | #include "common/logging.h" 16 | #include "common/string.h" 17 | #include "fe_utils/connect_utils.h" 18 | #include "fe_utils/query_utils.h" 19 | 20 | /* 21 | * Make a database connection with the given parameters. 22 | * 23 | * An interactive password prompt is automatically issued if needed and 24 | * allowed by cparams->prompt_password. 25 | * 26 | * If allow_password_reuse is true, we will try to re-use any password 27 | * given during previous calls to this routine. (Callers should not pass 28 | * allow_password_reuse=true unless reconnecting to the same database+user 29 | * as before, else we might create password exposure hazards.) 30 | */ 31 | PGconn * 32 | connectDatabase(const ConnParams *cparams, const char *progname, 33 | bool echo, bool fail_ok, bool allow_password_reuse) 34 | { 35 | PGconn *conn; 36 | bool new_pass; 37 | static char *password = NULL; 38 | 39 | /* Callers must supply at least dbname; other params can be NULL */ 40 | Assert(cparams->dbname); 41 | 42 | if (!allow_password_reuse && password) 43 | { 44 | free(password); 45 | password = NULL; 46 | } 47 | 48 | if (cparams->prompt_password == TRI_YES && password == NULL) 49 | password = simple_prompt("Password: ", false); 50 | 51 | /* 52 | * Start the connection. 
Loop until we have a password if requested by 53 | * backend. 54 | */ 55 | do 56 | { 57 | const char *keywords[8]; 58 | const char *values[8]; 59 | int i = 0; 60 | 61 | /* 62 | * If dbname is a connstring, its entries can override the other 63 | * values obtained from cparams; but in turn, override_dbname can 64 | * override the dbname component of it. 65 | */ 66 | keywords[i] = "host"; 67 | values[i++] = cparams->pghost; 68 | keywords[i] = "port"; 69 | values[i++] = cparams->pgport; 70 | keywords[i] = "user"; 71 | values[i++] = cparams->pguser; 72 | keywords[i] = "password"; 73 | values[i++] = password; 74 | keywords[i] = "dbname"; 75 | values[i++] = cparams->dbname; 76 | if (cparams->override_dbname) 77 | { 78 | keywords[i] = "dbname"; 79 | values[i++] = cparams->override_dbname; 80 | } 81 | keywords[i] = "fallback_application_name"; 82 | values[i++] = progname; 83 | keywords[i] = NULL; 84 | values[i++] = NULL; 85 | Assert(i <= lengthof(keywords)); 86 | 87 | new_pass = false; 88 | conn = PQconnectdbParams(keywords, values, true); 89 | 90 | if (!conn) 91 | pg_fatal("could not connect to database %s: out of memory", 92 | cparams->dbname); 93 | 94 | /* 95 | * No luck? Trying asking (again) for a password. 96 | */ 97 | if (PQstatus(conn) == CONNECTION_BAD && 98 | PQconnectionNeedsPassword(conn) && 99 | cparams->prompt_password != TRI_NO) 100 | { 101 | PQfinish(conn); 102 | free(password); 103 | password = simple_prompt("Password: ", false); 104 | new_pass = true; 105 | } 106 | } while (new_pass); 107 | 108 | /* check to see that the backend connection was successfully made */ 109 | if (PQstatus(conn) == CONNECTION_BAD) 110 | { 111 | if (fail_ok) 112 | { 113 | PQfinish(conn); 114 | return NULL; 115 | } 116 | pg_fatal("%s", PQerrorMessage(conn)); 117 | } 118 | 119 | /* Start strict; callers may override this. */ 120 | PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL, echo)); 121 | 122 | return conn; 123 | } 124 | 125 | /* 126 | * Try to connect to the appropriate maintenance database. 127 | * 128 | * This differs from connectDatabase only in that it has a rule for 129 | * inserting a default "dbname" if none was given (which is why cparams 130 | * is not const). Note that cparams->dbname should typically come from 131 | * a --maintenance-db command line parameter. 132 | */ 133 | PGconn * 134 | connectMaintenanceDatabase(ConnParams *cparams, 135 | const char *progname, bool echo) 136 | { 137 | PGconn *conn; 138 | 139 | /* If a maintenance database name was specified, just connect to it. */ 140 | if (cparams->dbname) 141 | return connectDatabase(cparams, progname, echo, false, false); 142 | 143 | /* Otherwise, try postgres first and then template1. */ 144 | cparams->dbname = "postgres"; 145 | conn = connectDatabase(cparams, progname, echo, true, false); 146 | if (!conn) 147 | { 148 | cparams->dbname = "template1"; 149 | conn = connectDatabase(cparams, progname, echo, false, false); 150 | } 151 | return conn; 152 | } 153 | 154 | /* 155 | * Disconnect the given connection, canceling any statement if one is active. 
156 | */ 157 | void 158 | disconnectDatabase(PGconn *conn) 159 | { 160 | char errbuf[256]; 161 | 162 | Assert(conn != NULL); 163 | 164 | if (PQtransactionStatus(conn) == PQTRANS_ACTIVE) 165 | { 166 | PGcancel *cancel; 167 | 168 | if ((cancel = PQgetCancel(conn))) 169 | { 170 | (void) PQcancel(cancel, errbuf, sizeof(errbuf)); 171 | PQfreeCancel(cancel); 172 | } 173 | } 174 | 175 | PQfinish(conn); 176 | } 177 | -------------------------------------------------------------------------------- /pgfe_query_utils.c: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * Facilities for frontend code to query a databases. 4 | * 5 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 6 | * Portions Copyright (c) 1994, Regents of the University of California 7 | * 8 | * src/fe_utils/query_utils.c 9 | * 10 | *------------------------------------------------------------------------- 11 | */ 12 | #include "postgres_fe.h" 13 | 14 | #include "common/logging.h" 15 | #include "fe_utils/cancel.h" 16 | #include "fe_utils/query_utils.h" 17 | 18 | /* 19 | * Run a query, return the results, exit program on failure. 20 | */ 21 | PGresult * 22 | executeQuery(PGconn *conn, const char *query, bool echo) 23 | { 24 | PGresult *res; 25 | 26 | if (echo) 27 | printf("%s\n", query); 28 | 29 | res = PQexec(conn, query); 30 | if (!res || 31 | PQresultStatus(res) != PGRES_TUPLES_OK) 32 | { 33 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 34 | pg_log_error_detail("Query was: %s", query); 35 | PQfinish(conn); 36 | exit(1); 37 | } 38 | 39 | return res; 40 | } 41 | 42 | 43 | /* 44 | * As above for a SQL command (which returns nothing). 45 | */ 46 | void 47 | executeCommand(PGconn *conn, const char *query, bool echo) 48 | { 49 | PGresult *res; 50 | 51 | if (echo) 52 | printf("%s\n", query); 53 | 54 | res = PQexec(conn, query); 55 | if (!res || 56 | PQresultStatus(res) != PGRES_COMMAND_OK) 57 | { 58 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 59 | pg_log_error_detail("Query was: %s", query); 60 | PQfinish(conn); 61 | exit(1); 62 | } 63 | 64 | PQclear(res); 65 | } 66 | 67 | 68 | /* 69 | * As above for a SQL maintenance command (returns command success). 70 | * Command is executed with a cancel handler set, so Ctrl-C can 71 | * interrupt it. 72 | */ 73 | bool 74 | executeMaintenanceCommand(PGconn *conn, const char *query, bool echo) 75 | { 76 | PGresult *res; 77 | bool r; 78 | 79 | if (echo) 80 | printf("%s\n", query); 81 | 82 | SetCancelConn(conn); 83 | res = PQexec(conn, query); 84 | ResetCancelConn(); 85 | 86 | r = (res && PQresultStatus(res) == PGRES_COMMAND_OK); 87 | 88 | PQclear(res); 89 | 90 | return r; 91 | } 92 | -------------------------------------------------------------------------------- /pgreport.c: -------------------------------------------------------------------------------- 1 | /* 2 | * pgreport, a PostgreSQL app to get lots of informations from PostgreSQL 3 | * metadata and statistics. 4 | * 5 | * This software is released under the PostgreSQL Licence. 6 | * 7 | * Guillaume Lelarge, guillaume@lelarge.info, 2020-2025. 
8 | * 9 | * pgstats/pgreport.c 10 | */ 11 | 12 | 13 | /* 14 | * PostgreSQL headers 15 | */ 16 | #include "postgres_fe.h" 17 | #include "common/logging.h" 18 | #include "fe_utils/connect_utils.h" 19 | #include "libpq/pqsignal.h" 20 | 21 | 22 | /* 23 | * pgreport headers 24 | */ 25 | #include "pgreport_queries.h" 26 | 27 | 28 | /* 29 | * Defines 30 | */ 31 | #define PGREPORT_VERSION "1.4.0" 32 | #define PGREPORT_DEFAULT_LINES 20 33 | #define PGREPORT_DEFAULT_STRING_SIZE 2048 34 | 35 | 36 | /* 37 | * Structs 38 | */ 39 | 40 | /* these are the options structure for command line parameters */ 41 | struct options 42 | { 43 | /* misc */ 44 | char *script; 45 | bool verbose; 46 | 47 | /* version number */ 48 | int major; 49 | int minor; 50 | }; 51 | 52 | 53 | /* 54 | * Global variables 55 | */ 56 | static struct options *opts; 57 | extern char *optarg; 58 | 59 | 60 | /* 61 | * Function prototypes 62 | */ 63 | static void help(const char *progname); 64 | void get_opts(int, char **); 65 | #ifndef FE_MEMUTILS_H 66 | void *pg_malloc(size_t size); 67 | char *pg_strdup(const char *in); 68 | #endif 69 | bool backend_minimum_version(int major, int minor); 70 | void execute(char *query); 71 | void install_extension(char *extension); 72 | void fetch_version(void); 73 | void fetch_postmaster_reloadconftime(void); 74 | void fetch_postmaster_starttime(void); 75 | void fetch_table(char *label, char *query); 76 | void fetch_file(char *filename); 77 | void fetch_kernelconfig(char *cfg); 78 | void exec_command(char *cmd); 79 | static void quit_properly(SIGNAL_ARGS); 80 | 81 | 82 | /* 83 | * Print help message 84 | */ 85 | static void 86 | help(const char *progname) 87 | { 88 | printf("%s gets lots of informations from PostgreSQL metadata and statistics.\n\n" 89 | "Usage:\n" 90 | " %s [OPTIONS]\n" 91 | "\nGeneral options:\n" 92 | " -s VERSION generate SQL script for $VERSION release\n" 93 | " -v verbose\n" 94 | " -?|--help show this help, then exit\n" 95 | " -V|--version output version information, then exit\n" 96 | "Report bugs to .\n", 97 | progname, progname); 98 | } 99 | 100 | 101 | /* 102 | * Parse command line options and check for some usage errors 103 | */ 104 | void 105 | get_opts(int argc, char **argv) 106 | { 107 | int c; 108 | const char *progname; 109 | 110 | progname = get_progname(argv[0]); 111 | 112 | /* set the defaults */ 113 | opts->script = NULL; 114 | opts->verbose = false; 115 | 116 | /* we should deal quickly with help and version */ 117 | if (argc > 1) 118 | { 119 | if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) 120 | { 121 | help(progname); 122 | exit(0); 123 | } 124 | if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) 125 | { 126 | puts("pgreport " PGREPORT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); 127 | exit(0); 128 | } 129 | } 130 | 131 | /* get options */ 132 | while ((c = getopt(argc, argv, "vs:")) != -1) 133 | { 134 | switch (c) 135 | { 136 | /* get script */ 137 | case 's': 138 | opts->script = pg_strdup(optarg); 139 | sscanf(opts->script, "%d.%d", &(opts->major), &(opts->minor)); 140 | break; 141 | 142 | /* get verbose */ 143 | case 'v': 144 | opts->verbose = true; 145 | break; 146 | 147 | default: 148 | pg_log_error("Try \"%s --help\" for more information.\n", progname); 149 | exit(EXIT_FAILURE); 150 | } 151 | } 152 | 153 | if (opts->script == NULL) 154 | { 155 | opts->script = "17"; 156 | } 157 | } 158 | 159 | #ifndef FE_MEMUTILS_H 160 | /* 161 | * "Safe" wrapper around malloc(). 
162 | */ 163 | void * 164 | pg_malloc(size_t size) 165 | { 166 | void *tmp; 167 | 168 | /* Avoid unportable behavior of malloc(0) */ 169 | if (size == 0) 170 | size = 1; 171 | tmp = malloc(size); 172 | if (!tmp) 173 | { 174 | pg_log_error("out of memory (pg_malloc)\n"); 175 | exit(EXIT_FAILURE); 176 | } 177 | return tmp; 178 | } 179 | 180 | 181 | /* 182 | * "Safe" wrapper around strdup(). 183 | */ 184 | char * 185 | pg_strdup(const char *in) 186 | { 187 | char *tmp; 188 | 189 | if (!in) 190 | { 191 | pg_log_error("cannot duplicate null pointer (internal error)\n"); 192 | exit(EXIT_FAILURE); 193 | } 194 | tmp = strdup(in); 195 | if (!tmp) 196 | { 197 | pg_log_error("out of memory (pg_strdup)\n"); 198 | exit(EXIT_FAILURE); 199 | } 200 | return tmp; 201 | } 202 | #endif 203 | 204 | 205 | /* 206 | * Compare given major and minor numbers to the one of the connected server 207 | */ 208 | bool 209 | backend_minimum_version(int major, int minor) 210 | { 211 | return opts->major > major || (opts->major == major && opts->minor >= minor); 212 | } 213 | 214 | 215 | /* 216 | * Execute query 217 | */ 218 | void 219 | execute(char *query) 220 | { 221 | printf("%s;\n", query); 222 | } 223 | 224 | /* 225 | * Install extension 226 | */ 227 | void 228 | install_extension(char *extension) 229 | { 230 | printf("CREATE EXTENSION IF NOT EXISTS %s;\n", extension); 231 | } 232 | 233 | 234 | /* 235 | * Fetch PostgreSQL major and minor numbers 236 | */ 237 | void 238 | fetch_version() 239 | { 240 | printf("\\echo PostgreSQL version\n"); 241 | printf("SELECT version();\n"); 242 | } 243 | 244 | 245 | /* 246 | * Fetch PostgreSQL reload configuration time 247 | */ 248 | void 249 | fetch_postmaster_reloadconftime() 250 | { 251 | printf("\\echo PostgreSQL reload conf time\n"); 252 | printf("SELECT pg_conf_load_time();\n"); 253 | } 254 | 255 | 256 | /* 257 | * Fetch PostgreSQL start time 258 | */ 259 | void 260 | fetch_postmaster_starttime() 261 | { 262 | printf("\\echo PostgreSQL start time\n"); 263 | printf("SELECT pg_postmaster_start_time();\n"); 264 | } 265 | 266 | 267 | /* 268 | * Handle query 269 | */ 270 | void 271 | fetch_table(char *label, char *query) 272 | { 273 | printf("\\echo\n"); 274 | printf("\\echo ## %s\n",label); 275 | printf("\\echo\n\n"); 276 | printf("%s;\n",query); 277 | } 278 | 279 | 280 | /* 281 | * Close the PostgreSQL connection, and quit 282 | */ 283 | static void 284 | quit_properly(SIGNAL_ARGS) 285 | { 286 | exit(EXIT_FAILURE); 287 | } 288 | 289 | 290 | /* 291 | * Main function 292 | */ 293 | int 294 | main(int argc, char **argv) 295 | { 296 | char sql[10240]; 297 | 298 | /* 299 | * If the user stops the program, 300 | * quit nicely. 
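 * (pgreport never connects to a database itself; it only writes a psql script
 * to stdout, so the handler can simply exit.)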
301 | */ 302 | pqsignal(SIGINT, quit_properly); 303 | 304 | /* Initialize the logging interface */ 305 | pg_logging_init(argv[0]); 306 | 307 | /* Allocate the options struct */ 308 | opts = (struct options *) pg_malloc(sizeof(struct options)); 309 | 310 | /* Parse the options */ 311 | get_opts(argc, argv); 312 | 313 | printf("\\echo =================================================================================\n"); 314 | printf("\\echo == pgreport SQL script for a %s release =========================================\n", opts->script); 315 | printf("\\echo =================================================================================\n"); 316 | printf("SET application_name to 'pgreport';\n"); 317 | 318 | printf("\\pset format aligned\n"); 319 | printf("\\pset border 2\n"); 320 | printf("\\pset linestyle ascii\n"); 321 | 322 | /* Fetch version */ 323 | printf("\\echo\n"); 324 | printf("\\echo # PostgreSQL Version\n"); 325 | printf("\\echo\n\n"); 326 | fetch_version(); 327 | printf("\n"); 328 | 329 | /* Create schema, and set if as our search_path */ 330 | execute(CREATE_SCHEMA); 331 | execute(SET_SEARCHPATH); 332 | /* Install some extensions if they are not already there */ 333 | install_extension("pg_buffercache"); 334 | install_extension("pg_visibility"); 335 | /* Install some functions/views */ 336 | execute(CREATE_GETVALUE_FUNCTION_SQL); 337 | execute(CREATE_BLOATTABLE_VIEW_SQL); 338 | strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_1); 339 | strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_2); 340 | execute(sql); 341 | if (backend_minimum_version(10,0)) 342 | { 343 | execute(CREATE_ORPHANEDFILES_VIEW_SQL2); 344 | } 345 | else 346 | { 347 | execute(CREATE_ORPHANEDFILES_VIEW_SQL1); 348 | } 349 | 350 | /* Fetch postmaster start time */ 351 | printf("\\echo\n"); 352 | printf("\\echo # PostgreSQL Start time\n"); 353 | printf("\\echo\n\n"); 354 | fetch_postmaster_starttime(); 355 | printf("\n"); 356 | 357 | /* Fetch reload conf time */ 358 | printf("\\echo\n"); 359 | printf("\\echo # PostgreSQL Reload conf time\n"); 360 | printf("\\echo\n\n"); 361 | fetch_postmaster_reloadconftime(); 362 | printf("\n"); 363 | 364 | /* Fetch settings by various ways */ 365 | printf("\\echo\n"); 366 | printf("\\echo # PostgreSQL Configuration\n"); 367 | printf("\\echo\n\n"); 368 | fetch_table(SETTINGS_BY_SOURCEFILE_TITLE, SETTINGS_BY_SOURCEFILE_SQL); 369 | fetch_table(SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE, 370 | SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL); 371 | if (backend_minimum_version(9,5)) 372 | { 373 | fetch_table(PGFILESETTINGS_TITLE, PGFILESETTINGS_SQL); 374 | } 375 | if (backend_minimum_version(10,0)) 376 | { 377 | fetch_table(PGHBAFILERULES_TITLE, PGHBAFILERULES_SQL); 378 | } 379 | if (backend_minimum_version(15,0)) 380 | { 381 | fetch_table(PGIDENTFILEMAPPINGS_TITLE, PGIDENTFILEMAPPINGS_SQL); 382 | } 383 | fetch_table(PGSETTINGS_TITLE, PGSETTINGS_SQL); 384 | 385 | /* Fetch global objects */ 386 | printf("\\echo\n"); 387 | printf("\\echo # Global objects\n"); 388 | printf("\\echo\n\n"); 389 | fetch_table(CLUSTER_HITRATIO_TITLE, CLUSTER_HITRATIO_SQL); 390 | fetch_table(CLUSTER_BUFFERSUSAGE_TITLE, CLUSTER_BUFFERSUSAGE_SQL); 391 | fetch_table(CLUSTER_BUFFERSUSAGEDIRTY_TITLE, CLUSTER_BUFFERSUSAGEDIRTY_SQL); 392 | fetch_table(DATABASES_TITLE, DATABASES_SQL); 393 | fetch_table(DATABASES_IN_CACHE_TITLE, DATABASES_IN_CACHE_SQL); 394 | fetch_table(TABLESPACES_TITLE, TABLESPACES_SQL); 395 | fetch_table(ROLES_TITLE, backend_minimum_version(9,5) ? 
ROLES_SQL_95min : ROLES_SQL_94max); 396 | fetch_table(USER_PASSWORDS_TITLE, USER_PASSWORDS_SQL); 397 | fetch_table(DATABASEUSER_CONFIG_TITLE, DATABASEUSER_CONFIG_SQL); 398 | 399 | /* Fetch local objects of the current database */ 400 | printf("\\echo\n"); 401 | if (backend_minimum_version(9,3)) 402 | { 403 | printf("SELECT current_database() AS db \\gset"); 404 | printf("\\echo # Local objects in database :'db'\n"); 405 | } 406 | else 407 | { 408 | printf("\\echo # Local objects in current database\n"); 409 | } 410 | printf("\\echo\n\n"); 411 | fetch_table(SCHEMAS_TITLE, SCHEMAS_SQL); 412 | fetch_table(NBRELS_IN_SCHEMA_TITLE, NBRELS_IN_SCHEMA_SQL); 413 | if (backend_minimum_version(11,0)) 414 | { 415 | fetch_table(NBFUNCSPROCS_IN_SCHEMA_TITLE, 416 | NBFUNCSPROCS_IN_SCHEMA_SQL); 417 | } 418 | else 419 | { 420 | fetch_table(NBFUNCS_IN_SCHEMA_TITLE, NBFUNCS_IN_SCHEMA_SQL); 421 | } 422 | fetch_table(HEAPTOAST_SIZE_TITLE, HEAPTOAST_SIZE_SQL); 423 | fetch_table(EXTENSIONS_TITLE, EXTENSIONS_SQL); 424 | fetch_table(EXTENSIONSTABLE_TITLE, EXTENSIONSTABLE_SQL); 425 | fetch_table(KINDS_SIZE_TITLE, KINDS_SIZE_SQL); 426 | fetch_table(DEPENDENCIES_TITLE, DEPENDENCIES_SQL); 427 | fetch_table(KINDS_IN_CACHE_TITLE, KINDS_IN_CACHE_SQL); 428 | fetch_table(AM_SIZE_TITLE, AM_SIZE_SQL); 429 | fetch_table(TABLEWITHOUTPKEY_TITLE, TABLEWITHOUTPKEY_SQL); 430 | fetch_table(BAD_DATATYPE_TITLE, BAD_DATATYPE_SQL); 431 | fetch_table(RLS_TITLE, RLS_SQL); 432 | fetch_table(POLICY_TITLE, POLICY_SQL); 433 | fetch_table(PARTITIONGLOBALSIZE_TITLE, PARTITIONGLOBALSIZE_SQL); 434 | fetch_table(PARTITIONLIST_TITLE, PARTITIONLIST_SQL); 435 | fetch_table(INDEXTYPE_TITLE, INDEXTYPE_SQL); 436 | fetch_table(INDEXONTEXT_TITLE, INDEXONTEXT_SQL); 437 | fetch_table(PERCENTUSEDINDEXES_TITLE, PERCENTUSEDINDEXES_SQL); 438 | fetch_table(UNUSEDINDEXES_TITLE, UNUSEDINDEXES_SQL); 439 | fetch_table(REDUNDANTINDEXES_TITLE, REDUNDANTINDEXES_SQL); 440 | fetch_table(ORPHANEDFILES_TITLE, ORPHANEDFILES_SQL); 441 | fetch_table(NBFUNCS_TITLE, NBFUNCS_SQL); 442 | fetch_table(SECDEF_FUNCS_TITLE, SECDEF_FUNCS_SQL); 443 | if (backend_minimum_version(11,0)) 444 | { 445 | fetch_table(FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE, 446 | FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL); 447 | } 448 | else 449 | { 450 | fetch_table(FUNCS_PER_SCHEMA_TITLE, FUNCS_PER_SCHEMA_SQL); 451 | } 452 | fetch_table(RULES_TITLE, RULES_SQL); 453 | fetch_table(LOBJ_TITLE, LOBJ_SQL); 454 | fetch_table(LOBJ_STATS_TITLE, LOBJ_STATS_SQL); 455 | fetch_table(RELOPTIONS_TITLE, RELOPTIONS_SQL); 456 | fetch_table(NEEDVACUUM_TITLE, NEEDVACUUM_SQL); 457 | fetch_table(NEEDANALYZE_TITLE, NEEDANALYZE_SQL); 458 | fetch_table(MINAGE_TITLE, MINAGE_SQL); 459 | fetch_table(TOBEFROZEN_TABLES_TITLE, TOBEFROZEN_TABLES_SQL); 460 | fetch_table(BLOATOVERVIEW_TITLE, BLOATOVERVIEW_SQL); 461 | fetch_table(TOP20BLOAT_TABLES_TITLE, TOP20BLOAT_TABLES_SQL); 462 | fetch_table(TOP20BLOAT_INDEXES_TITLE, TOP20BLOAT_INDEXES_SQL); 463 | fetch_table(REPSLOTS_TITLE, REPSLOTS_SQL); 464 | if (backend_minimum_version(10,0)) 465 | { 466 | fetch_table(PUBLICATIONS_TITLE, PUBLICATIONS_SQL); 467 | fetch_table(SUBSCRIPTIONS_TITLE, SUBSCRIPTIONS_SQL); 468 | } 469 | /* 470 | fetch_table(TOP10QUERYIDS_TITLE, TOP10QUERYIDS_SQL); 471 | fetch_table(TOP10QUERIES_TITLE, TOP10QUERIES_SQL); 472 | */ 473 | 474 | /* 475 | * Uninstall all 476 | * Actually, it drops our schema, which should get rid of all our stuff 477 | */ 478 | execute(DROP_ALL); 479 | 480 | pg_free(opts); 481 | 482 | return 0; 483 | } 484 | 
-------------------------------------------------------------------------------- /pgreport_queries.h: -------------------------------------------------------------------------------- 1 | #define SETTINGS_BY_SOURCEFILE_TITLE "Settings by source file" 2 | #define SETTINGS_BY_SOURCEFILE_SQL "SELECT source, sourcefile, count(*) AS nb FROM pg_settings GROUP BY 1, 2" 3 | 4 | #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE "Non default value and not config file settings" 5 | #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL "SELECT source, name, setting, unit FROM pg_settings WHERE source NOT IN ('configuration file', 'default') OR (setting != boot_val) ORDER BY source, name" 6 | 7 | #define CLUSTER_HITRATIO_TITLE "Hit ratio" 8 | #define CLUSTER_HITRATIO_SQL "SELECT 'index hit rate' AS name, 100.*sum(idx_blks_hit) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio FROM pg_statio_user_indexes UNION ALL SELECT 'table hit rate' AS name, 100.*sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio FROM pg_statio_user_tables" 9 | 10 | #define CLUSTER_BUFFERSUSAGE_TITLE "Buffers Usage" 11 | #define CLUSTER_BUFFERSUSAGE_SQL "SELECT usagecount, count(*) FROM pg_buffercache GROUP BY 1 ORDER BY 1" 12 | 13 | #define CLUSTER_BUFFERSUSAGEDIRTY_TITLE "Buffers Usage with dirty" 14 | #define CLUSTER_BUFFERSUSAGEDIRTY_SQL "SELECT usagecount, isdirty, count(*) FROM pg_buffercache GROUP BY 1,2 ORDER BY 1,2" 15 | 16 | #define DATABASES_TITLE "Databases" 17 | #define DATABASES_SQL "SELECT d.datname as \"Name\", pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\", pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\", d.datcollate as \"Collate\", d.datctype as \"Ctype\", pg_catalog.array_to_string(d.datacl, E'\n') AS \"Access privileges\", CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname)) ELSE 'No Access' END as \"Size\", t.spcname as \"Tablespace\", pg_catalog.shobj_description(d.oid, 'pg_database') as \"Description\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid ORDER BY 1" 18 | 19 | #define DATABASES_IN_CACHE_TITLE "Databases in cache" 20 | #define DATABASES_IN_CACHE_SQL "SELECT CASE WHEN datname IS NULL THEN '' ELSE datname END AS datname, pg_size_pretty(count(*)*8192) FROM pg_buffercache bc LEFT JOIN pg_database d ON d.oid=bc.reldatabase GROUP BY 1 ORDER BY count(*) DESC" 21 | 22 | #define TABLESPACES_TITLE "Tablespaces" 23 | #define TABLESPACES_SQL "SELECT spcname AS \"Name\", pg_catalog.pg_get_userbyid(spcowner) AS \"Owner\", pg_catalog.pg_tablespace_location(oid) AS \"Location\", pg_size_pretty(pg_tablespace_size(oid)) AS \"Size\", pg_catalog.array_to_string(spcacl, E'\n') AS \"Access privileges\", spcoptions AS \"Options\", pg_catalog.shobj_description(oid, 'pg_tablespace') AS \"Description\" FROM pg_catalog.pg_tablespace ORDER BY 1" 24 | 25 | #define ROLES_TITLE "Roles" 26 | #define ROLES_SQL_94max "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as memberof, r.rolreplication FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" 27 | #define ROLES_SQL_95min "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM 
pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as memberof, r.rolreplication, r.rolbypassrls FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" 28 | 29 | #define USER_PASSWORDS_TITLE "User passwords" 30 | #define USER_PASSWORDS_SQL "SELECT usename, valuntil, CASE WHEN passwd IS NULL THEN '' else passwd END AS passwd FROM pg_catalog.pg_shadow ORDER BY 1" 31 | 32 | #define DATABASEUSER_CONFIG_TITLE "Databases and users specific configuration" 33 | #define DATABASEUSER_CONFIG_SQL "select datname, rolname, setconfig from pg_db_role_setting drs left join pg_database d on d.oid=drs.setdatabase left join pg_roles r on r.oid=drs.setrole" 34 | 35 | #define SCHEMAS_TITLE "Schemas" 36 | #define SCHEMAS_SQL "SELECT n.nspname AS \"Name\", pg_catalog.pg_get_userbyid(n.nspowner) AS \"Owner\" FROM pg_catalog.pg_namespace n WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1" 37 | 38 | #define NBRELS_IN_SCHEMA_TITLE "Relations per kinds and schemas" 39 | #define NBRELS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where relkind='r') as tables, count(*) filter (where relkind='t') as toasts, count(*) filter (where relkind='i') as index, count(*) filter (where relkind='S') as sequences, count(*) filter (where relkind='v') as views, count(*) filter (where relkind='m') as matviews from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_class c on n.oid=c.relnamespace group by nspname, rolname order by 1, 2" 40 | 41 | #define NBFUNCS_IN_SCHEMA_TITLE "Functions per schema" 42 | #define NBFUNCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where p.oid is not null) as functions from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by nspname, rolname order by 1, 2" 43 | 44 | #define NBFUNCSPROCS_IN_SCHEMA_TITLE "Routines per schema" 45 | #define NBFUNCSPROCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where prokind='f') as functions, count(*) filter (where prokind='p') as procedures from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by nspname, rolname order by 1, 2" 46 | 47 | #define SECDEF_FUNCS_TITLE "Security Definer Functions" 48 | #define SECDEF_FUNCS_SQL "SELECT n.nspname, p.proname FROM pg_proc p JOIN pg_namespace n ON n.oid=p.pronamespace WHERE prosecdef" 49 | 50 | #define RLS_TITLE "Row-Level Security Tables" 51 | #define RLS_SQL "SELECT oid, relname FROM pg_class WHERE relrowsecurity" 52 | 53 | #define POLICY_TITLE "Policies" 54 | #define POLICY_SQL "SELECT * FROM pg_policy" 55 | 56 | #define PARTITIONGLOBALSIZE_TITLE "Partitions global size" 57 | #define PARTITIONGLOBALSIZE_SQL "select pg_size_pretty(sum(pg_table_size(ppt.relid))) from pg_class c, lateral pg_partition_tree(c.oid) ppt where c.relkind='p' and not c.relispartition and ppt.isleaf" 58 | 59 | #define PARTITIONLIST_TITLE "Partitions" 60 | #define PARTITIONLIST_SQL "with tmp as ( select ppt.level, ppt.parentrelid as parentable, case level when 0 then c.oid else ppt.relid end as tableoid, case pt.partstrat when 'h' then 'hash' when 'l' then 'list' when 'r' then 'range' else 'weird' end as strategy, cdef.relname as default, ppt.isleaf from pg_partitioned_table pt join pg_class c on c.oid=pt.partrelid left join pg_class cdef on cdef.oid=pt.partdefid, lateral pg_partition_tree(c.oid) ppt) select *, pg_size_pretty(pg_table_size(tableoid)) as size from tmp" 61 | 62 | #define HEAPTOAST_SIZE_TITLE "HEAP and 
TOAST sizes per schema" 63 | #define HEAPTOAST_SIZE_SQL "select nspname, relname, pg_relation_size(c.oid) as heap_size, pg_relation_size(reltoastrelid) as toast_size from pg_namespace n join pg_class c on n.oid=c.relnamespace where pg_relation_size(reltoastrelid)>0 order by nspname, relname" 64 | 65 | #define EXTENSIONS_TITLE "Extensions" 66 | #define EXTENSIONS_SQL "SELECT e.extname AS \"Name\", e.extversion AS \"Version\", n.nspname AS \"Schema\", c.description AS \"Description\" FROM pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass ORDER BY 1" 67 | 68 | #define EXTENSIONSTABLE_TITLE "Extensions Tables (dumpable or not?)" 69 | #define EXTENSIONSTABLE_SQL "WITH tables_dumped AS (SELECT e.extname, n.nspname||'.'||c.relname AS relation_name1 FROM pg_extension e, LATERAL unnest(extconfig) AS toid JOIN pg_class c on c.oid=toid JOIN pg_namespace n on n.oid=c.relnamespace) SELECT e.extname AS extension_name, relation_name2, tables_dumped.extname IS NOT NULL AS to_be_dumped FROM pg_catalog.pg_depend d JOIN pg_catalog.pg_extension e ON e.oid=d.refobjid, LATERAL pg_catalog.pg_describe_object(d.classid, d.objid, 0), LATERAL substr(pg_describe_object, case when pg_describe_object like 'table %' then length('table ') else length('sequence ') end + 1) AS relation_name2 LEFT JOIN tables_dumped ON tables_dumped.relation_name1=relation_name2 WHERE d.refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND d.deptype = 'e' AND (pg_describe_object like 'table %' OR pg_describe_object like 'sequence %') ORDER BY 1,2,3" 70 | 71 | #define KINDS_SIZE_TITLE "Number and size per relations kinds" 72 | #define KINDS_SIZE_SQL "SELECT nspname, relkind, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) FROM pg_class c JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1,2 ORDER BY 1,2" 73 | 74 | #define DEPENDENCIES_TITLE "Dependencies" 75 | #define DEPENDENCIES_SQL "with etypes as ( select classid::regclass, objid, deptype, e.extname from pg_depend join pg_extension e on refclassid = 'pg_extension'::regclass and refobjid = e.oid where classid = 'pg_type'::regclass ) select etypes.extname, etypes.objid::regtype as type, n.nspname as schema, c.relname as table, attname as column from pg_depend join etypes on etypes.classid = pg_depend.refclassid and etypes.objid = pg_depend.refobjid join pg_class c on c.oid = pg_depend.objid join pg_namespace n on n.oid = c.relnamespace join pg_attribute attr on attr.attrelid = pg_depend.objid and attr.attnum = pg_depend.objsubid where pg_depend.classid = 'pg_class'::regclass" 76 | 77 | #define KINDS_IN_CACHE_TITLE "Relation kinds in cache" 78 | #define KINDS_IN_CACHE_SQL "select relkind, pg_size_pretty(count(*)*8192) from pg_buffercache bc left join pg_class c on c.relfilenode=bc.relfilenode group by 1 order by count(*) desc" 79 | 80 | #define AM_SIZE_TITLE "Access Methods" 81 | #define AM_SIZE_SQL "select nspname, amname, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) from pg_class c join pg_am a on a.oid=c.relam join pg_namespace n on n.oid=c.relnamespace group by 1, 2 order by 1,2" 82 | 83 | #define TABLEWITHOUTPKEY_TITLE "Tables without primary key" 84 | #define TABLEWITHOUTPKEY_SQL "select ns.nspname, cl.relname from pg_class cl join pg_namespace ns on ns.oid=cl.relnamespace left join pg_constraint co on co.conrelid=cl.oid and co.contype='p' where cl.relkind='r' and ns.nspname not in ('pg_catalog', 
'information_schema') and co.conname is null order by ns.nspname, cl.relname" 85 | 86 | #define BAD_DATATYPE_TITLE "Columns with bad datatype" 87 | #define BAD_DATATYPE_SQL "select n.nspname as \"schema\", c.relname as \"table\", a.attname as \"column\", t.typname as \"type\" from pg_class c join pg_namespace n on n.oid=c.relnamespace join pg_attribute a on a.attrelid=c.oid join pg_type t on t.oid=a.atttypid where n.nspname not like 'pg%' and n.nspname <> 'information_schema' and c.relkind='r' and a.attnum>0 and ((t.typname='varchar' and coalesce(a.atttypmod,0)>4) or (t.typname='money') or (t.typname='timestamp')) order by n.nspname, c.relname, a.attnum" 88 | 89 | #define INDEXTYPE_TITLE "Index by types" 90 | #define INDEXTYPE_SQL "SELECT nspname, count(*) FILTER (WHERE not indisunique AND not indisprimary) as standard, count(*) FILTER (WHERE indisunique AND not indisprimary) as unique, count(*) FILTER (WHERE indisprimary) as primary, count(*) FILTER (WHERE indisexclusion) as exclusion, count(*) FILTER (WHERE indisclustered) as clustered, count(*) FILTER (WHERE indisvalid) as valid FROM pg_index i JOIN pg_class c ON c.oid=i.indexrelid JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1;" 91 | 92 | #define INDEXONTEXT_TITLE "Index and opclass" 93 | #define INDEXONTEXT_SQL "WITH colind AS (SELECT i.indrelid AS oid, i.indrelid::regclass AS tbl, c.relname AS idx, unnest(i.indkey::int4[]) AS num, unnest(i.indclass::int4[]) AS class FROM pg_class c JOIN pg_am a ON a.oid = c.relam JOIN pg_index i ON i.indexrelid = c.oid WHERE c.relkind = 'i' AND c.relname NOT LIKE 'pg%' AND a.amname = 'btree') SELECT colind.tbl AS \"Table\", colind.idx AS \"Index\", a.attname AS \"Column\", t.typname AS \"Type\", oc.opcname AS \"Operator class\", oc.opcdefault AS \"Default?\" FROM colind JOIN pg_attribute a ON a.attrelid = colind.oid AND a.attnum = colind.num JOIN pg_type t ON t.oid = a.atttypid JOIN pg_opclass oc ON oc.oid = colind.class ORDER BY colind.tbl, colind.idx, colind.num" 94 | 95 | #define NBFUNCS_TITLE "User routines" 96 | #define NBFUNCS_SQL "select count(*) from pg_proc where pronamespace=2200 or pronamespace>16383" 97 | 98 | #define FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE "Routines per schema and kind" 99 | #define FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL "select n.nspname, l.lanname, p.prokind, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2, 3 order by 1, 2, 3" 100 | 101 | #define FUNCS_PER_SCHEMA_TITLE "Functions per schema and language" 102 | #define FUNCS_PER_SCHEMA_SQL "select n.nspname, l.lanname, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2 order by 1, 2" 103 | 104 | #define RULES_TITLE "Rules" 105 | #define RULES_SQL "SELECT n.nspname AS schemaname, c.relname AS tablename, r.rulename, pg_get_ruledef(r.oid) AS definition FROM pg_rewrite r JOIN pg_class c ON c.oid = r.ev_class LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE r.rulename <> '_RETURN'::name" 106 | 107 | #define LOBJ_TITLE "Large Objects" 108 | #define LOBJ_SQL "select count(*) from pg_largeobject" 109 | 110 | #define LOBJ_STATS_TITLE "Large Objects Size" 111 | #define LOBJ_STATS_SQL "select reltuples, relpages from pg_class where relname='pg_largeobject'" 112 | 113 | #define RELOPTIONS_TITLE "Relation Options" 114 | #define RELOPTIONS_SQL "select nspname, relkind, relname, reloptions from 
pg_class c join pg_namespace n on n.oid=c.relnamespace where reloptions is not null order by 1, 3, 2" 115 | 116 | #define TOBEFROZEN_TABLES_TITLE "Tables to be frozen" 117 | #define TOBEFROZEN_TABLES_SQL "select count(*) from pg_class where relkind='r' and age(relfrozenxid)>current_setting('autovacuum_freeze_max_age')::integer" 118 | 119 | #define PGFILESETTINGS_TITLE "pg_file_settings" 120 | #define PGFILESETTINGS_SQL "select * from pg_file_settings " 121 | 122 | #define PGHBAFILERULES_TITLE "pg_hba_file_rules" 123 | #define PGHBAFILERULES_SQL "select * from pg_hba_file_rules" 124 | 125 | #define PGIDENTFILEMAPPINGS_TITLE "pg_ident_file_mappings" 126 | #define PGIDENTFILEMAPPINGS_SQL "select * from pg_ident_file_mappings" 127 | 128 | #define PUBLICATIONS_TITLE "Publications" 129 | #define PUBLICATIONS_SQL "select * from pg_publication" 130 | 131 | #define REPSLOTS_TITLE "Replication slots" 132 | #define REPSLOTS_SQL "select * from pg_replication_slots" 133 | 134 | #define SUBSCRIPTIONS_TITLE "Subscriptions" 135 | #define SUBSCRIPTIONS_SQL "select * from pg_subscription" 136 | 137 | #define PGSETTINGS_TITLE "pg_settings" 138 | #define PGSETTINGS_SQL "select * from pg_settings" 139 | 140 | #define TOP10QUERYIDS_SQL "select queryid, calls, total_time, mean_time from pg_stat_statements order by total_time desc limit 10" 141 | 142 | #define TOP10QUERIES_SQL "select queryid, query from pg_stat_statements order by total_time desc limit 10" 143 | 144 | #define PERCENTUSEDINDEXES_TITLE "Percentage usage of indexes" 145 | #define PERCENTUSEDINDEXES_SQL "SELECT relname, CASE idx_scan WHEN 0 THEN 'Insufficient data' ELSE (100 * idx_scan / (seq_scan + idx_scan))::text END percent_of_times_index_used, n_live_tup rows_in_table FROM pg_stat_user_tables ORDER BY n_live_tup DESC" 146 | 147 | #define UNUSEDINDEXES_TITLE "Unused indexes" 148 | #define UNUSEDINDEXES_SQL "select schemaname, count(*) from pg_stat_user_indexes s join pg_index i using (indexrelid) where idx_scan=0 and (not indisunique AND not indisprimary) group by 1;" 149 | 150 | #define REDUNDANTINDEXES_TITLE "Redundant indexes" 151 | #define REDUNDANTINDEXES_SQL "SELECT pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS SIZE, string_agg(idx::text, ', ') AS indexes FROM ( SELECT indexrelid::regclass AS idx, (indrelid::text ||E'\n'|| indclass::text ||E'\n'|| indkey::text ||E'\n'||COALESCE(indexprs::text,'')||E'\n' || COALESCE(indpred::text,'')) AS KEY FROM pg_index) sub GROUP BY KEY HAVING COUNT(*)>1 ORDER BY SUM(pg_relation_size(idx)) DESC" 152 | 153 | #define MINAGE_TITLE "Min age" 154 | #define MINAGE_SQL "SELECT label, age FROM ( select 'Process #'||pid AS label, age(backend_xid) AS age from pg_stat_activity UNION select 'Process #'||pid, age(backend_xmin) from pg_stat_activity UNION select 'Prepared transaction '||gid, age(transaction) from pg_prepared_xacts UNION select 'Replication slot '||slot_name, age(xmin) from pg_replication_slots UNION select 'Replication slot '||slot_name, age(catalog_xmin) from pg_replication_slots) tmp UNION select 'Secondary '||client_addr, age(backend_xmin) FROM pg_stat_replication WHERE backend_xmin IS NOT NULL ORDER BY age DESC;" 155 | 156 | #define NEEDVACUUM_TITLE "Tables needing autoVACUUMs" 157 | #define NEEDVACUUM_SQL "SELECT st.schemaname || '.' 
|| st.relname tablename, st.n_dead_tup dead_tup, round((get_value('autovacuum_vacuum_threshold', c.reloptions, c.relkind) + get_value('autovacuum_vacuum_scale_factor', c.reloptions, c.relkind ) * c.reltuples)::numeric,2) max_dead_tup, st.last_autovacuum, count(*) FILTER (WHERE NOT all_visible) AS tobevacuumed_blocks, count(*) AS total_blocks FROM pg_stat_all_tables st, pg_class c, LATERAL pg_visibility_map(st.relid) WHERE c.oid = st.relid AND c.relkind IN ('r','m','t') AND st.n_dead_tup>0 GROUP BY 1,2,3,4" 158 | 159 | #define NEEDANALYZE_TITLE "Tables needing autoANALYZEs" 160 | #define NEEDANALYZE_SQL "SELECT st.schemaname || '.' || st.relname tablename, st.n_mod_since_analyze mod_tup, get_value('autovacuum_analyze_threshold', c.reloptions, c.relkind) + get_value('autovacuum_analyze_scale_factor', c.reloptions, c.relkind) * c.reltuples max_mod_tup, st.last_autoanalyze FROM pg_stat_all_tables st, pg_class c WHERE c.oid = st.relid AND c.relkind IN ('r','m') AND st.n_mod_since_analyze>0" 161 | 162 | #define CREATE_GETVALUE_FUNCTION_SQL "CREATE FUNCTION get_value(param text, reloptions text[], relkind \"char\") RETURNS float AS $$ SELECT coalesce((SELECT option_value FROM pg_options_to_table(reloptions) WHERE option_name = CASE WHEN relkind = 't' THEN 'toast.' ELSE '' END || param), current_setting(param))::float; $$ LANGUAGE sql" 163 | 164 | #define CREATE_BLOATTABLE_VIEW_SQL "CREATE TEMPORARY VIEW bloat_table AS SELECT schemaname, tblname, bs*tblpages AS real_size, (tblpages-est_tblpages)*bs AS extra_size, CASE WHEN tblpages - est_tblpages > 0 THEN 100 * (tblpages - est_tblpages)/tblpages::float ELSE 0 END AS extra_ratio, fillfactor, CASE WHEN tblpages - est_tblpages_ff > 0 THEN (tblpages-est_tblpages_ff)*bs ELSE 0 END AS bloat_size, CASE WHEN tblpages - est_tblpages_ff > 0 THEN 100 * (tblpages - est_tblpages_ff)/tblpages::float ELSE 0 END AS bloat_ratio, is_na FROM ( SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages, ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff, tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na FROM ( SELECT ( 4 + tpl_hdr_size + tpl_data_size + (2*ma) - CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END - CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END) AS tpl_size, bs - page_hdr AS size_per_block, (heappages + toastpages) AS tblpages, heappages, toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na FROM ( SELECT tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS tblname, tbl.reltuples, tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages, coalesce(toast.reltuples, 0) AS toasttuples, coalesce(substring( array_to_string(tbl.reloptions, ' ') FROM 'fillfactor=([0-9]+)')::smallint, 100) AS fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma, 24 AS page_hdr, 23 + CASE WHEN MAX(coalesce(s.null_frac,0)) > 0 THEN ( 7 + count(s.attname) ) / 8 ELSE 0::int END + CASE WHEN bool_or(att.attname = 'oid' and att.attnum < 0) THEN 4 ELSE 0 END AS tpl_hdr_size, sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0) ) AS tpl_data_size, bool_or(att.atttypid = 'pg_catalog.name'::regtype) OR sum(CASE WHEN att.attnum > 0 THEN 1 ELSE 0 END) <> count(s.attname) AS is_na FROM pg_attribute AS att JOIN pg_class AS tbl ON att.attrelid = 
tbl.oid JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace LEFT JOIN pg_stats AS s ON s.schemaname=ns.nspname AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid WHERE NOT att.attisdropped AND tbl.relkind in ('r','m') GROUP BY 1,2,3,4,5,6,7,8,9,10 ORDER BY 2,3) AS s) AS s2) AS s3" 165 | 166 | #define CREATE_BLOATINDEX_VIEW_SQL_1 "CREATE TEMPORARY VIEW bloat_index AS SELECT nspname AS schemaname, tblname, idxname, bs*(relpages)::bigint AS real_size, bs*(relpages-est_pages)::bigint AS extra_size, 100 * (relpages-est_pages)::float / relpages AS extra_ratio, fillfactor, CASE WHEN relpages > est_pages_ff THEN bs*(relpages-est_pages_ff) ELSE 0 END AS bloat_size, 100 * (relpages-est_pages_ff)::float / relpages AS bloat_ratio, is_na FROM ( SELECT coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)/(4+nulldatahdrwidth)::float)), 0) AS est_pages, coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0) AS est_pages_ff, bs, nspname, tblname, idxname, relpages, fillfactor, is_na FROM ( SELECT maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, ( index_tuple_hdr_bm + maxalign - CASE WHEN index_tuple_hdr_bm%maxalign = 0 THEN maxalign ELSE index_tuple_hdr_bm%maxalign END + nulldatawidth + maxalign - CASE WHEN nulldatawidth = 0 THEN 0 WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign ELSE nulldatawidth::integer%maxalign END)::numeric AS nulldatahdrwidth, pagehdr, pageopqdata, is_na FROM ( SELECT n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.idxoid, i.fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version() ~ 'mingw32' OR version() ~ '64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS maxalign, 24 AS pagehdr, 16 AS pageopqdata, CASE WHEN max(coalesce(s.null_frac,0)) = 0 THEN 2 ELSE 2 + (( 32 + 8 - 1 ) / 8) END AS index_tuple_hdr_bm, sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) AS nulldatawidth, max( CASE WHEN i.atttypid = 'pg_catalog.name'::regtype THEN 1 ELSE 0 END ) > 0 AS is_na FROM ( SELECT ct.relname AS tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey, ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor, coalesce(a1.attnum, a2.attnum) AS attnum, coalesce(a1.attname, a2.attname) AS attname, coalesce(a1.atttypid, a2.atttypid) AS atttypid, CASE WHEN a1.attnum IS NULL THEN ic.idxname ELSE ct.relname END AS attrelname FROM ( SELECT idxname, reltuples, relpages, tbloid, idxoid, fillfactor, indkey, pg_catalog.generate_series(1,indnatts) AS attpos " 167 | #define CREATE_BLOATINDEX_VIEW_SQL_2 "FROM ( SELECT ci.relname AS idxname, ci.reltuples, ci.relpages, i.indrelid AS tbloid, i.indexrelid AS idxoid, coalesce(substring( array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) AS fillfactor, i.indnatts, pg_catalog.string_to_array(pg_catalog.textin( pg_catalog.int2vectorout(i.indkey)),' ')::int[] AS indkey FROM pg_catalog.pg_index i JOIN pg_catalog.pg_class ci ON ci.oid = i.indexrelid WHERE ci.relam=(SELECT oid FROM pg_am WHERE amname = 'btree') AND ci.relpages > 0) AS idx_data) AS ic JOIN pg_catalog.pg_class ct ON ct.oid = ic.tbloid LEFT JOIN pg_catalog.pg_attribute a1 ON ic.indkey[ic.attpos] <> 0 AND a1.attrelid = ic.tbloid AND a1.attnum = ic.indkey[ic.attpos] LEFT JOIN pg_catalog.pg_attribute a2 ON ic.indkey[ic.attpos] = 0 AND a2.attrelid = ic.idxoid AND a2.attnum = ic.attpos) i JOIN pg_catalog.pg_namespace 
n ON n.oid = i.relnamespace JOIN pg_catalog.pg_stats s ON s.schemaname = n.nspname AND s.tablename = i.attrelname AND s.attname = i.attname GROUP BY 1,2,3,4,5,6,7,8,9,10,11) AS rows_data_stats) AS rows_hdr_pdg_stats) AS relation_stats" 168 | 169 | #define CREATE_ORPHANEDFILES_VIEW_SQL1 "CREATE TEMPORARY VIEW orphaned_files AS WITH ver AS ( select current_setting('server_version_num') pgversion, v::integer/10000||'.'||mod(v::integer,10000)/100 AS version FROM current_setting('server_version_num') v), tbl_paths AS ( SELECT tbs.oid AS tbs_oid, spcname, 'pg_tblspc/' || tbs.oid || '/' || (SELECT dir FROM pg_ls_dir('pg_tblspc/'||tbs.oid||'/',true,false) dir WHERE dir LIKE E'PG\\_'||ver.version||E'\\_%' ) as tbl_path FROM pg_tablespace tbs, ver WHERE tbs.spcname NOT IN ('pg_default','pg_global')), files AS ( SELECT d.oid AS database_oid, 0 AS tbs_oid, 'base/'||d.oid AS path, file_name AS file_name, substring(file_name from E'[0-9]+' ) AS base_name FROM pg_database d, pg_ls_dir('base/' || d.oid,true,false) AS file_name WHERE d.datname = current_database() UNION ALL SELECT d.oid, tbp.tbs_oid, tbl_path||'/'||d.oid, file_name, (substring(file_name from E'[0-9]+' )) AS base_name FROM pg_database d, tbl_paths tbp, pg_ls_dir(tbp.tbl_path||'/'|| d.oid,true,false) AS file_name WHERE d.datname = current_database()), orphans AS ( SELECT tbs_oid, base_name, file_name, current_setting('data_directory')||'/'||path||'/'||file_name as orphaned_file, pg_filenode_relation (tbs_oid,base_name::oid) as rel_without_pgclass FROM ver, files LEFT JOIN pg_class c ON (c.relfilenode::text=files.base_name OR (c.oid::text = files.base_name and c.relfilenode=0 and c.relname like 'pg_%')) WHERE c.oid IS null AND lower(file_name) NOT LIKE 'pg_%') SELECT orphaned_file, pg_size_pretty((pg_stat_file(orphaned_file)).size) as file_size, (pg_stat_file(orphaned_file)).modification as modification_date, current_database() FROM orphans WHERE rel_without_pgclass IS NULL" 170 | #define CREATE_ORPHANEDFILES_VIEW_SQL2 "CREATE TEMPORARY VIEW orphaned_files AS WITH ver AS ( select current_setting('server_version_num') pgversion, v::integer/10000 AS version FROM current_setting('server_version_num') v), tbl_paths AS ( SELECT tbs.oid AS tbs_oid, spcname, 'pg_tblspc/' || tbs.oid || '/' || (SELECT dir FROM pg_ls_dir('pg_tblspc/'||tbs.oid||'/',true,false) dir WHERE dir LIKE E'PG\\_'||ver.version||E'\\_%' ) as tbl_path FROM pg_tablespace tbs, ver WHERE tbs.spcname NOT IN ('pg_default','pg_global')), files AS ( SELECT d.oid AS database_oid, 0 AS tbs_oid, 'base/'||d.oid AS path, file_name AS file_name, substring(file_name from E'[0-9]+' ) AS base_name FROM pg_database d, pg_ls_dir('base/' || d.oid,true,false) AS file_name WHERE d.datname = current_database() UNION ALL SELECT d.oid, tbp.tbs_oid, tbl_path||'/'||d.oid, file_name, (substring(file_name from E'[0-9]+' )) AS base_name FROM pg_database d, tbl_paths tbp, pg_ls_dir(tbp.tbl_path||'/'|| d.oid,true,false) AS file_name WHERE d.datname = current_database()), orphans AS ( SELECT tbs_oid, base_name, file_name, current_setting('data_directory')||'/'||path||'/'||file_name as orphaned_file, pg_filenode_relation (tbs_oid,base_name::oid) as rel_without_pgclass FROM ver, files LEFT JOIN pg_class c ON (c.relfilenode::text=files.base_name OR (c.oid::text = files.base_name and c.relfilenode=0 and c.relname like 'pg_%')) WHERE c.oid IS null AND lower(file_name) NOT LIKE 'pg_%') SELECT orphaned_file, pg_size_pretty((pg_stat_file(orphaned_file)).size) as file_size, (pg_stat_file(orphaned_file)).modification as 
modification_date, current_database() FROM orphans WHERE rel_without_pgclass IS NULL" 171 | 172 | #define BLOATOVERVIEW_TITLE "Bloat Overview" 173 | #define BLOATOVERVIEW_SQL "SELECT 'Tables'' bloat' AS label, pg_size_pretty(sum(bloat_size)::numeric) AS bloat_size FROM bloat_table UNION SELECT 'Indexes'' bloat', pg_size_pretty(sum(bloat_size)::numeric) FROM bloat_index" 174 | #define TOP20BLOAT_TABLES_TITLE "Top 20 most fragmented tables (over 1MB)" 175 | #define TOP20BLOAT_TABLES_SQL "SELECT * FROM bloat_table WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" 176 | #define TOP20BLOAT_INDEXES_TITLE "Top 20 most fragmented indexes (over 1MB)" 177 | #define TOP20BLOAT_INDEXES_SQL "SELECT * FROM bloat_index WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" 178 | 179 | #define ORPHANEDFILES_TITLE "Orphaned files" 180 | #define ORPHANEDFILES_SQL "SELECT * FROM orphaned_files ORDER BY file_size DESC" 181 | 182 | #define CREATE_SCHEMA "CREATE SCHEMA pgreport" 183 | #define SET_SEARCHPATH "SET search_path TO pgreport" 184 | #define DROP_ALL "DROP FUNCTION get_value(text, text[], \"char\");DROP EXTENSION pg_buffercache;DROP EXTENSION pg_visibility;DROP SCHEMA pgreport" 185 | 186 | -------------------------------------------------------------------------------- /pgwaitevent.c: -------------------------------------------------------------------------------- 1 | /* 2 | * pgwaitevent, a PostgreSQL app to gather statistical information 3 | * on the wait events of a PostgreSQL backend PID. 4 | * 5 | * This software is released under the PostgreSQL Licence. 6 | * 7 | * Guillaume Lelarge, guillaume@lelarge.info, 2019-2025. 8 | * 9 | * pgstats/pgwaitevent.c 10 | */ 11 | 12 | 13 | /* 14 | * PostgreSQL headers 15 | */ 16 | #include "postgres_fe.h" 17 | #include "common/logging.h" 18 | #include "fe_utils/connect_utils.h" 19 | #include "libpq/pqsignal.h" 20 | 21 | /* 22 | * Defines 23 | */ 24 | #define PGWAITEVENT_VERSION "1.4.0" 25 | #define PGWAITEVENT_DEFAULT_LINES 20 26 | #define PGWAITEVENT_DEFAULT_STRING_SIZE 2048 27 | 28 | 29 | /* 30 | * Structs 31 | */ 32 | 33 | /* this is the options structure for command line parameters */ 34 | struct options 35 | { 36 | /* misc */ 37 | bool verbose; 38 | 39 | /* connection parameters */ 40 | char *dbname; 41 | char *hostname; 42 | char *port; 43 | char *username; 44 | 45 | /* version number */ 46 | int major; 47 | int minor; 48 | 49 | /* pid */ 50 | int pid; 51 | 52 | /* include leader and workers PIDs */ 53 | bool includeleaderworkers; 54 | 55 | /* frequency */ 56 | float interval; 57 | 58 | /* query and trace timestamps */ 59 | char *query_start; 60 | char *trace_start; 61 | }; 62 | 63 | 64 | /* 65 | * Global variables 66 | */ 67 | static PGconn *conn; 68 | static struct options *opts; 69 | extern char *optarg; 70 | 71 | 72 | /* 73 | * Function prototypes 74 | */ 75 | static void help(const char *progname); 76 | void get_opts(int, char **); 77 | #ifndef FE_MEMUTILS_H 78 | void *pg_malloc(size_t size); 79 | char *pg_strdup(const char *in); 80 | #endif 81 | void fetch_version(void); 82 | bool backend_minimum_version(int major, int minor); 83 | void build_env(void); 84 | bool active_session(void); 85 | void handle_current_query(void); 86 | void drop_env(void); 87 | static void quit_properly(SIGNAL_ARGS); 88 | 89 | 90 | /* 91 | * Print help message 92 | */ 93 | static void 94 | help(const char *progname) 95 | { 96 | printf("%s gathers every wait event from a specific PID, grouping them by queries.\n\n" 97 | "Usage:\n" 98 | " %s [OPTIONS] PID\n" 99 | 
"\nGeneral options:\n" 100 | " -g include leader and workers (parallel queries) [v13+]\n" 101 | " -i interval (default is 1s)\n" 102 | " -v verbose\n" 103 | " -?|--help show this help, then exit\n" 104 | " -V|--version output version information, then exit\n" 105 | "\nConnection options:\n" 106 | " -h HOSTNAME database server host or socket directory\n" 107 | " -p PORT database server port number\n" 108 | " -U USER connect as specified database user\n" 109 | " -d DBNAME database to connect to\n\n" 110 | "Report bugs to .\n", 111 | progname, progname); 112 | } 113 | 114 | 115 | /* 116 | * Parse command line options and check for some usage errors 117 | */ 118 | void 119 | get_opts(int argc, char **argv) 120 | { 121 | int c; 122 | const char *progname; 123 | 124 | progname = get_progname(argv[0]); 125 | 126 | /* set the defaults */ 127 | opts->verbose = false; 128 | opts->dbname = NULL; 129 | opts->hostname = NULL; 130 | opts->port = NULL; 131 | opts->username = NULL; 132 | opts->pid = 0; 133 | opts->includeleaderworkers = false; 134 | opts->interval = 1; 135 | 136 | /* we should deal quickly with help and version */ 137 | if (argc > 1) 138 | { 139 | if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) 140 | { 141 | help(progname); 142 | exit(0); 143 | } 144 | if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) 145 | { 146 | puts("pgwaitevent " PGWAITEVENT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); 147 | exit(0); 148 | } 149 | } 150 | 151 | /* get options */ 152 | while ((c = getopt(argc, argv, "h:p:U:d:i:gv")) != -1) 153 | { 154 | switch (c) 155 | { 156 | /* specify the database */ 157 | case 'd': 158 | opts->dbname = pg_strdup(optarg); 159 | break; 160 | 161 | /* host to connect to */ 162 | case 'h': 163 | opts->hostname = pg_strdup(optarg); 164 | break; 165 | 166 | /* parallel queries */ 167 | case 'g': 168 | opts->includeleaderworkers = true; 169 | break; 170 | 171 | /* interval */ 172 | case 'i': 173 | opts->interval = atof(optarg); 174 | break; 175 | 176 | /* port to connect to on remote host */ 177 | case 'p': 178 | opts->port = pg_strdup(optarg); 179 | break; 180 | 181 | /* username */ 182 | case 'U': 183 | opts->username = pg_strdup(optarg); 184 | break; 185 | 186 | /* get verbose */ 187 | case 'v': 188 | opts->verbose = true; 189 | break; 190 | 191 | default: 192 | pg_log_error("Try \"%s --help\" for more information.\n", progname); 193 | exit(EXIT_FAILURE); 194 | } 195 | } 196 | 197 | /* get PID to monitor */ 198 | if (optind < argc) 199 | { 200 | opts->pid = atoi(argv[optind]); 201 | } 202 | else 203 | { 204 | pg_log_error("PID required.\n"); 205 | pg_log_info("Try \"%s --help\" for more information.\n", progname); 206 | exit(EXIT_FAILURE); 207 | } 208 | 209 | /* set dbname if unset */ 210 | if (opts->dbname == NULL) 211 | { 212 | /* 213 | * We want to use dbname for possible error reports later, 214 | * and in case someone has set and is using PGDATABASE 215 | * in its environment, preserve that name for later usage 216 | */ 217 | if (!getenv("PGDATABASE")) 218 | opts->dbname = "postgres"; 219 | else 220 | opts->dbname = getenv("PGDATABASE"); 221 | } 222 | } 223 | 224 | 225 | #ifndef FE_MEMUTILS_H 226 | /* 227 | * "Safe" wrapper around malloc(). 
228 | */ 229 | void * 230 | pg_malloc(size_t size) 231 | { 232 | void *tmp; 233 | 234 | /* Avoid unportable behavior of malloc(0) */ 235 | if (size == 0) 236 | size = 1; 237 | tmp = malloc(size); 238 | if (!tmp) 239 | { 240 | pg_log_error("out of memory (pg_malloc)\n"); 241 | exit(EXIT_FAILURE); 242 | } 243 | return tmp; 244 | } 245 | 246 | 247 | /* 248 | * "Safe" wrapper around strdup(). 249 | */ 250 | char * 251 | pg_strdup(const char *in) 252 | { 253 | char *tmp; 254 | 255 | if (!in) 256 | { 257 | pg_log_error("cannot duplicate null pointer (internal error)\n"); 258 | exit(EXIT_FAILURE); 259 | } 260 | tmp = strdup(in); 261 | if (!tmp) 262 | { 263 | pg_log_error("out of memory (pg_strdup)\n"); 264 | exit(EXIT_FAILURE); 265 | } 266 | return tmp; 267 | } 268 | #endif 269 | 270 | 271 | /* 272 | * Fetch PostgreSQL major and minor numbers 273 | */ 274 | void 275 | fetch_version() 276 | { 277 | char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; 278 | PGresult *res; 279 | 280 | /* get the cluster version */ 281 | snprintf(sql, sizeof(sql), "SELECT version()"); 282 | 283 | /* make the call */ 284 | res = PQexec(conn, sql); 285 | 286 | /* check and deal with errors */ 287 | if (!res || PQresultStatus(res) > 2) 288 | { 289 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 290 | pg_log_info("query was: %s", sql); 291 | PQclear(res); 292 | PQfinish(conn); 293 | exit(EXIT_FAILURE); 294 | } 295 | 296 | /* get the only row, and parse it to get major and minor numbers */ 297 | sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); 298 | 299 | /* print version */ 300 | if (opts->verbose) 301 | printf("Detected release: %d.%d\n", opts->major, opts->minor); 302 | 303 | /* cleanup */ 304 | PQclear(res); 305 | } 306 | 307 | 308 | /* 309 | * Compare given major and minor numbers to the one of the connected server 310 | */ 311 | bool 312 | backend_minimum_version(int major, int minor) 313 | { 314 | return opts->major > major || (opts->major == major && opts->minor >= minor); 315 | } 316 | 317 | 318 | /* 319 | * Close the PostgreSQL connection, and quit 320 | */ 321 | static void 322 | quit_properly(SIGNAL_ARGS) 323 | { 324 | drop_env(); 325 | PQfinish(conn); 326 | exit(EXIT_FAILURE); 327 | } 328 | 329 | 330 | /* 331 | * Create function 332 | */ 333 | void 334 | build_env() 335 | { 336 | char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; 337 | PGresult *res; 338 | 339 | /* build the DDL query */ 340 | snprintf(sql, sizeof(sql), 341 | "CREATE TEMPORARY TABLE waitevents (we text, wet text, o integer);\n" 342 | "ALTER TABLE waitevents ADD UNIQUE(we, wet);\n"); 343 | 344 | /* make the call */ 345 | res = PQexec(conn, sql); 346 | 347 | /* check and deal with errors */ 348 | if (!res || PQresultStatus(res) > 2) 349 | { 350 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 351 | pg_log_info("query was: %s", sql); 352 | PQclear(res); 353 | PQfinish(conn); 354 | exit(EXIT_FAILURE); 355 | } 356 | 357 | /* print verbose */ 358 | if (opts->verbose) 359 | printf("Temporary table created\n"); 360 | 361 | /* cleanup */ 362 | PQclear(res); 363 | 364 | /* build the DDL query */ 365 | snprintf(sql, sizeof(sql), "CREATE SCHEMA pgwaitevent;"); 366 | 367 | /* make the call */ 368 | res = PQexec(conn, sql); 369 | 370 | /* check and deal with errors */ 371 | if (!res || PQresultStatus(res) > 2) 372 | { 373 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 374 | pg_log_info("query was: %s", sql); 375 | PQclear(res); 376 | PQfinish(conn); 377 | exit(EXIT_FAILURE); 378 | } 379 | 380 | /* print verbose */ 381 
| if (opts->verbose) 382 | printf("Schema created\n"); 383 | 384 | /* build the DDL query */ 385 | snprintf(sql, sizeof(sql), 386 | "CREATE OR REPLACE FUNCTION pgwaitevent.trace_wait_events_for_pid(p integer, leader boolean, s numeric default 1)\n" 387 | "RETURNS TABLE (wait_event text, wait_event_type text, occurences integer, percent numeric(5,2))\n" 388 | "LANGUAGE plpgsql\n" 389 | "AS $$\n" 390 | "DECLARE\n" 391 | " q text;\n" 392 | " r record;\n" 393 | "BEGIN\n" 394 | " -- check it is a backend\n" 395 | " SELECT query INTO q FROM pg_stat_activity\n" 396 | " WHERE pid=p AND backend_type='client backend' AND state='active';\n" 397 | "\n" 398 | " IF NOT FOUND THEN\n" 399 | " RAISE EXCEPTION 'PID %% doesn''t appear to be an active backend', p\n" 400 | " USING HINT = 'Check the PID and its state';\n" 401 | " END IF;\n" 402 | "\n" 403 | " -- logging\n" 404 | " RAISE LOG 'Tracing PID %%, sampling at %%s', p, s;\n" 405 | " RAISE LOG 'Query is <%%>', q;\n" 406 | "\n" 407 | " -- drop if exists, then create temp table\n" 408 | " TRUNCATE waitevents;\n" 409 | "\n" 410 | " -- loop till the end of the query\n" 411 | " LOOP\n" 412 | " -- get wait event\n" 413 | " IF leader THEN\n" 414 | " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n" 415 | " COALESCE(psa.wait_event_type, '') AS wait_event_type\n" 416 | " INTO r\n" 417 | " FROM pg_stat_activity psa\n" 418 | " WHERE pid=p OR leader_pid=p;\n" 419 | " ELSE\n" 420 | " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n" 421 | " COALESCE(psa.wait_event_type, '') AS wait_event_type\n" 422 | " INTO r\n" 423 | " FROM pg_stat_activity psa\n" 424 | " WHERE pid=p;\n" 425 | " END IF;\n" 426 | "\n" 427 | " -- loop control\n" 428 | " EXIT WHEN r.wait_event = 'ClientRead';\n" 429 | "\n" 430 | " -- update wait events stats\n" 431 | " INSERT INTO waitevents VALUES (r.wait_event, r.wait_event_type, 1)\n" 432 | " ON CONFLICT (we,wet) DO UPDATE SET o = waitevents.o+1;\n" 433 | "\n" 434 | " -- sleep a bit\n" 435 | " PERFORM pg_sleep(s);\n" 436 | " END LOOP;\n" 437 | "\n" 438 | " -- return stats\n" 439 | " RETURN QUERY\n" 440 | " SELECT we, wet, o, (o*100./sum(o) over ())::numeric(5,2)\n" 441 | " FROM waitevents\n" 442 | " ORDER BY o DESC;\n" 443 | "END\n" 444 | "$$;\n"); 445 | 446 | /* make the call */ 447 | res = PQexec(conn, sql); 448 | 449 | /* check and deal with errors */ 450 | if (!res || PQresultStatus(res) > 2) 451 | { 452 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 453 | pg_log_info("query was: %s", sql); 454 | PQclear(res); 455 | PQfinish(conn); 456 | exit(EXIT_FAILURE); 457 | } 458 | 459 | /* print verbose */ 460 | if (opts->verbose) 461 | printf("Function created\n"); 462 | 463 | /* cleanup */ 464 | PQclear(res); 465 | } 466 | 467 | 468 | /* 469 | * Is PID an active session? 
470 | */ 471 | bool 472 | active_session() 473 | { 474 | char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; 475 | PGresult *res; 476 | bool active = false; 477 | 478 | /* build the query */ 479 | snprintf(sql, sizeof(sql), 480 | "SELECT state, query, query_start, now() FROM pg_stat_activity\n" 481 | "WHERE backend_type='client backend'\n" 482 | "AND pid=%d", 483 | opts->pid); 484 | 485 | /* make the call */ 486 | res = PQexec(conn, sql); 487 | 488 | /* check and deal with errors */ 489 | if (!res || PQresultStatus(res) > 2) 490 | { 491 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 492 | pg_log_info("query was: %s", sql); 493 | PQclear(res); 494 | PQfinish(conn); 495 | exit(EXIT_FAILURE); 496 | } 497 | 498 | /* if zero row, then PID is gone */ 499 | if (PQntuples(res) == 0) 500 | { 501 | printf("\nNo more session with PID %d, exiting...\n", opts->pid); 502 | drop_env(); 503 | PQfinish(conn); 504 | exit(2); 505 | } 506 | 507 | /* if one row, we found the good one */ 508 | if (PQntuples(res) == 1) 509 | { 510 | if (!strncmp(PQgetvalue(res, 0, 0), "active", 6)) 511 | { 512 | active = true; 513 | printf("\nNew query: %s\n", PQgetvalue(res, 0, 1)); 514 | opts->query_start = pg_strdup(PQgetvalue(res, 0, 2)); 515 | opts->trace_start = pg_strdup(PQgetvalue(res, 0, 3)); 516 | } 517 | } 518 | 519 | /* this also means that in case of multiple rows, we treat it as no rows */ 520 | 521 | /* cleanup */ 522 | PQclear(res); 523 | 524 | return active; 525 | } 526 | 527 | 528 | /* 529 | * Handle query 530 | */ 531 | void 532 | handle_current_query() 533 | { 534 | char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; 535 | PGresult *workers_res; 536 | PGresult *trace_res; 537 | PGresult *duration_res; 538 | int nrows; 539 | int row; 540 | int nworkers = 0; 541 | 542 | if (opts->includeleaderworkers) 543 | { 544 | /* build the workers query if the user asked to include leader and workers */ 545 | snprintf(sql, sizeof(sql), "SELECT count(*) FROM pg_stat_activity " 546 | "WHERE pid=%d OR leader_pid=%d", 547 | opts->pid, opts->pid); 548 | 549 | /* execute it */ 550 | workers_res = PQexec(conn, sql); 551 | 552 | /* check and deal with errors */ 553 | if (!workers_res || PQresultStatus(workers_res) > 2) 554 | { 555 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 556 | pg_log_info("query was: %s", sql); 557 | PQclear(workers_res); 558 | PQfinish(conn); 559 | exit(EXIT_FAILURE); 560 | } 561 | 562 | /* get the number of leader and workers */ 563 | nworkers = atoi(PQgetvalue(workers_res, 0, 0)); 564 | 565 | /* clean up */ 566 | PQclear(workers_res); 567 | } 568 | 569 | /* build the trace query */ 570 | snprintf(sql, sizeof(sql), "SELECT * FROM pgwaitevent.trace_wait_events_for_pid(%d, %s, %f);", 571 | opts->pid, opts->includeleaderworkers ? 
"'t'" : "'f'", opts->interval); 572 | 573 | /* execute it */ 574 | trace_res = PQexec(conn, sql); 575 | 576 | /* check and deal with errors */ 577 | if (!trace_res || PQresultStatus(trace_res) > 2) 578 | { 579 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 580 | pg_log_info("query was: %s", sql); 581 | PQclear(trace_res); 582 | PQfinish(conn); 583 | exit(EXIT_FAILURE); 584 | } 585 | 586 | /* build the duration query */ 587 | snprintf(sql, sizeof(sql), "SELECT now()-'%s'::timestamptz, now()-'%s'::timestamptz;", 588 | opts->query_start, opts->trace_start); 589 | 590 | /* execute it */ 591 | duration_res = PQexec(conn, sql); 592 | 593 | /* check and deal with errors */ 594 | if (!duration_res || PQresultStatus(duration_res) > 2) 595 | { 596 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 597 | pg_log_info("query was: %s", sql); 598 | PQclear(duration_res); 599 | PQfinish(conn); 600 | exit(EXIT_FAILURE); 601 | } 602 | 603 | /* show durations */ 604 | (void)printf("Query duration: %s\n", PQgetvalue(duration_res, 0, 0)); 605 | (void)printf("Trace duration: %s\n", PQgetvalue(duration_res, 0, 1)); 606 | 607 | /* show number of workers */ 608 | if (opts->includeleaderworkers) 609 | { 610 | (void)printf("Number of processes: %d\n", nworkers); 611 | } 612 | 613 | /* get the number of rows */ 614 | nrows = PQntuples(trace_res); 615 | 616 | /* print headers */ 617 | (void)printf( 618 | "┌───────────────────────────────────┬───────────┬────────────┬─────────┐\n" 619 | "│ Wait event │ WE type │ Occurences │ Percent │\n" 620 | "├───────────────────────────────────┼───────────┼────────────┼─────────┤\n"); 621 | 622 | /* for each row, print all columns in a row */ 623 | for (row = 0; row < nrows; row++) 624 | { 625 | (void)printf("│ %-33s │ %-9s │ %10ld │ %6.2f │\n", 626 | PQgetvalue(trace_res, row, 0), 627 | PQgetvalue(trace_res, row, 1), 628 | atol(PQgetvalue(trace_res, row, 2)), 629 | atof(PQgetvalue(trace_res, row, 3)) 630 | ); 631 | } 632 | 633 | /* print footers */ 634 | (void)printf( 635 | "└───────────────────────────────────┴───────────┴────────────┴─────────┘\n"); 636 | 637 | /* cleanup */ 638 | PQclear(duration_res); 639 | PQclear(trace_res); 640 | } 641 | 642 | 643 | /* 644 | * Drop env 645 | */ 646 | void 647 | drop_env() 648 | { 649 | char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; 650 | PGresult *res; 651 | 652 | /* no need to drop the temp table */ 653 | 654 | /* drop function */ 655 | snprintf(sql, sizeof(sql), 656 | "DROP FUNCTION pgwaitevent.trace_wait_events_for_pid(integer, boolean, numeric)"); 657 | 658 | /* make the call */ 659 | res = PQexec(conn, sql); 660 | 661 | /* check and deal with errors */ 662 | if (!res || PQresultStatus(res) > 2) 663 | { 664 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 665 | pg_log_info("query was: %s", sql); 666 | PQclear(res); 667 | PQfinish(conn); 668 | exit(EXIT_FAILURE); 669 | } 670 | 671 | /* print verbose */ 672 | if (opts->verbose) 673 | printf("Function dropped\n"); 674 | 675 | /* drop function */ 676 | snprintf(sql, sizeof(sql), 677 | "DROP SCHEMA pgwaitevent"); 678 | 679 | /* make the call */ 680 | res = PQexec(conn, sql); 681 | 682 | /* check and deal with errors */ 683 | if (!res || PQresultStatus(res) > 2) 684 | { 685 | pg_log_error("query failed: %s", PQerrorMessage(conn)); 686 | pg_log_info("query was: %s", sql); 687 | PQclear(res); 688 | PQfinish(conn); 689 | exit(EXIT_FAILURE); 690 | } 691 | 692 | /* print verbose */ 693 | if (opts->verbose) 694 | printf("Schema dropped\n"); 695 | 696 | /* cleanup */ 697 | 
PQclear(res); 698 | } 699 | 700 | 701 | /* 702 | * Main function 703 | */ 704 | int 705 | main(int argc, char **argv) 706 | { 707 | const char *progname; 708 | ConnParams cparams; 709 | 710 | /* 711 | * If the user stops the program, 712 | * quit nicely. 713 | */ 714 | pqsignal(SIGINT, quit_properly); 715 | 716 | /* Initialize the logging interface */ 717 | pg_logging_init(argv[0]); 718 | 719 | /* Get the program name */ 720 | progname = get_progname(argv[0]); 721 | 722 | /* Allocate the options struct */ 723 | opts = (struct options *) pg_malloc(sizeof(struct options)); 724 | 725 | /* Parse the options */ 726 | get_opts(argc, argv); 727 | 728 | /* Set the connection struct */ 729 | cparams.pghost = opts->hostname; 730 | cparams.pgport = opts->port; 731 | cparams.pguser = opts->username; 732 | cparams.dbname = opts->dbname; 733 | cparams.prompt_password = TRI_DEFAULT; 734 | cparams.override_dbname = NULL; 735 | 736 | /* Connect to the database */ 737 | conn = connectDatabase(&cparams, progname, false, false, false); 738 | 739 | /* Fetch version */ 740 | fetch_version(); 741 | 742 | /* Check options */ 743 | if (opts->includeleaderworkers && !backend_minimum_version(13, 0)) 744 | { 745 | pg_log_error("You need at least v13 to include workers' wait events."); 746 | exit(EXIT_FAILURE); 747 | } 748 | 749 | /* Create the trace_wait_events_for_pid function */ 750 | build_env(); 751 | 752 | /* show what we're doing */ 753 | printf("Tracing wait events for PID %d, sampling at %.3fs, %s\n", 754 | opts->pid, 755 | opts->interval, 756 | opts->includeleaderworkers ? "including leader and workers" : "PID only"); 757 | 758 | while(true) 759 | { 760 | if (active_session()) 761 | { 762 | /* Handle query currently executed */ 763 | handle_current_query(); 764 | } 765 | 766 | /* wait 100ms */ 767 | (void)usleep(100000); 768 | } 769 | 770 | /* Drop the function */ 771 | drop_env(); 772 | PQfinish(conn); 773 | return 0; 774 | } 775 | --------------------------------------------------------------------------------
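
For a quick, one-off look at the data pgwaitevent samples in its loop, the same information can be read straight from pg_stat_activity in psql. This is only an illustrative sketch, not part of the files above: 12345 is a placeholder PID, and the leader_pid filter (the behavior behind the -g option) requires PostgreSQL 13 or later.

```sql
-- Snapshot the current wait event of one backend and, on v13+, of its
-- parallel workers. 12345 is a placeholder PID.
SELECT pid, leader_pid, state,
       coalesce(wait_event_type, '') AS wait_event_type,
       coalesce(wait_event, '[Running]') AS wait_event
FROM pg_stat_activity
WHERE pid = 12345 OR leader_pid = 12345;
```

pgwaitevent repeats this kind of probe at the chosen interval and aggregates the counts per query, which is what the trace_wait_events_for_pid function above implements.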