├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── control.tpl ├── data ├── Makefile ├── filter.sed ├── import_queries.sql ├── report.sql ├── report_static.sql ├── report_struct.sql ├── script.sed └── static │ ├── css │ └── style.css │ └── js │ ├── chart.js │ ├── generate.js │ ├── highlight.js │ ├── main.js │ ├── menu.js │ ├── preview.js │ └── utilities.js ├── doc ├── json_schema.md └── pg_profile.md ├── expected ├── create_extension.out ├── drop_extension.out ├── export_import.out ├── kcache_create_extension.out ├── kcache_drop_extension.out ├── kcache_stat_avail.out ├── retention_and_baselines.out ├── samples_and_reports.out ├── server_management.out └── sizes_collection.out ├── grafana ├── README.md ├── pg_profile_activity.json ├── pg_profile_io.json ├── pg_profile_summary.json ├── pg_profile_visualization.json └── pg_profile_waits.json ├── management ├── baseline.sql ├── export.sql ├── internal.sql ├── local_server.sql └── server.sql ├── migration ├── Makefile ├── func_create.sed ├── func_drop.sql └── migration.sql ├── privileges └── pg_profile.sql ├── report ├── Makefile ├── functions │ ├── clusterstat.sql │ ├── dbstat.sql │ ├── dead_mods_ix_unused.sql │ ├── extensions.sql │ ├── functionstat.sql │ ├── indexstat.sql │ ├── kcachestat.sql │ ├── kcachestat_checks.sql │ ├── pg_wait_sampling.sql │ ├── settings.sql │ ├── stat_io.sql │ ├── stat_slru.sql │ ├── statements_checks.sql │ ├── statementstat.sql │ ├── statementstat_dbagg.sql │ ├── subsample.sql │ ├── tablespacestat.sql │ ├── tablestat.sql │ ├── top_io_stat.sql │ └── walstat.sql ├── report.sql ├── reportdiff.sql └── section.sql ├── report_examples ├── pg17.html └── pg17_diff.html ├── sample ├── compat.sql ├── pg_wait_sampling.sql ├── sample.sql ├── sample_pg_stat_statements.sql └── subsample.sql ├── schema ├── Makefile ├── cluster.sql ├── core.sql ├── db.sql ├── extension_versions.sql ├── funcs.sql ├── import.sql ├── indexes.sql ├── pg_wait_sampling.sql ├── reports.sql ├── roles.sql ├── rusage.sql ├── 
settings.sql ├── smpl_timing.sql ├── statements.sql ├── subsample.sql ├── tables.sql └── tablespaces.sql └── sql ├── create_extension.sql ├── drop_extension.sql ├── export_import.sql ├── kcache_create_extension.sql ├── kcache_drop_extension.sql ├── kcache_stat_avail.sql ├── retention_and_baselines.sql ├── samples_and_reports.sql ├── server_management.sql └── sizes_collection.sql /LICENSE: -------------------------------------------------------------------------------- 1 | pg_profile - Postgres historic workload reporting tool 2 | 3 | Copyright (c) 2017-2025 Andrei Zubkov, Evgeniy Sharaev, Maksim Logvinenko 4 | 5 | Permission to use, copy, modify, and distribute this software and its 6 | documentation for any purpose, without fee, and without a written agreement 7 | is hereby granted, provided that the above copyright notice and this 8 | paragraph and the following two paragraphs appear in all copies. 9 | 10 | IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR DIRECT, 11 | INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST 12 | PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF 13 | THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 14 | 15 | THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 16 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 17 | PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND 18 | THE COPYRIGHT HOLDER HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, 19 | UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PGPROFILE_VERSION = 4.8 2 | EXTENSION = pg_profile 3 | 4 | TAR_pkg = $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz 5 | 6 | default: all 7 | 8 | include migration/Makefile 9 | 10 | DATA_built = $(EXTENSION)--$(PGPROFILE_VERSION).sql $(EXTENSION).control $(MIGRATION) 11 | 12 | EXTRA_CLEAN = $(TAR_pkg) $(MIGRATION) $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql $(schema) \ 13 | $(report) data/report_templates.sql 14 | 15 | REGRESS = \ 16 | create_extension \ 17 | server_management \ 18 | samples_and_reports \ 19 | sizes_collection \ 20 | export_import \ 21 | retention_and_baselines \ 22 | drop_extension 23 | 24 | # pg_stat_kcache tests 25 | ifdef USE_KCACHE 26 | REGRESS += \ 27 | kcache_create_extension \ 28 | server_management \ 29 | samples_and_reports \ 30 | sizes_collection \ 31 | kcache_stat_avail \ 32 | export_import \ 33 | retention_and_baselines \ 34 | kcache_drop_extension 35 | endif 36 | 37 | PG_CONFIG ?= pg_config 38 | 39 | ifdef USE_PGXS 40 | PGXS := $(shell $(PG_CONFIG) --pgxs) 41 | include $(PGXS) 42 | else 43 | subdir = contrib/$(EXTENSION) 44 | top_builddir = ../.. 
45 | include $(top_builddir)/src/Makefile.global 46 | include $(top_srcdir)/contrib/contrib-global.mk 47 | endif 48 | 49 | schema = schema/schema.sql 50 | 51 | data = data/import_queries.sql \ 52 | data/report_templates.sql 53 | common = management/internal.sql 54 | adm_funcs = management/baseline.sql \ 55 | management/server.sql \ 56 | management/local_server.sql 57 | export_funcs = \ 58 | management/export.sql 59 | sample = \ 60 | sample/sample_pg_stat_statements.sql \ 61 | sample/pg_wait_sampling.sql \ 62 | sample/sample.sql \ 63 | sample/subsample.sql \ 64 | sample/compat.sql 65 | 66 | report = report/report_build.sql 67 | 68 | grants = \ 69 | privileges/pg_profile.sql 70 | 71 | # Extension script contents 72 | functions = $(common) $(adm_funcs) $(export_funcs) $(sample) $(report) 73 | script = $(schema) $(data) $(functions) $(grants) 74 | 75 | # Manual script contents 76 | functions_man = $(common) $(adm_funcs) $(sample) $(report) 77 | script_man = $(schema) $(functions_man) $(grants) data/report_templates.sql 78 | 79 | # Common sed replacement script 80 | sed_extension = -e 's/{pg_profile}/$(EXTENSION)/; s/{extension_version}/$(PGPROFILE_VERSION)/; /--/,/--/d; /--/d; /--/d' 81 | sed_manual = -e 's/{pg_profile}/$(EXTENSION)/; s/{extension_version}/$(PGPROFILE_VERSION)/; /--/,/--/d; /--/d; /--/d' 82 | 83 | schema/schema.sql: 84 | ${MAKE} -C schema 85 | 86 | data/report_templates.sql: 87 | ${MAKE} -C data 88 | 89 | report/report_build.sql: 90 | ${MAKE} -C report 91 | 92 | sqlfile: $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql 93 | 94 | $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql: $(script) 95 | sed -e 's/SET search_path=@extschema@//' \ 96 | $(sed_manual) \ 97 | $(script_man) \ 98 | -e '1i \\\set ON_ERROR_STOP on' \ 99 | > $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql 100 | 101 | $(EXTENSION).control: control.tpl 102 | sed -e 's/{version}/$(PGPROFILE_VERSION)/' control.tpl > $(EXTENSION).control 103 | 104 | $(EXTENSION)--$(PGPROFILE_VERSION).sql: $(script) 105 
| sed \ 106 | -e '1i \\\echo Use "CREATE EXTENSION $(EXTENSION)" to load this file. \\quit' \ 107 | $(sed_extension) \ 108 | $(script) \ 109 | > $(EXTENSION)--$(PGPROFILE_VERSION).sql 110 | 111 | $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz: sqlfile 112 | tar czf $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql 113 | 114 | $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz: $(DATA_built) 115 | tar czf $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz $(DATA_built) 116 | 117 | tarpkg: $(TAR_pkg) 118 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pg_profile 2 | This extension for PostgreSQL helps you to find out most resource intensive activities in your PostgreSQL databases. 3 | ## Concepts 4 | This extension is based on statistics views of PostgreSQL and contrib extensions *pg_stat_statements* and *pg_stat_kcache*. It is written in pure pl/pgsql and doesn't need any external libraries or software, but PostgreSQL database itself and a cron-like tool performing periodic tasks. Initially developed and tested on PostgreSQL 9.6 extension may be incompatible with earlier releases. 5 | 6 | Historic repository will be created in your database by this extension. This repository will hold statistics "samples" for your postgres clusters. Sample is taken by calling _take_sample()_ function. PostgreSQL doesn't have any job-like engine, so you'll need to use *cron*. 7 | 8 | Periodic samples can help you finding most resource intensive activities in the past. Suppose, you were reported performance degradation several hours ago. Resolving such issue, you can build a report between two samples bounding performance issue period to see load profile of your database. It's worth using a monitoring tool such as Zabbix to know exact time when performance issues was happening. 
9 | 10 | You can take explicit samples before running any batch processing, and after it is done. 11 | 12 | Any time you take a sample, _pg_stat_statements_reset()_ will be called, ensuring you will not lose statements due to reaching *pg_stat_statements.max*. Also, the report will contain a section informing you if the captured statements count in any sample reaches 90% of _pg_stat_statements.max_. A reset performed by the _pg_profile_ extension will affect statistics collected by other monitoring tools from the _pg_stat_statements_ view. 13 | 14 | *pg_profile*, installed in one cluster is able to collect statistics from other clusters, called *servers*. You just need to define some servers, providing names and connection strings and make sure connection can be established to all databases of all defined servers. Now you can track statistics on your standbys from master, or from any other server. Once extension is installed, a *local* server is automatically created - this is a *server* for cluster where *pg_profile* resides. 15 | 16 | Report examples: 17 | * [Regular report from Postgres 17 database](https://zubkov-andrei.github.io/pg_profile/report_examples/pg17.html) 18 | * [Differential report from Postgres 17 database](https://zubkov-andrei.github.io/pg_profile/report_examples/pg17_diff.html) 19 | 20 | ## Grafana dashboard ## 21 | There are some grafana dashboards provided in the grafana folder of the project. They have separate [documentation](grafana/README.md). 22 | 23 | ## Prerequisites 24 | 25 | Although *pg_profile* is usually installed in the target cluster, it also can collect performance data from other clusters. Hence, we have prerequisites for *pg_profile* database, and for *servers*. 26 | 27 | ### pg_profile database prerequisites 28 | 29 | _pg_profile_ extension depends on extensions _plpgsql_ and _dblink_. 
30 | 31 | ### Servers prerequisites 32 | 33 | The only mandatory requirement for server cluster is the ability to connect from pg_profile database using provided server connection string. All other requirements are optional, but they can improve completeness of gathered statistics. 34 | 35 | Consider setting following Statistics Collector parameters: 36 | 37 | ``` 38 | track_activities = on 39 | track_counts = on 40 | track_io_timing = on 41 | track_wal_io_timing = on # Since Postgres 14 42 | track_functions = all/pl 43 | ``` 44 | 45 | If you need statement statistics in reports, then database, mentioned in server connection string must have _pg_stat_statements_ extension installed and configured. Set *pg_stat_statements* parameters to meet your needs (see PostgreSQL documentation): 46 | 47 | * _pg_stat_statements.max_ - low setting for this parameter may cause some statements statistics to be wiped out before sample is taken. Report will warn you if your _pg_stat_statements.max_ seems to be undersized. 48 | * _pg_stat_statements.track = 'top'_ - _all_ value will affect accuracy of _%Total_ fields for statements-related sections of report. 49 | 50 | If CPU and filesystem statistics are needed, consider installing *pg_stat_kcache* extension. 
51 | 52 | ## Supported versions 53 | ## PostgreSQL 54 | * **17** supported since version 4.7 55 | * **16** supported since version 4.3 56 | * **15** supported since version 4.1 57 | * **14** supported since version 0.3.4 58 | * **13** supported since version 0.1.3 59 | * **12** supported since version 0.1.0 60 | ## _pg_stat_statements_ extension 61 | * **1.11** supported since version 4.7 62 | * **1.10** supported since version 4.1 63 | * **1.9** supported since version 4.0 64 | * **1.8** supported since version 0.1.2 65 | ## _pg_stat_kcache_ extension 66 | * **2.3.0** supported since version 4.7 67 | * **2.2.3** supported since version 4.4 68 | * **2.2.2** supported since version 4.3 69 | * **2.2.1** supported since version 4.0 70 | * **2.2.0** supported since version 0.3.1 71 | * **2.1.3** supported since version 0.2.1 72 | 73 | 74 | ## Installation 75 | 76 | ### Step 1 Installation of extension files 77 | 78 | Extract extension files (see [Releases](https://github.com/zubkov-andrei/pg_profile/releases) page) to PostgreSQL extensions location, which is 79 | 80 | ``` 81 | # tar xzf pg_profile-.tar.gz --directory $(pg_config --sharedir)/extension 82 | ``` 83 | 84 | Just make sure you are using appropriate *pg_config*. 85 | 86 | ### Step 2 Creating extensions 87 | 88 | The most easy way is to install everything in public schema of a database: 89 | 90 | ``` 91 | postgres=# CREATE EXTENSION dblink; 92 | postgres=# CREATE EXTENSION pg_stat_statements; 93 | postgres=# CREATE EXTENSION pg_profile; 94 | ``` 95 | 96 | If you want to install *pg_profile* in other schema, just create it, and install extension in that schema: 97 | 98 | ``` 99 | postgres=# CREATE EXTENSION dblink; 100 | postgres=# CREATE EXTENSION pg_stat_statements; 101 | postgres=# CREATE SCHEMA profile; 102 | postgres=# CREATE EXTENSION pg_profile SCHEMA profile; 103 | ``` 104 | 105 | All objects will be created in schema, defined by SCHEMA clause. 
Installation in dedicated schema is the recommended way - the extension will create its own tables, views, sequences and functions. It is a good idea to keep them separate. If you do not want to specify schema qualifier when using module, consider changing _search_path_ setting. 106 | 107 | ### Step 3 Update to new version 108 | 109 | New versions of pg_profile will contain everything necessary to update from any previous one. So, in case of update you will only need to install extension files (see Step 1) and update the extension, like this: 110 | 111 | ``` 112 | postgres=# ALTER EXTENSION pg_profile UPDATE; 113 | ``` 114 | 115 | All your historic data will remain unchanged if possible. 116 | 117 | ## Building and installing pg_profile 118 | 119 | You will need postgresql development packages to build pg_profile. 120 | 121 | ``` 122 | sudo make USE_PGXS=y install && make USE_PGXS=y installcheck 123 | ``` 124 | 125 | If you only need to get the sql-script for manual creation of *pg_profile* objects - it may be useful in case of an RDS installation, do 126 | 127 | ``` 128 | make USE_PGXS=y sqlfile 129 | ``` 130 | 131 | Now you can use pg_profile--{version}.sql as sql script to create pg_profile objects. Such installation will lack extension benefits of PostgreSQL, but you can install it without server file system access. 
132 | 133 | ------ 134 | 135 | Please, read full documentation in doc/pg_profile.md 136 | -------------------------------------------------------------------------------- /control.tpl: -------------------------------------------------------------------------------- 1 | # Profiler extension for PostgreSQL 2 | comment = 'PostgreSQL load profile repository and report builder' 3 | default_version = '{version}' 4 | relocatable = false 5 | requires = 'dblink,plpgsql' 6 | superuser = false 7 | 8 | -------------------------------------------------------------------------------- /data/Makefile: -------------------------------------------------------------------------------- 1 | data_files = \ 2 | report_static.sql \ 3 | report.sql \ 4 | report_struct.sql 5 | 6 | includes = \ 7 | static/js/utilities.js \ 8 | static/js/chart.js \ 9 | static/js/generate.js \ 10 | static/js/highlight.js \ 11 | static/js/menu.js \ 12 | static/js/preview.js \ 13 | static/js/main.js \ 14 | static/css/style.css 15 | 16 | report_templates.sql: $(data_files) $(includes) 17 | sed -f filter.sed \ 18 | static/js/utilities.js \ 19 | static/js/chart.js \ 20 | static/js/generate.js \ 21 | static/js/highlight.js \ 22 | static/js/menu.js \ 23 | static/js/preview.js \ 24 | static/js/main.js \ 25 | > static/js/script.js && \ 26 | sed -f script.sed $(data_files) \ 27 | > report_templates.sql; \ 28 | rm static/js/script.js 29 | -------------------------------------------------------------------------------- /data/filter.sed: -------------------------------------------------------------------------------- 1 | s/\/\*\*.*//g 2 | s/^[[:space:]]*\*.*//g 3 | /^[[:space:]]*$/d -------------------------------------------------------------------------------- /data/import_queries.sql: -------------------------------------------------------------------------------- 1 | /* ==== Version history table data ==== */ 2 | INSERT INTO import_queries_version_order VALUES 3 | ('pg_profile','0.3.1',NULL,NULL), 4 | 
('pg_profile','0.3.2','pg_profile','0.3.1'), 5 | ('pg_profile','0.3.3','pg_profile','0.3.2'), 6 | ('pg_profile','0.3.4','pg_profile','0.3.3'), 7 | ('pg_profile','0.3.5','pg_profile','0.3.4'), 8 | ('pg_profile','0.3.6','pg_profile','0.3.5'), 9 | ('pg_profile','3.8','pg_profile','0.3.6'), 10 | ('pg_profile','3.9','pg_profile','3.8'), 11 | ('pg_profile','4.0','pg_profile','3.9'), 12 | ('pg_profile','4.1','pg_profile','4.0'), 13 | ('pg_profile','4.2','pg_profile','4.1'), 14 | ('pg_profile','4.3','pg_profile','4.2'), 15 | ('pg_profile','4.4','pg_profile','4.3'), 16 | ('pg_profile','4.5','pg_profile','4.4'), 17 | ('pg_profile','4.6','pg_profile','4.5'), 18 | ('pg_profile','4.7','pg_profile','4.6'), 19 | ('pg_profile','4.8','pg_profile','4.7') 20 | ; 21 | -------------------------------------------------------------------------------- /data/report.sql: -------------------------------------------------------------------------------- 1 | /* === report table data === */ 2 | INSERT INTO report(report_id, report_name, report_description, template) 3 | VALUES 4 | (1, 'report', 'Regular single interval report', 'report'), 5 | (2, 'diffreport', 'Differential report on two intervals', 'diffreport') 6 | ; 7 | -------------------------------------------------------------------------------- /data/report_static.sql: -------------------------------------------------------------------------------- 1 | /* === report_static table data === */ 2 | INSERT INTO report_static(static_name, static_text) 3 | VALUES 4 | ('css', $css$ 5 | {style.css} 6 | {static:css_post} 7 | $css$ 8 | ), 9 | ( 10 | 'script_js', $js$ 11 | {script.js} 12 | $js$ 13 | ), 14 | ('report', 15 | '' 16 | '' 17 | '' 18 | '' 19 | 'Postgres profile report ({properties:start1_id} -' 20 | ' {properties:end1_id})' 21 | '
' 22 | '

Postgres profile report

' 23 | '
' 24 | '' 25 | ''), 26 | ('diffreport', 27 | '' 28 | '' 29 | '' 30 | '' 31 | 'Postgres profile differential report (1): ({properties:start1_id} -' 32 | ' {properties:end1_id}) with (2): ({properties:start2_id} -' 33 | ' {properties:end2_id})' 34 | '
' 35 | '

Postgres profile differential report

' 36 | '
' 37 | '' 38 | '') 39 | ; 40 | -------------------------------------------------------------------------------- /data/script.sed: -------------------------------------------------------------------------------- 1 | /{script.js}/{ 2 | r static/js/script.js 3 | d 4 | } 5 | /{style.css}/{ 6 | r static/css/style.css 7 | d 8 | } 9 | -------------------------------------------------------------------------------- /data/static/css/style.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --main-bg-color: #68B8F9; 3 | --main-font-color: black; 4 | --secondary-bg-color: #a4d4fb; 5 | --main-bottom: -20%; 6 | --main-right: 2%; 7 | --main-opacity: 1; 8 | --main-border-radius: 5px; 9 | --main-box-shadow: rgba(0, 0, 0, 0.35) 0 5px 15px; 10 | --main-position: fixed; 11 | --main-width: 500px; 12 | --main-height: 100px; 13 | --main-transition-property: top; 14 | --main-transition-delay: 500ms; 15 | } 16 | 17 | #container { 18 | position: absolute; 19 | top: 40px; 20 | left: 0; 21 | overflow: auto; 22 | margin-left: 10px; 23 | } 24 | 25 | #burger { 26 | display: flex; 27 | width: 10px; 28 | height: 10px; 29 | justify-content: space-around; 30 | } 31 | 32 | #burger div { 33 | background-color: white; 34 | transition: width .2s, height .2s; 35 | } 36 | 37 | #burger.horizontal { 38 | flex-direction: column; 39 | } 40 | 41 | #burger.vertical { 42 | flex-direction: row; 43 | } 44 | 45 | #burger.horizontal div { 46 | width: 10px; 47 | height: 2px; 48 | } 49 | 50 | #burger.vertical div { 51 | width: 2px; 52 | height: 10px; 53 | } 54 | 55 | 56 | html { 57 | scroll-behavior: smooth; 58 | scroll-padding-left: 30%; 59 | margin-top: 40px; 60 | } 61 | 62 | h3 { 63 | scroll-margin-top: 60px; 64 | } 65 | 66 | td { 67 | scroll-margin-top: 50px; 68 | } 69 | 70 | tr { 71 | scroll-margin-top: 50px; 72 | } 73 | 74 | a:hover { 75 | text-decoration: none; 76 | } 77 | 78 | #pageContent { 79 | position: fixed; 80 | background-color: white; 81 | box-shadow: 
rgba(0, 0, 0, 0.35) 0 5px 15px; 82 | top: 40px; 83 | left: 0; 84 | bottom: 0; 85 | width: 0; 86 | overflow: auto; 87 | display: flex; 88 | flex-direction: row; 89 | flex-wrap: nowrap; 90 | } 91 | 92 | #pageContent.hidden { 93 | width: 0; 94 | } 95 | #pageContent ul.active { 96 | display: block; 97 | font-family: Monospace, serif; 98 | width: fit-content; 99 | height: fit-content; 100 | } 101 | 102 | #pageContent ul.hidden { 103 | display: none; 104 | } 105 | 106 | #pageContent li.current { 107 | border: 2px solid black; 108 | border-radius: 3px; 109 | } 110 | #pageContent li.hidden { 111 | display: none; 112 | } 113 | #pageContent li.active { 114 | background-color: var(--secondary-bg-color); 115 | border-radius: 3px; 116 | } 117 | 118 | select { 119 | padding: 6px; 120 | margin-top: 8px; 121 | margin-left: 4px; 122 | border: 0; 123 | } 124 | 125 | #commonstat { 126 | display: flex; 127 | flex-wrap: wrap; 128 | } 129 | 130 | #commonstat div { 131 | margin-right: 10px; 132 | } 133 | 134 | table { 135 | margin-bottom: 10px; 136 | } 137 | 138 | table, th, td { 139 | border: 1px solid black; 140 | border-collapse: collapse; 141 | padding: 4px; 142 | } 143 | 144 | table tr td.table_obj_value, table tr td.mono { 145 | font-family: Monospace, serif; 146 | } 147 | 148 | table tr td.table_obj_value { 149 | text-align: right; 150 | } 151 | 152 | table tr td.fullScreen { 153 | width: 85% 154 | } 155 | 156 | table tr td.halfScreen { 157 | width: 42%; 158 | } 159 | 160 | table p { 161 | margin: 0.2em; 162 | } 163 | 164 | table tr.new td.switch_bold { 165 | font-weight: bold; 166 | } 167 | 168 | table th { 169 | color: black; 170 | background-color: #ffcc99; 171 | } 172 | 173 | table tr:target, td:target { 174 | border: medium solid limegreen; 175 | } 176 | 177 | table tr:target td:first-of-type, table td:target { 178 | font-weight: bold; 179 | } 180 | 181 | table tr.active td { 182 | background-color: #CCF1FF; 183 | } 184 | 185 | table tr.active td { 186 | background-color: 
#CCF1FF; 187 | } 188 | 189 | table tr.active td { 190 | background-color: #CCF1FF; 191 | } 192 | 193 | table tr.active td { 194 | background-color: #CCF1FF; 195 | } 196 | 197 | table tr.active td:not(.hdr) { 198 | background-color: #CCF1FF; 199 | } 200 | 201 | div.warning { 202 | display: inline-flex; 203 | padding: 10px; 204 | max-width: 300px; 205 | border: 1px solid black; 206 | border-radius: 5px; 207 | margin-bottom: 10px; 208 | background-color: pink; 209 | font-size: 12px; 210 | font-family: Monospace, serif; 211 | } 212 | 213 | div.notice { 214 | display: inline-flex; 215 | padding: 10px; 216 | max-width: 300px; 217 | border: 1px solid black; 218 | border-radius: 5px; 219 | margin-bottom: 10px; 220 | background-color: #CCF1FF; 221 | font-size: 12px; 222 | font-family: Monospace, serif; 223 | } 224 | 225 | div.popup { 226 | background-color: var(--main-bg-color); 227 | bottom: var(--main-bottom); 228 | color: var(--main-font-color); 229 | right: var(--main-right); 230 | opacity: var(--main-opacity); 231 | border-radius: var(--main-border-radius); 232 | position: var(--main-position); 233 | max-width: var(--main-width); 234 | transition: bottom 250ms linear; 235 | z-index: 500 !important; 236 | padding: 10px 20px; 237 | } 238 | 239 | svg rect:hover, svg circle:hover, svg path:hover, a.copyButton svg:hover > rect { 240 | stroke: limegreen; 241 | cursor: pointer; 242 | fill: #D9FFCC; 243 | } 244 | 245 | /* Add a black background color to the top navigation bar */ 246 | #topnav { 247 | position: fixed; 248 | top: 0; 249 | width: 100%; 250 | background-color: #e9e9e9; 251 | z-index: 999; 252 | } 253 | 254 | /* Style the links inside the navigation bar */ 255 | #topnav a { 256 | float: left; 257 | display: block; 258 | background-color: #2196F3; 259 | color: black; 260 | text-align: center; 261 | padding: 14px 16px; 262 | text-decoration: none; 263 | cursor: pointer; 264 | font-size: 14px; 265 | font-family: Monospace, serif; 266 | } 267 | 268 | /* Change the 
color of links on hover */ 269 | #topnav a:hover { 270 | background-color: #ddd; 271 | } 272 | 273 | /* Style the "active" element to highlight the current page */ 274 | #topnav a.active { 275 | color: white; 276 | } 277 | 278 | /* Style the search box inside the navigation bar */ 279 | #topnav input[type=search] { 280 | float: left; 281 | border: none; 282 | margin-top: 8px; 283 | margin-left: 200px; 284 | font-size: 14px; 285 | font-family: Monospace, serif; 286 | -webkit-appearance: searchfield-cancel-button; 287 | } 288 | 289 | /* When the screen is less than 600px wide, stack the links and the search field vertically instead of horizontally */ 290 | @media screen and (max-width: 600px) { 291 | #topnav a, #topnav input[type=search] { 292 | float: none; 293 | display: block; 294 | text-align: left; 295 | width: 100%; 296 | margin: 0; 297 | padding: 14px; 298 | } 299 | 300 | #topnav input[type=search] { 301 | border: 1px solid #ccc; 302 | } 303 | } 304 | 305 | /* ----------- Differential report styles ----------- */ 306 | 307 | td.int1, .int1 td:not(.hdr), table tr.new_i1 { 308 | background-color: #FFEEEE; 309 | } 310 | 311 | td.int2, .int2 td:not(.hdr), table tr.new_i2 { 312 | background-color: #EEEEFF; 313 | } 314 | 315 | table tr.int1:not(.active) td:not(.hdr) { 316 | background-color: #FFEEEE; 317 | } 318 | 319 | table tr.int2:not(.active) td:not(.hdr) { 320 | background-color: #EEEEFF; 321 | } 322 | 323 | table tr.int2 td { 324 | border-top: hidden; 325 | } 326 | 327 | table tr.queryRow { 328 | background-color: white; 329 | } 330 | 331 | table tr.grey { 332 | background-color: #eee; 333 | } 334 | table tr.queryRow { 335 | background-color: white; 336 | } 337 | 338 | table tr.queryRow p, table tr.queryRow a, table tr.planRow p, table tr.planRow a { 339 | display: inline-block; 340 | } 341 | 342 | table tr.queryRow p, table tr.planRow p { 343 | width: 95%; 344 | } 345 | 346 | table tr.queryRow a, table tr.planRow a { 347 | float: right; 348 | } 349 | 350 | 
table tr td.queryText { 351 | font-size: 13px; 352 | } 353 | 354 | table tr.new_i1 td.switch_bold, table tr.new_i2 td.switch_bold, .new td.switch_bold { 355 | font-weight: bold; 356 | } 357 | 358 | table th { 359 | color: black; 360 | background-color: #ffcc99; 361 | } 362 | 363 | .label { 364 | color: grey; 365 | } 366 | 367 | table tr:target, td:target { 368 | border: medium solid limegreen; 369 | } 370 | 371 | table tr:target td:first-of-type, table td:target { 372 | font-weight: bold; 373 | } 374 | 375 | table tr.parent td { 376 | background-color: #D8E8C2; 377 | } 378 | 379 | table tr.child td { 380 | background-color: #BBDD97; 381 | border-top-style: hidden; 382 | } 383 | 384 | table tr.active td { 385 | background-color: #CCF1FF; 386 | } -------------------------------------------------------------------------------- /data/static/js/highlight.js: -------------------------------------------------------------------------------- 1 | /** 2 | * The class is designed to highlight the selected row and other rows 3 | * with matching date attributes. When the user selects any row in the 4 | * report, the Highlighter class determines the set of date attributes 5 | * for the selected row and looks for matches in other rows in the 6 | * report. If the set of attributes completely match, then the lines 7 | * are highlighted. 8 | */ 9 | 10 | class Highlighter { 11 | static transition = 'background-color 70ms'; 12 | 13 | /** 14 | * Method compares each data-attribute in target and in each row 15 | * and add active class if they fit. 
16 | * @param tr 17 | * @param allRows 18 | */ 19 | static toggleClass(tr, allRows) { 20 | /** Firstly, clean all active rows */ 21 | Highlighter.cleanAllActiveClasses(allRows); 22 | 23 | /** If row has dataset and is not active (highlighted) */ 24 | if (Object.keys(tr.dataset).length && !tr.classList.contains('active')) { 25 | allRows.forEach((row) => { 26 | /** Removing data-search attr from dataset */ 27 | let isEqual = Highlighter.isDatasetEqual(tr, row); 28 | if (isEqual) { 29 | /** Remove smart hover and highlight row */ 30 | Highlighter.setBackgroundColorToRow(row, '', this.transition); 31 | row.classList.add('active'); 32 | 33 | /** Highlight menu item */ 34 | let navId = this.getClosestTag(row, 0, 'div').firstChild.id; 35 | if (navId) { 36 | let navLi = document.getElementById(`menu_${navId}`); 37 | if (navLi && !navLi.classList.contains('active')) { 38 | navLi.classList.add('active'); 39 | } 40 | } 41 | } 42 | }); 43 | } 44 | } 45 | 46 | static getAllRows() { 47 | return document.querySelectorAll('tr'); 48 | } 49 | 50 | static getHighlightableRows() { 51 | return document.querySelectorAll('table.highlight tr:not(.queryRow)'); 52 | } 53 | 54 | /** 55 | * The method is designed to determine the parent tag from the target tag 56 | * on which the user clicked. The method returns the parent tag or false if 57 | * not found. 58 | * Works like method htmlNode.closest('tag') but with additional logic 59 | * @param target - the tag the user clicked on. 
60 | * @param curDeep - initial depth (to determine the depth of the recursion) 61 | * @param targetTag 62 | * @returns {*|boolean} 63 | */ 64 | static getClosestTag(target, curDeep, targetTag) { 65 | let tooDeep = curDeep >= 5; 66 | let headOfTable = target.tagName.toLowerCase() === 'th'; 67 | let stillNotRow = target.tagName.toLowerCase() !== targetTag; 68 | 69 | if (tooDeep) { 70 | return false; 71 | } else if (headOfTable) { 72 | return false; 73 | } else if (stillNotRow) { 74 | curDeep++; 75 | return Highlighter.getClosestTag(target.parentNode, curDeep, targetTag); 76 | } else { 77 | return target; 78 | } 79 | } 80 | 81 | static cleanAllActiveClasses(rows) { 82 | rows.forEach(elem => { 83 | if (elem.classList.contains('active')) { 84 | elem.classList.remove('active'); 85 | } 86 | }) 87 | let menu = document.getElementById('sections'); 88 | if (menu) { 89 | let allItems = document.querySelectorAll('li'); 90 | allItems.forEach(item => { 91 | if (item.classList.contains('active')) { 92 | item.classList.remove('active'); 93 | } 94 | }) 95 | } 96 | } 97 | 98 | static getTargetDS(tr) { 99 | let fieldsList = JSON.parse(tr.closest('table').dataset.highlight); 100 | let targetDS = {}; 101 | fieldsList.forEach(field => { 102 | targetDS[field.id] = tr.dataset[field.id] 103 | }) 104 | return targetDS; 105 | } 106 | /** 107 | * If datasets in target and in row are the same - highlight the row. 108 | * @param tr 109 | * @param row 110 | * @returns boolean 111 | */ 112 | static isDatasetEqual(tr, row) { 113 | let targetDataset = Highlighter.getTargetDS(tr); 114 | let rowDataset = row.dataset; 115 | 116 | /** Highlighting statements texts. 
If data-queryid and (data-planid) in statement list match */ 117 | let trIsSqlList = Highlighter.getClosestTag(tr, 0, 'table').id === 'sqllist_t'; 118 | let rowIsSqlList = Highlighter.getClosestTag(row, 0, 'table').id === 'sqllist_t'; 119 | 120 | let isSameQuery = targetDataset.hexqueryid !== undefined 121 | && targetDataset.hexqueryid === rowDataset.hexqueryid 122 | && targetDataset.planid === rowDataset.planid; 123 | 124 | if ((trIsSqlList || rowIsSqlList) && isSameQuery) { 125 | return true; 126 | } 127 | 128 | /** If at least one data in datasets doesn't match */ 129 | for (let data in targetDataset) { 130 | if (targetDataset[data] === '*' && rowDataset[data] !== undefined) { 131 | continue; 132 | } 133 | if (data === 'all') { 134 | continue; 135 | } 136 | if (targetDataset[data] !== rowDataset[data]) { 137 | return false; 138 | } 139 | } 140 | 141 | return true; 142 | } 143 | 144 | static setBackgroundColorToRow(tr, hoverColor, transition) { 145 | tr.querySelectorAll('td').forEach(td => { 146 | td.style.backgroundColor = hoverColor; 147 | td.style.transition = transition; 148 | }) 149 | 150 | let siblings = null; 151 | if (tr.classList.contains('int1')) { 152 | siblings = tr.nextSibling.querySelectorAll('td'); 153 | } else if (tr.classList.contains('int2')) { 154 | siblings = tr.previousSibling.querySelectorAll('td'); 155 | } 156 | if (siblings) { 157 | siblings.forEach(elem => { 158 | elem.style.backgroundColor = hoverColor; 159 | elem.style.transition = transition; 160 | }) 161 | } 162 | } 163 | 164 | static highlight(event, allRows) { 165 | /** If user clicked not on link */ 166 | if (event.target.tagName.toLowerCase() !== 'a') { 167 | let tr = Highlighter.getClosestTag(event.target, 0, 'tr'); 168 | if (tr && Object.keys(tr.dataset).length) { 169 | Highlighter.toggleClass(tr, allRows); 170 | } 171 | } 172 | } 173 | 174 | static smartHover(eventType, event, transition) { 175 | let hoverColor = '#D9FFCC'; 176 | let tr = Highlighter.getClosestTag(event.target, 
0, 'tr'); 177 | 178 | if (tr && !tr.classList.contains('active') && eventType === 'mouseover') { 179 | Highlighter.setBackgroundColorToRow(tr, hoverColor, transition); 180 | } else if (tr && eventType === 'mouseout') { 181 | Highlighter.setBackgroundColorToRow(tr, '', transition); 182 | } 183 | } 184 | 185 | static init() { 186 | const ALL_ROWS = Highlighter.getAllRows(); 187 | const HIGHLIGHTABLE_ROWS = Highlighter.getHighlightableRows(); 188 | 189 | /** Highlighting chosen (and related) row */ 190 | HIGHLIGHTABLE_ROWS.forEach((elem) => { 191 | elem.addEventListener('click', (event) => { 192 | Highlighter.highlight(event, HIGHLIGHTABLE_ROWS); 193 | }); 194 | }) 195 | 196 | /** Smart hover */ 197 | ALL_ROWS.forEach((elem) => { 198 | ['mouseover', 'mouseout'].forEach(eventType => { 199 | elem.addEventListener(eventType, (event) => { 200 | Highlighter.smartHover(eventType, event, this.transition); 201 | }); 202 | }) 203 | }) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /data/static/js/main.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Recursive function for building report. 
Function accepts report data in JSON and parent node (html tag) in which 3 | * report should be inserted 4 | * @param data jsonb object with report data 5 | * @param parentNode node in html-page 6 | * @returns {*} 7 | */ 8 | function buildReport(data, parentNode) { 9 | data.sections.forEach(section => { 10 | let sectionHasNestedSections = ('sections' in section); 11 | let newSection = new BaseSection(section).init(); 12 | 13 | /** Recursive call for building nested sections if exists */ 14 | if (sectionHasNestedSections) { 15 | buildReport(section, newSection); 16 | } 17 | 18 | parentNode.appendChild(newSection); 19 | }) 20 | 21 | return parentNode; 22 | } 23 | 24 | function main() { 25 | 26 | /** Build report sections */ 27 | const CONTAINER = document.getElementById('container'); 28 | buildReport(data, CONTAINER); 29 | 30 | /** Add highlight feature */ 31 | Highlighter.init(); 32 | 33 | /** Add query text and plan feature */ 34 | Previewer.init(); 35 | 36 | /** Add menu feature */ 37 | Menu.init(); 38 | } 39 | 40 | main(); -------------------------------------------------------------------------------- /data/static/js/menu.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Class 3 | */ 4 | class Menu { 5 | static buildPageContent(data, parentNode, visibility) { 6 | data.sections.forEach(section => { 7 | let hasTableCap = ('toc_cap' in section); 8 | let hasNestedSections = ('sections' in section); 9 | let ul = document.createElement('ul'); 10 | let li = document.createElement('li'); 11 | li.classList.add(visibility); 12 | 13 | /** Creating
  • and tags inside
      */ 14 | if (hasTableCap) { 15 | 16 | let a = document.createElement('a'); 17 | 18 | a.innerHTML = section.toc_cap; 19 | a.href = `#${section.sect_id}`; 20 | a.classList.add('anchor'); 21 | 22 | li.setAttribute('id', `menu_${section.sect_id}`); 23 | li.appendChild(a); 24 | 25 | parentNode.appendChild(li); 26 | } 27 | /** Recursive call for building nested content */ 28 | if (hasNestedSections) { 29 | parentNode.appendChild(this.buildPageContent(section, ul, "hidden")); 30 | } 31 | }) 32 | return parentNode; 33 | } 34 | 35 | static addToggleMenuEvent() { 36 | let menuButton = document.getElementById("menuButton"); 37 | let reportContent = document.getElementById('pageContent'); 38 | let container = document.getElementById('container'); 39 | 40 | menuButton.addEventListener('click', function() { 41 | let isMenuActive = menuButton.classList.contains('active'); 42 | 43 | if (isMenuActive) { 44 | menuButton.setAttribute('class', ''); 45 | burger.setAttribute('class', 'horizontal'); 46 | container.style.left = "0"; 47 | reportContent.style.width = "0"; 48 | } else { 49 | menuButton.setAttribute('class', 'active'); 50 | burger.setAttribute('class', 'vertical'); 51 | container.style.left = "25%"; 52 | reportContent.style.width = "25%"; 53 | } 54 | }) 55 | } 56 | 57 | static addSearchFieldEvent() { 58 | let rowsForSearch = document.querySelectorAll('tr[data-all]'); 59 | let input = document.getElementById("inputField"); 60 | let searchParam = document.getElementById('searchParam'); 61 | 62 | input.addEventListener('input', ev => { 63 | let keyword = ev.target.value.trim(); 64 | let searchParam = document.getElementById('searchParam').value; 65 | 66 | /** Calling search only for rows that have data-search attr */ 67 | Utilities.search(rowsForSearch, searchParam, keyword); 68 | }); 69 | 70 | /** Add event for changing searchParam */ 71 | searchParam.addEventListener('change', ev => { 72 | let searchParam = ev.target.value; 73 | let keyword = 
document.getElementById("inputField").value; 74 | 75 | if (keyword) { 76 | /** Calling search only for rows that have data-search attr */ 77 | Utilities.search(rowsForSearch, searchParam, keyword); 78 | } 79 | }) 80 | } 81 | 82 | static addToggleSectionsEvent() { 83 | document.querySelectorAll("#sections li").forEach(section => { 84 | section.addEventListener("click", ev => { 85 | if (ev.target.parentNode.nextSibling && ev.target.parentNode.nextSibling.tagName === "UL") { 86 | ev.target.parentNode.nextSibling.childNodes.forEach(el => { 87 | if (el.classList.contains("hidden")) { 88 | el.classList.remove("hidden"); 89 | el.classList.add("visible"); 90 | } 91 | }) 92 | } 93 | }) 94 | }); 95 | } 96 | 97 | static buildHtml() { 98 | let body = document.querySelector('body'); 99 | 100 | /** Main Menu Button */ 101 | let menuButton = ` 102 | 103 |
      104 |
      105 |
      106 |
      107 |
      108 |
      109 | ` 110 | /** Search Field */ 111 | let searchField = ` 112 |
      113 | 114 | 122 |
      123 | ` 124 | /** Top Navigation */ 125 | let topNavigation = ` 126 |
      127 | ${menuButton} 128 | ${searchField} 129 |
      130 | ` 131 | /** Page Content */ 132 | let pageContent = ` 133 |
      134 |
        135 |
        136 | ` 137 | body.insertAdjacentHTML('beforeend', pageContent); 138 | body.insertAdjacentHTML('afterbegin', topNavigation); 139 | let sections = document.getElementById("sections"); 140 | Menu.buildPageContent(data, sections, "visible"); 141 | } 142 | 143 | static init() { 144 | this.buildHtml(); 145 | this.addToggleMenuEvent(); 146 | this.addSearchFieldEvent(); 147 | this.addToggleSectionsEvent(); 148 | } 149 | } -------------------------------------------------------------------------------- /data/static/js/preview.js: -------------------------------------------------------------------------------- 1 | /** 2 | * The class is designed to instantly preview the query text referenced by the selected row 3 | */ 4 | class Previewer { 5 | static getParentRows() { 6 | return document.querySelectorAll("table.preview tr:not(.header)"); 7 | } 8 | 9 | static preprocessQueryString(queryString) { 10 | let etc = ''; 11 | queryString = queryString.split(',').join(', '); 12 | queryString = queryString.split('+').join(' + '); 13 | queryString = queryString.split('/').join(' / '); 14 | 15 | /** Max length = 1000 chars */ 16 | if (queryString.length > 1000) { 17 | queryString = queryString.substring(0, 1000); 18 | etc = ' ...' 19 | } 20 | 21 | return `${queryString}${etc}` 22 | } 23 | 24 | static queryTextPreviewer(queryCell, queryRow, newRow, queryString) { 25 | queryCell.style.width = `${Math.floor(newRow.offsetWidth * 0.95)}px`; 26 | queryCell.style.fontFamily = 'Monospace'; 27 | queryRow.style.display = ''; 28 | 29 | /** Query text preview */ 30 | if (queryCell.firstChild && queryCell.firstChild.tagName.toLowerCase() !== 'p') { 31 | let preprocessedText = Previewer.preprocessQueryString(queryString); 32 | queryCell.insertAdjacentHTML('afterbegin', `

        ${preprocessedText}

        `); 33 | } 34 | } 35 | 36 | static findQuery(queryRaw) { 37 | // datasetName, dataID, parentRow.dataset[dataID] 38 | let datasetName = queryRaw.dataset["dataset_name"]; 39 | let dataID = queryRaw.dataset["dataset_col_id"]; 40 | let querySet = data.datasets[datasetName]; 41 | let queryId = queryRaw.dataset["dataset_id"] 42 | 43 | for (let i = 0; i < querySet.length; i++) { 44 | if (querySet[i][dataID] === queryId) { 45 | return i 46 | } 47 | } 48 | return -1 49 | } 50 | 51 | static drawCopyButton() { 52 | let button = document.createElement('a'); 53 | button.setAttribute('class', 'copyButton'); 54 | button.setAttribute('title', 'Copy to clipboard'); 55 | 56 | let svg = ` 57 | 58 | 59 | 60 | 61 | ` 62 | 63 | button.insertAdjacentHTML('afterbegin', svg); 64 | 65 | return button; 66 | } 67 | 68 | static init() { 69 | const PARENT_ROWS = Previewer.getParentRows(); 70 | 71 | PARENT_ROWS.forEach(parentRow => { 72 | 73 | /** Determine row and cell with query text */ 74 | let queryCell = document.createElement("td"); 75 | queryCell.setAttribute("colspan", "100"); 76 | let queryRow = document.createElement("tr"); 77 | queryRow.classList.add("queryRow"); 78 | 79 | let preview = JSON.parse(parentRow.closest('table').dataset["preview"])[0] 80 | queryRow.setAttribute("data-dataset_name", preview.dataset); 81 | queryRow.setAttribute("data-dataset_col_id", preview.id); 82 | queryRow.setAttribute("data-dataset_id", parentRow.dataset[preview.id]); 83 | queryRow.style.display = "none"; 84 | queryRow.appendChild(queryCell); 85 | 86 | if (!parentRow.classList.contains("int1")) { 87 | parentRow.insertAdjacentElement("afterend", queryRow); 88 | } 89 | 90 | /** Copy query text into clipboard button */ 91 | let copyQueryTextButton = Previewer.drawCopyButton(); 92 | copyQueryTextButton.setAttribute("class", "copyQueryTextButton"); 93 | queryCell.appendChild(copyQueryTextButton); 94 | 95 | parentRow.addEventListener("click", event => { 96 | if (parentRow.classList.contains('int1')) { 
97 | queryRow = parentRow.nextSibling.nextSibling; 98 | queryCell = queryRow.firstChild; 99 | } 100 | 101 | /** Trigger event only if user clicked not on rect and link*/ 102 | if (event.target.tagName.toLowerCase() !== 'a' && event.target.tagName.toLowerCase() !== 'rect') { 103 | if (queryRow.style.display === 'none') { 104 | let queryIndex = Previewer.findQuery(queryRow); 105 | if (queryIndex >= 0) { 106 | let queryText = data.datasets[preview.dataset][queryIndex].query_texts[0]; 107 | Previewer.queryTextPreviewer(queryCell, queryRow, parentRow, queryText); 108 | copyQueryTextButton.addEventListener("click", event => { 109 | navigator.clipboard.writeText(queryText).then(r => console.log(queryText)); 110 | }); 111 | } 112 | } else { 113 | queryRow.style.display = 'none'; 114 | } 115 | } 116 | }) 117 | }) 118 | } 119 | } -------------------------------------------------------------------------------- /data/static/js/utilities.js: -------------------------------------------------------------------------------- 1 | class Utilities { 2 | /** 3 | * Sorting JSON array and returning sorted clone with array of Objects 4 | * @param data JSON array 5 | * @param key string with key for sorting 6 | * @param direction direction of sorting (1 means ASC, -1 means DESC) 7 | * @returns array of Objects 8 | */ 9 | static sort(data, key, direction) { 10 | return structuredClone(data.sort((a, b) => { 11 | /** Order index */ 12 | if (a[key] < b[key]) { 13 | return -1 * direction; 14 | } else if (a[key] > b[key]) { 15 | return direction; 16 | } else { 17 | return 0; 18 | } 19 | })) 20 | } 21 | 22 | static sum(data, key) { 23 | return data.reduce((partialSum, a) => partialSum + a[key], 0); 24 | } 25 | 26 | /** Advanced filter */ 27 | static filter(data, key) { 28 | if (key.type === "exists") { 29 | if (data.every(obj => key["field"] in obj)) { 30 | return structuredClone(data.filter(obj => obj[key["field"]])); 31 | } 32 | } else if (key.type === "equal") { 33 | if (data.every(obj => 
key["field"] in obj)) { 34 | return structuredClone(data.filter(obj => obj[key["field"]] === key["value"])); 35 | } 36 | } 37 | return data; 38 | } 39 | 40 | static find(data, key, value) { 41 | return structuredClone(data.filter(obj => obj[key] === value)); 42 | } 43 | 44 | /** Limit array of Objects */ 45 | static limit(data, num) { 46 | if (num > 0) { 47 | return structuredClone(data.slice(0, num)); 48 | } 49 | return data; 50 | } 51 | 52 | static getInputField() { 53 | return document.getElementById('inputField'); 54 | } 55 | 56 | static cancelSearchResults(rowsForSearch) { 57 | rowsForSearch.forEach(row => { 58 | row.style.display = ''; 59 | }) 60 | this.getInputField().value = ''; 61 | } 62 | 63 | static searchQueryWithStatistics(rowsForSearch, keyword) { 64 | let foundQueries = Utilities.searchQueryText(keyword); 65 | 66 | rowsForSearch.forEach(row => { 67 | if (row.dataset["hexqueryid"] 68 | && foundQueries[row.dataset["hexqueryid"]]) { 69 | row.style.display = ''; 70 | } else { 71 | row.style.display = 'none'; 72 | if (row.nextSibling && row.nextSibling.classList.contains('queryRow')) { 73 | row.nextSibling.style.display = 'none'; 74 | } 75 | } 76 | }) 77 | } 78 | 79 | static searchQueryText(keyword) { 80 | let foundQueries = {}; 81 | data.datasets.queries.forEach(query => { 82 | /** Search in query texts */ 83 | Object.keys(query).forEach(key => { 84 | query.query_texts.forEach(query_text => { 85 | if (query_text && query_text.toLowerCase().includes(keyword) && !foundQueries[query["hexqueryid"]]) { 86 | foundQueries[query["hexqueryid"]] = true; 87 | } 88 | }) 89 | }) 90 | /** Search in plan texts */ 91 | if (query.plans) { 92 | query.plans.forEach(plan => { 93 | if (plan.plan_text.toLowerCase().includes(keyword) && !foundQueries[query["hexqueryid"]]) { 94 | foundQueries[query["hexqueryid"]] = true; 95 | } 96 | }) 97 | } 98 | }) 99 | return foundQueries; 100 | } 101 | 102 | static searchWithParam(rowsForSearch, keyword, searchParam) { 103 | let 
foundQueries = {}; 104 | /** if we search everywhere, then first we need to 105 | * find the keyword in the query texts, and then 106 | * we display all the lines related to this query 107 | */ 108 | if (searchParam === 'all') { 109 | foundQueries = Utilities.searchQueryText(keyword) 110 | } 111 | 112 | rowsForSearch.forEach(row => { 113 | /** If dataset[searchParam] exists and has substring with keyword */ 114 | if (row.dataset[searchParam] && row.dataset[searchParam].toLowerCase().includes(keyword)) { 115 | row.style.display = ''; 116 | /** If dataset[searchParam] has hexqueryid, then put it into foundQueries collection */ 117 | if (row.dataset["hexqueryid"]) { 118 | foundQueries[row.dataset["hexqueryid"]] = true; 119 | } 120 | } else { 121 | row.style.display = 'none'; 122 | if (row.nextSibling && row.nextSibling.classList.contains('queryRow')) { 123 | row.nextSibling.style.display = 'none'; 124 | } 125 | } 126 | }) 127 | 128 | rowsForSearch.forEach(row => { 129 | /** If a row from a table with query texts or a search parameter data-all */ 130 | if (row.parentNode.id === 'sqllist_t' || searchParam === 'all') { 131 | /** Check foundQueries, if such index exists, then */ 132 | if (foundQueries[row.dataset["hexqueryid"]]) { 133 | row.style.display = ''; 134 | } 135 | } else { 136 | /** Otherwise, we check for a match between data-hexqueryid and the presence of a key phrase in dataset[searchParam]*/ 137 | if (foundQueries[row.dataset["hexqueryid"]] && row.dataset[searchParam].toLowerCase().includes(keyword)) { 138 | row.style.display = ''; 139 | } 140 | } 141 | }) 142 | } 143 | 144 | static search(rowsForSearch, searchParam, keyword) { 145 | keyword = keyword.toLowerCase(); 146 | 147 | if (!keyword) { 148 | Utilities.cancelSearchResults(rowsForSearch); 149 | } else if (searchParam === 'querytext') { 150 | Utilities.searchQueryWithStatistics(rowsForSearch, keyword); 151 | } else { 152 | Utilities.searchWithParam(rowsForSearch, keyword, searchParam); 153 | } 154 | } 
155 | } -------------------------------------------------------------------------------- /expected/create_extension.out: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS profile; 2 | CREATE SCHEMA IF NOT EXISTS dblink; 3 | CREATE SCHEMA IF NOT EXISTS statements; 4 | CREATE EXTENSION dblink SCHEMA dblink; 5 | CREATE EXTENSION pg_stat_statements SCHEMA statements; 6 | CREATE EXTENSION pg_profile SCHEMA profile; 7 | -------------------------------------------------------------------------------- /expected/drop_extension.out: -------------------------------------------------------------------------------- 1 | /* Drop test objects */ 2 | DROP TABLE profile.grow_table; 3 | DROP FUNCTION profile.dummy_func(); 4 | DROP FUNCTION profile.grow_table_trg_f(); 5 | DROP FUNCTION profile.get_ids; 6 | DROP FUNCTION profile.get_sources; 7 | DROP FUNCTION profile.get_report_sections; 8 | DROP FUNCTION profile.check_dataset_queries; 9 | /* Testing drop server with data */ 10 | SELECT * FROM profile.drop_server('local'); 11 | drop_server 12 | ------------- 13 | 1 14 | (1 row) 15 | 16 | DROP EXTENSION pg_profile; 17 | DROP EXTENSION IF EXISTS pg_stat_statements; 18 | DROP EXTENSION IF EXISTS dblink; 19 | DROP SCHEMA profile; 20 | DROP SCHEMA dblink; 21 | DROP SCHEMA statements; 22 | -------------------------------------------------------------------------------- /expected/export_import.out: -------------------------------------------------------------------------------- 1 | SET client_min_messages = WARNING; 2 | /* === Create regular export table === */ 3 | CREATE TABLE profile.export AS SELECT * FROM profile.export_data(); 4 | /* === Create obfuscated export table === */ 5 | CREATE TABLE profile.blind_export AS SELECT * FROM profile.export_data(NULL,NULL,NULL,TRUE); 6 | BEGIN; 7 | /* === rename local server === */ 8 | SELECT profile.rename_server('local','src_local'); 9 | rename_server 10 | --------------- 11 | 1 12 | (1 
row) 13 | 14 | /* === check matching by creation date and system identifier === */ 15 | SELECT profile.import_data('profile.export') > 0; 16 | ?column? 17 | ---------- 18 | f 19 | (1 row) 20 | 21 | /* === change src_local server creation time so it wont match === */ 22 | UPDATE profile.servers 23 | SET 24 | server_created = server_created - '1 minutes'::interval 25 | WHERE server_name = 'src_local'; 26 | /* === perform load === */ 27 | SELECT profile.import_data('profile.export') > 0; 28 | ?column? 29 | ---------- 30 | t 31 | (1 row) 32 | 33 | /* === Integral check - reports must match === */ 34 | \a 35 | \t on 36 | WITH res AS ( 37 | SELECT 38 | profile.get_report('local',1,4) AS imported, 39 | replace( 40 | replace( 41 | profile.get_report('src_local',1,4),'"server_name": "src_local"', 42 | '"server_name": "local"'), 43 | '

        Server name: src_local', 44 | '

        Server name: local' 45 | ) AS exported 46 | ) 47 | SELECT 48 | CASE 49 | WHEN 50 | sha224(convert_to(imported, 'UTF8')) != 51 | sha224(convert_to(exported, 'UTF8')) 52 | THEN 53 | format(E'\n%s\n\n\n%s\n', 54 | imported, 55 | exported 56 | ) 57 | ELSE 58 | 'ok' 59 | END as match 60 | FROM res; 61 | ok 62 | \a 63 | \t off 64 | /* === perform obfuscated load === */ 65 | SELECT profile.drop_server('local'); 66 | drop_server 67 | ------------- 68 | 1 69 | (1 row) 70 | 71 | SELECT profile.import_data('profile.blind_export') > 0; 72 | ?column? 73 | ---------- 74 | t 75 | (1 row) 76 | 77 | /* === check that there is no matching queries === */ 78 | SELECT 79 | count(*) 80 | FROM profile.servers s_src 81 | CROSS JOIN profile.servers s_blind 82 | JOIN profile.stmt_list q_src ON 83 | (q_src.server_id = s_src.server_id) 84 | JOIN profile.stmt_list q_blind ON 85 | (q_src.queryid_md5 = q_blind.queryid_md5 AND q_blind.server_id = s_blind.server_id) 86 | WHERE 87 | s_src.server_name = 'src_local' AND s_blind.server_name = 'local' 88 | AND q_src.query = q_blind.query; 89 | count 90 | ------- 91 | 0 92 | (1 row) 93 | 94 | ROLLBACK; 95 | /* === drop export tables === */ 96 | DROP TABLE profile.export; 97 | DROP TABLE profile.blind_export; 98 | -------------------------------------------------------------------------------- /expected/kcache_create_extension.out: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS profile; 2 | CREATE SCHEMA IF NOT EXISTS dblink; 3 | CREATE SCHEMA IF NOT EXISTS statements; 4 | CREATE SCHEMA IF NOT EXISTS kcache; 5 | CREATE EXTENSION dblink SCHEMA dblink; 6 | CREATE EXTENSION pg_stat_statements SCHEMA statements; 7 | CREATE EXTENSION pg_stat_kcache SCHEMA kcache; 8 | CREATE EXTENSION pg_profile SCHEMA profile; 9 | -------------------------------------------------------------------------------- /expected/kcache_drop_extension.out: 
-------------------------------------------------------------------------------- 1 | /* Drop test objects */ 2 | DROP TABLE profile.grow_table; 3 | DROP FUNCTION profile.dummy_func(); 4 | DROP FUNCTION profile.grow_table_trg_f(); 5 | DROP FUNCTION profile.get_ids; 6 | DROP FUNCTION profile.get_sources; 7 | DROP FUNCTION profile.get_report_sections; 8 | DROP FUNCTION profile.check_dataset_queries; 9 | /* Testing drop server with data */ 10 | SELECT * FROM profile.drop_server('local'); 11 | drop_server 12 | ------------- 13 | 1 14 | (1 row) 15 | 16 | DROP EXTENSION pg_profile; 17 | DROP EXTENSION pg_stat_kcache; 18 | DROP EXTENSION pg_stat_statements; 19 | DROP EXTENSION dblink; 20 | DROP SCHEMA profile; 21 | DROP SCHEMA dblink; 22 | DROP SCHEMA statements; 23 | DROP SCHEMA kcache; 24 | -------------------------------------------------------------------------------- /expected/kcache_stat_avail.out: -------------------------------------------------------------------------------- 1 | SELECT count(1) > 0 FROM profile.sample_kcache; 2 | ?column? 3 | ---------- 4 | t 5 | (1 row) 6 | 7 | SELECT count(1) > 0 FROM profile.sample_kcache_total; 8 | ?column? 
9 | ---------- 10 | t 11 | (1 row) 12 | 13 | -------------------------------------------------------------------------------- /expected/retention_and_baselines.out: -------------------------------------------------------------------------------- 1 | UPDATE profile.samples 2 | SET sample_time = now() - (5 - sample_id) * '1 day'::interval - '10 minutes'::interval 3 | WHERE sample_id <= 5; 4 | SELECT server,result FROM profile.take_sample(); 5 | server | result 6 | --------+-------- 7 | local | OK 8 | (1 row) 9 | 10 | BEGIN; 11 | SELECT profile.delete_samples(); 12 | delete_samples 13 | ---------------- 14 | 5 15 | (1 row) 16 | 17 | SELECT sample FROM profile.show_samples() ORDER BY sample; 18 | sample 19 | -------- 20 | 6 21 | (1 row) 22 | 23 | ROLLBACK; 24 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 25 | count 26 | ------- 27 | 4 28 | (1 row) 29 | 30 | SELECT * FROM profile.set_server_max_sample_age('local',1); 31 | set_server_max_sample_age 32 | --------------------------- 33 | 1 34 | (1 row) 35 | 36 | /* Testing baseline creation */ 37 | SELECT * FROM profile.create_baseline('testline1',2,4); 38 | create_baseline 39 | ----------------- 40 | 1 41 | (1 row) 42 | 43 | BEGIN; 44 | SELECT profile.delete_samples('local',tstzrange( 45 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1), 46 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5), 47 | '[]' 48 | ) 49 | ); 50 | delete_samples 51 | ---------------- 52 | 2 53 | (1 row) 54 | 55 | SELECT sample FROM profile.show_samples() ORDER BY sample; 56 | sample 57 | -------- 58 | 2 59 | 3 60 | 4 61 | 6 62 | (4 rows) 63 | 64 | ROLLBACK; 65 | BEGIN; 66 | SELECT profile.delete_samples(tstzrange( 67 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1), 68 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5), 69 | '[]' 70 | ) 71 | ); 72 | delete_samples 73 | ---------------- 74 | 2 75 | (1 row) 76 | 77 | SELECT sample FROM 
profile.show_samples() ORDER BY sample; 78 | sample 79 | -------- 80 | 2 81 | 3 82 | 4 83 | 6 84 | (4 rows) 85 | 86 | ROLLBACK; 87 | SELECT * FROM profile.create_baseline('testline2',2,4); 88 | create_baseline 89 | ----------------- 90 | 2 91 | (1 row) 92 | 93 | SELECT count(*) FROM profile.baselines; 94 | count 95 | ------- 96 | 2 97 | (1 row) 98 | 99 | SELECT * FROM profile.keep_baseline('testline2',-1); 100 | keep_baseline 101 | --------------- 102 | 1 103 | (1 row) 104 | 105 | /* Testing baseline show */ 106 | SELECT baseline, min_sample, max_sample, keep_until_time IS NULL 107 | FROM profile.show_baselines() 108 | ORDER BY baseline; 109 | baseline | min_sample | max_sample | ?column? 110 | -----------+------------+------------+---------- 111 | testline1 | 2 | 4 | t 112 | testline2 | 2 | 4 | f 113 | (2 rows) 114 | 115 | /* Testing baseline deletion */ 116 | SELECT server,result FROM profile.take_sample(); 117 | server | result 118 | --------+-------- 119 | local | OK 120 | (1 row) 121 | 122 | SELECT count(*) FROM profile.baselines; 123 | count 124 | ------- 125 | 1 126 | (1 row) 127 | 128 | /* Testing samples retention override with baseline */ 129 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 130 | count 131 | ------- 132 | 3 133 | (1 row) 134 | 135 | SELECT * FROM profile.drop_baseline('testline1'); 136 | drop_baseline 137 | --------------- 138 | 1 139 | (1 row) 140 | 141 | /* Testing samples deletion after baseline removed */ 142 | SELECT server,result FROM profile.take_sample(); 143 | server | result 144 | --------+-------- 145 | local | OK 146 | (1 row) 147 | 148 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 149 | count 150 | ------- 151 | 0 152 | (1 row) 153 | 154 | -------------------------------------------------------------------------------- /expected/server_management.out: -------------------------------------------------------------------------------- 1 | /* == Testing 
server management functions == */ 2 | SELECT profile.create_server('srvtest','dbname=postgres host=localhost port=5432', TRUE, NULL, 'Server description 1'); 3 | create_server 4 | --------------- 5 | 2 6 | (1 row) 7 | 8 | SELECT server_id, server_name, server_description, db_exclude, 9 | enabled, connstr, max_sample_age, last_sample_id 10 | FROM profile.servers WHERE server_name != 'local'; 11 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id 12 | -----------+-------------+----------------------+------------+---------+------------------------------------------+----------------+---------------- 13 | 2 | srvtest | Server description 1 | | t | dbname=postgres host=localhost port=5432 | | 0 14 | (1 row) 15 | 16 | SELECT profile.rename_server('srvtest','srvtestrenamed'); 17 | rename_server 18 | --------------- 19 | 1 20 | (1 row) 21 | 22 | SELECT profile.set_server_connstr('srvtestrenamed','dbname=postgres host=localhost port=5433'); 23 | set_server_connstr 24 | -------------------- 25 | 1 26 | (1 row) 27 | 28 | SELECT profile.set_server_description('srvtestrenamed','Server description 2'); 29 | set_server_description 30 | ------------------------ 31 | 1 32 | (1 row) 33 | 34 | SELECT profile.set_server_db_exclude('srvtestrenamed',ARRAY['db1','db2','db3']); 35 | set_server_db_exclude 36 | ----------------------- 37 | 1 38 | (1 row) 39 | 40 | SELECT profile.set_server_max_sample_age('srvtestrenamed',3); 41 | set_server_max_sample_age 42 | --------------------------- 43 | 1 44 | (1 row) 45 | 46 | SELECT server_id, server_name, server_description, db_exclude, 47 | enabled, connstr, max_sample_age, last_sample_id 48 | FROM profile.servers WHERE server_name != 'local'; 49 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id 50 | 
-----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+---------------- 51 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | t | dbname=postgres host=localhost port=5433 | 3 | 0 52 | (1 row) 53 | 54 | SELECT profile.disable_server('srvtestrenamed'); 55 | disable_server 56 | ---------------- 57 | 1 58 | (1 row) 59 | 60 | SELECT server_id, server_name, server_description, db_exclude, 61 | enabled, connstr, max_sample_age, last_sample_id 62 | FROM profile.servers WHERE server_name != 'local'; 63 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id 64 | -----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+---------------- 65 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | f | dbname=postgres host=localhost port=5433 | 3 | 0 66 | (1 row) 67 | 68 | SELECT profile.enable_server('srvtestrenamed'); 69 | enable_server 70 | --------------- 71 | 1 72 | (1 row) 73 | 74 | SELECT server_id, server_name, server_description, db_exclude, 75 | enabled, connstr, max_sample_age, last_sample_id 76 | FROM profile.servers WHERE server_name != 'local'; 77 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id 78 | -----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+---------------- 79 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | t | dbname=postgres host=localhost port=5433 | 3 | 0 80 | (1 row) 81 | 82 | SELECT * FROM profile.show_servers() where server_name != 'local'; 83 | server_name | connstr | enabled | max_sample_age | description 84 | ----------------+------------------------------------------+---------+----------------+---------------------- 85 | srvtestrenamed | 
dbname=postgres host=localhost port=5433 | t | 3 | Server description 2 86 | (1 row) 87 | 88 | SELECT * FROM profile.drop_server('srvtestrenamed'); 89 | drop_server 90 | ------------- 91 | 1 92 | (1 row) 93 | 94 | -------------------------------------------------------------------------------- /expected/sizes_collection.out: -------------------------------------------------------------------------------- 1 | SET client_min_messages = WARNING; 2 | /* Test size collection sampling settings */ 3 | INSERT INTO profile.grow_table (short_str,long_str) 4 | SELECT array_to_string(array 5 | (select 6 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', 7 | trunc(random() * 62)::integer + 1, 1) 8 | FROM generate_series(1, 40)), '' 9 | ) as arr1, 10 | array_to_string(array 11 | (select 12 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', 13 | trunc(random() * 62)::integer + 1, 1) 14 | FROM generate_series(1, 8000)), '' 15 | ) 16 | FROM generate_series(1,5); 17 | /* Test rare relation sizes collection */ 18 | SELECT profile.set_server_size_sampling('local',current_time - interval '10 minute',interval '30 minute',interval '2 minute'); 19 | set_server_size_sampling 20 | -------------------------- 21 | 1 22 | (1 row) 23 | 24 | -- check show_servers_size_sampling() 25 | SELECT server_name,window_duration,sample_interval FROM profile.show_servers_size_sampling(); 26 | server_name | window_duration | sample_interval 27 | -------------+-----------------+----------------- 28 | local | @ 30 mins | @ 2 mins 29 | (1 row) 30 | 31 | -- (sample 4) 32 | SELECT server,result FROM profile.take_sample(); 33 | server | result 34 | --------+-------- 35 | local | OK 36 | (1 row) 37 | 38 | -- Disable rare sizes collection 39 | SELECT profile.set_server_size_sampling('local',null,null,null); 40 | set_server_size_sampling 41 | -------------------------- 42 | 1 43 | (1 row) 44 | 45 | -- (sample 5) 46 | SELECT server,result FROM profile.take_sample(); 47 | 
server | result 48 | --------+-------- 49 | local | OK 50 | (1 row) 51 | 52 | -- check show_samples() 53 | SELECT sample, sizes_collected FROM profile.show_samples() WHERE NOT sizes_collected; 54 | sample | sizes_collected 55 | --------+----------------- 56 | 4 | f 57 | (1 row) 58 | 59 | -- check tables sizes collection 60 | SELECT 61 | sample_id, 62 | count(relsize) > 0 as relsize, 63 | count(relsize_diff) > 0 as relsize_diff, 64 | count(relpages_bytes) > 0 as relpages, 65 | count(relpages_bytes_diff) > 0 as relpages_diff 66 | FROM profile.sample_stat_tables GROUP BY sample_id 67 | ORDER BY sample_id; 68 | sample_id | relsize | relsize_diff | relpages | relpages_diff 69 | -----------+---------+--------------+----------+--------------- 70 | 1 | t | t | t | t 71 | 2 | t | t | t | t 72 | 3 | t | t | t | t 73 | 4 | f | f | t | t 74 | 5 | t | t | t | t 75 | (5 rows) 76 | 77 | -- check indexes sizes collection 78 | SELECT 79 | sample_id, 80 | count(relsize) > 0 as relsize, 81 | count(relsize_diff) > 0 as relsize_diff, 82 | count(relpages_bytes) > 0 as relpages, 83 | count(relpages_bytes_diff) > 0 as relpages_diff 84 | FROM profile.sample_stat_indexes GROUP BY sample_id 85 | ORDER BY sample_id; 86 | sample_id | relsize | relsize_diff | relpages | relpages_diff 87 | -----------+---------+--------------+----------+--------------- 88 | 1 | t | t | t | t 89 | 2 | t | t | t | t 90 | 3 | t | t | t | t 91 | 4 | f | f | t | t 92 | 5 | t | t | t | t 93 | (5 rows) 94 | 95 | -------------------------------------------------------------------------------- /grafana/README.md: -------------------------------------------------------------------------------- 1 | # Grafana dashboards for *pg_profile* # 2 | You can use provided grafana dashboards to visualize summary database load over time. Those dashboards are using *pg_profile* repository as the data source. 
Visualization of *pg_profile* samples should help you to detect time intervals with the specific load profile you want to see in a report. 3 | 4 | The header of a dashboard will provide you with the *get_report()* function call to build a *pg_profile* report on the exact time interval you see in Grafana. 5 | 6 | Dashboards provided: 7 | * **pg_profile_visualization.json** - this is the main dashboard to use with pg_profile repository. It provides summary information for each database 8 | * **pg_profile_summary.json** - this dashboard provides summary information for the whole cluster. Use it if you have a lot of databases in your cluster and visualization in per-database manner seems overloaded 9 | * **pg_profile_waits.json** - detailed wait dashboard. Charts are based on data collected by *pg_profile* with *pg_wait_sampling* extension. 10 | * **pg_profile_io.json** - detailed I/O dashboard. Charts are based on data collected by *pg_profile* from *pg_stat_io* view. This view is available only since PostgreSQL 16. Collection implemented in *pg_profile* 4.3 11 | * **pg_profile_activity.json** - charts of observed session counts by states and session wait events over time. They are based on the data collected in subsamples (added in 4.6). Pie-charts on the bottom of a dashboard show the distribution of observed sessions during a time interval by applications, databases, users and hosts. A single click on any segment of those pie-charts will limit all charts to sessions having the selected value of the corresponding attribute. You can filter sessions by several identifying attributes. 12 | 13 | To use those dashboards you will need a PostgreSQL data source in your grafana installation. This data source should be pointing to the database with the *pg_profile* extension installed. If your *pg_profile* extension is installed in its own schema make sure that the database user used in grafana data source has this schema in *search_path* setting. 
 14 | 15 | ## Grafana dashboard controls ## 16 | Provided dashboards have some useful controls: 17 | * **Database with pg_profile extension** can be used to change the current grafana data source if you have several *pg_profile* instances 18 | * **Server** - the current server in *pg_profile* instance if your *pg_profile* instance has several servers defined 19 | * **Server starts** - toggles grafana annotations showing PostgreSQL database restarts captured by *pg_profile* 20 | * **Configuration loads** - toggles grafana annotations showing PostgreSQL configuration reloads captured by *pg_profile* 21 | * **Interval** - button is used to set dashboard time range that will cover all samples captured by *pg_profile* for the selected *pg_profile* server. Sometimes it should be clicked twice. 22 | * **pg_profile dashboard** - is a dropdown list of all *pg_profile* dashboards. When you switch between *pg_profile* dashboards using this control, the time interval and *pg_profile* server will be preserved. 
23 | -------------------------------------------------------------------------------- /management/baseline.sql: -------------------------------------------------------------------------------- 1 | /* ========= Baseline management functions ========= */ 2 | 3 | CREATE FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$ 4 | DECLARE 5 | baseline_id integer; 6 | sserver_id integer; 7 | BEGIN 8 | SELECT server_id INTO sserver_id FROM servers WHERE server_name=server; 9 | IF sserver_id IS NULL THEN 10 | RAISE 'Server not found'; 11 | END IF; 12 | 13 | INSERT INTO baselines(server_id,bl_name,keep_until) 14 | VALUES (sserver_id,baseline,now() + (days || ' days')::interval) 15 | RETURNING bl_id INTO baseline_id; 16 | 17 | INSERT INTO bl_samples (server_id,sample_id,bl_id) 18 | SELECT server_id,sample_id,baseline_id 19 | FROM samples s JOIN servers n USING (server_id) 20 | WHERE server_id=sserver_id AND sample_id BETWEEN start_id AND end_id; 21 | 22 | RETURN baseline_id; 23 | END; 24 | $$ LANGUAGE plpgsql; 25 | COMMENT ON FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer) IS 'New baseline by ID''s'; 26 | 27 | CREATE FUNCTION create_baseline(IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$ 28 | BEGIN 29 | RETURN create_baseline('local',baseline,start_id,end_id,days); 30 | END; 31 | $$ LANGUAGE plpgsql; 32 | COMMENT ON FUNCTION create_baseline(IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer) IS 'Local server new baseline by ID''s'; 33 | 34 | CREATE FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN time_range tstzrange, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$ 35 | DECLARE 36 | range_ids record; 37 | BEGIN 38 
| SELECT * INTO STRICT range_ids 39 | FROM get_sampleids_by_timerange(get_server_by_name(server), time_range); 40 | 41 | RETURN create_baseline(server,baseline,range_ids.start_id,range_ids.end_id,days); 42 | END; 43 | $$ LANGUAGE plpgsql; 44 | COMMENT ON FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN time_range tstzrange, IN days integer) IS 'New baseline by time range'; 45 | 46 | CREATE FUNCTION create_baseline(IN baseline varchar(25), IN time_range tstzrange, IN days integer = NULL) RETURNS integer 47 | SET search_path=@extschema@ AS $$ 48 | BEGIN 49 | RETURN create_baseline('local',baseline,time_range,days); 50 | END; 51 | $$ LANGUAGE plpgsql; 52 | COMMENT ON FUNCTION create_baseline(IN baseline varchar(25), IN time_range tstzrange, IN days integer) IS 'Local server new baseline by time range'; 53 | 54 | CREATE FUNCTION drop_baseline(IN server name, IN baseline varchar(25)) RETURNS integer SET search_path=@extschema@ AS $$ 55 | DECLARE 56 | del_rows integer; 57 | BEGIN 58 | DELETE FROM baselines WHERE bl_name = baseline AND server_id IN (SELECT server_id FROM servers WHERE server_name = server); 59 | GET DIAGNOSTICS del_rows = ROW_COUNT; 60 | RETURN del_rows; 61 | END; 62 | $$ LANGUAGE plpgsql; 63 | COMMENT ON FUNCTION drop_baseline(IN server name, IN baseline varchar(25)) IS 'Drop baseline on server'; 64 | 65 | CREATE FUNCTION drop_baseline(IN baseline varchar(25)) RETURNS integer SET search_path=@extschema@ AS $$ 66 | BEGIN 67 | RETURN drop_baseline('local',baseline); 68 | END; 69 | $$ LANGUAGE plpgsql; 70 | COMMENT ON FUNCTION drop_baseline(IN baseline varchar(25)) IS 'Drop baseline on local server'; 71 | 72 | CREATE FUNCTION keep_baseline(IN server name, IN baseline varchar(25) = null, IN days integer = null) RETURNS integer SET search_path=@extschema@ AS $$ 73 | DECLARE 74 | upd_rows integer; 75 | BEGIN 76 | UPDATE baselines SET keep_until = now() + (days || ' days')::interval WHERE (baseline IS NULL OR bl_name = baseline) AND 
server_id IN (SELECT server_id FROM servers WHERE server_name = server); 77 | GET DIAGNOSTICS upd_rows = ROW_COUNT; 78 | RETURN upd_rows; 79 | END; 80 | $$ LANGUAGE plpgsql; 81 | COMMENT ON FUNCTION keep_baseline(IN server name, IN baseline varchar(25), IN days integer) IS 'Set new baseline retention on server'; 82 | 83 | CREATE FUNCTION keep_baseline(IN baseline varchar(25) = null, IN days integer = null) RETURNS integer SET search_path=@extschema@ AS $$ 84 | BEGIN 85 | RETURN keep_baseline('local',baseline,days); 86 | END; 87 | $$ LANGUAGE plpgsql; 88 | COMMENT ON FUNCTION keep_baseline(IN baseline varchar(25), IN days integer) IS 'Set new baseline retention on local server'; 89 | 90 | CREATE FUNCTION show_baselines(IN server name = 'local') 91 | RETURNS TABLE ( 92 | baseline varchar(25), 93 | min_sample integer, 94 | max_sample integer, 95 | keep_until_time timestamp (0) with time zone 96 | ) SET search_path=@extschema@ AS $$ 97 | SELECT bl_name as baseline,min_sample_id,max_sample_id, keep_until 98 | FROM baselines b JOIN 99 | (SELECT server_id,bl_id,min(sample_id) min_sample_id,max(sample_id) max_sample_id FROM bl_samples GROUP BY server_id,bl_id) b_agg 100 | USING (server_id,bl_id) 101 | WHERE server_id IN (SELECT server_id FROM servers WHERE server_name = server) 102 | ORDER BY min_sample_id; 103 | $$ LANGUAGE sql; 104 | COMMENT ON FUNCTION show_baselines(IN server name) IS 'Show server baselines (local server assumed if omitted)'; 105 | -------------------------------------------------------------------------------- /management/internal.sql: -------------------------------------------------------------------------------- 1 | /* ========= Internal functions ========= */ 2 | 3 | CREATE FUNCTION get_connstr(IN sserver_id integer, INOUT properties jsonb) 4 | SET search_path=@extschema@ AS $$ 5 | DECLARE 6 | server_connstr text = NULL; 7 | server_host text = NULL; 8 | BEGIN 9 | ASSERT properties IS NOT NULL, 'properties must be not null'; 10 | --Getting 
server_connstr 11 | SELECT connstr INTO server_connstr FROM servers n WHERE n.server_id = sserver_id; 12 | ASSERT server_connstr IS NOT NULL, 'server_id not found'; 13 | /* 14 | When host= parameter is not specified, connection to unix socket is assumed. 15 | Unix socket can be in non-default location, so we need to specify it 16 | */ 17 | IF (SELECT count(*) = 0 FROM regexp_matches(server_connstr,$o$((\s|^)host\s*=)$o$)) AND 18 | (SELECT count(*) != 0 FROM pg_catalog.pg_settings 19 | WHERE name = 'unix_socket_directories' AND boot_val != reset_val) 20 | THEN 21 | -- Get suitable socket name from available list 22 | server_host := (SELECT COALESCE(t[1],t[4]) 23 | FROM pg_catalog.pg_settings, 24 | regexp_matches(reset_val,'("(("")|[^"])+")|([^,]+)','g') AS t 25 | WHERE name = 'unix_socket_directories' AND boot_val != reset_val 26 | -- libpq can't handle sockets with comma in their names 27 | AND position(',' IN COALESCE(t[1],t[4])) = 0 28 | LIMIT 1 29 | ); 30 | -- quoted string processing 31 | IF left(server_host, 1) = '"' AND 32 | right(server_host, 1) = '"' AND 33 | (length(server_host) > 1) 34 | THEN 35 | server_host := replace(substring(server_host,2,length(server_host)-2),'""','"'); 36 | END IF; 37 | -- append host parameter to the connection string 38 | IF server_host IS NOT NULL AND server_host != '' THEN 39 | server_connstr := concat_ws(server_connstr, format('host=%L',server_host), ' '); 40 | ELSE 41 | server_connstr := concat_ws(server_connstr, format('host=%L','localhost'), ' '); 42 | END IF; 43 | END IF; 44 | 45 | properties := jsonb_set(properties, '{properties, server_connstr}', 46 | to_jsonb(server_connstr)); 47 | END; 48 | $$ LANGUAGE plpgsql; 49 | 50 | CREATE FUNCTION get_sampleids_by_timerange(IN sserver_id integer, IN time_range tstzrange) 51 | RETURNS TABLE ( 52 | start_id integer, 53 | end_id integer 54 | ) SET search_path=@extschema@ AS $$ 55 | BEGIN 56 | SELECT min(s1.sample_id),max(s2.sample_id) INTO start_id,end_id FROM 57 | samples s1 JOIN 
58 | /* Here redundant join condition s1.sample_id < s2.sample_id is needed 59 | * Otherwise optimizer is using tstzrange(s1.sample_time,s2.sample_time) && time_range 60 | * as first join condition and some times failes with error 61 | * ERROR: range lower bound must be less than or equal to range upper bound 62 | */ 63 | samples s2 ON (s1.sample_id < s2.sample_id AND s1.server_id = s2.server_id AND s1.sample_id + 1 = s2.sample_id) 64 | WHERE s1.server_id = sserver_id AND tstzrange(s1.sample_time,s2.sample_time) && time_range; 65 | 66 | IF start_id IS NULL OR end_id IS NULL THEN 67 | RAISE 'Suitable samples not found'; 68 | END IF; 69 | 70 | RETURN NEXT; 71 | RETURN; 72 | END; 73 | $$ LANGUAGE plpgsql; 74 | 75 | CREATE FUNCTION get_server_by_name(IN server name) 76 | RETURNS integer SET search_path=@extschema@ AS $$ 77 | DECLARE 78 | sserver_id integer; 79 | BEGIN 80 | SELECT server_id INTO sserver_id FROM servers WHERE server_name=server; 81 | IF sserver_id IS NULL THEN 82 | RAISE 'Server not found.'; 83 | END IF; 84 | 85 | RETURN sserver_id; 86 | END; 87 | $$ LANGUAGE plpgsql; 88 | 89 | CREATE FUNCTION get_baseline_samples(IN sserver_id integer, baseline varchar(25)) 90 | RETURNS TABLE ( 91 | start_id integer, 92 | end_id integer 93 | ) SET search_path=@extschema@ AS $$ 94 | BEGIN 95 | SELECT min(sample_id), max(sample_id) INTO start_id,end_id 96 | FROM baselines JOIN bl_samples USING (bl_id,server_id) 97 | WHERE server_id = sserver_id AND bl_name = baseline; 98 | IF start_id IS NULL OR end_id IS NULL THEN 99 | RAISE 'Baseline not found'; 100 | END IF; 101 | RETURN NEXT; 102 | RETURN; 103 | END; 104 | $$ LANGUAGE plpgsql; 105 | -------------------------------------------------------------------------------- /management/local_server.sql: -------------------------------------------------------------------------------- 1 | SELECT create_server('local','dbname='||current_database()||' port='||current_setting('port')); 2 | 
-------------------------------------------------------------------------------- /migration/Makefile: -------------------------------------------------------------------------------- 1 | MIGRATION = \ 2 | $(EXTENSION)--4.7--$(PGPROFILE_VERSION).sql 3 | 4 | $(EXTENSION)--4.7--4.8.sql: migration/func_drop.sql migration/func_create.sed \ 5 | migration/migration.sql data/report_templates.sql $(functions) 6 | sed \ 7 | -e '1i \\\echo Use "ALTER EXTENSION $(EXTENSION) UPDATE" to load this file. \\quit' \ 8 | $(sed_extension) \ 9 | migration/func_drop.sql \ 10 | schema/extension_versions.sql \ 11 | > $(EXTENSION)--4.7--4.8.sql; 12 | sed -n \ 13 | $(sed_extension) \ 14 | -f migration/func_create.sed \ 15 | $(functions) \ 16 | >> $(EXTENSION)--4.7--4.8.sql; 17 | sed \ 18 | $(sed_extension) \ 19 | migration/migration.sql data/report_templates.sql \ 20 | >> $(EXTENSION)--4.7--4.8.sql; 21 | -------------------------------------------------------------------------------- /migration/func_create.sed: -------------------------------------------------------------------------------- 1 | /^CREATE FUNCTION extension_versions_format(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 2 | /^CREATE FUNCTION collect_obj_stats(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 3 | /^CREATE FUNCTION delete_samples(IN server_id integer.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 4 | /^COMMENT ON FUNCTION delete_samples(.*$/,/';$/p 5 | /^CREATE FUNCTION get_report_context(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 6 | /^CREATE FUNCTION get_report_datasets(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 7 | /^CREATE FUNCTION import_section_data_profile(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 8 | /^CREATE FUNCTION 
sample_dbobj_delta(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 9 | /^CREATE FUNCTION stat_activity_states(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 10 | /^CREATE FUNCTION collect_pg_stat_statements_stats(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 11 | /^CREATE FUNCTION drop_server(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 12 | /^COMMENT ON FUNCTION drop_server(.*';$/p 13 | /^CREATE FUNCTION stat_activity_states_format(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 14 | /^CREATE FUNCTION statements_dbstats(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 15 | /^CREATE FUNCTION get_report(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 16 | /^COMMENT ON FUNCTION get_report(.*$/,/';$/p 17 | /^CREATE FUNCTION get_diffreport(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 18 | /^COMMENT ON FUNCTION get_diffreport(.*$/,/';$/p 19 | /^CREATE FUNCTION get_report_latest(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 20 | /^COMMENT ON FUNCTION get_report_latest.*';$/p 21 | /^CREATE FUNCTION import_data(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 22 | /^COMMENT ON FUNCTION import_data(.*$/,/';$/p 23 | /^CREATE FUNCTION collect_pg_wait_sampling_stats_11(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 24 | /^CREATE FUNCTION take_sample(IN sserver_id integer.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p 25 | /^COMMENT ON FUNCTION take_sample(IN sserver_id integer.*$/,/';$/p 26 | -------------------------------------------------------------------------------- /migration/func_drop.sql: 
-------------------------------------------------------------------------------- 1 | DROP FUNCTION delete_samples(integer, integer, integer); 2 | DROP FUNCTION collect_obj_stats; 3 | DROP FUNCTION get_report_context; 4 | DROP FUNCTION get_report_datasets; 5 | DROP FUNCTION import_section_data_profile; 6 | DROP FUNCTION sample_dbobj_delta; 7 | DROP FUNCTION stat_activity_states; 8 | DROP FUNCTION collect_pg_stat_statements_stats; 9 | DROP FUNCTION drop_server; 10 | DROP FUNCTION stat_activity_states_format(integer, integer, integer, integer, integer); 11 | DROP FUNCTION stat_activity_states_format(integer, integer, integer); 12 | DROP FUNCTION statements_dbstats; 13 | DROP FUNCTION import_data; 14 | DROP FUNCTION collect_pg_wait_sampling_stats_11; 15 | 16 | DROP FUNCTION get_report(integer, integer, integer, text, boolean); 17 | DROP FUNCTION get_report(name, integer, integer, text, boolean); 18 | DROP FUNCTION get_report(integer, integer, text, boolean); 19 | DROP FUNCTION get_report(integer, tstzrange, text, boolean); 20 | DROP FUNCTION get_report(name, tstzrange, text, boolean); 21 | DROP FUNCTION get_report(tstzrange, text, boolean); 22 | DROP FUNCTION get_report(name, varchar, text, boolean); 23 | DROP FUNCTION get_report(varchar, text, boolean); 24 | DROP FUNCTION get_report_latest(name); 25 | 26 | DROP FUNCTION get_diffreport(integer, integer, integer, integer, integer, text, boolean); 27 | DROP FUNCTION get_diffreport(name, integer, integer, integer, integer, text, boolean); 28 | DROP FUNCTION get_diffreport(integer, integer, integer, integer, text, boolean); 29 | DROP FUNCTION get_diffreport(name, varchar, varchar, text, boolean); 30 | DROP FUNCTION get_diffreport(varchar, varchar, text, boolean); 31 | DROP FUNCTION get_diffreport(name, varchar, integer, integer, text, boolean); 32 | DROP FUNCTION get_diffreport(varchar, integer, integer, text, boolean); 33 | DROP FUNCTION get_diffreport(name, integer, integer, varchar, text, boolean); 34 | DROP FUNCTION 
get_diffreport(integer, integer, varchar, text, boolean); 35 | DROP FUNCTION get_diffreport(name, tstzrange, tstzrange, text, boolean); 36 | DROP FUNCTION get_diffreport(name, varchar, tstzrange, text, boolean); 37 | DROP FUNCTION get_diffreport(name, tstzrange, varchar, text, boolean); 38 | 39 | DROP FUNCTION take_sample(integer, boolean); 40 | -------------------------------------------------------------------------------- /migration/migration.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO import_queries_version_order VALUES 2 | ('pg_profile','4.8','pg_profile','4.7') 3 | ; 4 | 5 | DELETE FROM report_struct; 6 | DELETE FROM report; 7 | DELETE FROM report_static; 8 | 9 | GRANT SELECT ON extension_versions TO public; 10 | GRANT SELECT ON v_extension_versions TO public; 11 | -------------------------------------------------------------------------------- /privileges/pg_profile.sql: -------------------------------------------------------------------------------- 1 | -- 2 | GRANT USAGE ON SCHEMA @extschema@ TO public; 3 | -- 4 | GRANT SELECT ON sample_stat_cluster TO public; 5 | GRANT SELECT ON sample_stat_slru TO public; 6 | GRANT SELECT ON sample_stat_wal TO public; 7 | GRANT SELECT ON sample_stat_io TO public; 8 | GRANT SELECT ON sample_stat_archiver TO public; 9 | GRANT SELECT ON indexes_list TO public; 10 | GRANT SELECT ON sample_stat_indexes TO public; 11 | GRANT SELECT ON sample_stat_indexes_total TO public; 12 | GRANT SELECT ON tablespaces_list TO public; 13 | GRANT SELECT ON sample_stat_tablespaces TO public; 14 | GRANT SELECT ON tables_list TO public; 15 | GRANT SELECT ON sample_stat_tables TO public; 16 | GRANT SELECT ON sample_stat_tables_total TO public; 17 | GRANT SELECT ON sample_settings TO public; 18 | GRANT SELECT ON funcs_list TO public; 19 | GRANT SELECT ON sample_stat_user_functions TO public; 20 | GRANT SELECT ON sample_stat_user_func_total TO public; 21 | GRANT SELECT ON sample_stat_database 
TO public; 22 | GRANT SELECT ON sample_statements TO public; 23 | GRANT SELECT ON sample_statements_total TO public; 24 | GRANT SELECT ON sample_kcache TO public; 25 | GRANT SELECT ON sample_kcache_total TO public; 26 | GRANT SELECT ON roles_list TO public; 27 | GRANT SELECT ON wait_sampling_total TO public; 28 | GRANT SELECT (server_id, server_name, server_description, server_created, db_exclude, 29 | enabled, max_sample_age, last_sample_id, size_smp_wnd_start, size_smp_wnd_dur, size_smp_interval) 30 | ON servers TO public; 31 | GRANT SELECT ON samples TO public; 32 | GRANT SELECT ON baselines TO public; 33 | GRANT SELECT ON bl_samples TO public; 34 | GRANT SELECT ON report_static TO public; 35 | GRANT SELECT ON report TO public; 36 | GRANT SELECT ON report_struct TO public; 37 | GRANT SELECT ON extension_versions TO public; 38 | GRANT SELECT ON v_sample_stat_indexes TO public; 39 | GRANT SELECT ON v_sample_stat_tablespaces TO public; 40 | GRANT SELECT ON v_sample_timings TO public; 41 | GRANT SELECT ON v_sample_stat_tables TO public; 42 | GRANT SELECT ON v_sample_settings TO public; 43 | GRANT SELECT ON v_sample_stat_user_functions TO public; 44 | GRANT SELECT ON v_extension_versions TO public; 45 | 46 | -- pg_read_all_stats can see the query texts 47 | GRANT SELECT ON stmt_list TO pg_read_all_stats; 48 | -------------------------------------------------------------------------------- /report/Makefile: -------------------------------------------------------------------------------- 1 | report_files = \ 2 | functions/clusterstat.sql \ 3 | functions/stat_io.sql \ 4 | functions/stat_slru.sql \ 5 | functions/dbstat.sql \ 6 | functions/dead_mods_ix_unused.sql \ 7 | functions/functionstat.sql \ 8 | functions/indexstat.sql \ 9 | functions/kcachestat_checks.sql \ 10 | functions/kcachestat.sql \ 11 | functions/settings.sql \ 12 | functions/statements_checks.sql \ 13 | functions/statementstat_dbagg.sql \ 14 | functions/statementstat.sql \ 15 | 
functions/pg_wait_sampling.sql \ 16 | functions/tablespacestat.sql \ 17 | functions/tablestat.sql \ 18 | functions/top_io_stat.sql \ 19 | functions/walstat.sql \ 20 | functions/subsample.sql \ 21 | functions/extensions.sql \ 22 | section.sql \ 23 | report.sql \ 24 | reportdiff.sql 25 | 26 | report_build.sql: $(report_files) 27 | cat $(report_files) \ 28 | > report_build.sql 29 | -------------------------------------------------------------------------------- /report/functions/dead_mods_ix_unused.sql: -------------------------------------------------------------------------------- 1 | CREATE FUNCTION profile_checkavail_tbl_top_dead(IN sserver_id integer, IN start_id integer, IN end_id integer) 2 | RETURNS BOOLEAN 3 | SET search_path=@extschema@ AS 4 | $$ 5 | SELECT 6 | COUNT(*) > 0 7 | FROM v_sample_stat_tables st 8 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid) 9 | WHERE st.server_id=sserver_id AND NOT sample_db.datistemplate AND sample_id = end_id 10 | -- Min 5 MB in size 11 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2 12 | AND st.n_dead_tup > 0; 13 | $$ LANGUAGE sql; 14 | 15 | CREATE FUNCTION profile_checkavail_tbl_top_mods(IN sserver_id integer, IN start_id integer, IN end_id integer) 16 | RETURNS BOOLEAN 17 | SET search_path=@extschema@ AS 18 | $$ 19 | SELECT 20 | COUNT(*) > 0 21 | FROM v_sample_stat_tables st 22 | -- Database name and existance condition 23 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid) 24 | WHERE st.server_id = sserver_id AND NOT sample_db.datistemplate AND sample_id = end_id 25 | AND st.relkind IN ('r','m') 26 | -- Min 5 MB in size 27 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2 28 | AND n_mod_since_analyze > 0 29 | AND n_live_tup + n_dead_tup > 0; 30 | $$ LANGUAGE sql; 31 | 32 | CREATE FUNCTION top_tbl_last_sample_format(IN sserver_id integer, IN start_id integer, end_id integer) 33 | RETURNS TABLE( 34 | datid oid, 35 | relid oid, 36 | dbname name, 37 | 
tablespacename name, 38 | schemaname name, 39 | relname name, 40 | n_live_tup bigint, 41 | dead_pct numeric, 42 | last_autovacuum text, 43 | n_dead_tup bigint, 44 | n_mod_since_analyze bigint, 45 | mods_pct numeric, 46 | last_autoanalyze text, 47 | relsize_pretty text, 48 | 49 | ord_dead integer, 50 | ord_mod integer 51 | ) 52 | SET search_path=@extschema@ AS $$ 53 | SELECT 54 | datid, 55 | relid, 56 | sample_db.datname AS dbname, 57 | tablespacename, 58 | schemaname, 59 | relname, 60 | 61 | n_live_tup, 62 | n_dead_tup::numeric * 100 / NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) AS dead_pct, 63 | last_autovacuum::text, 64 | n_dead_tup, 65 | n_mod_since_analyze, 66 | n_mod_since_analyze::numeric * 100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) AS mods_pct, 67 | last_autoanalyze::text, 68 | COALESCE( 69 | pg_size_pretty(relsize), 70 | '['||pg_size_pretty(relpages_bytes)||']' 71 | ) AS relsize_pretty, 72 | 73 | CASE WHEN 74 | n_dead_tup > 0 75 | THEN 76 | row_number() OVER (ORDER BY 77 | n_dead_tup*100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) 78 | DESC NULLS LAST, 79 | datid,relid)::integer 80 | ELSE NULL END AS ord_dead, 81 | 82 | CASE WHEN 83 | n_mod_since_analyze > 0 84 | THEN 85 | row_number() OVER (ORDER BY 86 | n_mod_since_analyze*100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) 87 | DESC NULLS LAST, 88 | datid,relid)::integer 89 | ELSE NULL END AS ord_mod 90 | FROM 91 | v_sample_stat_tables st 92 | -- Database name 93 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid) 94 | WHERE 95 | (server_id, sample_id, datistemplate) = (sserver_id, end_id, false) 96 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2 97 | AND COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0) > 0 98 | $$ LANGUAGE sql; 99 | -------------------------------------------------------------------------------- /report/functions/extensions.sql: 
-------------------------------------------------------------------------------- 1 | /*===== Extensions reporting functions =====*/ 2 | CREATE FUNCTION extension_versions_format(IN sserver_id integer, 3 | IN start1_id integer, IN end1_id integer, 4 | IN start2_id integer = NULL, IN end2_id integer = NULL) 5 | RETURNS TABLE ( 6 | dbname name, 7 | extname name, 8 | extversion text, 9 | first_seen text, 10 | last_seen text, 11 | ord_ext integer -- report header ordering 12 | ) 13 | SET search_path=@extschema@ AS $$ 14 | SELECT 15 | evd.dbname, 16 | evd.extname, 17 | evd.extversion, 18 | CASE 19 | WHEN s_first.sample_id IS NOT NULL AND 20 | s_first.sample_id > least(start1_id, start2_id) OR 21 | evd.last_sample_id IS NOT NULL AND 22 | evd.last_sample_id < greatest(end1_id, end2_id) 23 | THEN evd.first_seen::text 24 | ELSE NULL 25 | END as first_seen, 26 | CASE 27 | WHEN s_first.sample_id IS NOT NULL AND 28 | s_first.sample_id > least(start1_id, start2_id) OR 29 | evd.last_sample_id IS NOT NULL AND 30 | evd.last_sample_id < greatest(end1_id, end2_id) 31 | THEN s_last.sample_time::text 32 | ELSE NULL 33 | END as last_seen, 34 | row_number() over (order by evd.extname, evd.dbname, evd.first_seen)::integer as ord_ext 35 | FROM ( 36 | SELECT DISTINCT 37 | ssd.datname as dbname, 38 | ev.extname, 39 | ev.extversion, 40 | ev.first_seen, 41 | ev.last_sample_id 42 | FROM v_extension_versions ev 43 | -- Database name 44 | JOIN sample_stat_database ssd USING (server_id, datid, sample_id) 45 | WHERE 46 | ev.server_id = sserver_id AND 47 | (ev.sample_id BETWEEN start1_id AND end1_id OR 48 | ev.sample_id BETWEEN start2_id AND end2_id) 49 | ) evd 50 | LEFT JOIN samples s_first ON (s_first.server_id, s_first.sample_time) = (sserver_id, evd.first_seen) 51 | LEFT JOIN samples s_last ON (s_last.server_id, s_last.sample_id) = (sserver_id, evd.last_sample_id) 52 | $$ LANGUAGE sql; 53 | -------------------------------------------------------------------------------- 
/report/functions/functionstat.sql:
--------------------------------------------------------------------------------
/* ===== Function stats functions ===== */
CREATE FUNCTION profile_checkavail_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS $$
-- True when at least one user function call was collected
-- within the report interval (start_id, end_id]
  SELECT COALESCE(sum(calls), 0) > 0
  FROM sample_stat_user_func_total fct
  WHERE fct.server_id = sserver_id
    AND fct.sample_id BETWEEN start_id + 1 AND end_id
$$ LANGUAGE sql;

CREATE FUNCTION profile_checkavail_trg_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS $$
-- True when at least one trigger function call was collected
-- within the report interval (start_id, end_id]
  SELECT COALESCE(sum(calls), 0) > 0
  FROM sample_stat_user_func_total fct
  WHERE fct.server_id = sserver_id
    AND fct.sample_id BETWEEN start_id + 1 AND end_id
    AND fct.trg_fn
$$ LANGUAGE sql;
/* ===== Function stats functions ===== */

CREATE FUNCTION top_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS TABLE(
    datid oid,
    funcid oid,
    dbname name,
    schemaname name,
    funcname name,
    funcargs text,
    trg_fn boolean,
    calls bigint,
    total_time double precision,
    self_time double precision,
    m_time double precision,
    m_stime double precision
)
SET search_path=@extschema@ AS $$
-- Per-function statistics aggregated over the report interval.
-- Times are divided by 1000 (ms -> s); m_time/m_stime are per-call means
  SELECT
    fn.datid,
    fn.funcid,
    dbs.datname AS dbname,
    fn.schemaname,
    fn.funcname,
    fn.funcargs,
    fn.trg_fn,
    sum(fn.calls)::bigint AS calls,
    sum(fn.total_time)/1000 AS total_time,
    sum(fn.self_time)/1000 AS self_time,
    sum(fn.total_time)/NULLIF(sum(fn.calls),0)/1000 AS m_time,
    sum(fn.self_time)/NULLIF(sum(fn.calls),0)/1000 AS m_stime
  FROM v_sample_stat_user_functions fn
    -- Database name
    JOIN sample_stat_database dbs
      USING (server_id, sample_id, datid)
  WHERE fn.server_id = sserver_id
    AND NOT dbs.datistemplate
    AND fn.sample_id BETWEEN start_id + 1 AND end_id
  GROUP BY
    fn.datid,
    fn.funcid,
    dbs.datname,
    fn.schemaname,
    fn.funcname,
    fn.funcargs,
    fn.trg_fn
$$ LANGUAGE sql;

CREATE FUNCTION top_functions_format(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS TABLE(
    datid oid,
    funcid oid,
    dbname name,
    schemaname name,
    funcname name,
    funcargs text,
    calls bigint,
    total_time numeric,
    self_time numeric,
    m_time numeric,
    m_stime numeric,

    ord_time integer,
    ord_calls integer,
    ord_trgtime integer
)
SET search_path=@extschema@ AS $$
-- Report-ready formatting of top_functions(): zero values become NULLs,
-- times are rounded to two digits, and three ranking columns are
-- calculated for report section ordering
  SELECT
    datid,
    funcid,
    dbname,
    schemaname,
    funcname,
    funcargs,
    NULLIF(calls, 0) AS calls,
    round(CAST(NULLIF(total_time, 0.0) AS numeric), 2) AS total_time,
    round(CAST(NULLIF(self_time, 0.0) AS numeric), 2) AS self_time,
    round(CAST(NULLIF(m_time, 0.0) AS numeric), 2) AS m_time,
    round(CAST(NULLIF(m_stime, 0.0) AS numeric), 2) AS m_stime,

    -- non-trigger functions ranked by total time
    CASE WHEN total_time > 0 AND NOT trg_fn THEN
      row_number() OVER (ORDER BY
        total_time DESC NULLS LAST,
        datid, funcid)::integer
    ELSE NULL END AS ord_time,

    -- non-trigger functions ranked by number of calls
    CASE WHEN calls > 0 AND NOT trg_fn THEN
      row_number() OVER (ORDER BY
        calls DESC NULLS LAST,
        datid, funcid)::integer
    ELSE NULL END AS ord_calls,

    -- trigger functions ranked by total time
    CASE WHEN total_time > 0 AND trg_fn THEN
      row_number() OVER (ORDER BY
        total_time DESC NULLS LAST,
        datid, funcid)::integer
    ELSE NULL END AS ord_trgtime
  FROM top_functions(sserver_id, start_id, end_id)
$$ LANGUAGE sql;

CREATE FUNCTION top_functions_format_diff(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer)
RETURNS TABLE(
    datid oid,
    funcid oid,
    dbname name,
    schemaname name,
    funcname name,
    funcargs text,

    calls1 bigint,
    total_time1 numeric,
    self_time1 numeric,
    m_time1 numeric,
    m_stime1 numeric,

    calls2 bigint,
    total_time2 numeric,
    self_time2 numeric,
    m_time2 numeric,
    m_stime2 numeric,

    ord_time integer,
    ord_calls integer,
    ord_trgtime integer
)
SET search_path=@extschema@ AS $$
-- Differential (two-interval) variant of top_functions_format().
-- Functions of both intervals are matched by (datid, funcid);
-- ranking columns order by the combined value of both intervals
  SELECT
    COALESCE(s1.datid, s2.datid),
    COALESCE(s1.funcid, s2.funcid),
    COALESCE(s1.dbname, s2.dbname),
    COALESCE(s1.schemaname, s2.schemaname),
    COALESCE(s1.funcname, s2.funcname),
    COALESCE(s1.funcargs, s2.funcargs),

    NULLIF(s1.calls, 0) AS calls1,
    round(CAST(NULLIF(s1.total_time, 0.0) AS numeric), 2) AS total_time1,
    round(CAST(NULLIF(s1.self_time, 0.0) AS numeric), 2) AS self_time1,
    round(CAST(NULLIF(s1.m_time, 0.0) AS numeric), 2) AS m_time1,
    round(CAST(NULLIF(s1.m_stime, 0.0) AS numeric), 2) AS m_stime1,

    NULLIF(s2.calls, 0) AS calls2,
    round(CAST(NULLIF(s2.total_time, 0.0) AS numeric), 2) AS total_time2,
    round(CAST(NULLIF(s2.self_time, 0.0) AS numeric), 2) AS self_time2,
    round(CAST(NULLIF(s2.m_time, 0.0) AS numeric), 2) AS m_time2,
    round(CAST(NULLIF(s2.m_stime, 0.0) AS numeric), 2) AS m_stime2,

    -- non-trigger functions ranked by combined total time
    CASE WHEN
      COALESCE(s1.total_time, 0) + COALESCE(s2.total_time, 0) > 0
      AND NOT COALESCE(s1.trg_fn, s2.trg_fn, false)
    THEN
      row_number() OVER (ORDER BY
        COALESCE(s1.total_time, 0) + COALESCE(s2.total_time, 0) DESC NULLS LAST,
        COALESCE(s1.datid, s2.datid),
        COALESCE(s1.funcid, s2.funcid))::integer
    ELSE NULL END AS ord_time,

    -- non-trigger functions ranked by combined calls
    CASE WHEN
      COALESCE(s1.calls, 0) + COALESCE(s2.calls, 0) > 0
      AND NOT COALESCE(s1.trg_fn, s2.trg_fn, false)
    THEN
      row_number() OVER (ORDER BY
        COALESCE(s1.calls, 0) + COALESCE(s2.calls, 0) DESC NULLS LAST,
        COALESCE(s1.datid, s2.datid),
        COALESCE(s1.funcid, s2.funcid))::integer
    ELSE NULL END AS ord_calls,

    -- trigger functions ranked by combined total time
    CASE WHEN
      COALESCE(s1.total_time, 0) + COALESCE(s2.total_time, 0) > 0
      AND COALESCE(s1.trg_fn, s2.trg_fn, false)
    THEN
      row_number() OVER (ORDER BY
        COALESCE(s1.total_time, 0) + COALESCE(s2.total_time, 0) DESC NULLS LAST,
        COALESCE(s1.datid, s2.datid),
        COALESCE(s1.funcid, s2.funcid))::integer
    ELSE NULL END AS ord_trgtime
  FROM top_functions(sserver_id, start1_id, end1_id) s1
    FULL OUTER JOIN top_functions(sserver_id, start2_id, end2_id) s2
      USING (datid, funcid)
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/kcachestat_checks.sql:
--------------------------------------------------------------------------------
/* ========= kcache stats functions ========= */

CREATE FUNCTION profile_checkavail_rusage(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS $$
-- rusage executor statistics are considered available only when every
-- sample of the interval has positive executor user time collected
  SELECT count(*) = end_id - start_id
  FROM (
    SELECT sum(exec_user_time) > 0 AS exec
    FROM sample_kcache_total
    WHERE server_id = sserver_id
      AND sample_id BETWEEN start_id + 1 AND end_id
    GROUP BY server_id, sample_id
  ) exec_time_samples
  WHERE exec_time_samples.exec
$$ LANGUAGE sql;

CREATE FUNCTION profile_checkavail_rusage_planstats(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS $$
-- rusage planner statistics are considered available only when every
-- sample of the interval has positive planner user time collected
  SELECT count(*) = end_id - start_id
  FROM (
    SELECT sum(plan_user_time) > 0 AS plan
    FROM sample_kcache_total
    WHERE server_id = sserver_id
      AND sample_id BETWEEN start_id + 1 AND end_id
    GROUP BY server_id, sample_id
  ) plan_time_samples
  WHERE plan_time_samples.plan
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/pg_wait_sampling.sql:
--------------------------------------------------------------------------------
/* pg_wait_sampling reporting functions */
CREATE FUNCTION profile_checkavail_wait_sampling_total(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS $$
-- Check if wait sampling data was collected for the report interval
  SELECT count(*) > 0
  FROM wait_sampling_total
  WHERE server_id = sserver_id
    AND sample_id BETWEEN start_id + 1 AND end_id
$$ LANGUAGE sql;

CREATE FUNCTION wait_sampling_total_stats(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    event_type text,
    event text,
    tot_waited numeric,
    stmt_waited numeric
)
SET search_path=@extschema@ AS $$
-- Wait times aggregated per (event_type, event) over the interval,
-- converted from milliseconds to seconds
  SELECT
    ws.event_type,
    ws.event,
    sum(ws.tot_waited)::numeric / 1000 AS tot_waited,
    sum(ws.stmt_waited)::numeric / 1000 AS stmt_waited
  FROM wait_sampling_total ws
  WHERE ws.server_id = sserver_id
    AND ws.sample_id BETWEEN start_id + 1 AND end_id
  GROUP BY ws.event_type, ws.event
$$ LANGUAGE sql;

CREATE FUNCTION wait_sampling_total_stats_format(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    event_type text,
    event_type_order bigint,
    tot_waited numeric,
    tot_waited_pct numeric,
    stmt_waited numeric,
    stmt_waited_pct numeric
)
SET search_path=@extschema@ AS $$
-- Waits summarized by event type with percentages of the overall wait
-- time; GROUP BY ROLLUP supplies the 'Total' row
  WITH tot AS (
    SELECT sum(tot_waited) AS tot_waited, sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start_id, end_id))
  SELECT
    COALESCE(event_type, 'Total'),
    row_number() OVER (ORDER BY event_type NULLS LAST) AS event_type_order,
    round(sum(st.tot_waited), 2) AS tot_waited,
    round(sum(st.tot_waited) * 100 / NULLIF(min(tot.tot_waited), 0), 2) AS tot_waited_pct,
    round(sum(st.stmt_waited), 2) AS stmt_waited,
    round(sum(st.stmt_waited) * 100 / NULLIF(min(tot.stmt_waited), 0), 2) AS stmt_waited_pct
  FROM wait_sampling_total_stats(sserver_id, start_id, end_id) st CROSS JOIN tot
  GROUP BY ROLLUP(event_type)
$$ LANGUAGE sql;

CREATE FUNCTION wait_sampling_total_stats_format_diff(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer)
RETURNS TABLE(
    event_type text,
    event_type_order bigint,
    tot_waited1 numeric,
    tot_waited_pct1 numeric,
    stmt_waited1 numeric,
    stmt_waited_pct1 numeric,
    tot_waited2 numeric,
    tot_waited_pct2 numeric,
    stmt_waited2 numeric,
    stmt_waited_pct2 numeric
)
SET search_path=@extschema@ AS $$
-- Two-interval variant: wait summaries of both intervals matched by
-- (event_type, event), rolled up to event type with a 'Total' row
  WITH tot1 AS (
    SELECT sum(tot_waited) AS tot_waited, sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start1_id, end1_id)),
  tot2 AS (
    SELECT sum(tot_waited) AS tot_waited, sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start2_id, end2_id))
  SELECT
    COALESCE(event_type, 'Total'),
    row_number() OVER (ORDER BY event_type NULLS LAST) AS event_type_order,
    round(sum(st1.tot_waited), 2) AS tot_waited1,
    round(sum(st1.tot_waited) * 100 / NULLIF(min(tot1.tot_waited), 0), 2) AS tot_waited_pct1,
    round(sum(st1.stmt_waited), 2) AS stmt_waited1,
    round(sum(st1.stmt_waited) * 100 / NULLIF(min(tot1.stmt_waited), 0), 2) AS stmt_waited_pct1,
    round(sum(st2.tot_waited), 2) AS tot_waited2,
    round(sum(st2.tot_waited) * 100 / NULLIF(min(tot2.tot_waited), 0), 2) AS tot_waited_pct2,
    round(sum(st2.stmt_waited), 2) AS stmt_waited2,
    round(sum(st2.stmt_waited) * 100 / NULLIF(min(tot2.stmt_waited), 0), 2) AS stmt_waited_pct2
  FROM (wait_sampling_total_stats(sserver_id, start1_id, end1_id) st1 CROSS JOIN tot1)
  FULL JOIN
    (wait_sampling_total_stats(sserver_id, start2_id, end2_id) st2 CROSS JOIN tot2)
  USING (event_type, event)
  GROUP BY ROLLUP(event_type)
$$ LANGUAGE sql;

CREATE FUNCTION top_wait_sampling_events_format(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    event_type text,
    event text,
    total_filter boolean,
    stmt_filter boolean,
    tot_waited numeric,
    tot_waited_pct numeric,
    stmt_waited numeric,
    stmt_waited_pct numeric
)
SET search_path=@extschema@ AS $$
-- Per-event wait times with their shares of the overall wait time;
-- *_filter flags mark rows having a nonzero value for the section
  WITH tot AS (
    SELECT
      sum(tot_waited) AS tot_waited,
      sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start_id, end_id))
  SELECT
    event_type,
    event,
    COALESCE(st.tot_waited > 0, false) AS total_filter,
    COALESCE(st.stmt_waited > 0, false) AS stmt_filter,
    round(st.tot_waited, 2) AS tot_waited,
    round(st.tot_waited * 100 / NULLIF(tot.tot_waited, 0), 2) AS tot_waited_pct,
    round(st.stmt_waited, 2) AS stmt_waited,
    round(st.stmt_waited * 100 / NULLIF(tot.stmt_waited, 0), 2) AS stmt_waited_pct
  FROM wait_sampling_total_stats(sserver_id, start_id, end_id) st CROSS JOIN tot
$$ LANGUAGE sql;

CREATE FUNCTION top_wait_sampling_events_format_diff(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer)
RETURNS TABLE(
    event_type text,
    event text,
    total_filter boolean,
    stmt_filter boolean,
    tot_ord bigint,
    stmt_ord bigint,
    tot_waited1 numeric,
    tot_waited_pct1 numeric,
    tot_waited2 numeric,
    tot_waited_pct2 numeric,
    stmt_waited1 numeric,
    stmt_waited_pct1 numeric,
    stmt_waited2 numeric,
    stmt_waited_pct2 numeric
)
SET search_path=@extschema@ AS $$
-- Two-interval variant: per-event wait times of both intervals matched
-- by (event_type, event); tot_ord/stmt_ord rank by combined wait time
  WITH tot1 AS (
    SELECT
      sum(tot_waited) AS tot_waited,
      sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start1_id, end1_id)),
  tot2 AS (
    SELECT
      sum(tot_waited) AS tot_waited,
      sum(stmt_waited) AS stmt_waited
    FROM wait_sampling_total_stats(sserver_id, start2_id, end2_id))
  SELECT
    event_type,
    event,
    COALESCE(st1.tot_waited, 0) + COALESCE(st2.tot_waited, 0) > 0 AS total_filter,
    COALESCE(st1.stmt_waited, 0) + COALESCE(st2.stmt_waited, 0) > 0 AS stmt_filter,
    row_number() OVER (ORDER BY
      COALESCE(st1.tot_waited, 0) + COALESCE(st2.tot_waited, 0) DESC,
      event_type, event) AS tot_ord,
    row_number() OVER (ORDER BY
      COALESCE(st1.stmt_waited, 0) + COALESCE(st2.stmt_waited, 0) DESC,
      event_type, event) AS stmt_ord,
    round(st1.tot_waited, 2) AS tot_waited1,
    round(st1.tot_waited * 100 / NULLIF(tot1.tot_waited, 0), 2) AS tot_waited_pct1,
    round(st2.tot_waited, 2) AS tot_waited2,
    round(st2.tot_waited * 100 / NULLIF(tot2.tot_waited, 0), 2) AS tot_waited_pct2,
    round(st1.stmt_waited, 2) AS stmt_waited1,
    round(st1.stmt_waited * 100 / NULLIF(tot1.stmt_waited, 0), 2) AS stmt_waited_pct1,
    round(st2.stmt_waited, 2) AS stmt_waited2,
    round(st2.stmt_waited * 100 / NULLIF(tot2.stmt_waited, 0), 2) AS stmt_waited_pct2
  FROM (wait_sampling_total_stats(sserver_id, start1_id, end1_id) st1 CROSS JOIN tot1)
  FULL JOIN
    (wait_sampling_total_stats(sserver_id, start2_id, end2_id) st2 CROSS JOIN tot2)
  USING (event_type, event)
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/settings.sql:
--------------------------------------------------------------------------------
/*===== Settings reporting functions =====*/
CREATE FUNCTION settings_and_changes(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS TABLE(
    first_seen timestamp(0) with time zone,
    setting_scope smallint,
    name text,
    setting text,
    reset_val text,
    boot_val text,
    unit text,
    sourcefile text,
    sourceline integer,
    pending_restart boolean,
    changed boolean,
    default_val boolean
)
SET search_path=@extschema@ AS $$
-- Settings as of the interval start sample (changed = false), together
-- with setting changes first seen within the interval (changed = true).
-- default_val is true when reset_val matches the boot value
  SELECT
    first_seen,
    setting_scope,
    name,
    setting,
    reset_val,
    boot_val,
    unit,
    sourcefile,
    sourceline,
    pending_restart,
    false,
    boot_val IS NOT DISTINCT FROM reset_val
  FROM v_sample_settings
  WHERE (server_id, sample_id) = (sserver_id, start_id)
  UNION ALL
  SELECT
    first_seen,
    setting_scope,
    name,
    setting,
    reset_val,
    boot_val,
    unit,
    sourcefile,
    sourceline,
    pending_restart,
    true,
    boot_val IS NOT DISTINCT FROM reset_val
  FROM sample_settings stg
    JOIN samples smp_start ON (smp_start.server_id = stg.server_id AND smp_start.sample_id = start_id)
    JOIN samples smp_end ON (smp_end.server_id = stg.server_id AND smp_end.sample_id = end_id)
  WHERE stg.server_id = sserver_id
    AND stg.first_seen > smp_start.sample_time
    AND stg.first_seen <= smp_end.sample_time
$$ LANGUAGE sql;

CREATE FUNCTION settings_format(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS TABLE (
    klass text,
    name text,
    reset_val text,
    unit text,
    source text,
    notes text,
    default_val boolean,
    defined_val boolean,
    h_ord integer -- report header ordering
)
SET search_path=@extschema@ AS $$
-- Report-ready settings list: 'init' rows existed at interval start,
-- 'new' rows appeared during the interval
  SELECT
    CASE WHEN changed THEN 'new' ELSE 'init' END AS klass,
    name,
    reset_val,
    unit,
    concat_ws(':', sourcefile, sourceline) AS source,
    concat_ws(', ',
      CASE WHEN changed THEN first_seen ELSE NULL END,
      CASE WHEN pending_restart THEN 'Pending restart' ELSE NULL END
    ) AS notes,
    default_val,
    NOT default_val,
    -- pin version-describing settings to the report header
    CASE
      WHEN name = 'version' THEN 10
      WHEN (name, setting_scope) = ('pgpro_version', 2) THEN 21
      WHEN (name, setting_scope) = ('pgpro_edition', 2) THEN 22
      WHEN (name, setting_scope) = ('pgpro_build', 2) THEN 23
      ELSE NULL
    END AS h_ord
  FROM settings_and_changes(sserver_id, start_id, end_id)
  ORDER BY name, setting_scope, first_seen, pending_restart ASC NULLS FIRST
$$ LANGUAGE sql;

CREATE FUNCTION settings_format_diff(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer)
RETURNS TABLE (
    klass text,
    name text,
    reset_val text,
    unit text,
    source text,
    notes text,
    default_val boolean,
    defined_val boolean,
    h_ord integer -- report header ordering
)
SET search_path=@extschema@ AS $$
-- Two-interval variant: klass gets an 'i1'/'i2' suffix when the row is
-- present in only one of the intervals
  SELECT
    concat_ws('_',
      CASE WHEN changed THEN 'new' ELSE 'init' END,
      CASE WHEN st1.name IS NULL THEN 'i2'
        WHEN st2.name IS NULL THEN 'i1'
        ELSE NULL
      END
    ) AS klass,
    name,
    reset_val,
    COALESCE(st1.unit, st2.unit) AS unit,
    concat_ws(':',
      COALESCE(st1.sourcefile, st2.sourcefile),
      COALESCE(st1.sourceline, st2.sourceline)
    ) AS source,
    concat_ws(', ',
      CASE WHEN changed THEN first_seen ELSE NULL END,
      CASE WHEN pending_restart THEN 'Pending restart' ELSE NULL END
    ) AS notes,
    default_val,
    NOT default_val,
    -- pin version-describing settings to the report header
    CASE
      WHEN name = 'version' THEN 10
      WHEN (name, setting_scope) = ('pgpro_version', 2) THEN 21
      WHEN (name, setting_scope) = ('pgpro_edition', 2) THEN 22
      WHEN (name, setting_scope) = ('pgpro_build', 2) THEN 23
      ELSE NULL
    END AS h_ord
  FROM settings_and_changes(sserver_id, start1_id, end1_id) st1
    FULL OUTER JOIN settings_and_changes(sserver_id, start2_id, end2_id) st2
      USING (first_seen, setting_scope, name, setting, reset_val, pending_restart, changed, default_val)
  ORDER BY name, setting_scope, first_seen, pending_restart ASC NULLS FIRST
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/stat_slru.sql:
--------------------------------------------------------------------------------
CREATE FUNCTION cluster_stat_slru(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    server_id integer,
    name text,
    blks_zeroed bigint,
    blks_hit bigint,
    blks_read bigint,
    blks_written bigint,
    blks_exists bigint,
    flushes bigint,
    truncates bigint
)
SET search_path=@extschema@ AS $$
-- SLRU cache statistics aggregated per cache name over the interval
  SELECT
    slru.server_id AS server_id,
    slru.name AS name,
    SUM(blks_zeroed)::bigint AS blks_zeroed,
    SUM(blks_hit)::bigint AS blks_hit,
    SUM(blks_read)::bigint AS blks_read,
    SUM(blks_written)::bigint AS blks_written,
    SUM(blks_exists)::bigint AS blks_exists,
    SUM(flushes)::bigint AS flushes,
    SUM(truncates)::bigint AS truncates
  FROM sample_stat_slru slru
  WHERE slru.server_id = sserver_id
    AND slru.sample_id BETWEEN start_id + 1 AND end_id
  GROUP BY slru.server_id, slru.name
$$ LANGUAGE sql;

CREATE FUNCTION cluster_stat_slru_format(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    name text,

    blks_zeroed bigint,
    blks_hit bigint,
    blks_read bigint,
    hit_pct numeric,
    blks_written bigint,
    blks_exists bigint,
    flushes bigint,
    truncates bigint
) SET search_path=@extschema@ AS $$
-- Report-ready SLRU statistics: zeroes become NULLs, a hit percentage
-- is calculated, and ROLLUP supplies the 'Total' row
  SELECT
    COALESCE(name, 'Total') AS name,

    NULLIF(SUM(blks_zeroed), 0)::bigint AS blks_zeroed,
    NULLIF(SUM(blks_hit), 0)::bigint AS blks_hit,
    NULLIF(SUM(blks_read), 0)::bigint AS blks_read,
    ROUND(NULLIF(SUM(blks_hit), 0)::numeric * 100 /
      NULLIF(COALESCE(SUM(blks_hit), 0) + COALESCE(SUM(blks_read), 0), 0), 2)
      AS hit_pct,
    NULLIF(SUM(blks_written), 0)::bigint AS blks_written,
    NULLIF(SUM(blks_exists), 0)::bigint AS blks_exists,
    NULLIF(SUM(flushes), 0)::bigint AS flushes,
    NULLIF(SUM(truncates), 0)::bigint AS truncates

  FROM cluster_stat_slru(sserver_id, start_id, end_id)
  GROUP BY ROLLUP(name)
  ORDER BY NULLIF(name, 'Total') ASC NULLS LAST
$$ LANGUAGE sql;

CREATE FUNCTION cluster_stat_slru_format(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer)
RETURNS TABLE(
    name text,

    blks_zeroed1 bigint,
    blks_hit1 bigint,
    blks_read1 bigint,
    hit_pct1 numeric,
    blks_written1 bigint,
    blks_exists1 bigint,
    flushes1 bigint,
    truncates1 bigint,

    blks_zeroed2 bigint,
    blks_hit2 bigint,
    blks_read2 bigint,
    hit_pct2 numeric,
    blks_written2 bigint,
    blks_exists2 bigint,
    flushes2 bigint,
    truncates2 bigint
) SET search_path=@extschema@ AS $$
-- Two-interval variant: SLRU statistics of both intervals matched by
-- (server_id, name), with a 'Total' row from ROLLUP
  SELECT
    COALESCE(name, 'Total') AS name,

    NULLIF(SUM(st1.blks_zeroed), 0)::bigint AS blks_zeroed1,
    NULLIF(SUM(st1.blks_hit), 0)::bigint AS blks_hit1,
    NULLIF(SUM(st1.blks_read), 0)::bigint AS blks_read1,
    ROUND(NULLIF(SUM(st1.blks_hit), 0)::numeric * 100 /
      NULLIF(COALESCE(SUM(st1.blks_hit), 0) + COALESCE(SUM(st1.blks_read), 0), 0), 2)
      AS hit_pct1,
    NULLIF(SUM(st1.blks_written), 0)::bigint AS blks_written1,
    NULLIF(SUM(st1.blks_exists), 0)::bigint AS blks_exists1,
    NULLIF(SUM(st1.flushes), 0)::bigint AS flushes1,
    NULLIF(SUM(st1.truncates), 0)::bigint AS truncates1,

    NULLIF(SUM(st2.blks_zeroed), 0)::bigint AS blks_zeroed2,
    NULLIF(SUM(st2.blks_hit), 0)::bigint AS blks_hit2,
    NULLIF(SUM(st2.blks_read), 0)::bigint AS blks_read2,
    ROUND(NULLIF(SUM(st2.blks_hit), 0)::numeric * 100 /
      NULLIF(COALESCE(SUM(st2.blks_hit), 0) + COALESCE(SUM(st2.blks_read), 0), 0), 2)
      AS hit_pct2,
    NULLIF(SUM(st2.blks_written), 0)::bigint AS blks_written2,
    NULLIF(SUM(st2.blks_exists), 0)::bigint AS blks_exists2,
    NULLIF(SUM(st2.flushes), 0)::bigint AS flushes2,
    NULLIF(SUM(st2.truncates), 0)::bigint AS truncates2

  FROM cluster_stat_slru(sserver_id, start1_id, end1_id) st1
    FULL OUTER JOIN cluster_stat_slru(sserver_id, start2_id, end2_id) st2
      USING (server_id, name)
  GROUP BY ROLLUP(name)
  ORDER BY NULLIF(name, 'Total') ASC NULLS LAST
$$ LANGUAGE sql;

CREATE FUNCTION cluster_stat_slru_resets(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    server_id integer,
    sample_id integer,
    name text,
    stats_reset timestamp with time zone
)
SET search_path=@extschema@ AS $$
-- Detect SLRU statistics resets within the interval: for each cache
-- name, report the first sample where stats_reset differs from the
-- value seen at the start of the interval
  WITH first_val AS (
    SELECT name, stats_reset
    FROM sample_stat_slru slru JOIN (
      SELECT name, MIN(sample_id) AS sample_id
      FROM sample_stat_slru
      WHERE server_id = sserver_id AND
        sample_id BETWEEN start_id AND end_id
      GROUP BY name
    ) fv USING (name, sample_id)
    WHERE slru.server_id = sserver_id
  )
  SELECT
    server_id,
    min(sample_id),
    name,
    slru.stats_reset
  FROM sample_stat_slru slru JOIN first_val USING (name)
  WHERE slru.server_id = sserver_id
    AND slru.sample_id BETWEEN start_id + 1 AND end_id
    AND slru.stats_reset IS DISTINCT FROM first_val.stats_reset
  GROUP BY server_id, name, slru.stats_reset
$$ LANGUAGE sql;

CREATE FUNCTION cluster_stat_slru_reset_format(IN sserver_id integer,
  IN start_id integer, IN end_id integer)
RETURNS TABLE(
    sample_id integer,
    name text,
    stats_reset timestamp with time zone
) SET search_path=@extschema@ AS $$
-- Report-ready list of SLRU statistics resets for a single interval
  SELECT
    sample_id,
    name,
    stats_reset
  FROM cluster_stat_slru_resets(sserver_id, start_id, end_id)
  ORDER BY sample_id ASC
$$ LANGUAGE sql;

CREATE FUNCTION cluster_stat_slru_reset_format(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer, IN start2_id integer, IN end2_id integer)
RETURNS TABLE(
    sample_id integer,
    name text,
    stats_reset timestamp with time zone
) SET search_path=@extschema@ AS $$
-- Two-interval variant: deduplicated union of resets of both intervals
  SELECT
    sample_id,
    name,
    stats_reset
  FROM (
    SELECT
      sample_id,
      name,
      stats_reset
    FROM cluster_stat_slru_resets(sserver_id, start1_id, end1_id)
    UNION
    SELECT
      sample_id,
      name,
      stats_reset
    FROM cluster_stat_slru_resets(sserver_id, start2_id, end2_id)
  ) resets
  ORDER BY sample_id ASC
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/statements_checks.sql:
--------------------------------------------------------------------------------
/* ===== pg_stat_statements checks ===== */
CREATE FUNCTION profile_checkavail_stmt_cnt(IN sserver_id integer, IN start_id integer, IN end_id integer)
RETURNS BOOLEAN
SET search_path=@extschema@ AS
$$
-- Check if the captured statements count came close (>= 90%) to the
-- pg_stat_statements.max limit in any sample of the interval;
-- (start_id, end_id) = (0, 0) means checking all samples of the server
  SELECT COUNT(*) > 0 FROM samples
    JOIN (
      SELECT sample_id, sum(statements) stmt_cnt
      FROM sample_statements_total
      WHERE server_id = sserver_id AND
        ((start_id, end_id) = (0, 0) OR
        sample_id BETWEEN start_id + 1 AND end_id)
      GROUP BY sample_id
    ) sample_stmt_cnt USING (sample_id)
    JOIN v_sample_settings prm USING (server_id, sample_id)
  WHERE server_id = sserver_id AND prm.name = 'pg_stat_statements.max' AND
    stmt_cnt >= 0.9 * cast(prm.setting AS integer);
$$ LANGUAGE sql;

CREATE FUNCTION stmt_cnt(IN sserver_id integer, IN start_id integer = 0,
  IN end_id integer = 0)
RETURNS TABLE(
    sample_id integer,
    sample_time timestamp with time zone,
    stmt_cnt integer,
    max_cnt text
)
SET search_path=@extschema@
AS $$
-- Samples where the captured statements count reached 90% of the
-- pg_stat_statements.max limit; (0, 0) bounds mean all samples
  SELECT
    sample_id,
    sample_time,
    stmt_cnt,
    prm.setting AS max_cnt
  FROM samples
    JOIN (
      SELECT
        sample_id,
        sum(statements)::integer AS stmt_cnt
      FROM sample_statements_total
      WHERE server_id = sserver_id
        AND ((start_id, end_id) = (0, 0) OR sample_id BETWEEN start_id + 1 AND end_id)
      GROUP BY sample_id
    ) sample_stmt_cnt USING (sample_id)
    JOIN v_sample_settings prm USING (server_id, sample_id)
  WHERE server_id = sserver_id AND prm.name = 'pg_stat_statements.max' AND
    stmt_cnt >= 0.9 * cast(prm.setting AS integer)
$$ LANGUAGE sql;

CREATE FUNCTION stmt_cnt_format(IN sserver_id integer, IN start_id integer = 0,
  IN end_id integer = 0)
RETURNS TABLE(
    sample_id integer,
    sample_time text,
    stmt_cnt integer,
    max_cnt text
)
SET search_path=@extschema@ AS $$
-- Report-ready stmt_cnt() with sample_time rendered as text
  SELECT
    sample_id,
    sample_time::text,
    stmt_cnt,
    max_cnt
  FROM stmt_cnt(sserver_id, start_id, end_id)
$$ LANGUAGE sql;

CREATE FUNCTION stmt_cnt_format_diff(IN sserver_id integer,
  IN start1_id integer = 0, IN end1_id integer = 0,
  IN start2_id integer = 0, IN end2_id integer = 0)
RETURNS TABLE(
    interval_num integer,
    sample_id integer,
    sample_time text,
    stmt_cnt integer,
    max_cnt text
)
SET search_path=@extschema@ AS $$
-- Two-interval variant: interval_num marks which interval a row is from
  SELECT
    1 AS interval_num,
    sample_id,
    sample_time::text,
    stmt_cnt,
    max_cnt
  FROM stmt_cnt(sserver_id, start1_id, end1_id)
  UNION ALL
  SELECT
    2 AS interval_num,
    sample_id,
    sample_time::text,
    stmt_cnt,
    max_cnt
  FROM stmt_cnt(sserver_id, start2_id, end2_id)
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/tablespacestat.sql: -------------------------------------------------------------------------------- 1 | /* ===== Tables stats functions ===== */ 2 | 3 | CREATE FUNCTION tablespace_stats(IN sserver_id integer, IN start_id integer, IN end_id integer) 4 | RETURNS TABLE( 5 | server_id integer, 6 | tablespaceid oid, 7 | tablespacename name, 8 | tablespacepath text, 9 | size_delta bigint 10 | ) SET search_path=@extschema@ AS $$ 11 | SELECT 12 | st.server_id, 13 | st.tablespaceid, 14 | st.tablespacename, 15 | st.tablespacepath, 16 | sum(st.size_delta)::bigint AS size_delta 17 | FROM v_sample_stat_tablespaces st 18 | WHERE st.server_id = sserver_id 19 | AND st.sample_id BETWEEN start_id + 1 AND end_id 20 | GROUP BY st.server_id, st.tablespaceid, st.tablespacename, st.tablespacepath 21 | $$ LANGUAGE sql; 22 | 23 | CREATE FUNCTION tablespace_stats_format(IN sserver_id integer, IN start_id integer, IN end_id integer) 24 | RETURNS TABLE( 25 | tablespacename text, 26 | tablespacepath text, 27 | size text, 28 | size_delta text 29 | ) 30 | SET search_path=@extschema@ AS $$ 31 | SELECT 32 | st.tablespacename::text, 33 | st.tablespacepath, 34 | pg_size_pretty(NULLIF(st_last.size, 0)) as size, 35 | pg_size_pretty(NULLIF(st.size_delta, 0)) as size_delta 36 | FROM tablespace_stats(sserver_id, start_id, end_id) st 37 | LEFT OUTER JOIN v_sample_stat_tablespaces st_last ON 38 | (st_last.server_id, st_last.sample_id, st_last.tablespaceid) = 39 | (st.server_id, end_id, st.tablespaceid) 40 | ORDER BY st.tablespacename ASC; 41 | $$ LANGUAGE sql; 42 | 43 | CREATE FUNCTION tablespace_stats_format_diff(IN sserver_id integer, 44 | IN start1_id integer, IN end1_id integer, 45 | IN start2_id integer, IN end2_id integer) 46 | RETURNS TABLE( 47 | tablespacename text, 48 | tablespacepath text, 49 | size1 text, 50 | size2 text, 51 | size_delta1 text, 52 | size_delta2 text 53 | ) 54 | SET search_path=@extschema@ AS $$ 55 | SELECT 56 | 
COALESCE(stat1.tablespacename,stat2.tablespacename)::text AS tablespacename, 57 | COALESCE(stat1.tablespacepath,stat2.tablespacepath) AS tablespacepath, 58 | pg_size_pretty(NULLIF(st_last1.size, 0)) as size1, 59 | pg_size_pretty(NULLIF(st_last2.size, 0)) as size2, 60 | pg_size_pretty(NULLIF(stat1.size_delta, 0)) as size_delta1, 61 | pg_size_pretty(NULLIF(stat2.size_delta, 0)) as size_delta2 62 | FROM tablespace_stats(sserver_id,start1_id,end1_id) stat1 63 | FULL OUTER JOIN tablespace_stats(sserver_id,start2_id,end2_id) stat2 64 | USING (server_id,tablespaceid) 65 | LEFT OUTER JOIN v_sample_stat_tablespaces st_last1 ON 66 | (st_last1.server_id, st_last1.sample_id, st_last1.tablespaceid) = 67 | (stat1.server_id, end1_id, stat1.tablespaceid) 68 | LEFT OUTER JOIN v_sample_stat_tablespaces st_last2 ON 69 | (st_last2.server_id, st_last2.sample_id, st_last2.tablespaceid) = 70 | (stat2.server_id, end2_id, stat2.tablespaceid) 71 | $$ LANGUAGE sql; 72 | -------------------------------------------------------------------------------- /report/functions/walstat.sql: -------------------------------------------------------------------------------- 1 | /* ===== Cluster stats functions ===== */ 2 | CREATE FUNCTION profile_checkavail_walstats(IN sserver_id integer, IN start_id integer, IN end_id integer) 3 | RETURNS BOOLEAN 4 | SET search_path=@extschema@ AS $$ 5 | -- Check if there is WAL stats collected 6 | SELECT 7 | count(wal_bytes) > 0 8 | FROM sample_stat_wal 9 | WHERE 10 | server_id = sserver_id 11 | AND sample_id BETWEEN start_id + 1 AND end_id 12 | $$ LANGUAGE sql; 13 | 14 | CREATE FUNCTION wal_stats_reset(IN sserver_id integer, IN start_id integer, IN end_id integer) 15 | RETURNS TABLE( 16 | sample_id integer, 17 | wal_stats_reset timestamp with time zone 18 | ) 19 | SET search_path=@extschema@ AS $$ 20 | SELECT 21 | ws1.sample_id as sample_id, 22 | nullif(ws1.stats_reset,ws0.stats_reset) 23 | FROM sample_stat_wal ws1 24 | JOIN sample_stat_wal ws0 ON (ws1.server_id = 
ws0.server_id AND ws1.sample_id = ws0.sample_id + 1) 25 | WHERE ws1.server_id = sserver_id AND ws1.sample_id BETWEEN start_id + 1 AND end_id 26 | AND 27 | nullif(ws1.stats_reset,ws0.stats_reset) IS NOT NULL 28 | ORDER BY ws1.sample_id ASC 29 | $$ LANGUAGE sql; 30 | 31 | CREATE FUNCTION profile_checkavail_wal_stats_reset(IN sserver_id integer, IN start_id integer, IN end_id integer) 32 | RETURNS BOOLEAN 33 | SET search_path=@extschema@ AS $$ 34 | -- Check if wal statistics were reset 35 | SELECT count(*) > 0 FROM wal_stats_reset(sserver_id, start_id, end_id) 36 | $$ LANGUAGE sql; 37 | 38 | CREATE FUNCTION wal_stats_reset_format(IN sserver_id integer, IN start_id integer, IN end_id integer) 39 | RETURNS TABLE( 40 | sample_id integer, 41 | wal_stats_reset text 42 | ) 43 | SET search_path=@extschema@ AS $$ 44 | SELECT 45 | sample_id, 46 | wal_stats_reset::text 47 | FROM 48 | wal_stats_reset(sserver_id, start_id, end_id) 49 | $$ LANGUAGE sql; 50 | 51 | CREATE FUNCTION wal_stats_reset_format_diff(IN sserver_id integer, 52 | IN start1_id integer, IN end1_id integer, 53 | IN start2_id integer, IN end2_id integer) 54 | RETURNS TABLE( 55 | interval_num integer, 56 | sample_id integer, 57 | wal_stats_reset text 58 | ) 59 | SET search_path=@extschema@ AS $$ 60 | SELECT 61 | 1 AS interval_num, 62 | sample_id, 63 | wal_stats_reset::text 64 | FROM 65 | wal_stats_reset(sserver_id, start1_id, end1_id) 66 | UNION 67 | SELECT 68 | 2 AS interval_num, 69 | sample_id, 70 | wal_stats_reset::text 71 | FROM 72 | wal_stats_reset(sserver_id, start2_id, end2_id) 73 | $$ LANGUAGE sql; 74 | 75 | CREATE FUNCTION wal_stats(IN sserver_id integer, IN start_id integer, IN end_id integer) 76 | RETURNS TABLE( 77 | server_id integer, 78 | wal_records bigint, 79 | wal_fpi bigint, 80 | wal_bytes numeric, 81 | wal_buffers_full bigint, 82 | wal_write bigint, 83 | wal_sync bigint, 84 | wal_write_time double precision, 85 | wal_sync_time double precision 86 | ) 87 | SET search_path=@extschema@ AS $$ 88 | 
SELECT
    st.server_id as server_id,
    sum(wal_records)::bigint as wal_records,
    sum(wal_fpi)::bigint as wal_fpi,
    sum(wal_bytes)::numeric as wal_bytes,
    sum(wal_buffers_full)::bigint as wal_buffers_full,
    sum(wal_write)::bigint as wal_write,
    sum(wal_sync)::bigint as wal_sync,
    sum(wal_write_time)::double precision as wal_write_time,
    sum(wal_sync_time)::double precision as wal_sync_time
  FROM sample_stat_wal st
  WHERE st.server_id = sserver_id AND st.sample_id BETWEEN start_id + 1 AND end_id
  GROUP BY st.server_id
$$ LANGUAGE sql;

-- Report-oriented formatting of interval WAL statistics.
-- Zero values are NULLified so empty cells can be suppressed in the report.
-- duration is the interval length in seconds; *_time fields are in
-- milliseconds, so time/10/duration yields percent of interval elapsed time
-- (ms / 1000 -> s, / duration -> fraction, * 100 -> percent).
CREATE FUNCTION wal_stats_format(IN sserver_id integer, IN start_id integer, IN end_id integer,
  duration numeric)
RETURNS TABLE(
    wal_records bigint,
    wal_fpi bigint,
    wal_bytes numeric,
    wal_bytes_text text,
    wal_bytes_per_sec text,
    wal_buffers_full bigint,
    wal_write bigint,
    wal_write_per_sec numeric,
    wal_sync bigint,
    wal_sync_per_sec numeric,
    wal_write_time numeric,
    wal_write_time_per_sec text,
    wal_sync_time numeric,
    wal_sync_time_per_sec text
)
SET search_path=@extschema@ AS $$
  SELECT
    NULLIF(wal_records, 0)::bigint,
    NULLIF(wal_fpi, 0)::bigint,
    NULLIF(wal_bytes, 0)::numeric,
    pg_size_pretty(NULLIF(wal_bytes, 0)),
    pg_size_pretty(round(NULLIF(wal_bytes, 0)/NULLIF(duration, 0))::bigint),
    NULLIF(wal_buffers_full, 0)::bigint,
    NULLIF(wal_write, 0)::bigint,
    round((NULLIF(wal_write, 0)/NULLIF(duration, 0))::numeric,2),
    NULLIF(wal_sync, 0)::bigint,
    round((NULLIF(wal_sync, 0)/NULLIF(duration, 0))::numeric,2),
    round(cast(NULLIF(wal_write_time, 0)/1000 as numeric),2),
    round((NULLIF(wal_write_time, 0)/10/NULLIF(duration, 0))::numeric,2) || '%',
    round(cast(NULLIF(wal_sync_time, 0)/1000 as numeric),2),
    round((NULLIF(wal_sync_time, 0)/10/NULLIF(duration, 0))::numeric,2) || '%'
  FROM
    wal_stats(sserver_id, start_id, end_id)
$$ LANGUAGE sql;

-- Two-interval (differential report) variant of wal_stats_format.
-- NOTE(review): wal_bytes1/wal_bytes2 are declared bigint here while the
-- single-interval variant returns wal_bytes as numeric - looks inconsistent;
-- confirm against report templates before changing either signature.
CREATE FUNCTION wal_stats_format(IN sserver_id integer,
  IN start1_id integer, IN end1_id integer,
  IN start2_id integer, IN end2_id integer,
  duration1 numeric, duration2 numeric)
RETURNS TABLE(
    wal_records1 bigint,
    wal_fpi1 bigint,
    wal_bytes1 bigint,
    wal_bytes_text1 text,
    wal_bytes_per_sec1 text,
    wal_buffers_full1 bigint,
    wal_write1 bigint,
    wal_write_per_sec1 numeric,
    wal_sync1 bigint,
    wal_sync_per_sec1 numeric,
    wal_write_time1 numeric,
    wal_write_time_per_sec1 text,
    wal_sync_time1 numeric,
    wal_sync_time_per_sec1 text,

    wal_records2 bigint,
    wal_fpi2 bigint,
    wal_bytes2 bigint,
    wal_bytes_text2 text,
    wal_bytes_per_sec2 text,
    wal_buffers_full2 bigint,
    wal_write2 bigint,
    wal_write_per_sec2 numeric,
    wal_sync2 bigint,
    wal_sync_per_sec2 numeric,
    wal_write_time2 numeric,
    wal_write_time_per_sec2 text,
    wal_sync_time2 numeric,
    wal_sync_time_per_sec2 text
)
SET search_path=@extschema@ AS $$
  SELECT
    -- Interval 1
    NULLIF(ws1.wal_records, 0)::bigint,
    NULLIF(ws1.wal_fpi, 0)::bigint,
    NULLIF(ws1.wal_bytes, 0)::bigint,
    pg_size_pretty(NULLIF(ws1.wal_bytes, 0)),
    pg_size_pretty(round(NULLIF(ws1.wal_bytes, 0)/NULLIF(duration1, 0))::bigint),
    NULLIF(ws1.wal_buffers_full, 0)::bigint,
    NULLIF(ws1.wal_write, 0)::bigint,
    round((NULLIF(ws1.wal_write, 0)/NULLIF(duration1, 0))::numeric,2),
    NULLIF(ws1.wal_sync, 0)::bigint,
    round((NULLIF(ws1.wal_sync, 0)/NULLIF(duration1, 0))::numeric,2),
    round(cast(NULLIF(ws1.wal_write_time, 0)/1000 as numeric),2),
    round((NULLIF(ws1.wal_write_time, 0)/10/NULLIF(duration1, 0))::numeric,2) || '%',
    round(cast(NULLIF(ws1.wal_sync_time, 0)/1000 as numeric),2),
    round((NULLIF(ws1.wal_sync_time, 0)/10/NULLIF(duration1,
0))::numeric,2) || '%',

    -- Interval 2
    NULLIF(ws2.wal_records, 0)::bigint,
    NULLIF(ws2.wal_fpi, 0)::bigint,
    NULLIF(ws2.wal_bytes, 0)::bigint,
    pg_size_pretty(NULLIF(ws2.wal_bytes, 0)),
    pg_size_pretty(round(NULLIF(ws2.wal_bytes, 0)/NULLIF(duration2, 0))::bigint),
    NULLIF(ws2.wal_buffers_full, 0)::bigint,
    NULLIF(ws2.wal_write, 0)::bigint,
    round((NULLIF(ws2.wal_write, 0)/NULLIF(duration2, 0))::numeric,2),
    NULLIF(ws2.wal_sync, 0)::bigint,
    round((NULLIF(ws2.wal_sync, 0)/NULLIF(duration2, 0))::numeric,2),
    round(cast(NULLIF(ws2.wal_write_time, 0)/1000 as numeric),2),
    round((NULLIF(ws2.wal_write_time, 0)/10/NULLIF(duration2, 0))::numeric,2) || '%',
    round(cast(NULLIF(ws2.wal_sync_time, 0)/1000 as numeric),2),
    round((NULLIF(ws2.wal_sync_time, 0)/10/NULLIF(duration2, 0))::numeric,2) || '%'
  FROM
    -- wal_stats() yields at most one row per interval, so CROSS JOIN
    -- simply puts the two intervals side by side
    wal_stats(sserver_id, start1_id, end1_id) ws1
    CROSS JOIN
    wal_stats(sserver_id, start2_id, end2_id) ws2
$$ LANGUAGE sql;
--------------------------------------------------------------------------------
-- /report/report.sql
--------------------------------------------------------------------------------
/* ===== Main report function ===== */

-- Main report entry point: renders the statistics report for the
-- (start_id, end_id] sample interval of server sserver_id.
CREATE FUNCTION get_report(IN sserver_id integer, IN start_id integer, IN end_id integer,
    IN description text = NULL, IN with_growth boolean = false,
    IN db_exclude name[] = NULL) RETURNS text SET search_path=@extschema@ AS $$
DECLARE
    report          text;
    report_data     jsonb;
    report_context  jsonb;
BEGIN
    -- Interval expanding in case of growth stats requested
    IF with_growth THEN
      BEGIN
        SELECT left_bound, right_bound INTO STRICT start_id, end_id
        FROM get_sized_bounds(sserver_id, start_id, end_id);
      EXCEPTION
        WHEN OTHERS THEN
          RAISE 'Samples with sizes collected for requested interval (%) not found',
            format('%s - %s',start_id, end_id);
      END;
    END IF;

    -- Getting report context and check conditions
    report_context := get_report_context(sserver_id, start_id, end_id, description);

    -- Prepare report template
    report := get_report_template(report_context, 1);
    -- Populate template with report data
    report_data := sections_jsonb(report_context, sserver_id, 1);
    report_data := jsonb_set(report_data, '{datasets}',
      get_report_datasets(report_context, sserver_id, db_exclude));
    report := replace(report, '{dynamic:data1}', report_data::text);

    RETURN report;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION get_report(IN sserver_id integer, IN start_id integer, IN end_id integer,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function. Takes server_id and IDs of start and end sample (inclusive).';

-- Overload: resolve server by name, then delegate to the server_id variant
CREATE FUNCTION get_report(IN server name, IN start_id integer, IN end_id integer,
  IN description text = NULL, IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(get_server_by_name(server), start_id, end_id,
    description, with_growth, db_exclude);
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN server name, IN start_id integer, IN end_id integer,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function. Takes server name and IDs of start and end sample (inclusive).';

-- Overload: implicit 'local' server
CREATE FUNCTION get_report(IN start_id integer, IN end_id integer,
  IN description text = NULL, IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report('local',start_id,end_id,description,with_growth,db_exclude);
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN start_id integer, IN end_id integer,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function for local server. Takes IDs of start and end sample (inclusive).';

-- Overload: map a time range onto sample IDs, then delegate
CREATE FUNCTION get_report(IN sserver_id integer, IN time_range tstzrange,
  IN description text = NULL, IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(sserver_id, start_id, end_id, description, with_growth, db_exclude)
  FROM get_sampleids_by_timerange(sserver_id, time_range)
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN sserver_id integer, IN time_range tstzrange,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function. Takes server ID and time interval.';

CREATE FUNCTION get_report(IN server name, IN time_range tstzrange,
  IN description text = NULL, IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(get_server_by_name(server), start_id, end_id, description, with_growth, db_exclude)
  FROM get_sampleids_by_timerange(get_server_by_name(server), time_range)
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN server name, IN time_range tstzrange,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function. Takes server name and time interval.';

CREATE FUNCTION get_report(IN time_range tstzrange, IN description text = NULL,
  IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(get_server_by_name('local'), start_id, end_id, description, with_growth, db_exclude)
  FROM get_sampleids_by_timerange(get_server_by_name('local'), time_range)
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN time_range tstzrange,
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function for local server. Takes time interval.';

-- Overload: report over a named baseline of a server
CREATE FUNCTION get_report(IN server name, IN baseline varchar(25),
  IN description text = NULL, IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(get_server_by_name(server), start_id, end_id, description, with_growth, db_exclude)
  FROM get_baseline_samples(get_server_by_name(server), baseline)
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report(IN server name, IN baseline varchar(25),
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function for server baseline. Takes server name and baseline name.';

CREATE FUNCTION get_report(IN baseline varchar(25), IN description text = NULL,
  IN with_growth boolean = false,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
BEGIN
  RETURN get_report('local',baseline,description,with_growth,db_exclude);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION get_report(IN baseline varchar(25),
  IN description text, IN with_growth boolean, IN db_exclude name[])
IS 'Statistics report generation function for local server baseline. Takes baseline name.';

-- Report over the two most recent samples of the given (default local) server
CREATE FUNCTION get_report_latest(IN server name = NULL,
  IN db_exclude name[] = NULL)
RETURNS text SET search_path=@extschema@ AS $$
  SELECT get_report(srv.server_id, s.sample_id, e.sample_id, NULL, false, db_exclude)
  FROM samples s JOIN samples e ON (s.server_id = e.server_id AND s.sample_id = e.sample_id - 1)
    JOIN servers srv ON (e.server_id = srv.server_id AND e.sample_id = srv.last_sample_id)
  WHERE srv.server_name = COALESCE(server, 'local')
$$ LANGUAGE sql;
COMMENT ON FUNCTION get_report_latest(IN server name, IN db_exclude name[]) IS 'Statistics report generation function for last two samples';
--------------------------------------------------------------------------------
-- /sample/compat.sql
--------------------------------------------------------------------------------
/* ==== Backward compatibility functions ====*/
-- Legacy names kept for callers that predate the snapshot -> sample rename
CREATE FUNCTION snapshot() RETURNS TABLE (
    server name,
    result text,
    elapsed interval day to second (2)
)
SET search_path=@extschema@ AS $$
  SELECT * FROM take_sample()
$$ LANGUAGE SQL;

CREATE FUNCTION snapshot(IN server name) RETURNS integer SET search_path=@extschema@ AS $$
BEGIN
    RETURN take_sample(server);
END;
$$ LANGUAGE plpgsql;
--------------------------------------------------------------------------------
-- /sample/pg_wait_sampling.sql
--------------------------------------------------------------------------------
/* pg_wait_sampling support */

-- Dispatch wait-sampling collection by the installed pg_wait_sampling
-- version. Unknown versions are silently skipped (no wait stats collected).
CREATE FUNCTION collect_pg_wait_sampling_stats(IN properties jsonb, IN sserver_id integer, IN s_id integer, IN topn integer)
RETURNS void SET search_path=@extschema@ AS $$
DECLARE
BEGIN
  CASE (
    SELECT extversion
    FROM jsonb_to_recordset(properties #> '{extensions}')
      AS ext(extname text, extversion text)
    WHERE extname = 'pg_wait_sampling'
  )
    WHEN '1.1' THEN
      PERFORM
collect_pg_wait_sampling_stats_11(properties, sserver_id, s_id, topn);
    ELSE
      NULL;
  END CASE;

END;
$$ LANGUAGE plpgsql;

-- Collects the wait event profile from pg_wait_sampling v1.1 on the monitored
-- server (through the dblink connection 'server_connection') into
-- wait_sampling_total, then resets the remote profile so the next sample
-- accumulates from zero.
CREATE FUNCTION collect_pg_wait_sampling_stats_11(IN properties jsonb, IN sserver_id integer, IN s_id integer, IN topn integer)
RETURNS void SET search_path=@extschema@ AS $$
DECLARE
  qres record;

  st_query text;
BEGIN
  -- Adding dblink extension schema to search_path if it does not already there
  -- NOTE(review): splitting search_path on ', ' assumes a space after every
  -- comma - confirm this matches how search_path is assembled here
  SELECT extnamespace::regnamespace AS dblink_schema INTO STRICT qres FROM pg_catalog.pg_extension WHERE extname = 'dblink';
  IF NOT string_to_array(current_setting('search_path'),', ') @> ARRAY[qres.dblink_schema::text] THEN
    EXECUTE 'SET LOCAL search_path TO ' || current_setting('search_path')||','|| qres.dblink_schema;
  END IF;

  -- Remote query: waited time per (event_type, event) as sample count *
  -- profile_period; stmt_waited counts only samples attached to a statement
  -- (queryid present). %1$I is the remote pg_wait_sampling schema.
  st_query := format('SELECT w.*,row_number() OVER () as weid '
    'FROM ( '
      'SELECT '
        'COALESCE(event_type, ''N/A'') as event_type, '
        'COALESCE(event, ''On CPU'') as event, '
        'sum(count * current_setting(''pg_wait_sampling.profile_period'')::bigint) as tot_waited, '
        'sum(count * current_setting(''pg_wait_sampling.profile_period'')::bigint) '
          'FILTER (WHERE queryid IS NOT NULL AND queryid != 0) as stmt_waited '
      'FROM '
        '%1$I.pg_wait_sampling_profile '
      'GROUP BY '
        'event_type, '
        'event) as w',
    (
      SELECT extnamespace FROM jsonb_to_recordset(properties #> '{extensions}')
        AS x(extname text, extnamespace text)
      WHERE extname = 'pg_wait_sampling'
    )
  );

  INSERT INTO wait_sampling_total(
    server_id,
    sample_id,
    sample_wevnt_id,
    event_type,
    event,
    tot_waited,
    stmt_waited
  )
  SELECT
    sserver_id,
    s_id,
    dbl.weid,
    dbl.event_type,
    dbl.event,
    dbl.tot_waited,
    dbl.stmt_waited
  FROM
    dblink('server_connection', st_query) AS dbl(
      event_type text,
      event text,
      tot_waited bigint,
      stmt_waited bigint,
      weid integer
  );

  -- reset wait sampling profile
  SELECT * INTO qres FROM dblink('server_connection',
    format('SELECT %1$I.pg_wait_sampling_reset_profile()',
      (
        SELECT extnamespace FROM jsonb_to_recordset(properties #> '{extensions}')
          AS x(extname text, extnamespace text)
        WHERE extname = 'pg_wait_sampling'
      )
    )
  ) AS t(res char(1));

END;
$$ LANGUAGE plpgsql;
--------------------------------------------------------------------------------
-- /schema/Makefile
--------------------------------------------------------------------------------
schema_files = \
	core.sql \
	subsample.sql \
	cluster.sql \
	tablespaces.sql \
	roles.sql \
	db.sql \
	tables.sql \
	indexes.sql \
	statements.sql \
	pg_wait_sampling.sql \
	rusage.sql \
	funcs.sql \
	import.sql \
	settings.sql \
	smpl_timing.sql \
	reports.sql \
	extension_versions.sql

schema.sql: $(schema_files)
	cat $(schema_files) \
	> schema.sql
--------------------------------------------------------------------------------
-- /schema/cluster.sql
--------------------------------------------------------------------------------
/* ==== Clusterwide stats history tables ==== */

CREATE TABLE sample_stat_cluster
(
    server_id                  integer,
    sample_id                  integer,
    checkpoints_timed          bigint,
    checkpoints_req            bigint,
    checkpoint_write_time      double precision,
    checkpoint_sync_time       double precision,
    buffers_checkpoint         bigint,
    buffers_clean              bigint,
    maxwritten_clean           bigint,
    buffers_backend            bigint,
    buffers_backend_fsync      bigint,
    buffers_alloc              bigint,
    stats_reset                timestamp with time zone, --bgwriter_stats_reset actually
    wal_size                   bigint,
    wal_lsn                    pg_lsn,
    in_recovery                boolean,
    restartpoints_timed        bigint,
    restartpoints_req          bigint,
    restartpoints_done         bigint,
checkpoint_stats_reset timestamp with time zone,
    CONSTRAINT fk_statcluster_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_cluster PRIMARY KEY (server_id, sample_id)
);
COMMENT ON TABLE sample_stat_cluster IS 'Sample cluster statistics table (fields from pg_stat_bgwriter, etc.)';

CREATE TABLE last_stat_cluster(LIKE sample_stat_cluster);
ALTER TABLE last_stat_cluster ADD CONSTRAINT pk_last_stat_cluster_samples
  PRIMARY KEY (server_id, sample_id);
ALTER TABLE last_stat_cluster ADD CONSTRAINT fk_last_stat_cluster_samples
  FOREIGN KEY (server_id, sample_id) REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
    DEFERRABLE INITIALLY IMMEDIATE;
COMMENT ON TABLE last_stat_cluster IS 'Last sample data for calculating diffs in next sample';

CREATE TABLE sample_stat_wal
(
    server_id           integer,
    sample_id           integer,
    wal_records         bigint,
    wal_fpi             bigint,
    wal_bytes           numeric,
    wal_buffers_full    bigint,
    wal_write           bigint,
    wal_sync            bigint,
    wal_write_time      double precision,
    wal_sync_time       double precision,
    stats_reset         timestamp with time zone,
    CONSTRAINT fk_statwal_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_wal PRIMARY KEY (server_id, sample_id)
);
COMMENT ON TABLE sample_stat_wal IS 'Sample WAL statistics table';

-- LIKE for structural copies, consistent with the other last_stat_* tables
-- (the previous CREATE TABLE ... AS SELECT ... WHERE false produced the same
-- column set; neither form copies constraints here)
CREATE TABLE last_stat_wal (LIKE sample_stat_wal);
ALTER TABLE last_stat_wal ADD CONSTRAINT pk_last_stat_wal_samples
  PRIMARY KEY (server_id, sample_id);
ALTER TABLE last_stat_wal ADD CONSTRAINT fk_last_stat_wal_samples
  FOREIGN KEY (server_id, sample_id)
  REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
  DEFERRABLE INITIALLY IMMEDIATE;
COMMENT ON TABLE last_stat_wal IS 'Last WAL sample data for calculating diffs in next sample';

CREATE TABLE sample_stat_archiver
(
    server_id               integer,
    sample_id               integer,
    archived_count          bigint,
    last_archived_wal       text,
    last_archived_time      timestamp with time zone,
    failed_count            bigint,
    last_failed_wal         text,
    last_failed_time        timestamp with time zone,
    stats_reset             timestamp with time zone,
    CONSTRAINT fk_sample_stat_archiver_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_archiver PRIMARY KEY (server_id, sample_id)
);
COMMENT ON TABLE sample_stat_archiver IS 'Sample archiver statistics table (fields from pg_stat_archiver)';

-- LIKE for consistency with the sibling last_stat_* tables (was WHERE 0=1 CTAS)
CREATE TABLE last_stat_archiver (LIKE sample_stat_archiver);
ALTER TABLE last_stat_archiver ADD CONSTRAINT pk_last_stat_archiver_samples
  PRIMARY KEY (server_id, sample_id);
ALTER TABLE last_stat_archiver ADD CONSTRAINT fk_last_stat_archiver_samples
  FOREIGN KEY (server_id, sample_id) REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
    DEFERRABLE INITIALLY IMMEDIATE;
COMMENT ON TABLE last_stat_archiver IS 'Last sample data for calculating diffs in next sample';

CREATE TABLE sample_stat_io
(
    server_id           integer,
    sample_id           integer,
    backend_type        text,
    object              text,
    context             text,
    reads               bigint,
    read_time           double precision,
    writes              bigint,
    write_time          double precision,
    writebacks          bigint,
    writeback_time      double precision,
    extends             bigint,
    extend_time         double precision,
    op_bytes            bigint,
    hits                bigint,
    evictions           bigint,
    reuses              bigint,
    fsyncs              bigint,
    fsync_time          double precision,
    stats_reset         timestamp with time zone,
    CONSTRAINT pk_sample_stat_io PRIMARY KEY (server_id, sample_id, backend_type, object, context),
    CONSTRAINT fk_sample_stat_io_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE sample_stat_io IS 'Sample IO statistics table (fields from pg_stat_io)';

CREATE TABLE last_stat_io (LIKE sample_stat_io);
ALTER TABLE last_stat_io ADD CONSTRAINT pk_last_stat_io_samples
  PRIMARY KEY (server_id, sample_id, backend_type, object, context);
ALTER TABLE last_stat_io ADD CONSTRAINT fk_last_stat_io_samples
  FOREIGN KEY (server_id, sample_id) REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
    DEFERRABLE INITIALLY IMMEDIATE;
COMMENT ON TABLE last_stat_io IS 'Last sample data for calculating diffs in next sample';

CREATE TABLE sample_stat_slru
(
    server_id       integer,
    sample_id       integer,
    name            text,
    blks_zeroed     bigint,
    blks_hit        bigint,
    blks_read       bigint,
    blks_written    bigint,
    blks_exists     bigint,
    flushes         bigint,
    truncates       bigint,
    stats_reset     timestamp with time zone,
    CONSTRAINT pk_sample_stat_slru PRIMARY KEY (server_id, sample_id, name),
    CONSTRAINT fk_sample_stat_slru_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE sample_stat_slru IS 'Sample SLRU statistics table (fields from pg_stat_slru)';

CREATE TABLE last_stat_slru (LIKE sample_stat_slru);
ALTER TABLE last_stat_slru ADD CONSTRAINT pk_last_stat_slru_samples
  PRIMARY KEY (server_id, sample_id, name);
ALTER TABLE last_stat_slru ADD CONSTRAINT fk_last_stat_slru_samples
  FOREIGN KEY (server_id, sample_id) REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
    DEFERRABLE INITIALLY IMMEDIATE;
COMMENT ON TABLE last_stat_slru IS 'Last sample data for calculating diffs in next sample';
--------------------------------------------------------------------------------
-- /schema/core.sql
--------------------------------------------------------------------------------
/* ========= Core tables ========= */

CREATE TABLE servers (
    server_id           SERIAL PRIMARY KEY,
    server_name         name UNIQUE NOT NULL,
    server_description  text,
    server_created      timestamp with time zone DEFAULT now(),
    db_exclude          name[] DEFAULT NULL,
    enabled             boolean DEFAULT TRUE,
    connstr             text,
    max_sample_age      integer NULL,
    last_sample_id      integer DEFAULT 0 NOT NULL,
    size_smp_wnd_start  time with time zone,
    size_smp_wnd_dur    interval hour to second,
    size_smp_interval   interval day to minute
);
COMMENT ON TABLE servers IS 'Monitored servers (Postgres clusters) list';

CREATE TABLE samples (
    server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    sample_id integer NOT NULL,
    sample_time timestamp (0) with time zone,
    CONSTRAINT pk_samples PRIMARY KEY (server_id, sample_id)
);

CREATE INDEX ix_sample_time ON samples(server_id, sample_time);
COMMENT ON TABLE samples IS 'Sample times list';

CREATE TABLE baselines (
    server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE,
    bl_id SERIAL,
    bl_name varchar (25) NOT NULL,
    keep_until timestamp (0) with time zone,
    CONSTRAINT pk_baselines PRIMARY KEY (server_id, bl_id),
    CONSTRAINT uk_baselines UNIQUE (server_id,bl_name) DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE baselines IS 'Baselines list';

CREATE TABLE bl_samples (
    server_id integer NOT NULL,
    sample_id integer NOT NULL,
    bl_id integer NOT NULL,
    CONSTRAINT fk_bl_samples_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES
samples(server_id, sample_id) ON DELETE RESTRICT
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_bl_samples_baselines FOREIGN KEY (server_id, bl_id)
      REFERENCES baselines(server_id, bl_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_bl_samples PRIMARY KEY (server_id, bl_id, sample_id)
);
CREATE INDEX ix_bl_samples_blid ON bl_samples(bl_id);
CREATE INDEX ix_bl_samples_sample ON bl_samples(server_id, sample_id);
COMMENT ON TABLE bl_samples IS 'Samples in baselines';
--------------------------------------------------------------------------------
-- /schema/db.sql
--------------------------------------------------------------------------------
/* ==== Database stats history tables === */

CREATE TABLE sample_stat_database
(
    server_id                   integer,
    sample_id                   integer,
    datid                       oid,
    datname                     name NOT NULL,
    xact_commit                 bigint,
    xact_rollback               bigint,
    blks_read                   bigint,
    blks_hit                    bigint,
    tup_returned                bigint,
    tup_fetched                 bigint,
    tup_inserted                bigint,
    tup_updated                 bigint,
    tup_deleted                 bigint,
    conflicts                   bigint,
    temp_files                  bigint,
    temp_bytes                  bigint,
    deadlocks                   bigint,
    blk_read_time               double precision,
    blk_write_time              double precision,
    stats_reset                 timestamp with time zone,
    datsize                     bigint,
    datsize_delta               bigint,
    datistemplate               boolean,
    session_time                double precision,
    active_time                 double precision,
    idle_in_transaction_time    double precision,
    sessions                    bigint,
    sessions_abandoned          bigint,
    sessions_fatal              bigint,
    sessions_killed             bigint,
    checksum_failures           bigint,
    checksum_last_failure       timestamp with time zone,
    CONSTRAINT fk_statdb_samples FOREIGN KEY (server_id, sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_database PRIMARY KEY (server_id, sample_id, datid)
);
COMMENT ON TABLE sample_stat_database IS 'Sample database statistics table (fields from pg_stat_database)';

-- Partitioned by server_id (one partition per monitored server)
CREATE TABLE last_stat_database (LIKE sample_stat_database, dattablespace oid, datallowconn boolean)
PARTITION BY LIST (server_id);
COMMENT ON TABLE last_stat_database IS 'Last sample data for calculating diffs in next sample';
--------------------------------------------------------------------------------
-- /schema/extension_versions.sql
--------------------------------------------------------------------------------
/* ==== Extension versions history table ==== */
CREATE TABLE extension_versions (
    server_id       integer,
    datid           oid,
    first_seen      timestamp (0) with time zone,
    last_sample_id  integer,
    extname         name,
    extversion      text,
    CONSTRAINT pk_extension_versions PRIMARY KEY (server_id, datid, extname, first_seen),
    CONSTRAINT fk_extension_versions_servers FOREIGN KEY (server_id)
      REFERENCES servers (server_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_extension_versions_samples FOREIGN KEY (server_id, last_sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_extension_versions_last_sample_id ON extension_versions(server_id, last_sample_id);
COMMENT ON TABLE extension_versions IS 'pg_extension values changes detected at time of sample';

CREATE TABLE last_extension_versions (
    server_id   integer,
    datid       oid,
    sample_id   integer,
    extname     name,
    extversion  text,
    CONSTRAINT pk_last_extension_versions PRIMARY KEY (server_id, sample_id, datid, extname)
);
COMMENT ON TABLE last_extension_versions IS 'Last sample data of pg_extension for calculating diffs in next sample';

-- Maps each extension version row to every sample taken while that version
-- was current (open-ended when last_sample_id IS NULL)
CREATE VIEW v_extension_versions AS
SELECT
    ev.server_id,
    ev.datid,
    ev.extname,
    ev.first_seen,
    ev.extversion,
    ev.last_sample_id,
    s.sample_id,
    s.sample_time
FROM extension_versions ev
  JOIN samples s ON
    s.server_id = ev.server_id AND
    s.sample_time >= ev.first_seen AND
    (s.sample_id <= ev.last_sample_id OR ev.last_sample_id IS NULL)
;
COMMENT ON VIEW v_extension_versions IS 'Provides postgres extensions for samples';
--------------------------------------------------------------------------------
-- /schema/funcs.sql
--------------------------------------------------------------------------------
/* ==== Function stats history ==== */

CREATE TABLE funcs_list(
    server_id   integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    datid       oid,
    funcid      oid,
    schemaname  name NOT NULL,
    funcname    name NOT NULL,
    funcargs    text NOT NULL,
    last_sample_id integer,
    CONSTRAINT pk_funcs_list PRIMARY KEY (server_id, datid, funcid),
    CONSTRAINT fk_funcs_list_samples FOREIGN KEY (server_id, last_sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_funcs_list_samples ON funcs_list (server_id, last_sample_id);
COMMENT ON TABLE funcs_list IS 'Function names and schemas, captured in samples';

CREATE TABLE sample_stat_user_functions (
    server_id   integer,
    sample_id   integer,
    datid       oid,
    funcid      oid,
    calls       bigint,
    total_time  double precision,
    self_time   double precision,
    trg_fn      boolean,
    CONSTRAINT fk_user_functions_functions FOREIGN KEY (server_id, datid, funcid)
      REFERENCES funcs_list (server_id, datid, funcid)
      ON DELETE NO ACTION
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_user_functions_dat FOREIGN KEY (server_id, sample_id, datid)
      REFERENCES sample_stat_database (server_id, sample_id, datid) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_user_functions PRIMARY KEY (server_id,
sample_id, datid, funcid)
);
CREATE INDEX ix_sample_stat_user_functions_fl ON sample_stat_user_functions(server_id, datid, funcid);

COMMENT ON TABLE sample_stat_user_functions IS 'Stats increments for user functions in all databases by samples';

CREATE VIEW v_sample_stat_user_functions AS
    SELECT
        server_id,
        sample_id,
        datid,
        funcid,
        schemaname,
        funcname,
        funcargs,
        calls,
        total_time,
        self_time,
        trg_fn
    FROM sample_stat_user_functions JOIN funcs_list USING (server_id, datid, funcid);
COMMENT ON VIEW v_sample_stat_user_functions IS 'Reconstructed stats view with function names and schemas';

-- Partitioned by server_id; in_sample marks rows selected for the next sample
CREATE TABLE last_stat_user_functions (LIKE v_sample_stat_user_functions, in_sample boolean NOT NULL DEFAULT false)
PARTITION BY LIST (server_id);
COMMENT ON TABLE last_stat_user_functions IS 'Last sample data for calculating diffs in next sample';

CREATE TABLE sample_stat_user_func_total (
    server_id   integer,
    sample_id   integer,
    datid       oid,
    calls       bigint,
    total_time  double precision,
    trg_fn      boolean,
    CONSTRAINT fk_user_func_tot_dat FOREIGN KEY (server_id, sample_id, datid)
      REFERENCES sample_stat_database (server_id, sample_id, datid) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT pk_sample_stat_user_func_total PRIMARY KEY (server_id, sample_id, datid, trg_fn)
);
COMMENT ON TABLE sample_stat_user_func_total IS 'Total stats for user functions in all databases by samples';
--------------------------------------------------------------------------------
-- /schema/import.sql
--------------------------------------------------------------------------------
/* === Data tables used in dump import process ==== */
CREATE TABLE import_queries_version_order (
    extension         text,
    version           text,
    parent_extension  text,
    parent_version    text,
    CONSTRAINT pk_import_queries_version_order PRIMARY KEY (extension, version),
    -- Self-reference: each version points at the version it upgrades from
    CONSTRAINT fk_import_queries_version_order FOREIGN KEY (parent_extension, parent_version)
      REFERENCES import_queries_version_order (extension,version)
);
COMMENT ON TABLE import_queries_version_order IS 'Version history used in import process';
--------------------------------------------------------------------------------
-- /schema/indexes.sql
--------------------------------------------------------------------------------
/* ==== Indexes stats tables ==== */
CREATE TABLE indexes_list(
    server_id       integer NOT NULL,
    datid           oid NOT NULL,
    indexrelid      oid NOT NULL,
    relid           oid NOT NULL,
    schemaname      name NOT NULL,
    indexrelname    name NOT NULL,
    last_sample_id  integer,
    CONSTRAINT pk_indexes_list PRIMARY KEY (server_id, datid, indexrelid),
    CONSTRAINT fk_indexes_tables FOREIGN KEY (server_id, datid, relid)
      REFERENCES tables_list(server_id, datid, relid)
      ON DELETE NO ACTION ON UPDATE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_indexes_list_samples FOREIGN KEY (server_id, last_sample_id)
      REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
      DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_indexes_list_rel ON indexes_list(server_id, datid, relid);
CREATE INDEX ix_indexes_list_smp ON indexes_list(server_id, last_sample_id);

COMMENT ON TABLE indexes_list IS 'Index names and schemas, captured in samples';

CREATE TABLE sample_stat_indexes (
    server_id           integer,
    sample_id           integer,
    datid               oid,
    indexrelid          oid,
    tablespaceid        oid NOT NULL,
    idx_scan            bigint,
    idx_tup_read        bigint,
    idx_tup_fetch       bigint,
    idx_blks_read       bigint,
    idx_blks_hit        bigint,
    relsize             bigint,
    relsize_diff        bigint,
    indisunique         bool,
    relpages_bytes      bigint,
    relpages_bytes_diff bigint,
    last_idx_scan       timestamp with time zone,
    CONSTRAINT
fk_stat_indexes_indexes FOREIGN KEY (server_id, datid, indexrelid) 42 | REFERENCES indexes_list(server_id, datid, indexrelid) 43 | ON DELETE NO ACTION ON UPDATE RESTRICT 44 | DEFERRABLE INITIALLY IMMEDIATE, 45 | CONSTRAINT fk_stat_indexes_dat FOREIGN KEY (server_id, sample_id, datid) 46 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE 47 | DEFERRABLE INITIALLY IMMEDIATE, 48 | CONSTRAINT fk_stat_indexes_tablespaces FOREIGN KEY (server_id, sample_id, tablespaceid) 49 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid) 50 | ON DELETE CASCADE 51 | DEFERRABLE INITIALLY IMMEDIATE, 52 | CONSTRAINT pk_sample_stat_indexes PRIMARY KEY (server_id, sample_id, datid, indexrelid) 53 | ); 54 | CREATE INDEX ix_sample_stat_indexes_il ON sample_stat_indexes(server_id, datid, indexrelid); 55 | CREATE INDEX ix_sample_stat_indexes_ts ON sample_stat_indexes(server_id, sample_id, tablespaceid); 56 | 57 | COMMENT ON TABLE sample_stat_indexes IS 'Stats increments for user indexes in all databases by samples'; 58 | 59 | CREATE VIEW v_sample_stat_indexes AS 60 | SELECT 61 | server_id, 62 | sample_id, 63 | datid, 64 | relid, 65 | indexrelid, 66 | tl.schemaname, 67 | tl.relname, 68 | il.indexrelname, 69 | idx_scan, 70 | idx_tup_read, 71 | idx_tup_fetch, 72 | idx_blks_read, 73 | idx_blks_hit, 74 | relsize, 75 | relsize_diff, 76 | tablespaceid, 77 | indisunique, 78 | relpages_bytes, 79 | relpages_bytes_diff, 80 | last_idx_scan 81 | FROM 82 | sample_stat_indexes s 83 | JOIN indexes_list il USING (datid, indexrelid, server_id) 84 | JOIN tables_list tl USING (datid, relid, server_id); 85 | COMMENT ON VIEW v_sample_stat_indexes IS 'Reconstructed stats view with table and index names and schemas'; 86 | 87 | CREATE TABLE last_stat_indexes ( 88 | server_id integer, 89 | sample_id integer, 90 | datid oid, 91 | relid oid NOT NULL, 92 | indexrelid oid, 93 | schemaname name, 94 | relname name, 95 | indexrelname name, 96 | idx_scan bigint, 97 | 
idx_tup_read bigint, 98 | idx_tup_fetch bigint, 99 | idx_blks_read bigint, 100 | idx_blks_hit bigint, 101 | relsize bigint, 102 | relsize_diff bigint, 103 | tablespaceid oid NOT NULL, 104 | indisunique bool, 105 | in_sample boolean NOT NULL DEFAULT false, 106 | relpages_bytes bigint, 107 | relpages_bytes_diff bigint, 108 | last_idx_scan timestamp with time zone 109 | ) 110 | PARTITION BY LIST (server_id); 111 | COMMENT ON TABLE last_stat_indexes IS 'Last sample data for calculating diffs in next sample'; 112 | 113 | CREATE TABLE sample_stat_indexes_total ( 114 | server_id integer, 115 | sample_id integer, 116 | datid oid, 117 | tablespaceid oid, 118 | idx_scan bigint, 119 | idx_tup_read bigint, 120 | idx_tup_fetch bigint, 121 | idx_blks_read bigint, 122 | idx_blks_hit bigint, 123 | relsize_diff bigint, 124 | CONSTRAINT fk_stat_indexes_tot_dat FOREIGN KEY (server_id, sample_id, datid) 125 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE 126 | DEFERRABLE INITIALLY IMMEDIATE, 127 | CONSTRAINT fk_stat_tablespaces_tot_dat FOREIGN KEY (server_id, sample_id, tablespaceid) 128 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid) ON DELETE CASCADE 129 | DEFERRABLE INITIALLY IMMEDIATE, 130 | CONSTRAINT pk_sample_stat_indexes_tot PRIMARY KEY (server_id, sample_id, datid, tablespaceid) 131 | ); 132 | CREATE INDEX ix_sample_stat_indexes_total_ts ON sample_stat_indexes_total(server_id, sample_id, tablespaceid); 133 | 134 | COMMENT ON TABLE sample_stat_indexes_total IS 'Total stats for indexes in all databases by samples'; 135 | -------------------------------------------------------------------------------- /schema/pg_wait_sampling.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE wait_sampling_total( 2 | server_id integer, 3 | sample_id integer, 4 | sample_wevnt_id integer, 5 | event_type text NOT NULL, 6 | event text NOT NULL, 7 | tot_waited bigint NOT NULL, 8 | stmt_waited 
bigint,
    CONSTRAINT pk_sample_weid PRIMARY KEY (server_id, sample_id, sample_wevnt_id),
    CONSTRAINT uk_sample_we UNIQUE (server_id, sample_id, event_type, event),
    CONSTRAINT fk_wait_sampling_samples FOREIGN KEY (server_id, sample_id)
        REFERENCES samples(server_id, sample_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);

-- ---------------------------------------------------------------------------
-- /schema/reports.sql
-- ---------------------------------------------------------------------------

CREATE TABLE report_static (
    static_name text,
    static_text text,
    -- NOTE(review): constraint name says "headers" although the table is
    -- report_static; kept as-is since existing deployments may reference it.
    CONSTRAINT pk_report_headers PRIMARY KEY (static_name)
);

CREATE TABLE report (
    report_id          integer,
    report_name        text,
    report_description text,
    template           text,
    CONSTRAINT pk_report PRIMARY KEY (report_id),
    CONSTRAINT fk_report_template FOREIGN KEY (template)
        REFERENCES report_static(static_name)
        ON UPDATE CASCADE
);

-- Report section tree; parent_sect_id links a section to its parent section
CREATE TABLE report_struct (
    report_id      integer,
    sect_id        text,
    parent_sect_id text,
    s_ord          integer,
    toc_cap        text,
    tbl_cap        text,
    feature        text,
    function_name  text,
    content        jsonb DEFAULT NULL,
    sect_struct    jsonb,
    CONSTRAINT pk_report_struct PRIMARY KEY (report_id, sect_id),
    CONSTRAINT fk_report_struct_report FOREIGN KEY (report_id)
        REFERENCES report(report_id) ON UPDATE CASCADE,
    CONSTRAINT fk_report_struct_tree FOREIGN KEY (report_id, parent_sect_id)
        REFERENCES report_struct(report_id, sect_id) ON UPDATE CASCADE
);
CREATE INDEX ix_fk_report_struct_tree ON report_struct(report_id, parent_sect_id);

-- ---------------------------------------------------------------------------
-- /schema/roles.sql
-- ---------------------------------------------------------------------------

CREATE TABLE roles_list(
    server_id integer REFERENCES servers(server_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE,
    userid         oid,
    username       name NOT NULL,
    last_sample_id integer,
    CONSTRAINT pk_roles_list PRIMARY KEY (server_id, userid),
    CONSTRAINT fk_roles_list_smp FOREIGN KEY (server_id, last_sample_id)
        REFERENCES samples(server_id, sample_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_roles_list_smp ON roles_list(server_id, last_sample_id);

COMMENT ON TABLE roles_list IS 'Roles, captured in samples';

-- ---------------------------------------------------------------------------
-- /schema/rusage.sql
-- ---------------------------------------------------------------------------

/* ==== rusage statements history tables ==== */
CREATE TABLE sample_kcache (
    server_id        integer,
    sample_id        integer,
    userid           oid,
    datid            oid,
    queryid          bigint,
    queryid_md5      char(32),
    plan_user_time   double precision, -- User CPU time used
    plan_system_time double precision, -- System CPU time used
    plan_minflts     bigint, -- Number of page reclaims (soft page faults)
    plan_majflts     bigint, -- Number of page faults (hard page faults)
    plan_nswaps      bigint, -- Number of swaps
    plan_reads       bigint, -- Number of bytes read by the filesystem layer
    plan_writes      bigint, -- Number of bytes written by the filesystem layer
    plan_msgsnds     bigint, -- Number of IPC messages sent
    plan_msgrcvs     bigint, -- Number of IPC messages received
    plan_nsignals    bigint, -- Number of signals received
    plan_nvcsws      bigint, -- Number of voluntary context switches
    plan_nivcsws     bigint,
    exec_user_time   double precision, -- User CPU time used
    exec_system_time double precision, -- System CPU time used
    exec_minflts     bigint, -- Number of page reclaims (soft page faults)
    exec_majflts     bigint, -- Number of page faults (hard page faults)
    exec_nswaps      bigint, -- Number of swaps
    exec_reads       bigint, -- Number of bytes read by the filesystem layer
    exec_writes      bigint, -- Number of bytes written by the filesystem layer
    exec_msgsnds     bigint, -- Number of IPC messages sent
    exec_msgrcvs     bigint, -- Number of IPC messages received
    exec_nsignals    bigint, -- Number of signals received
    exec_nvcsws      bigint, -- Number of voluntary context switches
    exec_nivcsws     bigint,
    toplevel         boolean,
    stats_since      timestamp with time zone,
    CONSTRAINT pk_sample_kcache_n PRIMARY KEY (server_id, sample_id, datid, userid, queryid, toplevel),
    CONSTRAINT fk_kcache_stmt_list FOREIGN KEY (server_id,queryid_md5)
        REFERENCES stmt_list (server_id,queryid_md5)
        ON DELETE NO ACTION ON UPDATE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_kcache_st FOREIGN KEY (server_id, sample_id, datid, userid, queryid, toplevel)
        REFERENCES sample_statements(server_id, sample_id, datid, userid, queryid, toplevel) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_sample_kcache_sl ON sample_kcache(server_id,queryid_md5);

COMMENT ON TABLE sample_kcache IS 'Sample sample_kcache statistics table (fields from pg_stat_kcache)';

CREATE TABLE last_stat_kcache (
    server_id        integer,
    sample_id        integer,
    userid           oid,
    datid            oid,
    toplevel         boolean DEFAULT true,
    queryid          bigint,
    plan_user_time   double precision, -- User CPU time used
    plan_system_time double precision, -- System CPU time used
    plan_minflts     bigint, -- Number of page reclaims (soft page faults)
    plan_majflts     bigint, -- Number of page faults (hard page faults)
    plan_nswaps      bigint, -- Number of swaps
    plan_reads       bigint, -- Number of bytes read by the filesystem layer
    plan_writes      bigint, -- Number of bytes written by the filesystem layer
    plan_msgsnds     bigint, -- Number of IPC messages sent
    plan_msgrcvs     bigint, -- Number of IPC messages received
    plan_nsignals    bigint, -- Number of signals received
    plan_nvcsws      bigint, -- Number of voluntary context switches
    plan_nivcsws     bigint,
    exec_user_time   double precision, -- User CPU time used
    exec_system_time double precision, -- System CPU time used
    exec_minflts     bigint, -- Number of page reclaims (soft page faults)
    exec_majflts     bigint, -- Number of page faults (hard page faults)
    exec_nswaps      bigint, -- Number of swaps
    exec_reads       bigint, -- Number of bytes read by the filesystem layer
    exec_writes      bigint, -- Number of bytes written by the filesystem layer
    exec_msgsnds     bigint, -- Number of IPC messages sent
    exec_msgrcvs     bigint, -- Number of IPC messages received
    exec_nsignals    bigint, -- Number of signals received
    exec_nvcsws      bigint, -- Number of voluntary context switches
    exec_nivcsws     bigint,
    stats_since      timestamp with time zone
)
PARTITION BY LIST (server_id);

CREATE TABLE sample_kcache_total (
    server_id        integer,
    sample_id        integer,
    datid            oid,
    plan_user_time   double precision, -- User CPU time used
    plan_system_time double precision, -- System CPU time used
    plan_minflts     bigint, -- Number of page reclaims (soft page faults)
    plan_majflts     bigint, -- Number of page faults (hard page faults)
    plan_nswaps      bigint, -- Number of swaps
    plan_reads       bigint, -- Number of bytes read by the filesystem layer
    --plan_reads_blks bigint, -- Number of 8K blocks read by the filesystem layer
    plan_writes      bigint, -- Number of bytes written by the filesystem layer
    --plan_writes_blks bigint, -- Number of 8K blocks written by the filesystem layer
    plan_msgsnds     bigint, -- Number of IPC messages sent
    plan_msgrcvs     bigint, -- Number of IPC messages received
    plan_nsignals    bigint, -- Number of signals received
    plan_nvcsws      bigint, -- Number of voluntary context switches
    plan_nivcsws     bigint,
    exec_user_time   double precision, -- User CPU time used
    exec_system_time double precision, -- System CPU time used
    exec_minflts     bigint, -- Number of page reclaims (soft page faults)
    exec_majflts     bigint, -- Number of page faults (hard page faults)
    exec_nswaps      bigint, -- Number of swaps
    exec_reads       bigint, -- Number of bytes read by the filesystem layer
    --exec_reads_blks bigint, -- Number of 8K blocks read by the filesystem layer
    exec_writes      bigint, -- Number of bytes written by the filesystem layer
    --exec_writes_blks bigint, -- Number of 8K blocks written by the filesystem layer
    exec_msgsnds     bigint, -- Number of IPC messages sent
    exec_msgrcvs     bigint, -- Number of IPC messages received
    exec_nsignals    bigint, -- Number of signals received
    exec_nvcsws      bigint, -- Number of voluntary context switches
    exec_nivcsws     bigint,
    statements       bigint NOT NULL,
    CONSTRAINT pk_sample_kcache_total PRIMARY KEY (server_id, sample_id, datid),
    CONSTRAINT fk_kcache_t_st FOREIGN KEY (server_id, sample_id, datid)
        REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE sample_kcache_total IS 'Aggregated stats for kcache, based on pg_stat_kcache';

-- ---------------------------------------------------------------------------
-- /schema/settings.sql
-- ---------------------------------------------------------------------------

/* ==== Settings history table ==== */
CREATE TABLE sample_settings (
    server_id     integer,
    first_seen    timestamp (0) with time zone,
    setting_scope smallint, -- Scope of setting.
-- Currently may be 1 for pg_settings and 2 for other adm functions (like version)
    name            text,
    setting         text,
    reset_val       text,
    boot_val        text,
    unit            text,
    sourcefile      text,
    sourceline      integer,
    pending_restart boolean,
    CONSTRAINT pk_sample_settings PRIMARY KEY (server_id, setting_scope, name, first_seen),
    CONSTRAINT fk_sample_settings_servers FOREIGN KEY (server_id)
        REFERENCES servers(server_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
-- Unique partial index on system_identifier: guarantees a single version of
-- this setting per server, since multiple versions would break the
-- export/import functionality.
CREATE UNIQUE INDEX uk_sample_settings_sysid ON
  sample_settings (server_id,name) WHERE name='system_identifier';

COMMENT ON TABLE sample_settings IS 'pg_settings values changes detected at time of sample';

CREATE VIEW v_sample_settings AS
  SELECT
    server_id,
    sample_id,
    first_seen,
    setting_scope,
    name,
    setting,
    reset_val,
    boot_val,
    unit,
    sourcefile,
    sourceline,
    pending_restart
  FROM samples s
    JOIN sample_settings ss USING (server_id)
    -- For each sample, pick the most recent settings version observed
    -- no later than the sample time
    JOIN LATERAL
      (SELECT server_id, name, max(first_seen) as first_seen
      FROM sample_settings WHERE server_id = s.server_id AND first_seen <= s.sample_time
      GROUP BY server_id, name) lst
    USING (server_id, name, first_seen)
;
COMMENT ON VIEW v_sample_settings IS 'Provides postgres settings for samples';

-- ---------------------------------------------------------------------------
-- /schema/smpl_timing.sql
-- ---------------------------------------------------------------------------

/* ==== Sample taking time tracking storage ==== */
CREATE TABLE sample_timings (
    server_id  integer NOT NULL,
    sample_id  integer NOT NULL,
    event      text,
    time_spent interval MINUTE TO SECOND (2),
    CONSTRAINT pk_sample_timings PRIMARY KEY (server_id, sample_id, event),
    CONSTRAINT fk_sample_timings_sample FOREIGN KEY (server_id, sample_id)
        REFERENCES samples(server_id, sample_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE sample_timings IS 'Sample taking time statistics';

CREATE VIEW v_sample_timings AS
  SELECT
    srv.server_name,
    smp.sample_id,
    smp.sample_time,
    tm.event as sampling_event,
    tm.time_spent
  FROM
    sample_timings tm
    JOIN servers srv USING (server_id)
    JOIN samples smp USING (server_id, sample_id);
COMMENT ON VIEW v_sample_timings IS 'Sample taking time statistics with server names and sample times';

-- ---------------------------------------------------------------------------
-- /schema/statements.sql
-- ---------------------------------------------------------------------------

/* === Statements history tables ==== */
CREATE TABLE stmt_list(
    server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE,
    queryid_md5    char(32),
    query          text,
    last_sample_id integer,
    CONSTRAINT pk_stmt_list PRIMARY KEY (server_id, queryid_md5),
    CONSTRAINT fk_stmt_list_samples FOREIGN KEY (server_id, last_sample_id)
        REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_stmt_list_smp ON stmt_list(server_id, last_sample_id);
COMMENT ON TABLE stmt_list IS 'Statements, captured in samples';

CREATE TABLE sample_statements (
    server_id              integer,
    sample_id              integer,
    userid                 oid,
    datid                  oid,
    queryid                bigint,
    queryid_md5            char(32),
    plans                  bigint,
    total_plan_time        double precision,
    min_plan_time          double precision,
    max_plan_time          double precision,
    mean_plan_time         double precision,
    sum_plan_time_sq       numeric, -- sum of plan times squared for stddev calculation
    calls                  bigint,
    total_exec_time        double precision,
    min_exec_time          double precision,
    max_exec_time          double precision,
    mean_exec_time         double precision,
    sum_exec_time_sq       numeric, -- sum of exec times squared for stddev calculation
    rows                   bigint,
    shared_blks_hit        bigint,
    shared_blks_read       bigint,
    shared_blks_dirtied    bigint,
    shared_blks_written    bigint,
    local_blks_hit         bigint,
    local_blks_read        bigint,
    local_blks_dirtied     bigint,
    local_blks_written     bigint,
    temp_blks_read         bigint,
    temp_blks_written      bigint,
    shared_blk_read_time   double precision,
    shared_blk_write_time  double precision,
    wal_records            bigint,
    wal_fpi                bigint,
    wal_bytes              numeric,
    toplevel               boolean,
    jit_functions          bigint,
    jit_generation_time    double precision,
    jit_inlining_count     bigint,
    jit_inlining_time      double precision,
    jit_optimization_count bigint,
    jit_optimization_time  double precision,
    jit_emission_count     bigint,
    jit_emission_time      double precision,
    temp_blk_read_time     double precision,
    temp_blk_write_time    double precision,
    local_blk_read_time    double precision,
    local_blk_write_time   double precision,
    jit_deform_count       bigint,
    jit_deform_time        double precision,
    stats_since            timestamp with time zone,
    minmax_stats_since     timestamp with time zone,
    CONSTRAINT pk_sample_statements_n PRIMARY KEY (server_id, sample_id, datid, userid, queryid, toplevel),
    CONSTRAINT fk_stmt_list FOREIGN KEY (server_id,queryid_md5)
        REFERENCES stmt_list (server_id,queryid_md5)
        ON DELETE NO ACTION ON UPDATE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE,
    -- NOTE(review): constraint name has a typo ("statments"); kept as-is
    -- since renaming could break existing migration scripts.
    CONSTRAINT fk_statments_dat FOREIGN KEY (server_id, sample_id, datid)
        REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE,
    CONSTRAINT fk_statements_roles FOREIGN KEY (server_id, userid)
        REFERENCES roles_list (server_id, userid)
        ON DELETE NO ACTION ON UPDATE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
CREATE INDEX ix_sample_stmts_qid ON sample_statements (server_id,queryid_md5);
CREATE INDEX ix_sample_stmts_rol ON sample_statements (server_id, userid);
COMMENT ON TABLE sample_statements IS 'Sample statement statistics table (fields from pg_stat_statements)';

CREATE TABLE last_stat_statements (
    server_id              integer,
    sample_id              integer,
    userid                 oid,
    username               name,
    datid                  oid,
    queryid                bigint,
    queryid_md5            char(32),
    plans                  bigint,
    total_plan_time        double precision,
    min_plan_time          double precision,
    max_plan_time          double precision,
    mean_plan_time         double precision,
    stddev_plan_time       double precision,
    calls                  bigint,
    total_exec_time        double precision,
    min_exec_time          double precision,
    max_exec_time          double precision,
    mean_exec_time         double precision,
    stddev_exec_time       double precision,
    rows                   bigint,
    shared_blks_hit        bigint,
    shared_blks_read       bigint,
    shared_blks_dirtied    bigint,
    shared_blks_written    bigint,
    local_blks_hit         bigint,
    local_blks_read        bigint,
    local_blks_dirtied     bigint,
    local_blks_written     bigint,
    temp_blks_read         bigint,
    temp_blks_written      bigint,
    shared_blk_read_time   double precision,
    shared_blk_write_time  double precision,
    wal_records            bigint,
    wal_fpi                bigint,
    wal_bytes              numeric,
    toplevel               boolean,
    in_sample              boolean DEFAULT false,
    jit_functions          bigint,
    jit_generation_time    double precision,
    jit_inlining_count     bigint,
    jit_inlining_time      double precision,
    jit_optimization_count bigint,
    jit_optimization_time  double precision,
    jit_emission_count     bigint,
    jit_emission_time      double precision,
    temp_blk_read_time     double precision,
    temp_blk_write_time    double precision,
    local_blk_read_time    double precision,
    local_blk_write_time   double precision,
    jit_deform_count       bigint,
    jit_deform_time        double precision,
    stats_since            timestamp with time zone,
    minmax_stats_since     timestamp with time zone
)
PARTITION BY LIST (server_id);

CREATE TABLE sample_statements_total (
    server_id              integer,
    sample_id              integer,
    datid                  oid,
    plans                  bigint,
    total_plan_time        double precision,
    calls                  bigint,
    total_exec_time        double precision,
    rows                   bigint,
    shared_blks_hit        bigint,
    shared_blks_read       bigint,
    shared_blks_dirtied    bigint,
    shared_blks_written    bigint,
    local_blks_hit         bigint,
    local_blks_read        bigint,
    local_blks_dirtied     bigint,
    local_blks_written     bigint,
    temp_blks_read         bigint,
    temp_blks_written      bigint,
    shared_blk_read_time   double precision,
    shared_blk_write_time  double precision,
    wal_records            bigint,
    wal_fpi                bigint,
    wal_bytes              numeric,
    statements             bigint,
    jit_functions          bigint,
    jit_generation_time    double precision,
    jit_inlining_count     bigint,
    jit_inlining_time      double precision,
    jit_optimization_count bigint,
    jit_optimization_time  double precision,
    jit_emission_count     bigint,
    jit_emission_time      double precision,
    temp_blk_read_time     double precision,
    temp_blk_write_time    double precision,
    mean_max_plan_time     double precision,
    mean_max_exec_time     double precision,
    mean_min_plan_time     double precision,
    mean_min_exec_time     double precision,
    local_blk_read_time    double precision,
    local_blk_write_time   double precision,
    jit_deform_count       bigint,
    jit_deform_time        double precision,
    CONSTRAINT pk_sample_statements_total PRIMARY KEY (server_id, sample_id, datid),
    -- NOTE(review): "statments" typo kept for compatibility (see fk_statments_dat)
    CONSTRAINT fk_statments_t_dat FOREIGN KEY (server_id, sample_id, datid)
        REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
        DEFERRABLE INITIALLY IMMEDIATE
);
COMMENT ON TABLE
sample_statements_total IS 'Aggregated stats for sample, based on pg_stat_statements'; 191 | -------------------------------------------------------------------------------- /schema/tables.sql: -------------------------------------------------------------------------------- 1 | /* ==== Tables stats history ==== */ 2 | CREATE TABLE tables_list( 3 | server_id integer, 4 | datid oid, 5 | relid oid, 6 | relkind char(1) NOT NULL, 7 | schemaname name NOT NULL, 8 | relname name NOT NULL, 9 | last_sample_id integer, 10 | CONSTRAINT pk_tables_list PRIMARY KEY (server_id, datid, relid), 11 | CONSTRAINT fk_tables_list_samples FOREIGN KEY (server_id, last_sample_id) 12 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE 13 | DEFERRABLE INITIALLY IMMEDIATE 14 | ); 15 | CREATE INDEX ix_tables_list_samples ON tables_list(server_id, last_sample_id); 16 | COMMENT ON TABLE tables_list IS 'Table names and schemas, captured in samples'; 17 | 18 | CREATE TABLE sample_stat_tables ( 19 | server_id integer, 20 | sample_id integer, 21 | datid oid, 22 | relid oid, 23 | tablespaceid oid NOT NULL, 24 | seq_scan bigint, 25 | seq_tup_read bigint, 26 | idx_scan bigint, 27 | idx_tup_fetch bigint, 28 | n_tup_ins bigint, 29 | n_tup_upd bigint, 30 | n_tup_del bigint, 31 | n_tup_hot_upd bigint, 32 | n_live_tup bigint, 33 | n_dead_tup bigint, 34 | n_mod_since_analyze bigint, 35 | n_ins_since_vacuum bigint, 36 | last_vacuum timestamp with time zone, 37 | last_autovacuum timestamp with time zone, 38 | last_analyze timestamp with time zone, 39 | last_autoanalyze timestamp with time zone, 40 | vacuum_count bigint, 41 | autovacuum_count bigint, 42 | analyze_count bigint, 43 | autoanalyze_count bigint, 44 | heap_blks_read bigint, 45 | heap_blks_hit bigint, 46 | idx_blks_read bigint, 47 | idx_blks_hit bigint, 48 | toast_blks_read bigint, 49 | toast_blks_hit bigint, 50 | tidx_blks_read bigint, 51 | tidx_blks_hit bigint, 52 | relsize bigint, 53 | relsize_diff bigint, 54 | relpages_bytes bigint, 55 | 
relpages_bytes_diff bigint, 56 | last_seq_scan timestamp with time zone, 57 | last_idx_scan timestamp with time zone, 58 | n_tup_newpage_upd bigint, 59 | reltoastrelid oid, 60 | CONSTRAINT pk_sample_stat_tables PRIMARY KEY (server_id, sample_id, datid, relid), 61 | CONSTRAINT fk_st_tables_dat FOREIGN KEY (server_id, sample_id, datid) 62 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE 63 | DEFERRABLE INITIALLY IMMEDIATE, 64 | CONSTRAINT fk_st_tables_tablespace FOREIGN KEY (server_id, sample_id, tablespaceid) 65 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid) ON DELETE CASCADE 66 | DEFERRABLE INITIALLY IMMEDIATE, 67 | CONSTRAINT fk_st_tables_tables FOREIGN KEY (server_id, datid, relid) 68 | REFERENCES tables_list(server_id, datid, relid) 69 | ON DELETE NO ACTION ON UPDATE RESTRICT 70 | DEFERRABLE INITIALLY IMMEDIATE, 71 | CONSTRAINT fk_st_tables_toast FOREIGN KEY (server_id, sample_id, datid, reltoastrelid) 72 | REFERENCES sample_stat_tables(server_id, sample_id, datid, relid) 73 | ON DELETE NO ACTION ON UPDATE RESTRICT 74 | DEFERRABLE INITIALLY IMMEDIATE 75 | ); 76 | CREATE INDEX is_sample_stat_tables_ts ON sample_stat_tables(server_id, sample_id, tablespaceid); 77 | CREATE INDEX ix_sample_stat_tables_rel ON sample_stat_tables(server_id, datid, relid); 78 | 79 | COMMENT ON TABLE sample_stat_tables IS 'Stats increments for user tables in all databases by samples'; 80 | 81 | CREATE VIEW v_sample_stat_tables AS 82 | SELECT 83 | server_id, 84 | sample_id, 85 | datid, 86 | relid, 87 | tablespacename, 88 | schemaname, 89 | relname, 90 | seq_scan, 91 | seq_tup_read, 92 | idx_scan, 93 | idx_tup_fetch, 94 | n_tup_ins, 95 | n_tup_upd, 96 | n_tup_del, 97 | n_tup_hot_upd, 98 | n_live_tup, 99 | n_dead_tup, 100 | n_mod_since_analyze, 101 | n_ins_since_vacuum, 102 | last_vacuum, 103 | last_autovacuum, 104 | last_analyze, 105 | last_autoanalyze, 106 | vacuum_count, 107 | autovacuum_count, 108 | analyze_count, 109 | 
autoanalyze_count, 110 | heap_blks_read, 111 | heap_blks_hit, 112 | idx_blks_read, 113 | idx_blks_hit, 114 | toast_blks_read, 115 | toast_blks_hit, 116 | tidx_blks_read, 117 | tidx_blks_hit, 118 | relsize, 119 | relsize_diff, 120 | tablespaceid, 121 | reltoastrelid, 122 | relkind, 123 | relpages_bytes, 124 | relpages_bytes_diff, 125 | last_seq_scan, 126 | last_idx_scan, 127 | n_tup_newpage_upd 128 | FROM sample_stat_tables 129 | JOIN tables_list USING (server_id, datid, relid) 130 | JOIN tablespaces_list tl USING (server_id, tablespaceid); 131 | COMMENT ON VIEW v_sample_stat_tables IS 'Tables stats view with table names and schemas'; 132 | 133 | CREATE TABLE last_stat_tables( 134 | server_id integer, 135 | sample_id integer, 136 | datid oid, 137 | relid oid, 138 | schemaname name, 139 | relname name, 140 | seq_scan bigint, 141 | seq_tup_read bigint, 142 | idx_scan bigint, 143 | idx_tup_fetch bigint, 144 | n_tup_ins bigint, 145 | n_tup_upd bigint, 146 | n_tup_del bigint, 147 | n_tup_hot_upd bigint, 148 | n_live_tup bigint, 149 | n_dead_tup bigint, 150 | n_mod_since_analyze bigint, 151 | n_ins_since_vacuum bigint, 152 | last_vacuum timestamp with time zone, 153 | last_autovacuum timestamp with time zone, 154 | last_analyze timestamp with time zone, 155 | last_autoanalyze timestamp with time zone, 156 | vacuum_count bigint, 157 | autovacuum_count bigint, 158 | analyze_count bigint, 159 | autoanalyze_count bigint, 160 | heap_blks_read bigint, 161 | heap_blks_hit bigint, 162 | idx_blks_read bigint, 163 | idx_blks_hit bigint, 164 | toast_blks_read bigint, 165 | toast_blks_hit bigint, 166 | tidx_blks_read bigint, 167 | tidx_blks_hit bigint, 168 | relsize bigint, 169 | relsize_diff bigint, 170 | tablespaceid oid, 171 | reltoastrelid oid, 172 | relkind char(1), 173 | in_sample boolean NOT NULL DEFAULT false, 174 | relpages_bytes bigint, 175 | relpages_bytes_diff bigint, 176 | last_seq_scan timestamp with time zone, 177 | last_idx_scan timestamp with time zone, 178 | 
n_tup_newpage_upd bigint 179 | ) 180 | PARTITION BY LIST (server_id); 181 | COMMENT ON TABLE last_stat_tables IS 'Last sample data for calculating diffs in next sample'; 182 | 183 | CREATE TABLE sample_stat_tables_total ( 184 | server_id integer, 185 | sample_id integer, 186 | datid oid, 187 | tablespaceid oid, 188 | relkind char(1) NOT NULL, 189 | seq_scan bigint, 190 | seq_tup_read bigint, 191 | idx_scan bigint, 192 | idx_tup_fetch bigint, 193 | n_tup_ins bigint, 194 | n_tup_upd bigint, 195 | n_tup_del bigint, 196 | n_tup_hot_upd bigint, 197 | vacuum_count bigint, 198 | autovacuum_count bigint, 199 | analyze_count bigint, 200 | autoanalyze_count bigint, 201 | heap_blks_read bigint, 202 | heap_blks_hit bigint, 203 | idx_blks_read bigint, 204 | idx_blks_hit bigint, 205 | toast_blks_read bigint, 206 | toast_blks_hit bigint, 207 | tidx_blks_read bigint, 208 | tidx_blks_hit bigint, 209 | relsize_diff bigint, 210 | n_tup_newpage_upd bigint, 211 | CONSTRAINT pk_sample_stat_tables_tot PRIMARY KEY (server_id, sample_id, datid, relkind, tablespaceid), 212 | CONSTRAINT fk_st_tables_tot_dat FOREIGN KEY (server_id, sample_id, datid) 213 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE 214 | DEFERRABLE INITIALLY IMMEDIATE, 215 | CONSTRAINT fk_st_tablespaces_tot_dat FOREIGN KEY (server_id, sample_id, tablespaceid) 216 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid) ON DELETE CASCADE 217 | DEFERRABLE INITIALLY IMMEDIATE 218 | ); 219 | CREATE INDEX ix_sample_stat_tables_total_ts ON sample_stat_tables_total(server_id, sample_id, tablespaceid); 220 | 221 | COMMENT ON TABLE sample_stat_tables_total IS 'Total stats for all tables in all databases by samples'; 222 | -------------------------------------------------------------------------------- /schema/tablespaces.sql: -------------------------------------------------------------------------------- 1 | /* ==== Tablespaces stats history ==== */ 2 | CREATE TABLE tablespaces_list( 
3 | server_id integer, 4 | tablespaceid oid, 5 | tablespacename name NOT NULL, 6 | tablespacepath text NOT NULL, -- cannot be changed without changing oid 7 | last_sample_id integer, 8 | CONSTRAINT pk_tablespace_list PRIMARY KEY (server_id, tablespaceid), 9 | CONSTRAINT fk_tablespaces_list_samples FOREIGN KEY (server_id, last_sample_id) 10 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE 11 | DEFERRABLE INITIALLY IMMEDIATE 12 | ); 13 | CREATE INDEX ix_tablespaces_list_smp ON tablespaces_list(server_id, last_sample_id); 14 | COMMENT ON TABLE tablespaces_list IS 'Tablespaces, captured in samples'; 15 | /* Per-sample tablespace size measurements */ 16 | CREATE TABLE sample_stat_tablespaces 17 | ( 18 | server_id integer, 19 | sample_id integer, 20 | tablespaceid oid, 21 | size bigint NOT NULL, 22 | size_delta bigint NOT NULL, 23 | CONSTRAINT fk_stattbs_samples FOREIGN KEY (server_id, sample_id) 24 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE 25 | DEFERRABLE INITIALLY IMMEDIATE, 26 | CONSTRAINT fk_st_tablespaces_tablespaces FOREIGN KEY (server_id, tablespaceid) 27 | REFERENCES tablespaces_list(server_id, tablespaceid) 28 | ON DELETE NO ACTION ON UPDATE CASCADE /* name rows cannot be deleted while stats reference them; oid updates propagate */ 29 | DEFERRABLE INITIALLY IMMEDIATE, 30 | CONSTRAINT pk_sample_stat_tablespaces PRIMARY KEY (server_id, sample_id, tablespaceid) 31 | ); 32 | CREATE INDEX ix_sample_stat_tablespaces_ts ON sample_stat_tablespaces(server_id, tablespaceid); 33 | 34 | COMMENT ON TABLE sample_stat_tablespaces IS 'Sample tablespaces statistics (fields from pg_tablespace)'; 35 | /* Convenience view resolving tablespace names/paths via tablespaces_list */ 36 | CREATE VIEW v_sample_stat_tablespaces AS 37 | SELECT 38 | server_id, 39 | sample_id, 40 | tablespaceid, 41 | tablespacename, 42 | tablespacepath, 43 | size, 44 | size_delta 45 | FROM sample_stat_tablespaces JOIN tablespaces_list USING (server_id, tablespaceid); 46 | COMMENT ON VIEW v_sample_stat_tablespaces IS 'Tablespaces stats view with tablespace names'; 47 | 48 | CREATE TABLE last_stat_tablespaces (LIKE v_sample_stat_tablespaces) 49 | PARTITION BY LIST (server_id); 50 | 
COMMENT ON TABLE last_stat_tablespaces IS 'Last sample data for calculating diffs in next sample'; 51 | -------------------------------------------------------------------------------- /sql/create_extension.sql: -------------------------------------------------------------------------------- /* Test bootstrap: each required extension is installed into its own schema */ 1 | CREATE SCHEMA IF NOT EXISTS profile; 2 | CREATE SCHEMA IF NOT EXISTS dblink; 3 | CREATE SCHEMA IF NOT EXISTS statements; 4 | CREATE EXTENSION dblink SCHEMA dblink; 5 | CREATE EXTENSION pg_stat_statements SCHEMA statements; 6 | CREATE EXTENSION pg_profile SCHEMA profile; 7 | -------------------------------------------------------------------------------- /sql/drop_extension.sql: -------------------------------------------------------------------------------- 1 | /* Drop test objects */ 2 | DROP TABLE profile.grow_table; 3 | DROP FUNCTION profile.dummy_func(); 4 | DROP FUNCTION profile.grow_table_trg_f(); 5 | DROP FUNCTION profile.get_ids; 6 | DROP FUNCTION profile.get_sources; 7 | DROP FUNCTION profile.get_report_sections; 8 | DROP FUNCTION profile.check_dataset_queries; 9 | /* Testing drop server with data */ 10 | SELECT * FROM profile.drop_server('local'); 11 | DROP EXTENSION pg_profile; 12 | DROP EXTENSION IF EXISTS pg_stat_statements; 13 | DROP EXTENSION IF EXISTS dblink; 14 | DROP SCHEMA profile; 15 | DROP SCHEMA dblink; 16 | DROP SCHEMA statements; 17 | -------------------------------------------------------------------------------- /sql/export_import.sql: -------------------------------------------------------------------------------- 1 | SET client_min_messages = WARNING; 2 | /* === Create regular export table === */ 3 | CREATE TABLE profile.export AS SELECT * FROM profile.export_data(); 4 | /* === Create obfuscated export table === */ 5 | CREATE TABLE profile.blind_export AS SELECT * FROM profile.export_data(NULL,NULL,NULL,TRUE); 6 | BEGIN; 7 | /* === rename local server === */ 8 | SELECT profile.rename_server('local','src_local'); 9 | /* === check matching by 
creation date and system identifier === */ 10 | SELECT profile.import_data('profile.export') > 0; 11 | /* === change src_local server creation time so it wont match === */ 12 | UPDATE profile.servers 13 | SET 14 | server_created = server_created - '1 minutes'::interval 15 | WHERE server_name = 'src_local'; 16 | /* === perform load === */ 17 | SELECT profile.import_data('profile.export') > 0; 18 | /* === Integral check - reports must match === */ /* NOTE(review): the replace() string literals below embed literal newlines matching get_report() output — keep them byte-exact */ 19 | \a 20 | \t on 21 | WITH res AS ( 22 | SELECT 23 | profile.get_report('local',1,4) AS imported, 24 | replace( 25 | replace( 26 | profile.get_report('src_local',1,4),'"server_name": "src_local"', 27 | '"server_name": "local"'), 28 | '

        Server name: src_local', 29 | '

        Server name: local' 30 | ) AS exported 31 | ) 32 | SELECT 33 | CASE 34 | WHEN 35 | sha224(convert_to(imported, 'UTF8')) != 36 | sha224(convert_to(exported, 'UTF8')) 37 | THEN 38 | format(E'\n%s\n\n\n%s\n', 39 | imported, 40 | exported 41 | ) 42 | ELSE 43 | 'ok' 44 | END as match 45 | FROM res; 46 | \a 47 | \t off 48 | /* === perform obfuscated load === */ 49 | SELECT profile.drop_server('local'); 50 | SELECT profile.import_data('profile.blind_export') > 0; 51 | /* === check that there is no matching queries === */ 52 | SELECT 53 | count(*) 54 | FROM profile.servers s_src 55 | CROSS JOIN profile.servers s_blind 56 | JOIN profile.stmt_list q_src ON 57 | (q_src.server_id = s_src.server_id) 58 | JOIN profile.stmt_list q_blind ON 59 | (q_src.queryid_md5 = q_blind.queryid_md5 AND q_blind.server_id = s_blind.server_id) 60 | WHERE 61 | s_src.server_name = 'src_local' AND s_blind.server_name = 'local' 62 | AND q_src.query = q_blind.query; 63 | ROLLBACK; 64 | /* === drop export tables === */ 65 | DROP TABLE profile.export; 66 | DROP TABLE profile.blind_export; 67 | -------------------------------------------------------------------------------- /sql/kcache_create_extension.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS profile; 2 | CREATE SCHEMA IF NOT EXISTS dblink; 3 | CREATE SCHEMA IF NOT EXISTS statements; 4 | CREATE SCHEMA IF NOT EXISTS kcache; 5 | CREATE EXTENSION dblink SCHEMA dblink; 6 | CREATE EXTENSION pg_stat_statements SCHEMA statements; 7 | CREATE EXTENSION pg_stat_kcache SCHEMA kcache; 8 | CREATE EXTENSION pg_profile SCHEMA profile; 9 | -------------------------------------------------------------------------------- /sql/kcache_drop_extension.sql: -------------------------------------------------------------------------------- 1 | /* Drop test objects */ 2 | DROP TABLE profile.grow_table; 3 | DROP FUNCTION profile.dummy_func(); 4 | DROP FUNCTION profile.grow_table_trg_f(); 5 | DROP 
FUNCTION profile.get_ids; 6 | DROP FUNCTION profile.get_sources; 7 | DROP FUNCTION profile.get_report_sections; 8 | DROP FUNCTION profile.check_dataset_queries; 9 | /* Testing drop server with data */ 10 | SELECT * FROM profile.drop_server('local'); 11 | DROP EXTENSION pg_profile; 12 | DROP EXTENSION pg_stat_kcache; 13 | DROP EXTENSION pg_stat_statements; 14 | DROP EXTENSION dblink; 15 | DROP SCHEMA profile; 16 | DROP SCHEMA dblink; 17 | DROP SCHEMA statements; 18 | DROP SCHEMA kcache; 19 | -------------------------------------------------------------------------------- /sql/kcache_stat_avail.sql: -------------------------------------------------------------------------------- /* Verify that pg_stat_kcache samples were actually collected */ 1 | SELECT count(1) > 0 FROM profile.sample_kcache; 2 | SELECT count(1) > 0 FROM profile.sample_kcache_total; 3 | -------------------------------------------------------------------------------- /sql/retention_and_baselines.sql: -------------------------------------------------------------------------------- /* Back-date samples 1..5 (sample 1 becomes ~4 days old) to exercise retention */ 1 | UPDATE profile.samples 2 | SET sample_time = now() - (5 - sample_id) * '1 day'::interval - '10 minutes'::interval 3 | WHERE sample_id <= 5; 4 | SELECT server,result FROM profile.take_sample(); 5 | BEGIN; 6 | SELECT profile.delete_samples(); 7 | SELECT sample FROM profile.show_samples() ORDER BY sample; 8 | ROLLBACK; 9 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 10 | SELECT * FROM profile.set_server_max_sample_age('local',1); 11 | /* Testing baseline creation */ 12 | SELECT * FROM profile.create_baseline('testline1',2,4); 13 | BEGIN; 14 | SELECT profile.delete_samples('local',tstzrange( 15 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1), 16 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5), 17 | '[]' 18 | ) 19 | ); 20 | SELECT sample FROM profile.show_samples() ORDER BY sample; 21 | ROLLBACK; 22 | BEGIN; 23 | SELECT profile.delete_samples(tstzrange( 24 | (SELECT sample_time FROM profile.samples WHERE sample_id = 
1), 25 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5), 26 | '[]' 27 | ) 28 | ); 29 | SELECT sample FROM profile.show_samples() ORDER BY sample; 30 | ROLLBACK; 31 | SELECT * FROM profile.create_baseline('testline2',2,4); 32 | SELECT count(*) FROM profile.baselines; 33 | SELECT * FROM profile.keep_baseline('testline2',-1); /* NOTE(review): negative retention presumably expires testline2 immediately — confirm keep_baseline() semantics */ 34 | /* Testing baseline show */ 35 | SELECT baseline, min_sample, max_sample, keep_until_time IS NULL 36 | FROM profile.show_baselines() 37 | ORDER BY baseline; 38 | /* Testing baseline deletion */ 39 | SELECT server,result FROM profile.take_sample(); 40 | SELECT count(*) FROM profile.baselines; 41 | /* Testing samples retention override with baseline */ 42 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 43 | SELECT * FROM profile.drop_baseline('testline1'); 44 | /* Testing samples deletion after baseline removed */ 45 | SELECT server,result FROM profile.take_sample(); 46 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval; 47 | -------------------------------------------------------------------------------- /sql/server_management.sql: -------------------------------------------------------------------------------- 1 | /* == Testing server management functions == */ 2 | SELECT profile.create_server('srvtest','dbname=postgres host=localhost port=5432', TRUE, NULL, 'Server description 1'); 3 | SELECT server_id, server_name, server_description, db_exclude, 4 | enabled, connstr, max_sample_age, last_sample_id 5 | FROM profile.servers WHERE server_name != 'local'; 6 | SELECT profile.rename_server('srvtest','srvtestrenamed'); 7 | SELECT profile.set_server_connstr('srvtestrenamed','dbname=postgres host=localhost port=5433'); 8 | SELECT profile.set_server_description('srvtestrenamed','Server description 2'); 9 | SELECT profile.set_server_db_exclude('srvtestrenamed',ARRAY['db1','db2','db3']); 10 | SELECT profile.set_server_max_sample_age('srvtestrenamed',3); 11 | 
SELECT server_id, server_name, server_description, db_exclude, 12 | enabled, connstr, max_sample_age, last_sample_id 13 | FROM profile.servers WHERE server_name != 'local'; 14 | SELECT profile.disable_server('srvtestrenamed'); 15 | SELECT server_id, server_name, server_description, db_exclude, 16 | enabled, connstr, max_sample_age, last_sample_id 17 | FROM profile.servers WHERE server_name != 'local'; 18 | SELECT profile.enable_server('srvtestrenamed'); 19 | SELECT server_id, server_name, server_description, db_exclude, 20 | enabled, connstr, max_sample_age, last_sample_id 21 | FROM profile.servers WHERE server_name != 'local'; 22 | SELECT * FROM profile.show_servers() where server_name != 'local'; 23 | SELECT * FROM profile.drop_server('srvtestrenamed'); 24 | -------------------------------------------------------------------------------- /sql/sizes_collection.sql: -------------------------------------------------------------------------------- 1 | SET client_min_messages = WARNING; 2 | /* Test size collection sampling settings */ /* grow the table: 5 rows of random 40- and 8000-char strings */ 3 | INSERT INTO profile.grow_table (short_str,long_str) 4 | SELECT array_to_string(array 5 | (select 6 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', 7 | trunc(random() * 62)::integer + 1, 1) 8 | FROM generate_series(1, 40)), '' 9 | ) as arr1, 10 | array_to_string(array 11 | (select 12 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', 13 | trunc(random() * 62)::integer + 1, 1) 14 | FROM generate_series(1, 8000)), '' 15 | ) 16 | FROM generate_series(1,5); 17 | /* Test rare relation sizes collection */ 18 | SELECT profile.set_server_size_sampling('local',current_time - interval '10 minute',interval '30 minute',interval '2 minute'); 19 | -- check show_servers_size_sampling() 20 | SELECT server_name,window_duration,sample_interval FROM profile.show_servers_size_sampling(); 21 | -- (sample 4) 22 | SELECT server,result FROM profile.take_sample(); 23 | -- Disable rare sizes collection 24 | 
SELECT profile.set_server_size_sampling('local',null,null,null); 25 | -- (sample 5) 26 | SELECT server,result FROM profile.take_sample(); 27 | -- check show_samples() 28 | SELECT sample, sizes_collected FROM profile.show_samples() WHERE NOT sizes_collected; 29 | -- check tables sizes collection /* count(col) ignores NULLs, so each flag is true only when that size column was captured for the sample */ 30 | SELECT 31 | sample_id, 32 | count(relsize) > 0 as relsize, 33 | count(relsize_diff) > 0 as relsize_diff, 34 | count(relpages_bytes) > 0 as relpages, 35 | count(relpages_bytes_diff) > 0 as relpages_diff 36 | FROM profile.sample_stat_tables GROUP BY sample_id 37 | ORDER BY sample_id; 38 | -- check indexes sizes collection 39 | SELECT 40 | sample_id, 41 | count(relsize) > 0 as relsize, 42 | count(relsize_diff) > 0 as relsize_diff, 43 | count(relpages_bytes) > 0 as relpages, 44 | count(relpages_bytes_diff) > 0 as relpages_diff 45 | FROM profile.sample_stat_indexes GROUP BY sample_id 46 | ORDER BY sample_id; 47 | --------------------------------------------------------------------------------