4 | GRANT SELECT ON sample_stat_cluster TO public;
5 | GRANT SELECT ON sample_stat_slru TO public;
6 | GRANT SELECT ON sample_stat_wal TO public;
7 | GRANT SELECT ON sample_stat_io TO public;
8 | GRANT SELECT ON sample_stat_archiver TO public;
9 | GRANT SELECT ON indexes_list TO public;
10 | GRANT SELECT ON sample_stat_indexes TO public;
11 | GRANT SELECT ON sample_stat_indexes_total TO public;
12 | GRANT SELECT ON tablespaces_list TO public;
13 | GRANT SELECT ON sample_stat_tablespaces TO public;
14 | GRANT SELECT ON tables_list TO public;
15 | GRANT SELECT ON sample_stat_tables TO public;
16 | GRANT SELECT ON sample_stat_tables_total TO public;
17 | GRANT SELECT ON sample_settings TO public;
18 | GRANT SELECT ON funcs_list TO public;
19 | GRANT SELECT ON sample_stat_user_functions TO public;
20 | GRANT SELECT ON sample_stat_user_func_total TO public;
21 | GRANT SELECT ON sample_stat_database TO public;
22 | GRANT SELECT ON sample_statements TO public;
23 | GRANT SELECT ON sample_statements_total TO public;
24 | GRANT SELECT ON sample_kcache TO public;
25 | GRANT SELECT ON sample_kcache_total TO public;
26 | GRANT SELECT ON roles_list TO public;
27 | GRANT SELECT ON wait_sampling_total TO public;
28 | GRANT SELECT (server_id, server_name, server_description, server_created, db_exclude,
29 | enabled, max_sample_age, last_sample_id, size_smp_wnd_start, size_smp_wnd_dur, size_smp_interval, srv_settings)
30 | ON servers TO public;
31 | GRANT SELECT ON samples TO public;
32 | GRANT SELECT ON baselines TO public;
33 | GRANT SELECT ON bl_samples TO public;
34 | GRANT SELECT ON report_static TO public;
35 | GRANT SELECT ON report TO public;
36 | GRANT SELECT ON report_struct TO public;
37 | GRANT SELECT ON extension_versions TO public;
38 | GRANT SELECT ON table_storage_parameters TO public;
39 | GRANT SELECT ON index_storage_parameters TO public;
40 | GRANT SELECT ON v_sample_stat_indexes TO public;
41 | GRANT SELECT ON v_sample_stat_tablespaces TO public;
42 | GRANT SELECT ON v_sample_timings TO public;
43 | GRANT SELECT ON v_sample_stat_tables TO public;
44 | GRANT SELECT ON v_sample_settings TO public;
45 | GRANT SELECT ON v_sample_stat_user_functions TO public;
46 | GRANT SELECT ON v_extension_versions TO public;
47 | GRANT SELECT ON v_table_storage_parameters TO public;
48 | GRANT SELECT ON v_index_storage_parameters TO public;
49 |
50 | -- pg_read_all_stats can see the query texts
51 | GRANT SELECT ON stmt_list TO pg_read_all_stats;
52 |
--------------------------------------------------------------------------------
/sample/calculate_io_stats.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION calculate_io_stats(IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS void AS $$
3 | -- Calc I/O stat diff
4 | INSERT INTO sample_stat_io(
5 | server_id,
6 | sample_id,
7 | backend_type,
8 | object,
9 | context,
10 | reads,
11 | read_bytes,
12 | read_time,
13 | writes,
14 | write_bytes,
15 | write_time,
16 | writebacks,
17 | writeback_time,
18 | extends,
19 | extend_bytes,
20 | extend_time,
21 | op_bytes,
22 | hits,
23 | evictions,
24 | reuses,
25 | fsyncs,
26 | fsync_time,
27 | stats_reset
28 | )
29 | SELECT
30 | cur.server_id,
31 | cur.sample_id,
32 | cur.backend_type,
33 | cur.object,
34 | cur.context,
35 | cur.reads - COALESCE(lst.reads, 0),
36 | cur.read_bytes - COALESCE(lst.read_bytes, 0),
37 | cur.read_time - COALESCE(lst.read_time, 0),
38 | cur.writes - COALESCE(lst.writes, 0),
39 | cur.write_bytes - COALESCE(lst.write_bytes, 0),
40 | cur.write_time - COALESCE(lst.write_time, 0),
41 | cur.writebacks - COALESCE(lst.writebacks, 0),
42 | cur.writeback_time - COALESCE(lst.writeback_time, 0),
43 | cur.extends - COALESCE(lst.extends, 0),
44 | cur.extend_bytes - COALESCE(lst.extend_bytes, 0),
45 | cur.extend_time - COALESCE(lst.extend_time, 0),
46 | cur.op_bytes,
47 | cur.hits - COALESCE(lst.hits, 0),
48 | cur.evictions - COALESCE(lst.evictions, 0),
49 | cur.reuses - COALESCE(lst.reuses, 0),
50 | cur.fsyncs - COALESCE(lst.fsyncs, 0),
51 | cur.fsync_time - COALESCE(lst.fsync_time, 0),
52 | cur.stats_reset
53 | FROM last_stat_io cur
54 | LEFT OUTER JOIN last_stat_io lst ON
55 | (lst.server_id, lst.sample_id, lst.backend_type, lst.object, lst.context) =
56 | (sserver_id, ssample_id - 1, cur.backend_type, cur.object, cur.context)
57 | AND (cur.op_bytes,cur.stats_reset) IS NOT DISTINCT FROM (lst.op_bytes,lst.stats_reset)
58 | WHERE
59 | (cur.server_id, cur.sample_id) = (sserver_id, ssample_id) AND
60 | GREATEST(
61 | cur.reads - COALESCE(lst.reads, 0),
62 | cur.writes - COALESCE(lst.writes, 0),
63 | cur.writebacks - COALESCE(lst.writebacks, 0),
64 | cur.extends - COALESCE(lst.extends, 0),
65 | cur.hits - COALESCE(lst.hits, 0),
66 | cur.evictions - COALESCE(lst.evictions, 0),
67 | cur.reuses - COALESCE(lst.reuses, 0),
68 | cur.fsyncs - COALESCE(lst.fsyncs, 0)
69 | ) > 0;
70 |
71 | DELETE FROM last_stat_io WHERE server_id = sserver_id AND sample_id != ssample_id;
72 | $$ LANGUAGE sql;
--------------------------------------------------------------------------------
/report/functions/tablespacestat.sql:
--------------------------------------------------------------------------------
1 | /* ===== Tablespace stats functions ===== */
2 |
3 | CREATE FUNCTION tablespace_stats(IN sserver_id integer, IN start_id integer, IN end_id integer)
4 | RETURNS TABLE(
5 | server_id integer,
6 | tablespaceid oid,
7 | tablespacename name,
8 | tablespacepath text,
9 | size_delta bigint,
10 | last_size bigint
11 | ) SET search_path=@extschema@ AS $$
12 | SELECT
13 | st.server_id,
14 | st.tablespaceid,
15 | st.tablespacename,
16 | st.tablespacepath,
17 | sum(st.size_delta)::bigint AS size_delta,
18 | max(st.size) FILTER (WHERE st.sample_id = end_id) AS last_size
19 | FROM v_sample_stat_tablespaces st
20 | WHERE st.server_id = sserver_id
21 | AND st.sample_id BETWEEN start_id + 1 AND end_id
22 | GROUP BY st.server_id, st.tablespaceid, st.tablespacename, st.tablespacepath
23 | $$ LANGUAGE sql;
24 |
25 | CREATE FUNCTION tablespace_stats_format(IN sserver_id integer, IN start_id integer, IN end_id integer)
26 | RETURNS TABLE(
27 | tablespacename text,
28 | tablespacepath text,
29 | size text,
30 | size_delta text
31 | )
32 | SET search_path=@extschema@ AS $$
33 | SELECT
34 | st.tablespacename::text,
35 | st.tablespacepath,
36 | pg_size_pretty(NULLIF(st.last_size, 0)) as size,
37 | pg_size_pretty(NULLIF(st.size_delta, 0)) as size_delta
38 | FROM tablespace_stats(sserver_id, start_id, end_id) st
39 | ORDER BY st.tablespacename ASC;
40 | $$ LANGUAGE sql;
41 |
42 | CREATE FUNCTION tablespace_stats_format_diff(IN sserver_id integer,
43 | IN start1_id integer, IN end1_id integer,
44 | IN start2_id integer, IN end2_id integer)
45 | RETURNS TABLE(
46 | tablespacename text,
47 | tablespacepath text,
48 | size1 text,
49 | size2 text,
50 | size_delta1 text,
51 | size_delta2 text
52 | )
53 | SET search_path=@extschema@ AS $$
54 | SELECT
55 | COALESCE(stat1.tablespacename,stat2.tablespacename)::text AS tablespacename,
56 | COALESCE(stat1.tablespacepath,stat2.tablespacepath) AS tablespacepath,
57 | pg_size_pretty(NULLIF(stat1.last_size, 0)) as size1,
58 | pg_size_pretty(NULLIF(stat2.last_size, 0)) as size2,
59 | pg_size_pretty(NULLIF(stat1.size_delta, 0)) as size_delta1,
60 | pg_size_pretty(NULLIF(stat2.size_delta, 0)) as size_delta2
61 | FROM tablespace_stats(sserver_id,start1_id,end1_id) stat1
62 | FULL OUTER JOIN tablespace_stats(sserver_id,start2_id,end2_id) stat2
63 | USING (server_id,tablespaceid)
64 | $$ LANGUAGE sql;
65 |
--------------------------------------------------------------------------------
/schema/core.sql:
--------------------------------------------------------------------------------
1 | /* ========= Core tables ========= */
2 |
3 | /* This table has custom processing in export_data() function - review
4 |    of export_data() should be performed after any field change */
5 |
6 | CREATE TABLE servers (
7 | server_id SERIAL PRIMARY KEY,
8 | server_name name UNIQUE NOT NULL,
9 | server_description text,
10 | server_created timestamp with time zone DEFAULT now(),
11 | db_exclude name[] DEFAULT NULL,
12 | enabled boolean DEFAULT TRUE,
13 | connstr text,
14 | max_sample_age integer NULL,
15 | last_sample_id integer DEFAULT 0 NOT NULL,
16 | size_smp_wnd_start time with time zone,
17 | size_smp_wnd_dur interval hour to second,
18 | size_smp_interval interval day to minute,
19 | srv_settings jsonb
20 | );
21 | COMMENT ON TABLE servers IS 'Monitored servers (Postgres clusters) list';
22 |
23 | CREATE TABLE samples (
24 | server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE
25 | DEFERRABLE INITIALLY IMMEDIATE,
26 | sample_id integer NOT NULL,
27 | sample_time timestamp (0) with time zone,
28 | CONSTRAINT pk_samples PRIMARY KEY (server_id, sample_id)
29 | );
30 |
31 | CREATE INDEX ix_sample_time ON samples(server_id, sample_time);
32 | COMMENT ON TABLE samples IS 'Sample times list';
33 |
34 | CREATE TABLE baselines (
35 | server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE,
36 | bl_id SERIAL,
37 | bl_name varchar (25) NOT NULL,
38 | keep_until timestamp (0) with time zone,
39 | CONSTRAINT pk_baselines PRIMARY KEY (server_id, bl_id),
40 | CONSTRAINT uk_baselines UNIQUE (server_id,bl_name) DEFERRABLE INITIALLY IMMEDIATE
41 | );
42 | COMMENT ON TABLE baselines IS 'Baselines list';
43 |
44 | CREATE TABLE bl_samples (
45 | server_id integer NOT NULL,
46 | sample_id integer NOT NULL,
47 | bl_id integer NOT NULL,
48 | CONSTRAINT fk_bl_samples_samples FOREIGN KEY (server_id, sample_id)
49 | REFERENCES samples(server_id, sample_id) ON DELETE RESTRICT
50 | DEFERRABLE INITIALLY IMMEDIATE,
51 | CONSTRAINT fk_bl_samples_baselines FOREIGN KEY (server_id, bl_id)
52 | REFERENCES baselines(server_id, bl_id) ON DELETE CASCADE
53 | DEFERRABLE INITIALLY IMMEDIATE,
54 | CONSTRAINT pk_bl_samples PRIMARY KEY (server_id, bl_id, sample_id)
55 | );
56 | CREATE INDEX ix_bl_samples_blid ON bl_samples(bl_id);
57 | CREATE INDEX ix_bl_samples_sample ON bl_samples(server_id, sample_id);
58 | COMMENT ON TABLE bl_samples IS 'Samples in baselines';
59 |
--------------------------------------------------------------------------------
/sample/query_pg_stat_wal.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION query_pg_stat_wal(IN server_properties jsonb, IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS jsonb AS $$
3 | declare
4 | server_query text;
5 | pg_version int := (get_sp_setting(server_properties, 'server_version_num')).reset_val::integer;
6 | begin
7 | server_properties := log_sample_timings(server_properties, 'query pg_stat_wal', 'start');
8 | -- pg_stat_wal data
9 | CASE
10 | WHEN pg_version >= 180000 THEN
11 | server_query := 'SELECT '
12 | 'wal.wal_records,'
13 | 'wal.wal_fpi,'
14 | 'wal.wal_bytes,'
15 | 'wal.wal_buffers_full,'
16 | 'NULL as wal_write,'
17 | 'NULL as wal_sync,'
18 | 'NULL as wal_write_time,'
19 | 'NULL as wal_sync_time,'
20 | 'wal.stats_reset '
21 | 'FROM pg_catalog.pg_stat_wal wal';
22 | WHEN pg_version >= 140000 THEN
23 | server_query := 'SELECT '
24 | 'wal_records,'
25 | 'wal_fpi,'
26 | 'wal_bytes,'
27 | 'wal_buffers_full,'
28 | 'wal_write,'
29 | 'wal_sync,'
30 | 'wal_write_time,'
31 | 'wal_sync_time,'
32 | 'stats_reset '
33 | 'FROM pg_catalog.pg_stat_wal';
34 | ELSE
35 | server_query := NULL;
36 | END CASE;
37 |
38 | IF server_query IS NOT NULL THEN
39 | INSERT INTO last_stat_wal (
40 | server_id,
41 | sample_id,
42 | wal_records,
43 | wal_fpi,
44 | wal_bytes,
45 | wal_buffers_full,
46 | wal_write,
47 | wal_sync,
48 | wal_write_time,
49 | wal_sync_time,
50 | stats_reset
51 | )
52 | SELECT
53 | sserver_id,
54 | ssample_id,
55 | wal_records,
56 | wal_fpi,
57 | wal_bytes,
58 | wal_buffers_full,
59 | wal_write,
60 | wal_sync,
61 | wal_write_time,
62 | wal_sync_time,
63 | stats_reset
64 | FROM dblink('server_connection',server_query) AS rs (
65 | wal_records bigint,
66 | wal_fpi bigint,
67 | wal_bytes numeric,
68 | wal_buffers_full bigint,
69 | wal_write bigint,
70 | wal_sync bigint,
71 | wal_write_time double precision,
72 | wal_sync_time double precision,
73 | stats_reset timestamp with time zone);
74 | END IF;
75 | server_properties := log_sample_timings(server_properties, 'query pg_stat_wal', 'end');
76 | return server_properties;
77 | end;
78 | $$ LANGUAGE plpgsql;
79 |
--------------------------------------------------------------------------------
/sql/sizes_collection.sql:
--------------------------------------------------------------------------------
1 | SET client_min_messages = WARNING;
2 | SET pg_profile.relsize_collect_mode = 'on';
3 | /* Test size collection sampling settings */
4 | INSERT INTO profile.grow_table (short_str,long_str)
5 | SELECT array_to_string(array
6 | (select
7 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
8 | trunc(random() * 62)::integer + 1, 1)
9 | FROM generate_series(1, 40)), ''
10 | ) as arr1,
11 | array_to_string(array
12 | (select
13 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
14 | trunc(random() * 62)::integer + 1, 1)
15 | FROM generate_series(1, 8000)), ''
16 | )
17 | FROM generate_series(1,5);
18 | /* Enable scheduled relation sizes collection at server */
19 | SELECT profile.set_server_size_sampling('local',current_time - interval '10 minute',interval '30 minute',interval '2 minute','schedule');
20 | -- check show_servers_size_sampling()
21 | SELECT server_name,window_duration,sample_interval,collect_mode FROM profile.show_servers_size_sampling();
22 | -- (sample 4)
23 | SELECT server,result FROM profile.take_sample();
24 | -- Disable relation sizes collection at server
25 | SELECT profile.set_server_size_sampling('local',null,null,null,'failure');
26 | SELECT profile.set_server_size_sampling('local',null,null,null,'off');
27 | -- (sample 5)
28 | SELECT server,result FROM profile.take_sample();
29 | -- Enable relation sizes collection at server
30 | SELECT profile.set_server_size_sampling('local',null,null,null,'on');
31 | -- (sample 6)
32 | SELECT server,result FROM profile.take_sample();
33 | -- Reset relation sizes collection mode at server
34 | SELECT profile.set_server_size_sampling('local',null,null,null,null);
35 | -- Enable relation sizes collection configuration parameter
36 | SET pg_profile.relsize_collect_mode = 'on';
37 | -- (sample 7)
38 | SELECT server,result FROM profile.take_sample();
39 | -- Disable relation sizes collection configuration parameter
40 | SET pg_profile.relsize_collect_mode = 'off';
41 | -- (sample 8)
42 | SELECT server,result FROM profile.take_sample();
43 | -- check show_samples()
44 | SELECT sample, sizes_collected FROM profile.show_samples() WHERE NOT sizes_collected;
45 | -- check tables sizes collection
46 | SELECT
47 | sample_id,
48 | count(relsize) > 0 as relsize,
49 | count(relsize_diff) > 0 as relsize_diff,
50 | count(relpages_bytes) > 0 as relpages,
51 | count(relpages_bytes_diff) > 0 as relpages_diff
52 | FROM profile.sample_stat_tables GROUP BY sample_id
53 | ORDER BY sample_id;
54 | -- check indexes sizes collection
55 | SELECT
56 | sample_id,
57 | count(relsize) > 0 as relsize,
58 | count(relsize_diff) > 0 as relsize_diff,
59 | count(relpages_bytes) > 0 as relpages,
60 | count(relpages_bytes_diff) > 0 as relpages_diff
61 | FROM profile.sample_stat_indexes GROUP BY sample_id
62 | ORDER BY sample_id;
63 |
--------------------------------------------------------------------------------
/sql/retention_and_baselines.sql:
--------------------------------------------------------------------------------
1 | UPDATE profile.samples
2 | SET sample_time = now() - (5 - sample_id) * '1 day'::interval - '10 minutes'::interval
3 | WHERE sample_id <= 5;
4 |
5 | ALTER TABLE profile.test_rel_storage_params1 RESET (autovacuum_vacuum_threshold,fillfactor,autovacuum_enabled);
6 | ALTER INDEX profile.ix_test_rel_storage_params_id1 RESET (fillfactor);
7 | ALTER INDEX profile.ix_test_rel_storage_params_val1 RESET (fillfactor);
8 |
9 | SELECT server,result FROM profile.take_sample();
10 |
11 | SELECT relname, t.last_sample_id, reloptions
12 | FROM profile.table_storage_parameters t JOIN profile.tables_list
13 | USING (server_id, relid)
14 | WHERE relname IN ('test_rel_storage_params1','test_rel_storage_params2') ORDER BY relid, last_sample_id;
15 | SELECT relname, indexrelname, i.last_sample_id, reloptions
16 | FROM profile.index_storage_parameters i JOIN profile.tables_list
17 | USING (server_id, relid)
18 | JOIN profile.indexes_list USING (server_id, relid, indexrelid)
19 | WHERE relname IN ('test_rel_storage_params1','test_rel_storage_params2') ORDER BY relid, indexrelid, last_sample_id;
20 |
21 | BEGIN;
22 | SELECT profile.delete_samples();
23 | SELECT sample FROM profile.show_samples() ORDER BY sample;
24 | ROLLBACK;
25 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
26 | SELECT * FROM profile.set_server_max_sample_age('local',1);
27 | /* Testing baseline creation */
28 | SELECT * FROM profile.create_baseline('testline1',2,4);
29 | BEGIN;
30 | SELECT profile.delete_samples('local',tstzrange(
31 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1),
32 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5),
33 | '[]'
34 | )
35 | );
36 | SELECT sample FROM profile.show_samples() ORDER BY sample;
37 | ROLLBACK;
38 | BEGIN;
39 | SELECT profile.delete_samples(tstzrange(
40 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1),
41 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5),
42 | '[]'
43 | )
44 | );
45 | SELECT sample FROM profile.show_samples() ORDER BY sample;
46 | ROLLBACK;
47 | SELECT * FROM profile.create_baseline('testline2',2,4);
48 | SELECT count(*) FROM profile.baselines;
49 | SELECT * FROM profile.keep_baseline('testline2',-1);
50 | /* Testing baseline show */
51 | SELECT baseline, min_sample, max_sample, keep_until_time IS NULL
52 | FROM profile.show_baselines()
53 | ORDER BY baseline;
54 | /* Testing baseline deletion */
55 | SELECT server,result FROM profile.take_sample();
56 | SELECT count(*) FROM profile.baselines;
57 | /* Testing samples retention override with baseline */
58 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
59 | SELECT * FROM profile.drop_baseline('testline1');
60 | /* Testing samples deletion after baseline removed */
61 | SELECT server,result FROM profile.take_sample();
62 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
63 |
--------------------------------------------------------------------------------
/sample/take_sample.1.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION take_sample(IN server name, IN skip_sizes boolean = NULL)
2 | RETURNS TABLE (
3 | result text,
4 | elapsed interval day to second (2)
5 | )
6 | SET search_path=@extschema@ AS $$
7 | DECLARE
8 | sserver_id integer;
9 | server_sampleres integer;
10 | etext text := '';
11 | edetail text := '';
12 | econtext text := '';
13 |
14 | qres record;
15 | conname text;
16 | start_clock timestamp (2) with time zone;
17 | BEGIN
18 | SELECT server_id INTO sserver_id FROM servers WHERE server_name = take_sample.server;
19 | IF sserver_id IS NULL THEN
20 | RAISE 'Server not found';
21 | ELSE
22 | /*
23 | * We should include dblink schema to perform disconnections
24 | * on exception conditions
25 | */
26 | SELECT extnamespace::regnamespace AS dblink_schema INTO STRICT qres FROM pg_catalog.pg_extension WHERE extname = 'dblink';
27 | IF NOT string_to_array(current_setting('search_path'),', ') @> ARRAY[qres.dblink_schema::text] THEN
28 | EXECUTE 'SET LOCAL search_path TO ' || current_setting('search_path')||','|| qres.dblink_schema;
29 | END IF;
30 |
31 | BEGIN
32 | start_clock := clock_timestamp()::timestamp (2) with time zone;
33 | server_sampleres := take_sample(sserver_id, take_sample.skip_sizes);
34 | elapsed := clock_timestamp()::timestamp (2) with time zone - start_clock;
35 | CASE server_sampleres
36 | WHEN 0 THEN
37 | result := 'OK';
38 | ELSE
39 | result := 'FAIL';
40 | END CASE;
41 | RETURN NEXT;
42 | EXCEPTION
43 | WHEN OTHERS THEN
44 | BEGIN
45 | GET STACKED DIAGNOSTICS etext = MESSAGE_TEXT,
46 | edetail = PG_EXCEPTION_DETAIL,
47 | econtext = PG_EXCEPTION_CONTEXT;
48 | result := format (E'%s\n%s\n%s', etext, econtext, edetail);
49 | elapsed := clock_timestamp()::timestamp (2) with time zone - start_clock;
50 | RETURN NEXT;
51 | /*
52 | Cleanup dblink connections
53 | */
54 | FOREACH conname IN ARRAY
55 | coalesce(dblink_get_connections(), array[]::text[])
56 | LOOP
57 | IF conname IN ('server_connection', 'server_db_connection') THEN
58 | PERFORM dblink_disconnect(conname);
59 | END IF;
60 | END LOOP;
61 | END;
62 | END;
63 | END IF;
64 | RETURN;
65 | END;
66 | $$ LANGUAGE plpgsql;
67 |
68 | COMMENT ON FUNCTION take_sample(IN server name, IN skip_sizes boolean) IS
69 | 'Statistics sample creation function (by server name)';
70 |
--------------------------------------------------------------------------------
/migration/func_create.sed:
--------------------------------------------------------------------------------
1 | /^CREATE FUNCTION top_tables(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
2 | /^CREATE FUNCTION top_toasts(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
3 | /^CREATE FUNCTION collect_obj_stats(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
4 | /^CREATE FUNCTION sample_dbobj_delta(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
5 | /^CREATE FUNCTION get_report_context(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
6 | /^CREATE FUNCTION import_section_data_profile(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
7 | /^CREATE FUNCTION import_section_data_subsample(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
8 | /^CREATE FUNCTION import_data(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
9 | /^COMMENT ON FUNCTION import_data(.*$/,/';$/p
10 | /^CREATE FUNCTION init_sample(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
11 | /^CREATE FUNCTION log_sample_timings(.*$/,/'log event to sample_timings';$/p
12 | /^CREATE FUNCTION take_sample(IN sserver_id integer.*$/,/'Statistics sample creation function (by server_id)';$/p
13 | /^CREATE FUNCTION take_sample(IN server name.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
14 | /^COMMENT ON FUNCTION take_sample(IN server name.*$/,/';$/p
15 | /^CREATE FUNCTION take_sample_subset(IN sets_cnt integer.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
16 | /^COMMENT ON FUNCTION take_sample_subset(IN sets_cnt integer.*$/,/';$/p
17 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+calculate_.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
18 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+collect_tablespace_stats(.*$/,/\$\w*\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
19 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+collect_database_stats(.*$/,/\$\w*\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
20 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+delete_obsolete_samples(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
21 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+get_sp_setting(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*IMMUTABLE[[:space:]]*;[[:space:]]*$/p
22 | /^CREATE[[:space:]]\+FUNCTION[[:space:]]\+query_pg_stat_.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
23 | /^CREATE FUNCTION take_subsample(IN sserver_id integer.*$/,/'Take a sub-sample for a server by server_id';$/p
24 | /^CREATE FUNCTION export_data(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
25 | /^COMMENT ON FUNCTION export_data(.*$/,/';$/p
26 | /^CREATE FUNCTION set_server_setting(.*$/,/\$\$[[:space:]]*LANGUAGE[[:space:]]\+\(plpg\)\?sql[[:space:]]*;[[:space:]]*$/p
27 |
--------------------------------------------------------------------------------
/grafana/README.md:
--------------------------------------------------------------------------------
1 | # Grafana dashboards for *pg_profile* #
2 | You can use the provided Grafana dashboards to visualize the summary database load over time. These dashboards use a *pg_profile* repository as the data source. Visualizing *pg_profile* samples should help you detect the time intervals with the specific load profile you want to see in a report.
3 |
4 | The header of each dashboard provides the *get_report()* function call for building a *pg_profile* report on the exact time interval you see in Grafana.
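A call taken from a dashboard header might look like the following sketch (the server name and the sample boundaries are assumptions here; the actual values depend on your installation and on the interval selected in Grafana):

```sql
-- Hypothetical example: build a report for the 'local' server
-- covering the interval between samples 1 and 4
SELECT profile.get_report('local', 1, 4);
```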
5 |
6 | Dashboards provided:
7 | * **pg_profile_visualization.json** - the main dashboard to use with a *pg_profile* repository. It provides summary information for each database.
8 | * **pg_profile_summary.json** - this dashboard provides summary information for the whole cluster. Use it if you have many databases in your cluster and the per-database visualization seems overloaded.
9 | * **pg_profile_waits.json** - a detailed wait-events dashboard. Charts are based on data collected by *pg_profile* with the *pg_wait_sampling* extension.
10 | * **pg_profile_io.json** - a detailed I/O dashboard. Charts are based on data collected by *pg_profile* from the *pg_stat_io* view. This view is available only since PostgreSQL 16; its collection was implemented in *pg_profile* 4.3.
11 | * **pg_profile_activity.json** - charts of observed session counts by state and of session wait events over time. They are based on the data collected in subsamples (added in 4.6). Pie charts at the bottom of the dashboard show the distribution of observed sessions during a time interval by application, database, user and host. A single click on any segment of those pie charts limits all charts to sessions having the selected value of the corresponding attribute. You can filter sessions by several identifying attributes.
12 |
13 | To use these dashboards you need a PostgreSQL data source in your Grafana installation. This data source should point to the database with the *pg_profile* extension installed. If your *pg_profile* extension is installed in its own schema, make sure that the database user of the Grafana data source has this schema in its *search_path* setting.
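For instance, a minimal sketch assuming the extension lives in a schema named `profile` and the Grafana data source connects as a hypothetical role `grafana_reader`:

```sql
-- Make the pg_profile schema visible to the Grafana data source role.
-- The schema and role names below are assumptions; adjust them to your setup.
ALTER ROLE grafana_reader SET search_path = profile, public;
```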
14 |
15 | ## Grafana dashboard controls ##
16 | Provided dashboards have some useful controls:
17 | * **Database with pg_profile extension** can be used to change the current Grafana data source if you have several *pg_profile* instances
18 | * **Server** - the current server in the *pg_profile* instance, in case your *pg_profile* instance has several servers defined
19 | * **Server starts** - toggles Grafana annotations showing PostgreSQL database restarts captured by *pg_profile*
20 | * **Configuration loads** - toggles Grafana annotations showing PostgreSQL configuration reloads captured by *pg_profile*
21 | * **Interval** - this button sets the dashboard time range so that it covers all samples captured by *pg_profile* for the selected *pg_profile* server. Sometimes it needs to be clicked twice.
22 | * **pg_profile dashboard** - a dropdown list of all *pg_profile* dashboards. When you switch between *pg_profile* dashboards using this control, the time interval and the *pg_profile* server are preserved.
23 |
--------------------------------------------------------------------------------
/sql/export_import.sql:
--------------------------------------------------------------------------------
1 | SET client_min_messages = WARNING;
2 | /* === Create regular export table === */
3 | CREATE TABLE profile.export AS SELECT * FROM profile.export_data();
4 | /* === Create obfuscated export table === */
5 | CREATE TABLE profile.blind_export AS
6 | SELECT * FROM profile.export_data(obfuscate_queries => true);
7 | CREATE TABLE profile.connstr_export AS
8 | SELECT * FROM profile.export_data(hide_connstr => true);
9 | BEGIN;
10 | /* === rename local server === */
11 | SELECT profile.rename_server('local','src_local');
12 | /* === check matching by creation date and system identifier === */
13 | SELECT profile.import_data('profile.export') > 0;
14 | /* === change src_local server creation time so it won't match === */
15 | UPDATE profile.servers
16 | SET
17 | server_created = server_created - '1 minutes'::interval
18 | WHERE server_name = 'src_local';
19 | /* === perform load === */
20 | SELECT profile.import_data('profile.export') > 0;
21 | /* === Integral check - reports must match === */
22 | \a
23 | \t on
24 | WITH res AS (
25 | SELECT
26 | profile.get_report('local',1,4) AS imported,
27 | replace(
28 | replace(
29 | profile.get_report('src_local',1,4),'"server_name": "src_local"',
30 | '"server_name": "local"'),
31 | 'Server name: src_local',
32 |       'Server name: local'
33 | ) AS exported
34 | )
35 | SELECT
36 | CASE
37 | WHEN
38 | sha224(convert_to(imported, 'UTF8')) !=
39 | sha224(convert_to(exported, 'UTF8'))
40 | THEN
41 | format(E'\n%s\n\n\n%s\n',
42 | imported,
43 | exported
44 | )
45 | ELSE
46 | 'ok'
47 | END as match
48 | FROM res;
49 | \a
50 | \t off
51 | /* === perform obfuscated load === */
52 | SELECT profile.drop_server('local');
53 | SELECT profile.import_data('profile.blind_export') > 0;
54 | /* === check that there are no matching queries === */
55 | SELECT
56 | count(*)
57 | FROM profile.servers s_src
58 | CROSS JOIN profile.servers s_blind
59 | JOIN profile.stmt_list q_src ON
60 | (q_src.server_id = s_src.server_id)
61 | JOIN profile.stmt_list q_blind ON
62 | (q_src.queryid_md5 = q_blind.queryid_md5 AND q_blind.server_id = s_blind.server_id)
63 | WHERE
64 | s_src.server_name = 'src_local' AND s_blind.server_name = 'local'
65 | AND q_src.query = q_blind.query;
66 | /* === ensure connstrings were present in the original data === */
67 | SELECT count(connstr) > 0 FROM profile.servers WHERE server_name = 'src_local';
68 | /* === check that blind export excludes connection strings === */
69 | SELECT count(connstr) = 0 FROM profile.servers WHERE server_name = 'local';
70 | /* === perform hidden connstr load === */
71 | SELECT profile.drop_server('local');
72 | SELECT profile.import_data('profile.connstr_export') > 0;
73 | /* === check that hidden connstr export excludes connection strings === */
74 | SELECT count(connstr) = 0 FROM profile.servers WHERE server_name = 'local';
75 | ROLLBACK;
76 | /* === drop export tables === */
77 | DROP TABLE profile.export;
78 | DROP TABLE profile.blind_export;
79 | DROP TABLE profile.connstr_export;
80 |
--------------------------------------------------------------------------------
/report/functions/statements_checks.sql:
--------------------------------------------------------------------------------
1 | /* ===== pg_stat_statements checks ===== */
2 | CREATE FUNCTION profile_checkavail_stmt_cnt(IN sserver_id integer, IN start_id integer, IN end_id integer)
3 | RETURNS BOOLEAN
4 | SET search_path=@extschema@ AS
5 | $$
6 | -- Check if the observed statement count is close to pg_stat_statements.max (statements may have been evicted)
7 | SELECT COUNT(*) > 0 FROM samples
8 | JOIN (
9 | SELECT sample_id,sum(statements) stmt_cnt
10 | FROM sample_statements_total
11 | WHERE server_id = sserver_id AND
12 | ((start_id,end_id) = (0,0) OR
13 | sample_id BETWEEN start_id + 1 AND end_id)
14 | GROUP BY sample_id
15 | ) sample_stmt_cnt USING(sample_id)
16 | JOIN v_sample_settings prm USING (server_id,sample_id)
17 | WHERE server_id = sserver_id AND prm.name='pg_stat_statements.max' AND
18 | stmt_cnt >= 0.9*cast(prm.setting AS integer);
19 | $$ LANGUAGE sql;
20 |
21 | CREATE FUNCTION stmt_cnt(IN sserver_id integer, IN start_id integer = 0,
22 | IN end_id integer = 0)
23 | RETURNS TABLE(
24 | sample_id integer,
25 | sample_time timestamp with time zone,
26 | stmt_cnt integer,
27 | max_cnt text
28 | )
29 | SET search_path=@extschema@
30 | AS $$
31 | SELECT
32 | sample_id,
33 | sample_time,
34 | stmt_cnt,
35 | prm.setting AS max_cnt
36 | FROM samples
37 | JOIN (
38 | SELECT
39 | sample_id,
40 | sum(statements)::integer AS stmt_cnt
41 | FROM sample_statements_total
42 | WHERE server_id = sserver_id
43 | AND ((start_id, end_id) = (0,0) OR sample_id BETWEEN start_id + 1 AND end_id)
44 | GROUP BY sample_id
45 | ) sample_stmt_cnt USING(sample_id)
46 | JOIN v_sample_settings prm USING (server_id, sample_id)
47 | WHERE server_id = sserver_id AND prm.name='pg_stat_statements.max' AND
48 | stmt_cnt >= 0.9*cast(prm.setting AS integer)
49 | $$ LANGUAGE sql;
50 |
51 | CREATE FUNCTION stmt_cnt_format(IN sserver_id integer, IN start_id integer = 0,
52 | IN end_id integer = 0)
53 | RETURNS TABLE(
54 | sample_id integer,
55 | sample_time text,
56 | stmt_cnt integer,
57 | max_cnt text
58 | )
59 | SET search_path=@extschema@ AS $$
60 | SELECT
61 | sample_id,
62 | sample_time::text,
63 | stmt_cnt,
64 | max_cnt
65 | FROM
66 | stmt_cnt(sserver_id, start_id, end_id)
67 | $$ LANGUAGE sql;
68 |
69 | CREATE FUNCTION stmt_cnt_format_diff(IN sserver_id integer,
70 | IN start1_id integer = 0, IN end1_id integer = 0,
71 | IN start2_id integer = 0, IN end2_id integer = 0)
72 | RETURNS TABLE(
73 | interval_num integer,
74 | sample_id integer,
75 | sample_time text,
76 | stmt_cnt integer,
77 | max_cnt text
78 | )
79 | SET search_path=@extschema@ AS $$
80 | SELECT
81 | 1 AS interval_num,
82 | sample_id,
83 | sample_time::text,
84 | stmt_cnt,
85 | max_cnt
86 | FROM
87 | stmt_cnt(sserver_id, start1_id, end1_id)
88 | UNION ALL
89 | SELECT
90 | 2 AS interval_num,
91 | sample_id,
92 | sample_time::text,
93 | stmt_cnt,
94 | max_cnt
95 | FROM
96 | stmt_cnt(sserver_id, start2_id, end2_id)
97 | $$ LANGUAGE sql;
98 |
--------------------------------------------------------------------------------
/sample/calculate_cluster_stats.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION calculate_cluster_stats(IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS void AS $$
3 | -- Calc stat cluster diff
4 | INSERT INTO sample_stat_cluster(
5 | server_id,
6 | sample_id,
7 | checkpoints_timed,
8 | checkpoints_req,
9 | checkpoints_done,
10 | checkpoint_write_time,
11 | checkpoint_sync_time,
12 | buffers_checkpoint,
13 | slru_checkpoint,
14 | buffers_clean,
15 | maxwritten_clean,
16 | buffers_backend,
17 | buffers_backend_fsync,
18 | buffers_alloc,
19 | stats_reset,
20 | wal_size,
21 | wal_lsn,
22 | in_recovery,
23 | restartpoints_timed,
24 | restartpoints_req,
25 | restartpoints_done,
26 | checkpoint_stats_reset
27 | )
28 | SELECT
29 | cur.server_id,
30 | cur.sample_id,
31 | cur.checkpoints_timed - COALESCE(lstc.checkpoints_timed,0),
32 | cur.checkpoints_req - COALESCE(lstc.checkpoints_req,0),
33 | cur.checkpoints_done - COALESCE(lstc.checkpoints_done,0),
34 | cur.checkpoint_write_time - COALESCE(lstc.checkpoint_write_time,0),
35 | cur.checkpoint_sync_time - COALESCE(lstc.checkpoint_sync_time,0),
36 | cur.buffers_checkpoint - COALESCE(lstc.buffers_checkpoint,0),
37 | cur.slru_checkpoint - COALESCE(lstc.slru_checkpoint,0),
38 | cur.buffers_clean - COALESCE(lstb.buffers_clean,0),
39 | cur.maxwritten_clean - COALESCE(lstb.maxwritten_clean,0),
40 | cur.buffers_backend - COALESCE(lstb.buffers_backend,0),
41 | cur.buffers_backend_fsync - COALESCE(lstb.buffers_backend_fsync,0),
42 | cur.buffers_alloc - COALESCE(lstb.buffers_alloc,0),
43 | cur.stats_reset,
44 | cur.wal_size - COALESCE(lstb.wal_size,0),
45 | /* We will overwrite this value in case of stats reset
46 | * (see below)
47 | */
48 | cur.wal_lsn,
49 | cur.in_recovery,
50 |     cur.restartpoints_timed - COALESCE(lstc.restartpoints_timed,0),
51 |     cur.restartpoints_req - COALESCE(lstc.restartpoints_req,0),
52 |     cur.restartpoints_done - COALESCE(lstc.restartpoints_done,0),
53 | cur.checkpoint_stats_reset
54 | FROM last_stat_cluster cur
55 | LEFT OUTER JOIN last_stat_cluster lstb ON
56 | (lstb.server_id, lstb.sample_id) =
57 | (sserver_id, ssample_id - 1)
58 | AND cur.stats_reset IS NOT DISTINCT FROM lstb.stats_reset
59 | LEFT OUTER JOIN last_stat_cluster lstc ON
60 | (lstc.server_id, lstc.sample_id) =
61 | (sserver_id, ssample_id - 1)
62 | AND cur.checkpoint_stats_reset IS NOT DISTINCT FROM lstc.checkpoint_stats_reset
63 | WHERE
64 | (cur.server_id, cur.sample_id) = (sserver_id, ssample_id);
65 |
66 |     /* When a stats reset has happened, wal_size is calculated from 0
67 |      * to the current value, so we need to update it
68 |      */
69 | UPDATE sample_stat_cluster ssc
70 | SET wal_size = cur.wal_size - lst.wal_size
71 | FROM last_stat_cluster cur
72 | JOIN last_stat_cluster lst ON
73 | (lst.server_id, lst.sample_id) =
74 | (sserver_id, ssample_id - 1)
75 | WHERE
76 | (ssc.server_id, ssc.sample_id) = (sserver_id, ssample_id) AND
77 | (cur.server_id, cur.sample_id) = (sserver_id, ssample_id) AND
78 | cur.stats_reset IS DISTINCT FROM lst.stats_reset;
79 |
80 | DELETE FROM last_stat_cluster WHERE server_id = sserver_id AND sample_id != ssample_id;
81 | $$ LANGUAGE sql;
--------------------------------------------------------------------------------
/schema/funcs.sql:
--------------------------------------------------------------------------------
1 | /* ==== Function stats history ==== */
2 |
3 | CREATE TABLE funcs_list(
4 | server_id integer NOT NULL REFERENCES servers(server_id) ON DELETE CASCADE
5 | DEFERRABLE INITIALLY IMMEDIATE,
6 | datid oid,
7 | funcid oid,
8 | schemaname name NOT NULL,
9 | funcname name NOT NULL,
10 | funcargs text NOT NULL,
11 | last_sample_id integer,
12 | CONSTRAINT pk_funcs_list PRIMARY KEY (server_id, datid, funcid),
13 | CONSTRAINT fk_funcs_list_samples FOREIGN KEY (server_id, last_sample_id)
14 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
15 | DEFERRABLE INITIALLY IMMEDIATE
16 | );
17 | CREATE INDEX ix_funcs_list_samples ON funcs_list (server_id, last_sample_id);
18 | COMMENT ON TABLE funcs_list IS 'Function names and schemas, captured in samples';
19 |
20 | CREATE TABLE sample_stat_user_functions (
21 | server_id integer,
22 | sample_id integer,
23 | datid oid,
24 | funcid oid,
25 | calls bigint,
26 | total_time double precision,
27 | self_time double precision,
28 | trg_fn boolean,
29 | CONSTRAINT fk_user_functions_functions FOREIGN KEY (server_id, datid, funcid)
30 | REFERENCES funcs_list (server_id, datid, funcid)
31 | ON DELETE NO ACTION
32 | DEFERRABLE INITIALLY IMMEDIATE,
33 | CONSTRAINT fk_user_functions_dat FOREIGN KEY (server_id, sample_id, datid)
34 | REFERENCES sample_stat_database (server_id, sample_id, datid) ON DELETE CASCADE
35 | DEFERRABLE INITIALLY IMMEDIATE,
36 | CONSTRAINT pk_sample_stat_user_functions PRIMARY KEY (server_id, sample_id, datid, funcid)
37 | );
38 | CREATE INDEX ix_sample_stat_user_functions_fl ON sample_stat_user_functions(server_id, datid, funcid);
39 |
40 | COMMENT ON TABLE sample_stat_user_functions IS 'Stats increments for user functions in all databases by samples';
41 |
42 | CREATE VIEW v_sample_stat_user_functions AS
43 | SELECT
44 | server_id,
45 | sample_id,
46 | datid,
47 | funcid,
48 | schemaname,
49 | funcname,
50 | funcargs,
51 | calls,
52 | total_time,
53 | self_time,
54 | trg_fn
55 | FROM sample_stat_user_functions JOIN funcs_list USING (server_id, datid, funcid);
56 | COMMENT ON VIEW v_sample_stat_user_functions IS 'Reconstructed stats view with function names and schemas';
57 |
58 | CREATE TABLE last_stat_user_functions (LIKE v_sample_stat_user_functions, in_sample boolean NOT NULL DEFAULT false)
59 | PARTITION BY LIST (server_id);
60 | COMMENT ON TABLE last_stat_user_functions IS 'Last sample data for calculating diffs in next sample';
61 |
62 | CREATE TABLE sample_stat_user_func_total (
63 | server_id integer,
64 | sample_id integer,
65 | datid oid,
66 | calls bigint,
67 | total_time double precision,
68 | trg_fn boolean,
69 | CONSTRAINT fk_user_func_tot_dat FOREIGN KEY (server_id, sample_id, datid)
70 | REFERENCES sample_stat_database (server_id, sample_id, datid) ON DELETE CASCADE
71 | DEFERRABLE INITIALLY IMMEDIATE,
72 | CONSTRAINT pk_sample_stat_user_func_total PRIMARY KEY (server_id, sample_id, datid, trg_fn)
73 | );
74 | COMMENT ON TABLE sample_stat_user_func_total IS 'Total stats for user functions in all databases by samples';
75 |
--------------------------------------------------------------------------------
/sample/pg_wait_sampling.sql:
--------------------------------------------------------------------------------
1 | /* pg_wait_sampling support */
2 |
3 | CREATE FUNCTION collect_pg_wait_sampling_stats(IN properties jsonb, IN sserver_id integer, IN s_id integer, IN topn integer)
4 | RETURNS void SET search_path=@extschema@ AS $$
5 | DECLARE
6 | BEGIN
7 | CASE (
8 | SELECT extversion
9 | FROM jsonb_to_recordset(properties #> '{extensions}')
10 | AS ext(extname text, extversion text)
11 | WHERE extname = 'pg_wait_sampling'
12 | )
13 | WHEN '1.1' THEN
14 | PERFORM collect_pg_wait_sampling_stats_11(properties, sserver_id, s_id, topn);
15 | ELSE
16 | NULL;
17 | END CASE;
18 |
19 | END;
20 | $$ LANGUAGE plpgsql;
21 |
22 | CREATE FUNCTION collect_pg_wait_sampling_stats_11(IN properties jsonb, IN sserver_id integer, IN s_id integer, IN topn integer)
23 | RETURNS void SET search_path=@extschema@ AS $$
24 | DECLARE
25 | qres record;
26 |
27 | st_query text;
28 | BEGIN
29 | -- Adding the dblink extension schema to search_path if it is not already there
30 | SELECT extnamespace::regnamespace AS dblink_schema INTO STRICT qres FROM pg_catalog.pg_extension WHERE extname = 'dblink';
31 | IF NOT string_to_array(current_setting('search_path'),', ') @> ARRAY[qres.dblink_schema::text] THEN
32 | EXECUTE 'SET LOCAL search_path TO ' || current_setting('search_path')||','|| qres.dblink_schema;
33 | END IF;
34 |
35 | st_query := format('SELECT w.*,row_number() OVER () as weid '
36 | 'FROM ( '
37 | 'SELECT '
38 | 'COALESCE(event_type, ''N/A'') as event_type, '
39 | 'COALESCE(event, ''On CPU'') as event, '
40 | 'sum(count * current_setting(''pg_wait_sampling.profile_period'')::bigint) as tot_waited, '
41 | 'sum(count * current_setting(''pg_wait_sampling.profile_period'')::bigint) '
42 | 'FILTER (WHERE queryid IS NOT NULL AND queryid != 0) as stmt_waited '
43 | 'FROM '
44 | '%1$I.pg_wait_sampling_profile '
45 | 'GROUP BY '
46 | 'event_type, '
47 | 'event) as w',
48 | (
49 | SELECT extnamespace FROM jsonb_to_recordset(properties #> '{extensions}')
50 | AS x(extname text, extnamespace text)
51 | WHERE extname = 'pg_wait_sampling'
52 | )
53 | );
54 |
55 | INSERT INTO wait_sampling_total(
56 | server_id,
57 | sample_id,
58 | sample_wevnt_id,
59 | event_type,
60 | event,
61 | tot_waited,
62 | stmt_waited
63 | )
64 | SELECT
65 | sserver_id,
66 | s_id,
67 | dbl.weid,
68 | dbl.event_type,
69 | dbl.event,
70 | dbl.tot_waited,
71 | dbl.stmt_waited
72 | FROM
73 | dblink('server_connection', st_query) AS dbl(
74 | event_type text,
75 | event text,
76 | tot_waited bigint,
77 | stmt_waited bigint,
78 | weid integer
79 | );
80 |
81 | -- reset wait sampling profile
82 | SELECT * INTO qres FROM dblink('server_connection',
83 | format('SELECT %1$I.pg_wait_sampling_reset_profile()',
84 | (
85 | SELECT extnamespace FROM jsonb_to_recordset(properties #> '{extensions}')
86 | AS x(extname text, extnamespace text)
87 | WHERE extname = 'pg_wait_sampling'
88 | )
89 | )
90 | ) AS t(res char(1));
91 |
92 | END;
93 | $$ LANGUAGE plpgsql;
94 |
--------------------------------------------------------------------------------
/schema/db.sql:
--------------------------------------------------------------------------------
1 | /* ==== Database stats history tables === */
2 |
3 | CREATE TABLE sample_stat_database
4 | (
5 | server_id integer,
6 | sample_id integer,
7 | datid oid,
8 | datname name NOT NULL,
9 | xact_commit bigint,
10 | xact_rollback bigint,
11 | blks_read bigint,
12 | blks_hit bigint,
13 | tup_returned bigint,
14 | tup_fetched bigint,
15 | tup_inserted bigint,
16 | tup_updated bigint,
17 | tup_deleted bigint,
18 | conflicts bigint,
19 | temp_files bigint,
20 | temp_bytes bigint,
21 | deadlocks bigint,
22 | blk_read_time double precision,
23 | blk_write_time double precision,
24 | stats_reset timestamp with time zone,
25 | datsize bigint,
26 | datsize_delta bigint,
27 | datistemplate boolean,
28 | session_time double precision,
29 | active_time double precision,
30 | idle_in_transaction_time double precision,
31 | sessions bigint,
32 | sessions_abandoned bigint,
33 | sessions_fatal bigint,
34 | sessions_killed bigint,
35 | checksum_failures bigint,
36 | checksum_last_failure timestamp with time zone,
37 | parallel_workers_to_launch bigint,
38 | parallel_workers_launched bigint,
39 | CONSTRAINT fk_statdb_samples FOREIGN KEY (server_id, sample_id)
40 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
41 | DEFERRABLE INITIALLY IMMEDIATE,
42 | CONSTRAINT pk_sample_stat_database PRIMARY KEY (server_id, sample_id, datid)
43 | );
44 | COMMENT ON TABLE sample_stat_database IS 'Sample database statistics table (fields from pg_stat_database)';
45 |
46 | CREATE TABLE last_stat_database
47 | (
48 | server_id integer NOT NULL,
49 | sample_id integer NOT NULL,
50 | datid oid NOT NULL,
51 | datname name NOT NULL,
52 | xact_commit bigint,
53 | xact_rollback bigint,
54 | blks_read bigint,
55 | blks_hit bigint,
56 | tup_returned bigint,
57 | tup_fetched bigint,
58 | tup_inserted bigint,
59 | tup_updated bigint,
60 | tup_deleted bigint,
61 | conflicts bigint,
62 | temp_files bigint,
63 | temp_bytes bigint,
64 | deadlocks bigint,
65 | blk_read_time double precision,
66 | blk_write_time double precision,
67 | stats_reset timestamp with time zone,
68 | datsize bigint,
69 | datsize_delta bigint,
70 | datistemplate boolean,
71 | session_time double precision,
72 | active_time double precision,
73 | idle_in_transaction_time double precision,
74 | sessions bigint,
75 | sessions_abandoned bigint,
76 | sessions_fatal bigint,
77 | sessions_killed bigint,
78 | checksum_failures bigint,
79 | checksum_last_failure timestamp with time zone,
80 | dattablespace oid,
81 | datallowconn boolean,
82 | parallel_workers_to_launch bigint,
83 | parallel_workers_launched bigint
84 | ) PARTITION BY LIST (server_id);
85 |
86 | COMMENT ON TABLE last_stat_database IS 'Last sample data for calculating diffs in next sample';
87 |
--------------------------------------------------------------------------------
/sample/take_sample_subset.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION take_sample_subset(IN sets_cnt integer = 1, IN current_set integer = 0) RETURNS TABLE (
2 | server name,
3 | result text,
4 | elapsed interval day to second (2)
5 | )
6 | SET search_path=@extschema@ AS $$
7 | DECLARE
8 | c_servers CURSOR FOR
9 | SELECT server_id,server_name FROM (
10 | SELECT server_id,server_name, row_number() OVER (ORDER BY server_id) AS srv_rn
11 | FROM servers WHERE enabled
12 | ) AS t1
13 | WHERE srv_rn % sets_cnt = current_set;
14 | server_sampleres integer;
15 | etext text := '';
16 | edetail text := '';
17 | econtext text := '';
18 |
19 | qres RECORD;
20 | conname text;
21 | start_clock timestamp (2) with time zone;
22 | BEGIN
23 | IF sets_cnt IS NULL OR sets_cnt < 1 THEN
24 | RAISE 'sets_cnt value is invalid. Must be positive';
25 | END IF;
26 | IF current_set IS NULL OR current_set < 0 OR current_set > sets_cnt - 1 THEN
27 |     RAISE 'current_set value is invalid. Must be between 0 and sets_cnt - 1';
28 | END IF;
29 | /*
30 | * We should include dblink schema to perform disconnections
31 | * on exception conditions
32 | */
33 | SELECT extnamespace::regnamespace AS dblink_schema INTO STRICT qres FROM pg_catalog.pg_extension WHERE extname = 'dblink';
34 | IF NOT string_to_array(current_setting('search_path'),', ') @> ARRAY[qres.dblink_schema::text] THEN
35 | EXECUTE 'SET LOCAL search_path TO ' || current_setting('search_path')||','|| qres.dblink_schema;
36 | END IF;
37 |
38 | FOR qres IN c_servers LOOP
39 | BEGIN
40 | start_clock := clock_timestamp()::timestamp (2) with time zone;
41 | server := qres.server_name;
42 | server_sampleres := take_sample(qres.server_id, NULL);
43 | elapsed := clock_timestamp()::timestamp (2) with time zone - start_clock;
44 | CASE server_sampleres
45 | WHEN 0 THEN
46 | result := 'OK';
47 | ELSE
48 | result := 'FAIL';
49 | END CASE;
50 | RETURN NEXT;
51 | EXCEPTION
52 | WHEN OTHERS THEN
53 | BEGIN
54 | GET STACKED DIAGNOSTICS etext = MESSAGE_TEXT,
55 | edetail = PG_EXCEPTION_DETAIL,
56 | econtext = PG_EXCEPTION_CONTEXT;
57 | result := format (E'%s\n%s\n%s', etext, econtext, edetail);
58 | elapsed := clock_timestamp()::timestamp (2) with time zone - start_clock;
59 | RETURN NEXT;
60 | /*
61 | Cleanup dblink connections
62 | */
63 | FOREACH conname IN ARRAY
64 | coalesce(dblink_get_connections(), array[]::text[])
65 | LOOP
66 | IF conname IN ('server_connection', 'server_db_connection') THEN
67 | PERFORM dblink_disconnect(conname);
68 | END IF;
69 | END LOOP;
70 | END;
71 | END;
72 | END LOOP;
73 | RETURN;
74 | END;
75 | $$ LANGUAGE plpgsql;
76 |
77 | COMMENT ON FUNCTION take_sample_subset(IN sets_cnt integer, IN current_set integer) IS
78 | 'Statistics sample creation function (for subset of enabled servers). Used for simplification of parallel sample collection.';
79 |
--------------------------------------------------------------------------------
/report/functions/dead_mods_ix_unused.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION profile_checkavail_tbl_top_dead(IN sserver_id integer, IN start_id integer, IN end_id integer)
2 | RETURNS BOOLEAN
3 | SET search_path=@extschema@ AS
4 | $$
5 | SELECT
6 | COUNT(*) > 0
7 | FROM v_sample_stat_tables st
8 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid)
9 | WHERE st.server_id=sserver_id AND NOT sample_db.datistemplate AND sample_id = end_id
10 | -- Min 5 MB in size
11 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2
12 | AND st.n_dead_tup > 0;
13 | $$ LANGUAGE sql;
14 |
15 | CREATE FUNCTION profile_checkavail_tbl_top_mods(IN sserver_id integer, IN start_id integer, IN end_id integer)
16 | RETURNS BOOLEAN
17 | SET search_path=@extschema@ AS
18 | $$
19 | SELECT
20 | COUNT(*) > 0
21 | FROM v_sample_stat_tables st
22 |     -- Database name and existence condition
23 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid)
24 | WHERE st.server_id = sserver_id AND NOT sample_db.datistemplate AND sample_id = end_id
25 | AND st.relkind IN ('r','m')
26 | -- Min 5 MB in size
27 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2
28 | AND n_mod_since_analyze > 0
29 | AND n_live_tup + n_dead_tup > 0;
30 | $$ LANGUAGE sql;
31 |
32 | CREATE FUNCTION top_tbl_last_sample_format(IN sserver_id integer, IN start_id integer, end_id integer)
33 | RETURNS TABLE(
34 | datid oid,
35 | relid oid,
36 | dbname name,
37 | tablespacename name,
38 | schemaname name,
39 | relname name,
40 | n_live_tup bigint,
41 | dead_pct numeric,
42 | last_autovacuum text,
43 | n_dead_tup bigint,
44 | n_mod_since_analyze bigint,
45 | mods_pct numeric,
46 | last_autoanalyze text,
47 | relsize_pretty text,
48 |
49 | ord_dead integer,
50 | ord_mod integer
51 | )
52 | SET search_path=@extschema@ AS $$
53 | SELECT
54 | datid,
55 | relid,
56 | sample_db.datname AS dbname,
57 | tablespacename,
58 | schemaname,
59 | relname,
60 |
61 | n_live_tup,
62 | n_dead_tup::numeric * 100 / NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) AS dead_pct,
63 | last_autovacuum::text,
64 | n_dead_tup,
65 | n_mod_since_analyze,
66 | n_mod_since_analyze::numeric * 100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0) AS mods_pct,
67 | last_autoanalyze::text,
68 | COALESCE(
69 | pg_size_pretty(relsize),
70 | '['||pg_size_pretty(relpages_bytes)||']'
71 | ) AS relsize_pretty,
72 |
73 | CASE WHEN
74 | n_dead_tup > 0
75 | THEN
76 | row_number() OVER (ORDER BY
77 | n_dead_tup*100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0)
78 | DESC NULLS LAST,
79 | datid,relid)::integer
80 | ELSE NULL END AS ord_dead,
81 |
82 | CASE WHEN
83 | n_mod_since_analyze > 0
84 | THEN
85 | row_number() OVER (ORDER BY
86 | n_mod_since_analyze*100/NULLIF(COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0), 0)
87 | DESC NULLS LAST,
88 | datid,relid)::integer
89 | ELSE NULL END AS ord_mod
90 | FROM
91 | v_sample_stat_tables st
92 | -- Database name
93 | JOIN sample_stat_database sample_db USING (server_id, sample_id, datid)
94 | WHERE
95 | (server_id, sample_id, datistemplate) = (sserver_id, end_id, false)
96 | AND COALESCE(st.relsize,st.relpages_bytes) > 5 * 1024^2
97 | AND COALESCE(n_live_tup, 0) + COALESCE(n_dead_tup, 0) > 0
98 | $$ LANGUAGE sql;
99 |
--------------------------------------------------------------------------------
/schema/relation_storage_parameters.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE table_storage_parameters (
2 | server_id integer,
3 | datid oid,
4 | relid oid,
5 | first_seen timestamp (0) with time zone,
6 | last_sample_id integer,
7 | reloptions jsonb,
8 | CONSTRAINT pk_table_storage_parameters PRIMARY KEY (server_id, datid, relid, first_seen),
9 | CONSTRAINT fk_table_storage_parameters_servers FOREIGN KEY (server_id)
10 | REFERENCES servers (server_id) ON DELETE CASCADE
11 | DEFERRABLE INITIALLY IMMEDIATE,
12 | CONSTRAINT fk_table_storage_parameters_samples FOREIGN KEY (server_id, last_sample_id)
13 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
14 | DEFERRABLE INITIALLY IMMEDIATE,
15 | CONSTRAINT fk_table_storage_parameters_tables FOREIGN KEY (server_id, datid, relid)
16 | REFERENCES tables_list(server_id, datid, relid)
17 | ON DELETE CASCADE ON UPDATE RESTRICT
18 | DEFERRABLE INITIALLY IMMEDIATE
19 | );
20 | CREATE INDEX ix_table_storage_parameters_lsi ON table_storage_parameters(server_id, last_sample_id);
21 | COMMENT ON TABLE table_storage_parameters IS 'Table storage parameters (pg_class.reloptions) changes detected at time of sample';
22 |
23 | CREATE VIEW v_table_storage_parameters AS
24 | SELECT
25 | tsp.server_id,
26 | tsp.datid,
27 | tsp.relid,
28 | tsp.first_seen,
29 | tsp.last_sample_id,
30 | s.sample_id,
31 | s.sample_time,
32 | tsp.reloptions
33 | FROM table_storage_parameters tsp
34 | JOIN samples s ON
35 | s.server_id = tsp.server_id AND
36 | s.sample_time >= tsp.first_seen AND
37 | (s.sample_id <= tsp.last_sample_id OR tsp.last_sample_id IS NULL)
38 | ;
39 | COMMENT ON VIEW v_table_storage_parameters IS 'Provides table storage parameters for samples';
40 |
41 | CREATE TABLE index_storage_parameters (
42 | server_id integer,
43 | datid oid,
44 | relid oid,
45 | indexrelid oid,
46 | first_seen timestamp (0) with time zone,
47 | last_sample_id integer,
48 | reloptions jsonb,
49 | CONSTRAINT pk_index_storage_parameters PRIMARY KEY (server_id, datid, relid, indexrelid, first_seen),
50 | CONSTRAINT fk_index_storage_parameters_servers FOREIGN KEY (server_id)
51 | REFERENCES servers (server_id) ON DELETE CASCADE
52 | DEFERRABLE INITIALLY IMMEDIATE,
53 | CONSTRAINT fk_index_storage_parameters_samples FOREIGN KEY (server_id, last_sample_id)
54 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
55 | DEFERRABLE INITIALLY IMMEDIATE,
56 | CONSTRAINT fk_index_storage_parameters_indexes FOREIGN KEY (server_id, datid, indexrelid)
57 | REFERENCES indexes_list(server_id, datid, indexrelid)
58 | ON DELETE CASCADE ON UPDATE RESTRICT
59 | DEFERRABLE INITIALLY IMMEDIATE
60 | );
61 | CREATE INDEX ix_index_storage_parameters_lsi ON index_storage_parameters(server_id, last_sample_id);
62 | CREATE INDEX ix_index_storage_parameters_idx ON index_storage_parameters(server_id, datid, indexrelid);
63 | COMMENT ON TABLE index_storage_parameters IS 'Index storage parameters (pg_class.reloptions) changes detected at time of sample';
64 |
65 | CREATE VIEW v_index_storage_parameters AS
66 | SELECT
67 | tsp.server_id,
68 | tsp.datid,
69 | tsp.relid,
70 | tsp.indexrelid,
71 | tsp.first_seen,
72 | tsp.last_sample_id,
73 | s.sample_id,
74 | s.sample_time,
75 | tsp.reloptions
76 | FROM index_storage_parameters tsp
77 | JOIN samples s ON
78 | s.server_id = tsp.server_id AND
79 | s.sample_time >= tsp.first_seen AND
80 | (s.sample_id <= tsp.last_sample_id OR tsp.last_sample_id IS NULL)
81 | ;
82 | COMMENT ON VIEW v_index_storage_parameters IS 'Provides index storage parameters for samples';
83 |
--------------------------------------------------------------------------------
/sample/delete_obsolete_samples.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION delete_obsolete_samples(IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS void SET search_path=@extschema@ AS $$
3 | declare
4 | ret integer;
5 | begin
6 | -- Updating dictionary tables setting last_sample_id
7 | UPDATE tablespaces_list utl SET last_sample_id = ssample_id - 1
8 | FROM tablespaces_list tl LEFT JOIN sample_stat_tablespaces cur
9 | ON (cur.server_id, cur.sample_id, cur.tablespaceid) =
10 | (sserver_id, ssample_id, tl.tablespaceid)
11 | WHERE
12 | tl.last_sample_id IS NULL AND
13 | (utl.server_id, utl.tablespaceid) = (sserver_id, tl.tablespaceid) AND
14 | tl.server_id = sserver_id AND cur.server_id IS NULL;
15 |
16 | UPDATE funcs_list ufl SET last_sample_id = ssample_id - 1
17 | FROM funcs_list fl LEFT JOIN sample_stat_user_functions cur
18 | ON (cur.server_id, cur.sample_id, cur.datid, cur.funcid) =
19 | (sserver_id, ssample_id, fl.datid, fl.funcid)
20 | WHERE
21 | fl.last_sample_id IS NULL AND
22 | fl.server_id = sserver_id AND cur.server_id IS NULL AND
23 | (ufl.server_id, ufl.datid, ufl.funcid) =
24 | (sserver_id, fl.datid, fl.funcid);
25 |
26 | UPDATE indexes_list uil SET last_sample_id = ssample_id - 1
27 | FROM indexes_list il LEFT JOIN sample_stat_indexes cur
28 | ON (cur.server_id, cur.sample_id, cur.datid, cur.indexrelid) =
29 | (sserver_id, ssample_id, il.datid, il.indexrelid)
30 | WHERE
31 | il.last_sample_id IS NULL AND
32 | il.server_id = sserver_id AND cur.server_id IS NULL AND
33 | (uil.server_id, uil.datid, uil.indexrelid) =
34 | (sserver_id, il.datid, il.indexrelid);
35 |
36 | UPDATE tables_list utl SET last_sample_id = ssample_id - 1
37 | FROM tables_list tl LEFT JOIN sample_stat_tables cur
38 | ON (cur.server_id, cur.sample_id, cur.datid, cur.relid) =
39 | (sserver_id, ssample_id, tl.datid, tl.relid)
40 | WHERE
41 | tl.last_sample_id IS NULL AND
42 | tl.server_id = sserver_id AND cur.server_id IS NULL AND
43 | (utl.server_id, utl.datid, utl.relid) =
44 | (sserver_id, tl.datid, tl.relid);
45 |
46 | UPDATE stmt_list slu SET last_sample_id = ssample_id - 1
47 | FROM sample_statements ss RIGHT JOIN stmt_list sl
48 | ON (ss.server_id, ss.sample_id, ss.queryid_md5) =
49 | (sserver_id, ssample_id, sl.queryid_md5)
50 | WHERE
51 | sl.server_id = sserver_id AND
52 | sl.last_sample_id IS NULL AND
53 | ss.server_id IS NULL AND
54 | (slu.server_id, slu.queryid_md5) = (sserver_id, sl.queryid_md5);
55 |
56 | UPDATE roles_list rlu SET last_sample_id = ssample_id - 1
57 | FROM
58 | sample_statements ss
59 | RIGHT JOIN roles_list rl
60 | ON (ss.server_id, ss.sample_id, ss.userid) =
61 | (sserver_id, ssample_id, rl.userid)
62 | WHERE
63 | rl.server_id = sserver_id AND
64 | rl.last_sample_id IS NULL AND
65 | ss.server_id IS NULL AND
66 | (rlu.server_id, rlu.userid) = (sserver_id, rl.userid);
67 |
68 | -- Deleting obsolete baselines
69 | DELETE FROM baselines
70 | WHERE keep_until < now()
71 | AND server_id = sserver_id;
72 |
73 | -- Getting max_sample_age setting
74 | BEGIN
75 | ret := COALESCE(current_setting('{pg_profile}.max_sample_age')::integer);
76 | EXCEPTION
77 | WHEN OTHERS THEN ret := 7;
78 | END;
79 |
80 | -- Deleting obsolete samples
81 | PERFORM num_nulls(min(s.sample_id),max(s.sample_id)) > 0 OR
82 | delete_samples(sserver_id, min(s.sample_id), max(s.sample_id)) > 0
83 | FROM samples s JOIN
84 | servers n USING (server_id)
85 | WHERE s.server_id = sserver_id
86 | AND s.sample_time < now() - (COALESCE(n.max_sample_age,ret) || ' days')::interval
87 | AND (s.server_id,s.sample_id) NOT IN (SELECT server_id,sample_id FROM bl_samples WHERE server_id = sserver_id);
88 | end;
89 | $$ LANGUAGE plpgsql;
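A note with a hedged usage sketch, not part of the source file: samples included in a baseline are protected from deletion by the final NOT IN (... bl_samples ...) predicate, and the retention period falls back from servers.max_sample_age to the {pg_profile}.max_sample_age setting (the placeholder is substituted at build time), or to 7 days when neither is set. Assuming the extension is installed in the profile schema, as in the regression tests:

    -- keep two weeks of samples for the local server
    SELECT profile.set_server_max_sample_age('local', 14);
    -- protect samples 2..4 from retention by putting them into a baseline
    SELECT profile.create_baseline('local', 'release_check', 2, 4);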
--------------------------------------------------------------------------------
/expected/export_import.out:
--------------------------------------------------------------------------------
1 | SET client_min_messages = WARNING;
2 | /* === Create regular export table === */
3 | CREATE TABLE profile.export AS SELECT * FROM profile.export_data();
4 | /* === Create obfuscated export table === */
5 | CREATE TABLE profile.blind_export AS
6 | SELECT * FROM profile.export_data(obfuscate_queries => true);
7 | CREATE TABLE profile.connstr_export AS
8 | SELECT * FROM profile.export_data(hide_connstr => true);
9 | BEGIN;
10 | /* === rename local server === */
11 | SELECT profile.rename_server('local','src_local');
12 | rename_server
13 | ---------------
14 | 1
15 | (1 row)
16 |
17 | /* === check matching by creation date and system identifier === */
18 | SELECT profile.import_data('profile.export') > 0;
19 | ?column?
20 | ----------
21 | f
22 | (1 row)
23 |
24 | /* === change src_local server creation time so it won't match === */
25 | UPDATE profile.servers
26 | SET
27 | server_created = server_created - '1 minutes'::interval
28 | WHERE server_name = 'src_local';
29 | /* === perform load === */
30 | SELECT profile.import_data('profile.export') > 0;
31 | ?column?
32 | ----------
33 | t
34 | (1 row)
35 |
36 | /* === Integral check - reports must match === */
37 | \a
38 | \t on
39 | WITH res AS (
40 | SELECT
41 | profile.get_report('local',1,4) AS imported,
42 | replace(
43 | replace(
44 | profile.get_report('src_local',1,4),'"server_name": "src_local"',
45 | '"server_name": "local"'),
46 | 'Server name: src_local',
47 |                 'Server name: local'
48 | ) AS exported
49 | )
50 | SELECT
51 | CASE
52 | WHEN
53 | sha224(convert_to(imported, 'UTF8')) !=
54 | sha224(convert_to(exported, 'UTF8'))
55 | THEN
56 | format(E'\n%s\n\n\n%s\n',
57 | imported,
58 | exported
59 | )
60 | ELSE
61 | 'ok'
62 | END as match
63 | FROM res;
64 | ok
65 | \a
66 | \t off
67 | /* === perform obfuscated load === */
68 | SELECT profile.drop_server('local');
69 | drop_server
70 | -------------
71 | 1
72 | (1 row)
73 |
74 | SELECT profile.import_data('profile.blind_export') > 0;
75 | ?column?
76 | ----------
77 | t
78 | (1 row)
79 |
80 | /* === check that there are no matching queries === */
81 | SELECT
82 | count(*)
83 | FROM profile.servers s_src
84 | CROSS JOIN profile.servers s_blind
85 | JOIN profile.stmt_list q_src ON
86 | (q_src.server_id = s_src.server_id)
87 | JOIN profile.stmt_list q_blind ON
88 | (q_src.queryid_md5 = q_blind.queryid_md5 AND q_blind.server_id = s_blind.server_id)
89 | WHERE
90 | s_src.server_name = 'src_local' AND s_blind.server_name = 'local'
91 | AND q_src.query = q_blind.query;
92 | count
93 | -------
94 | 0
95 | (1 row)
96 |
97 | /* === ensure connstrings were in the original data === */
98 | SELECT count(connstr) > 0 FROM profile.servers WHERE server_name = 'src_local';
99 | ?column?
100 | ----------
101 | t
102 | (1 row)
103 |
104 | /* === check that blind export excludes connection strings === */
105 | SELECT count(connstr) = 0 FROM profile.servers WHERE server_name = 'local';
106 | ?column?
107 | ----------
108 | t
109 | (1 row)
110 |
111 | /* === perform hidden connstr load === */
112 | SELECT profile.drop_server('local');
113 | drop_server
114 | -------------
115 | 1
116 | (1 row)
117 |
118 | SELECT profile.import_data('profile.connstr_export') > 0;
119 | ?column?
120 | ----------
121 | t
122 | (1 row)
123 |
124 | /* === check that hidden connstr export excludes connection strings === */
125 | SELECT count(connstr) = 0 FROM profile.servers WHERE server_name = 'local';
126 | ?column?
127 | ----------
128 | t
129 | (1 row)
130 |
131 | ROLLBACK;
132 | /* === drop export tables === */
133 | DROP TABLE profile.export;
134 | DROP TABLE profile.blind_export;
135 | DROP TABLE profile.connstr_export;
136 |
--------------------------------------------------------------------------------
/sample/calculate_database_stats.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION calculate_database_stats(IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS void AS $$
3 | -- Calc stat_database diff
4 | INSERT INTO sample_stat_database(
5 | server_id,
6 | sample_id,
7 | datid,
8 | datname,
9 | xact_commit,
10 | xact_rollback,
11 | blks_read,
12 | blks_hit,
13 | tup_returned,
14 | tup_fetched,
15 | tup_inserted,
16 | tup_updated,
17 | tup_deleted,
18 | conflicts,
19 | temp_files,
20 | temp_bytes,
21 | deadlocks,
22 | checksum_failures,
23 | checksum_last_failure,
24 | blk_read_time,
25 | blk_write_time,
26 | session_time,
27 | active_time,
28 | idle_in_transaction_time,
29 | sessions,
30 | sessions_abandoned,
31 | sessions_fatal,
32 | sessions_killed,
33 | parallel_workers_to_launch,
34 | parallel_workers_launched,
35 | stats_reset,
36 | datsize,
37 | datsize_delta,
38 | datistemplate
39 | )
40 | SELECT
41 | cur.server_id,
42 | cur.sample_id,
43 | cur.datid,
44 | cur.datname,
45 | cur.xact_commit - COALESCE(lst.xact_commit,0),
46 | cur.xact_rollback - COALESCE(lst.xact_rollback,0),
47 | cur.blks_read - COALESCE(lst.blks_read,0),
48 | cur.blks_hit - COALESCE(lst.blks_hit,0),
49 | cur.tup_returned - COALESCE(lst.tup_returned,0),
50 | cur.tup_fetched - COALESCE(lst.tup_fetched,0),
51 | cur.tup_inserted - COALESCE(lst.tup_inserted,0),
52 | cur.tup_updated - COALESCE(lst.tup_updated,0),
53 | cur.tup_deleted - COALESCE(lst.tup_deleted,0),
54 | cur.conflicts - COALESCE(lst.conflicts,0),
55 | cur.temp_files - COALESCE(lst.temp_files,0),
56 | cur.temp_bytes - COALESCE(lst.temp_bytes,0),
57 | cur.deadlocks - COALESCE(lst.deadlocks,0),
58 | cur.checksum_failures - COALESCE(lst.checksum_failures,0),
59 | cur.checksum_last_failure,
60 | cur.blk_read_time - COALESCE(lst.blk_read_time,0),
61 | cur.blk_write_time - COALESCE(lst.blk_write_time,0),
62 | cur.session_time - COALESCE(lst.session_time,0),
63 | cur.active_time - COALESCE(lst.active_time,0),
64 | cur.idle_in_transaction_time - COALESCE(lst.idle_in_transaction_time,0),
65 | cur.sessions - COALESCE(lst.sessions,0),
66 | cur.sessions_abandoned - COALESCE(lst.sessions_abandoned,0),
67 | cur.sessions_fatal - COALESCE(lst.sessions_fatal,0),
68 | cur.sessions_killed - COALESCE(lst.sessions_killed,0),
69 | cur.parallel_workers_to_launch - COALESCE(lst.parallel_workers_to_launch,0),
70 | cur.parallel_workers_launched - COALESCE(lst.parallel_workers_launched,0),
71 | cur.stats_reset,
72 | cur.datsize as datsize,
73 | cur.datsize - COALESCE(lst.datsize,0) as datsize_delta,
74 | cur.datistemplate
75 | FROM last_stat_database cur
76 | LEFT OUTER JOIN last_stat_database lst ON
77 | (lst.server_id, lst.sample_id, lst.datid, lst.datname) =
78 | (sserver_id, ssample_id - 1, cur.datid, cur.datname)
79 | AND lst.stats_reset IS NOT DISTINCT FROM cur.stats_reset
80 | WHERE
81 | (cur.server_id, cur.sample_id) = (sserver_id, ssample_id);
82 |
 83 | /*
 84 |  * After a statistics reset, the full database size and the checksum_failures
 85 |  * counter are incorrectly treated as increments by the previous query,
 86 |  * so we need to update them with the correct values.
 87 |  */
88 | UPDATE sample_stat_database sdb
89 | SET
90 | datsize_delta = cur.datsize - lst.datsize,
91 | checksum_failures = cur.checksum_failures - lst.checksum_failures,
92 | checksum_last_failure = cur.checksum_last_failure
93 | FROM
94 | last_stat_database cur
95 | JOIN last_stat_database lst ON
96 | (lst.server_id, lst.sample_id, lst.datid, lst.datname) =
97 | (sserver_id, ssample_id - 1, cur.datid, cur.datname)
98 | WHERE cur.stats_reset IS DISTINCT FROM lst.stats_reset AND
99 | (cur.server_id, cur.sample_id) = (sserver_id, ssample_id) AND
100 | (sdb.server_id, sdb.sample_id, sdb.datid, sdb.datname) =
101 | (cur.server_id, cur.sample_id, cur.datid, cur.datname);
102 | $$ LANGUAGE sql;
103 |
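A brief illustration, not part of the source file: the LEFT JOIN above matches the previous sample only when both rows carry the same stats_reset value, and IS NOT DISTINCT FROM is used so that two NULL stats_reset values still match, which plain equality would not do:

    SELECT NULL = NULL AS plain_equality,              -- NULL: the row would drop out of the join
           NULL IS NOT DISTINCT FROM NULL AS matched;  -- true: NULL resets still compare equal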
--------------------------------------------------------------------------------
/report/functions/relation_storage_parameters.sql:
--------------------------------------------------------------------------------
1 | /*===== Relation storage parameters reporting functions =====*/
2 | CREATE FUNCTION table_storage_parameters_format(IN sserver_id integer,
3 | IN start1_id integer, IN end1_id integer,
4 | IN start2_id integer = NULL, IN end2_id integer = NULL)
5 | RETURNS TABLE (
6 | dbname name,
7 | dbid oid,
8 | schemaname name,
9 | relname name,
10 | relid oid,
11 | reloptions jsonb,
12 | first_seen text,
13 | ord_param integer -- report header ordering
14 | )
15 | SET search_path=@extschema@ AS $$
16 | SELECT
17 | evd.dbname,
18 | evd.dbid,
19 | evd.schemaname,
20 | evd.relname,
21 | evd.relid,
22 | evd.reloptions,
23 | CASE
24 | WHEN s_first.sample_id IS NOT NULL AND
25 | s_first.sample_id > least(start1_id, start2_id) OR
26 | evd.last_sample_id IS NOT NULL AND
27 | evd.last_sample_id < greatest(end1_id, end2_id)
28 | THEN evd.first_seen::text
29 | ELSE NULL
30 | END as first_seen,
31 | row_number() over (order by evd.dbname, evd.schemaname, evd.relname, evd.first_seen)::integer as ord_param
32 | FROM (
33 | SELECT DISTINCT
34 | ssd.datname as dbname,
35 | tsp.datid as dbid,
36 | tl.schemaname,
37 | tl.relname,
38 | tsp.relid,
39 | tsp.first_seen,
40 | tsp.last_sample_id,
41 | tsp.reloptions
42 | FROM v_table_storage_parameters tsp
43 | JOIN tables_list tl USING (server_id, datid, relid)
44 | JOIN sample_stat_database ssd USING (server_id, datid, sample_id)
45 | WHERE
46 | tsp.server_id = sserver_id AND
47 | (tsp.sample_id BETWEEN start1_id AND end1_id OR
48 | tsp.sample_id BETWEEN start2_id AND end2_id)
49 | ) evd
50 | LEFT JOIN samples s_first ON (s_first.server_id, s_first.sample_time) = (sserver_id, evd.first_seen)
51 | $$ LANGUAGE sql;
52 |
53 | CREATE FUNCTION index_storage_parameters_format(IN sserver_id integer,
54 | IN start1_id integer, IN end1_id integer,
55 | IN start2_id integer = NULL, IN end2_id integer = NULL)
56 | RETURNS TABLE (
57 | dbname name,
58 | dbid oid,
59 | schemaname name,
60 | relname name,
61 | relid oid,
62 | indexrelname name,
63 | indexrelid oid,
64 | reloptions jsonb,
65 | first_seen text,
66 | ord_param integer -- report header ordering
67 | )
68 | SET search_path=@extschema@ AS $$
69 | SELECT
70 | evd.dbname,
71 | evd.dbid,
72 | evd.schemaname,
73 | evd.relname,
74 | evd.relid,
75 | evd.indexrelname,
76 | evd.indexrelid,
77 | evd.reloptions,
78 | CASE
79 | WHEN s_first.sample_id IS NOT NULL AND
80 | s_first.sample_id > least(start1_id, start2_id) OR
81 | evd.last_sample_id IS NOT NULL AND
82 | evd.last_sample_id < greatest(end1_id, end2_id)
83 | THEN evd.first_seen::text
84 | ELSE NULL
85 | END as first_seen,
86 | row_number() over (order by evd.dbname, evd.schemaname, evd.relname, evd.indexrelname, evd.first_seen)::integer as ord_param
87 | FROM (
88 | SELECT DISTINCT
89 | ssd.datname as dbname,
90 | isp.datid as dbid,
91 | tl.schemaname,
92 | tl.relname,
93 | isp.relid,
94 | il.indexrelname,
95 | isp.indexrelid,
96 | isp.first_seen,
97 | isp.last_sample_id,
98 | isp.reloptions
99 | FROM v_index_storage_parameters isp
100 | JOIN tables_list tl USING (server_id, datid, relid)
101 | JOIN indexes_list il USING (server_id, datid, relid, indexrelid)
102 | JOIN sample_stat_database ssd USING (server_id, datid, sample_id)
103 | WHERE
104 | isp.server_id = sserver_id AND
105 | (isp.sample_id BETWEEN start1_id AND end1_id OR
106 | isp.sample_id BETWEEN start2_id AND end2_id)
107 | ) evd
108 | LEFT JOIN samples s_first ON (s_first.server_id, s_first.sample_time) = (sserver_id, evd.first_seen)
109 | $$ LANGUAGE sql;
110 |
--------------------------------------------------------------------------------
/management/internal.sql:
--------------------------------------------------------------------------------
1 | /* ========= Internal functions ========= */
2 |
3 | CREATE FUNCTION get_connstr(IN sserver_id integer, INOUT properties jsonb)
4 | SET search_path=@extschema@ AS $$
5 | DECLARE
6 | server_connstr text = NULL;
7 | server_host text = NULL;
8 | BEGIN
9 | ASSERT properties IS NOT NULL, 'properties must be not null';
10 | --Getting server_connstr
11 | SELECT connstr INTO server_connstr FROM servers n WHERE n.server_id = sserver_id;
12 | ASSERT server_connstr IS NOT NULL, 'server_id not found';
13 |   /*
14 |   When the host= parameter is not specified, a connection over a unix socket is assumed.
15 |   The unix socket can be in a non-default location, so we need to specify it.
16 |   */
17 | IF (SELECT count(*) = 0 FROM regexp_matches(server_connstr,$o$((\s|^)host\s*=)$o$)) AND
18 | (SELECT count(*) != 0 FROM pg_catalog.pg_settings
19 | WHERE name = 'unix_socket_directories' AND boot_val != reset_val)
20 | THEN
21 | -- Get suitable socket name from available list
22 | server_host := (SELECT COALESCE(t[1],t[4])
23 | FROM pg_catalog.pg_settings,
24 | regexp_matches(reset_val,'("(("")|[^"])+")|([^,]+)','g') AS t
25 | WHERE name = 'unix_socket_directories' AND boot_val != reset_val
26 | -- libpq can't handle sockets with comma in their names
27 | AND position(',' IN COALESCE(t[1],t[4])) = 0
28 | LIMIT 1
29 | );
30 | -- quoted string processing
31 | IF left(server_host, 1) = '"' AND
32 | right(server_host, 1) = '"' AND
33 | (length(server_host) > 1)
34 | THEN
35 | server_host := replace(substring(server_host,2,length(server_host)-2),'""','"');
36 | END IF;
37 | -- append host parameter to the connection string
38 | IF server_host IS NOT NULL AND server_host != '' THEN
39 | server_connstr := concat_ws(server_connstr, format('host=%L',server_host), ' ');
40 | ELSE
41 | server_connstr := concat_ws(server_connstr, format('host=%L','localhost'), ' ');
42 | END IF;
43 | END IF;
44 |
45 | properties := jsonb_set(properties, '{properties, server_connstr}',
46 | to_jsonb(server_connstr));
47 | END;
48 | $$ LANGUAGE plpgsql;
49 |
50 | CREATE FUNCTION get_sampleids_by_timerange(IN sserver_id integer, IN time_range tstzrange)
51 | RETURNS TABLE (
52 | start_id integer,
53 | end_id integer
54 | ) SET search_path=@extschema@ AS $$
55 | BEGIN
56 | SELECT min(s1.sample_id),max(s2.sample_id) INTO start_id,end_id FROM
57 | samples s1 JOIN
58 |     /* The redundant join condition s1.sample_id < s2.sample_id is needed here.
59 |      * Otherwise the optimizer uses tstzrange(s1.sample_time,s2.sample_time) && time_range
60 |      * as the first join condition and sometimes fails with the error
61 |      * ERROR: range lower bound must be less than or equal to range upper bound
62 |      */
63 | samples s2 ON (s1.sample_id < s2.sample_id AND s1.server_id = s2.server_id AND s1.sample_id + 1 = s2.sample_id)
64 | WHERE s1.server_id = sserver_id AND tstzrange(s1.sample_time,s2.sample_time) && time_range;
65 |
66 | IF start_id IS NULL OR end_id IS NULL THEN
67 | RAISE 'Suitable samples not found';
68 | END IF;
69 |
70 | RETURN NEXT;
71 | RETURN;
72 | END;
73 | $$ LANGUAGE plpgsql;
74 |
75 | CREATE FUNCTION get_server_by_name(IN server name)
76 | RETURNS integer SET search_path=@extschema@ AS $$
77 | DECLARE
78 | sserver_id integer;
79 | BEGIN
80 | SELECT server_id INTO sserver_id FROM servers WHERE server_name=server;
81 | IF sserver_id IS NULL THEN
82 | RAISE 'Server not found.';
83 | END IF;
84 |
85 | RETURN sserver_id;
86 | END;
87 | $$ LANGUAGE plpgsql;
88 |
89 | CREATE FUNCTION get_baseline_samples(IN sserver_id integer, baseline varchar(25))
90 | RETURNS TABLE (
91 | start_id integer,
92 | end_id integer
93 | ) SET search_path=@extschema@ AS $$
94 | BEGIN
95 | SELECT min(sample_id), max(sample_id) INTO start_id,end_id
96 | FROM baselines JOIN bl_samples USING (bl_id,server_id)
97 | WHERE server_id = sserver_id AND bl_name = baseline;
98 | IF start_id IS NULL OR end_id IS NULL THEN
99 | RAISE 'Baseline not found';
100 | END IF;
101 | RETURN NEXT;
102 | RETURN;
103 | END;
104 | $$ LANGUAGE plpgsql;
105 |
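A hedged standalone sketch, not part of the source file: the regular expression in get_connstr() splits unix_socket_directories into comma-separated entries, capturing quoted directories in group 1 and unquoted ones in group 4, which COALESCE(t[1],t[4]) then merges. The directory list below is made up for illustration:

    SELECT COALESCE(t[1], t[4]) AS socket_dir
      FROM regexp_matches('/tmp, /var/run/postgresql',
                          '("(("")|[^"])+")|([^,]+)', 'g') AS t;
    -- one row per comma-separated entry; quoted entries are unescaped afterwards by the code above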
--------------------------------------------------------------------------------
/report/functions/settings.sql:
--------------------------------------------------------------------------------
1 | /*===== Settings reporting functions =====*/
2 | CREATE FUNCTION settings_and_changes(IN sserver_id integer, IN start_id integer, IN end_id integer)
3 | RETURNS TABLE(
4 | first_seen timestamp(0) with time zone,
5 | setting_scope smallint,
6 | name text,
7 | setting text,
8 | reset_val text,
9 | boot_val text,
10 | unit text,
11 | sourcefile text,
12 | sourceline integer,
13 | pending_restart boolean,
14 | changed boolean,
15 | default_val boolean
16 | )
17 | SET search_path=@extschema@ AS $$
18 | SELECT
19 | first_seen,
20 | setting_scope,
21 | name,
22 | setting,
23 | reset_val,
24 | boot_val,
25 | unit,
26 | sourcefile,
27 | sourceline,
28 | pending_restart,
29 | false,
30 | boot_val IS NOT DISTINCT FROM reset_val
31 | FROM v_sample_settings
32 | WHERE (server_id, sample_id) = (sserver_id, start_id)
33 | UNION ALL
34 | SELECT
35 | first_seen,
36 | setting_scope,
37 | name,
38 | setting,
39 | reset_val,
40 | boot_val,
41 | unit,
42 | sourcefile,
43 | sourceline,
44 | pending_restart,
45 | true,
46 | boot_val IS NOT DISTINCT FROM reset_val
47 | FROM sample_settings s
48 | JOIN samples s_start ON (s_start.server_id = s.server_id AND s_start.sample_id = start_id)
49 | JOIN samples s_end ON (s_end.server_id = s.server_id AND s_end.sample_id = end_id)
50 | WHERE s.server_id = sserver_id AND s.first_seen > s_start.sample_time AND s.first_seen <= s_end.sample_time
51 | $$ LANGUAGE sql;
52 |
53 | CREATE FUNCTION settings_format(IN sserver_id integer, IN start_id integer, IN end_id integer)
54 | RETURNS TABLE (
55 | klass text,
56 | name text,
57 | reset_val text,
58 | unit text,
59 | source text,
60 | notes text,
61 | default_val boolean,
62 | defined_val boolean,
63 | h_ord integer -- report header ordering
64 | )
65 | SET search_path=@extschema@ AS $$
66 | SELECT
67 | CASE WHEN changed THEN 'new' ELSE 'init' END AS klass,
68 | name,
69 | reset_val,
70 | unit,
71 | concat_ws(':', sourcefile, sourceline) AS source,
72 | concat_ws(', ',
73 | CASE WHEN changed THEN first_seen ELSE NULL END,
74 | CASE WHEN pending_restart THEN 'Pending restart' ELSE NULL END
75 | ) AS notes,
76 | default_val,
77 | NOT default_val,
78 | CASE
79 | WHEN name = 'version' THEN 10
80 | WHEN (name, setting_scope) = ('pgpro_version', 1) THEN 21
81 | WHEN (name, setting_scope) = ('pgpro_edition', 1) THEN 22
82 | WHEN (name, setting_scope) = ('pgpro_build', 1) THEN 23
83 | ELSE NULL
84 | END AS h_ord
85 | FROM
86 | settings_and_changes(sserver_id, start_id, end_id)
87 | ORDER BY
88 | name,setting_scope,first_seen,pending_restart ASC NULLS FIRST
89 | $$ LANGUAGE sql;
90 |
91 | CREATE FUNCTION settings_format_diff(IN sserver_id integer,
92 | IN start1_id integer, IN end1_id integer,
93 | IN start2_id integer, IN end2_id integer)
94 | RETURNS TABLE (
95 | klass text,
96 | name text,
97 | reset_val text,
98 | unit text,
99 | source text,
100 | notes text,
101 | default_val boolean,
102 | defined_val boolean,
103 | h_ord integer -- report header ordering
104 | )
105 | SET search_path=@extschema@ AS $$
106 | SELECT
107 | concat_ws('_',
108 | CASE WHEN changed THEN 'new' ELSE 'init' END,
109 | CASE WHEN s1.name IS NULL THEN 'i2'
110 | WHEN s2.name IS NULL THEN 'i1'
111 | ELSE NULL
112 | END
113 | ) AS klass,
114 | name,
115 | reset_val,
116 | COALESCE(s1.unit,s2.unit) as unit,
117 | concat_ws(':',
118 | COALESCE(s1.sourcefile,s2.sourcefile),
119 | COALESCE(s1.sourceline,s2.sourceline)
120 | ) AS source,
121 | concat_ws(', ',
122 | CASE WHEN changed THEN first_seen ELSE NULL END,
123 | CASE WHEN pending_restart THEN 'Pending restart' ELSE NULL END
124 | ) AS notes,
125 | default_val,
126 | NOT default_val,
127 | CASE
128 | WHEN name = 'version' THEN 10
129 | WHEN (name, setting_scope) = ('pgpro_version', 1) THEN 21
130 | WHEN (name, setting_scope) = ('pgpro_edition', 1) THEN 22
131 | WHEN (name, setting_scope) = ('pgpro_build', 1) THEN 23
132 | ELSE NULL
133 | END AS h_ord
134 | FROM
135 | settings_and_changes(sserver_id, start1_id, end1_id) s1
136 | FULL OUTER JOIN
137 | settings_and_changes(sserver_id, start2_id, end2_id) s2
138 | USING(first_seen, setting_scope, name, setting, reset_val, pending_restart, changed, default_val)
139 | ORDER BY
140 | name,setting_scope,first_seen,pending_restart ASC NULLS FIRST
141 | $$ LANGUAGE sql;
142 |
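A hedged usage sketch, not part of the source file: settings_and_changes() returns the settings observed at the interval start plus every setting that changed inside the interval (flagged by the changed column); the *_format functions only reshape that output for the report. The server and sample identifiers below are placeholders:

    SELECT name, reset_val, changed, pending_restart
      FROM settings_and_changes(1, 1, 4)  -- hypothetical server_id and sample ids
     WHERE name IN ('work_mem', 'shared_buffers')
     ORDER BY name, first_seen;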
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PGPROFILE_VERSION = 4.11
2 | EXTENSION = pg_profile
3 |
4 | TAR_pkg = $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz
5 |
6 | default: all
7 |
8 | include migration/Makefile
9 |
10 | DATA_built = $(EXTENSION)--$(PGPROFILE_VERSION).sql $(EXTENSION).control $(MIGRATION)
11 |
12 | EXTRA_CLEAN = $(TAR_pkg) $(MIGRATION) $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql $(schema) \
13 | $(report) data/report_templates.sql
14 |
15 | REGRESS = \
16 | create_extension \
17 | server_management \
18 | samples_and_reports \
19 | sizes_collection \
20 | export_import \
21 | retention_and_baselines \
22 | drop_extension
23 |
24 | # pg_stat_kcache tests
25 | ifdef USE_KCACHE
26 | REGRESS += \
27 | kcache_create_extension \
28 | server_management \
29 | samples_and_reports \
30 | sizes_collection \
31 | kcache_stat_avail \
32 | export_import \
33 | retention_and_baselines \
34 | kcache_drop_extension
35 | endif
36 |
37 | PG_CONFIG ?= pg_config
38 |
39 | ifdef USE_PGXS
40 | PGXS := $(shell $(PG_CONFIG) --pgxs)
41 | include $(PGXS)
42 | else
43 | subdir = contrib/$(EXTENSION)
44 | top_builddir = ../..
45 | include $(top_builddir)/src/Makefile.global
46 | include $(top_srcdir)/contrib/contrib-global.mk
47 | endif
48 |
49 | schema = schema/schema.sql
50 |
51 | data = data/import_queries.sql \
52 | data/report_templates.sql
53 | common = management/internal.sql
54 | adm_funcs = management/baseline.sql \
55 | management/server.sql \
56 | management/local_server.sql
57 | export_funcs = \
58 | management/export.sql
59 | sample = \
60 | sample/log_sample_timings.sql \
61 | sample/sample_pg_stat_statements.sql \
62 | sample/get_sp_setting.sql \
63 | sample/calculate_archiver_stats.sql \
64 | sample/calculate_cluster_stats.sql \
65 | sample/calculate_database_stats.sql \
66 | sample/calculate_io_stats.sql \
67 | sample/calculate_slru_stats.sql \
68 | sample/calculate_tablespace_stats.sql \
69 | sample/calculate_wal_stats.sql \
70 | sample/collect_database_stats.sql \
71 | sample/collect_obj_stats.sql \
72 | sample/collect_subsamples.sql \
73 | sample/collect_tablespace_stats.sql \
74 | sample/delete_obsolete_samples.sql \
75 | sample/get_sized_bounds.sql \
76 | sample/init_sample.sql \
77 | sample/pg_wait_sampling.sql \
78 | sample/query_pg_stat_archiver.sql \
79 | sample/query_pg_stat_bgwriter.sql \
80 | sample/query_pg_stat_io.sql \
81 | sample/query_pg_stat_slru.sql \
82 | sample/query_pg_stat_wal.sql \
83 | sample/sample_dbobj_delta.sql \
84 | sample/show_samples.0.sql \
85 | sample/show_samples.1.sql \
86 | sample/take_sample_subset.sql \
87 | sample/take_sample.0.sql \
88 | sample/take_sample.1.sql \
89 | sample/take_sample.2.sql \
90 | sample/compat.sql \
91 | sample/take_subsample_subset.sql \
92 | sample/take_subsample.0.sql \
93 | sample/take_subsample.1.sql \
94 | sample/take_subsample.2.sql
95 |
96 | report = report/report_build.sql
97 |
98 | grants = \
99 | privileges/pg_profile.sql
100 |
101 | # Extension script contents
102 | functions = $(common) $(adm_funcs) $(export_funcs) $(sample) $(report)
103 | script = $(schema) $(data) $(functions) $(grants)
104 |
105 | # Manual script contents
106 | functions_man = $(common) $(adm_funcs) $(sample) $(report)
107 | script_man = $(schema) $(functions_man) $(grants) data/report_templates.sql
108 |
109 | # Common sed replacement script
110 | sed_extension = -e 's/{pg_profile}/$(EXTENSION)/; s/{extension_version}/$(PGPROFILE_VERSION)/; /--/,/--/d; /--/d; /--/d'
111 | sed_manual = -e 's/{pg_profile}/$(EXTENSION)/; s/{extension_version}/$(PGPROFILE_VERSION)/; /--/,/--/d; /--/d; /--/d'
112 |
113 | schema/schema.sql:
114 | ${MAKE} -C schema
115 |
116 | data/report_templates.sql:
117 | ${MAKE} -C data
118 |
119 | report/report_build.sql:
120 | ${MAKE} -C report
121 |
122 | sqlfile: $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql
123 |
124 | $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql: $(script)
125 | sed -e 's/SET search_path=@extschema@//' \
126 | $(sed_manual) \
127 | $(script_man) \
128 | -e '1i \\\set ON_ERROR_STOP on' \
129 | > $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql
130 |
131 | $(EXTENSION).control: control.tpl
132 | sed -e 's/{version}/$(PGPROFILE_VERSION)/' control.tpl > $(EXTENSION).control
133 |
134 | $(EXTENSION)--$(PGPROFILE_VERSION).sql: $(script)
135 | sed \
136 | -e '1i \\\echo Use "CREATE EXTENSION $(EXTENSION)" to load this file. \\quit' \
137 | $(sed_extension) \
138 | $(script) \
139 | > $(EXTENSION)--$(PGPROFILE_VERSION).sql
140 |
141 | $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz: sqlfile
142 | tar czf $(EXTENSION)--$(PGPROFILE_VERSION)_manual.tar.gz $(EXTENSION)--$(PGPROFILE_VERSION)_manual.sql
143 |
144 | $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz: $(DATA_built)
145 | tar czf $(EXTENSION)--$(PGPROFILE_VERSION).tar.gz $(DATA_built)
146 |
147 | tarpkg: $(TAR_pkg)
148 |
--------------------------------------------------------------------------------
/sample/query_pg_stat_io.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION query_pg_stat_io(IN server_properties jsonb, IN sserver_id integer, IN ssample_id integer
2 | ) RETURNS jsonb AS $$
3 | declare
4 | server_query text;
5 | pg_version int := (get_sp_setting(server_properties, 'server_version_num')).reset_val::integer;
6 | begin
7 | server_properties := log_sample_timings(server_properties, 'query pg_stat_io', 'start');
8 | -- pg_stat_io data
9 | CASE
10 | WHEN pg_version >= 180000 THEN
11 | server_query := 'SELECT '
12 | 'backend_type,'
13 | 'object,'
14 | 'pg_stat_io.context,'
15 | 'reads,'
16 | 'read_bytes,'
17 | 'read_time,'
18 | 'writes,'
19 | 'write_bytes,'
20 | 'write_time,'
21 | 'writebacks,'
22 | 'writeback_time,'
23 | 'extends,'
24 | 'extend_bytes,'
25 | 'extend_time,'
26 | 'ps.setting::integer AS op_bytes,'
27 | 'hits,'
28 | 'evictions,'
29 | 'reuses,'
30 | 'fsyncs,'
31 | 'fsync_time,'
32 | 'stats_reset '
33 | 'FROM pg_catalog.pg_stat_io '
34 | 'JOIN pg_catalog.pg_settings ps ON name = ''block_size'' '
35 | 'WHERE greatest('
36 | 'reads,'
37 | 'writes,'
38 | 'writebacks,'
39 | 'extends,'
40 | 'hits,'
41 | 'evictions,'
42 | 'reuses,'
43 | 'fsyncs'
44 | ') > 0'
45 | ;
46 | WHEN pg_version >= 160000 THEN
47 | server_query := 'SELECT '
48 | 'backend_type,'
49 | 'object,'
50 | 'context,'
51 | 'reads,'
52 | 'NULL as read_bytes,'
53 | 'read_time,'
54 | 'writes,'
55 | 'NULL as write_bytes,'
56 | 'write_time,'
57 | 'writebacks,'
58 | 'writeback_time,'
59 | 'extends,'
60 | 'NULL as extend_bytes,'
61 | 'extend_time,'
62 | 'op_bytes,'
63 | 'hits,'
64 | 'evictions,'
65 | 'reuses,'
66 | 'fsyncs,'
67 | 'fsync_time,'
68 | 'stats_reset '
69 | 'FROM pg_catalog.pg_stat_io '
70 | 'WHERE greatest('
71 | 'reads,'
72 | 'writes,'
73 | 'writebacks,'
74 | 'extends,'
75 | 'hits,'
76 | 'evictions,'
77 | 'reuses,'
78 | 'fsyncs'
79 | ') > 0'
80 | ;
81 | ELSE
82 | server_query := NULL;
83 | END CASE;
84 |
85 | IF server_query IS NOT NULL THEN
86 | INSERT INTO last_stat_io (
87 | server_id,
88 | sample_id,
89 | backend_type,
90 | object,
91 | context,
92 | reads,
93 | read_bytes,
94 | read_time,
95 | writes,
96 | write_bytes,
97 | write_time,
98 | writebacks,
99 | writeback_time,
100 | extends,
101 | extend_bytes,
102 | extend_time,
103 | op_bytes,
104 | hits,
105 | evictions,
106 | reuses,
107 | fsyncs,
108 | fsync_time,
109 | stats_reset
110 | )
111 | SELECT
112 | sserver_id,
113 | ssample_id,
114 | backend_type,
115 | object,
116 | context,
117 | reads,
118 | read_bytes,
119 | read_time,
120 | writes,
121 | write_bytes,
122 | write_time,
123 | writebacks,
124 | writeback_time,
125 | extends,
126 | extend_bytes,
127 | extend_time,
128 | op_bytes,
129 | hits,
130 | evictions,
131 | reuses,
132 | fsyncs,
133 | fsync_time,
134 | stats_reset
135 | FROM dblink('server_connection',server_query) AS rs (
136 | backend_type text,
137 | object text,
138 | context text,
139 | reads bigint,
140 | read_bytes numeric,
141 | read_time double precision,
142 | writes bigint,
143 | write_bytes numeric,
144 | write_time double precision,
145 | writebacks bigint,
146 | writeback_time double precision,
147 | extends bigint,
148 | extend_bytes numeric,
149 | extend_time double precision,
150 | op_bytes bigint,
151 | hits bigint,
152 | evictions bigint,
153 | reuses bigint,
154 | fsyncs bigint,
155 | fsync_time double precision,
156 | stats_reset timestamp with time zone
157 | );
158 | END IF;
159 | server_properties := log_sample_timings(server_properties, 'query pg_stat_io', 'end');
160 | return server_properties;
161 | end;
162 | $$ LANGUAGE plpgsql;
163 |
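A note with a hedged sketch, not part of the source file: on PostgreSQL 16-17 the per-operation byte counters do not exist, so read_bytes/write_bytes/extend_bytes are stored as NULL and op_bytes is taken from pg_stat_io itself; on 18+ the byte columns are collected and op_bytes is set from the block_size setting. Bytes read could therefore be derived either way (identifiers below are placeholders):

    SELECT backend_type, object, context,
           COALESCE(read_bytes, reads * op_bytes) AS bytes_read
      FROM sample_stat_io
     WHERE (server_id, sample_id) = (1, 4);  -- placeholder ids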
--------------------------------------------------------------------------------
/management/baseline.sql:
--------------------------------------------------------------------------------
1 | /* ========= Baseline management functions ========= */
2 |
3 | CREATE FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$
4 | DECLARE
5 | baseline_id integer;
6 | sserver_id integer;
7 | BEGIN
8 | SELECT server_id INTO sserver_id FROM servers WHERE server_name=server;
9 | IF sserver_id IS NULL THEN
10 | RAISE 'Server not found';
11 | END IF;
12 |
13 | INSERT INTO baselines(server_id,bl_name,keep_until)
14 | VALUES (sserver_id,baseline,now() + (days || ' days')::interval)
15 | RETURNING bl_id INTO baseline_id;
16 |
17 | INSERT INTO bl_samples (server_id,sample_id,bl_id)
18 | SELECT server_id,sample_id,baseline_id
19 | FROM samples s JOIN servers n USING (server_id)
20 | WHERE server_id=sserver_id AND sample_id BETWEEN start_id AND end_id;
21 |
22 | RETURN baseline_id;
23 | END;
24 | $$ LANGUAGE plpgsql;
25 | COMMENT ON FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer) IS 'New baseline by ID''s';
26 |
27 | CREATE FUNCTION create_baseline(IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$
28 | BEGIN
29 | RETURN create_baseline('local',baseline,start_id,end_id,days);
30 | END;
31 | $$ LANGUAGE plpgsql;
32 | COMMENT ON FUNCTION create_baseline(IN baseline varchar(25), IN start_id integer, IN end_id integer, IN days integer) IS 'Local server new baseline by ID''s';
33 |
34 | CREATE FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN time_range tstzrange, IN days integer = NULL) RETURNS integer SET search_path=@extschema@ AS $$
35 | DECLARE
36 | range_ids record;
37 | BEGIN
38 | SELECT * INTO STRICT range_ids
39 | FROM get_sampleids_by_timerange(get_server_by_name(server), time_range);
40 |
41 | RETURN create_baseline(server,baseline,range_ids.start_id,range_ids.end_id,days);
42 | END;
43 | $$ LANGUAGE plpgsql;
44 | COMMENT ON FUNCTION create_baseline(IN server name, IN baseline varchar(25), IN time_range tstzrange, IN days integer) IS 'New baseline by time range';
45 |
46 | CREATE FUNCTION create_baseline(IN baseline varchar(25), IN time_range tstzrange, IN days integer = NULL) RETURNS integer
47 | SET search_path=@extschema@ AS $$
48 | BEGIN
49 | RETURN create_baseline('local',baseline,time_range,days);
50 | END;
51 | $$ LANGUAGE plpgsql;
52 | COMMENT ON FUNCTION create_baseline(IN baseline varchar(25), IN time_range tstzrange, IN days integer) IS 'Local server new baseline by time range';
53 |
54 | CREATE FUNCTION drop_baseline(IN server name, IN baseline varchar(25)) RETURNS integer SET search_path=@extschema@ AS $$
55 | DECLARE
56 | del_rows integer;
57 | BEGIN
58 | DELETE FROM baselines WHERE bl_name = baseline AND server_id IN (SELECT server_id FROM servers WHERE server_name = server);
59 | GET DIAGNOSTICS del_rows = ROW_COUNT;
60 | RETURN del_rows;
61 | END;
62 | $$ LANGUAGE plpgsql;
63 | COMMENT ON FUNCTION drop_baseline(IN server name, IN baseline varchar(25)) IS 'Drop baseline on server';
64 |
65 | CREATE FUNCTION drop_baseline(IN baseline varchar(25)) RETURNS integer SET search_path=@extschema@ AS $$
66 | BEGIN
67 | RETURN drop_baseline('local',baseline);
68 | END;
69 | $$ LANGUAGE plpgsql;
70 | COMMENT ON FUNCTION drop_baseline(IN baseline varchar(25)) IS 'Drop baseline on local server';
71 |
72 | CREATE FUNCTION keep_baseline(IN server name, IN baseline varchar(25) = null, IN days integer = null) RETURNS integer SET search_path=@extschema@ AS $$
73 | DECLARE
74 | upd_rows integer;
75 | BEGIN
76 | UPDATE baselines SET keep_until = now() + (days || ' days')::interval WHERE (baseline IS NULL OR bl_name = baseline) AND server_id IN (SELECT server_id FROM servers WHERE server_name = server);
77 | GET DIAGNOSTICS upd_rows = ROW_COUNT;
78 | RETURN upd_rows;
79 | END;
80 | $$ LANGUAGE plpgsql;
81 | COMMENT ON FUNCTION keep_baseline(IN server name, IN baseline varchar(25), IN days integer) IS 'Set new baseline retention on server';
82 |
83 | CREATE FUNCTION keep_baseline(IN baseline varchar(25) = null, IN days integer = null) RETURNS integer SET search_path=@extschema@ AS $$
84 | BEGIN
85 | RETURN keep_baseline('local',baseline,days);
86 | END;
87 | $$ LANGUAGE plpgsql;
88 | COMMENT ON FUNCTION keep_baseline(IN baseline varchar(25), IN days integer) IS 'Set new baseline retention on local server';
89 |
90 | CREATE FUNCTION show_baselines(IN server name = 'local')
91 | RETURNS TABLE (
92 | baseline varchar(25),
93 | min_sample integer,
94 | max_sample integer,
95 | keep_until_time timestamp (0) with time zone
96 | ) SET search_path=@extschema@ AS $$
97 | SELECT bl_name as baseline,min_sample_id,max_sample_id, keep_until
98 | FROM baselines b JOIN
99 | (SELECT server_id,bl_id,min(sample_id) min_sample_id,max(sample_id) max_sample_id FROM bl_samples GROUP BY server_id,bl_id) b_agg
100 | USING (server_id,bl_id)
101 | WHERE server_id IN (SELECT server_id FROM servers WHERE server_name = server)
102 | ORDER BY min_sample_id;
103 | $$ LANGUAGE sql;
104 | COMMENT ON FUNCTION show_baselines(IN server name) IS 'Show server baselines (local server assumed if omitted)';
105 |
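A usage sketch following the regression tests, not part of the source file (extension assumed installed in the profile schema):

    SELECT profile.create_baseline('local', 'testline1', 2, 4);  -- baseline over sample ids 2..4
    SELECT profile.keep_baseline('testline1', 30);               -- retain it for 30 days
    SELECT * FROM profile.show_baselines();
    SELECT profile.drop_baseline('testline1');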
--------------------------------------------------------------------------------
/expected/sizes_collection.out:
--------------------------------------------------------------------------------
1 | SET client_min_messages = WARNING;
2 | SET pg_profile.relsize_collect_mode = 'on';
3 | /* Test size collection sampling settings */
4 | INSERT INTO profile.grow_table (short_str,long_str)
5 | SELECT array_to_string(array
6 | (select
7 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
8 | trunc(random() * 62)::integer + 1, 1)
9 | FROM generate_series(1, 40)), ''
10 | ) as arr1,
11 | array_to_string(array
12 | (select
13 | substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
14 | trunc(random() * 62)::integer + 1, 1)
15 | FROM generate_series(1, 8000)), ''
16 | )
17 | FROM generate_series(1,5);
18 | /* Enable scheduled relation sizes collection at server*/
19 | SELECT profile.set_server_size_sampling('local',current_time - interval '10 minute',interval '30 minute',interval '2 minute','schedule');
20 | set_server_size_sampling
21 | --------------------------
22 | 1
23 | (1 row)
24 |
25 | -- check show_servers_size_sampling()
26 | SELECT server_name,window_duration,sample_interval,collect_mode FROM profile.show_servers_size_sampling();
27 | server_name | window_duration | sample_interval | collect_mode
28 | -------------+-----------------+-----------------+--------------
29 | local | @ 30 mins | @ 2 mins | schedule
30 | (1 row)
31 |
32 | -- (sample 4)
33 | SELECT server,result FROM profile.take_sample();
34 | server | result
35 | --------+--------
36 | local | OK
37 | (1 row)
38 |
39 | -- Disable relation sizes collection at server
40 | SELECT profile.set_server_size_sampling('local',null,null,null,'failure');
41 | ERROR: collect_mode can only be 'on', 'off' or 'schedule'
42 | CONTEXT: PL/pgSQL function set_server_size_sampling(name,time with time zone,interval,interval,text) line 8 at RAISE
43 | SELECT profile.set_server_size_sampling('local',null,null,null,'off');
44 | set_server_size_sampling
45 | --------------------------
46 | 1
47 | (1 row)
48 |
49 | -- (sample 5)
50 | SELECT server,result FROM profile.take_sample();
51 | server | result
52 | --------+--------
53 | local | OK
54 | (1 row)
55 |
56 | -- Enable relation sizes collection at server
57 | SELECT profile.set_server_size_sampling('local',null,null,null,'on');
58 | set_server_size_sampling
59 | --------------------------
60 | 1
61 | (1 row)
62 |
63 | -- (sample 6)
64 | SELECT server,result FROM profile.take_sample();
65 | server | result
66 | --------+--------
67 | local | OK
68 | (1 row)
69 |
70 | -- Reset relation sizes collection mode at server
71 | SELECT profile.set_server_size_sampling('local',null,null,null,null);
72 | set_server_size_sampling
73 | --------------------------
74 | 1
75 | (1 row)
76 |
77 | -- Enable relation sizes collection configuration parameter
78 | SET pg_profile.relsize_collect_mode = 'on';
79 | -- (sample 7)
80 | SELECT server,result FROM profile.take_sample();
81 | server | result
82 | --------+--------
83 | local | OK
84 | (1 row)
85 |
86 | -- Disable relation sizes collection configuration parameter
87 | SET pg_profile.relsize_collect_mode = 'off';
88 | -- (sample 8)
89 | SELECT server,result FROM profile.take_sample();
90 | server | result
91 | --------+--------
92 | local | OK
93 | (1 row)
94 |
95 | -- check show_samples()
96 | SELECT sample, sizes_collected FROM profile.show_samples() WHERE NOT sizes_collected;
97 | sample | sizes_collected
98 | --------+-----------------
99 | 4 | f
100 | 5 | f
101 | 8 | f
102 | (3 rows)
103 |
104 | -- check tables sizes collection
105 | SELECT
106 | sample_id,
107 | count(relsize) > 0 as relsize,
108 | count(relsize_diff) > 0 as relsize_diff,
109 | count(relpages_bytes) > 0 as relpages,
110 | count(relpages_bytes_diff) > 0 as relpages_diff
111 | FROM profile.sample_stat_tables GROUP BY sample_id
112 | ORDER BY sample_id;
113 | sample_id | relsize | relsize_diff | relpages | relpages_diff
114 | -----------+---------+--------------+----------+---------------
115 | 1 | t | t | t | t
116 | 2 | t | t | t | t
117 | 3 | t | t | t | t
118 | 4 | f | f | t | t
119 | 5 | f | f | t | t
120 | 6 | t | t | t | t
121 | 7 | t | t | t | t
122 | 8 | f | f | t | t
123 | (8 rows)
124 |
125 | -- check indexes sizes collection
126 | SELECT
127 | sample_id,
128 | count(relsize) > 0 as relsize,
129 | count(relsize_diff) > 0 as relsize_diff,
130 | count(relpages_bytes) > 0 as relpages,
131 | count(relpages_bytes_diff) > 0 as relpages_diff
132 | FROM profile.sample_stat_indexes GROUP BY sample_id
133 | ORDER BY sample_id;
134 | sample_id | relsize | relsize_diff | relpages | relpages_diff
135 | -----------+---------+--------------+----------+---------------
136 | 1 | t | t | t | t
137 | 2 | t | t | t | t
138 | 3 | t | t | t | t
139 | 4 | f | f | t | t
140 | 5 | f | f | t | t
141 | 6 | t | t | t | t
142 | 7 | t | t | t | t
143 | 8 | f | f | t | t
144 | (8 rows)
145 |
146 |
--------------------------------------------------------------------------------
/schema/indexes.sql:
--------------------------------------------------------------------------------
1 | /* ==== Indexes stats tables ==== */
2 | CREATE TABLE indexes_list(
3 | server_id integer NOT NULL,
4 | datid oid NOT NULL,
5 | indexrelid oid NOT NULL,
6 | relid oid NOT NULL,
7 | schemaname name NOT NULL,
8 | indexrelname name NOT NULL,
9 | last_sample_id integer,
10 | CONSTRAINT pk_indexes_list PRIMARY KEY (server_id, datid, indexrelid),
11 | CONSTRAINT fk_indexes_tables FOREIGN KEY (server_id, datid, relid)
12 | REFERENCES tables_list(server_id, datid, relid)
13 | ON DELETE NO ACTION ON UPDATE CASCADE
14 | DEFERRABLE INITIALLY IMMEDIATE,
15 | CONSTRAINT fk_indexes_list_samples FOREIGN KEY (server_id, last_sample_id)
16 | REFERENCES samples (server_id, sample_id) ON DELETE CASCADE
17 | DEFERRABLE INITIALLY IMMEDIATE
18 | );
19 | CREATE INDEX ix_indexes_list_rel ON indexes_list(server_id, datid, relid);
20 | CREATE INDEX ix_indexes_list_smp ON indexes_list(server_id, last_sample_id);
21 |
22 | COMMENT ON TABLE indexes_list IS 'Index names and schemas, captured in samples';
23 |
24 | CREATE TABLE sample_stat_indexes (
25 | server_id integer,
26 | sample_id integer,
27 | datid oid,
28 | indexrelid oid,
29 | tablespaceid oid NOT NULL,
30 | idx_scan bigint,
31 | idx_tup_read bigint,
32 | idx_tup_fetch bigint,
33 | idx_blks_read bigint,
34 | idx_blks_hit bigint,
35 | relsize bigint,
36 | relsize_diff bigint,
37 | indisunique bool,
38 | relpages_bytes bigint,
39 | relpages_bytes_diff bigint,
40 | last_idx_scan timestamp with time zone,
41 | CONSTRAINT fk_stat_indexes_indexes FOREIGN KEY (server_id, datid, indexrelid)
42 | REFERENCES indexes_list(server_id, datid, indexrelid)
43 | ON DELETE NO ACTION ON UPDATE RESTRICT
44 | DEFERRABLE INITIALLY IMMEDIATE,
45 | CONSTRAINT fk_stat_indexes_dat FOREIGN KEY (server_id, sample_id, datid)
46 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
47 | DEFERRABLE INITIALLY IMMEDIATE,
48 | CONSTRAINT fk_stat_indexes_tablespaces FOREIGN KEY (server_id, sample_id, tablespaceid)
49 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid)
50 | ON DELETE CASCADE
51 | DEFERRABLE INITIALLY IMMEDIATE,
52 | CONSTRAINT pk_sample_stat_indexes PRIMARY KEY (server_id, sample_id, datid, indexrelid)
53 | );
54 | CREATE INDEX ix_sample_stat_indexes_il ON sample_stat_indexes(server_id, datid, indexrelid);
55 | CREATE INDEX ix_sample_stat_indexes_ts ON sample_stat_indexes(server_id, sample_id, tablespaceid);
56 |
57 | COMMENT ON TABLE sample_stat_indexes IS 'Stats increments for user indexes in all databases by samples';
58 |
59 | CREATE VIEW v_sample_stat_indexes AS
60 | SELECT
61 | server_id,
62 | sample_id,
63 | datid,
64 | relid,
65 | indexrelid,
66 | tl.schemaname,
67 | tl.relname,
68 | tl.relkind,
69 | il.indexrelname,
70 | idx_scan,
71 | idx_tup_read,
72 | idx_tup_fetch,
73 | idx_blks_read,
74 | idx_blks_hit,
75 | relsize,
76 | relsize_diff,
77 | tablespaceid,
78 | indisunique,
79 | relpages_bytes,
80 | relpages_bytes_diff,
81 | last_idx_scan
82 | FROM
83 | sample_stat_indexes s
84 | JOIN indexes_list il USING (datid, indexrelid, server_id)
85 | JOIN tables_list tl USING (datid, relid, server_id);
86 | COMMENT ON VIEW v_sample_stat_indexes IS 'Reconstructed stats view with table and index names and schemas';
87 |
88 | CREATE TABLE last_stat_indexes (
89 | server_id integer,
90 | sample_id integer,
91 | datid oid,
92 | relid oid NOT NULL,
93 | indexrelid oid,
94 | schemaname name,
95 | relname name,
96 | indexrelname name,
97 | idx_scan bigint,
98 | idx_tup_read bigint,
99 | idx_tup_fetch bigint,
100 | idx_blks_read bigint,
101 | idx_blks_hit bigint,
102 | relsize bigint,
103 | relsize_diff bigint,
104 | tablespaceid oid NOT NULL,
105 | indisunique bool,
106 | in_sample boolean NOT NULL DEFAULT false,
107 | relpages_bytes bigint,
108 | relpages_bytes_diff bigint,
109 | last_idx_scan timestamp with time zone,
110 | reloptions jsonb
111 | )
112 | PARTITION BY LIST (server_id);
113 | COMMENT ON TABLE last_stat_indexes IS 'Last sample data for calculating diffs in next sample';
114 |
115 | CREATE TABLE sample_stat_indexes_total (
116 | server_id integer,
117 | sample_id integer,
118 | datid oid,
119 | tablespaceid oid,
120 | idx_scan bigint,
121 | idx_tup_read bigint,
122 | idx_tup_fetch bigint,
123 | idx_blks_read bigint,
124 | idx_blks_hit bigint,
125 | relsize_diff bigint,
126 | CONSTRAINT fk_stat_indexes_tot_dat FOREIGN KEY (server_id, sample_id, datid)
127 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
128 | DEFERRABLE INITIALLY IMMEDIATE,
129 | CONSTRAINT fk_stat_tablespaces_tot_dat FOREIGN KEY (server_id, sample_id, tablespaceid)
130 | REFERENCES sample_stat_tablespaces(server_id, sample_id, tablespaceid) ON DELETE CASCADE
131 | DEFERRABLE INITIALLY IMMEDIATE,
132 | CONSTRAINT pk_sample_stat_indexes_tot PRIMARY KEY (server_id, sample_id, datid, tablespaceid)
133 | );
134 | CREATE INDEX ix_sample_stat_indexes_total_ts ON sample_stat_indexes_total(server_id, sample_id, tablespaceid);
135 |
136 | COMMENT ON TABLE sample_stat_indexes_total IS 'Total stats for indexes in all databases by samples';
137 |
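A hedged reporting sketch, not part of the source file: v_sample_stat_indexes joins the per-sample increments back to index and table names, so, for example, indexes unused across a sample range can be listed directly (identifiers below are placeholders):

    SELECT schemaname, relname, indexrelname, sum(idx_scan) AS scans
      FROM v_sample_stat_indexes
     WHERE server_id = 1 AND sample_id BETWEEN 2 AND 4
     GROUP BY schemaname, relname, indexrelname
     HAVING sum(idx_scan) = 0;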
--------------------------------------------------------------------------------
/expected/retention_and_baselines.out:
--------------------------------------------------------------------------------
1 | UPDATE profile.samples
2 | SET sample_time = now() - (5 - sample_id) * '1 day'::interval - '10 minutes'::interval
3 | WHERE sample_id <= 5;
4 | ALTER TABLE profile.test_rel_storage_params1 RESET (autovacuum_vacuum_threshold,fillfactor,autovacuum_enabled);
5 | ALTER INDEX profile.ix_test_rel_storage_params_id1 RESET (fillfactor);
6 | ALTER INDEX profile.ix_test_rel_storage_params_val1 RESET (fillfactor);
7 | SELECT server,result FROM profile.take_sample();
8 | server | result
9 | --------+--------
10 | local | OK
11 | (1 row)
12 |
13 | SELECT relname, t.last_sample_id, reloptions
14 | FROM profile.table_storage_parameters t JOIN profile.tables_list
15 | USING (server_id, relid)
16 | WHERE relname IN ('test_rel_storage_params1','test_rel_storage_params2') ORDER BY relid, last_sample_id;
17 | relname | last_sample_id | reloptions
18 | --------------------------+----------------+---------------------------------------------------------------------------------------------
19 | test_rel_storage_params1 | 8 | ["autovacuum_vacuum_threshold=50", "fillfactor=100", "autovacuum_enabled=true"]
20 | test_rel_storage_params2 | | ["vacuum_index_cleanup=true", "vacuum_truncate=true", "autovacuum_vacuum_scale_factor=0.2"]
21 | (2 rows)
22 |
23 | SELECT relname, indexrelname, i.last_sample_id, reloptions
24 | FROM profile.index_storage_parameters i JOIN profile.tables_list
25 | USING (server_id, relid)
26 | JOIN profile.indexes_list USING (server_id, relid, indexrelid)
27 | WHERE relname IN ('test_rel_storage_params1','test_rel_storage_params2') ORDER BY relid, indexrelid, last_sample_id;
28 | relname | indexrelname | last_sample_id | reloptions
29 | --------------------------+---------------------------------+----------------+--------------------
30 | test_rel_storage_params1 | ix_test_rel_storage_params_id1 | 8 | ["fillfactor=100"]
31 | test_rel_storage_params1 | ix_test_rel_storage_params_val1 | 8 | ["fillfactor=90"]
32 | test_rel_storage_params2 | ix_test_rel_storage_params_id2 | | ["fillfactor=80"]
33 | test_rel_storage_params2 | ix_test_rel_storage_params_val2 | | ["fillfactor=70"]
34 | (4 rows)
35 |
36 | BEGIN;
37 | SELECT profile.delete_samples();
38 | delete_samples
39 | ----------------
40 | 8
41 | (1 row)
42 |
43 | SELECT sample FROM profile.show_samples() ORDER BY sample;
44 | sample
45 | --------
46 | 9
47 | (1 row)
48 |
49 | ROLLBACK;
50 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
51 | count
52 | -------
53 | 4
54 | (1 row)
55 |
56 | SELECT * FROM profile.set_server_max_sample_age('local',1);
57 | set_server_max_sample_age
58 | ---------------------------
59 | 1
60 | (1 row)
61 |
62 | /* Testing baseline creation */
63 | SELECT * FROM profile.create_baseline('testline1',2,4);
64 | create_baseline
65 | -----------------
66 | 1
67 | (1 row)
68 |
69 | BEGIN;
70 | SELECT profile.delete_samples('local',tstzrange(
71 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1),
72 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5),
73 | '[]'
74 | )
75 | );
76 | delete_samples
77 | ----------------
78 | 2
79 | (1 row)
80 |
81 | SELECT sample FROM profile.show_samples() ORDER BY sample;
82 | sample
83 | --------
84 | 2
85 | 3
86 | 4
87 | 6
88 | 7
89 | 8
90 | 9
91 | (7 rows)
92 |
93 | ROLLBACK;
94 | BEGIN;
95 | SELECT profile.delete_samples(tstzrange(
96 | (SELECT sample_time FROM profile.samples WHERE sample_id = 1),
97 | (SELECT sample_time FROM profile.samples WHERE sample_id = 5),
98 | '[]'
99 | )
100 | );
101 | delete_samples
102 | ----------------
103 | 2
104 | (1 row)
105 |
106 | SELECT sample FROM profile.show_samples() ORDER BY sample;
107 | sample
108 | --------
109 | 2
110 | 3
111 | 4
112 | 6
113 | 7
114 | 8
115 | 9
116 | (7 rows)
117 |
118 | ROLLBACK;
119 | SELECT * FROM profile.create_baseline('testline2',2,4);
120 | create_baseline
121 | -----------------
122 | 2
123 | (1 row)
124 |
125 | SELECT count(*) FROM profile.baselines;
126 | count
127 | -------
128 | 2
129 | (1 row)
130 |
131 | SELECT * FROM profile.keep_baseline('testline2',-1);
132 | keep_baseline
133 | ---------------
134 | 1
135 | (1 row)
136 |
137 | /* Testing baseline show */
138 | SELECT baseline, min_sample, max_sample, keep_until_time IS NULL
139 | FROM profile.show_baselines()
140 | ORDER BY baseline;
141 | baseline | min_sample | max_sample | ?column?
142 | -----------+------------+------------+----------
143 | testline1 | 2 | 4 | t
144 | testline2 | 2 | 4 | f
145 | (2 rows)
146 |
147 | /* Testing baseline deletion */
148 | SELECT server,result FROM profile.take_sample();
149 | server | result
150 | --------+--------
151 | local | OK
152 | (1 row)
153 |
154 | SELECT count(*) FROM profile.baselines;
155 | count
156 | -------
157 | 1
158 | (1 row)
159 |
160 | /* Testing samples retention override with baseline */
161 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
162 | count
163 | -------
164 | 3
165 | (1 row)
166 |
167 | SELECT * FROM profile.drop_baseline('testline1');
168 | drop_baseline
169 | ---------------
170 | 1
171 | (1 row)
172 |
173 | /* Testing samples deletion after baseline removed */
174 | SELECT server,result FROM profile.take_sample();
175 | server | result
176 | --------+--------
177 | local | OK
178 | (1 row)
179 |
180 | SELECT count(*) FROM profile.samples WHERE sample_time < now() - '1 days'::interval;
181 | count
182 | -------
183 | 0
184 | (1 row)
185 |
186 |
--------------------------------------------------------------------------------
/expected/server_management.out:
--------------------------------------------------------------------------------
1 | /* == Testing server management functions == */
2 | SELECT profile.create_server('srvtest','dbname=postgres host=localhost port=5432', TRUE, NULL, 'Server description 1');
3 | create_server
4 | ---------------
5 | 2
6 | (1 row)
7 |
8 | SELECT server_id, server_name, server_description, db_exclude,
9 | enabled, connstr, max_sample_age, last_sample_id
10 | FROM profile.servers WHERE server_name != 'local';
11 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id
12 | -----------+-------------+----------------------+------------+---------+------------------------------------------+----------------+----------------
13 | 2 | srvtest | Server description 1 | | t | dbname=postgres host=localhost port=5432 | | 0
14 | (1 row)
15 |
16 | SELECT profile.rename_server('srvtest','srvtestrenamed');
17 | rename_server
18 | ---------------
19 | 1
20 | (1 row)
21 |
22 | SELECT profile.set_server_connstr('srvtestrenamed','dbname=postgres host=localhost port=5433');
23 | set_server_connstr
24 | --------------------
25 | 1
26 | (1 row)
27 |
28 | SELECT profile.set_server_description('srvtestrenamed','Server description 2');
29 | set_server_description
30 | ------------------------
31 | 1
32 | (1 row)
33 |
34 | SELECT profile.set_server_db_exclude('srvtestrenamed',ARRAY['db1','db2','db3']);
35 | set_server_db_exclude
36 | -----------------------
37 | 1
38 | (1 row)
39 |
40 | SELECT profile.set_server_max_sample_age('srvtestrenamed',3);
41 | set_server_max_sample_age
42 | ---------------------------
43 | 1
44 | (1 row)
45 |
46 | -- settings validation test
47 | SELECT profile.set_server_setting('srvtestrenamed','name_failure','test');
48 | ERROR: Unsupported setting
49 | CONTEXT: PL/pgSQL function set_server_setting(name,text,text) line 53 at RAISE
50 | SELECT profile.set_server_size_sampling( server => 'srvtestrenamed', collect_mode => 'on' );
51 | set_server_size_sampling
52 | --------------------------
53 | 1
54 | (1 row)
55 |
56 | SELECT profile.set_server_setting('srvtestrenamed','collect_vacuum_stats','value_failure');
57 | ERROR: Value for collection conditions should be boolean: invalid input syntax for type boolean: "value_failure"
58 | DETAIL:
59 | CONTEXT: PL/pgSQL function set_server_setting(name,text,text) line 26 at RAISE
60 | SELECT profile.set_server_setting('srvtestrenamed','collect_vacuum_stats','on');
61 | set_server_setting
62 | --------------------
63 | 1
64 | (1 row)
65 |
66 | SELECT srv_settings::text FROM profile.servers ORDER BY server_id;
67 | srv_settings
68 | -------------------------------------------------------------------------
69 |
70 | {"collect": {"vacuum_stats": true}, "relsizes": {"collect_mode": "on"}}
71 | (2 rows)
72 |
73 | SELECT * FROM profile.show_server_settings('srvtestrenamed');
74 | scope | setting | value
75 | ----------+--------------+-------
76 | collect | vacuum_stats | true
77 | relsizes | collect_mode | "on"
78 | (2 rows)
79 |
80 | SELECT profile.set_server_setting('srvtestrenamed','collect_vacuum_stats');
81 | set_server_setting
82 | --------------------
83 | 1
84 | (1 row)
85 |
86 | SELECT * FROM profile.show_server_settings('srvtestrenamed');
87 | scope | setting | value
88 | -------+---------+-------
89 | (0 rows)
90 |
91 | SELECT server_id, server_name, server_description, db_exclude,
92 | enabled, connstr, max_sample_age, last_sample_id
93 | FROM profile.servers WHERE server_name != 'local';
94 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id
95 | -----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+----------------
96 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | t | dbname=postgres host=localhost port=5433 | 3 | 0
97 | (1 row)
98 |
99 | SELECT profile.disable_server('srvtestrenamed');
100 | disable_server
101 | ----------------
102 | 1
103 | (1 row)
104 |
105 | SELECT server_id, server_name, server_description, db_exclude,
106 | enabled, connstr, max_sample_age, last_sample_id
107 | FROM profile.servers WHERE server_name != 'local';
108 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id
109 | -----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+----------------
110 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | f | dbname=postgres host=localhost port=5433 | 3 | 0
111 | (1 row)
112 |
113 | SELECT profile.enable_server('srvtestrenamed');
114 | enable_server
115 | ---------------
116 | 1
117 | (1 row)
118 |
119 | SELECT server_id, server_name, server_description, db_exclude,
120 | enabled, connstr, max_sample_age, last_sample_id
121 | FROM profile.servers WHERE server_name != 'local';
122 | server_id | server_name | server_description | db_exclude | enabled | connstr | max_sample_age | last_sample_id
123 | -----------+----------------+----------------------+---------------+---------+------------------------------------------+----------------+----------------
124 | 2 | srvtestrenamed | Server description 2 | {db1,db2,db3} | t | dbname=postgres host=localhost port=5433 | 3 | 0
125 | (1 row)
126 |
127 | SELECT * FROM profile.show_servers() where server_name != 'local';
128 | server_name | connstr | enabled | max_sample_age | description
129 | ----------------+------------------------------------------+---------+----------------+----------------------
130 | srvtestrenamed | dbname=postgres host=localhost port=5433 | t | 3 | Server description 2
131 | (1 row)
132 |
133 | SELECT * FROM profile.drop_server('srvtestrenamed');
134 | drop_server
135 | -------------
136 | 1
137 | (1 row)
138 |
139 |
--------------------------------------------------------------------------------
/report/functions/stat_slru.sql:
--------------------------------------------------------------------------------
1 | CREATE FUNCTION cluster_stat_slru(IN sserver_id integer,
2 | IN start_id integer, IN end_id integer)
3 | RETURNS TABLE(
4 | server_id integer,
5 | name text,
6 | blks_zeroed bigint,
7 | blks_hit bigint,
8 | blks_read bigint,
9 | blks_written bigint,
10 | blks_exists bigint,
11 | flushes bigint,
12 | truncates bigint
13 | )
14 | SET search_path=@extschema@ AS $$
15 | SELECT
16 | st.server_id AS server_id,
17 | st.name AS name,
18 | SUM(blks_zeroed)::bigint AS blks_zeroed,
19 | SUM(blks_hit)::bigint AS blks_hit,
20 | SUM(blks_read)::bigint AS blks_read,
21 | SUM(blks_written)::bigint AS blks_written,
22 | SUM(blks_exists)::bigint AS blks_exists,
23 | SUM(flushes)::bigint AS flushes,
24 | SUM(truncates)::bigint AS truncates
25 | FROM sample_stat_slru st
26 | WHERE st.server_id = sserver_id AND st.sample_id BETWEEN start_id + 1 AND end_id
27 | GROUP BY st.server_id, st.name
28 | $$ LANGUAGE sql;
29 |
30 | CREATE FUNCTION cluster_stat_slru_format(IN sserver_id integer,
31 | IN start_id integer, IN end_id integer)
32 | RETURNS TABLE(
33 | name text,
34 |
35 | blks_zeroed bigint,
36 | blks_hit bigint,
37 | blks_read bigint,
38 | hit_pct numeric,
39 | blks_written bigint,
40 | blks_exists bigint,
41 | flushes bigint,
42 | truncates bigint
43 | ) SET search_path=@extschema@ AS $$
44 | SELECT
45 | COALESCE(name, 'Total') AS name,
46 |
47 | NULLIF(SUM(blks_zeroed), 0)::bigint AS blks_zeroed,
48 | NULLIF(SUM(blks_hit), 0)::bigint AS blks_hit,
49 | NULLIF(SUM(blks_read), 0)::bigint AS blks_read,
50 | ROUND(NULLIF(SUM(blks_hit), 0)::numeric * 100 /
51 | NULLIF(COALESCE(SUM(blks_hit), 0) + COALESCE(SUM(blks_read), 0), 0), 2)
52 | AS hit_pct,
53 | NULLIF(SUM(blks_written), 0)::bigint AS blks_written,
54 | NULLIF(SUM(blks_exists), 0)::bigint AS blks_exists,
55 | NULLIF(SUM(flushes), 0)::bigint AS flushes,
56 | NULLIF(SUM(truncates), 0)::bigint AS truncates
57 |
58 | FROM cluster_stat_slru(sserver_id, start_id, end_id)
59 | GROUP BY ROLLUP(name)
60 | ORDER BY NULLIF(name, 'Total') ASC NULLS LAST
61 | $$ LANGUAGE sql;
62 |
63 | CREATE FUNCTION cluster_stat_slru_format(IN sserver_id integer,
64 | IN start1_id integer, IN end1_id integer,
65 | IN start2_id integer, IN end2_id integer)
66 | RETURNS TABLE(
67 | name text,
68 |
69 | blks_zeroed1 bigint,
70 | blks_hit1 bigint,
71 | blks_read1 bigint,
72 | hit_pct1 numeric,
73 | blks_written1 bigint,
74 | blks_exists1 bigint,
75 | flushes1 bigint,
76 | truncates1 bigint,
77 |
78 | blks_zeroed2 bigint,
79 | blks_hit2 bigint,
80 | blks_read2 bigint,
81 | hit_pct2 numeric,
82 | blks_written2 bigint,
83 | blks_exists2 bigint,
84 | flushes2 bigint,
85 | truncates2 bigint
86 | ) SET search_path=@extschema@ AS $$
87 | SELECT
88 | COALESCE(name, 'Total') AS name,
89 |
90 | NULLIF(SUM(st1.blks_zeroed), 0)::bigint AS blks_zeroed1,
91 | NULLIF(SUM(st1.blks_hit), 0)::bigint AS blks_hit1,
92 | NULLIF(SUM(st1.blks_read), 0)::bigint AS blks_read1,
93 | ROUND(NULLIF(SUM(st1.blks_hit), 0)::numeric * 100 /
94 | NULLIF(COALESCE(SUM(st1.blks_hit), 0) + COALESCE(SUM(st1.blks_read), 0), 0), 2)
95 | AS hit_pct1,
96 | NULLIF(SUM(st1.blks_written), 0)::bigint AS blks_written1,
97 | NULLIF(SUM(st1.blks_exists), 0)::bigint AS blks_exists1,
98 | NULLIF(SUM(st1.flushes), 0)::bigint AS flushes1,
99 | NULLIF(SUM(st1.truncates), 0)::bigint AS truncates1,
100 |
101 | NULLIF(SUM(st2.blks_zeroed), 0)::bigint AS blks_zeroed2,
102 | NULLIF(SUM(st2.blks_hit), 0)::bigint AS blks_hit2,
103 | NULLIF(SUM(st2.blks_read), 0)::bigint AS blks_read2,
104 | ROUND(NULLIF(SUM(st2.blks_hit), 0)::numeric * 100 /
105 | NULLIF(COALESCE(SUM(st2.blks_hit), 0) + COALESCE(SUM(st2.blks_read), 0), 0), 2)
106 | AS hit_pct2,
107 | NULLIF(SUM(st2.blks_written), 0)::bigint AS blks_written2,
108 | NULLIF(SUM(st2.blks_exists), 0)::bigint AS blks_exists2,
109 | NULLIF(SUM(st2.flushes), 0)::bigint AS flushes2,
110 | NULLIF(SUM(st2.truncates), 0)::bigint AS truncates2
111 |
112 | FROM cluster_stat_slru(sserver_id, start1_id, end1_id) st1
113 | FULL OUTER JOIN cluster_stat_slru(sserver_id, start2_id, end2_id) st2
114 | USING (server_id, name)
115 | GROUP BY ROLLUP(name)
116 | ORDER BY NULLIF(name, 'Total') ASC NULLS LAST
117 | $$ LANGUAGE sql;
118 |
119 | CREATE FUNCTION cluster_stat_slru_resets(IN sserver_id integer,
120 | IN start_id integer, IN end_id integer)
121 | RETURNS TABLE(
122 | server_id integer,
123 | sample_id integer,
124 | name text,
125 | stats_reset timestamp with time zone
126 | )
127 | SET search_path=@extschema@ AS $$
128 | SELECT
129 | server_id,
130 | min(sample_id) AS sample_id,
131 | name,
132 | stats_reset
133 | FROM (
134 | SELECT
135 | server_id,
136 | name,
137 | sample_id,
138 | stats_reset,
139 | stats_reset IS DISTINCT FROM first_value(stats_reset) OVER (PARTITION BY server_id, name ORDER BY sample_id) AS stats_reset_changed
140 | FROM sample_stat_slru
141 | WHERE server_id = sserver_id AND sample_id BETWEEN start_id AND end_id) st
142 | WHERE st.stats_reset_changed
143 | GROUP BY server_id, name, stats_reset;
144 | $$ LANGUAGE sql;
145 |
146 | CREATE FUNCTION cluster_stat_slru_reset_format(IN sserver_id integer,
147 | IN start_id integer, IN end_id integer)
148 | RETURNS TABLE(
149 | sample_id integer,
150 | name text,
151 | stats_reset timestamp with time zone
152 | ) SET search_path=@extschema@ AS $$
153 | SELECT
154 | sample_id,
155 | name,
156 | stats_reset
157 | FROM cluster_stat_slru_resets(sserver_id, start_id, end_id)
158 | ORDER BY sample_id ASC
159 | $$ LANGUAGE sql;
160 |
161 | CREATE FUNCTION cluster_stat_slru_reset_format(IN sserver_id integer,
162 | IN start1_id integer, IN end1_id integer, IN start2_id integer, IN end2_id integer)
163 | RETURNS TABLE(
164 | sample_id integer,
165 | name text,
166 | stats_reset timestamp with time zone
167 | ) SET search_path=@extschema@ AS $$
168 | SELECT
169 | sample_id,
170 | name,
171 | stats_reset
172 | FROM (
173 | SELECT
174 | sample_id,
175 | name,
176 | stats_reset
177 | FROM cluster_stat_slru_resets(sserver_id, start1_id, end1_id)
178 | UNION
179 | SELECT
180 | sample_id,
181 | name,
182 | stats_reset
183 | FROM cluster_stat_slru_resets(sserver_id, start2_id, end2_id)
184 | ) st
185 | ORDER BY sample_id ASC
186 | $$ LANGUAGE sql;
187 |
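A brief usage sketch of the single-interval formatter above — the `profile` schema matches the regression tests elsewhere in this dump, while the server and sample IDs (1, 1, 10) are placeholders:

    SELECT name, blks_zeroed, blks_hit, blks_read, hit_pct
      FROM profile.cluster_stat_slru_format(1, 1, 10);

The 'Total' row produced by the ROLLUP always sorts last because NULLIF(name, 'Total') maps it to NULL and the ORDER BY uses NULLS LAST.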
--------------------------------------------------------------------------------
/report/report.sql:
--------------------------------------------------------------------------------
1 | /* ===== Main report function ===== */
2 |
3 | CREATE FUNCTION get_report(IN sserver_id integer, IN start_id integer, IN end_id integer,
4 | IN description text = NULL, IN with_growth boolean = false,
5 | IN db_exclude name[] = NULL) RETURNS text SET search_path=@extschema@ AS $$
6 | DECLARE
7 | report text;
8 | report_data jsonb;
9 | report_context jsonb;
10 | BEGIN
11 |     -- Expand the interval when growth stats are requested
12 | IF with_growth THEN
13 | BEGIN
14 | SELECT left_bound, right_bound INTO STRICT start_id, end_id
15 | FROM get_sized_bounds(sserver_id, start_id, end_id);
16 | EXCEPTION
17 | WHEN OTHERS THEN
18 | RAISE 'Samples with sizes collected for requested interval (%) not found',
19 | format('%s - %s',start_id, end_id);
20 | END;
21 | END IF;
22 |
23 |     -- Get the report context and check conditions
24 | report_context := get_report_context(sserver_id, start_id, end_id, description);
25 |
26 | -- Prepare report template
27 | report := get_report_template(report_context, 1);
28 | -- Populate template with report data
29 | report_data := sections_jsonb(report_context, sserver_id, 1);
30 | report_data := jsonb_set(report_data, '{datasets}',
31 | get_report_datasets(report_context, sserver_id, db_exclude));
32 | report := replace(report, '{dynamic:data1}', report_data::text);
33 |
34 | RETURN report;
35 | END;
36 | $$ LANGUAGE plpgsql;
37 |
38 | COMMENT ON FUNCTION get_report(IN sserver_id integer, IN start_id integer, IN end_id integer,
39 | IN description text, IN with_growth boolean, IN db_exclude name[])
40 | IS 'Statistics report generation function. Takes server_id and IDs of start and end sample (inclusive).';
41 |
42 | CREATE FUNCTION get_report(IN server name, IN start_id integer, IN end_id integer,
43 | IN description text = NULL, IN with_growth boolean = false,
44 | IN db_exclude name[] = NULL)
45 | RETURNS text SET search_path=@extschema@ AS $$
46 | SELECT get_report(get_server_by_name(server), start_id, end_id,
47 | description, with_growth, db_exclude);
48 | $$ LANGUAGE sql;
49 | COMMENT ON FUNCTION get_report(IN server name, IN start_id integer, IN end_id integer,
50 | IN description text, IN with_growth boolean, IN db_exclude name[])
51 | IS 'Statistics report generation function. Takes server name and IDs of start and end sample (inclusive).';
52 |
53 | CREATE FUNCTION get_report(IN start_id integer, IN end_id integer,
54 | IN description text = NULL, IN with_growth boolean = false,
55 | IN db_exclude name[] = NULL)
56 | RETURNS text SET search_path=@extschema@ AS $$
57 | SELECT get_report('local',start_id,end_id,description,with_growth,db_exclude);
58 | $$ LANGUAGE sql;
59 | COMMENT ON FUNCTION get_report(IN start_id integer, IN end_id integer,
60 | IN description text, IN with_growth boolean, IN db_exclude name[])
61 | IS 'Statistics report generation function for local server. Takes IDs of start and end sample (inclusive).';
62 |
63 | CREATE FUNCTION get_report(IN sserver_id integer, IN time_range tstzrange,
64 | IN description text = NULL, IN with_growth boolean = false,
65 | IN db_exclude name[] = NULL)
66 | RETURNS text SET search_path=@extschema@ AS $$
67 | SELECT get_report(sserver_id, start_id, end_id, description, with_growth, db_exclude)
68 | FROM get_sampleids_by_timerange(sserver_id, time_range)
69 | $$ LANGUAGE sql;
70 | COMMENT ON FUNCTION get_report(IN sserver_id integer, IN time_range tstzrange,
71 | IN description text, IN with_growth boolean, IN db_exclude name[])
72 | IS 'Statistics report generation function. Takes server ID and time interval.';
73 |
74 | CREATE FUNCTION get_report(IN server name, IN time_range tstzrange,
75 | IN description text = NULL, IN with_growth boolean = false,
76 | IN db_exclude name[] = NULL)
77 | RETURNS text SET search_path=@extschema@ AS $$
78 | SELECT get_report(get_server_by_name(server), start_id, end_id, description, with_growth, db_exclude)
79 | FROM get_sampleids_by_timerange(get_server_by_name(server), time_range)
80 | $$ LANGUAGE sql;
81 | COMMENT ON FUNCTION get_report(IN server name, IN time_range tstzrange,
82 | IN description text, IN with_growth boolean, IN db_exclude name[])
83 | IS 'Statistics report generation function. Takes server name and time interval.';
84 |
85 | CREATE FUNCTION get_report(IN time_range tstzrange, IN description text = NULL,
86 | IN with_growth boolean = false,
87 | IN db_exclude name[] = NULL)
88 | RETURNS text SET search_path=@extschema@ AS $$
89 | SELECT get_report(get_server_by_name('local'), start_id, end_id, description, with_growth, db_exclude)
90 | FROM get_sampleids_by_timerange(get_server_by_name('local'), time_range)
91 | $$ LANGUAGE sql;
92 | COMMENT ON FUNCTION get_report(IN time_range tstzrange,
93 | IN description text, IN with_growth boolean, IN db_exclude name[])
94 | IS 'Statistics report generation function for local server. Takes time interval.';
95 |
96 | CREATE FUNCTION get_report(IN server name, IN baseline varchar(25),
97 | IN description text = NULL, IN with_growth boolean = false,
98 | IN db_exclude name[] = NULL)
99 | RETURNS text SET search_path=@extschema@ AS $$
100 | SELECT get_report(get_server_by_name(server), start_id, end_id, description, with_growth, db_exclude)
101 | FROM get_baseline_samples(get_server_by_name(server), baseline)
102 | $$ LANGUAGE sql;
103 | COMMENT ON FUNCTION get_report(IN server name, IN baseline varchar(25),
104 | IN description text, IN with_growth boolean, IN db_exclude name[])
105 | IS 'Statistics report generation function for server baseline. Takes server name and baseline name.';
106 |
107 | CREATE FUNCTION get_report(IN baseline varchar(25), IN description text = NULL,
108 | IN with_growth boolean = false,
109 | IN db_exclude name[] = NULL)
110 | RETURNS text SET search_path=@extschema@ AS $$
111 | BEGIN
112 | RETURN get_report('local',baseline,description,with_growth,db_exclude);
113 | END;
114 | $$ LANGUAGE plpgsql;
115 | COMMENT ON FUNCTION get_report(IN baseline varchar(25),
116 | IN description text, IN with_growth boolean, IN db_exclude name[])
117 | IS 'Statistics report generation function for local server baseline. Takes baseline name.';
118 |
119 | CREATE FUNCTION get_report_latest(IN server name = NULL,
120 | IN db_exclude name[] = NULL)
121 | RETURNS text SET search_path=@extschema@ AS $$
122 | SELECT get_report(srv.server_id, s.sample_id, e.sample_id, NULL, false, db_exclude)
123 | FROM samples s JOIN samples e ON (s.server_id = e.server_id AND s.sample_id = e.sample_id - 1)
124 | JOIN servers srv ON (e.server_id = srv.server_id AND e.sample_id = srv.last_sample_id)
125 | WHERE srv.server_name = COALESCE(server, 'local')
126 | $$ LANGUAGE sql;
127 | COMMENT ON FUNCTION get_report_latest(IN server name, IN db_exclude name[]) IS 'Statistics report generation function for last two samples';
128 |
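A short usage sketch covering the main overloads defined above (the `profile` schema follows the regression tests; the server name, sample IDs, time range and baseline name are placeholders):

    SELECT profile.get_report(1, 5);                          -- local server, samples 1..5
    SELECT profile.get_report('local',
        tstzrange(now() - interval '1 day', now()));          -- server name and time range
    SELECT profile.get_report('local', 'mybaseline');         -- server name and baseline name
    SELECT profile.get_report_latest();                       -- last two samples of the 'local' server

Each call returns the whole report as a single text value, so in psql it can be written to a file with \o or \g before viewing it in a browser.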
--------------------------------------------------------------------------------
/data/static/js/utilities.js:
--------------------------------------------------------------------------------
1 | class Utilities {
2 | /**
3 | * Sorting JSON array and returning sorted clone with array of Objects
4 | * @param data JSON array
5 | * @param key string with key for sorting
6 | * @param direction direction of sorting (1 means ASC, -1 means DESC)
7 | * @returns array of Objects
8 | */
9 | static sort(data, key, direction) {
10 | return structuredClone(data.sort((a, b) => {
11 | /** Order index */
12 | if (a[key] < b[key]) {
13 | return -1 * direction;
14 | } else if (a[key] > b[key]) {
15 | return direction;
16 | } else {
17 | return 0;
18 | }
19 | }))
20 | }
21 |
22 | static sum(data, key) {
23 | return data.reduce((partialSum, a) => partialSum + a[key], 0);
24 | }
25 |
26 | /** Advanced filter */
27 | static filter(data, key) {
28 | if (key.type === "exists") {
29 | if (data.every(obj => key["field"] in obj)) {
30 | return structuredClone(data.filter(obj => obj[key["field"]]));
31 | }
32 | } else if (key.type === "equal") {
33 | if (data.every(obj => key["field"] in obj)) {
34 | return structuredClone(data.filter(obj => obj[key["field"]] === key["value"]));
35 | }
36 | }
37 | return data;
38 | }
39 |
40 | static find(data, key, value) {
41 | return structuredClone(data.filter(obj => obj[key] === value));
42 | }
43 |
44 | /** Limit array of Objects */
45 | static limit(data, num) {
46 | if (num > 0) {
47 | return structuredClone(data.slice(0, num));
48 | }
49 | return data;
50 | }
51 |
52 | static getInputField() {
53 | return document.getElementById('inputField');
54 | }
55 |
56 | static cancelSearchResults(rowsForSearch) {
57 | rowsForSearch.forEach(row => {
58 | row.style.display = '';
59 | })
60 | }
61 |
62 | static searchQueryWithStatistics(rowsForSearch, keyword) {
63 | let foundQueries = Utilities.searchQueryText(keyword);
64 |
65 | rowsForSearch.forEach(row => {
66 | if (row.dataset["hexqueryid"]
67 | && foundQueries[row.dataset["hexqueryid"]]) {
68 | row.style.display = '';
69 | } else {
70 | row.style.display = 'none';
71 | if (row.nextSibling && row.nextSibling.classList.contains('queryRow')) {
72 | row.nextSibling.style.display = 'none';
73 | }
74 | }
75 | })
76 | }
77 |
78 | static preprocessQueryString(queryString, limit) {
79 | let etc = '';
80 | queryString = queryString.split(',').join(', ');
81 | queryString = queryString.split('+').join(' + ');
82 | queryString = queryString.split('/').join(' / ');
83 |
84 | if (limit) {
85 | if (queryString.length > limit) {
86 | queryString = queryString.substring(0, limit);
87 | etc = ' ...'
88 | }
89 | }
90 |
91 | return `${queryString}${etc}`
92 | }
93 |
94 | static searchQueryText(keyword) {
95 | let foundQueries = {};
96 | data.datasets.queries.forEach(query => {
97 | /** Search in query texts */
98 | Object.keys(query).forEach(key => {
99 | query.query_texts.forEach(query_text => {
100 | if (query_text && query_text.toLowerCase().includes(keyword) && !foundQueries[query["hexqueryid"]]) {
101 | foundQueries[query["hexqueryid"]] = true;
102 | }
103 | })
104 | })
105 | /** Search in plan texts */
106 | if (query.plans) {
107 | query.plans.forEach(plan => {
108 | if (plan.plan_text.toLowerCase().includes(keyword) && !foundQueries[query["hexqueryid"]]) {
109 | foundQueries[query["hexqueryid"]] = true;
110 | }
111 | })
112 | }
113 | })
114 | return foundQueries;
115 | }
116 |
117 | static searchWithParam(rowsForSearch, keyword, searchParam) {
118 | let foundQueries = {};
119 | /** if we search everywhere, then first we need to
120 | * find the keyword in the query texts, and then
121 | * we display all the lines related to this query
122 | */
123 | if (searchParam === 'all') {
124 | foundQueries = Utilities.searchQueryText(keyword)
125 | }
126 |
127 | rowsForSearch.forEach(row => {
128 | /** If dataset[searchParam] exists and has substring with keyword */
129 | if (row.dataset[searchParam] && row.dataset[searchParam].toLowerCase().includes(keyword)) {
130 | row.style.display = '';
131 | /** If dataset[searchParam] has hexqueryid, then put it into foundQueries collection */
132 | if (row.dataset["hexqueryid"]) {
133 | foundQueries[row.dataset["hexqueryid"]] = true;
134 | }
135 | } else {
136 | row.style.display = 'none';
137 | if (row.nextSibling && row.nextSibling.classList.contains('queryRow')) {
138 | row.nextSibling.style.display = 'none';
139 | }
140 | }
141 | })
142 |
143 | rowsForSearch.forEach(row => {
144 |             /** Rows from the query texts table (sqllist_t), or any row when searching across all fields */
145 | if (row.parentNode.id === 'sqllist_t' || searchParam === 'all') {
146 |                 /** Show the row if its hexqueryid was found */
147 | if (foundQueries[row.dataset["hexqueryid"]]) {
148 | row.style.display = '';
149 | }
150 | } else {
151 |                 /** Otherwise, show the row only if its query was found and dataset[searchParam] contains the keyword */
152 | if (foundQueries[row.dataset["hexqueryid"]] && row.dataset[searchParam].toLowerCase().includes(keyword)) {
153 | row.style.display = '';
154 | }
155 | }
156 | })
157 | }
158 |
159 | static search(rowsForSearch, searchParam, keyword) {
160 | keyword = keyword.toLowerCase();
161 |
162 | if (!keyword) {
163 | Utilities.cancelSearchResults(rowsForSearch);
164 | } else if (searchParam === 'querytext') {
165 | Utilities.searchQueryWithStatistics(rowsForSearch, keyword);
166 | } else {
167 | Utilities.searchWithParam(rowsForSearch, keyword, searchParam);
168 | }
169 | }
170 |
171 | static getValue(el) {
172 | console.log(el.value)
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
/data/static/js/preview.js:
--------------------------------------------------------------------------------
1 | /**
2 | * The class is designed to instantly preview the query text referenced by the selected row
3 | */
4 | class Previewer {
5 | static getParentRows() {
6 | return document.querySelectorAll("table.preview tr:not(.header)");
7 | }
8 |
9 | static queryTextPreviewer(queryCell, queryRow, newRow, queryString) {
10 | queryCell.style.width = `${Math.floor(newRow.offsetWidth * 0.95)}px`;
11 | queryCell.style.fontFamily = 'Monospace';
12 | queryRow.style.display = '';
13 |
14 | /** Query text preview */
15 | if (!queryCell.hasChildNodes()) {
16 | let preprocessedText = Utilities.preprocessQueryString(queryString);
17 | queryCell.insertAdjacentHTML('afterbegin', `${preprocessedText}
`);
18 | }
19 | }
20 |
21 | static storageParamsPreviewer(previewCell, previewRow, newRow, previewData) {
22 | previewCell.style.width = `${Math.floor(newRow.offsetWidth * 0.95)}px`;
23 | previewCell.style.fontFamily = 'Monospace';
24 | previewRow.style.display = '';
25 |
26 |         /** Storage parameters preview */
27 | if (!previewCell.hasChildNodes()) {
28 | let topn = data.properties.topn || 20;
29 | previewData.slice(-topn).reverse().forEach(item => {
30 | let preprocessedText = Utilities.preprocessQueryString(`${item['first_seen']}: ${item['reloptions']}`);
31 | previewCell.insertAdjacentHTML('afterbegin', `${preprocessedText}
`);
32 | })
33 | }
34 | }
35 |
36 | static findQuery(queryRaw) {
37 | // datasetName, dataID, parentRow.dataset[dataID]
38 | let datasetName = queryRaw.dataset["dataset_name"];
39 | let dataID = queryRaw.dataset["dataset_col_id"];
40 | let querySet = data.datasets[datasetName];
41 | let queryId = queryRaw.dataset["dataset_id"]
42 |
43 | for (let i = 0; i < querySet.length; i++) {
44 | if (querySet[i][dataID] === queryId) {
45 | return i
46 | }
47 | }
48 | return -1
49 | }
50 |
51 | static drawCopyButton() {
52 | let button = document.createElement('a');
53 | button.setAttribute('class', 'copyButton');
54 | button.setAttribute('title', 'Copy to clipboard');
55 |
56 | let svg = `
57 |
60 | `
61 |
62 | button.insertAdjacentHTML('afterbegin', svg);
63 |
64 | return button;
65 | }
66 |
67 | static init() {
68 | const PARENT_ROWS = Previewer.getParentRows();
69 |
70 | PARENT_ROWS.forEach(parentRow => {
71 |
72 | /** Determine row and cell with query text */
73 | let previewCell = document.createElement("td");
74 | previewCell.setAttribute("colspan", "100");
75 | let previewRow = document.createElement("tr");
76 | previewRow.classList.add("previewRow");
77 |
78 | let preview = JSON.parse(parentRow.closest('table').dataset["preview"])[0]
79 | let sourceDatasetName = preview.dataset;
80 | let sourceDatasetKey = preview.id;
81 |
82 | previewRow.setAttribute("data-dataset_name", sourceDatasetName);
83 | previewRow.setAttribute("data-dataset_col_id", sourceDatasetKey);
84 | previewRow.setAttribute("data-dataset_id", parentRow.dataset[sourceDatasetKey]);
85 | previewRow.style.display = "none";
86 | previewRow.appendChild(previewCell);
87 |
88 | if (!parentRow.classList.contains("int1")) {
89 | parentRow.insertAdjacentElement("afterend", previewRow);
90 | }
91 |
92 | parentRow.addEventListener("click", event => {
93 | if (parentRow.classList.contains('int1')) {
94 | previewRow = parentRow.nextSibling.nextSibling;
95 | previewCell = previewRow.firstChild;
96 | }
97 |
98 |                 /** Handle the click only if the target is not a link or part of the copy icon (a, rect, svg, path) */
99 | if (
100 | event.target.tagName.toLowerCase() !== 'a' &&
101 | event.target.tagName.toLowerCase() !== 'rect' &&
102 | event.target.tagName.toLowerCase() !== 'svg' &&
103 | event.target.tagName.toLowerCase() !== 'path'
104 | ) {
105 | if (previewRow.style.display === 'none') {
106 |
107 | /** Preview SQL query text */
108 | if (sourceDatasetName === "queries" || sourceDatasetName === "act_queries") {
109 | let queryIndex = Previewer.findQuery(previewRow);
110 | if (queryIndex >= 0) {
111 |
112 | let queryText = data.datasets[sourceDatasetName][queryIndex].query_texts[0];
113 | Previewer.queryTextPreviewer(previewCell, previewRow, parentRow, queryText);
114 |
115 | /** Copy query text into clipboard button */
116 | if (!previewCell.querySelector('.copyQueryTextButton')) {
117 | let copyQueryTextButton = Previewer.drawCopyButton();
118 | copyQueryTextButton.setAttribute("class", "copyQueryTextButton");
119 | previewCell.appendChild(copyQueryTextButton);
120 |
121 | copyQueryTextButton.addEventListener("click", event => {
122 | navigator.clipboard.writeText(queryText).then(r => console.log(queryText));
123 | });
124 | }
125 | }
126 | }
127 |
128 | /** Preview Table storage parameters */
129 | if (sourceDatasetName === "table_storage_parameters" || sourceDatasetName === "index_storage_parameters") {
130 | let sourceDataset = data.datasets[sourceDatasetName];
131 | let targetDatasetValue = parentRow.dataset[sourceDatasetKey];
132 |
133 | let previewData = Utilities.find(sourceDataset, sourceDatasetKey, targetDatasetValue);
134 |
135 | if (previewData.length) {
136 | let previewDataJSON = JSON.stringify(previewData);
137 | Previewer.storageParamsPreviewer(previewCell, previewRow, parentRow, previewData);
138 | }
139 | }
140 | } else {
141 | previewRow.style.display = 'none';
142 | }
143 | }
144 | })
145 | })
146 | }
147 | }
--------------------------------------------------------------------------------
/report/functions/functionstat.sql:
--------------------------------------------------------------------------------
1 | /* ===== Function stats functions ===== */
2 | CREATE FUNCTION profile_checkavail_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
3 | RETURNS BOOLEAN
4 | SET search_path=@extschema@ AS $$
5 | -- Check if we have function calls collected for report interval
6 | SELECT COALESCE(sum(calls), 0) > 0
7 | FROM sample_stat_user_func_total sn
8 | WHERE sn.server_id = sserver_id AND sn.sample_id BETWEEN start_id + 1 AND end_id
9 | $$ LANGUAGE sql;
10 |
11 | CREATE FUNCTION profile_checkavail_trg_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
12 | RETURNS BOOLEAN
13 | SET search_path=@extschema@ AS $$
14 | -- Check if we have trigger function calls collected for report interval
15 | SELECT COALESCE(sum(calls), 0) > 0
16 | FROM sample_stat_user_func_total sn
17 | WHERE sn.server_id = sserver_id AND sn.sample_id BETWEEN start_id + 1 AND end_id
18 | AND sn.trg_fn
19 | $$ LANGUAGE sql;
20 | /* ===== Function stats functions ===== */
21 |
22 | CREATE FUNCTION top_functions(IN sserver_id integer, IN start_id integer, IN end_id integer)
23 | RETURNS TABLE(
24 | datid oid,
25 | funcid oid,
26 | dbname name,
27 | schemaname name,
28 | funcname name,
29 | funcargs text,
30 | trg_fn boolean,
31 | calls bigint,
32 | total_time double precision,
33 | self_time double precision,
34 | m_time double precision,
35 | m_stime double precision
36 | )
37 | SET search_path=@extschema@ AS $$
38 | SELECT
39 | st.datid,
40 | st.funcid,
41 | sample_db.datname AS dbname,
42 | st.schemaname,
43 | st.funcname,
44 | st.funcargs,
45 | st.trg_fn,
46 | sum(st.calls)::bigint AS calls,
47 | sum(st.total_time)/1000 AS total_time,
48 | sum(st.self_time)/1000 AS self_time,
49 | sum(st.total_time)/NULLIF(sum(st.calls),0)/1000 AS m_time,
50 | sum(st.self_time)/NULLIF(sum(st.calls),0)/1000 AS m_stime
51 | FROM v_sample_stat_user_functions st
52 | -- Database name
53 | JOIN sample_stat_database sample_db
54 | USING (server_id, sample_id, datid)
55 | WHERE
56 | st.server_id = sserver_id
57 | AND NOT sample_db.datistemplate
58 | AND st.sample_id BETWEEN start_id + 1 AND end_id
59 | GROUP BY
60 | st.datid,
61 | st.funcid,
62 | sample_db.datname,
63 | st.schemaname,
64 | st.funcname,
65 | st.funcargs,
66 | st.trg_fn
67 | $$ LANGUAGE sql;
68 |
69 | CREATE FUNCTION top_functions_format(IN sserver_id integer, IN start_id integer, IN end_id integer)
70 | RETURNS TABLE(
71 | datid oid,
72 | funcid oid,
73 | dbname name,
74 | schemaname name,
75 | funcname name,
76 | funcargs text,
77 | calls bigint,
78 | total_time numeric,
79 | self_time numeric,
80 | m_time numeric,
81 | m_stime numeric,
82 |
83 | ord_time integer,
84 | ord_calls integer,
85 | ord_trgtime integer
86 | )
87 | SET search_path=@extschema@ AS $$
88 | SELECT
89 | datid,
90 | funcid,
91 | dbname,
92 | schemaname,
93 | funcname,
94 | funcargs,
95 | NULLIF(calls, 0) AS calls,
96 | round(CAST(NULLIF(total_time, 0.0) AS numeric), 2) AS total_time,
97 | round(CAST(NULLIF(self_time, 0.0) AS numeric), 2) AS self_time,
98 | round(CAST(NULLIF(m_time, 0.0) AS numeric), 2) AS m_time,
99 | round(CAST(NULLIF(m_stime, 0.0) AS numeric), 2) AS m_stime,
100 |
101 | CASE WHEN
102 | total_time > 0 AND NOT trg_fn
103 | THEN
104 | row_number() OVER (ORDER BY
105 | total_time
106 | DESC NULLS LAST,
107 | datid, funcid)::integer
108 | ELSE NULL END AS ord_time,
109 |
110 | CASE WHEN
111 | calls > 0 AND NOT trg_fn
112 | THEN
113 | row_number() OVER (ORDER BY
114 | calls
115 | DESC NULLS LAST,
116 | datid, funcid)::integer
117 | ELSE NULL END AS ord_calls,
118 |
119 | CASE WHEN
120 | total_time > 0 AND trg_fn
121 | THEN
122 | row_number() OVER (ORDER BY
123 | total_time
124 | DESC NULLS LAST,
125 | datid, funcid)::integer
126 | ELSE NULL END AS ord_trgtime
127 | FROM
128 | top_functions(sserver_id, start_id, end_id)
129 | $$ LANGUAGE sql;
130 |
131 | CREATE FUNCTION top_functions_format_diff(IN sserver_id integer,
132 | IN start1_id integer, IN end1_id integer,
133 | IN start2_id integer, IN end2_id integer)
134 | RETURNS TABLE(
135 | datid oid,
136 | funcid oid,
137 | dbname name,
138 | schemaname name,
139 | funcname name,
140 | funcargs text,
141 |
142 | calls1 bigint,
143 | total_time1 numeric,
144 | self_time1 numeric,
145 | m_time1 numeric,
146 | m_stime1 numeric,
147 |
148 | calls2 bigint,
149 | total_time2 numeric,
150 | self_time2 numeric,
151 | m_time2 numeric,
152 | m_stime2 numeric,
153 |
154 | ord_time integer,
155 | ord_calls integer,
156 | ord_trgtime integer
157 | )
158 | SET search_path=@extschema@ AS $$
159 | SELECT
160 | COALESCE(f1.datid, f2.datid),
161 | COALESCE(f1.funcid, f2.funcid),
162 | COALESCE(f1.dbname, f2.dbname),
163 | COALESCE(f1.schemaname, f2.schemaname),
164 | COALESCE(f1.funcname, f2.funcname),
165 | COALESCE(f1.funcargs, f2.funcargs),
166 |
167 | NULLIF(f1.calls, 0) AS calls1,
168 | round(CAST(NULLIF(f1.total_time, 0.0) AS numeric), 2) AS total_time1,
169 | round(CAST(NULLIF(f1.self_time, 0.0) AS numeric), 2) AS self_time1,
170 | round(CAST(NULLIF(f1.m_time, 0.0) AS numeric), 2) AS m_time1,
171 | round(CAST(NULLIF(f1.m_stime, 0.0) AS numeric), 2) AS m_stime1,
172 |
173 | NULLIF(f2.calls, 0) AS calls2,
174 | round(CAST(NULLIF(f2.total_time, 0.0) AS numeric), 2) AS total_time2,
175 | round(CAST(NULLIF(f2.self_time, 0.0) AS numeric), 2) AS self_time2,
176 | round(CAST(NULLIF(f2.m_time, 0.0) AS numeric), 2) AS m_time2,
177 | round(CAST(NULLIF(f2.m_stime, 0.0) AS numeric), 2) AS m_stime2,
178 |
179 | CASE WHEN
180 | COALESCE(f1.total_time, 0) + COALESCE(f2.total_time, 0) > 0
181 | AND NOT COALESCE(f1.trg_fn, f2.trg_fn, false)
182 | THEN
183 | row_number() OVER (ORDER BY
184 | COALESCE(f1.total_time, 0) + COALESCE(f2.total_time, 0)
185 | DESC NULLS LAST,
186 | COALESCE(f1.datid, f2.datid),
187 | COALESCE(f1.funcid, f2.funcid))::integer
188 | ELSE NULL END AS ord_time,
189 |
190 | CASE WHEN
191 | COALESCE(f1.calls, 0) + COALESCE(f2.calls, 0) > 0
192 | AND NOT COALESCE(f1.trg_fn, f2.trg_fn, false)
193 | THEN
194 | row_number() OVER (ORDER BY
195 | COALESCE(f1.calls, 0) + COALESCE(f2.calls, 0)
196 | DESC NULLS LAST,
197 | COALESCE(f1.datid, f2.datid),
198 | COALESCE(f1.funcid, f2.funcid))::integer
199 | ELSE NULL END AS ord_calls,
200 |
201 | CASE WHEN
202 | COALESCE(f1.total_time, 0) + COALESCE(f2.total_time, 0) > 0
203 | AND COALESCE(f1.trg_fn, f2.trg_fn, false)
204 | THEN
205 | row_number() OVER (ORDER BY
206 | COALESCE(f1.total_time, 0) + COALESCE(f2.total_time, 0)
207 | DESC NULLS LAST,
208 | COALESCE(f1.datid, f2.datid),
209 | COALESCE(f1.funcid, f2.funcid))::integer
210 | ELSE NULL END AS ord_trgtime
211 | FROM
212 | top_functions(sserver_id, start1_id, end1_id) f1
213 | FULL OUTER JOIN
214 | top_functions(sserver_id, start2_id, end2_id) f2
215 | USING (datid, funcid)
216 | $$ LANGUAGE sql;
217 |
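A usage sketch for the single-interval formatter above (the `profile` schema, server ID and sample IDs are placeholders, following the conventions of the tests in this dump):

    -- Non-trigger functions ranked by total execution time, samples 1..10 of server 1
    SELECT dbname, schemaname, funcname, funcargs, calls, total_time, m_time
      FROM profile.top_functions_format(1, 1, 10)
     WHERE ord_time IS NOT NULL
     ORDER BY ord_time;

ord_calls and ord_trgtime can be used the same way to rank non-trigger functions by call count or trigger functions by total time.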
--------------------------------------------------------------------------------
/schema/rusage.sql:
--------------------------------------------------------------------------------
1 | /* ==== rusage statements history tables ==== */
2 | CREATE TABLE sample_kcache (
3 | server_id integer,
4 | sample_id integer,
5 | userid oid,
6 | datid oid,
7 | queryid bigint,
8 | queryid_md5 char(32),
9 | plan_user_time double precision, -- User CPU time used
10 | plan_system_time double precision, -- System CPU time used
11 | plan_minflts bigint, -- Number of page reclaims (soft page faults)
12 | plan_majflts bigint, -- Number of page faults (hard page faults)
13 | plan_nswaps bigint, -- Number of swaps
14 | plan_reads bigint, -- Number of bytes read by the filesystem layer
15 | plan_writes bigint, -- Number of bytes written by the filesystem layer
16 | plan_msgsnds bigint, -- Number of IPC messages sent
17 | plan_msgrcvs bigint, -- Number of IPC messages received
18 | plan_nsignals bigint, -- Number of signals received
19 | plan_nvcsws bigint, -- Number of voluntary context switches
20 |     plan_nivcsws bigint, -- Number of involuntary context switches
21 | exec_user_time double precision, -- User CPU time used
22 | exec_system_time double precision, -- System CPU time used
23 | exec_minflts bigint, -- Number of page reclaims (soft page faults)
24 | exec_majflts bigint, -- Number of page faults (hard page faults)
25 | exec_nswaps bigint, -- Number of swaps
26 | exec_reads bigint, -- Number of bytes read by the filesystem layer
27 | exec_writes bigint, -- Number of bytes written by the filesystem layer
28 | exec_msgsnds bigint, -- Number of IPC messages sent
29 | exec_msgrcvs bigint, -- Number of IPC messages received
30 | exec_nsignals bigint, -- Number of signals received
31 | exec_nvcsws bigint, -- Number of voluntary context switches
32 |     exec_nivcsws bigint, -- Number of involuntary context switches
33 | toplevel boolean,
34 | stats_since timestamp with time zone,
35 | CONSTRAINT pk_sample_kcache_n PRIMARY KEY (server_id, sample_id, datid, userid, queryid, toplevel),
36 | CONSTRAINT fk_kcache_stmt_list FOREIGN KEY (server_id,queryid_md5)
37 | REFERENCES stmt_list (server_id,queryid_md5)
38 | ON DELETE NO ACTION ON UPDATE CASCADE
39 | DEFERRABLE INITIALLY IMMEDIATE,
40 | CONSTRAINT fk_kcache_st FOREIGN KEY (server_id, sample_id, datid, userid, queryid, toplevel)
41 | REFERENCES sample_statements(server_id, sample_id, datid, userid, queryid, toplevel) ON DELETE CASCADE
42 | DEFERRABLE INITIALLY IMMEDIATE
43 | );
44 | CREATE INDEX ix_sample_kcache_sl ON sample_kcache(server_id,queryid_md5);
45 |
46 | COMMENT ON TABLE sample_kcache IS 'Sample sample_kcache statistics table (fields from pg_stat_kcache)';
47 |
48 | CREATE TABLE last_stat_kcache (
49 | server_id integer,
50 | sample_id integer,
51 | userid oid,
52 | datid oid,
53 | toplevel boolean DEFAULT true,
54 | queryid bigint,
55 | plan_user_time double precision, -- User CPU time used
56 | plan_system_time double precision, -- System CPU time used
57 | plan_minflts bigint, -- Number of page reclaims (soft page faults)
58 | plan_majflts bigint, -- Number of page faults (hard page faults)
59 | plan_nswaps bigint, -- Number of swaps
60 | plan_reads bigint, -- Number of bytes read by the filesystem layer
61 | plan_writes bigint, -- Number of bytes written by the filesystem layer
62 | plan_msgsnds bigint, -- Number of IPC messages sent
63 | plan_msgrcvs bigint, -- Number of IPC messages received
64 | plan_nsignals bigint, -- Number of signals received
65 | plan_nvcsws bigint, -- Number of voluntary context switches
66 |     plan_nivcsws bigint, -- Number of involuntary context switches
67 | exec_user_time double precision, -- User CPU time used
68 | exec_system_time double precision, -- System CPU time used
69 | exec_minflts bigint, -- Number of page reclaims (soft page faults)
70 | exec_majflts bigint, -- Number of page faults (hard page faults)
71 | exec_nswaps bigint, -- Number of swaps
72 | exec_reads bigint, -- Number of bytes read by the filesystem layer
73 | exec_writes bigint, -- Number of bytes written by the filesystem layer
74 | exec_msgsnds bigint, -- Number of IPC messages sent
75 | exec_msgrcvs bigint, -- Number of IPC messages received
76 | exec_nsignals bigint, -- Number of signals received
77 | exec_nvcsws bigint, -- Number of voluntary context switches
78 |     exec_nivcsws bigint, -- Number of involuntary context switches
79 | stats_since timestamp with time zone
80 | )
81 | PARTITION BY LIST (server_id);
82 |
83 | CREATE TABLE sample_kcache_total (
84 | server_id integer,
85 | sample_id integer,
86 | datid oid,
87 | plan_user_time double precision, -- User CPU time used
88 | plan_system_time double precision, -- System CPU time used
89 | plan_minflts bigint, -- Number of page reclaims (soft page faults)
90 | plan_majflts bigint, -- Number of page faults (hard page faults)
91 | plan_nswaps bigint, -- Number of swaps
92 | plan_reads bigint, -- Number of bytes read by the filesystem layer
93 | --plan_reads_blks bigint, -- Number of 8K blocks read by the filesystem layer
94 | plan_writes bigint, -- Number of bytes written by the filesystem layer
95 | --plan_writes_blks bigint, -- Number of 8K blocks written by the filesystem layer
96 | plan_msgsnds bigint, -- Number of IPC messages sent
97 | plan_msgrcvs bigint, -- Number of IPC messages received
98 | plan_nsignals bigint, -- Number of signals received
99 | plan_nvcsws bigint, -- Number of voluntary context switches
100 |     plan_nivcsws bigint, -- Number of involuntary context switches
101 | exec_user_time double precision, -- User CPU time used
102 | exec_system_time double precision, -- System CPU time used
103 | exec_minflts bigint, -- Number of page reclaims (soft page faults)
104 | exec_majflts bigint, -- Number of page faults (hard page faults)
105 | exec_nswaps bigint, -- Number of swaps
106 | exec_reads bigint, -- Number of bytes read by the filesystem layer
107 | --exec_reads_blks bigint, -- Number of 8K blocks read by the filesystem layer
108 | exec_writes bigint, -- Number of bytes written by the filesystem layer
109 | --exec_writes_blks bigint, -- Number of 8K blocks written by the filesystem layer
110 | exec_msgsnds bigint, -- Number of IPC messages sent
111 | exec_msgrcvs bigint, -- Number of IPC messages received
112 | exec_nsignals bigint, -- Number of signals received
113 | exec_nvcsws bigint, -- Number of voluntary context switches
114 |     exec_nivcsws bigint, -- Number of involuntary context switches
115 | statements bigint NOT NULL,
116 | CONSTRAINT pk_sample_kcache_total PRIMARY KEY (server_id, sample_id, datid),
117 | CONSTRAINT fk_kcache_t_st FOREIGN KEY (server_id, sample_id, datid)
118 | REFERENCES sample_stat_database(server_id, sample_id, datid) ON DELETE CASCADE
119 | DEFERRABLE INITIALLY IMMEDIATE
120 | );
121 | COMMENT ON TABLE sample_kcache_total IS 'Aggregated stats for kcache, based on pg_stat_kcache';
122 |
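Because sample_kcache is keyed exactly like sample_statements (see fk_kcache_st above), per-statement CPU usage can also be inspected ad hoc. A minimal sketch, with the `profile` schema and the server/sample IDs as placeholders:

    -- Statements consuming the most executor CPU time in sample 10 of server 1
    SELECT queryid, toplevel,
           exec_user_time + exec_system_time AS exec_cpu_time
      FROM profile.sample_kcache
     WHERE server_id = 1 AND sample_id = 10
     ORDER BY exec_cpu_time DESC NULLS LAST
     LIMIT 10;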
--------------------------------------------------------------------------------