├── .github
│   └── workflows
│       └── main.yml
├── LICENSE
├── Makefile
├── SPECS
│   └── pg_store_plans17.spec
├── docs
│   ├── index.html
│   └── stylesheet.css
├── expected
│   ├── convert.out
│   ├── store.out
│   └── store_2.out
├── json2sql.pl
├── makeplanfile.sql
├── pg_store_plans--1.9.sql
├── pg_store_plans.c
├── pg_store_plans.control
├── pgsp_explain.c
├── pgsp_explain.h
├── pgsp_json.c
├── pgsp_json.h
├── pgsp_json_int.h
├── pgsp_json_text.c
├── pgsp_json_text.h
├── pgsp_token_types.h
├── regress.conf
└── sql
    ├── convert.sql
    └── store.sql
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2023, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
3 | #
4 | name: CI
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 | pull_request:
10 | branches: [ master ]
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 |
16 | defaults:
17 | run:
18 | shell: sh
19 |
20 | strategy:
21 | fail-fast: false
22 | matrix:
23 | version:
24 | - 17
25 | - 16
26 | - 15
27 | - 14
28 | - 13
29 |
30 | env:
31 | PGVERSION: ${{ matrix.version }}
32 |
33 | steps:
34 | - name: checkout
35 | uses: actions/checkout@v4
36 |
37 | - name: install pg
38 | run: |
39 | sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -v $PGVERSION -p -i
40 | sudo -u postgres createuser -s "$USER"
41 |
42 | - name: build
43 | run: |
44 | make PROFILE="-Werror" USE_PGXS=1
45 | sudo -E make install USE_PGXS=1
46 |
47 | - name: configure shared_preload_libraries
48 | run: |
49 | psql -c 'alter system set shared_preload_libraries = pg_stat_statements, pg_store_plans' postgres
50 | sudo service postgresql restart
51 |
52 | - name: test
53 | run: |
54 | make installcheck USE_PGXS=1
55 |
56 | - name: show regression diffs
57 | if: ${{ failure() }}
58 | run: |
59 | cat regression.diffs
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 1996-2025, PostgreSQL Global Development Group
2 |
3 | Portions Copyright (c) 2009-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
4 | Portions Copyright (c) 1994, The Regents of the University of California
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice,
10 | this list of conditions and the following disclaimer.
11 | * Redistributions in binary form must reproduce the above copyright
12 | notice, this list of conditions and the following disclaimer in the
13 | documentation and/or other materials provided with the distribution.
14 | * Neither the name of the NIPPON TELEGRAPH AND TELEPHONE CORPORATION
15 | (NTT) nor the names of its contributors may be used to endorse or
16 | promote products derived from this software without specific prior
17 | written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # pg_store_plans/Makefile
2 |
3 | MODULES = pg_store_plans
4 | STOREPLANSVER = 1.9
5 |
6 | MODULE_big = pg_store_plans
7 | OBJS = pg_store_plans.o pgsp_json.o pgsp_json_text.o pgsp_explain.o
8 |
9 | EXTENSION = pg_store_plans
10 |
11 | PG_VERSION := $(shell pg_config --version | sed "s/^PostgreSQL //" | sed "s/\.[0-9]*$$//")
12 |
13 | DATA = pg_store_plans--1.9.sql
14 |
15 | REGRESS = convert store
16 | REGRESS_OPTS = --temp-config=regress.conf
17 | ifdef USE_PGXS
18 | PG_CONFIG = pg_config
19 | PGXS := $(shell $(PG_CONFIG) --pgxs)
20 | include $(PGXS)
21 | else
22 | subdir = contrib/pg_store_plans
23 | top_builddir = ../..
24 | include $(top_builddir)/src/Makefile.global
25 | include $(top_srcdir)/contrib/contrib-global.mk
26 | endif
27 |
28 | STARBALL17 = pg_store_plans17-$(STOREPLANSVER).tar.gz
29 | STARBALLS = $(STARBALL17)
30 |
31 | TARSOURCES = Makefile *.c *.h \
32 | pg_store_plans--*.sql \
33 | pg_store_plans.control \
34 | docs/* expected/*.out sql/*.sql \
35 |
36 | ifneq ($(shell uname), SunOS)
37 | LDFLAGS+=-Wl,--build-id
38 | endif
39 |
40 | ## These entries need running server
41 | DBNAME = postgres
42 |
43 | rpms: rpm17
44 |
45 | $(STARBALLS): $(TARSOURCES)
46 | if [ -h $(subst .tar.gz,,$@) ]; then rm $(subst .tar.gz,,$@); fi
47 | if [ -e $(subst .tar.gz,,$@) ]; then \
48 | echo "$(subst .tar.gz,,$@) is not a symlink. Stop."; \
49 | exit 1; \
50 | fi
51 | ln -s . $(subst .tar.gz,,$@)
52 | tar -chzf $@ $(addprefix $(subst .tar.gz,,$@)/, $^)
53 | rm $(subst .tar.gz,,$@)
54 |
55 | rpm17: $(STARBALL17)
56 | MAKE_ROOT=`pwd` rpmbuild -bb SPECS/pg_store_plans17.spec
57 |
58 | testfiles: convert.out convert.sql
59 |
60 | convert.out: convert.sql
61 | psql $(DBNAME) -a -q -X -f convert.sql > $@
62 |
63 | convert.sql: makeplanfile.sql json2sql.pl
64 | psql $(DBNAME) -X -f makeplanfile.sql |& ./json2sql.pl > $@
65 |
66 | clean-testfiles:
67 | rm -f convert.out convert.sql
68 |
69 | deploy-testfiles: testfiles
70 | mv convert.sql sql/
71 | mv convert.out expected/
72 |
--------------------------------------------------------------------------------
/SPECS/pg_store_plans17.spec:
--------------------------------------------------------------------------------
1 | # SPEC file for pg_store_plans
2 | # Copyright(c) 2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
3 |
4 | %define _pgdir /usr/pgsql-17
5 | %define _bindir %{_pgdir}/bin
6 | %define _libdir %{_pgdir}/lib
7 | %define _datadir %{_pgdir}/share
8 | %define _bcdir %{_libdir}/bitcode
9 | %define _mybcdir %{_bcdir}/pg_store_plans
10 |
11 | %if "%(echo ${MAKE_ROOT})" != ""
12 | %define _rpmdir %(echo ${MAKE_ROOT})/RPMS
13 | %define _sourcedir %(echo ${MAKE_ROOT})
14 | %endif
15 |
16 | ## Set general information for pg_store_plans.
17 | Summary: Record executed plans on PostgreSQL 17
18 | Name: pg_store_plans17
19 | Version: 1.9
20 | Release: 1%{?dist}
21 | License: BSD
22 | Group: Applications/Databases
23 | Source0: %{name}-%{version}.tar.gz
24 | #URL: http://example.com/pg_store_plans/
25 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-%(%{__id_u} -n)
26 | Vendor: NIPPON TELEGRAPH AND TELEPHONE CORPORATION
27 |
28 | ## We use postgresql-devel package
29 | BuildRequires: postgresql17-devel
30 | Requires: postgresql17-libs
31 |
32 | ## Description for "pg_store_plans"
33 | %description
34 |
35 | pg_store_plans provides capability to record statistics for every plan
36 | executed on PostgreSQL.
37 |
38 | Note that this package is available only for PostgreSQL 17.
39 |
40 | %package llvmjit
41 | Requires: postgresql17-server, postgresql17-llvmjit
42 | Requires: pg_store_plans17 = 1.9
43 | Summary: Just-in-time compilation support for pg_store_plans17
44 |
45 | %description llvmjit
46 | Just-in-time compilation support for pg_store_plans17
47 |
48 | ## pre work for build pg_store_plans
49 | %prep
50 | PATH=/usr/pgsql-17/bin:$PATH
51 | if [ "${MAKE_ROOT}" != "" ]; then
52 | pushd ${MAKE_ROOT}
53 | make clean %{name}-%{version}.tar.gz
54 | popd
55 | fi
56 | if [ ! -d %{_rpmdir} ]; then mkdir -p %{_rpmdir}; fi
57 | %setup -q
58 |
59 | ## Set variables for build environment
60 | %build
61 | PATH=/usr/pgsql-17/bin:$PATH
62 | pg_config
63 | make USE_PGXS=1 %{?_smp_mflags}
64 |
65 | ## Set variables for install
66 | %install
67 | rm -rf %{buildroot}
68 | PATH=/usr/pgsql-17/bin:$PATH
69 | make install DESTDIR=%{buildroot}
70 |
71 | %clean
72 | rm -rf %{buildroot}
73 |
74 | %files
75 | %defattr(0755,root,root)
76 | %{_libdir}/pg_store_plans.so
77 | %defattr(0644,root,root)
78 | %{_datadir}/extension/pg_store_plans--1.9.sql
79 | %{_datadir}/extension/pg_store_plans.control
80 |
81 | %files llvmjit
82 | %defattr(0644,root,root)
83 | %{_bcdir}/pg_store_plans.index.bc
84 | %{_mybcdir}/*.bc
85 |
86 | # History of pg_store_plans.
87 | %changelog
88 | * Mon Jan 20 2025 Kyotaro Horiguchi
89 | - Version 1.9. Support PostgreSQL 17.
90 | * Fri Feb 02 2024 Kyotaro Horiguchi
91 | - Version 1.8. Support PostgreSQL 16.
92 | * Wed Dec 14 2022 Kyotaro Horiguchi
93 | - Version 1.7. Support PostgreSQL 15.
94 | * Thu Feb 03 2022 Kyotaro Horiguchi
95 | - Version 1.6.1.
96 | * Mon Jan 17 2022 Tatsuro Yamada, Julien Rouhaud, Kyotaro Horiguchi
97 | - Version 1.6. Supports PostgreSQL 14
98 | * Wed Jan 27 2021 Kyotaro Horiguchi
99 | - Version 1.5. Supports PostgreSQL 13
100 | * Thu Jan 30 2020 Kyotaro Horiguchi
101 | - Version 1.4. Supports PostgreSQL 12
102 | * Tue Jan 22 2019 Kyotaro Horiguchi
103 | - Supports PostgreSQL 11
104 | * Tue Oct 10 2017 Kyotaro Horiguchi
105 | - Supports PostgreSQL 10
106 | * Fri Aug 26 2016 Kyotaro Horiguchi
107 | - Some fix in plan representation functions.
108 | * Wed Apr 13 2016 Kyotaro Horiguchi
109 | - Support PostgreSQL 9.5
110 | * Fri Jun 12 2015 Kyotaro Horiguchi
111 | - Initial version.
112 |
113 |
114 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
19 |
The pg_store_plans module provides a
20 | means for tracking execution plan statistics of all SQL statements
21 | executed by a server.
22 |
23 |
24 |
The module must be loaded by
25 | adding pg_store_plans to
26 | shared_preload_libraries in
27 | postgresql.conf , because it requires
28 | additional shared memory. This means that a server restart is
29 | required to add or remove the
30 | module. pg_store_plans requires the GUC
31 | variable compute_query_id to be "on" or
32 | "auto". If it is set to
33 | "no", pg_store_plans is silently disabled.
34 |
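For example, a minimal setup might look like the following (these lines are
illustrative; the extension itself also has to be created in each database
where you want to query its views):

# postgresql.conf
shared_preload_libraries = 'pg_store_plans'
compute_query_id = auto

-- after a server restart, in the target database
CREATE EXTENSION pg_store_plans;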
35 |
36 |
1. The pg_store_plans
37 | View
38 |
39 |
The statistics gathered by the module are available via a system
40 | view named pg_store_plans . This view
41 | contains one row for each distinct set of database ID, user ID,
42 | query ID and plan ID. The columns of the view are described in
43 | Table 1 .
44 |
45 |
46 |
47 |
48 |
Table 1. pg_store_plans Columns
49 |
50 |
51 |
52 | Name Type References
53 | Description
54 |
55 |
56 | userid
57 | oid
58 |
59 | pg_authid .oid
60 | OID of user who executed the statement
61 | dbid
62 | oid
63 |
64 | pg_database .oid
65 | OID of database in which the statement was executed
66 | queryid
67 | bigint
68 |
69 | Core-generated query ID. If compute_query_id is set to "off", pg_store_plans is silently disabled. This is usable as the join key
70 | with pg_stat_statements .
71 |
72 | planid
73 | bigint
74 |
75 | Plan hash code, computed from the normalized plan representation.
76 |
77 | plan
78 | text
79 |
80 | Text of a representative plan. The format is specified by the
81 | configuration
82 | parameter pg_store_plans.plan_format.
83 |
84 | calls
85 | bigint
86 |
87 | Number of times executed
88 |
89 | total_time
90 | double precision
91 |
92 | Total time spent in the statement using the plan, in milliseconds
93 |
94 | rows
95 | bigint
96 |
97 | Total number of rows retrieved or affected by the statement
98 | using the plan
99 |
100 | shared_blks_hit
101 | bigint
102 |
103 | Total number of shared block cache hits by the statement using
104 | the plan
105 |
106 | shared_blks_read
107 | bigint
108 |
109 | Total number of shared blocks read by the statement using the
110 | plan
111 |
112 | shared_blks_dirtied
113 | bigint
114 |
115 | Total number of shared blocks dirtied by the statement using
116 | the plan
117 |
118 | shared_blks_written
119 | bigint
120 |
121 | Total number of shared blocks written by the statement using
122 | the plan
123 |
124 | local_blks_hit
125 | bigint
126 |
127 | Total number of local block cache hits by the statement using
128 | the plan
129 |
130 | local_blks_read
131 | bigint
132 |
133 | Total number of local blocks read by the statement using the
134 | plan
135 |
136 | local_blks_dirtied
137 | bigint
138 | Total number of local blocks dirtied by the statement using
139 | the plan
140 |
141 | local_blks_written
142 | bigint
143 |
144 | Total number of local blocks written by the statement using
145 | the plan
146 |
147 | temp_blks_read
148 | bigint
149 |
150 | Total number of temp blocks read by the statement using the
151 | plan
152 |
153 | temp_blks_written
154 | bigint
155 |
156 | Total number of temp blocks written by the statement using the
157 | plan
158 |
159 | shared_blk_read_time
160 | double precision
161 |
162 |
163 | Total time the statement using the plan spent reading shared blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
164 |
165 | shared_blk_write_time
166 | double precision
167 |
168 |
169 | Total time the statement using the plan spent writing shared blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
170 |
171 | local_blk_read_time
172 | double precision
173 |
174 |
175 | Total time the statement using the plan spent reading local blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
176 |
177 | local_blk_write_time
178 | double precision
179 |
180 |
181 | Total time the statement using the plan spent writing local blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
182 |
183 | temp_blk_read_time
184 | double precision
185 |
186 |
187 | Total time the statement using the plan spent reading temporary file blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
188 |
189 | temp_blk_write_time
190 | double precision
191 |
192 |
193 | Total time the statement using the plan spent writing temporary file blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)
194 |
195 | first_call
196 | timestamp with time zone
197 |
198 | Timestamp of the least recent call of the query using this
199 | plan.
200 |
201 | last_call
202 | timestamp with time zone
203 |
204 | Timestamp of the most recent call of the query using this
205 | plan.
206 |
207 |
208 |
209 |
210 |
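For example, the following query (illustrative, not part of the module)
lists the most time-consuming plans recorded so far:

SELECT planid, calls, total_time, rows, plan
  FROM pg_store_plans
 ORDER BY total_time DESC
 LIMIT 5;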
211 |
This view, and the functions pg_store_plans_reset
212 |
and pg_store_plans
and other
213 | auxiliary functions, are available only in databases where
214 | the pg_store_plans extension is installed
215 | by CREATE EXTENSION . However,
216 | statistics are tracked across all databases of the server whenever
217 | the pg_store_plans module is loaded onto
218 | the server, regardless of the presence of the view.
219 |
220 |
221 |
For security reasons, non-superusers are not allowed to see the
222 | plan representation, queryid or planid for the queries executed by
223 | other users.
224 |
225 |
226 |
227 |
228 | queryid is calculated to identify the source
229 | query similarly to pg_stat_statements but
230 | using a different algorithm. planid is
231 | calculated in a similar way. Two plans are considered the same if they
232 | are seemingly equivalent except for the values of literal constants
233 | or fluctuating values such as costs or measured time.
234 |
235 |
For PostgreSQL 14 or later, you can find the corresponding query
236 | for a pg_store_plans entry
237 | in pg_stat_statements by joining using
238 | queryid , like the following.
239 |
240 |
241 |
242 |
SELECT s.query, p.plan FROM pg_store_plans p JOIN pg_stat_statements s USING (queryid);
243 |
244 |
245 |
Plan ID is calculated excluding fluctuating properties of plans. On the other hand, the plan column of pg_store_plans keeps showing the most recent values for those fluctuating properties.
246 |
247 |
pg_store_plans
248 | and pg_stat_statements maintain their
249 | entries individually, so there is a certain unavoidable chance,
250 | especially for entries with low execution frequency, that no
251 | corresponding entry is found.
252 |
253 |
254 |
255 |
256 |
2. The pg_store_plans_info
257 | View
258 |
259 |
The statistics of the pg_store_plans module itself are tracked and made available via a view named pg_store_plans_info . This view contains only a single row. The columns of the view are shown in Table 2 .
260 |
261 |
262 |
263 |
264 |
Table 2. pg_store_plans_info Columns
265 |
266 |
267 |
268 | Name Type References
269 | Description
270 |
271 |
272 | dealloc
273 | bigint
274 |
275 | Total number of times pg_store_plans entries about the least-executed plans were deallocated because more distinct plans than pg_store_plans.max were observed.
276 | stats_reset
277 | timestamp with time zone
278 |
279 | Time at which all statistics in the pg_store_plans view were last reset.
280 |
281 |
282 |
283 |
284 |
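Since the view contains a single row, a plain query is enough to inspect it,
for example:

SELECT dealloc, stats_reset FROM pg_store_plans_info;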
285 |
286 |
289 | 3. Functions
290 |
pg_store_plans_reset() returns void
291 |
292 |
293 |
294 | pg_store_plans_reset
discards all
295 | statistics gathered so far
296 | by pg_store_plans . By default, only
297 | superusers can execute this function.
298 |
299 |
300 |
301 | pg_store_plans(showtext boolean) returns setof
302 | record
303 |
304 |
305 | The pg_store_plans view is defined in
306 | terms of a function also
307 | named pg_store_plans
.
308 |
309 |
310 | pg_store_plans_info() returns record
311 |
312 |
313 |
314 | The pg_store_plans_info
view is defined in terms of a function also named pg_store_plans_info
.
315 |
316 |
317 |
318 | pg_store_hash_query(query text) returns oid
319 |
320 |
321 | This function calculates the hash value of a query text. The same
322 | algorithm is used to
323 | calculate queryid
324 | in pg_store_plans so this function
325 | is usable to join
326 | with pg_store_plans .
327 |
328 |
329 |
330 | pg_store_plans_textplan(query text) returns text
331 |
332 |
333 | This function generates an ordinary text representation from the raw
334 | representation of a plan
335 | in pg_store_plans , which is shown
336 | there when pg_store_plans.plan_format
337 | = 'raw'. Since the resulting plan text is generated from the JSON
338 | representation, it might be slightly different from what you
339 | will get directly from the EXPLAIN command (see the example after this function list).
340 |
341 |
342 |
343 | pg_store_plans_jsonplan(query text) returns text
344 |
345 |
346 | This function inflates a "short format json plan" or "raw format"
347 | into normal json format. The short format json is the internal format
348 | for plan
349 | in pg_store_plans , which is shown
350 | there when pg_store_plans.plan_format =
351 | 'raw'.
352 |
353 |
354 |
355 | pg_store_plans_xmlplan(query text) returns text
356 |
357 |
358 | This function generates an XML representation from the raw
359 | representation of a plan
360 | in pg_store_plans , which is shown
361 | there when pg_store_plans.plan_format
362 | = 'raw'.
363 |
364 |
365 |
366 | pg_store_plans_yamlplan(query text) returns text
367 |
368 |
369 | This function generates a YAML representation from the raw
370 | representation of a plan
371 | in pg_store_plans , which is shown
372 | there when pg_store_plans.plan_format
373 | = 'raw'.
374 |
375 |
376 |
377 |
378 |
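As an example of the conversion functions above, assuming
pg_store_plans.plan_format is set to 'raw' so that the plan
column holds the internal representation, a stored plan can be rendered as
ordinary text like this (illustrative):

SELECT queryid, planid, pg_store_plans_textplan(plan) FROM pg_store_plans;

pg_store_plans_jsonplan , pg_store_plans_xmlplan
and pg_store_plans_yamlplan can be used in the same way to obtain the
other formats.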
379 |
380 |
381 | 4. Configuration Parameters
382 |
383 |
384 |
385 |
386 |
387 |
388 | pg_store_plans.max
389 | (integer )
390 |
391 | pg_store_plans.max is the maximum number
392 | of plans tracked by the module (i.e., the maximum number of rows in
393 | the pg_store_plans view). If more
394 | distinct plans than that are observed, information about the
395 | least-executed plan is discarded. The default value is 1000. This
396 | parameter can only be set at server start.
397 |
398 |
399 |
400 | pg_store_plans.track
401 | (enum )
402 |
403 |
404 | Similarly to pg_stat_statements ,
405 | pg_store_plans.track controls which
406 | statements are counted by the module.
407 | Specify top to track top-level statements
408 | (those issued directly by clients), all to
409 | also track nested statements (such as statements invoked within
410 | functions except for some commands, see below),
411 | or none to disable statement statistics
412 | collection. The default value is top .
413 | When all is specified, the commands
414 | executed under CREATE EXTENSION
415 | and ALTER EXTENSION are still
416 | ignored. Specify verbose to track all
417 | commands including ones excluded by all .
418 | Only superusers can change this setting.
419 |
420 |
421 | pg_store_plans.max_plan_length
422 | (integer )
423 |
424 | pg_store_plans.max_plan_length is the
425 | maximum byte length of plans in the raw (shortened JSON) format to
426 | store. The plan text is truncated to this length if it is longer than
427 | that value. The default value is 5000. This parameter can only be set
428 | at server start.
429 |
430 |
431 |
432 | pg_store_plans.plan_storage
433 | (enum )
434 |
435 | pg_store_plans.plan_storage specifies
436 | how plan texts are stored while the server is running. If it is set
437 | to file , the plan texts are stored in a
438 | temporary file as pg_stat_statements
439 | does. shmem stores plan texts in shared
440 | memory. The default value is "file". See
441 | the discussion below for details.
442 |
443 |
444 |
445 | pg_store_plans.plan_format
446 | (enum )
447 |
448 |
449 | pg_store_plans.plan_format controls the
450 | format of plans
451 | in pg_store_plans . text
452 | is the default value and shows plans in the ordinary text
453 | representation; json , xml
454 | and yaml show them in the corresponding format;
455 | raw gives the internal representation, which
456 | can be fed to the pg_store_plans_*plan
457 | functions.
458 |
459 |
460 |
461 | pg_store_plans.min_duration
462 | (integer )
463 |
464 |
465 | pg_store_plans.min_duration is the
466 | minimum statement execution time, in milliseconds, that will cause the
467 | statement's plan to be stored. Setting this to zero (the default) stores
468 | all plans. Only superusers can change this setting.
469 |
470 |
471 |
472 | pg_store_plans.log_analyze
473 | (boolean )
474 |
475 |
476 | pg_store_plans.log_analyze
477 | causes EXPLAIN ANALYZE output, rather than
478 | just EXPLAIN output, to be included
479 | in plan . This parameter is off by
480 | default.
481 |
482 |
483 |
484 | pg_store_plans.log_buffers
485 | (boolean )
486 |
487 |
488 | pg_store_plans.log_buffers
489 | causes EXPLAIN (ANALYZE, BUFFERS) output,
490 | rather than just EXPLAIN output, to be
491 | included in plan . This parameter is off
492 | by default.
493 |
494 |
495 |
496 | pg_store_plans.log_timing
497 | (boolean )
498 |
499 | Setting pg_store_plans.log_timing to
500 | false disables recording of actual timings. The overhead of repeatedly
501 | reading the system clock can slow down the query significantly on
502 | some systems, so it may be useful to set this parameter to FALSE
503 | when only actual row counts, and not exact execution times for each
504 | execution node, are needed. Run time of the entire statement is
505 | always measured
506 | when pg_store_plans.log_analyze is
507 | TRUE. It defaults to TRUE.
508 |
509 |
510 |
511 | pg_store_plans.log_triggers (boolean )
514 |
515 | pg_store_plans.log_triggers causes
516 | trigger execution statistics to be included in recorded plans. This
517 | parameter has no effect
518 | unless pg_store_plans.log_analyze is
519 | turned on.
520 |
521 |
522 |
523 | pg_store_plans.verbose
524 | (boolean )
525 |
526 |
527 | pg_store_plans.verbose
528 | causes EXPLAIN VERBOSE output, rather than
529 | just EXPLAIN output, to be included
530 | in plan . This parameter is off by
531 | default.
532 |
533 |
534 |
535 | pg_store_plans.save
536 | (boolean )
537 |
538 |
539 | pg_store_plans.save specifies whether to
540 | save plan statistics across server shutdowns. If it
541 | is off then statistics are not saved at
542 | shutdown nor reloaded at server start. The default value
543 | is on . This parameter can only be set in
544 | the postgresql.conf file or on the server
545 | command line.
546 |
547 |
548 |
549 |
550 |
551 |
554 |
pg_store_plans claims additional shared memory proportional to pg_store_plans.max . When pg_store_plans.plan_storage is set to "shmem", it claims further shared memory to store plan texts, amounting to the product of the maximum number of plans to store (pg_store_plans.max) and the maximum length of an individual plan (pg_store_plans.max_plan_length). If pg_store_plans.plan_storage is set to "file", plan texts are written to a temporary file as pg_stat_statements does. If pg_store_plans.max is not large enough to store all plans, pg_store_plans reclaims the space for new plans by evicting some portion of the entries. After several rounds of such eviction, pg_store_plans runs garbage collection on the temporary file, which might be painful for certain workloads. You can see how frequently that eviction happens in pg_store_plans_info.dealloc .
555 |
If pg_store_plans.max is sufficiently large that garbage collection doesn't happen, "file" is recommended as pg_store_plans.plan_storage .
556 |
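As a rough illustration, with the default settings (pg_store_plans.max =
1000 and pg_store_plans.max_plan_length = 5000), the additional plan-text
area claimed under "shmem" is on the order of 1000 x 5000 bytes, roughly
5MB, on top of the shared memory used for the entries themselves.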
557 |
These parameters must be set in
558 | postgresql.conf . An example setting follows:
559 |
# postgresql.conf
560 | shared_preload_libraries = 'pg_store_plans, pg_stat_statements'
561 | pg_store_plans.max = 10000
562 | pg_store_plans.track = all
563 |
564 |
565 |
566 |
567 |
570 |
(postgresql.conf has the following settings)
571 | shared_preload_libraries = 'pg_store_plans,pg_stat_statements'
572 | pg_store_plans.log_analyze = true
573 | pg_store_plans.log_timing = false
574 |
575 | bench=# SELECT pg_store_plans_reset();
576 |
577 | $ pgbench -i bench
578 | $ pgbench -c10 -t1000 bench
579 |
580 | bench=# \x
581 | bench=# SELECT s.query, p.plan,
582 | p.calls as "plan calls", s.calls as "stmt calls",
583 | p.total_time / p.calls as "time/call", p.first_call, p.last_call
584 | FROM pg_stat_statements s
585 | JOIN pg_store_plans p USING (queryid) WHERE p.calls < s.calls
586 | ORDER BY query ASC, "time/call" DESC;
587 | -[ RECORD 1 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
588 | query | UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2
589 | plan | Update on pgbench_tellers (cost=0.00..7.88 rows=0 width=0) +
590 | | -> Seq Scan on pgbench_tellers (cost=0.00..7.88 rows=1 width=10) +
591 | | Filter: (tid = 1)
592 | plan calls | 396
593 | stmt calls | 10000
594 | time/call | 16.15434492676767
595 | first_call | 2021-11-25 15:11:38.258838+09
596 | last_call | 2021-11-25 15:11:40.170291+09
597 | -[ RECORD 2 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
598 | query | UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2
599 | plan | Update on pgbench_tellers (cost=0.14..8.15 rows=0 width=0) +
600 | | -> Index Scan using pgbench_tellers_pkey on pgbench_tellers (cost=0.14..8.15 rows=1 width=10) +
601 | | Index Cond: (tid = 8) +
602 | plan calls | 9604
603 | stmt calls | 10000
604 | time/call | 10.287281695439345
605 | first_call | 2021-11-25 15:11:40.161556+09
606 | last_call | 2021-11-25 15:12:09.957773+09
607 | -[ RECORD 3 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
608 | query | select s.query, p.plan, p.calls as "plan calls", s.calls as "stmt calls", p.total_time / p.calls as "time/call", p.first_call, p.last_call from pg_stat_statements s join pg_store_plans p using (queryid) where p.calls < s.calls order by query asc, "time/call" desc
609 | plan | Sort (cost=309.71..313.88 rows=1667 width=104) +
610 | | Sort Key: pg_stat_statements.query, ((pg_store_plans.total_time / (pg_store_plans.calls)::double precision)) DESC +
611 | | -> Merge Join (cost=119.66..220.50 rows=1667 width=104) +
612 | | Merge Cond: (pg_stat_statements.queryid = pg_store_plans.queryid) +
613 | | Join Filter: (pg_store_plans.calls < pg_stat_statements.calls) +
614 | | -> Sort (cost=59.83..62.33 rows=1000 width=48) +
615 | | Sort Key: pg_stat_statements.queryid +
616 | | -> Function Scan on pg_stat_statements (cost=0.00..10.00 rows=1000 width=48) +
617 | | -> Sort (cost=59.83..62.33 rows=1000 width=72) +
618 | | Sort Key: pg_store_plans.queryid +
619 | | -> Function Scan on pg_store_plans (cost=0.00..10.00 rows=1000 width=72) +
620 | plan calls | 3
621 | stmt calls | 4
622 | time/call | 16.387161
623 | first_call | 2021-11-25 15:20:57.978082+09
624 | last_call | 2021-11-25 15:23:48.631993+09
625 | -[ RECORD 4 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
626 | query | select s.query, p.plan, p.calls as "plan calls", s.calls as "stmt calls", p.total_time / p.calls as "time/call", p.first_call, p.last_call from pg_stat_statements s join pg_store_plans p using (queryid) where p.calls < s.calls order by query asc, "time/call" desc
627 | plan | Sort (cost=309.71..313.88 rows=1667 width=104) +
628 | | Sort Key: pg_stat_statements.query, ((pg_store_plans.total_time / (pg_store_plans.calls)::double precision)) DESC +
629 | | Sort Method: quicksort Memory: 26kB +
630 | | -> Merge Join (cost=119.66..220.50 rows=1667 width=104) +
631 | | Merge Cond: (pg_stat_statements.queryid = pg_store_plans.queryid) +
632 | | Join Filter: (pg_store_plans.calls < pg_stat_statements.calls) +
633 | | Rows Removed by Join Filter: 7 +
634 | | -> Sort (cost=59.83..62.33 rows=1000 width=48) +
635 | | Sort Key: pg_stat_statements.queryid +
636 | | Sort Method: quicksort Memory: 27kB +
637 | | -> Function Scan on pg_stat_statements (cost=0.00..10.00 rows=1000 width=48) +
638 | | -> Sort (cost=59.83..62.33 rows=1000 width=72) +
639 | | Sort Key: pg_store_plans.queryid +
640 | | Sort Method: quicksort Memory: 30kB +
641 | | -> Function Scan on pg_store_plans (cost=0.00..10.00 rows=1000 width=72) +
642 | plan calls | 1
643 | stmt calls | 4
644 | time/call | 4.46928
645 | first_call | 2021-11-25 15:12:27.142535+09
646 | last_call | 2021-11-25 15:12:27.142536+09
647 |
648 | postgres=#
649 |
650 |
651 |
652 |
653 |
654 |
655 |
--------------------------------------------------------------------------------
/docs/stylesheet.css:
--------------------------------------------------------------------------------
1 | /* doc/src/sgml/stylesheet.css */
2 |
3 | /* color scheme similar to www.postgresql.org */
4 |
5 | BODY {
6 | color: #000000;
7 | background: #FFFFFF;
8 | font-family: verdana, Arial, sans-serif;
9 | }
10 |
11 | A:link { color:#0066A2; }
12 | A:visited { color:#004E66; }
13 | A:active { color:#0066A2; }
14 | A:hover { color:#000000; }
15 |
16 | H1 {
17 | font-size: 2.0em;
18 | font-weight: bold;
19 | margin-top: 0em;
20 | margin-bottom: 0em;
21 | color: #EC5800;
22 | }
23 |
24 | H2 {
25 | font-size: 1.2em;
26 | margin: 2.0em 0em 1.2em 0em;
27 | font-weight: bold;
28 | color: #666;
29 | }
30 |
31 | H3 {
32 | font-size: 1.1em;
33 | margin: 1.2em 0em 1.2em 0em;
34 | font-weight: bold;
35 | color: #666;
36 | }
37 |
38 | H4 {
39 | font-size: 0.95em;
40 | margin: 1.2em 0em 1.2em 0em;
41 | font-weight: normal;
42 | color: #666;
43 | }
44 |
45 | H5 {
46 | font-size: 0.9em;
47 | margin: 1.2em 0em 1.2em 0em;
48 | font-weight: normal;
49 | }
50 |
51 | H6 {
52 | font-size: 0.85em;
53 | margin: 1.2em 0em 1.2em 0em;
54 | font-weight: normal;
55 | }
56 |
57 | PRE,CODE,KBD,SAMP,TT {
58 | font-family:monospace,monospace;
59 | font-size:1em;
60 | }
--------------------------------------------------------------------------------
/expected/store.out:
--------------------------------------------------------------------------------
1 | SET client_min_messages = 'error';
2 | CREATE EXTENSION IF NOT EXISTS pg_store_plans;
3 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
4 | SELECT pg_stat_statements_reset() IS NOT NULL AS t;
5 | t
6 | ---
7 | t
8 | (1 row)
9 |
10 | SELECT pg_store_plans_reset();
11 | pg_store_plans_reset
12 | ----------------------
13 |
14 | (1 row)
15 |
16 | DROP TABLE IF EXISTS t1;
17 | CREATE TABLE t1 (a int);
18 | CREATE INDEX ON t1 (a);
19 | INSERT INTO t1 (SELECT a FROM generate_series(0, 9999) a);
20 | RESET enable_seqscan;
21 | RESET enable_bitmapscan;
22 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
23 | count
24 | -------
25 | 10000
26 | (1 row)
27 |
28 | SET enable_seqscan TO false;
29 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
30 | count
31 | -------
32 | 10000
33 | (1 row)
34 |
35 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
36 | count
37 | -------
38 | 10000
39 | (1 row)
40 |
41 | SET enable_bitmapscan TO false;
42 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
43 | count
44 | -------
45 | 10000
46 | (1 row)
47 |
48 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
49 | count
50 | -------
51 | 10000
52 | (1 row)
53 |
54 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
55 | count
56 | -------
57 | 10000
58 | (1 row)
59 |
60 | RESET enable_seqscan;
61 | RESET enable_bitmapscan;
62 | CREATE OR REPLACE FUNCTION test_explain() RETURNS text AS
63 | $x$
64 | DECLARE
65 | r record;
66 | s text;
67 | p text;
68 | totalrows int;
69 | totalcalls int;
70 | first bool;
71 | BEGIN
72 | s := '';
73 | first = true;
74 | SELECT calls, rows INTO totalcalls, totalrows
75 | FROM pg_stat_statements
76 | WHERE query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x';
77 |
78 | FOR r IN SELECT s.query as q, p.plan as p, p.calls as c, p.rows r
79 | FROM pg_stat_statements s
80 | JOIN pg_store_plans p USING (queryid)
81 | WHERE s.query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x'
82 | ORDER BY p.calls
83 | LOOP
84 | IF first then
85 | s = r.q || E'\n totalcalls=' || totalcalls ||
86 | ' , totalrows=' || totalrows || E'\n';
87 | first := false;
88 | END IF;
89 | p := regexp_replace(r.p, '=[0-9.]+([^0-9.])', '=xxx\1', 'g');
90 | s := s || p || E'\n calls=' || r.c || ', rows=' || r.r || E'\n';
91 | END LOOP;
92 |
93 | RETURN s;
94 | END
95 | $x$
96 | LANGUAGE plpgsql;
97 | SELECT test_explain();
98 | test_explain
99 | -----------------------------------------------------------------------------
100 | SELECT count(*) FROM (SELECT * FROM t1) AS x +
101 | totalcalls=6 , totalrows=6 +
102 | Aggregate (cost=xxx rows=xxx width=xxx) +
103 | -> Seq Scan on t1 (cost=xxx rows=xxx width=xxx) +
104 | calls=1, rows=1 +
105 | Aggregate (cost=xxx rows=xxx width=xxx) +
106 | -> Bitmap Heap Scan on t1 (cost=xxx rows=xxx width=xxx) +
107 | -> Bitmap Index Scan using t1_a_idx (cost=xxx rows=xxx width=xxx)+
108 | calls=2, rows=2 +
109 | Aggregate (cost=xxx rows=xxx width=xxx) +
110 | -> Index Only Scan using t1_a_idx on t1 (cost=xxx rows=xxx width=xxx) +
111 | calls=3, rows=3 +
112 |
113 | (1 row)
114 |
115 | DROP FUNCTION test_explain();
116 | DROP TABLE t1;
117 |
--------------------------------------------------------------------------------
/expected/store_2.out:
--------------------------------------------------------------------------------
1 | SET client_min_messages = 'error';
2 | CREATE EXTENSION IF NOT EXISTS pg_store_plans;
3 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
4 | SELECT pg_stat_statements_reset() IS NOT NULL AS t;
5 | t
6 | ---
7 | t
8 | (1 row)
9 |
10 | SELECT pg_store_plans_reset();
11 | pg_store_plans_reset
12 | ----------------------
13 |
14 | (1 row)
15 |
16 | DROP TABLE IF EXISTS t1;
17 | CREATE TABLE t1 (a int);
18 | CREATE INDEX ON t1 (a);
19 | INSERT INTO t1 (SELECT a FROM generate_series(0, 9999) a);
20 | RESET enable_seqscan;
21 | RESET enable_bitmapscan;
22 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
23 | count
24 | -------
25 | 10000
26 | (1 row)
27 |
28 | SET enable_seqscan TO false;
29 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
30 | count
31 | -------
32 | 10000
33 | (1 row)
34 |
35 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
36 | count
37 | -------
38 | 10000
39 | (1 row)
40 |
41 | SET enable_bitmapscan TO false;
42 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
43 | count
44 | -------
45 | 10000
46 | (1 row)
47 |
48 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
49 | count
50 | -------
51 | 10000
52 | (1 row)
53 |
54 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
55 | count
56 | -------
57 | 10000
58 | (1 row)
59 |
60 | RESET enable_seqscan;
61 | RESET enable_bitmapscan;
62 | CREATE OR REPLACE FUNCTION test_explain() RETURNS text AS
63 | $x$
64 | DECLARE
65 | r record;
66 | s text;
67 | p text;
68 | totalrows int;
69 | totalcalls int;
70 | first bool;
71 | BEGIN
72 | s := '';
73 | first = true;
74 | SELECT calls, rows INTO totalcalls, totalrows
75 | FROM pg_stat_statements
76 | WHERE query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x';
77 |
78 | FOR r IN SELECT s.query as q, p.plan as p, p.calls as c, p.rows r
79 | FROM pg_stat_statements s
80 | JOIN pg_store_plans p USING (queryid)
81 | WHERE s.query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x'
82 | ORDER BY p.calls
83 | LOOP
84 | IF first then
85 | s = r.q || E'\n totalcalls=' || totalcalls ||
86 | ' , totalrows=' || totalrows || E'\n';
87 | first := false;
88 | END IF;
89 | p := regexp_replace(r.p, '=[0-9.]+([^0-9.])', '=xxx\1', 'g');
90 | s := s || p || E'\n calls=' || r.c || ', rows=' || r.r || E'\n';
91 | END LOOP;
92 |
93 | RETURN s;
94 | END
95 | $x$
96 | LANGUAGE plpgsql;
97 | SELECT test_explain();
98 | test_explain
99 | -----------------------------------------------------------------------------
100 | SELECT count(*) FROM (SELECT * FROM t1) AS x +
101 | totalcalls=6 , totalrows=6 +
102 | Aggregate (cost=xxx rows=xxx width=xxx) +
103 | Async Capable: false +
104 | -> Seq Scan on t1 (cost=xxx rows=xxx width=xxx) +
105 | Async Capable: false +
106 | calls=1, rows=1 +
107 | Aggregate (cost=xxx rows=xxx width=xxx) +
108 | Async Capable: false +
109 | -> Bitmap Heap Scan on t1 (cost=xxx rows=xxx width=xxx) +
110 | Async Capable: false +
111 | -> Bitmap Index Scan using t1_a_idx (cost=xxx rows=xxx width=xxx)+
112 | Async Capable: false +
113 | calls=2, rows=2 +
114 | Aggregate (cost=xxx rows=xxx width=xxx) +
115 | Async Capable: false +
116 | -> Index Only Scan using t1_a_idx on t1 (cost=xxx rows=xxx width=xxx) +
117 | Async Capable: false +
118 | calls=3, rows=3 +
119 |
120 | (1 row)
121 |
122 | DROP FUNCTION test_explain();
123 | DROP TABLE t1;
124 |
--------------------------------------------------------------------------------
/json2sql.pl:
--------------------------------------------------------------------------------
1 | #! /usr/bin/perl
2 |
3 | print <<'EOS';
4 | \echo ###### Prepare for the test
5 | \set QUIET
6 | SET client_min_messages = 'error';
7 | CREATE EXTENSION IF NOT EXISTS pg_store_plans;
8 | DROP TABLE IF EXISTS plans;
9 | CREATE TABLE plans (id int, title text, lplan text, splan text);
10 | SET client_min_messages = 'notice';
11 | \set ECHO none
12 |
13 | \echo ###### insert original JSON plans
14 | INSERT INTO plans (VALUES
15 | EOS
16 |
17 | $plan_no = -1;
18 | $title = "###### Plan $plan_no: all properties 1/2";
19 | setplan0(0); # Without "Unknown Key"
20 | print "($plan_no, \'$title\',\n";
21 | print " $escape'$plan')";
22 | $plan_no--;
23 |
24 | $title = "###### Plan $plan_no: all properties 2/2";
25 | setplan1();
26 | print ",($plan_no, \'$title\',\n";
27 | print " $escape'$plan')";
28 | $plan_no--;
29 |
30 | $title = "###### Plan $plan_no: expression tokens 1/1";
31 | setplan2();
32 | print ",($plan_no, \'$title\',\n";
33 | print " $escape'$plan')";
34 | $plan_no--;
35 |
36 | $title = "###### Plan $plan_no: some properties plus unknown key";
37 | setplan0(1); # With "Unknown Key"
38 | print ",($plan_no, \'$title\',\n";
39 | print " $escape'$plan')";
40 |
41 | $plan_no = 1;
42 | $state = 0;
43 | $indent = "";
44 | while(<>) {
45 | chomp;
46 | if ($state == 0) {
47 | next if (!/^###### (.*$)/);
48 | $title = "###### Plan $plan_no: $1";
49 | $state = 1;
50 | } elsif ($state == 1) {
51 | # edit auto_explain's result
52 | next if (/^psql:makeplanfile.sql/);
53 |
54 | if (/[}\]:,]/) {
55 | die("??? : $_");
56 | }
57 | next if (!/^( *)\{ *\+?$/);
58 | $indent = $1;
59 | $plan = $_;
60 | $plan =~ s/^ (.*[^ ]) *\+$/$1\n/;
61 | chomp($plan);
62 | $plan .= "\n";
63 | $state = 2;
64 | } elsif ($state == 2) {
65 | # edit auto_explain's result
66 | next if (/^ "Query Text":/);
67 |
68 | if (/^$indent} *\+?$/) {
69 | $state = 3;
70 | }
71 | $l = $_;
72 | $l =~ s/^ (.*[^ ]) *\+$/$1/;
73 | $plan .= "$l";
74 | $plan .= "\n" if ($state == 2);
75 | } elsif ($state == 3) {
76 | $escape = "";
77 | if ($plan =~ /'/ || $plan =~ /\\\"/) {
78 | $escape = "E";
79 | }
80 | # Add escape char for '''
81 | $plan =~ s/'/\\'/g;
82 | # Add escape char for '\"'
83 | $plan =~ s/\\\"/\\\\\"/g;
84 |
85 | # Remove "Total Runtime" line.
86 | $plan =~ s/,\n *"Total Runtime":.*\n/\n/;
87 |
88 | print ",\n($plan_no, \'$title\',\n";
89 | print " $escape'$plan')";
90 | $plan_no++;
91 | $state = 0;
92 | }
93 | }
94 |
95 | print <<'EOS';
96 | );
97 |
98 | \pset pager
99 | \set ECHO queries
100 |
101 | \echo ###### set shortened JSON
102 | UPDATE plans SET splan = pg_store_plans_shorten(lplan);
103 |
104 | \echo ###### tag abbreviation test
105 | SELECT splan FROM plans WHERE id in (-1, -2);
106 |
107 | \echo ###### JSON properties round-trip test: !!! This shouldn''''t return a row
108 | SELECT id FROM plans
109 | where pg_store_plans_jsonplan(splan) <> lplan;
110 |
111 | \pset format unaligned
112 | \pset tuples_only on
113 | \pset recordsep '\n\n=======\n'
114 | \echo ###### format conversion tests
115 | SELECT '### '||'yaml-short '||title||E'\n'||
116 | pg_store_plans_yamlplan(splan)
117 | FROM plans WHERE id BETWEEN 1 AND 3 or id = 1 ORDER BY id;
118 | \echo ##################
119 | SELECT '### '||'xml-short '||title||E'\n'||
120 | pg_store_plans_xmlplan(splan)
121 | FROM plans WHERE id BETWEEN 4 AND 6 or id = 1 ORDER BY id;
122 |
123 | \echo ###### text format output test
124 | SELECT '### '||'TEXT-short '||title||E'\n'||
125 | pg_store_plans_textplan(splan)
126 | FROM plans WHERE id >= 0 ORDER BY id;
127 |
128 |
129 | \echo ###### long-json-as-a-source test
130 | SELECT '### '||'yaml-long JSON '||title||E'\n'||
131 | pg_store_plans_yamlplan(lplan)
132 | FROM plans WHERE id = 1 ORDER BY id;
133 | \echo ##################
134 | SELECT '### '||'xml-long JSON '||title||E'\n'||
135 | pg_store_plans_xmlplan(lplan)
136 | FROM plans WHERE id = 1 ORDER BY id;
137 | \echo ##################
138 | SELECT '### '||'text-long JSON '||title||E'\n'||
139 | pg_store_plans_xmlplan(lplan)
140 | FROM plans WHERE id = 1 ORDER BY id;
141 |
142 | \echo ###### chopped-source test
143 | SELECT '### '||'inflate-chopped '||title||E'\n'||
144 | pg_store_plans_jsonplan(substring(splan from 1 for char_length(splan) / 3))
145 | FROM plans WHERE id BETWEEN 16 AND 18 ORDER BY id;
146 | \echo ##################
147 | SELECT '### '||'yaml-chopped '||title||E'\n'||
148 | pg_store_plans_yamlplan(substring(splan from 1 for char_length(splan) / 3))
149 | FROM plans WHERE id BETWEEN 19 AND 21 ORDER BY id;
150 | \echo ##################
151 | SELECT '### '||'xml-chopped '||title||E'\n'||
152 | pg_store_plans_xmlplan(substring(splan from 1 for char_length(splan) / 3))
153 | FROM plans WHERE id BETWEEN 22 AND 24 ORDER BY id;
154 | \echo ##################
155 | SELECT '### '||'text-chopped '||title||E'\n'||
156 | pg_store_plans_textplan(substring(splan from 1 for char_length(splan) / 3))
157 | FROM plans WHERE id BETWEEN 25 AND 27 ORDER BY id;
158 |
159 | \echo ###### shorten test
160 | SELECT '### '||'shorten '||title||E'\n'||
161 | pg_store_plans_shorten(lplan)
162 | FROM plans WHERE id = -2 ORDER BY id;
163 | \echo ###### normalize test
164 | SELECT '### '||'normalize '||title||E'\n'||
165 | pg_store_plans_normalize(lplan)
166 | FROM plans ORDER BY id;
167 |
168 | EOS
169 |
170 | sub setplan0 {
171 | my($addunknown) = @_;
172 | $plan = << 'EOS';
173 | {
174 | "Plan": 0,
175 | "Plans": 0,
176 | "Node Type": "Result",
177 | "Node Type": "ModifyTable",
178 | "Node Type": "Append",
179 | "Node Type": "Merge Append",
180 | "Node Type": "Recursive Union",
181 | "Node Type": "BitmapAnd",
182 | "Node Type": "BitmapOr",
183 | "Node Type": "Seq Scan",
184 | "Node Type": "Index Scan",
185 | "Node Type": "Index Only Scan",
186 | "Node Type": "Bitmap Index Scan",
187 | "Node Type": "Bitmap Heap Scan",
188 | "Node Type": "Tid Scan",
189 | "Node Type": "Subquery Scan",
190 | "Node Type": "Function Scan",
191 | "Node Type": "Values Scan",
192 | "Node Type": "CTE Scan",
193 | "Node Type": "WorkTable Scan",
194 | "Node Type": "Foreign Scan",
195 | "Node Type": "Nested Loop",
196 | "Node Type": "Merge Join",
197 | "Node Type": "Hash Join",
198 | "Node Type": "Materialize",
199 | "Node Type": "Sort",
200 | "Node Type": "Group",
201 | "Node Type": "Aggregate",
202 | "Node Type": "WindowAgg",
203 | "Node Type": "Unique",
204 | "Node Type": "Hash",
205 | "Node Type": "SetOp",
206 | "Node Type": "LockRows",
207 | "Node Type": "Limit",
208 | "Node Type": "Sample Scan",
209 | "Node Type": "Gather",
210 | "Node Type": "ProjectSet",
211 | "Node Type": "Table Function Scan",
212 | "Node Type": "Named Tuplestore Scan",
213 | "Node Type": "Gather Merge",
214 | "Parent Relationship": "Outer",
215 | "Parent Relationship": "Inner",
216 | "Parent Relationship": "Subquery",
217 | "Parent Relationship": "Member",
218 | "Parent Relationship": "InitPlan",
219 | "Parent Relationship": "SubPlan",
220 | "Scan Direction": "Backward",
221 | "Scan Direction": "NoMovement",
222 | "Scan Direction": "Forward",
223 | "Index Name": 0,
224 | "Relation Name": 0,
225 | "Function Name": 0,
226 | "CTE Name": 0,
227 | "Schema": 0,
228 | "Alias": 0,
229 | "Output": "[]",
230 | "Merge Cond": "a",
231 | "Strategy": "Plain",
232 | "Strategy": "Sorted",
233 | "Strategy": "Hashed",
234 | "Strategy": "Mixed",
235 | "Join Type": "Inner",
236 | "Join Type": "Left",
237 | "Join Type": "Full",
238 | "Join Type": "Right",
239 | "Join Type": "Semi",
240 | "Join Type": "Anti",
241 | "Command": "Intersect",
242 | "Command": "Intersect All",
243 | "Command": "Except",
244 | "Command": "Except All",
245 | "Sort Method": "top-N heapsort",
246 | "Sort Method": "quicksort",
247 | "Sort Method": "external sort",
248 | "Sort Method": "external merge",
249 | "Sort Method": "still in progress",
250 | "Sort Key": "a",
251 | "Group Key": "a",
252 | "Grouping Sets": "a",
253 | "Group Keys": "a",
254 | "Hash Keys": "a",
255 | "Hash Key": "a",
256 | "Parallel Aware": "true",
257 | "Workers Planned": "0",
258 | "Workers Launched": "0",
259 | "Workers": "x",
260 | "Worker Number": "0",
261 | "Async Capable": "false",
262 | "Filter": "a",
263 | "Join Filter": "a",
264 | "Hash Cond": "a",
265 | "Index Cond": "a",
266 | "TID Cond": "a",
267 | "Recheck Cond": "a",
268 | "Conflict Resolution": "a",
269 | "Conflict Arbiter Indexes": "a",
270 | "Tuples Inserted": 0,
271 | "Conflicting Tuples": 0,
272 | "Target Tables": "a",
273 | "Operation": "Insert",
274 | "Operation": "Delete",
275 | "Operation": "Update",
276 | "Subplan Name": "a",
277 | "Triggers": 0,
278 | "Trigger": 0,
279 | "Trigger Name": 0,
280 | "Relation": 0,
281 | "Constraint Name": 0,
282 | "Function Call": 0,
283 | "Startup Cost": 0,
284 | "Total Cost": 0,
285 | "Plan Rows": 0,
286 | "Plan Width": 0,
287 | "Actual Startup Time": 0,
288 | "Actual Total Time": 0,
289 | "Actual Rows": 0,
290 | "Actual Loops": 0,
291 | "Heap Fetches": 0,
292 | "Shared Hit Blocks": 0,
293 | "Shared Read Blocks": 0,
294 | "Shared Dirtied Blocks": 0,
295 | "Shared Written Blocks": 0,
296 | "Local Hit Blocks": 0,
297 | "Local Read Blocks": 0,
298 | "Local Dirtied Blocks": 0,
299 | "Local Written Blocks": 0,
300 | "Temp Read Blocks": 0,
301 | "Temp Written Blocks": 0,
302 | "I/O Read Time": 0,
303 | "I/O Write Time": 0,
304 | "Sort Space Used": 0,
305 | "Sort Space Type": "Disk",
306 | "Sort Space Type": "Memory",
307 | "Peak Memory Usage": 0,
308 | "Original Hash Batches": 0,
309 | "Original Hash Buckets": 0
310 | EOS
311 | chop $plan;
312 | if ($addunknown) {
313 | $plan .= ",\n \"Unknown Key\": \"Unknown Value\"";
314 | }
315 | $plan .= "\n}";
316 |
317 | }
318 |
319 | sub setplan1 {
320 | my($addunknown) = @_;
321 | $plan = << 'EOS';
322 | {
323 | "Hash Batches": 0,
324 | "Hash Buckets": 0,
325 | "Rows Removed by Filter": 0,
326 | "Rows Removed by Index Recheck": 0,
327 | "Time": 0,
328 | "Calls": 0,
329 | "Planning Time": 0,
330 | "Execution Time": 0,
331 | "Exact Heap Blocks": 0,
332 | "Lossy Heap Blocks": 0,
333 | "Rows Removed by Join Filter": 0,
334 | "Target Tables": "dummy",
335 | "Conflict Resolution": "NOTHING",
336 | "Conflict Arbiter Indexes": "ia",
337 | "Tuples Inserted": 123,
338 | "Conflicting Tuples": 234,
339 | "Sampling Method": "system",
340 | "Sampling Parameters": ["''10''::real"],
341 | "Repeatable Seed": "''0''::double precision",
342 | "Workers": "dummy",
343 | "Worker Number": 0,
344 | "Async Capable": 0
345 | EOS
346 |
347 | # Avoid trailing new line
348 | $plan .= "}";
349 |
350 | }
351 |
352 | sub setplan2 {
353 | my($addunknown) = @_;
354 | $plan = << 'EOS';
355 | {"Output": ["~NULL!1.2@''abc''#0b111^0xfff&123|CURRENT_CATALOG`CURRENT_DATE?CURRENT_ROLE+CURRENT_SCHEMA-CURRENT_TIME*CURRENT_TIMESTAMP/CURRENT_USER%LOCALTIME
true=false"]}
356 | EOS
357 |
358 | # Avoid trailing new line
359 | $plan .= "}";
360 |
361 | }
362 |
363 |
364 |
365 |
--------------------------------------------------------------------------------
/makeplanfile.sql:
--------------------------------------------------------------------------------
1 | \set format json
2 | \pset pager
3 |
4 | set work_mem = '1MB';
5 | drop table if exists tt1;
6 | drop table if exists tt2;
7 | drop table if exists tt3;
8 | drop table if exists p cascade;
9 | drop table if exists ct1;
10 | create table p (a int, b int, c text);
11 | create table tt1 (a int, b int not null, c text) inherits (p);
12 | create table tt2 (a int, b int, c text) inherits (p);
13 | create table tt3 (a int, b int, c text) inherits (p);
14 | create index i_tt1 on tt1(a);
15 | create index i_tt2 on tt2(a);
16 | create index i_tt3_a on tt3(a);
17 | create index i_tt3_b on tt3(b);
18 | create table ct1 (a int unique, b int);
19 | insert into ct1 values (1,1), (2,2);
20 |
21 | create or replace function t_tt1_1() returns trigger as $$
22 | BEGIN
23 | NEW.b := -NEW.a;
24 | RETURN NEW;
25 | END;
26 | $$ language plpgsql;
27 | create or replace function t_tt1_2() returns trigger as $$
28 | BEGIN
29 | NEW.c := 'tt1';
30 | RETURN NEW;
31 | END;
32 | $$ language plpgsql;
33 | create trigger tt1_trig_1 before insert or update on tt1
34 | for each row execute procedure t_tt1_1();
35 | create trigger tt1_trig_2 before insert or update on tt1
36 | for each row execute procedure t_tt1_2();
37 | insert into tt2 (select a, -a, 'tt2' from generate_series(7000, 17000) a);
38 | insert into tt3 (select a, -a, 'tt3' from generate_series(0, 100000) a);
39 | insert into tt3 (select 5000, a, 'tt3' from generate_series(0, 40000) a);
40 | insert into tt3 (select a, 555, 'tt3' from generate_series(0, 40000) a);
41 |
42 | \echo ###### Insert, Trigger
43 | explain (analyze on, buffers on, verbose on, format :format)
44 | insert into tt1 (select a from generate_series(0, 10000) a);
45 |
46 | \echo ###### Update, Trigger
47 | explain (analyze on, buffers on, verbose on, format :format)
48 | update tt1 set a = a + 1;
49 | \echo ###### Delete
50 | explain (analyze on, buffers on, verbose on, format :format)
51 | delete from tt1 where a % 10 = 0;
52 |
53 | ----
54 | delete from tt1;
55 | insert into tt1 (select a from generate_series(0, 10000) a);
56 | analyze;
57 |
58 | \echo ###### Result, Append Seq Scan
59 | explain (analyze on, buffers on, verbose on, format :format)
60 | select *, 1 from
61 | (select a + 1, 3 from tt1 union all select a, 4 from tt2) as x;
62 | \echo ###### Index scan (forward) ANY, array in expr, escape
63 | explain (analyze on, buffers on, verbose on, format :format)
64 | select * from tt1 "x""y" where a in (50, 120, 300, 500);
65 | \echo ###### Index scan (backward), MergeJoin, Sort, quicksort, alias
66 | explain (analyze on, buffers on, verbose on, format :format)
67 | select x.b, x.c from tt1 x join tt2 y on (x.a = -y.b * 3)
68 | order by x.a desc limit 10;
69 | \echo ###### IndexOnlyScan
70 | explain (analyze on, buffers on, verbose on, format :format)
71 | select a from tt1 where a < 10;
72 | \echo ###### Plain Aggregate, CTE, Recursive Union, WorkTable Scan, CTE Scan
73 | explain (analyze on, buffers on, verbose on, format :format)
74 | with recursive cte1(a) as
75 | (select 1 union all
76 | select a + 1 from cte1 where a < 10)
77 | select sum(a) from cte1;
78 | \echo ###### FunctionScan, Hash/HashJoin, Nested Loop
79 | explain (analyze on, buffers on, verbose on, format :format)
80 | select datname from pg_stat_activity;
81 | \echo ###### MergeAppend, Values
82 | explain (analyze on, buffers on, verbose on, format :format)
83 | (select a from tt1 order by a) union all
84 | (select a from (values (100), (200), (300)) as tv(a))
85 | order by a;
86 | \echo ###### Append, HashAggregate
87 | explain (analyze on, buffers on, verbose on, format :format)
88 | select a from tt1 union select b from tt2;
89 | \echo ###### GroupAggregate
90 | set work_mem = '128kB';
91 | explain (analyze on, buffers on, verbose on, format :format)
92 | select sum(a) from tt1 group by b;
93 | set work_mem = '1MB';
94 | \echo ###### Group
95 | set work_mem = '128kB';
96 | explain (analyze on, buffers on, verbose on, format :format)
97 | select b from tt1 group by b;
98 | set work_mem = '1MB';
99 | \echo ###### SetOp intersect, SubqueryScan
100 | explain (analyze on, buffers on, verbose on, format :format)
101 | select a from tt1 intersect select b from tt2 order by a;
102 | \echo ###### Sorted SetOp, Sort on Disk
103 | set work_mem = '128kB';
104 | explain (analyze on, buffers on, verbose on, format :format)
105 | select a from tt1 intersect select b from tt2 order by a;
106 | set work_mem = '1MB';
107 | \echo ###### HashSetOp intersect All, SubqueryScan
108 | explain (analyze on, buffers on, verbose on, format :format)
109 | select a from tt1 intersect all select b from tt2 order by a;
110 | \echo ###### HashSetOp except, SubqueryScan
111 | explain (analyze on, buffers on, verbose on, format :format)
112 | select a from tt1 except select b from tt2 order by a;
113 | \echo ###### HashSetOp except all, SubqueryScan
114 | explain (analyze on, buffers on, verbose on, format :format)
115 | select a from tt1 except all select b from tt2 order by a;
116 | \echo ###### merge LEFT join
117 | set work_mem = '64kB';
118 | explain (analyze on, buffers on, verbose on, format :format)
119 | select x.b from tt1 x left join tt3 y on (x.a = y.a);
120 | set work_mem = '1MB';
121 | \echo ###### hash FULL join
122 | explain (analyze on, buffers on, verbose on, format :format)
123 | select x.b from tt1 x full outer join tt2 y on (x.a = y.a);
124 | \echo ###### hash SEMI join
125 | explain (analyze on, buffers on, verbose on, format :format)
126 | select * from tt1 where a = any(select b from tt2);
127 | \echo ###### Hash Anti Join
128 | explain (analyze on, buffers on, verbose on, format :format)
129 | select * from tt1 where not exists (select * from tt2 where tt1.a = tt2.b);
130 | \echo ###### WindowAgg
131 | explain (analyze on, buffers on, verbose on, format :format)
132 | select first_value(a) over (partition by a / 10) from tt1;
133 | \echo ###### Unique
134 | explain (analyze on, buffers on, verbose on, format :format)
135 | select distinct a from tt1 order by a;
136 | \echo ###### PlainAggregate
137 | explain (analyze on, buffers on, verbose on, format :format)
138 | select sum(a) from tt1;
139 | \echo ###### BitmapIndexScan/BitmapHeapScan, BitmapOr, lossy
140 | set enable_seqscan to false;
141 | set work_mem to '64kB';
142 | explain (analyze on, buffers on, verbose on, format :format)
143 | select * from tt3 where b > -99998;
144 | \echo ###### Join Filter
145 | set enable_seqscan to true;
146 | set enable_indexscan to false;
147 | set enable_bitmapscan to false;
148 | explain (analyze on, buffers on, verbose on, format :format)
149 | SELECT tt2.* from tt2
150 | LEFT OUTER JOIN tt3 ON (tt2.a < tt3.a) where tt3.a + tt2.a < 100000
151 | LIMIT 100;
152 | reset enable_seqscan;
153 | reset enable_indexscan;
154 | reset enable_bitmapscan;
155 | reset work_mem;
156 | \echo ###### TidScan
157 | explain (analyze on, buffers on, verbose on, format :format)
158 | select * from tt3 where ctid = '(0,28)';
159 | \echo ###### LockRows
160 | begin;
161 | explain (analyze on, buffers on, verbose on, format :format)
162 | select a from tt1 where a % 10 = 0 for update;
163 | rollback;
164 | \echo ###### Materialize
165 | explain (analyze on, buffers on, verbose on, format :format)
166 | select * from tt1 where a = all(select b from tt2);
167 | \echo ###### Update on partitioned tables
168 | explain (analyze on, buffers on, verbose on, format :format)
169 | UPDATE p SET b = b + 1;
170 | \echo ###### Delete on partitioned tables
171 | explain (analyze on, buffers on, verbose on, format :format)
172 | DELETE FROM p WHERE a = 100;
173 | \echo ###### ON CONFLICT
174 | explain (analyze on, buffers on, verbose on, format :format)
175 | INSERT INTO ct1 VALUES (1,1) ON CONFLICT (a) DO UPDATE SET b = EXCLUDED.b + 1;
176 | \echo ###### GROUP BY
177 | explain (analyze on, buffers on, verbose on, format :format)
178 | SELECT a, b, max(c) FROM tt1 GROUP BY a, b;
179 | \echo ###### GROUPING SETS
180 | explain (analyze on, buffers on, verbose on, format :format)
181 | SELECT a, b, max(c) FROM tt1 GROUP BY GROUPING SETS ((a), (b), ());
182 | \echo ###### Table sample
183 | explain (analyze on, buffers on, verbose on, format :format)
184 | SELECT * FROM tt1 TABLESAMPLE system(1) REPEATABLE (1);
185 |
186 | \echo ###### Project Set
187 | explain (analyze on, buffers on, verbose on, format :format)
188 | SELECT * from XMLTABLE('//towns/town'
189 | PASSING BY REF '<towns><town>Toronto</town><town>Ottawa</town></towns>'
190 | COLUMNS name text);
191 |
192 | \echo ###### Incremental Sort
193 | explain (analyze on, buffers on, verbose on, format :format)
194 | WITH x AS (SELECT i/100 + 1 AS a, i + 1 AS b FROM generate_series(0, 999) i)
195 | SELECT * FROM (SELECT * FROM x ORDER BY a) s ORDER BY a, b LIMIT 31;
196 |
197 | -- Named Tuplestore Scan -- requires auto_explain
198 | DROP TABLE IF EXISTS e1 CASCADE;
199 | CREATE TABLE e1 (a int, b int);
200 | CREATE OR REPLACE function e1_t1() RETURNS TRIGGER AS $$
201 | DECLARE
202 | total int;
203 | BEGIN
204 | SELECT sum(a) INTO total FROM post;
205 | NEW.b := total;
206 | RETURN NEW;
207 | END;
208 | $$ LANGUAGE plpgsql;
209 | CREATE TRIGGER e1_t1 AFTER INSERT ON e1
210 | REFERENCING NEW TABLE AS post
211 | FOR EACH ROW EXECUTE PROCEDURE e1_t1();
212 |
213 | INSERT INTO e1 VALUES (1, 1);
214 |
215 | load 'auto_explain';
216 | set auto_explain.log_min_duration to 0;
217 | set auto_explain.log_analyze to true;
218 | set auto_explain.log_buffers to true;
219 | set auto_explain.log_buffers to true;
220 | set auto_explain.log_format to :format;
221 | set auto_explain.log_timing to true;
222 | set auto_explain.log_nested_statements to true;
223 | set client_min_messages to LOG;
224 | set log_min_messages to FATAL; -- Inhibit LOG by auto_explain
225 | \echo ###### Named Tuplestore Scan
226 | CREATE TRIGGER e1_t2 AFTER UPDATE ON e1
227 | REFERENCING NEW TABLE AS post OLD TABLE AS pre
228 | FOR EACH ROW EXECUTE PROCEDURE e1_t1();
229 | UPDATE e1 SET a = a + 1;
230 | set client_min_messages to DEFAULT;
231 | set log_min_messages to DEFAULT;
232 | set auto_explain.log_min_duration to -1;
233 |
234 | -- ###### Parallel
235 | drop table if exists lt1;
236 | create table lt1 (a int, b text);
237 | alter table lt1 alter column b set storage plain;
238 | insert into lt1 (select a, repeat('x', 1000) from generate_series(0, 99999) a);
239 | set max_parallel_workers_per_gather to 2;
240 | set parallel_tuple_cost to 0;
241 | set parallel_setup_cost to 0;
242 | set min_parallel_table_scan_size to 0;
243 | set min_parallel_index_scan_size to 0;
244 |
245 | \echo ###### Parallel Seq Scan
246 | explain (analyze on, buffers on, verbose on, format :format)
247 | SELECT * FROM lt1;
248 |
249 | \echo ###### Parallel Index Scan
250 | explain (analyze on, buffers on, verbose on, format :format)
251 | SELECT * FROM tt1 where a < 100;
252 |
253 | \echo ###### Gather Merge
254 | explain (analyze on, buffers on, verbose on, format :format)
255 | SELECT a FROM tt1 ORDER BY a;
256 |
257 | -- BitmapAnd/Inner/Right/ForeignScan
258 |
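Every EXPLAIN above interpolates the psql variable format (referenced as :format), so it must be set before this script is sourced. A hypothetical invocation sketch (the variable name comes from the statements above; how the regression harness actually sets it is not shown in this file):

    \set format json
    \i <path to this script>

Any format accepted by EXPLAIN, such as json, xml or yaml, can be substituted.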
--------------------------------------------------------------------------------
/pg_store_plans--1.9.sql:
--------------------------------------------------------------------------------
1 | /* pg_store_plans/pg_store_plans--1.9.sql */
2 |
3 | -- complain if script is sourced in psql, rather than via CREATE EXTENSION
4 | \echo Use "CREATE EXTENSION pg_store_plans" to load this file. \quit
5 |
6 | --- Define pg_store_plans_info
7 | CREATE FUNCTION pg_store_plans_info(
8 | OUT dealloc bigint,
9 | OUT stats_reset timestamp with time zone
10 | )
11 | RETURNS record
12 | AS 'MODULE_PATHNAME'
13 | LANGUAGE C STRICT VOLATILE PARALLEL SAFE;
14 |
15 | CREATE VIEW pg_store_plans_info AS
16 | SELECT * FROM pg_store_plans_info();
17 |
18 | GRANT SELECT ON pg_store_plans_info TO PUBLIC;
19 |
20 | -- Register functions.
21 | CREATE FUNCTION pg_store_plans_reset()
22 | RETURNS void
23 | AS 'MODULE_PATHNAME'
24 | LANGUAGE C PARALLEL SAFE;
25 | CREATE FUNCTION pg_store_plans_shorten(text)
26 | RETURNS text
27 | AS 'MODULE_PATHNAME'
28 | LANGUAGE C
29 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
30 | CREATE FUNCTION pg_store_plans_normalize(text)
31 | RETURNS text
32 | AS 'MODULE_PATHNAME'
33 | LANGUAGE C
34 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
35 | CREATE FUNCTION pg_store_plans_jsonplan(text)
36 | RETURNS text
37 | AS 'MODULE_PATHNAME'
38 | LANGUAGE C
39 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
40 | CREATE FUNCTION pg_store_plans_textplan(text)
41 | RETURNS text
42 | AS 'MODULE_PATHNAME'
43 | LANGUAGE C
44 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
45 | CREATE FUNCTION pg_store_plans_yamlplan(text)
46 | RETURNS text
47 | AS 'MODULE_PATHNAME'
48 | LANGUAGE C
49 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
50 | CREATE FUNCTION pg_store_plans_xmlplan(text)
51 | RETURNS text
52 | AS 'MODULE_PATHNAME'
53 | LANGUAGE C
54 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
55 | CREATE FUNCTION pg_store_plans_hash_query(text)
56 | RETURNS oid
57 | AS 'MODULE_PATHNAME'
58 | LANGUAGE C
59 | RETURNS NULL ON NULL INPUT PARALLEL SAFE;
60 | CREATE FUNCTION pg_store_plans(
61 | OUT userid oid,
62 | OUT dbid oid,
63 | OUT queryid int8,
64 | OUT planid int8,
65 | OUT plan text,
66 | OUT calls int8,
67 | OUT total_time float8,
68 | OUT min_time float8,
69 | OUT max_time float8,
70 | OUT mean_time float8,
71 | OUT stddev_time float8,
72 | OUT rows int8,
73 | OUT shared_blks_hit int8,
74 | OUT shared_blks_read int8,
75 | OUT shared_blks_dirtied int8,
76 | OUT shared_blks_written int8,
77 | OUT local_blks_hit int8,
78 | OUT local_blks_read int8,
79 | OUT local_blks_dirtied int8,
80 | OUT local_blks_written int8,
81 | OUT temp_blks_read int8,
82 | OUT temp_blks_written int8,
83 | OUT shared_blk_read_time float8,
84 | OUT shared_blk_write_time float8,
85 | OUT local_blk_read_time float8,
86 | OUT local_blk_write_time float8,
87 | OUT temp_blk_read_time float8,
88 | OUT temp_blk_write_time float8,
89 | OUT first_call timestamptz,
90 | OUT last_call timestamptz
91 | )
92 | RETURNS SETOF record
93 | AS 'MODULE_PATHNAME', 'pg_store_plans_1_9'
94 | LANGUAGE C
95 | VOLATILE PARALLEL SAFE;
96 |
97 | -- Register a view on the function for ease of use.
98 | CREATE VIEW pg_store_plans AS
99 | SELECT * FROM pg_store_plans();
100 |
101 | GRANT SELECT ON pg_store_plans TO PUBLIC;
102 |
103 | -- Don't want this to be available to non-superusers.
104 | REVOKE ALL ON FUNCTION pg_store_plans_reset() FROM PUBLIC;
105 |
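A minimal usage sketch, not part of the extension script: the view, columns and helper functions are those defined above, but the queries themselves are only illustrative.

    -- five most time-consuming plans, rendered as text
    SELECT queryid, planid, calls, total_time,
           pg_store_plans_textplan(plan) AS plan_text
      FROM pg_store_plans
     ORDER BY total_time DESC
     LIMIT 5;

    -- deallocation count and time of the last statistics reset
    SELECT * FROM pg_store_plans_info;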
--------------------------------------------------------------------------------
/pg_store_plans.control:
--------------------------------------------------------------------------------
1 | # pg_store_plans extension
2 | comment = 'track plan statistics of all SQL statements executed'
3 | default_version = '1.9'
4 | module_pathname = '$libdir/pg_store_plans'
5 | relocatable = true
6 |
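A hedged setup sketch, assuming a standard installation: as the installation script above notes, the extension is created with CREATE EXTENSION, and as a statistics-collecting module it is normally preloaded at server start.

    -- in postgresql.conf (or via ALTER SYSTEM), followed by a restart:
    --   shared_preload_libraries = 'pg_store_plans'
    CREATE EXTENSION pg_store_plans;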
--------------------------------------------------------------------------------
/pgsp_explain.c:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 | * pgsp_explain.c: extracted code from explain.c for explain of triggers.
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_explain.c
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | #include "postgres.h"
15 | #include "commands/explain.h"
16 | #include "utils/rel.h"
17 | #include "utils/lsyscache.h"
18 | #include "utils/json.h"
19 | #include "pgsp_explain.h"
20 |
21 | static void pgspExplainOpenGroup(const char *objtype, const char *labelname,
22 | bool labeled, ExplainState *es);
23 | static void pgspExplainCloseGroup(const char *objtype, const char *labelname,
24 | bool labeled, ExplainState *es);
25 | static void report_triggers(ResultRelInfo *rInfo, bool show_relname,
26 | ExplainState *es);
27 | static void pgspExplainPropertyText(const char *qlabel, const char *value, ExplainState *es);
28 | static void pgspExplainPropertyFloat(const char *qlabel, double value, int ndigits,
29 | ExplainState *es);
30 | static void pgspExplainProperty(const char *qlabel, const char *value, bool numeric,
31 | ExplainState *es);
32 | static void pgspExplainJSONLineEnding(ExplainState *es);
33 |
34 | /*
35 | * ExplainState was modified in 9.4.1 and 9.3.6, but the change is for
36 | * internal use and, to avoid binary incompatibility, does not change the
37 | * size of ExplainState. So we can safely use ExplainState->extra as if it
38 | * were grouping_stack, and we should do so. Using ->extra as a List *
39 | * discards the memory for ExplainStateExtra, but that is not a problem
40 | * since it is allocated with palloc.
41 | */
42 | #if (PG_VERSION_NUM >= 90401 && PG_VERSION_NUM < 90500) || \
43 | (PG_VERSION_NUM >= 90306 && PG_VERSION_NUM < 90400)
44 | #define GROUPING_STACK(es) (*((List **)(&(es)->extra)))
45 | #else
46 | #define GROUPING_STACK(es) ((es)->grouping_stack)
47 | #endif
48 |
49 | /* ExplainInitState() is replaced with NewExplainState() in 9.5 */
50 | #if PG_VERSION_NUM < 90500
51 | ExplainState *
52 | NewExplainState(void)
53 | {
54 | ExplainState *es = (ExplainState *)palloc0(sizeof(ExplainState));
55 |
56 | ExplainInitState(es);
57 | es->costs = true;
58 | return es;
59 | }
60 | #endif
61 |
62 | void
63 | pgspExplainTriggers(ExplainState *es, QueryDesc *queryDesc)
64 | {
65 | if (es->analyze)
66 | {
67 | ResultRelInfo *rInfo;
68 | bool show_relname;
69 | #if PG_VERSION_NUM < 140000
70 | int numrels = queryDesc->estate->es_num_result_relations;
71 | int nr;
72 | #else
73 | List *resultrels;
74 | List *routerels;
75 | #endif
76 | List *targrels = queryDesc->estate->es_trig_target_relations;
77 | ListCell *l;
78 |
79 | #if PG_VERSION_NUM >= 140000
80 | resultrels = queryDesc->estate->es_opened_result_relations;
81 | routerels = queryDesc->estate->es_tuple_routing_result_relations;
82 | targrels = queryDesc->estate->es_trig_target_relations;
83 | #endif
84 |
85 | pgspExplainOpenGroup("Triggers", "Triggers", false, es);
86 |
87 | #if PG_VERSION_NUM < 140000
88 | show_relname = (numrels > 1 || targrels != NIL);
89 | rInfo = queryDesc->estate->es_result_relations;
90 | for (nr = 0; nr < numrels; rInfo++, nr++)
91 | #else
92 | show_relname = (list_length(resultrels) > 1 ||
93 | routerels != NIL || targrels != NIL);
94 | foreach(l, resultrels)
95 | {
96 | rInfo = (ResultRelInfo *) lfirst(l);
97 | #endif
98 | report_triggers(rInfo, show_relname, es);
99 | #if PG_VERSION_NUM >= 140000
100 | }
101 |
102 | foreach(l, routerels)
103 | {
104 | rInfo = (ResultRelInfo *) lfirst(l);
105 | report_triggers(rInfo, show_relname, es);
106 | }
107 | #endif
108 |
109 | foreach(l, targrels)
110 | {
111 | rInfo = (ResultRelInfo *) lfirst(l);
112 | report_triggers(rInfo, show_relname, es);
113 | }
114 |
115 | pgspExplainCloseGroup("Triggers", "Triggers", false, es);
116 | }
117 | }
118 |
119 | static void
120 | pgspExplainOpenGroup(const char *objtype, const char *labelname,
121 | bool labeled, ExplainState *es)
122 | {
123 | pgspExplainJSONLineEnding(es);
124 | appendStringInfoSpaces(es->str, 2 * es->indent);
125 | if (labelname)
126 | {
127 | escape_json(es->str, labelname);
128 | appendStringInfoString(es->str, ": ");
129 | }
130 | appendStringInfoChar(es->str, labeled ? '{' : '[');
131 |
132 | GROUPING_STACK(es) = lcons_int(0, GROUPING_STACK(es));
133 | es->indent++;
134 | }
135 |
136 | static void
137 | pgspExplainCloseGroup(const char *objtype, const char *labelname,
138 | bool labeled, ExplainState *es)
139 | {
140 | es->indent--;
141 | appendStringInfoChar(es->str, '\n');
142 | appendStringInfoSpaces(es->str, 2 * es->indent);
143 | appendStringInfoChar(es->str, labeled ? '}' : ']');
144 | GROUPING_STACK(es) = list_delete_first(GROUPING_STACK(es));
145 | }
146 |
147 | static void
148 | report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es)
149 | {
150 | int nt;
151 |
152 | if (!rInfo->ri_TrigDesc || !rInfo->ri_TrigInstrument)
153 | return;
154 | for (nt = 0; nt < rInfo->ri_TrigDesc->numtriggers; nt++)
155 | {
156 | Trigger *trig = rInfo->ri_TrigDesc->triggers + nt;
157 | Instrumentation *instr = rInfo->ri_TrigInstrument + nt;
158 | char *relname;
159 | char *conname = NULL;
160 |
161 | /* Must clean up instrumentation state */
162 | InstrEndLoop(instr);
163 |
164 | /*
165 | * We ignore triggers that were never invoked; they likely aren't
166 | * relevant to the current query type.
167 | */
168 | if (instr->ntuples == 0)
169 | continue;
170 |
171 | pgspExplainOpenGroup("Trigger", NULL, true, es);
172 |
173 | relname = RelationGetRelationName(rInfo->ri_RelationDesc);
174 | if (OidIsValid(trig->tgconstraint))
175 | conname = get_constraint_name(trig->tgconstraint);
176 |
177 | pgspExplainPropertyText("Trigger Name", trig->tgname, es);
178 | if (conname)
179 | pgspExplainPropertyText("Constraint Name", conname, es);
180 | pgspExplainPropertyText("Relation", relname, es);
181 | pgspExplainPropertyFloat("Time", 1000.0 * instr->total, 3, es);
182 | pgspExplainPropertyFloat("Calls", instr->ntuples, 0, es);
183 |
184 | if (conname)
185 | pfree(conname);
186 |
187 | pgspExplainCloseGroup("Trigger", NULL, true, es);
188 | }
189 | }
190 |
191 | static void
192 | pgspExplainPropertyText(const char *qlabel, const char *value, ExplainState *es)
193 | {
194 | pgspExplainProperty(qlabel, value, false, es);
195 | }
196 |
197 | static void
198 | pgspExplainPropertyFloat(const char *qlabel, double value, int ndigits,
199 | ExplainState *es)
200 | {
201 | char buf[256];
202 |
203 | snprintf(buf, sizeof(buf), "%.*f", ndigits, value);
204 | pgspExplainProperty(qlabel, buf, true, es);
205 | }
206 |
207 |
208 | static void
209 | pgspExplainProperty(const char *qlabel, const char *value, bool numeric,
210 | ExplainState *es)
211 | {
212 | pgspExplainJSONLineEnding(es);
213 | appendStringInfoSpaces(es->str, es->indent * 2);
214 | escape_json(es->str, qlabel);
215 | appendStringInfoString(es->str, ": ");
216 | if (numeric)
217 | appendStringInfoString(es->str, value);
218 | else
219 | escape_json(es->str, value);
220 | }
221 |
222 | static void
223 | pgspExplainJSONLineEnding(ExplainState *es)
224 | {
225 | Assert(es->format == EXPLAIN_FORMAT_JSON);
226 | if (linitial_int(GROUPING_STACK(es)) != 0)
227 | appendStringInfoChar(es->str, ',');
228 | else
229 | linitial_int(GROUPING_STACK(es)) = 1;
230 | appendStringInfoChar(es->str, '\n');
231 | }
232 |
233 |
--------------------------------------------------------------------------------
/pgsp_explain.h:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 | * pgsp_explain.h: extracted code from explain.c for explain of triggers.
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_explain.h
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | void pgspExplainTriggers(ExplainState *es, QueryDesc *queryDesc);
15 |
16 | /* ExplainInitState() is replaced with NewExplainState() in 9.5 */
17 | #if PG_VERSION_NUM < 90500
18 | ExplainState *NewExplainState(void);
19 | #endif
20 |
--------------------------------------------------------------------------------
/pgsp_json.c:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 | * pgsp_json.c: Plan handler for JSON/XML/YAML style plans
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_json.c
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | #include "postgres.h"
15 | #if PG_VERSION_NUM >= 130000
16 | #include "mb/pg_wchar.h"
17 | #endif
18 | #include "miscadmin.h"
19 | #include "nodes/nodes.h"
20 | #include "nodes/parsenodes.h"
21 | #include "nodes/bitmapset.h"
22 | #include "parser/scanner.h"
23 | #include "utils/xml.h"
24 | #include "utils/json.h"
25 | #if PG_VERSION_NUM < 130000
26 | #include "utils/jsonapi.h"
27 | #else
28 | #include "common/jsonapi.h"
29 | #endif
30 | #include "pgsp_json.h"
31 | #include "pgsp_json_int.h"
32 |
33 | #if PG_VERSION_NUM < 160000
34 | #include "parser/gram.h"
35 | #define JsonParseErrorType void
36 | #define JSONACTION_RETURN_SUCCESS() return
37 | #else
38 | /* In PG16, include/scan.h is gone. Define the required symbols manually. */
39 | /* must be in sync with src/backend/parser/gram.h */
40 | #include "pgsp_token_types.h"
41 | #define JSONACTION_RETURN_SUCCESS() return JSON_SUCCESS
42 | #endif
43 |
44 | #define INDENT_STEP 2
45 |
46 |
47 | void normalize_expr(char *expr, bool preserve_space);
48 | static const char *converter_core(word_table *tbl,
49 | const char *src, pgsp_parser_mode mode);
50 |
51 | static JsonParseErrorType json_objstart(void *state);
52 | static JsonParseErrorType json_objend(void *state);
53 | static JsonParseErrorType json_arrstart(void *state);
54 | static JsonParseErrorType json_arrend(void *state);
55 | static JsonParseErrorType json_ofstart(void *state, char *fname, bool isnull);
56 | static JsonParseErrorType json_aestart(void *state, bool isnull);
57 | static JsonParseErrorType json_scalar(void *state, char *token,
58 | JsonTokenType tokentype);
59 |
60 | static JsonParseErrorType yaml_objstart(void *state);
61 | static JsonParseErrorType yaml_objend(void *state);
62 | static JsonParseErrorType yaml_arrstart(void *state);
63 | static JsonParseErrorType yaml_arrend(void *state);
64 | static JsonParseErrorType yaml_ofstart(void *state, char *fname, bool isnull);
65 | static JsonParseErrorType yaml_aestart(void *state, bool isnull);
66 | static JsonParseErrorType yaml_scalar(void *state, char *token,
67 | JsonTokenType tokentype);
68 |
69 | static void adjust_wbuf(pgspParserContext *ctx, int len);
70 | static char *hyphenate_words(pgspParserContext *ctx, char *src);
71 | static JsonParseErrorType xml_objstart(void *state);
72 | static JsonParseErrorType xml_objend(void *state);
73 | static JsonParseErrorType xml_arrend(void *state);
74 | static JsonParseErrorType xml_ofstart(void *state, char *fname, bool isnull);
75 | static JsonParseErrorType xml_ofend(void *state, char *fname, bool isnull);
76 | static JsonParseErrorType xml_aestart(void *state, bool isnull);
77 | static JsonParseErrorType xml_aeend(void *state, bool isnull);
78 | static JsonParseErrorType xml_scalar(void *state, char *token,
79 | JsonTokenType tokentype) ;
80 |
81 | static void init_json_semaction(JsonSemAction *sem,
82 | pgspParserContext *ctx);
83 |
84 | word_table propfields[] =
85 | {
86 | {P_NodeType, "t" ,"Node Type", NULL, true, conv_nodetype, SETTER(node_type)},
87 | {P_RelationShip, "h" ,"Parent Relationship", NULL, true, conv_relasionship, NULL},
88 | {P_RelationName, "n" ,"Relation Name", NULL, true, NULL, SETTER(obj_name)},
89 | {P_FunctioName, "f" ,"Function Name", NULL, true, NULL, SETTER(obj_name)},
90 | {P_IndexName, "i" ,"Index Name", NULL, true, NULL, SETTER(index_name)},
91 | {P_CTEName, "c" ,"CTE Name", NULL, true, NULL, SETTER(obj_name)},
92 | {P_TrgRelation, "w" ,"Relation", NULL, true, NULL, SETTER(trig_relation)},
93 | {P_Schema, "s" ,"Schema", NULL, true, NULL, SETTER(schema_name)},
94 | {P_Alias, "a" ,"Alias", NULL, true, NULL, SETTER(alias)},
95 | {P_Output, "o" ,"Output", NULL, true, conv_expression, SETTER(output)},
96 | {P_ScanDir, "d" ,"Scan Direction", NULL, true, conv_scandir, SETTER(scan_dir)},
97 | {P_MergeCond, "m" ,"Merge Cond", NULL, true, conv_expression, SETTER(merge_cond)},
98 | {P_Strategy, "g" ,"Strategy", NULL, true, conv_strategy, SETTER(strategy)},
99 | {P_JoinType, "j" ,"Join Type", NULL, true, conv_jointype, SETTER(join_type)},
100 | {P_SortMethod, "e" ,"Sort Method", NULL, true, conv_sortmethod, SETTER(sort_method)},
101 | {P_SortKey, "k" ,"Sort Key", NULL, true, conv_expression, SETTER(sort_key)},
102 | {P_Filter, "5" ,"Filter", NULL, true, conv_expression, SETTER(filter)},
103 | {P_JoinFilter, "6" ,"Join Filter", NULL, true, conv_expression, SETTER(join_filter)},
104 | {P_HashCond, "7" ,"Hash Cond", NULL, true, conv_expression, SETTER(hash_cond)},
105 | {P_IndexCond, "8" ,"Index Cond", NULL, true, conv_expression, SETTER(index_cond)},
106 | {P_TidCond, "9" ,"TID Cond", NULL, true, conv_expression, SETTER(tid_cond)},
107 | {P_RecheckCond, "0" ,"Recheck Cond", NULL, true, conv_expression, SETTER(recheck_cond)},
108 | {P_Operation, "!" ,"Operation", NULL, true, conv_operation, SETTER(operation)},
109 | {P_SubplanName, "q" ,"Subplan Name", NULL, true, NULL, SETTER(subplan_name)},
110 | {P_Command, "b" ,"Command", NULL, true, conv_setsetopcommand,SETTER(setopcommand)},
111 | {P_Triggers, "r" ,"Triggers", NULL, true, NULL, NULL},
112 | {P_Trigger, "u" ,"Trigger", NULL, true, NULL, SETTER(node_type)},
113 | {P_TriggerName, "v" ,"Trigger Name", NULL, true, NULL, SETTER(trig_name)},
114 | {P_ConstraintName, "x" ,"Constraint Name", NULL, true, NULL, NULL},
115 | {P_Plans, "l" ,"Plans", NULL, true, NULL, NULL},
116 | {P_Plan, "p" ,"Plan", NULL, true, NULL, NULL},
117 | {P_GroupKey, "-" ,"Group Key", NULL, true, NULL, SETTER(group_key)},
118 | {P_GroupSets, "=" ,"Grouping Sets", NULL, true, NULL, NULL},
119 | {P_GroupKeys, "\\" ,"Group Keys", NULL, true, NULL, SETTER(group_key)},
120 |
121 | {P_HashKeys, "~" ,"Hash Keys", NULL, true, NULL, SETTER(hash_key)},
122 | {P_HashKey, "|" ,"Hash Key", NULL, true, NULL, SETTER(hash_key)},
123 |
124 | {P_Parallel, "`" ,"Parallel Aware", NULL, true, NULL, SETTER(parallel_aware)},
125 | {P_PartialMode, ">" ,"Partial Mode", NULL, true, conv_partialmode,SETTER(partial_mode)},
126 | {P_WorkersPlanned, "{" ,"Workers Planned", NULL, true, NULL, SETTER(workers_planned)},
127 | {P_WorkersLaunched, "}" ,"Workers Launched", NULL, true, NULL, SETTER(workers_launched)},
128 | {P_InnerUnique, "?" ,"Inner Unique", NULL, true, NULL, SETTER(inner_unique)},
129 | {P_AsyncCapable, "ac", "Async Capable", NULL, true, NULL, SETTER(async_capable)},
130 |
131 | /* Values of these properties are ignored on normalization */
132 | {P_FunctionCall, "y" ,"Function Call", NULL, false, NULL, SETTER(func_call)},
133 | {P_StartupCost, "1" ,"Startup Cost", NULL, false, NULL, SETTER(startup_cost)},
134 | {P_TotalCost, "2" ,"Total Cost", NULL, false, NULL, SETTER(total_cost)},
135 | {P_PlanRows, "3" ,"Plan Rows", NULL, false, NULL, SETTER(plan_rows)},
136 | {P_PlanWidth, "4" ,"Plan Width", NULL, false, NULL, SETTER(plan_width)},
137 | {P_ActualStartupTime,"A","Actual Startup Time", NULL, false, NULL, SETTER(actual_startup_time)},
138 | {P_ActualTotalTime, "B" ,"Actual Total Time", NULL, false, NULL, SETTER(actual_total_time)},
139 | {P_ActualRows, "C" ,"Actual Rows", NULL, false, NULL, SETTER(actual_rows)},
140 | {P_ActualLoops, "D" ,"Actual Loops", NULL, false, NULL, SETTER(actual_loops)},
141 | {P_HeapFetches, "E" ,"Heap Fetches", NULL, false, NULL, SETTER(heap_fetches)},
142 | {P_SharedHitBlks, "F" ,"Shared Hit Blocks", NULL, false, NULL, SETTER(shared_hit_blks)},
143 | {P_SharedReadBlks, "G" ,"Shared Read Blocks", NULL, false, NULL, SETTER(shared_read_blks)},
144 | {P_SharedDirtiedBlks,"H","Shared Dirtied Blocks",NULL,false, NULL, SETTER(shared_dirtied_blks)},
145 | {P_SharedWrittenBlks,"I","Shared Written Blocks",NULL,false, NULL, SETTER(shared_written_blks)},
146 | {P_LocalHitBlks, "J" ,"Local Hit Blocks", NULL, false, NULL, SETTER(local_hit_blks)},
147 | {P_LocalReadBlks, "K" ,"Local Read Blocks", NULL, false, NULL, SETTER(local_read_blks)},
148 | {P_LocalDirtiedBlks,"L" ,"Local Dirtied Blocks",NULL, false, NULL, SETTER(local_dirtied_blks)},
149 | {P_LocalWrittenBlks,"M" ,"Local Written Blocks",NULL, false, NULL, SETTER(local_written_blks)},
150 | {P_TempReadBlks, "N" ,"Temp Read Blocks", NULL, false, NULL, SETTER(temp_read_blks)},
151 | {P_TempWrittenBlks, "O" ,"Temp Written Blocks", NULL, false, NULL, SETTER(temp_written_blks)},
152 | {P_IOReadTime, "P" ,"I/O Read Time", NULL, false, NULL, SETTER(io_read_time)},
153 | {P_IOWwriteTime, "Q" ,"I/O Write Time", NULL, false, NULL, SETTER(io_write_time)},
154 | {P_SortSpaceUsed, "R" ,"Sort Space Used", NULL, false, NULL, SETTER(sort_space_used)},
155 | {P_SortSpaceType, "S" ,"Sort Space Type", NULL, false, conv_sortspacetype,SETTER(sort_space_type)},
156 | {P_PeakMemoryUsage, "T" ,"Peak Memory Usage", NULL, false, NULL, SETTER(peak_memory_usage)},
157 | {P_OrgHashBatches, "U","Original Hash Batches",NULL, false, NULL, SETTER(org_hash_batches)},
158 | {P_OrgHashBuckets, "*","Original Hash Buckets",NULL, false, NULL, SETTER(org_hash_buckets)},
159 | {P_HashBatches, "V" ,"Hash Batches", NULL, false, NULL, SETTER(hash_batches)},
160 | {P_HashBuckets, "W" ,"Hash Buckets", NULL, false, NULL, SETTER(hash_buckets)},
161 | {P_RowsFilterRmvd, "X" ,"Rows Removed by Filter",NULL,false,NULL, SETTER(filter_removed)},
162 | {P_RowsIdxRchkRmvd, "Y" ,"Rows Removed by Index Recheck",NULL,false, NULL, SETTER(idxrchk_removed)},
163 | {P_TrgTime, "Z" ,"Time", NULL, false, NULL, SETTER(trig_time)},
164 | {P_TrgCalls, "z" ,"Calls", NULL, false, NULL, SETTER(trig_calls)},
165 | {P_PlanTime, "#" ,"Planning Time", NULL, false, NULL, SETTER(plan_time)},
166 | {P_ExecTime, "$" ,"Execution Time", NULL, false, NULL, SETTER(exec_time)},
167 | {P_ExactHeapBlks, "&" ,"Exact Heap Blocks", NULL, false, NULL, SETTER(exact_heap_blks)},
168 | {P_LossyHeapBlks, "(" ,"Lossy Heap Blocks", NULL, false, NULL, SETTER(lossy_heap_blks)},
169 | {P_RowsJoinFltRemvd,")" ,"Rows Removed by Join Filter", NULL, false, NULL, SETTER(joinfilt_removed)},
170 | {P_TargetTables, "_" ,"Target Tables", NULL, false, NULL, NULL},
171 | {P_ConfRes, "%" ,"Conflict Resolution", NULL, false, NULL, SETTER(conflict_resolution)},
172 | {P_ConfArbitIdx, "@" ,"Conflict Arbiter Indexes",NULL, false, NULL, SETTER(conflict_arbiter_indexes)},
173 | {P_TuplesInserted, "^" ,"Tuples Inserted", NULL, false, NULL, SETTER(tuples_inserted)},
174 | {P_ConfTuples, "+" ,"Conflicting Tuples", NULL, false, NULL, SETTER(conflicting_tuples)},
175 | {P_SamplingMethod, ":" ,"Sampling Method" , NULL, false, NULL, SETTER(sampling_method)},
176 | {P_SamplingParams, ";" ,"Sampling Parameters" , NULL, false, NULL, SETTER(sampling_params)},
177 | {P_RepeatableSeed, "<" ,"Repeatable Seed" , NULL, false, NULL, SETTER(repeatable_seed)},
178 | {P_Workers, "[" ,"Workers", NULL, false, NULL, NULL},
179 | {P_WorkerNumber, "]" ,"Worker Number", NULL, false, NULL, SETTER(worker_number)},
180 | {P_TableFuncName, "aa" ,"Table Function Name",NULL, false, NULL, SETTER(table_func_name)},
181 |
182 | {P_PresortedKey, "pk" ,"Presorted Key" ,NULL, false, NULL, SETTER(presorted_key)},
183 | {P_FullsortGroups, "fg" ,"Full-sort Groups" ,NULL, false, NULL, NULL},
184 | {P_SortMethodsUsed, "su" ,"Sort Methods Used" ,NULL, false, NULL, SETTER(sortmethod_used)},
185 | {P_SortSpaceMemory, "sm" ,"Sort Space Memory" ,NULL, false, NULL, SETTER(sortspace_mem)},
186 | {P_GroupCount, "gc" ,"Group Count" ,NULL, false, NULL, SETTER(group_count)},
187 | {P_AvgSortSpcUsed, "as" ,"Average Sort Space Used",NULL, false, NULL, SETTER(avg_sortspc_used)},
188 | {P_PeakSortSpcUsed, "ps" ,"Peak Sort Space Used",NULL, false, NULL, SETTER(peak_sortspc_used)},
189 | {P_PreSortedGroups, "pg" ,"Pre-sorted Groups" ,NULL, false, NULL, NULL},
190 |
191 | {P_Invalid, NULL, NULL, NULL, false, NULL, NULL}
192 | };
193 |
194 | word_table nodetypes[] =
195 | {
196 | {T_Result, "a" ,"Result", NULL, false, NULL, NULL},
197 | {T_ModifyTable, "b" ,"ModifyTable", NULL, false, NULL, NULL},
198 | {T_Append, "c" ,"Append", NULL, false, NULL, NULL},
199 | {T_MergeAppend, "d" ,"Merge Append", NULL, false, NULL, NULL},
200 | {T_RecursiveUnion,"e" ,"Recursive Union",NULL, false, NULL, NULL},
201 | {T_BitmapAnd, "f" ,"BitmapAnd", NULL, false, NULL, NULL},
202 | {T_BitmapOr, "g" ,"BitmapOr", NULL, false, NULL, NULL},
203 | #if PG_VERSION_NUM < 160000
204 | {T_Scan, "" , "", "", false, NULL, NULL},
205 | #endif
206 | {T_SeqScan, "h" ,"Seq Scan", NULL, false, NULL, NULL},
207 | {T_IndexScan, "i" ,"Index Scan", NULL, false, NULL, NULL},
208 | {T_IndexOnlyScan,"j","Index Only Scan",NULL, false, NULL, NULL},
209 | {T_BitmapIndexScan,"k" ,"Bitmap Index Scan", NULL, false, NULL, NULL},
210 | {T_BitmapHeapScan,"l" ,"Bitmap Heap Scan", NULL ,false, NULL, NULL},
211 | {T_TidScan, "m" ,"Tid Scan", NULL, false, NULL, NULL},
212 | {T_SubqueryScan,"n" ,"Subquery Scan", NULL, false, NULL, NULL},
213 | {T_FunctionScan,"o" ,"Function Scan", NULL, false, NULL, NULL},
214 | {T_ValuesScan, "p" ,"Values Scan", NULL, false, NULL, NULL},
215 | {T_CteScan, "q" ,"CTE Scan", NULL, false, NULL, NULL},
216 | {T_WorkTableScan,"r","WorkTable Scan", NULL, false, NULL, NULL},
217 | {T_ForeignScan, "s" , "Foreign Scan", NULL, false, NULL, NULL},
218 | #if PG_VERSION_NUM < 160000
219 | {T_Join, "" , "", NULL, false, NULL, NULL},
220 | #endif
221 | {T_NestLoop, "t" ,"Nested Loop", NULL, false, NULL, NULL},
222 | {T_MergeJoin, "u" ,"Merge Join", "Merge", false, NULL, NULL},
223 | {T_HashJoin, "v" ,"Hash Join", "Hash", false, NULL, NULL},
224 | {T_Material, "w" ,"Materialize", NULL, false, NULL, NULL},
225 | {T_Sort, "x" ,"Sort", NULL, false, NULL, NULL},
226 | {T_Group, "y" ,"Group", NULL, false, NULL, NULL},
227 | {T_Agg, "z" ,"Aggregate", NULL, false, NULL, NULL},
228 | {T_WindowAgg, "0" ,"WindowAgg", NULL, false, NULL, NULL},
229 | {T_Unique, "1" ,"Unique", NULL, false, NULL, NULL},
230 | {T_Hash, "2" ,"Hash", NULL, false, NULL, NULL},
231 | {T_SetOp, "3" ,"SetOp", NULL, false, NULL, NULL},
232 | {T_LockRows, "4" ,"LockRows", NULL, false, NULL, NULL},
233 | {T_Limit, "5" ,"Limit", NULL, false, NULL, NULL},
234 | #if PG_VERSION_NUM >= 90500
235 | {T_SampleScan, "B" ,"Sample Scan", NULL, false, NULL, NULL},
236 | #endif
237 | #if PG_VERSION_NUM >= 90600
238 | {T_Gather, "6" ,"Gather", NULL, false, NULL, NULL},
239 | #endif
240 | #if PG_VERSION_NUM >= 100000
241 | {T_ProjectSet, "7" ,"ProjectSet", NULL, false, NULL, NULL},
242 | {T_TableFuncScan,"8","Table Function Scan", NULL, false, NULL, NULL},
243 | {T_NamedTuplestoreScan,"9","Named Tuplestore Scan", NULL, false, NULL, NULL},
244 | {T_GatherMerge, "A" ,"Gather Merge", NULL, false, NULL, NULL},
245 | #endif
246 | #if PG_VERSION_NUM >= 130000
247 | {T_IncrementalSort, "C" ,"Incremental Sort", NULL, false, NULL, NULL},
248 | #endif
249 | #if PG_VERSION_NUM >= 140000
250 | {T_TidRangeScan,"D", "Tid Range Scan", NULL, false, NULL, NULL},
251 | {T_Memoize, "E", "Memoize", NULL, false, NULL, NULL},
252 | #endif
253 |
254 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
255 | };
256 |
257 | word_table directions[] =
258 | {
259 | {T_Invalid, "b" ,"Backward", "Backward", false, NULL, NULL},
260 | {T_Invalid, "n" ,"NoMovement","", false, NULL, NULL},
261 | {T_Invalid, "f" ,"Forward", "", false, NULL, NULL},
262 | {T_Invalid, NULL , NULL, NULL, false, NULL, NULL}
263 | };
264 |
265 | word_table relationships[] =
266 | {
267 | {T_Invalid, "o" ,"Outer", NULL, false, NULL, NULL},
268 | {T_Invalid, "i" ,"Inner", NULL, false, NULL, NULL},
269 | {T_Invalid, "s" ,"Subquery", NULL, false, NULL, NULL},
270 | {T_Invalid, "m" ,"Member", NULL, false, NULL, NULL},
271 | {T_Invalid, "I" ,"InitPlan", NULL, false, NULL, NULL},
272 | {T_Invalid, "S" ,"SubPlan", NULL, false, NULL, NULL},
273 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
274 | };
275 |
276 | word_table strategies[] =
277 | {
278 | {S_Plain, "p" ,"Plain", NULL, false, NULL, NULL},
279 | {S_Sorted, "s" ,"Sorted", NULL, false, NULL, NULL},
280 | {S_Hashed, "h" ,"Hashed", NULL, false, NULL, NULL},
281 | {S_Mixed, "m" ,"Mixed", NULL, false, NULL, NULL},
282 | {S_Invalid, NULL, NULL, NULL, false, NULL, NULL}
283 | };
284 |
285 | word_table operations[] =
286 | {
287 | {T_Invalid, "i" ,"Insert", NULL, false, NULL, NULL},
288 | {T_Invalid, "d" ,"Delete", NULL, false, NULL, NULL},
289 | {T_Invalid, "u" ,"Update", NULL, false, NULL, NULL},
290 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
291 | };
292 |
293 | word_table jointypes[] =
294 | {
295 | {T_Invalid, "i" ,"Inner", NULL, false, NULL, NULL},
296 | {T_Invalid, "l" ,"Left", NULL, false, NULL, NULL},
297 | {T_Invalid, "f" ,"Full", NULL, false, NULL, NULL},
298 | {T_Invalid, "r" ,"Right", NULL, false, NULL, NULL},
299 | {T_Invalid, "s" ,"Semi", NULL, false, NULL, NULL},
300 | {T_Invalid, "a" ,"Anti", NULL, false, NULL, NULL},
301 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
302 | };
303 |
304 | word_table setsetopcommands[] =
305 | {
306 | {T_Invalid, "i" ,"Intersect", NULL, false, NULL, NULL},
307 | {T_Invalid, "I" ,"Intersect All", NULL, false, NULL, NULL},
308 | {T_Invalid, "e" ,"Except", NULL, false, NULL, NULL},
309 | {T_Invalid, "E" ,"Except All", NULL, false, NULL, NULL},
310 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
311 | };
312 |
313 | word_table sortmethods[] =
314 | {
315 | {T_Invalid, "h" ,"top-N heapsort", NULL, false, NULL, NULL},
316 | {T_Invalid, "q" ,"quicksort", NULL, false, NULL, NULL},
317 | {T_Invalid, "e" ,"external sort", NULL, false, NULL, NULL},
318 | {T_Invalid, "E" ,"external merge", NULL, false, NULL, NULL},
319 | {T_Invalid, "s" ,"still in progress", NULL, false, NULL, NULL},
320 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
321 | };
322 |
323 | word_table sortspacetype[] =
324 | {
325 | {T_Invalid, "d" ,"Disk", NULL, false, NULL, NULL},
326 | {T_Invalid, "m" ,"Memory",NULL, false, NULL, NULL},
327 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
328 | };
329 |
330 | word_table partialmode[] =
331 | {
332 | {T_Invalid, "p" ,"Partial", NULL, false, NULL, NULL},
333 | {T_Invalid, "f" ,"Finalize",NULL, false, NULL, NULL},
334 | {T_Invalid, "s" ,"Simple",NULL, false, NULL, NULL},
335 | {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
336 | };
337 |
338 |
339 | word_table *
340 | search_word_table(word_table *tbl, const char *word, int mode)
341 | {
342 | word_table *p;
343 |
344 | bool longname =
345 | (mode == PGSP_JSON_SHORTEN || mode == PGSP_JSON_NORMALIZE);
346 |
347 |
348 | /*
349 | * Use a simple linear search. More 'clever' algorithms such as b-trees
350 | * or tries would save only a tiny fraction of the total processing time,
351 | * which would not be worth the additional memory, complexity and
352 | * initialization cost.
353 | */
354 | for (p = tbl ; p->longname ; p++)
355 | {
356 | if (strcmp(longname ? p->longname: p->shortname, word) == 0)
357 | break;
358 | }
359 |
360 | if (p->longname == NULL && mode == PGSP_JSON_TEXTIZE)
361 | {
362 | /* Fallback to long json prop name */
363 | for (p = tbl ; p->longname ; p++)
364 | if (strcmp(p->longname, word) == 0)
365 | break;
366 | }
367 |
368 | return (p->longname ? p : NULL);
369 | }
370 |
371 |
372 | const char *
373 | converter_core(word_table *tbl,
374 | const char *src, pgsp_parser_mode mode)
375 | {
376 | word_table *p;
377 | char *ret;
378 |
379 | p = search_word_table(tbl, src, mode);
380 |
381 | if (!p) return src;
382 |
383 | ret = p->shortname;
384 | switch(mode)
385 | {
386 | case PGSP_JSON_SHORTEN:
387 | case PGSP_JSON_NORMALIZE:
388 | ret = p->shortname;
389 | break;
390 | case PGSP_JSON_INFLATE:
391 | case PGSP_JSON_YAMLIZE:
392 | case PGSP_JSON_XMLIZE:
393 | ret = p->longname;
394 | break;
395 | case PGSP_JSON_TEXTIZE:
396 | if(p->textname)
397 | ret = p->textname;
398 | else
399 | ret = p->longname;
400 | break;
401 | default:
402 | elog(ERROR, "Internal error");
403 | }
404 | return ret;
405 | }
406 |
407 | const char *
408 | conv_nodetype(const char *src, pgsp_parser_mode mode)
409 | {
410 | return converter_core(nodetypes, src, mode);
411 | }
412 |
413 | const char *
414 | conv_scandir(const char *src, pgsp_parser_mode mode)
415 | {
416 | return converter_core(directions, src, mode);
417 | }
418 |
419 | const char *
420 | conv_relasionship(const char *src, pgsp_parser_mode mode)
421 | {
422 | return converter_core(relationships, src, mode);
423 | }
424 |
425 | const char *
426 | conv_strategy(const char *src, pgsp_parser_mode mode)
427 | {
428 | return converter_core(strategies, src, mode);
429 | }
430 |
431 | /*
432 | * Look for these operator characters to decide whether to strip whitespace
433 | * that is unnecessary from the viewpoint of SQL syntax in
434 | * normalize_expr(). This must be kept in sync with op_chars in scan.l.
435 | */
436 | #define OPCHARS "~!@#^&|`?+-*/%<>="
437 | #define IS_WSCHAR(c) ((c) == ' ' || (c) == '\n' || (c) == '\t')
438 | #define IS_CONST(tok) (tok == FCONST || tok == SCONST || tok == BCONST || \
439 | tok == XCONST || tok == ICONST || tok == NULL_P || \
440 | tok == TRUE_P || tok == FALSE_P || \
441 | tok == CURRENT_CATALOG || tok == CURRENT_DATE || \
442 | tok == CURRENT_ROLE || tok == CURRENT_SCHEMA || \
443 | tok == CURRENT_TIME || tok == CURRENT_TIMESTAMP || \
444 | tok == CURRENT_USER || \
445 | tok == LOCALTIME || tok == LOCALTIMESTAMP)
446 | #define IS_INDENTED_ARRAY(v) ((v) == P_GroupKeys || (v) == P_HashKeys)
447 |
448 | /*
449 | * norm_yylex: core_yylex, with some tokens replaced.
450 | */
451 | static int
452 | norm_yylex(char *str, core_YYSTYPE *yylval, YYLTYPE *yylloc, core_yyscan_t yyscanner)
453 | {
454 | int tok;
455 |
456 | PG_TRY();
457 | {
458 | tok = core_yylex(yylval, yylloc, yyscanner);
459 | }
460 | PG_CATCH();
461 | {
462 | /*
463 | * An error can occur while parsing a quoted token that is chopped
464 | * halfway. Just ignore the rest of this query, even though parsing
465 | * may have failed for other reasons.
466 | */
467 | FlushErrorState();
468 | return -1;
469 | }
470 | PG_END_TRY();
471 |
472 | /*
473 | * A '?' on its own is assumed to be an IDENT. A real operator '?'
474 | * would be confused with it, but such an operator is unlikely to appear.
475 | */
476 | if (tok == Op && str[*yylloc] == '?' &&
477 | strchr(OPCHARS, str[*yylloc + 1]) == NULL)
478 | tok = SCONST;
479 |
480 | /*
481 | * Replace the token with '=' if the operator consists of two or
482 | * more opchars only. Assuming that opchars never form a token
483 | * together with non-opchars, checking only the first char is sufficient.
484 | */
485 | if (tok == Op && strchr(OPCHARS, str[*yylloc]) != NULL)
486 | tok = '=';
487 |
488 | return tok;
489 | }
490 |
491 | /*
492 | * normalize_expr - Normalize statements or expressions.
493 | *
494 | * Mask constants, strip unnecessary whitespace and upcase keywords. expr is
495 | * modified in place (destructively). If readability is more important than
496 | * uniqueness, pass preserve_space = true to keep one space for each run of
497 | * existing whitespace.
498 | */
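/*
 * Illustration only (an assumed example, not taken from the regression
 * output): with preserve_space = true, an input such as
 *     where  a = 1 and b = 'foo'
 * comes out roughly as
 *     WHERE a = ? AND b = ?
 * -- keywords upcased, constants masked with '?', identifiers copied as-is,
 * and each run of whitespace reduced to a single space.
 */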
499 | /* scanner interface is changed in PG12 */
500 | #if PG_VERSION_NUM < 120000
501 | #define ScanKeywords (*ScanKeywords)
502 | #define ScanKeywordTokens NumScanKeywords
503 | #endif
504 | void
505 | normalize_expr(char *expr, bool preserve_space)
506 | {
507 | core_yyscan_t yyscanner;
508 | core_yy_extra_type yyextra;
509 | core_YYSTYPE yylval;
510 | YYLTYPE yylloc;
511 | YYLTYPE lastloc;
512 | YYLTYPE start;
513 | char *wp;
514 | int tok, lasttok;
515 |
516 | wp = expr;
517 | yyscanner = scanner_init(expr,
518 | &yyextra,
519 | &ScanKeywords,
520 | ScanKeywordTokens);
521 |
522 | /*
523 | * The warnings about nonstandard escape strings are already emitted by the
524 | * core. Just silence them here.
525 | */
526 | #if PG_VERSION_NUM >= 90500
527 | yyextra.escape_string_warning = false;
528 | #endif
529 | lasttok = 0;
530 | lastloc = -1;
531 |
532 | for (;;)
533 | {
534 | tok = norm_yylex(expr, &yylval, &yylloc, yyscanner);
535 |
536 | start = yylloc;
537 |
538 | if (lastloc >= 0)
539 | {
540 | int i, i2;
541 |
542 | /* Skipping preceding whitespaces */
543 | for(i = lastloc ; i < start && IS_WSCHAR(expr[i]) ; i++);
544 |
545 | /* Searching for trailing whitespace */
546 | for(i2 = i; i2 < start && !IS_WSCHAR(expr[i2]) ; i2++);
547 |
548 | if (lasttok == IDENT)
549 | {
550 | /* Identifiers are copied in case-sensitive manner. */
551 | memcpy(wp, expr + i, i2 - i);
552 | wp += i2 - i;
553 | }
554 | #if PG_VERSION_NUM >= 100000
555 | /*
556 | * Since PG10, pg_stat_statements doesn't store a trailing semicolon
557 | * in the column "query". Normalization is basically useless in those
558 | * versions but is still useful for matching utility commands, so follow
559 | * the behavior change.
560 | */
561 | else if (lasttok == ';')
562 | {
563 | /* Just do nothing */
564 | }
565 | #endif
566 | else
567 | {
568 | /* Upcase keywords */
569 | char *sp;
570 | for (sp = expr + i ; sp < expr + i2 ; sp++, wp++)
571 | *wp = (*sp >= 'a' && *sp <= 'z' ?
572 | *sp - ('a' - 'A') : *sp);
573 | }
574 |
575 | /*
576 | * Because of the destructive writing, wp must not advance past the
577 | * reading point.
578 | * Although this function's output does not need to be valid as a
579 | * statement or an expression, spaces are added where they should be
580 | * to keep some degree of sanity. If readability is more important
581 | * than uniqueness, preserve_space adds one space for each run of
582 | * existing whitespace.
583 | */
584 | if (tok > 0 &&
585 | i2 < start &&
586 | (preserve_space ||
587 | (tok >= IDENT && lasttok >= IDENT &&
588 | !IS_CONST(tok) && !IS_CONST(lasttok))))
589 | *wp++ = ' ';
590 |
591 | start = i2;
592 | }
593 |
594 | /* Exit on parse error. */
595 | if (tok < 0)
596 | {
597 | *wp = 0;
598 | return;
599 | }
600 |
601 | /*
602 | * Negative signs before numbers are tokenized separately. And
603 | * explicit positive signs won't appear in deparsed expressions.
604 | */
605 | if (tok == '-')
606 | tok = norm_yylex(expr, &yylval, &yylloc, yyscanner);
607 |
608 | /* Exit on parse error. */
609 | if (tok < 0)
610 | {
611 | *wp = 0;
612 | return;
613 | }
614 |
615 | if (IS_CONST(tok))
616 | {
617 | YYLTYPE end;
618 |
619 | tok = norm_yylex(expr, &yylval, &end, yyscanner);
620 |
621 | /* Exit on parse error. */
622 | if (tok < 0)
623 | {
624 | *wp = 0;
625 | return;
626 | }
627 |
628 | /*
629 | * Negative values may be surrounded with parens by the
630 | * deparser. Mask them together with the parens.
631 | */
632 | if (lasttok == '(' && tok == ')')
633 | {
634 | wp -= (start - lastloc);
635 | start = lastloc;
636 | end++;
637 | }
638 |
639 | while (expr[end - 1] == ' ')
640 | end--;
641 |
642 | *wp++ = '?';
643 | yylloc = end;
644 | }
645 |
646 | if (tok == 0)
647 | break;
648 |
649 | lasttok = tok;
650 | lastloc = yylloc;
651 | }
652 | *wp = 0;
653 | }
654 |
655 | const char *
656 | conv_expression(const char *src, pgsp_parser_mode mode)
657 | {
658 | const char *ret = src;
659 |
660 | if (mode == PGSP_JSON_NORMALIZE)
661 | {
662 | char *t = pstrdup(src);
663 | normalize_expr(t, true);
664 | ret = (const char *)t;
665 | }
666 | return ret;
667 | }
668 |
669 | const char *
670 | conv_operation(const char *src, pgsp_parser_mode mode)
671 | {
672 | return converter_core(operations, src, mode);
673 |
674 | }
675 |
676 | const char *
677 | conv_jointype(const char *src, pgsp_parser_mode mode)
678 | {
679 | return converter_core(jointypes, src, mode);
680 | }
681 |
682 | const char *
683 | conv_setsetopcommand(const char *src, pgsp_parser_mode mode)
684 | {
685 | return converter_core(setsetopcommands, src, mode);
686 | }
687 |
688 | const char *
689 | conv_sortmethod(const char *src, pgsp_parser_mode mode)
690 | {
691 | return converter_core(sortmethods, src, mode);
692 | }
693 |
694 | const char *
695 | conv_sortspacetype(const char *src, pgsp_parser_mode mode)
696 | {
697 | return converter_core(sortspacetype, src, mode);
698 | }
699 |
700 | const char *
701 | conv_partialmode(const char *src, pgsp_parser_mode mode)
702 | {
703 | return converter_core(partialmode, src, mode);
704 | }
705 |
706 | /**** Parser callbacks ****/
707 |
708 | /* JSON */
709 | static JsonParseErrorType
710 | json_objstart(void *state)
711 | {
712 | pgspParserContext *ctx = (pgspParserContext *)state;
713 |
714 | if (ctx->mode == PGSP_JSON_INFLATE)
715 | {
716 | if (!ctx->fname && ctx->dest->len > 0)
717 | {
718 | appendStringInfoChar(ctx->dest, '\n');
719 | appendStringInfoSpaces(ctx->dest, (ctx->level) * INDENT_STEP);
720 | }
721 | ctx->fname = NULL;
722 | }
723 | appendStringInfoChar(ctx->dest, '{');
724 |
725 | ctx->level++;
726 | ctx->first = bms_add_member(ctx->first, ctx->level);
727 |
728 | if (ctx->mode == PGSP_JSON_INFLATE)
729 | appendStringInfoChar(ctx->dest, '\n');
730 |
731 | JSONACTION_RETURN_SUCCESS();
732 | }
733 |
734 | static JsonParseErrorType
735 | json_objend(void *state)
736 | {
737 | pgspParserContext *ctx = (pgspParserContext *)state;
738 | if (ctx->mode == PGSP_JSON_INFLATE)
739 | {
740 | if (!bms_is_member(ctx->level, ctx->first))
741 | appendStringInfoChar(ctx->dest, '\n');
742 | appendStringInfoSpaces(ctx->dest, (ctx->level - 1) * INDENT_STEP);
743 | }
744 |
745 | appendStringInfoChar(ctx->dest, '}');
746 |
747 | ctx->level--;
748 | ctx->last_elem_is_object = true;
749 | ctx->first = bms_del_member(ctx->first, ctx->level);
750 | ctx->fname = NULL;
751 |
752 | JSONACTION_RETURN_SUCCESS();
753 | }
754 |
755 | static JsonParseErrorType
756 | json_arrstart(void *state)
757 | {
758 | pgspParserContext *ctx = (pgspParserContext *)state;
759 |
760 | if (IS_INDENTED_ARRAY(ctx->current_list))
761 | ctx->wlist_level++;
762 |
763 | appendStringInfoChar(ctx->dest, '[');
764 | ctx->fname = NULL;
765 | ctx->level++;
766 | ctx->last_elem_is_object = true;
767 | ctx->first = bms_add_member(ctx->first, ctx->level);
768 |
769 | JSONACTION_RETURN_SUCCESS();
770 | }
771 |
772 | static JsonParseErrorType
773 | json_arrend(void *state)
774 | {
775 | pgspParserContext *ctx = (pgspParserContext *)state;
776 |
777 | if (IS_INDENTED_ARRAY(ctx->current_list))
778 | ctx->wlist_level--;
779 |
780 | if (ctx->mode == PGSP_JSON_INFLATE &&
781 | (IS_INDENTED_ARRAY(ctx->current_list) ?
782 | ctx->wlist_level == 0 : ctx->last_elem_is_object))
783 | {
784 | appendStringInfoChar(ctx->dest, '\n');
785 | appendStringInfoSpaces(ctx->dest, (ctx->level - 1) * INDENT_STEP);
786 | }
787 |
788 | appendStringInfoChar(ctx->dest, ']');
789 | ctx->level--;
790 |
791 | JSONACTION_RETURN_SUCCESS();
792 | }
793 |
794 | static JsonParseErrorType
795 | json_ofstart(void *state, char *fname, bool isnull)
796 | {
797 | word_table *p;
798 | pgspParserContext *ctx = (pgspParserContext *)state;
799 | char *fn;
800 |
801 | ctx->remove = false;
802 | p = search_word_table(propfields, fname, ctx->mode);
803 | if (!p)
804 | {
805 | ereport(DEBUG1,
806 | (errmsg("JSON parser encountered unknown field name: \"%s\".", fname),
807 | errdetail_log("INPUT: \"%s\"", ctx->org_string)));
808 | }
809 |
810 | ctx->remove = (ctx->mode == PGSP_JSON_NORMALIZE &&
811 | (!p || !p->normalize_use));
812 |
813 | if (ctx->remove)
814 | JSONACTION_RETURN_SUCCESS();
815 |
816 | if (!bms_is_member(ctx->level, ctx->first))
817 | {
818 | appendStringInfoChar(ctx->dest, ',');
819 | if (ctx->mode == PGSP_JSON_INFLATE)
820 | appendStringInfoChar(ctx->dest, '\n');
821 | }
822 | else
823 | ctx->first = bms_del_member(ctx->first, ctx->level);
824 |
825 | if (ctx->mode == PGSP_JSON_INFLATE)
826 | appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
827 |
828 | /*
829 | * We intentionally leave some property names without a short name. Use the
830 | * long name in those cases.
831 | */
832 | if (!p || !p->longname)
833 | fn = fname;
834 | else if (ctx->mode == PGSP_JSON_INFLATE ||
835 | !(p->shortname && p->shortname[0]))
836 | fn = p->longname;
837 | else
838 | fn = p->shortname;
839 |
840 | escape_json(ctx->dest, fn);
841 | ctx->fname = fn;
842 | ctx->valconverter = (p ? p->converter : NULL);
843 |
844 | appendStringInfoChar(ctx->dest, ':');
845 |
846 | if (ctx->mode == PGSP_JSON_INFLATE)
847 | appendStringInfoChar(ctx->dest, ' ');
848 |
849 | if (p && IS_INDENTED_ARRAY(p->tag))
850 | {
851 | ctx->current_list = p->tag;
852 | ctx->list_fname = fname;
853 | ctx->wlist_level = 0;
854 | }
855 |
856 | JSONACTION_RETURN_SUCCESS();
857 | }
858 |
859 | static JsonParseErrorType
860 | json_ofend(void *state, char *fname, bool isnull)
861 | {
862 | pgspParserContext *ctx = (pgspParserContext *)state;
863 |
864 | if (ctx->list_fname && strcmp(fname, ctx->list_fname) == 0)
865 | {
866 | ctx->list_fname = NULL;
867 | ctx->current_list = P_Invalid;
868 | }
869 |
870 | JSONACTION_RETURN_SUCCESS();
871 | }
872 |
873 | static JsonParseErrorType
874 | json_aestart(void *state, bool isnull)
875 | {
876 | pgspParserContext *ctx = (pgspParserContext *)state;
877 | if (ctx->remove)
878 | JSONACTION_RETURN_SUCCESS();
879 |
880 | if (IS_INDENTED_ARRAY(ctx->current_list) &&
881 | ctx->wlist_level == 1)
882 | {
883 | if (!bms_is_member(ctx->level, ctx->first))
884 | appendStringInfoChar(ctx->dest, ',');
885 |
886 | if (ctx->mode == PGSP_JSON_INFLATE)
887 | {
888 | appendStringInfoChar(ctx->dest, '\n');
889 | appendStringInfoSpaces(ctx->dest, (ctx->level) * INDENT_STEP);
890 | }
891 | }
892 | else
893 | {
894 | if (!bms_is_member(ctx->level, ctx->first))
895 | {
896 | appendStringInfoChar(ctx->dest, ',');
897 |
898 | if (ctx->mode == PGSP_JSON_INFLATE &&
899 | !ctx->last_elem_is_object)
900 | appendStringInfoChar(ctx->dest, ' ');
901 | }
902 | }
903 |
904 | ctx->first = bms_del_member(ctx->first, ctx->level);
905 |
906 | JSONACTION_RETURN_SUCCESS();
907 | }
908 |
909 | static JsonParseErrorType
910 | json_scalar(void *state, char *token, JsonTokenType tokentype)
911 | {
912 | pgspParserContext *ctx = (pgspParserContext *)state;
913 | const char *val = token;
914 |
915 | if (ctx->remove)
916 | JSONACTION_RETURN_SUCCESS();
917 |
918 | if (ctx->valconverter)
919 | val = ctx->valconverter(token, ctx->mode);
920 |
921 | if (tokentype == JSON_TOKEN_STRING)
922 | escape_json(ctx->dest, val);
923 | else
924 | appendStringInfoString(ctx->dest, val);
925 | ctx->last_elem_is_object = false;
926 |
927 | JSONACTION_RETURN_SUCCESS();
928 | }
929 |
930 |
931 | /* YAML */
932 | static JsonParseErrorType
933 | yaml_objstart(void *state)
934 | {
935 | pgspParserContext *ctx = (pgspParserContext *)state;
936 |
937 | if (ctx->fname)
938 | {
939 | if (ctx->dest->len > 0)
940 | appendStringInfoChar(ctx->dest, '\n');
941 | appendStringInfoSpaces(ctx->dest, (ctx->level - 1) * INDENT_STEP);
942 | appendStringInfoString(ctx->dest, "- ");
943 | appendStringInfoString(ctx->dest, ctx->fname);
944 | appendStringInfoString(ctx->dest, ":\n");
945 | appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);
946 | ctx->fname = NULL;
947 | }
948 |
949 | ctx->level++;
950 | ctx->first = bms_add_member(ctx->first, ctx->level);
951 |
952 | JSONACTION_RETURN_SUCCESS();
953 | }
954 |
955 | static JsonParseErrorType
956 | yaml_objend(void *state)
957 | {
958 | pgspParserContext *ctx = (pgspParserContext *)state;
959 |
960 | ctx->level--;
961 | ctx->last_elem_is_object = true;
962 | ctx->first = bms_del_member(ctx->first, ctx->level);
963 |
964 | JSONACTION_RETURN_SUCCESS();
965 | }
966 |
967 | static JsonParseErrorType
968 | yaml_arrstart(void *state)
969 | {
970 | pgspParserContext *ctx = (pgspParserContext *)state;
971 |
972 | if (ctx->fname)
973 | {
974 | appendStringInfoString(ctx->dest, ctx->fname);
975 | appendStringInfoString(ctx->dest, ":");
976 | }
977 |
978 | ctx->fname = NULL;
979 | ctx->level++;
980 | ctx->first = bms_add_member(ctx->first, ctx->level);
981 |
982 | JSONACTION_RETURN_SUCCESS();
983 | }
984 |
985 | static JsonParseErrorType
986 | yaml_arrend(void *state)
987 | {
988 | pgspParserContext *ctx = (pgspParserContext *)state;
989 | ctx->level--;
990 |
991 | JSONACTION_RETURN_SUCCESS();
992 | }
993 |
994 | static JsonParseErrorType
995 | yaml_ofstart(void *state, char *fname, bool isnull)
996 | {
997 | word_table *p;
998 | pgspParserContext *ctx = (pgspParserContext *)state;
999 | char *s;
1000 |
1001 | p = search_word_table(propfields, fname, ctx->mode);
1002 | if (!p)
1003 | {
1004 | ereport(DEBUG1,
1005 | (errmsg("Short JSON parser encountered unknown field name: \"%s\".", fname),
1006 | errdetail_log("INPUT: \"%s\"", ctx->org_string)));
1007 | }
1008 | s = (p ? p->longname : fname);
1009 |
1010 | if (!bms_is_member(ctx->level, ctx->first))
1011 | {
1012 | appendStringInfoString(ctx->dest, "\n");
1013 | appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
1014 | }
1015 | else
1016 | ctx->first = bms_del_member(ctx->first, ctx->level);
1017 |
1018 | ctx->valconverter = NULL;
1019 | ctx->fname = s;
1020 | ctx->valconverter = (p ? p->converter : NULL);
1021 |
1022 | JSONACTION_RETURN_SUCCESS();
1023 | }
1024 |
1025 | static JsonParseErrorType
1026 | yaml_aestart(void *state, bool isnull)
1027 | {
1028 | pgspParserContext *ctx = (pgspParserContext *)state;
1029 |
1030 | appendStringInfoString(ctx->dest, "\n");
1031 | ctx->first = bms_del_member(ctx->first, ctx->level);
1032 | appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
1033 | appendStringInfoString(ctx->dest, "- ");
1034 |
1035 | JSONACTION_RETURN_SUCCESS();
1036 | }
1037 |
1038 | static JsonParseErrorType
1039 | yaml_scalar(void *state, char *token, JsonTokenType tokentype)
1040 | {
1041 | pgspParserContext *ctx = (pgspParserContext *)state;
1042 |
1043 | if (ctx->fname)
1044 | {
1045 | appendStringInfoString(ctx->dest, ctx->fname);
1046 | appendStringInfoString(ctx->dest, ": ");
1047 | ctx->fname = NULL;
1048 | }
1049 |
1050 | json_scalar(state, token, tokentype);
1051 |
1052 | ctx->last_elem_is_object = false;
1053 |
1054 | JSONACTION_RETURN_SUCCESS();
1055 | }
1056 |
1057 |
1058 | /* XML */
1059 | static JsonParseErrorType
1060 | xml_objstart(void *state)
1061 | {
1062 | pgspParserContext *ctx = (pgspParserContext *)state;
1063 |
1064 | ctx->level ++;
1065 | ctx->first = bms_add_member(ctx->first, ctx->level);
1066 |
1067 | JSONACTION_RETURN_SUCCESS();
1068 | }
1069 |
1070 |
1071 | static JsonParseErrorType
1072 | xml_objend(void *state)
1073 | {
1074 | pgspParserContext *ctx = (pgspParserContext *)state;
1075 | appendStringInfoChar(ctx->dest, '\n');
1076 | appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
1077 |
1078 | ctx->level--;
1079 | ctx->first = bms_del_member(ctx->first, ctx->level);
1080 |
1081 | ctx->last_elem_is_object = true;
1082 |
1083 | JSONACTION_RETURN_SUCCESS();
1084 | }
1085 |
1086 | static JsonParseErrorType
1087 | xml_arrend(void *state)
1088 | {
1089 | pgspParserContext *ctx = (pgspParserContext *)state;
1090 |
1091 | appendStringInfoChar(ctx->dest, '\n');
1092 | appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);
1093 |
1094 | JSONACTION_RETURN_SUCCESS();
1095 | }
1096 |
1097 | static void
1098 | adjust_wbuf(pgspParserContext *ctx, int len)
1099 | {
1100 | int buflen;
1101 |
1102 | for (buflen = ctx->wbuflen ; len > buflen ; buflen *= 2);
1103 | if (buflen > ctx->wbuflen)
1104 | {
1105 | ctx->wbuf = (char *)palloc(buflen);
1106 | ctx->wbuflen = buflen;
1107 | }
1108 | }
1109 |
1110 | static char *
1111 | hyphenate_words(pgspParserContext *ctx, char *src)
1112 | {
1113 | char *p;
1114 |
1115 | adjust_wbuf(ctx, strlen(src) + 1);
1116 | strcpy(ctx->wbuf, src);
1117 |
1118 | for (p = ctx->wbuf ; *p ; p++)
1119 | if (*p == ' ') *p = '-';
1120 |
1121 | return ctx->wbuf;
1122 | }
1123 |
1124 | static JsonParseErrorType
1125 | xml_ofstart(void *state, char *fname, bool isnull)
1126 | {
1127 | word_table *p;
1128 | pgspParserContext *ctx = (pgspParserContext *)state;
1129 | char *s;
1130 |
1131 | p = search_word_table(propfields, fname, ctx->mode);
1132 | if (!p)
1133 | {
1134 | ereport(DEBUG1,
1135 | 				(errmsg("Short JSON parser encountered unknown field name: \"%s\".", fname),
1136 | errdetail_log("INPUT: \"%s\"", ctx->org_string)));
1137 | }
1138 | s = (p ? p->longname : fname);
1139 |
1140 | /*
1141 | 	 * Save the current processing section.  There's no problem if P_Plan
1142 | 	 * appears recursively.
1143 | */
1144 | if (p && (p->tag == P_Plan || p->tag == P_Triggers))
1145 | ctx->section = p->tag;
1146 |
1147 | appendStringInfoChar(ctx->dest, '\n');
1148 | appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);
1149 |
1150 | ctx->valconverter = NULL;
1151 |
1152 | appendStringInfoChar(ctx->dest, '<');
1153 | appendStringInfoString(ctx->dest, escape_xml(hyphenate_words(ctx, s)));
1154 | appendStringInfoChar(ctx->dest, '>');
1155 | ctx->valconverter = (p ? p->converter : NULL);
1156 |
1157 | /*
1158 | 	 * If the object field name is Plans or Triggers, the value is an array
1159 | 	 * whose items are tagged with something other than "Item".  "Item" tags
1160 | 	 * appear only in the Output field.
1161 | */
1162 | if (p && (p->tag == P_Plans || p->tag == P_Triggers))
1163 | ctx->not_item = bms_add_member(ctx->not_item, ctx->level + 1);
1164 | else
1165 | ctx->not_item = bms_del_member(ctx->not_item, ctx->level + 1);
1166 |
1167 | JSONACTION_RETURN_SUCCESS();
1168 | }
1169 |
1170 | static JsonParseErrorType
1171 | xml_ofend(void *state, char *fname, bool isnull)
1172 | {
1173 | pgspParserContext *ctx = (pgspParserContext *)state;
1174 | word_table *p;
1175 | char *s;
1176 |
1177 | p = search_word_table(propfields, fname, ctx->mode);
1178 | s = (p ? p->longname : fname);
1179 |
1180 | 	appendStringInfoString(ctx->dest, "</");
1181 | appendStringInfoString(ctx->dest, escape_xml(hyphenate_words(ctx, s)));
1182 | appendStringInfoChar(ctx->dest, '>');
1183 |
1184 | JSONACTION_RETURN_SUCCESS();
1185 | }
1186 |
1187 | static JsonParseErrorType
1188 | xml_aestart(void *state, bool isnull)
1189 | {
1190 | pgspParserContext *ctx = (pgspParserContext *)state;
1191 | char *tag;
1192 |
1193 | /*
1194 | * The "Trigger" in "Triggers", "Plan" in "Plans" and "Item" nodes are
1195 | * implicitly represented in JSON format. Restore them for XML format.
1196 | */
1197 |
1198 | ctx->level++;
1199 | if (bms_is_member(ctx->level, ctx->not_item))
1200 | {
1201 | if (ctx->section == P_Plan)
1202 | 			tag = "<Plan>";
1203 | 		else
1204 | 			tag = "<Trigger>";
1205 | 	}
1206 | 	else
1207 | 		tag = "<Item>";
1208 |
1209 | appendStringInfoChar(ctx->dest, '\n');
1210 | appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);
1211 | appendStringInfoString(ctx->dest, tag);
1212 |
1213 | JSONACTION_RETURN_SUCCESS();
1214 | }
1215 |
1216 | static JsonParseErrorType
1217 | xml_aeend(void *state, bool isnull)
1218 | {
1219 | pgspParserContext *ctx = (pgspParserContext *)state;
1220 | char *tag;
1221 |
1222 | /*
1223 | * The "Plan" in "Plans" or "Item" nodes are implicitly represented in
1224 | * JSON format. Restore it for XML format.
1225 | */
1226 |
1227 | if (bms_is_member(ctx->level, ctx->not_item))
1228 | {
1229 | if (ctx->section == P_Plan)
1230 | 			tag = "</Plan>";
1231 | 		else
1232 | 			tag = "</Trigger>";
1233 | 	}
1234 | 	else
1235 | 		tag = "</Item>";
1236 | appendStringInfoString(ctx->dest, tag);
1237 | ctx->level--;
1238 |
1239 | JSONACTION_RETURN_SUCCESS();
1240 | }
1241 |
1242 | static JsonParseErrorType
1243 | xml_scalar(void *state, char *token, JsonTokenType tokentype)
1244 | {
1245 | pgspParserContext *ctx = (pgspParserContext *)state;
1246 | const char *s = token;
1247 |
1248 | if (ctx->valconverter)
1249 | s = ctx->valconverter(token, PGSP_JSON_XMLIZE);
1250 |
1251 | if (tokentype == JSON_TOKEN_STRING)
1252 | s = escape_xml(s);
1253 |
1254 | appendStringInfoString(ctx->dest, s);
1255 | ctx->last_elem_is_object = false;
1256 |
1257 | JSONACTION_RETURN_SUCCESS();
1258 | }
1259 |
1260 | /********************************/
1261 | void
1262 | init_parser_context(pgspParserContext *ctx, int mode,
1263 | char *orgstr, char *buf, int buflen){
1264 | memset(ctx, 0, sizeof(*ctx));
1265 | ctx->dest = makeStringInfo();
1266 | ctx->mode = mode;
1267 | ctx->org_string = orgstr;
1268 | ctx->wbuf = buf;
1269 | ctx->wbuflen = buflen;
1270 | }
1271 |
1272 | /*
1273 | * run_pg_parse_json:
1274 | *
1275 |  * Wrap pg_parse_json in order to restore InterruptHoldoffCount when a parse
1276 |  * error occurs.
1277 | *
1278 | * Returns true when parse completed. False for unexpected end of string.
1279 | */
1280 | bool
1281 | run_pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
1282 | {
1283 | #if PG_VERSION_NUM >= 130000
1284 | return pg_parse_json(lex, sem) == JSON_SUCCESS;
1285 | #else
1286 | MemoryContext ccxt = CurrentMemoryContext;
1287 | uint32 saved_IntrHoldoffCount;
1288 |
1289 | /*
1290 | 	 * An "ereport(ERROR, ...)" raised on error in pg_parse_json resets
1291 | 	 * InterruptHoldoffCount to zero, so we must save the value before calling
1292 | 	 * the JSON parser and restore it on parse error.  See errfinish().
1293 | */
1294 | saved_IntrHoldoffCount = InterruptHoldoffCount;
1295 |
1296 | PG_TRY();
1297 | {
1298 | pg_parse_json(lex, sem);
1299 | }
1300 | PG_CATCH();
1301 | {
1302 | ErrorData *errdata;
1303 | MemoryContext ecxt;
1304 |
1305 | InterruptHoldoffCount = saved_IntrHoldoffCount;
1306 |
1307 | ecxt = MemoryContextSwitchTo(ccxt);
1308 | errdata = CopyErrorData();
1309 |
1310 | if (errdata->sqlerrcode == ERRCODE_INVALID_TEXT_REPRESENTATION)
1311 | {
1312 | FlushErrorState();
1313 | return false;
1314 | }
1315 | else
1316 | {
1317 | MemoryContextSwitchTo(ecxt);
1318 | PG_RE_THROW();
1319 | }
1320 | }
1321 | PG_END_TRY();
1322 |
1323 | return true;
1324 | #endif
1325 | }
1326 |
1327 | void
1328 | init_json_lex_context(JsonLexContext *lex, char *json)
1329 | {
1330 | #if PG_VERSION_NUM < 170000
1331 | memset(lex, 0, sizeof(JsonLexContext));
1332 | lex->input = lex->token_terminator = lex->line_start = json;
1333 | lex->line_number = 1;
1334 | lex->input_length = strlen(json);
1335 | #if PG_VERSION_NUM >= 130000
1336 | lex->input_encoding = GetDatabaseEncoding();
1337 | #endif /* PG13+ */
1338 | lex->strval = makeStringInfo();
1339 | #else /* PG17- */
1340 | makeJsonLexContextCstringLen(lex, json, strlen(json),
1341 | GetDatabaseEncoding(), true);
1342 | #endif /* PG17+ */
1343 | }
1344 |
1345 | static void
1346 | init_json_semaction(JsonSemAction *sem, pgspParserContext *ctx)
1347 | {
1348 | sem->semstate = (void*)ctx;
1349 | sem->object_start = json_objstart;
1350 | sem->object_end = json_objend;
1351 | sem->array_start = json_arrstart;
1352 | sem->array_end = json_arrend;
1353 | sem->object_field_start = json_ofstart;
1354 | sem->object_field_end = json_ofend;
1355 | sem->array_element_start= json_aestart;
1356 | sem->array_element_end = NULL;
1357 | sem->scalar = json_scalar;
1358 | }
1359 |
1360 | char *
1361 | pgsp_json_shorten(char *json)
1362 | {
1363 | JsonLexContext lex;
1364 | JsonSemAction sem;
1365 | pgspParserContext ctx;
1366 |
1367 | init_json_lex_context(&lex, json);
1368 | init_parser_context(&ctx, PGSP_JSON_SHORTEN, json, NULL, 0);
1369 | init_json_semaction(&sem, &ctx);
1370 |
1371 | run_pg_parse_json(&lex, &sem);
1372 |
1373 | return ctx.dest->data;
1374 | }
1375 |
1376 | char *
1377 | pgsp_json_normalize(char *json)
1378 | {
1379 | JsonLexContext lex;
1380 | JsonSemAction sem;
1381 | pgspParserContext ctx;
1382 |
1383 | init_json_lex_context(&lex, json);
1384 | init_parser_context(&ctx,PGSP_JSON_NORMALIZE, json, NULL, 0);
1385 | init_json_semaction(&sem, &ctx);
1386 |
1387 | run_pg_parse_json(&lex, &sem);
1388 |
1389 | return ctx.dest->data;
1390 | }
1391 |
1392 | char *
1393 | pgsp_json_inflate(char *json)
1394 | {
1395 | JsonLexContext lex;
1396 | JsonSemAction sem;
1397 | pgspParserContext ctx;
1398 |
1399 | init_json_lex_context(&lex, json);
1400 | init_parser_context(&ctx, PGSP_JSON_INFLATE, json, NULL, 0);
1401 | init_json_semaction(&sem, &ctx);
1402 |
1403 | if (!run_pg_parse_json(&lex, &sem))
1404 | {
1405 | if (ctx.dest->len > 0 &&
1406 | ctx.dest->data[ctx.dest->len - 1] != '\n')
1407 | appendStringInfoChar(ctx.dest, '\n');
1408 |
1409 | 		if (ctx.dest->len == 0)
1410 | 			appendStringInfoString(ctx.dest, "<Input was not JSON>");
1411 | 		else
1412 | 			appendStringInfoString(ctx.dest, "<truncated>");
1413 | }
1414 |
1415 | return ctx.dest->data;
1416 | }
1417 |
1418 | char *
1419 | pgsp_json_yamlize(char *json)
1420 | {
1421 | pgspParserContext ctx;
1422 | JsonSemAction sem;
1423 | JsonLexContext lex;
1424 |
1425 | init_json_lex_context(&lex, json);
1426 | init_parser_context(&ctx, PGSP_JSON_YAMLIZE, json, NULL, 0);
1427 |
1428 | sem.semstate = (void*)&ctx;
1429 | sem.object_start = yaml_objstart;
1430 | sem.object_end = yaml_objend;
1431 | sem.array_start = yaml_arrstart;
1432 | sem.array_end = yaml_arrend;
1433 | sem.object_field_start = yaml_ofstart;
1434 | sem.object_field_end = NULL;
1435 | sem.array_element_start= yaml_aestart;
1436 | sem.array_element_end = NULL;
1437 | sem.scalar = yaml_scalar;
1438 |
1439 | if (!run_pg_parse_json(&lex, &sem))
1440 | {
1441 | if (ctx.dest->len > 0 &&
1442 | ctx.dest->data[ctx.dest->len - 1] != '\n')
1443 | appendStringInfoChar(ctx.dest, '\n');
1444 |
1445 | 		if (ctx.dest->len == 0)
1446 | 			appendStringInfoString(ctx.dest, "<Input was not JSON>");
1447 | 		else
1448 | 			appendStringInfoString(ctx.dest, "<truncated>");
1449 | }
1450 |
1451 | return ctx.dest->data;
1452 | }
1453 |
1454 | char *
1455 | pgsp_json_xmlize(char *json)
1456 | {
1457 | pgspParserContext ctx;
1458 | JsonSemAction sem;
1459 | JsonLexContext lex;
1460 | int start_len;
1461 | char buf[32];
1462 |
1463 | init_json_lex_context(&lex, json);
1464 | init_parser_context(&ctx, PGSP_JSON_XMLIZE, json, buf, sizeof(buf));
1465 |
1466 | sem.semstate = (void*)&ctx;
1467 | sem.object_start = xml_objstart;
1468 | sem.object_end = xml_objend;
1469 | sem.array_start = NULL;
1470 | sem.array_end = xml_arrend;
1471 | sem.object_field_start = xml_ofstart;
1472 | sem.object_field_end = xml_ofend;
1473 | sem.array_element_start= xml_aestart;
1474 | sem.array_element_end = xml_aeend;
1475 | sem.scalar = xml_scalar;
1476 |
1477 | appendStringInfo(ctx.dest,
1478 | 					 "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n <Query>");
1479 | start_len = ctx.dest->len;
1480 |
1481 | if (!run_pg_parse_json(&lex, &sem))
1482 | {
1483 | if (ctx.dest->len > start_len &&
1484 | ctx.dest->data[ctx.dest->len - 1] != '\n')
1485 | appendStringInfoChar(ctx.dest, '\n');
1486 |
1487 | if (ctx.dest->len == start_len)
1488 | {
1489 | resetStringInfo(ctx.dest);
1490 | 			appendStringInfoString(ctx.dest, "<Input was not JSON>");
1491 | 		}
1492 | 		else
1493 | 			appendStringInfoString(ctx.dest, "</Query></explain>");
1494 | }
1495 | else
1496 | 		appendStringInfo(ctx.dest, " </Query>\n</explain>\n");
1497 |
1498 | return ctx.dest->data;
1499 | }
1500 |
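
The converters in this file all share one pattern: fill a JsonSemAction with callbacks, then drive PostgreSQL's incremental JSON parser over the stored plan through the init_json_lex_context()/run_pg_parse_json() wrappers. The following is a minimal sketch of that pattern, not part of the extension: it merely counts object fields in a plan string. It assumes the PG16+ callback signatures (JsonParseErrorType return values) and mirrors the include order used by pgsp_json_text.c.

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "nodes/nodes.h"
    #include "nodes/bitmapset.h"
    #include "nodes/pg_list.h"
    #include "common/jsonapi.h"      /* PG13+; "utils/jsonapi.h" before that */
    #include "pgsp_json_text.h"
    #include "pgsp_json_int.h"

    typedef struct { int nfields; } count_state;

    /* object_field_start callback: bump the counter for every field seen */
    static JsonParseErrorType
    count_ofstart(void *state, char *fname, bool isnull)
    {
        ((count_state *) state)->nfields++;
        return JSON_SUCCESS;
    }

    static int
    count_plan_fields(char *json)
    {
        JsonLexContext lex;
        JsonSemAction  sem = {0};       /* unused callbacks stay NULL */
        count_state    st = {0};

        init_json_lex_context(&lex, json);
        sem.semstate = (void *) &st;
        sem.object_field_start = count_ofstart;

        /* false means the input ended unexpectedly, as in the functions above */
        if (!run_pg_parse_json(&lex, &sem))
            return -1;
        return st.nfields;
    }

pgsp_json_shorten() and friends are exactly this shape, except that their callbacks rewrite field names through propfields[] and append the result to ctx->dest.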
--------------------------------------------------------------------------------
/pgsp_json.h:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 | * pgsp_json.h: Definitions of plan handler for JSON/XML/YAML style plans
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_json.h
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | #include "pgsp_json_text.h"
15 |
16 | extern char *pgsp_json_normalize(char *json);
17 | extern char *pgsp_json_shorten(char *json);
18 | extern char *pgsp_json_inflate(char *json);
19 | extern char *pgsp_json_yamlize(char *json);
20 | extern char *pgsp_json_xmlize(char *json);
21 | extern void normalize_expr(char *expr, bool preserve_space);
22 |
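
As a usage sketch only (the plan fragment and the wrapper function below are hypothetical, not code from the extension): a plan once compacted with pgsp_json_shorten() can be expanded again or re-rendered in the other supported formats, which is how the extension displays stored plans. All results are palloc'd strings.

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "nodes/nodes.h"
    #include "nodes/pg_list.h"
    #include "pgsp_json.h"

    static void
    demo_conversions(void)
    {
        /* hypothetical JSON-format plan fragment, for illustration only */
        char *json =
            "{\"Plan\": {\"Node Type\": \"Seq Scan\", \"Relation Name\": \"t1\"}}";

        char *stored   = pgsp_json_shorten(json);    /* compact property names */
        char *restored = pgsp_json_inflate(stored);  /* long-name JSON again   */
        char *as_text  = pgsp_json_textize(stored);  /* classic EXPLAIN text   */
        char *as_yaml  = pgsp_json_yamlize(stored);
        char *as_xml   = pgsp_json_xmlize(stored);

        (void) restored; (void) as_text; (void) as_yaml; (void) as_xml;
    }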
--------------------------------------------------------------------------------
/pgsp_json_int.h:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 |  * pgsp_json_int.h: Definitions for internal use by pgsp_json.c
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_json_int.h
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | typedef enum
15 | {
16 | PGSP_JSON_SHORTEN,
17 | PGSP_JSON_INFLATE,
18 | PGSP_JSON_TEXTIZE,
19 | PGSP_JSON_YAMLIZE,
20 | PGSP_JSON_XMLIZE,
21 | PGSP_JSON_NORMALIZE
22 | } pgsp_parser_mode;
23 |
24 | typedef enum
25 | {
26 | S_Invalid,
27 | S_Plain,
28 | S_Sorted,
29 | S_Hashed,
30 | S_Mixed
31 | } pgsp_strategies;
32 |
33 | typedef const char *(converter_t)(const char *src, pgsp_parser_mode mode);
34 | typedef void (setter_t)(node_vals *vals, const char *val);
35 |
36 | typedef enum
37 | {
38 | P_Invalid,
39 | P_Plan,
40 | P_Plans,
41 | P_NodeType,
42 | P_RelationShip,
43 | P_ScanDir,
44 | P_IndexName,
45 | P_RelationName,
46 | P_FunctioName,
47 | P_CTEName,
48 | P_Schema,
49 | P_Alias,
50 | P_Output,
51 | P_MergeCond,
52 | P_Strategy,
53 | P_JoinType,
54 | P_Command,
55 | P_SortMethod,
56 | P_SortKey,
57 | P_GroupKey,
58 | P_GroupKeys,
59 | P_GroupSets,
60 | P_HashKeys,
61 | P_HashKey,
62 | P_Filter,
63 | P_JoinFilter,
64 | P_HashCond,
65 | P_IndexCond,
66 | P_TidCond,
67 | P_RecheckCond,
68 | P_Operation,
69 | P_SubplanName,
70 | P_Triggers,
71 | P_Trigger,
72 | P_TriggerName,
73 | P_TrgRelation,
74 | P_ConstraintName,
75 | P_Parallel,
76 | P_PartialMode,
77 | P_WorkersPlanned,
78 |
79 | P_FunctionCall,
80 | P_StartupCost,
81 | P_TotalCost,
82 | P_PlanRows,
83 | P_PlanWidth,
84 | P_ActualStartupTime,
85 | P_ActualTotalTime,
86 | P_ActualRows,
87 | P_ActualLoops,
88 | P_HeapFetches,
89 | P_SharedHitBlks,
90 | P_SharedReadBlks,
91 | P_SharedDirtiedBlks,
92 | P_SharedWrittenBlks,
93 | P_LocalHitBlks,
94 | P_LocalReadBlks,
95 | P_LocalDirtiedBlks,
96 | P_LocalWrittenBlks,
97 | P_TempReadBlks,
98 | P_TempWrittenBlks,
99 | P_IOReadTime,
100 | P_IOWwriteTime,
101 | P_SortSpaceUsed,
102 | P_SortSpaceType,
103 | P_PeakMemoryUsage,
104 | P_OrgHashBatches,
105 | P_OrgHashBuckets,
106 | P_HashBatches,
107 | P_HashBuckets,
108 | P_RowsFilterRmvd,
109 | P_RowsIdxRchkRmvd,
110 | P_TrgTime,
111 | P_TrgCalls,
112 | P_PlanTime,
113 | P_ExecTime,
114 | P_ExactHeapBlks,
115 | P_LossyHeapBlks,
116 | P_RowsJoinFltRemvd,
117 | P_TargetTables,
118 | P_ConfRes,
119 | P_ConfArbitIdx,
120 | P_TuplesInserted,
121 | P_ConfTuples,
122 | P_SamplingMethod,
123 | P_SamplingParams,
124 | P_RepeatableSeed,
125 | P_Workers,
126 | P_WorkersLaunched,
127 | P_WorkerNumber,
128 | P_InnerUnique,
129 | P_TableFuncName,
130 | P_PresortedKey,
131 | P_FullsortGroups,
132 | P_SortMethodsUsed,
133 | P_SortSpaceMemory,
134 | P_GroupCount,
135 | P_AvgSortSpcUsed,
136 | P_PeakSortSpcUsed,
137 | P_PreSortedGroups,
138 | P_AsyncCapable
139 | } pgsp_prop_tags;
140 |
141 | typedef struct
142 | {
143 | int tag; /* Tag to identify words */
144 | char *shortname; /* Property name for short-style JSON */
145 | char *longname; /* Property name for long(normal)-style JSON */
146 | char *textname; /* Property name for Text representation */
147 | 	bool	normalize_use;	/* True means this word is used for
148 | 							   normalization, which affects the
149 | 							   plan-id */
150 | converter_t *converter; /* Converter function for the property name */
151 | 	setter_t   *setter;		/* Setter function for the property value */
152 | } word_table;
153 |
154 | typedef struct
155 | {
156 | StringInfo dest; /* Storage for parse result */
157 | pgsp_parser_mode mode; /* Tells what to do to the parser */
158 | node_vals *nodevals; /* Node value holder */
159 | char *org_string; /* What to parse */
160 |
161 | /* Working variables used internally in parser */
162 | int level; /* Next (indent or object) level */
163 | Bitmapset *plan_levels; /* Level list for Plan objects */
164 | Bitmapset *first; /* Bitmap set holds whether the first element
165 | * has been processed for each level */
166 | Bitmapset *not_item; /* Bitmap set holds whether the node name at
167 | the level was literally "Item" or not. */
168 | bool remove; /* True if the current node is not shown in
169 | * the result */
170 | bool last_elem_is_object; /* True if the last processed element
171 | * was an object */
172 | pgsp_prop_tags section; /* explain section under processing */
173 | pgsp_prop_tags current_list; /* current list tag that needs special treat*/
174 | 	StringInfo work_str;		/* StringInfo for very short-term use */
175 | char *list_fname; /* the field name of the current_list */
176 | 	char *fname;				/* Field name */
177 | char *wbuf; /* Working buffer */
178 | int wbuflen; /* Length of the working buffer */
179 | int wlist_level; /* Nest level of list for Grouping Sets */
180 | grouping_set *tmp_gset; /* Working area for grouping sets */
181 |
182 | converter_t *valconverter; /* field name converter for the current
183 | * element */
184 | setter_t *setter; /* value converter for the current element */
185 | } pgspParserContext;
186 |
187 |
188 | extern word_table nodetypes[];
189 | extern word_table strategies[];
190 | extern word_table propfields[];
191 |
192 | extern void init_word_index(void);
193 | extern word_table *search_word_table(word_table *tbl,
194 | const char *word, int mode);
195 | extern const char *conv_nodetype(const char *src, pgsp_parser_mode mode);
196 | extern const char *conv_operation(const char *src, pgsp_parser_mode mode);
197 | extern const char *conv_scandir(const char *src, pgsp_parser_mode mode);
198 | extern const char *conv_expression(const char *src, pgsp_parser_mode mode);
199 | extern const char *conv_relasionship(const char *src, pgsp_parser_mode mode);
200 | extern const char *conv_jointype(const char *src, pgsp_parser_mode mode);
201 | extern const char *conv_strategy(const char *src, pgsp_parser_mode mode);
202 | extern const char *conv_setsetopcommand(const char *src, pgsp_parser_mode mode);
203 | extern const char *conv_sortmethod(const char *src, pgsp_parser_mode mode);
204 | extern const char *conv_sortspacetype(const char *src, pgsp_parser_mode mode);
205 | extern const char *conv_partialmode(const char *src, pgsp_parser_mode mode);
206 |
207 | extern bool run_pg_parse_json(JsonLexContext *lex, JsonSemAction *sem);
208 | extern void init_parser_context(pgspParserContext *ctx, int mode,
209 | char *orgstr, char *buf,int buflen);
210 | extern void init_json_lex_context(JsonLexContext *lex, char *json);
211 |
212 |
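
For orientation, here is a sketch of what one propfields[] row looks like under the word_table layout above, followed by the lookup idiom the field-name callbacks in pgsp_json.c use. The short name "t" is only an illustrative guess; the real table in pgsp_json.c is the authority for the actual values.

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "nodes/nodes.h"
    #include "nodes/bitmapset.h"
    #include "nodes/pg_list.h"
    #include "common/jsonapi.h"
    #include "pgsp_json_text.h"
    #include "pgsp_json_int.h"

    /* Illustrative row only -- the real rows are defined in pgsp_json.c */
    static word_table example_row =
        {P_NodeType,         /* tag                                    */
         "t",                /* shortname (hypothetical value)         */
         "Node Type",        /* longname, as emitted by EXPLAIN JSON   */
         NULL,               /* textname                               */
         true,               /* participates in plan-id normalization  */
         conv_nodetype,      /* value converter                        */
         SETTER(node_type)}; /* value setter used for text output      */

    /* Lookup idiom used by the field-name callbacks */
    static void
    example_lookup(pgspParserContext *ctx, char *fname)
    {
        word_table *p = search_word_table(propfields, fname, ctx->mode);

        /* unknown names fall through unchanged, as in yaml_ofstart() above */
        ctx->fname = p ? p->longname : fname;
        ctx->valconverter = p ? p->converter : NULL;
    }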
--------------------------------------------------------------------------------
/pgsp_json_text.c:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 |  * pgsp_json_text.c: Text plan generator for pg_store_plans.
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_json_text.c
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | #include "postgres.h"
15 | #include "miscadmin.h"
16 | #include "nodes/nodes.h"
17 | #include "nodes/bitmapset.h"
18 | #include "nodes/pg_list.h"
19 | #include "utils/json.h"
20 | #if PG_VERSION_NUM < 130000
21 | #include "utils/jsonapi.h"
22 | #else
23 | #include "common/jsonapi.h"
24 | #endif
25 | #include "utils/builtins.h"
26 |
27 | #include "pgsp_json_text.h"
28 | #include "pgsp_json_int.h"
29 |
30 | #if PG_VERSION_NUM < 160000
31 | #define JsonParseErrorType void
32 | #define JSONACTION_RETURN_SUCCESS()
33 | #else
34 | #define JSONACTION_RETURN_SUCCESS() return JSON_SUCCESS
35 | #endif
36 |
37 | static void clear_nodeval(node_vals *vals);
38 | static void print_current_node(pgspParserContext *ctx);
39 | static void print_current_trig_node(pgspParserContext *ctx);
40 | static void print_prop(StringInfo s, char *prepstr,
41 | const char *prop, int leve, int exind);
42 | static void print_prop_if_exists(StringInfo s, char *prepstr,
43 | const char *prop, int leve, int exind);
44 | static void print_prop_if_nz(StringInfo s, char *prepstr,
45 | const char *prop, int leve, int exind);
46 | static JsonParseErrorType json_text_objstart(void *state);
47 | static JsonParseErrorType json_text_objend(void *state);
48 | static JsonParseErrorType json_text_arrstart(void *state);
49 | static JsonParseErrorType json_text_arrend(void *state);
50 | static JsonParseErrorType json_text_ofstart(void *state, char *fname,
51 | bool isnull);
52 | static JsonParseErrorType json_text_ofend(void *state, char *fname,
53 | bool isnull);
54 | static JsonParseErrorType json_text_scalar(void *state, char *token,
55 | JsonTokenType tokentype);
56 |
57 | /* Parser callbacks for plan textization */
58 |
59 | /*
60 |  * This setter is used for field names that pg_store_plans doesn't know about.
61 |  * Unlike the other setters, it holds a list of strings that are emitted as-is
62 |  * in text explains.
63 | */
64 | SETTERDECL(_undef)
65 | {
66 | StringInfo s;
67 |
68 | if(vals->_undef_newelem)
69 | {
70 | s = makeStringInfo();
71 | vals->_undef = lappend(vals->_undef, s);
72 | }
73 | else
74 | {
75 | s = llast (vals->_undef);
76 | }
77 |
78 | appendStringInfoString(s, val);
79 | }
80 |
81 | SETTERDECL(node_type)
82 | {
83 | word_table *p;
84 |
85 | vals->node_type = val;
86 | vals->nodetag = T_Invalid;
87 |
88 | p = search_word_table(nodetypes, val, PGSP_JSON_TEXTIZE);
89 | if (p)
90 | {
91 | vals->node_type = (p->textname ? p->textname : p->longname);
92 | vals->nodetag = p->tag;
93 | }
94 | }
95 |
96 | SETTERDECL(strategy)
97 | {
98 | word_table *p;
99 |
100 | p = search_word_table(strategies, val, PGSP_JSON_TEXTIZE);
101 |
102 | if (!p)
103 | return;
104 |
105 | switch (vals->nodetag)
106 | {
107 | case T_Agg:
108 | switch (p->tag)
109 | {
110 | case S_Hashed:
111 | vals->node_type = "HashAggregate"; break;
112 | case S_Sorted:
113 | vals->node_type = "GroupAggregate"; break;
114 | case S_Mixed:
115 | vals->node_type = "MixedAggregate"; break;
116 | default:
117 | break;
118 | }
119 | break;
120 |
121 | case T_SetOp:
122 | if (p->tag == S_Hashed)
123 | vals->node_type = "HashSetOp";
124 | break;
125 |
126 | default:
127 | break;
128 | }
129 | }
130 | CONVERSION_SETTER(scan_dir, conv_scandir);
131 | SQLQUOTE_SETTER(obj_name);
132 | SQLQUOTE_SETTER(alias);
133 | SQLQUOTE_SETTER(schema_name);
134 | LIST_SETTER(output);
135 | DEFAULT_SETTER(merge_cond);
136 | CONVERSION_SETTER(join_type, conv_jointype);
137 | CONVERSION_SETTER(setopcommand, conv_setsetopcommand);
138 | CONVERSION_SETTER(sort_method, conv_sortmethod);
139 | LIST_SETTER(sort_key);
140 | LIST_SETTER(group_key);
141 | LIST_SETTER(hash_key);
142 | BOOL_SETTER(parallel_aware);
143 | CONVERSION_SETTER(partial_mode, conv_partialmode);
144 | SQLQUOTE_SETTER(index_name);
145 | DEFAULT_SETTER(startup_cost);
146 | DEFAULT_SETTER(total_cost);
147 | DEFAULT_SETTER(plan_rows);
148 | DEFAULT_SETTER(plan_width);
149 | DEFAULT_SETTER(sort_space_used);
150 | CONVERSION_SETTER(sort_space_type, conv_sortspacetype);
151 | DEFAULT_SETTER(filter);
152 | DEFAULT_SETTER(join_filter);
153 | DEFAULT_SETTER(func_call);
154 | DEFAULT_SETTER(index_cond);
155 | DEFAULT_SETTER(recheck_cond);
156 | CONVERSION_SETTER(operation, conv_operation);
157 | DEFAULT_SETTER(subplan_name);
158 | DEFAULT_SETTER(hash_cond);
159 | DEFAULT_SETTER(tid_cond);
160 | DEFAULT_SETTER(filter_removed);
161 | DEFAULT_SETTER(idxrchk_removed);
162 | DEFAULT_SETTER(peak_memory_usage);
163 | DEFAULT_SETTER(org_hash_batches);
164 | DEFAULT_SETTER(org_hash_buckets);
165 | DEFAULT_SETTER(hash_batches);
166 | DEFAULT_SETTER(hash_buckets);
167 | DEFAULT_SETTER(actual_startup_time);
168 | DEFAULT_SETTER(actual_total_time);
169 | DEFAULT_SETTER(actual_rows);
170 | DEFAULT_SETTER(actual_loops);
171 | DEFAULT_SETTER(heap_fetches);
172 | DEFAULT_SETTER(shared_hit_blks);
173 | DEFAULT_SETTER(shared_read_blks);
174 | DEFAULT_SETTER(shared_dirtied_blks);
175 | DEFAULT_SETTER(shared_written_blks);
176 | DEFAULT_SETTER(local_hit_blks);
177 | DEFAULT_SETTER(local_read_blks);
178 | DEFAULT_SETTER(local_dirtied_blks);
179 | DEFAULT_SETTER(local_written_blks);
180 | DEFAULT_SETTER(temp_read_blks);
181 | DEFAULT_SETTER(temp_written_blks);
182 | DEFAULT_SETTER(io_read_time);
183 | DEFAULT_SETTER(io_write_time);
184 | SQLQUOTE_SETTER(trig_name);
185 | SQLQUOTE_SETTER(trig_relation);
186 | DEFAULT_SETTER(trig_time);
187 | DEFAULT_SETTER(trig_calls);
188 | DEFAULT_SETTER(plan_time);
189 | DEFAULT_SETTER(exec_time);
190 | DEFAULT_SETTER(exact_heap_blks);
191 | DEFAULT_SETTER(lossy_heap_blks);
192 | DEFAULT_SETTER(joinfilt_removed);
193 | DEFAULT_SETTER(conflict_resolution);
194 | LIST_SETTER(conflict_arbiter_indexes);
195 | DEFAULT_SETTER(tuples_inserted);
196 | DEFAULT_SETTER(conflicting_tuples);
197 | DEFAULT_SETTER(sampling_method);
198 | LIST_SETTER(sampling_params);
199 | DEFAULT_SETTER(repeatable_seed);
200 | DEFAULT_SETTER(worker_number);
201 | DEFAULT_SETTER(workers_planned);
202 | DEFAULT_SETTER(workers_launched);
203 | BOOL_SETTER(inner_unique);
204 | BOOL_SETTER(async_capable);
205 | DEFAULT_SETTER(table_func_name);
206 | LIST_SETTER(presorted_key);
207 | LIST_SETTER(sortmethod_used);
208 | DEFAULT_SETTER(sortspace_mem);
209 | DEFAULT_SETTER(group_count);
210 | DEFAULT_SETTER(avg_sortspc_used);
211 | DEFAULT_SETTER(peak_sortspc_used);
212 |
213 | #define ISZERO(s) (!s || strcmp(s, "0") == 0 || strcmp(s, "0.000") == 0 )
214 | #define HASSTRING(s) (s && strlen(s) > 0)
215 | #define TEXT_LEVEL_STEP 6
216 | #define TEXT_INDENT_OFFSET 2
217 | #define TEXT_INDENT_BASE(l, e) \
218 | (((l < 2) ? 0 : (TEXT_LEVEL_STEP * (l - 2) + TEXT_INDENT_OFFSET)) + e)
219 | #define TEXT_INDENT_DETAILS(l, e) \
220 | (TEXT_INDENT_BASE(l, e) + ((l < 2) ? 2 : 6))
221 |
222 | static void
223 | print_obj_name0(StringInfo s,
224 | const char *obj_name, const char *schema_name, const char *alias)
225 | {
226 | bool on_written = false;
227 |
228 | if (HASSTRING(obj_name))
229 | {
230 | on_written = true;
231 | appendStringInfoString(s, " on ");
232 | if (HASSTRING(schema_name))
233 | {
234 | appendStringInfoString(s, schema_name);
235 | appendStringInfoChar(s, '.');
236 | }
237 | appendStringInfoString(s, obj_name);
238 | }
239 | if (HASSTRING(alias) &&
240 | (!HASSTRING(obj_name) || strcmp(obj_name, alias) != 0))
241 | {
242 | if (!on_written)
243 | appendStringInfoString(s, " on ");
244 | else
245 | appendStringInfoChar(s, ' ');
246 | appendStringInfoString(s, alias);
247 | }
248 | }
249 |
250 | static void
251 | print_obj_name(pgspParserContext *ctx)
252 | {
253 | node_vals *v = ctx->nodevals;
254 | StringInfo s = ctx->dest;
255 |
256 | print_obj_name0(s, v->obj_name, v->schema_name, v->alias);
257 | }
258 |
259 | static void
260 | print_prop(StringInfo s, char *prepstr,
261 | const char *prop, int level, int exind)
262 | {
263 | if (level > 0)
264 | {
265 | appendStringInfoString(s, "\n");
266 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
267 | }
268 | appendStringInfoString(s, prepstr);
269 | appendStringInfoString(s, prop);
270 | }
271 |
272 | static void
273 | print_prop_if_exists(StringInfo s, char *prepstr,
274 | const char *prop, int level, int exind)
275 | {
276 | if (HASSTRING(prop))
277 | print_prop(s, prepstr, prop, level, exind);
278 | }
279 |
280 | static void
281 | print_propstr_if_exists(StringInfo s, char *prepstr,
282 | StringInfo prop, int level, int exind)
283 | {
284 | if (prop && prop->data[0])
285 | {
286 | appendStringInfoString(s, "\n");
287 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
288 | appendStringInfoString(s, prepstr);
289 | appendStringInfoString(s, prop->data);
290 | }
291 | }
292 |
293 | static void
294 | print_groupingsets_if_exists(StringInfo s, List *gss, int level, int exind)
295 | {
296 | ListCell *lc;
297 |
298 | foreach (lc, gss)
299 | {
300 | ListCell *lcg;
301 | grouping_set *gs = (grouping_set *)lfirst (lc);
302 |
303 | if (gs->sort_keys)
304 | {
305 | print_prop_if_exists(s, "Sort Key: ", gs->sort_keys, level, exind);
306 | exind += 2;
307 | }
308 |
309 | foreach (lcg, gs->group_keys)
310 | {
311 | const char *gk = (const char *)lfirst (lcg);
312 | print_prop_if_exists(s, gs->key_type, gk, level, exind);
313 | }
314 |
315 | }
316 | }
317 |
318 | static void
319 | print_prop_if_nz(StringInfo s, char *prepstr,
320 | const char *prop, int level, int exind)
321 | {
322 | if (!ISZERO(prop))
323 | print_prop(s, prepstr, prop, level, exind);
324 | }
325 |
326 | static void
327 | print_current_node(pgspParserContext *ctx)
328 | {
329 | node_vals *v = ctx->nodevals;
330 | StringInfo s = ctx->dest;
331 | ListCell *lc;
332 | int level = ctx->level - 1;
333 | bool comma = false;
334 | int exind = 0;
335 |
336 | /*
337 | 	 * The element objects in the "Workers" list don't have a node type, which
338 | 	 * would be named T_Worker if it existed in nodes.h.  So they need special
339 | 	 * treatment.
340 | */
341 |
342 | if (v->node_type == T_Invalid && !HASSTRING(v->worker_number))
343 | return;
344 |
345 | if (s->len > 0)
346 | appendStringInfoString(s, "\n");
347 | appendStringInfoSpaces(s, TEXT_INDENT_BASE(level, exind));
348 |
349 | if (HASSTRING(v->subplan_name))
350 | {
351 | appendStringInfoString(s, v->subplan_name);
352 | appendStringInfoString(s, "\n");
353 | exind = 2;
354 | appendStringInfoSpaces(s, TEXT_INDENT_BASE(level, exind));
355 | }
356 |
357 | 	/* list items don't need this header */
358 | if (level > 1 && ctx->current_list == P_Invalid)
359 | appendStringInfoString(s, "-> ");
360 |
361 | if (v->parallel_aware)
362 | appendStringInfoString(s, "Parallel ");
363 |
364 | if (v->async_capable)
365 | appendStringInfoString(s, "Async ");
366 |
367 | switch (v->nodetag)
368 | {
369 | case T_ModifyTable:
370 | case T_SeqScan:
371 | case T_BitmapHeapScan:
372 | case T_TidScan:
373 | case T_SubqueryScan:
374 | case T_FunctionScan:
375 | case T_ValuesScan:
376 | case T_CteScan:
377 | case T_WorkTableScan:
378 | case T_ForeignScan:
379 | if (v->nodetag == T_ModifyTable)
380 | appendStringInfoString(s, v->operation);
381 | else
382 | appendStringInfoString(s, v->node_type);
383 |
384 | print_obj_name(ctx);
385 | break;
386 |
387 | case T_IndexScan:
388 | case T_IndexOnlyScan:
389 | case T_BitmapIndexScan:
390 | appendStringInfoString(s, v->node_type);
391 | print_prop_if_exists(s, " ", v->scan_dir, 0, 0);
392 | print_prop_if_exists(s, " using ", v->index_name, 0, 0);
393 | print_obj_name(ctx);
394 | break;
395 |
396 | case T_NestLoop:
397 | case T_MergeJoin:
398 | case T_HashJoin:
399 | appendStringInfoString(s, v->node_type);
400 | if (v->join_type && strcmp(v->join_type, "Inner") != 0)
401 | {
402 | appendStringInfoChar(s, ' ');
403 | appendStringInfoString(s, v->join_type);
404 | }
405 | if (v->nodetag != T_NestLoop)
406 | appendStringInfoString(s, " Join");
407 | break;
408 |
409 | case T_SetOp:
410 | appendStringInfoString(s, v->node_type);
411 | print_prop_if_exists(s, " ", v->setopcommand, 0, 0);
412 | break;
413 |
414 | default:
415 | /* Existence of worker_number suggests this is a Worker node */
416 | if (HASSTRING(v->worker_number))
417 | {
418 | appendStringInfoString(s, "Worker");
419 | print_prop_if_exists(s, " ", v->worker_number, 0, 0);
420 |
421 | /*
422 | * "Worker"s are individual JSON objects in a JSON list but
423 | 				 * should be printed as just a property in the text
424 | 				 * representation.  Correct the indent using exind here.
425 | */
426 | exind = -4;
427 | }
428 | else
429 | appendStringInfoString(s, v->node_type);
430 | break;
431 | }
432 |
433 | 	/* Don't show costs for child tables */
434 | if (ctx->current_list == P_TargetTables)
435 | return;
436 |
437 | if (!ISZERO(v->startup_cost) &&
438 | !ISZERO(v->total_cost) &&
439 | HASSTRING(v->plan_rows) &&
440 | HASSTRING(v->plan_width))
441 | {
442 | appendStringInfoString(s, " (cost=");
443 | appendStringInfoString(s, v->startup_cost);
444 | appendStringInfoString(s, "..");
445 | appendStringInfoString(s, v->total_cost);
446 | appendStringInfoString(s, " rows=");
447 | appendStringInfoString(s, v->plan_rows);
448 | appendStringInfoString(s, " width=");
449 | appendStringInfoString(s, v->plan_width);
450 | appendStringInfoString(s, ")");
451 | }
452 |
453 | if (HASSTRING(v->actual_loops) && ISZERO(v->actual_loops))
454 | appendStringInfoString(s, " (never executed)");
455 | else if (HASSTRING(v->actual_rows) &&
456 | HASSTRING(v->actual_loops) &&
457 | HASSTRING(v->actual_startup_time) &&
458 | HASSTRING(v->actual_total_time))
459 | {
460 | appendStringInfoString(s, " (actual ");
461 | appendStringInfoString(s, "time=");
462 | appendStringInfoString(s, v->actual_startup_time);
463 | appendStringInfoString(s, "..");
464 | appendStringInfoString(s, v->actual_total_time);
465 | appendStringInfoString(s, " ");
466 |
467 | appendStringInfoString(s, "rows=");
468 | appendStringInfoString(s, v->actual_rows);
469 |
470 | appendStringInfoString(s, " loops=");
471 | appendStringInfoString(s, v->actual_loops);
472 |
473 | appendStringInfoString(s, ")");
474 | }
475 |
476 | foreach(lc, v->target_tables)
477 | {
478 | char *str = (char *)lfirst (lc);
479 |
480 | appendStringInfoString(s, "\n");
481 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
482 | appendStringInfoString(s, str);
483 | }
484 |
485 | print_propstr_if_exists(s, "Output: ", v->output, level, exind);
486 | print_propstr_if_exists(s, "Group Key: ", v->group_key, level, exind);
487 | print_groupingsets_if_exists(s, v->grouping_sets, level, exind);
488 | print_prop_if_exists(s, "Merge Cond: ", v->merge_cond, level, exind);
489 | print_prop_if_exists(s, "Hash Cond: " , v->hash_cond, level, exind);
490 | print_prop_if_exists(s, "Tid Cond: " , v->tid_cond, level, exind);
491 | print_prop_if_exists(s, "Join Filter: " , v->join_filter, level, exind);
492 | print_prop_if_exists(s, "Index Cond: " , v->index_cond, level, exind);
493 | print_prop_if_exists(s, "Recheck Cond: ", v->recheck_cond, level, exind);
494 | print_prop_if_exists(s, "Workers Planned: ", v->workers_planned, level, exind);
495 | print_prop_if_exists(s, "Workers Launched: ", v->workers_launched, level, exind);
496 |
497 | if (HASSTRING(v->sampling_method))
498 | {
499 | appendStringInfoString(s, "\n");
500 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
501 | appendStringInfo(s, "Sampling: %s (%s)",
502 | v->sampling_method,
503 | v->sampling_params ? v->sampling_params->data : "");
504 | if (v->repeatable_seed)
505 | appendStringInfo(s, " REPEATABLE (%s)", v->repeatable_seed);
506 | }
507 |
508 | print_propstr_if_exists(s, "Sort Key: ", v->sort_key, level, exind);
509 | if (HASSTRING(v->sort_method))
510 | {
511 | appendStringInfoString(s, "\n");
512 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
513 | appendStringInfoString(s, "Sort Method: ");
514 | appendStringInfoString(s, v->sort_method);
515 |
516 | if (HASSTRING(v->sort_space_type) &&
517 | HASSTRING(v->sort_space_used))
518 | {
519 | appendStringInfoString(s, " ");
520 | appendStringInfoString(s, v->sort_space_type);
521 | appendStringInfoString(s, ": ");
522 | appendStringInfoString(s, v->sort_space_used);
523 | appendStringInfoString(s, "kB");
524 | }
525 | }
526 |
527 | print_prop_if_exists(s, "Function Call: ", v->func_call, level, exind);
528 |
529 | /*
530 | 	 * Emit unknown properties here.  The properties are printed in the same
531 | 	 * shape as JSON properties, as assumed by explain.c.
532 | */
533 | foreach (lc, v->_undef)
534 | {
535 | StringInfo str = (StringInfo) lfirst(lc);
536 |
537 | appendStringInfoString(s, "\n");
538 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
539 | appendStringInfoString(s, str->data);
540 | }
541 | v->_undef = NULL;
542 |
543 | print_prop_if_exists(s, "Filter: ", v->filter, level, exind);
544 | print_prop_if_nz(s, "Rows Removed by Filter: ",
545 | v->filter_removed, level, exind);
546 | print_prop_if_nz(s, "Rows Removed by Index Recheck: ",
547 | v->idxrchk_removed, level, exind);
548 | print_prop_if_nz(s, "Rows Removed by Join Filter: ",
549 | v->joinfilt_removed, level, exind);
550 |
551 | if (HASSTRING(v->exact_heap_blks) ||
552 | HASSTRING(v->lossy_heap_blks))
553 | {
554 | appendStringInfoString(s, "\n");
555 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
556 | appendStringInfoString(s, "Heap Blocks:");
557 | print_prop_if_nz(s, " exact=", v->exact_heap_blks, 0, exind);
558 | print_prop_if_nz(s, " lossy=", v->lossy_heap_blks, 0, exind);
559 | }
560 |
561 | if (!ISZERO(v->hash_buckets))
562 | {
563 | bool show_original = false;
564 |
565 | appendStringInfoString(s, "\n");
566 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
567 | appendStringInfoString(s, "Buckets: ");
568 | appendStringInfoString(s, v->hash_buckets);
569 |
570 | /* See show_hash_info() in explain.c for details */
571 | if ((v->org_hash_buckets &&
572 | strcmp(v->hash_buckets, v->org_hash_buckets) != 0) ||
573 | (v->org_hash_batches &&
574 | strcmp(v->hash_batches, v->org_hash_batches) != 0))
575 | show_original = true;
576 |
577 | if (show_original && v->org_hash_buckets)
578 | {
579 | appendStringInfoString(s, " (originally ");
580 | appendStringInfoString(s, v->org_hash_buckets);
581 | appendStringInfoChar(s, ')');
582 | }
583 |
584 | if (!ISZERO(v->hash_batches))
585 | {
586 | appendStringInfoString(s, " Batches: ");
587 | appendStringInfoString(s, v->hash_batches);
588 | if (show_original && v->org_hash_batches)
589 | {
590 | appendStringInfoString(s, " (originally ");
591 | appendStringInfoString(s, v->org_hash_batches);
592 | appendStringInfoChar(s, ')');
593 | }
594 | }
595 | if (!ISZERO(v->peak_memory_usage))
596 | {
597 | appendStringInfoString(s, " Memory Usage: ");
598 | appendStringInfoString(s, v->peak_memory_usage);
599 | appendStringInfoString(s, "kB");
600 | }
601 | }
602 |
603 | print_prop_if_exists(s, "Heap Fetches: ", v->heap_fetches, level, exind);
604 | print_prop_if_exists(s, "Conflict Resolution: ",
605 | v->conflict_resolution, level, exind);
606 | print_propstr_if_exists(s, "Conflict Arbiter Indexes: ",
607 | v->conflict_arbiter_indexes, level, exind);
608 | print_prop_if_exists(s, "Tuples Inserted: ",
609 | v->tuples_inserted, level, exind);
610 | print_prop_if_exists(s, "Conflicting Tuples: ",
611 | v->conflicting_tuples, level, exind);
612 |
613 | if (!ISZERO(v->shared_hit_blks) ||
614 | !ISZERO(v->shared_read_blks) ||
615 | !ISZERO(v->shared_dirtied_blks) ||
616 | !ISZERO(v->shared_written_blks))
617 | {
618 | appendStringInfoString(s, "\n");
619 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
620 | appendStringInfoString(s, "Buffers: shared");
621 |
622 | if (!ISZERO(v->shared_hit_blks))
623 | {
624 | appendStringInfoString(s, " hit=");
625 | appendStringInfoString(s, v->shared_hit_blks);
626 | comma =true;
627 | }
628 | if (!ISZERO(v->shared_read_blks))
629 | {
630 | appendStringInfoString(s, " read=");
631 | appendStringInfoString(s, v->shared_read_blks);
632 | comma =true;
633 | }
634 | if (!ISZERO(v->shared_dirtied_blks))
635 | {
636 | appendStringInfoString(s, " dirtied=");
637 | appendStringInfoString(s, v->shared_dirtied_blks);
638 | comma =true;
639 | }
640 | if (!ISZERO(v->shared_written_blks))
641 | {
642 | appendStringInfoString(s, " written=");
643 | appendStringInfoString(s, v->shared_written_blks);
644 | comma =true;
645 | }
646 | }
647 | if (!ISZERO(v->local_hit_blks) ||
648 | !ISZERO(v->local_read_blks) ||
649 | !ISZERO(v->local_dirtied_blks) ||
650 | !ISZERO(v->local_written_blks))
651 | {
652 | if (comma)
653 | appendStringInfoString(s, ", ");
654 | else
655 | {
656 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
657 | appendStringInfoString(s, "Buffers: ");
658 | }
659 |
660 | appendStringInfoString(s, "local");
661 | if (!ISZERO(v->local_hit_blks))
662 | {
663 | appendStringInfoString(s, " hit=");
664 | appendStringInfoString(s, v->local_hit_blks);
665 | comma =true;
666 | }
667 | if (!ISZERO(v->local_read_blks))
668 | {
669 | appendStringInfoString(s, " read=");
670 | appendStringInfoString(s, v->local_read_blks);
671 | comma =true;
672 | }
673 | if (!ISZERO(v->local_dirtied_blks))
674 | {
675 | appendStringInfoString(s, " dirtied=");
676 | appendStringInfoString(s, v->local_dirtied_blks);
677 | comma =true;
678 | }
679 | if (!ISZERO(v->local_written_blks))
680 | {
681 | appendStringInfoString(s, " written=");
682 | appendStringInfoString(s, v->local_written_blks);
683 | comma =true;
684 | }
685 | }
686 | if (!ISZERO(v->temp_read_blks) ||
687 | !ISZERO(v->temp_written_blks))
688 | {
689 | if (comma)
690 | appendStringInfoString(s, ", ");
691 | else
692 | {
693 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
694 | appendStringInfoString(s, "Buffers: ");
695 | }
696 |
697 | appendStringInfoString(s, "temp");
698 | if (!ISZERO(v->temp_read_blks))
699 | {
700 | appendStringInfoString(s, " read=");
701 | appendStringInfoString(s, v->temp_read_blks);
702 | comma =true;
703 | }
704 | if (!ISZERO(v->temp_written_blks))
705 | {
706 | appendStringInfoString(s, " written=");
707 | appendStringInfoString(s, v->temp_written_blks);
708 | comma =true;
709 | }
710 | }
711 | if (!ISZERO(v->io_read_time) ||
712 | !ISZERO(v->io_write_time))
713 | {
714 | 		/* Feed a line if any of the "Buffers:" items have been shown */
715 | if (comma)
716 | appendStringInfoString(s, "\n");
717 |
718 | appendStringInfoSpaces(s, TEXT_INDENT_DETAILS(level, exind));
719 | appendStringInfoString(s, "I/O Timings: ");
720 |
721 | if (!ISZERO(v->io_read_time))
722 | {
723 | appendStringInfoString(s, " read=");
724 | appendStringInfoString(s, v->io_read_time);
725 | }
726 | if (!ISZERO(v->io_write_time))
727 | {
728 | appendStringInfoString(s, " write=");
729 | appendStringInfoString(s, v->io_write_time);
730 | }
731 | }
732 | }
733 |
734 | static void
735 | print_current_trig_node(pgspParserContext *ctx)
736 | {
737 | node_vals *v = ctx->nodevals;
738 | StringInfo s = ctx->dest;
739 |
740 | if (HASSTRING(v->trig_name) && !ISZERO(v->trig_time))
741 | {
742 | if (s->len > 0)
743 | appendStringInfoString(s, "\n");
744 | appendStringInfoString(s, "Trigger ");
745 | appendStringInfoString(s, v->trig_name);
746 | appendStringInfoString(s, ": time=");
747 | appendStringInfoString(s, v->trig_time);
748 | appendStringInfoString(s, " calls=");
749 | appendStringInfoString(s, v->trig_calls);
750 | }
751 | }
752 |
753 |
754 | static void
755 | clear_nodeval(node_vals *vals)
756 | {
757 | memset(vals, 0, sizeof(node_vals));
758 | }
759 |
760 | static JsonParseErrorType
761 | json_text_objstart(void *state)
762 | {
763 | pgspParserContext *ctx = (pgspParserContext *)state;
764 | ctx->level++;
765 |
766 | /* Create new grouping sets or reset existing ones */
767 | if (ctx->current_list == P_GroupSets)
768 | {
769 | node_vals *v = ctx->nodevals;
770 |
771 | ctx->tmp_gset = (grouping_set*) palloc0(sizeof(grouping_set));
772 | if (!v->sort_key)
773 | v->sort_key = makeStringInfo();
774 | if (!v->group_key)
775 | v->group_key = makeStringInfo();
776 | if (!v->hash_key)
777 | v->hash_key = makeStringInfo();
778 | resetStringInfo(v->sort_key);
779 | resetStringInfo(v->group_key);
780 | resetStringInfo(v->hash_key);
781 | }
782 |
783 | JSONACTION_RETURN_SUCCESS();
784 | }
785 |
786 | static JsonParseErrorType
787 | json_text_objend(void *state)
788 | {
789 | pgspParserContext *ctx = (pgspParserContext *)state;
790 |
791 | /* Print current node if the object is a P_Plan or a child of P_Plans */
792 | if (bms_is_member(ctx->level - 1, ctx->plan_levels))
793 | {
794 | print_current_node(ctx);
795 | clear_nodeval(ctx->nodevals);
796 | }
797 | else if (ctx->section == P_Triggers)
798 | {
799 | print_current_trig_node(ctx);
800 | clear_nodeval(ctx->nodevals);
801 | }
802 | else if (ctx->current_list == P_TargetTables)
803 | {
804 | 		/* Move the current working target tables into nodevals */
805 | node_vals *v = ctx->nodevals;
806 |
807 | if (!ctx->work_str)
808 | ctx->work_str = makeStringInfo();
809 |
810 | resetStringInfo(ctx->work_str);
811 | appendStringInfoString(ctx->work_str, v->operation);
812 | print_obj_name0(ctx->work_str, v->obj_name, v->schema_name, v->alias);
813 | v->target_tables = lappend(v->target_tables,
814 | pstrdup(ctx->work_str->data));
815 | resetStringInfo(ctx->work_str);
816 | }
817 | else if (ctx->current_list == P_GroupSets && ctx->tmp_gset)
818 | {
819 | /* Move working grouping set into nodevals */
820 | node_vals *v = ctx->nodevals;
821 |
822 | /* Copy sort key if any */
823 | if (v->sort_key->data[0])
824 | {
825 | ctx->tmp_gset->sort_keys = strdup(v->sort_key->data);
826 | resetStringInfo(v->sort_key);
827 | }
828 |
829 | /* Move working grouping set into nodevals */
830 | ctx->nodevals->grouping_sets =
831 | lappend(v->grouping_sets, ctx->tmp_gset);
832 | ctx->tmp_gset = NULL;
833 | }
834 |
835 | ctx->last_elem_is_object = true;
836 | ctx->level--;
837 |
838 | JSONACTION_RETURN_SUCCESS();
839 | }
840 |
841 | static JsonParseErrorType
842 | json_text_arrstart(void *state)
843 | {
844 | pgspParserContext *ctx = (pgspParserContext *)state;
845 |
846 | if (ctx->current_list == P_GroupSets)
847 | {
848 | ctx->wlist_level++;
849 | }
850 |
851 | JSONACTION_RETURN_SUCCESS();
852 | }
853 |
854 | static JsonParseErrorType
855 | json_text_arrend(void *state)
856 | {
857 | pgspParserContext *ctx = (pgspParserContext *)state;
858 |
859 | if (ctx->current_list == P_GroupSets)
860 | {
861 | /*
862 | 		 * A wlist_level of 3 means we are now at the end of the innermost
863 | 		 * list of Group Keys.
864 | */
865 | if (ctx->wlist_level == 3)
866 | {
867 | node_vals *v = ctx->nodevals;
868 |
869 | /*
870 | * At this point, v->group_key holds the keys in "Group Keys". The
871 | * item holds a double-nested list and the innermost lists are to
872 | 			 * go into individual "Group Key" lines.  An empty innermost list
873 | 			 * is represented as "()" there.  See explain.c in PostgreSQL.
874 | */
875 | ctx->tmp_gset->key_type = "Group Key: ";
876 | if (v->group_key->data[0])
877 | {
878 | ctx->tmp_gset->group_keys =
879 | lappend(ctx->tmp_gset->group_keys,
880 | pstrdup(v->group_key->data));
881 | }
882 | else if (v->hash_key->data[0])
883 | {
884 | ctx->tmp_gset->group_keys =
885 | lappend(ctx->tmp_gset->group_keys,
886 | pstrdup(v->hash_key->data));
887 | ctx->tmp_gset->key_type = "Hash Key: ";
888 | }
889 | else
890 | ctx->tmp_gset->group_keys =
891 | lappend(ctx->tmp_gset->group_keys, "()");
892 |
893 | resetStringInfo(ctx->nodevals->group_key);
894 | resetStringInfo(ctx->nodevals->hash_key);
895 | }
896 | ctx->wlist_level--;
897 | }
898 |
899 | JSONACTION_RETURN_SUCCESS();
900 | }
901 |
902 | static JsonParseErrorType
903 | json_text_ofstart(void *state, char *fname, bool isnull)
904 | {
905 | word_table *p;
906 | pgspParserContext *ctx = (pgspParserContext *)state;
907 |
908 | ctx->setter = NULL;
909 | p = search_word_table(propfields, fname, PGSP_JSON_TEXTIZE);
910 |
911 | if (!p)
912 | {
913 | ereport(DEBUG2,
914 | 				(errmsg("Short JSON parser encountered unknown field name: \"%s\", skipped.", fname),
915 | errdetail_log("INPUT: \"%s\"", ctx->org_string)));
916 |
917 | /*
918 | 		 * Unknown properties may be emitted by foreign data wrappers and are
919 | 		 * assumed to be printed in the same format as JSON properties.  We
920 | 		 * store in nodevals a string that can be emitted as-is in text explains.
921 | */
922 | ctx->setter = SETTER(_undef);
923 | ctx->nodevals->_undef_newelem = true;
924 | ctx->setter(ctx->nodevals, fname);
925 | ctx->nodevals->_undef_newelem = false;
926 | ctx->setter(ctx->nodevals, ": ");
927 | }
928 | else
929 | {
930 | /*
931 | * Print the current node immediately if the next level of
932 | 		 * Plan/Plans/Workers comes.  This assumes that the plan output is
933 | 		 * structured tail-recursively.
934 | */
935 | if (p->tag == P_Plan || p->tag == P_Plans || p->tag == P_Workers)
936 | {
937 | print_current_node(ctx);
938 | clear_nodeval(ctx->nodevals);
939 | }
940 | else if (p->tag == P_TargetTables)
941 | {
942 | node_vals *v = ctx->nodevals;
943 |
944 | ctx->current_list = p->tag;
945 | ctx->list_fname = fname;
946 |
947 | /* stash some data */
948 | v->tmp_obj_name = v->obj_name;
949 | v->tmp_schema_name = v->schema_name;
950 | v->tmp_alias = v->alias;
951 | }
952 |
953 | if (p->tag == P_GroupSets || p->tag == P_Workers)
954 | {
955 | ctx->current_list = p->tag;
956 | ctx->list_fname = fname;
957 | ctx->wlist_level = 0;
958 | }
959 |
960 | /*
961 | 		 * This parser prints a partial result at the end of every P_Plan
962 | 		 * object, which includes elements in the P_Plans list.
963 | */
964 | if (p->tag == P_Plan || p->tag == P_Plans || p->tag == P_Workers)
965 | ctx->plan_levels = bms_add_member(ctx->plan_levels, ctx->level);
966 | else
967 | ctx->plan_levels = bms_del_member(ctx->plan_levels, ctx->level);
968 |
969 | if (p->tag == P_Plan || p->tag == P_Triggers)
970 | ctx->section = p->tag;
971 | ctx->setter = p->setter;
972 | }
973 |
974 | JSONACTION_RETURN_SUCCESS();
975 | }
976 |
977 | static JsonParseErrorType
978 | json_text_ofend(void *state, char *fname, bool isnull)
979 | {
980 | pgspParserContext *ctx = (pgspParserContext *)state;
981 | node_vals *v = ctx->nodevals;
982 |
983 | 	/* We assume that lists with the same fname will not be nested */
984 | if (ctx->list_fname && strcmp(fname, ctx->list_fname) == 0)
985 | {
986 | /* Restore stashed data, see json_text_ofstart */
987 | if (ctx->current_list == P_TargetTables)
988 | {
989 | v->obj_name = v->tmp_obj_name;
990 | v->schema_name = v->tmp_schema_name;
991 | v->alias = v->tmp_alias;
992 | }
993 |
994 | ctx->list_fname = NULL;
995 | ctx->current_list = P_Invalid;
996 | }
997 |
998 | 	/* Planning/Execution time appears at the end of the plan */
999 | if (HASSTRING(v->plan_time) ||
1000 | HASSTRING(v->exec_time))
1001 | {
1002 | if (HASSTRING(v->plan_time))
1003 | {
1004 | appendStringInfoString(ctx->dest, "\nPlanning Time: ");
1005 | appendStringInfoString(ctx->dest, v->plan_time);
1006 | appendStringInfoString(ctx->dest, " ms");
1007 | }
1008 | else
1009 | {
1010 | appendStringInfoString(ctx->dest, "\nExecution Time: ");
1011 | appendStringInfoString(ctx->dest, v->exec_time);
1012 | appendStringInfoString(ctx->dest, " ms");
1013 | }
1014 | clear_nodeval(v);
1015 | }
1016 |
1017 | JSONACTION_RETURN_SUCCESS();
1018 | }
1019 |
1020 | static JsonParseErrorType
1021 | json_text_scalar(void *state, char *token, JsonTokenType tokentype)
1022 | {
1023 | pgspParserContext *ctx = (pgspParserContext *)state;
1024 |
1025 | if (ctx->setter)
1026 | ctx->setter(ctx->nodevals, token);
1027 |
1028 | JSONACTION_RETURN_SUCCESS();
1029 | }
1030 |
1031 | char *
1032 | pgsp_json_textize(char *json)
1033 | {
1034 | JsonLexContext lex;
1035 | JsonSemAction sem;
1036 | pgspParserContext ctx;
1037 |
1038 | init_json_lex_context(&lex, json);
1039 | init_parser_context(&ctx, PGSP_JSON_TEXTIZE, json, NULL, 0);
1040 |
1041 | ctx.nodevals = (node_vals*)palloc0(sizeof(node_vals));
1042 |
1043 | sem.semstate = (void*)&ctx;
1044 | sem.object_start = json_text_objstart;
1045 | sem.object_end = json_text_objend;
1046 | sem.array_start = json_text_arrstart;
1047 | sem.array_end = json_text_arrend;
1048 | sem.object_field_start = json_text_ofstart;
1049 | sem.object_field_end = json_text_ofend;
1050 | sem.array_element_start= NULL;
1051 | sem.array_element_end = NULL;
1052 | sem.scalar = json_text_scalar;
1053 |
1054 |
1055 | if (!run_pg_parse_json(&lex, &sem))
1056 | {
1057 | if (ctx.nodevals->node_type)
1058 | print_current_node(&ctx);
1059 |
1060 | if (ctx.dest->len > 0 &&
1061 | ctx.dest->data[ctx.dest->len - 1] != '\n')
1062 | appendStringInfoChar(ctx.dest, '\n');
1063 |
1064 | if (ctx.dest->len == 0)
1065 | appendStringInfoString(ctx.dest, " ");
1066 | else
1067 | appendStringInfoString(ctx.dest, "");
1068 | }
1069 |
1070 | pfree(ctx.nodevals);
1071 |
1072 | return ctx.dest->data;
1073 | }
1074 |
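
A worked example of the indentation arithmetic behind TEXT_LEVEL_STEP, TEXT_INDENT_OFFSET, TEXT_INDENT_BASE and TEXT_INDENT_DETAILS above, computed directly from the macro definitions (exind = 0):

    /*
     * TEXT_INDENT_BASE(l, 0):     l=1 -> 0,  l=2 -> 2,  l=3 -> 8,  l=4 -> 14
     * TEXT_INDENT_DETAILS(l, 0):  l=1 -> 2,  l=2 -> 8,  l=3 -> 14, l=4 -> 20
     *
     * So the top node prints at column 0, "->" child node lines start at
     * columns 2, 8, 14, ..., and detail lines such as "Output:" or "Filter:"
     * are indented two extra spaces at level 1 and six extra spaces from
     * level 2 onward; a nonzero exind (e.g. for subplan names or Worker
     * entries) shifts both values by the same amount.
     */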
--------------------------------------------------------------------------------
/pgsp_json_text.h:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 |  * pgsp_json_text.h: Definitions for the text plan generator for pg_store_plans.
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | * pg_store_plans/pgsp_json_text.h
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | typedef struct
15 | {
16 | const char *sort_keys;
17 | List *group_keys;
18 | char *key_type;
19 | } grouping_set;
20 |
21 | typedef struct
22 | {
23 | NodeTag nodetag;
24 | const char *node_type;
25 | const char *operation;
26 | const char *subplan_name;
27 |
28 | const char *scan_dir;
29 | const char *index_name;
30 | const char *obj_name;
31 | const char *schema_name;
32 |
33 | const char *filter;
34 | const char *join_filter;
35 | const char *rows_removed_by_filter;
36 | const char *alias;
37 | StringInfo output;
38 | List *target_tables;
39 | const char *func_call;
40 | const char *sort_method;
41 | StringInfo sort_key;
42 | StringInfo group_key;
43 | StringInfo hash_key;
44 | List *grouping_sets;
45 | const char *index_cond;
46 | const char *merge_cond;
47 | const char *hash_cond;
48 | const char *tid_cond;
49 | const char *recheck_cond;
50 | const char *hash_buckets;
51 | const char *hash_batches;
52 | const char *setopcommand;
53 | const char *join_type;
54 | const char *org_hash_batches;
55 | const char *org_hash_buckets;
56 | const char *peak_memory_usage;
57 | const char *startup_cost;
58 | const char *total_cost;
59 | const char *plan_rows;
60 | const char *plan_width;
61 | const char *sort_space_used;
62 | const char *sort_space_type;
63 | const char *actual_startup_time;
64 | const char *actual_total_time;
65 | const char *actual_rows;
66 | const char *actual_loops;
67 | const char *heap_fetches;
68 | const char *shared_hit_blks;
69 | const char *shared_read_blks;
70 | const char *shared_dirtied_blks;
71 | const char *shared_written_blks;
72 | const char *local_hit_blks;
73 | const char *local_read_blks;
74 | const char *local_dirtied_blks;
75 | const char *local_written_blks;
76 | const char *temp_read_blks;
77 | const char *temp_written_blks;
78 | const char *io_read_time;
79 | const char *io_write_time;
80 | const char *filter_removed;
81 | const char *idxrchk_removed;
82 | const char *trig_name;
83 | const char *trig_relation;
84 | const char *trig_time;
85 | const char *trig_calls;
86 | const char *plan_time;
87 | const char *exec_time;
88 | const char *exact_heap_blks;
89 | const char *lossy_heap_blks;
90 | const char *joinfilt_removed;
91 | const char *conflict_resolution;
92 | StringInfo conflict_arbiter_indexes;
93 | const char *tuples_inserted;
94 | const char *conflicting_tuples;
95 | const char *sampling_method;
96 | StringInfo sampling_params;
97 | const char *repeatable_seed;
98 | bool parallel_aware;
99 | const char *partial_mode;
100 | const char *worker_number;
101 | const char *workers_planned;
102 | const char *workers_launched;
103 | bool inner_unique;
104 | bool async_capable;
105 | const char *table_func_name;
106 | StringInfo presorted_key;
107 | StringInfo sortmethod_used;
108 | const char *sortspace_mem;
109 | const char *group_count;
110 | const char *avg_sortspc_used;
111 | const char *peak_sortspc_used;
112 |
113 | const char *tmp_obj_name;
114 | const char *tmp_schema_name;
115 | const char *tmp_alias;
116 | List *_undef;
117 | bool _undef_newelem;
118 | } node_vals;
119 |
120 | #define SETTER(name) pgsp_node_set_##name
121 |
122 | #define SETTERDECL(name) extern void SETTER(name)(node_vals *vals, const char *val)
123 | #define DEFAULT_SETTER(name) \
124 | SETTERDECL(name) { vals->name = val;}
125 |
126 | #define SQLQUOTE_SETTER(name) \
127 | SETTERDECL(name) { vals->name = quote_identifier(val);}
128 |
129 | #define BOOL_SETTER(name) \
130 | SETTERDECL(name) { vals->name = (strcmp(val, "true") == 0 ? 1 : 0);}
131 |
132 | #define LIST_SETTER(name) \
133 | SETTERDECL(name) { \
134 | if (!vals->name || !vals->name->data[0])\
135 | { \
136 | vals->name = makeStringInfo(); \
137 | appendStringInfoString(vals->name, val); \
138 | } \
139 | else \
140 | { \
141 | appendStringInfoString(vals->name, ", "); \
142 | appendStringInfoString(vals->name, val); \
143 | } \
144 | }\
145 |
146 | #define CONVERSION_SETTER(name, converter) \
147 | SETTERDECL(name) { vals->name = converter(val, PGSP_JSON_TEXTIZE);}
148 |
149 | extern char *pgsp_json_textize(char *json);
150 |
151 | /* Prototypes for setter for node_vals */
152 | SETTERDECL(_undef);
153 | SETTERDECL(node_type);
154 | SETTERDECL(scan_dir);
155 | SETTERDECL(obj_name);
156 | SETTERDECL(schema_name);
157 | SETTERDECL(alias);
158 | SETTERDECL(output);
159 | SETTERDECL(strategy);
160 | SETTERDECL(join_type);
161 | SETTERDECL(setopcommand);
162 | SETTERDECL(sort_method);
163 | SETTERDECL(sort_key);
164 | SETTERDECL(group_key);
165 | SETTERDECL(group_keys);
166 | SETTERDECL(hash_key);
167 | SETTERDECL(parallel_aware);
168 | SETTERDECL(partial_mode);
169 | SETTERDECL(index_name);
170 | SETTERDECL(startup_cost);
171 | SETTERDECL(total_cost);
172 | SETTERDECL(plan_rows);
173 | SETTERDECL(plan_width);
174 | SETTERDECL(sort_space_used);
175 | SETTERDECL(sort_space_type);
176 | SETTERDECL(filter);
177 | SETTERDECL(join_filter);
178 | SETTERDECL(func_call);
179 | SETTERDECL(operation);
180 | SETTERDECL(subplan_name);
181 | SETTERDECL(index_cond);
182 | SETTERDECL(hash_cond);
183 | SETTERDECL(merge_cond);
184 | SETTERDECL(tid_cond);
185 | SETTERDECL(recheck_cond);
186 | SETTERDECL(hash_buckets);
187 | SETTERDECL(hash_batches);
188 | SETTERDECL(org_hash_batches);
189 | SETTERDECL(org_hash_buckets);
190 | SETTERDECL(peak_memory_usage);
191 | SETTERDECL(filter_removed);
192 | SETTERDECL(idxrchk_removed);
193 | SETTERDECL(actual_startup_time);
194 | SETTERDECL(actual_total_time);
195 | SETTERDECL(actual_rows);
196 | SETTERDECL(actual_loops);
197 | SETTERDECL(heap_fetches);
198 | SETTERDECL(shared_hit_blks);
199 | SETTERDECL(shared_read_blks);
200 | SETTERDECL(shared_dirtied_blks);
201 | SETTERDECL(shared_written_blks);
202 | SETTERDECL(local_hit_blks);
203 | SETTERDECL(local_read_blks);
204 | SETTERDECL(local_dirtied_blks);
205 | SETTERDECL(local_written_blks);
206 | SETTERDECL(temp_read_blks);
207 | SETTERDECL(temp_written_blks);
208 | SETTERDECL(io_read_time);
209 | SETTERDECL(io_write_time);
210 | SETTERDECL(trig_name);
211 | SETTERDECL(trig_relation);
212 | SETTERDECL(trig_time);
213 | SETTERDECL(trig_calls);
214 | SETTERDECL(plan_time);
215 | SETTERDECL(exec_time);
216 | SETTERDECL(exact_heap_blks);
217 | SETTERDECL(lossy_heap_blks);
218 | SETTERDECL(joinfilt_removed);
219 | SETTERDECL(conflict_resolution);
220 | SETTERDECL(conflict_arbiter_indexes);
221 | SETTERDECL(tuples_inserted);
222 | SETTERDECL(conflicting_tuples);
223 | SETTERDECL(sampling_method);
224 | SETTERDECL(sampling_params);
225 | SETTERDECL(repeatable_seed);
226 | SETTERDECL(worker_number);
227 | SETTERDECL(workers_planned);
228 | SETTERDECL(workers_launched);
229 | SETTERDECL(inner_unique);
230 | SETTERDECL(async_capable);
231 | SETTERDECL(table_func_name);
232 | SETTERDECL(presorted_key);
233 | SETTERDECL(sortmethod_used);
234 | SETTERDECL(sortspace_mem);
235 | SETTERDECL(group_count);
236 | SETTERDECL(avg_sortspc_used);
237 | SETTERDECL(peak_sortspc_used);
238 |
--------------------------------------------------------------------------------
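A hand-expanded sketch of the setter macros above may help when reading pgsp_json_text.c. The two expansions below are illustrative only: they assume DEFAULT_SETTER is applied to plan_rows and LIST_SETTER to presorted_key (the field types suggest this pairing, but the actual choice is made in pgsp_json_text.c, which is not shown here), and they write the generated functions out as ordinary definitions.

    /*
     * Illustration only -- not part of the extension source.
     * DEFAULT_SETTER(plan_rows) generates a plain pointer assignment;
     * LIST_SETTER(presorted_key) accumulates values as "a, b, c".
     */
    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "pgsp_json_text.h"

    void
    pgsp_node_set_plan_rows(node_vals *vals, const char *val)
    {
    	vals->plan_rows = val;
    }

    void
    pgsp_node_set_presorted_key(node_vals *vals, const char *val)
    {
    	if (!vals->presorted_key || !vals->presorted_key->data[0])
    	{
    		/* first element: allocate the buffer and copy the value */
    		vals->presorted_key = makeStringInfo();
    		appendStringInfoString(vals->presorted_key, val);
    	}
    	else
    	{
    		/* subsequent elements: append with a comma separator */
    		appendStringInfoString(vals->presorted_key, ", ");
    		appendStringInfoString(vals->presorted_key, val);
    	}
    }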
/pgsp_token_types.h:
--------------------------------------------------------------------------------
1 | /*-------------------------------------------------------------------------
2 | *
3 | * pgsp_token_types.h: Manually maintained copies of core parser token codes
4 | *
5 | * Copyright (c) 2008-2025, PostgreSQL Global Development Group
6 | * Copyright (c) 2012-2025, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
7 | *
8 | * IDENTIFICATION
9 | *	  pg_store_plans/pgsp_token_types.h
10 | *
11 | *-------------------------------------------------------------------------
12 | */
13 |
14 | #include "postgres.h"
15 |
16 | /* In PG16, include/scan.h is no longer available. Define the required symbols manually. */
17 | /* These values must be kept in sync with src/backend/parser/gram.h. */
18 | #if PG_VERSION_NUM < 160000
19 | #error This file should only be included for PostgreSQL 16 and above
20 | #elif PG_VERSION_NUM < 170000
21 | enum pgsptokentype
22 | {
23 | IDENT = 258, /* IDENT */
24 | FCONST = 260, /* FCONST */
25 | SCONST = 261, /* SCONST */
26 | BCONST = 263, /* BCONST */
27 | XCONST = 264, /* XCONST */
28 | Op = 265, /* Op */
29 | ICONST = 266, /* ICONST */
30 | CURRENT_CATALOG = 358, /* CURRENT_CATALOG */
31 | CURRENT_DATE = 359, /* CURRENT_DATE */
32 | CURRENT_ROLE = 360, /* CURRENT_ROLE */
33 | CURRENT_SCHEMA = 361, /* CURRENT_SCHEMA */
34 | CURRENT_TIME = 362, /* CURRENT_TIME */
35 | CURRENT_TIMESTAMP = 363, /* CURRENT_TIMESTAMP */
36 | CURRENT_USER = 364, /* CURRENT_USER */
37 | FALSE_P = 415, /* FALSE_P */
38 | LOCALTIME = 502, /* LOCALTIME */
39 | LOCALTIMESTAMP = 503, /* LOCALTIMESTAMP */
40 | NULL_P = 540, /* NULL_P */
41 | TRUE_P = 689, /* TRUE_P */
42 | };
43 | #elif PG_VERSION_NUM < 180000
44 | enum pgsptokentype
45 | {
46 | IDENT = 258, /* IDENT */
47 | FCONST = 260, /* FCONST */
48 | SCONST = 261, /* SCONST */
49 | BCONST = 263, /* BCONST */
50 | XCONST = 264, /* XCONST */
51 | Op = 265, /* Op */
52 | ICONST = 266, /* ICONST */
53 | CURRENT_CATALOG = 359, /* CURRENT_CATALOG */
54 | CURRENT_DATE = 360, /* CURRENT_DATE */
55 | CURRENT_ROLE = 361, /* CURRENT_ROLE */
56 | CURRENT_SCHEMA = 362, /* CURRENT_SCHEMA */
57 | CURRENT_TIME = 363, /* CURRENT_TIME */
58 | CURRENT_TIMESTAMP = 364, /* CURRENT_TIMESTAMP */
59 | CURRENT_USER = 365, /* CURRENT_USER */
60 | FALSE_P = 418, /* FALSE_P */
61 | LOCALTIME = 512, /* LOCALTIME */
62 | LOCALTIMESTAMP = 513, /* LOCALTIMESTAMP */
63 | NULL_P = 552, /* NULL_P */
64 | TRUE_P = 708, /* TRUE_P */
65 | };
66 | #else
67 | #error This version of PostgreSQL is not supported
68 | #endif
69 |
--------------------------------------------------------------------------------
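The enum above keeps local copies of a handful of token codes from the core grammar, one set per supported major version. As a purely hypothetical illustration of how such codes can be consumed (the helper name below is invented; the real consumer is pgsp_json.c, which is outside this excerpt), a classifier over scanner output could look like this:

    /*
     * Hypothetical example -- not from the extension source.  Classify a
     * core-scanner token code as a literal, using the backup codes from
     * pgsp_token_types.h.  Builds only against PG16/PG17, as enforced by
     * the #error directives above.
     */
    #include "postgres.h"
    #include "pgsp_token_types.h"

    static bool
    pgsp_token_is_literal(int tok)
    {
    	switch (tok)
    	{
    		case ICONST:	/* integer literal */
    		case FCONST:	/* numeric literal */
    		case SCONST:	/* string literal */
    		case BCONST:	/* bit-string literal */
    		case XCONST:	/* hex-string literal */
    		case TRUE_P:	/* TRUE */
    		case FALSE_P:	/* FALSE */
    		case NULL_P:	/* NULL */
    			return true;
    		default:
    			return false;
    	}
    }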
/regress.conf:
--------------------------------------------------------------------------------
1 | shared_preload_libraries = 'pg_store_plans,pg_stat_statements'
2 |
--------------------------------------------------------------------------------
/sql/store.sql:
--------------------------------------------------------------------------------
1 | SET client_min_messages = 'error';
2 | CREATE EXTENSION IF NOT EXISTS pg_store_plans;
3 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
4 | SELECT pg_stat_statements_reset() IS NOT NULL AS t;
5 | SELECT pg_store_plans_reset();
6 |
7 | DROP TABLE IF EXISTS t1;
8 | CREATE TABLE t1 (a int);
9 | CREATE INDEX ON t1 (a);
10 | INSERT INTO t1 (SELECT a FROM generate_series(0, 9999) a);
11 | RESET enable_seqscan;
12 | RESET enable_bitmapscan;
13 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
14 | SET enable_seqscan TO false;
15 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
16 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
17 | SET enable_bitmapscan TO false;
18 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
19 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
20 | SELECT count(*) FROM (SELECT * FROM t1) AS x;
21 | RESET enable_seqscan;
22 | RESET enable_bitmapscan;
23 |
24 | CREATE OR REPLACE FUNCTION test_explain() RETURNS text AS
25 | $x$
26 | DECLARE
27 | r record;
28 | s text;
29 | p text;
30 | totalrows int;
31 | totalcalls int;
32 | first bool;
33 | BEGIN
34 | s := '';
35 | first = true;
36 | SELECT calls, rows INTO totalcalls, totalrows
37 | FROM pg_stat_statements
38 | WHERE query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x';
39 |
40 | FOR r IN SELECT s.query as q, p.plan as p, p.calls as c, p.rows r
41 | FROM pg_stat_statements s
42 | JOIN pg_store_plans p USING (queryid)
43 | WHERE s.query = 'SELECT count(*) FROM (SELECT * FROM t1) AS x'
44 | ORDER BY p.calls
45 | LOOP
46 | IF first then
47 | s = r.q || E'\n totalcalls=' || totalcalls ||
48 | ' , totalrows=' || totalrows || E'\n';
49 | first := false;
50 | END IF;
51 | p := regexp_replace(r.p, '=[0-9.]+([^0-9.])', '=xxx\1', 'g');
52 | s := s || p || E'\n calls=' || r.c || ', rows=' || r.r || E'\n';
53 | END LOOP;
54 |
55 | RETURN s;
56 | END
57 | $x$
58 | LANGUAGE plpgsql;
59 | SELECT test_explain();
60 | DROP FUNCTION test_explain();
61 | DROP TABLE t1;
62 |
63 |
--------------------------------------------------------------------------------