├── pgactivity ├── __init__.py ├── queries │ ├── reset_statement_timeout.sql │ ├── disable_log_statement.sql │ ├── disable_log_min_duration_sample.sql │ ├── disable_log_min_duration_statement.sql │ ├── get_data_directory.sql │ ├── get_version.sql │ ├── do_pg_terminate_backend.sql │ ├── get_pga_inet_addresses.sql │ ├── get_wal_senders_post_090100.sql │ ├── get_wal_receivers_post_090600.sql │ ├── do_pg_cancel_backend.sql │ ├── get_replication_slots_post_140000.sql │ ├── get_temporary_files_post_120000.sql │ ├── __init__.py │ ├── get_pg_activity_post_090400.sql │ ├── get_pg_activity_post_130000.sql │ ├── get_pg_activity_post_110000.sql │ ├── get_pg_activity_post_090200.sql │ ├── get_pg_activity_post_090600.sql │ ├── get_pg_activity_post_100000.sql │ ├── get_waiting_post_090200.sql │ ├── get_temporary_files_oldest.sql │ ├── get_temporary_files_post_090100.sql │ ├── get_pg_activity_oldest.sql │ ├── get_temporary_files_post_090500.sql │ ├── get_waiting_oldest.sql │ ├── get_server_info_oldest.sql │ ├── get_server_info_post_090200.sql │ ├── get_server_info_post_090100.sql │ ├── get_server_info_post_090400.sql │ ├── get_server_info_post_090600.sql │ ├── get_server_info_post_100000.sql │ ├── get_server_info_post_110000.sql │ ├── get_blocking_post_090600.sql │ ├── get_blocking_post_090200.sql │ └── get_blocking_oldest.sql ├── __main__.py ├── profiles │ ├── wide.conf │ ├── narrow.conf │ └── minimal.conf ├── colors.py ├── widgets.py ├── compat.py ├── keys.py ├── handlers.py ├── pg.py ├── cli.py ├── utils.py ├── ui.py ├── activities.py └── config.py ├── .coveragerc ├── .codespellrc ├── mypy.ini ├── pytest.ini ├── docs ├── imgs │ ├── screenshot.png │ ├── logo-horizontal.png │ ├── logo.svg │ └── logo-horizontal.svg └── man │ ├── build-man.sh │ └── pg_activity.pod ├── .gitignore ├── .editorconfig ├── tests ├── test_types.py ├── test_widgets.txt ├── test_cli.py ├── test_views.py ├── test_scroll.txt ├── conftest.py ├── test_cli_help.txt ├── test_cli_help_py312.txt ├── test_activities.py ├── test_config.py └── test_data.py ├── .github ├── dependabot.yml └── workflows │ ├── lint.yml │ ├── publish.yml │ └── tests.yml ├── .flake8 ├── MANIFEST.in ├── tox.ini ├── LICENSE.txt ├── .pre-commit-config.yaml ├── AUTHORS.md ├── RELEASE.md └── pyproject.toml /pgactivity/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "3.6.1" 2 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | include = 3 | pg_activity 4 | pgactivity/* 5 | -------------------------------------------------------------------------------- /pgactivity/queries/reset_statement_timeout.sql: -------------------------------------------------------------------------------- 1 | RESET statement_timeout; 2 | -------------------------------------------------------------------------------- /.codespellrc: -------------------------------------------------------------------------------- 1 | [codespell] 2 | skip = .git,.mypy_cache,.tox,.venv,build,htmlcov 3 | -------------------------------------------------------------------------------- /pgactivity/queries/disable_log_statement.sql: -------------------------------------------------------------------------------- 1 | SET log_statement TO 'none'; 2 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 
| [mypy] 2 | files = pgactivity 3 | show_error_codes = true 4 | strict = true 5 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --doctest-modules 3 | filterwarnings = 4 | error 5 | -------------------------------------------------------------------------------- /pgactivity/queries/disable_log_min_duration_sample.sql: -------------------------------------------------------------------------------- 1 | SET log_min_duration_sample TO -1; 2 | -------------------------------------------------------------------------------- /docs/imgs/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalibo/pg_activity/HEAD/docs/imgs/screenshot.png -------------------------------------------------------------------------------- /pgactivity/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import main 2 | 3 | if __name__ == "__main__": 4 | main() 5 | -------------------------------------------------------------------------------- /pgactivity/queries/disable_log_min_duration_statement.sql: -------------------------------------------------------------------------------- 1 | SET log_min_duration_statement TO -1; 2 | -------------------------------------------------------------------------------- /pgactivity/queries/get_data_directory.sql: -------------------------------------------------------------------------------- 1 | -- Get the path of the pidfile 2 | SHOW data_directory; 3 | -------------------------------------------------------------------------------- /pgactivity/queries/get_version.sql: -------------------------------------------------------------------------------- 1 | -- Get the server's version 2 | SELECT version() AS pg_version; 3 | -------------------------------------------------------------------------------- /docs/imgs/logo-horizontal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dalibo/pg_activity/HEAD/docs/imgs/logo-horizontal.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .coverage 2 | .tox 3 | .vscode 4 | __pycache__/ 5 | build/ 6 | dist/ 7 | htmlcov/ 8 | pg_activity.egg-info 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*.md] 4 | max_line_length = 80 5 | 6 | [LICENSE.txt] 7 | max_line_length = 78 8 | 9 | [*.py] 10 | max_line_length = 89 11 | -------------------------------------------------------------------------------- /pgactivity/queries/do_pg_terminate_backend.sql: -------------------------------------------------------------------------------- 1 | -- Terminate the session whose backend process has the specified process ID 2 | SELECT pg_terminate_backend(%(pid)s) AS is_stopped; 3 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pga_inet_addresses.sql: -------------------------------------------------------------------------------- 1 | -- Get the server and client inet addresses 2 | SELECT inet_server_addr() AS inet_server_addr, inet_client_addr() AS inet_client_addr; 3 | 
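The query files shown here use pyformat placeholders such as %(pid)s, so they can be handed to psycopg (or psycopg2) together with a mapping of parameters. A minimal sketch of that execution path, assuming a psycopg connection and the queries.get() helper included further down in this dump (the project's real execution code lives in pgactivity/pg.py, which is not part of this excerpt):

import psycopg

from pgactivity import queries

with psycopg.connect("dbname=postgres") as conn:
    # queries.get() returns the SQL text with its "-- " comments stripped;
    # the %(pid)s placeholder is bound from the mapping passed alongside.
    cur = conn.execute(queries.get("do_pg_terminate_backend"), {"pid": 12345})
    print(cur.fetchone())  # (True,) if the target backend existed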
-------------------------------------------------------------------------------- /pgactivity/queries/get_wal_senders_post_090100.sql: -------------------------------------------------------------------------------- 1 | -- Get the wal sender info for pg >= 9.1 2 | -- NEW pg_stat_replication 3 | SELECT count(*) AS wal_senders 4 | FROM pg_stat_replication 5 | -------------------------------------------------------------------------------- /pgactivity/queries/get_wal_receivers_post_090600.sql: -------------------------------------------------------------------------------- 1 | -- Get the wal receiver info for pg >= 9.6 2 | -- NEW pg_stat_wal_receiver 3 | SELECT count(*) AS wal_receivers FROM pg_stat_wal_receiver 4 | -------------------------------------------------------------------------------- /pgactivity/queries/do_pg_cancel_backend.sql: -------------------------------------------------------------------------------- 1 | -- Cancel the current query of the session whose backend process has the 2 | -- specified process ID 3 | SELECT pg_cancel_backend(%(pid)s) AS is_stopped; 4 | -------------------------------------------------------------------------------- /tests/test_types.py: -------------------------------------------------------------------------------- 1 | from pgactivity.types import Filters 2 | 3 | 4 | def test_filters_from_options(): 5 | f = Filters.from_options(["dbname:postgres"]) 6 | assert f == Filters(dbname="postgres") 7 | -------------------------------------------------------------------------------- /pgactivity/queries/get_replication_slots_post_140000.sql: -------------------------------------------------------------------------------- 1 | -- Get the replication slots information for pg >= 14 2 | -- NEW pg_replication_slots 3 | SELECT count(*) AS replication_slots FROM pg_replication_slots 4 | -------------------------------------------------------------------------------- /docs/man/build-man.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | pod2man \ 4 | -r "$(pg_activity --version)" \ 5 | -d "$(date +%Y-%m-%d)" \ 6 | -c "Command line tool for PostgreSQL server activity monitoring."
\ 7 | pg_activity.pod > pg_activity.1 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 2 | 3 | version: 2 4 | updates: 5 | - package-ecosystem: "github-actions" 6 | directory: "/" 7 | schedule: 8 | interval: "monthly" 9 | -------------------------------------------------------------------------------- /pgactivity/queries/get_temporary_files_post_120000.sql: -------------------------------------------------------------------------------- 1 | -- Get temporary file information for pg >= 12 2 | -- NEW: pg_ls_tmpdir() 3 | SELECT count(*) as temp_files, 4 | COALESCE(SUM(size), 0) AS temp_bytes 5 | FROM pg_tablespace ts, LATERAL pg_catalog.pg_ls_tmpdir(ts.oid) tmp 6 | WHERE spcname <> 'pg_global' 7 | -------------------------------------------------------------------------------- /pgactivity/profiles/wide.conf: -------------------------------------------------------------------------------- 1 | [xmin] 2 | hidden = no 3 | 4 | [database] 5 | hidden = no 6 | 7 | [user] 8 | hidden = no 9 | 10 | [client] 11 | hidden = no 12 | 13 | [cpu] 14 | hidden = no 15 | 16 | [mem] 17 | hidden = no 18 | 19 | [read] 20 | hidden = no 21 | 22 | [write] 23 | hidden = no 24 | 25 | [appname] 26 | hidden = no 27 | -------------------------------------------------------------------------------- /pgactivity/profiles/narrow.conf: -------------------------------------------------------------------------------- 1 | [xmin] 2 | hidden = yes 3 | 4 | [database] 5 | hidden = yes 6 | 7 | [user] 8 | hidden = yes 9 | 10 | [client] 11 | hidden = yes 12 | 13 | [cpu] 14 | hidden = yes 15 | 16 | [mem] 17 | hidden = yes 18 | 19 | [read] 20 | hidden = yes 21 | 22 | [write] 23 | hidden = yes 24 | 25 | [appname] 26 | hidden = yes 27 | -------------------------------------------------------------------------------- /pgactivity/profiles/minimal.conf: -------------------------------------------------------------------------------- 1 | [header] 2 | show_instance = no 3 | show_system = no 4 | show_workers = no 5 | 6 | [xmin] 7 | hidden = yes 8 | 9 | [database] 10 | hidden = yes 11 | 12 | [user] 13 | hidden = yes 14 | 15 | [client] 16 | hidden = yes 17 | 18 | [cpu] 19 | hidden = yes 20 | 21 | [mem] 22 | hidden = yes 23 | 24 | [read] 25 | hidden = yes 26 | 27 | [write] 28 | hidden = yes 29 | 30 | [appname] 31 | hidden = yes 32 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | doctests = True 3 | ignore = 4 | # whitespace before ‘:’ 5 | E203, 6 | # line too long 7 | E501, 8 | # multiple statements on one line (def) 9 | E704, 10 | # line break before binary operator 11 | W503, 12 | exclude = 13 | .git, 14 | .mypy_cache, 15 | .tox, 16 | .venv, 17 | per-file-ignores = 18 | pg_activity:E251 19 | setup.py:E251,E402 20 | select = B,C,E,F,W,T4,B9 21 | mypy_config = mypy.ini 22 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v5 10 | - uses: actions/setup-python@v5 11 | -
name: Install tox 12 | run: pip install tox 13 | - name: Check manifest 14 | run: tox -e check-manifest 15 | - name: Lint 16 | run: tox -e lint 17 | - name: Mypy 18 | run: tox -e mypy 19 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include docs/man/pg_activity.1 2 | recursive-include pgactivity *.py *.sql *.conf 3 | recursive-include docs *.png *.pod *.sh *.svg 4 | recursive-include tests *.py *.json *.txt 5 | include *.md 6 | include *.txt 7 | include .editorconfig 8 | include .codespellrc 9 | include .coveragerc 10 | include .flake8 11 | include .pre-commit-config.yaml 12 | include mypy.ini 13 | include pyproject.toml 14 | include pytest.ini 15 | include tox.ini 16 | 17 | prune .github 18 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v5 14 | - uses: actions/setup-python@v5 15 | with: 16 | python-version: '3.11' 17 | - name: Install 18 | run: python -m pip install build setuptools twine wheel 19 | - name: Build 20 | run: | 21 | python -m build 22 | python -m twine check dist/* 23 | - name: Publish 24 | run: python -m twine upload dist/* 25 | env: 26 | TWINE_USERNAME: __token__ 27 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 28 | -------------------------------------------------------------------------------- /tests/test_widgets.txt: -------------------------------------------------------------------------------- 1 | >>> from blessed import Terminal 2 | >>> from pgactivity import widgets 3 | 4 | >>> term = Terminal(force_styling=None) 5 | 6 | Tests for boxed() 7 | ------------------ 8 | 9 | >>> print(widgets.boxed(term, "hello, world"), end="") 10 | ┌──────────────┐ 11 | │ hello, world │ 12 | └──────────────┘ 13 | >>> print(widgets.boxed(term, "hello, world", border=False), end="") 14 | 15 | hello, world 16 | 17 | >>> print(widgets.boxed(term, "hello, world", center=True), end="") 18 | ┌──────────────┐ 19 | │ hello, world │ 20 | └──────────────┘ 21 | -------------------------------------------------------------------------------- /pgactivity/queries/__init__.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import pathlib 3 | 4 | here = pathlib.Path(__file__).parent 5 | 6 | 7 | @functools.cache 8 | def get(name: str) -> str: 9 | r"""Return the SQL query contained in named file, ignoring commented lines. 10 | 11 | >>> get('get_version') 12 | 'SELECT version() AS pg_version;' 13 | >>> print(get('get_pg_activity_post_130000')[:101]) 14 | SELECT 15 | a.pid AS pid, 16 | a.backend_xmin AS xmin, 17 | a.application_name AS application_name 18 | """ 19 | path = here / f"{name}.sql" 20 | s = "-- " 21 | with path.open() as f: 22 | return "\n".join( 23 | line.rstrip().split(s, 1)[0] 24 | for line in f 25 | if line.strip() and not line.startswith(s) 26 | ) 27 | -------------------------------------------------------------------------------- /pgactivity/colors.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from . 
import utils 4 | 5 | PINNED_COLOR = "bold_yellow" 6 | FOCUSED_COLOR = "cyan_reverse" 7 | 8 | 9 | def short_state(state: str) -> str | None: 10 | state = utils.short_state(state) 11 | if state == "active": 12 | return "green" 13 | elif state == "idle in trans": 14 | return "yellow" 15 | elif state == "idle in trans (a)": 16 | return "red" 17 | return None 18 | 19 | 20 | def lock_mode(mode: str) -> str: 21 | if mode in ( 22 | "ExclusiveLock", 23 | "RowExclusiveLock", 24 | "AccessExclusiveLock", 25 | ): 26 | return "bold_red" 27 | else: 28 | return "bold_yellow" 29 | 30 | 31 | def wait(value: bool) -> str: 32 | return "red" if value else "green" 33 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | tests: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | include: 11 | - python: "3.9" 12 | psycopg: "psycopg2" 13 | - python: "3.13" 14 | psycopg: "psycopg3" 15 | steps: 16 | - uses: actions/checkout@v5 17 | - name: Setup Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: ${{ matrix.python }} 21 | allow-prereleases: true 22 | - name: Install tox 23 | run: pip install tox 24 | - name: Add fr_FR and zh_TW for test purposes 25 | run: | 26 | sudo locale-gen fr_FR zh_TW.EUC-TW 27 | sudo update-locale 28 | - name: Test 29 | run: tox -e py-${{ matrix.psycopg }} 30 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = check-manifest, lint, mypy, py-psycopg{2,3} 3 | skip_missing_interpreters = True 4 | 5 | [testenv] 6 | extras = 7 | testing 8 | psycopg2: psycopg2 9 | psycopg3: psycopg 10 | commands = 11 | pytest {posargs:-vv -ra} 12 | setenv = 13 | psycopg2: _PGACTIVITY_USE_PSYCOPG2=1 14 | usedevelop = true 15 | 16 | [testenv:check-manifest] 17 | skip_install = True 18 | deps = 19 | check-manifest 20 | commands = 21 | check-manifest 22 | 23 | [testenv:lint] 24 | skip_install = True 25 | deps = 26 | codespell 27 | black >= 24.10.0 28 | flake8 29 | isort 30 | pre-commit 31 | pyupgrade 32 | commands = 33 | codespell {toxinidir} 34 | black --check --diff {toxinidir} 35 | flake8 {toxinidir} 36 | isort --check --diff {toxinidir} 37 | pre-commit run --all-files --show-diff-on-failure pyupgrade 38 | 39 | [testenv:mypy] 40 | extras = 41 | psycopg 42 | typing 43 | commands = 44 | mypy 45 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 - 2019, Julien Tachoires 2 | Copyright (c) 2020, Dalibo 3 | 4 | Permission to use, copy, modify, and distribute this software and its 5 | documentation for any purpose, without fee, and without a written agreement is 6 | hereby granted, provided that the above copyright notice and this paragraph 7 | and the following two paragraphs appear in all copies. 8 | 9 | IN NO EVENT SHALL DALIBO BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, 10 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF 11 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF DALIBO HAS BEEN 12 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
13 | 14 | DALIBO SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 15 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND DALIBO 17 | HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR 18 | MODIFICATIONS. 19 | -------------------------------------------------------------------------------- /pgactivity/widgets.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from blessed import Terminal 4 | 5 | 6 | def boxed( 7 | term: Terminal, 8 | content: str, 9 | *, 10 | border: bool = True, 11 | border_color: str = "white", 12 | center: bool = False, 13 | width: int | None = None, 14 | ) -> str: 15 | if border: 16 | border_width = term.length(content) + 2 17 | border_formatter = getattr(term, border_color) 18 | lines = [ 19 | border_formatter("┌" + "─" * border_width + "┐"), 20 | " ".join( 21 | [border_formatter("│") + term.normal, content, border_formatter("│")] 22 | ), 23 | border_formatter("└" + "─" * border_width + "┘") + term.normal, 24 | ] 25 | else: 26 | # border is disabled in UI tests. 27 | lines = ["", content, ""] 28 | if center: 29 | if width is None: 30 | width = term.width 31 | lines = [term.center(line, width=width) for line in lines] 32 | return "\n".join(lines) 33 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_090400.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity from pg 9.4 to 9.6 2 | -- NEW pg_stat_activity.backend_xmin 3 | SELECT 4 | a.pid AS pid, 5 | a.backend_xmin AS xmin, 6 | a.application_name AS application_name, 7 | a.datname AS database, 8 | a.client_addr AS client, 9 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 10 | a.waiting AS wait, 11 | a.usename AS user, 12 | a.state AS state, 13 | a.query AS query, 14 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 15 | NULL AS query_leader_pid, 16 | false AS is_parallel_worker 17 | FROM 18 | pg_stat_activity a 19 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 20 | WHERE 21 | state <> 'idle' 22 | AND pid <> pg_backend_pid() 23 | AND CASE WHEN {min_duration} = 0 24 | THEN true 25 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 26 | END 27 | AND CASE WHEN {dbname_filter} IS NULL THEN true 28 | ELSE a.datname ~* %(dbname_filter)s 29 | END 30 | ORDER BY 31 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 32 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_130000.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity since pg 13 2 | -- NEW pg_activity.leader_pid 3 | SELECT 4 | a.pid AS pid, 5 | a.backend_xmin AS xmin, 6 | a.application_name AS application_name, 7 | a.datname AS database, 8 | a.client_addr AS client, 9 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 10 | a.wait_event AS wait, 11 | a.usename AS user, 12 | a.state AS state, 13 | a.query AS query, 14 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 15 | coalesce(a.leader_pid, a.pid) AS query_leader_pid, 16 | a.backend_type = 'parallel worker' AS is_parallel_worker 17 | FROM 18 | pg_stat_activity a 19 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 20 | WHERE 21 | a.state <>
'idle' 22 | AND a.pid <> pg_catalog.pg_backend_pid() 23 | AND CASE WHEN {min_duration} = 0 24 | THEN true 25 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 26 | END 27 | AND CASE WHEN {dbname_filter} IS NULL THEN true 28 | ELSE a.datname ~* %(dbname_filter)s 29 | END 30 | ORDER BY 31 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 32 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_110000.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity since pg 11 2 | -- NEW pg_activity.backend_type value for 'parallel worker' 3 | SELECT 4 | a.pid AS pid, 5 | a.backend_xmin AS xmin, 6 | a.application_name AS application_name, 7 | a.datname AS database, 8 | a.client_addr AS client, 9 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 10 | a.wait_event AS wait, 11 | a.usename AS user, 12 | a.state AS state, 13 | a.query AS query, 14 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 15 | NULL AS query_leader_pid, 16 | a.backend_type = 'parallel worker' AS is_parallel_worker 17 | FROM 18 | pg_stat_activity a 19 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 20 | WHERE 21 | a.state <> 'idle' 22 | AND a.pid <> pg_catalog.pg_backend_pid() 23 | AND CASE WHEN {min_duration} = 0 24 | THEN true 25 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 26 | END 27 | AND CASE WHEN {dbname_filter} IS NULL THEN true 28 | ELSE a.datname ~* %(dbname_filter)s 29 | END 30 | ORDER BY 31 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 32 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_090200.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity from pg 9.2 to pg 9.5 2 | -- NEW pg_stat_activity.current_query => pg_stat_activity.query 3 | -- NEW pg_stat_activity.procpid => pg_stat_activity.pid 4 | SELECT 5 | a.pid AS pid, 6 | NULL AS xmin, 7 | a.application_name AS application_name, 8 | a.datname AS database, 9 | a.client_addr AS client, 10 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 11 | a.waiting AS wait, 12 | a.usename AS user, 13 | a.state AS state, 14 | a.query AS query, 15 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 16 | NULL AS query_leader_pid, 17 | false AS is_parallel_worker 18 | FROM 19 | pg_stat_activity a 20 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 21 | WHERE 22 | state <> 'idle' 23 | AND pid <> pg_backend_pid() 24 | AND CASE WHEN {min_duration} = 0 25 | THEN true 26 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 27 | END 28 | AND CASE WHEN {dbname_filter} IS NULL THEN true 29 | ELSE a.datname ~* %(dbname_filter)s 30 | END 31 | ORDER BY 32 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 33 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_090600.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity from pg 9.6 to 10 2 | -- In this version there is no way to distinguish parallel workers from the rest 3 | -- NEW pg_stat_activity.waiting => pg_stat_activity.wait_event 4 | SELECT 5 | a.pid AS pid, 6 | a.backend_xmin AS xmin, 7 | a.application_name AS application_name, 8 | a.datname AS database, 9 | a.client_addr AS client, 10 | 
EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 11 | a.wait_event AS wait, 12 | a.usename AS user, 13 | a.state AS state, 14 | a.query AS query, 15 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 16 | NULL AS query_leader_pid, 17 | false AS is_parallel_worker 18 | FROM 19 | pg_stat_activity a 20 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 21 | WHERE 22 | state <> 'idle' 23 | AND pid <> pg_backend_pid() 24 | AND CASE WHEN {min_duration} = 0 25 | THEN true 26 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 27 | END 28 | AND CASE WHEN {dbname_filter} IS NULL THEN true 29 | ELSE a.datname ~* %(dbname_filter)s 30 | END 31 | ORDER BY 32 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 33 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: check-added-large-files 6 | - id: debug-statements 7 | - id: end-of-file-fixer 8 | files: text 9 | exclude: tests/test_*.txt 10 | - id: trailing-whitespace 11 | files: text 12 | exclude: tests/test_*.txt 13 | - repo: local 14 | hooks: 15 | - id: pyupgrade 16 | name: pyupgrade 17 | entry: pyupgrade --py39-plus --exit-zero-even-if-changed 18 | language: system 19 | types: [python] 20 | - id: black 21 | name: black 22 | entry: black --check . 23 | language: system 24 | types: [python] 25 | - id: flake8 26 | name: flake8 27 | entry: flake8 . 28 | language: system 29 | types: [python] 30 | - id: isort 31 | name: isort 32 | entry: isort --check --diff . 33 | language: system 34 | types: [python] 35 | - id: mypy 36 | name: mypy 37 | entry: mypy 38 | language: system 39 | types: [python] 40 | exclude: tests 41 | - id: codespell 42 | name: codespell 43 | entry: codespell 44 | language: system 45 | types: [file] 46 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_post_100000.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity from pg 10 to pg 11 2 | -- We assume a background_worker with a not null query is a parallel worker. 
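-- Rationale: pg 10 reports parallel workers with backend_type 'background
-- worker'; the dedicated 'parallel worker' value only appears in pg 11 (see
-- get_pg_activity_post_110000.sql), so a background worker carrying a
-- non-null query is the closest approximation available on pg 10.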
3 | -- NEW pg_activity.backend_type 4 | SELECT 5 | a.pid AS pid, 6 | a.backend_xmin AS xmin, 7 | a.application_name AS application_name, 8 | a.datname AS database, 9 | a.client_addr AS client, 10 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 11 | a.wait_event as wait, 12 | a.usename AS user, 13 | a.state AS state, 14 | a.query AS query, 15 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 16 | NULL AS query_leader_pid, 17 | ( a.backend_type = 'background worker' 18 | AND a.query IS NOT NULL 19 | ) AS is_parallel_worker 20 | FROM 21 | pg_stat_activity a 22 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 23 | WHERE 24 | state <> 'idle' 25 | AND pid <> pg_backend_pid() 26 | AND CASE WHEN {min_duration} = 0 27 | THEN true 28 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 29 | END 30 | AND CASE WHEN {dbname_filter} IS NULL THEN true 31 | ELSE a.datname ~* %(dbname_filter)s 32 | END 33 | ORDER BY 34 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 35 | -------------------------------------------------------------------------------- /pgactivity/compat.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import operator 4 | from importlib.metadata import version 5 | from typing import Any 6 | 7 | import attr 8 | import attr.validators 9 | import blessed 10 | 11 | ATTR_VERSION = tuple(int(x) for x in version("attrs").split(".", 2)[:2]) 12 | BLESSED_VERSION = tuple(int(x) for x in version("blessed").split(".", 2)[:2]) 13 | 14 | if ATTR_VERSION < (18, 1): 15 | 16 | def fields_dict(cls: Any) -> dict[str, Any]: 17 | return {a.name: a for a in cls.__attrs_attrs__} 18 | 19 | else: 20 | fields_dict = attr.fields_dict 21 | 22 | if ATTR_VERSION < (21, 3): 23 | 24 | @attr.s(auto_attribs=True, frozen=True, slots=True) 25 | class gt: 26 | bound: int 27 | 28 | def __call__(self, instance: Any, attribute: Any, value: Any) -> None: 29 | if not operator.gt(value, self.bound): 30 | raise ValueError(f"'{attribute.name}' must be > {self.bound}: {value}") 31 | 32 | else: 33 | gt = attr.validators.gt # type: ignore[assignment,misc] 34 | 35 | 36 | if BLESSED_VERSION < (1, 17): 37 | 38 | def link(term: blessed.Terminal, url: str, text: str, url_id: str = "") -> str: 39 | return url 40 | 41 | else: 42 | 43 | def link(term: blessed.Terminal, url: str, text: str, url_id: str = "") -> str: 44 | return term.link(url, text, url_id=url_id) 45 | -------------------------------------------------------------------------------- /pgactivity/queries/get_waiting_post_090200.sql: -------------------------------------------------------------------------------- 1 | -- Get waiting queries for versions >= 9.2 2 | -- NEW pg_stat_activity.state 3 | -- NEW pg_stat_activity.current_query => pg_stat_activity.query 4 | -- NEW pg_stat_activity.procpid => pg_stat_activity.pid 5 | SELECT 6 | pg_locks.pid AS pid, 7 | a.application_name AS application_name, 8 | a.datname AS database, 9 | a.usename AS user, 10 | a.client_addr AS client, 11 | pg_locks.mode AS mode, 12 | pg_locks.locktype AS type, 13 | pg_locks.relation::regclass AS relation, 14 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 15 | a.state as state, 16 | a.query AS query, 17 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding 18 | FROM 19 | pg_catalog.pg_locks 20 | JOIN pg_catalog.pg_stat_activity a ON(pg_catalog.pg_locks.pid = a.pid) 21 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 22 | WHERE 23 | NOT 
pg_catalog.pg_locks.granted 24 | AND a.pid <> pg_backend_pid() 25 | AND CASE WHEN {min_duration} = 0 26 | THEN true 27 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 28 | END 29 | AND CASE WHEN {dbname_filter} IS NULL THEN true 30 | ELSE a.datname ~* %(dbname_filter)s 31 | END 32 | ORDER BY 33 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 34 | -------------------------------------------------------------------------------- /pgactivity/queries/get_temporary_files_oldest.sql: -------------------------------------------------------------------------------- 1 | -- Get temporary file information prior to 9.0 2 | SELECT count(agg.tmpfile) AS temp_files, 3 | COALESCE(SUM(COALESCE((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size, 0)),0) AS temp_bytes 4 | FROM ( 5 | SELECT ls.oid, ls.spcname AS spcname, 6 | ls.dir||'/'||ls.sub AS dir, 7 | pg_ls_dir(ls.dir||'/'||ls.sub) AS tmpfile 8 | FROM (SELECT sr.oid, sr.spcname, 9 | 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, 10 | pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub 11 | FROM 12 | (SELECT spc.oid, spc.spcname, 13 | pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, 14 | trim(TRAILING e'\n ' FROM pg_read_file('PG_VERSION', 0, 100)) AS v 15 | FROM (SELECT oid, spcname 16 | FROM pg_tablespace 17 | WHERE spcname NOT IN ('pg_default', 'pg_global') 18 | ) AS spc 19 | ) AS sr 20 | WHERE sr.spc_root ~ ('^PG_'||sr.v) 21 | UNION ALL 22 | SELECT 0, 'pg_default', 'base' AS dir, 'pgsql_tmp' AS sub 23 | FROM pg_ls_dir('base') AS l 24 | WHERE l='pgsql_tmp' 25 | ) AS ls 26 | WHERE ls.sub = 'pgsql_tmp' 27 | ) AS agg 28 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # pg_activity Development Team 2 | 3 | pg_activity is an open project. Feel free to join us and improve this tool. 4 | 5 | 6 | ## Maintainers 7 | 8 | - Benoit Lobréau 9 | - Denis Laxalde 10 | 11 | 12 | ## Contributors 13 | 14 | * Julien Tachoires - creator and original maintainer. 
15 | * [Damien Cazeils](https://www.damiencazeils.com/) - logo 16 | * Fabrízio de Royes Mello 17 | * crisnamurti 18 | * Étienne BERSAC 19 | * Michel Milezzi 20 | * Vincent Maugé 21 | * Kaarel Moppel 22 | * Alexandre Fayolle 23 | * Fabio Renato Geiss 24 | * Julien Rouhaud 25 | * MattK 26 | * Nick LaMuro 27 | * Nicolas Seinlet 28 | * Andrey Zhidenkov 29 | * Christophe Courtois 30 | * Damien Garaud 31 | * Emmanuel Bouthenot 32 | * Feike Steenbergen 33 | * Marion Delcambre 34 | * Matheus de Oliveira 35 | * Mickaël Le Baillif 36 | * Nicolas Dandrimont 37 | * Nils Hamerlinck 38 | -------------------------------------------------------------------------------- /pgactivity/queries/get_temporary_files_post_090100.sql: -------------------------------------------------------------------------------- 1 | -- Get temporary file information for pg >= 9.1 2 | -- NEW pg_read_file no longer need 3 parameters 3 | SELECT count(agg.tmpfile) AS temp_files, 4 | COALESCE(SUM(COALESCE((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size, 0)),0) AS temp_bytes 5 | FROM ( 6 | SELECT ls.oid, ls.spcname AS spcname, 7 | ls.dir||'/'||ls.sub AS dir, 8 | pg_ls_dir(ls.dir||'/'||ls.sub) AS tmpfile 9 | FROM (SELECT sr.oid, sr.spcname, 10 | 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, 11 | pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub 12 | FROM 13 | (SELECT spc.oid, spc.spcname, 14 | pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, 15 | trim(TRAILING e'\n ' FROM pg_read_file('PG_VERSION')) AS v 16 | FROM (SELECT oid, spcname 17 | FROM pg_tablespace 18 | WHERE spcname NOT IN ('pg_default', 'pg_global') 19 | ) AS spc 20 | ) AS sr 21 | WHERE sr.spc_root ~ ('^PG_'||sr.v) 22 | UNION ALL 23 | SELECT 0, 'pg_default', 'base' AS dir, 'pgsql_tmp' AS sub 24 | FROM pg_ls_dir('base') AS l 25 | WHERE l='pgsql_tmp' 26 | ) AS ls 27 | WHERE ls.sub = 'pgsql_tmp' 28 | ) AS agg 29 | -------------------------------------------------------------------------------- /pgactivity/queries/get_pg_activity_oldest.sql: -------------------------------------------------------------------------------- 1 | -- Get data from pg_activity before pg 9.2 2 | SELECT 3 | a.procpid AS pid, 4 | NULL AS xmin, 5 | '' AS application_name, 6 | a.datname AS database, 7 | a.client_addr AS client, 8 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 9 | a.waiting AS wait, 10 | a.usename AS user, 11 | CASE 12 | WHEN a.current_query = '<IDLE> in transaction (aborted)' THEN 'idle in transaction (aborted)' 13 | WHEN a.current_query = '<IDLE> in transaction' THEN 'idle in transaction' 14 | WHEN a.current_query = '<IDLE>' THEN 'idle' 15 | ELSE 'active' 16 | END AS state, 17 | CASE 18 | WHEN a.current_query LIKE '<IDLE>%%' THEN NULL 19 | ELSE a.current_query 20 | END AS query, 21 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 22 | NULL AS query_leader_pid, 23 | false AS is_parallel_worker 24 | FROM 25 | pg_stat_activity a 26 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 27 | WHERE 28 | current_query <> '<IDLE>' 29 | AND procpid <> pg_backend_pid() 30 | AND CASE WHEN {min_duration} = 0 31 | THEN true 32 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 33 | END 34 | AND CASE WHEN {dbname_filter} IS NULL THEN true 35 | ELSE a.datname ~* %(dbname_filter)s 36 | END 37 | ORDER BY 38 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC 39 | -------------------------------------------------------------------------------- /tests/test_cli.py: -------------------------------------------------------------------------------- 1 | from pgactivity import cli 2 | 3 | 4
| def test_parser() -> None: 5 | parser = cli.get_parser() 6 | ns = parser.parse_args( 7 | ["--no-db-size", "-w", "-p", "5433", "--no-pid", "--no-app-name"] 8 | ) 9 | assert vars(ns) == { 10 | "profile": None, 11 | "blocksize": 4096, 12 | "rds": False, 13 | "output": None, 14 | "dbsize": False, 15 | "tempfiles": None, 16 | "walreceiver": None, 17 | "wrap_query": True, 18 | "durationmode": "1", 19 | "minduration": 0, 20 | "filters": [], 21 | "debug_file": None, 22 | "help": False, 23 | "connection_string": "", 24 | "host": None, 25 | "port": "5433", 26 | "username": None, 27 | "dbname": None, 28 | "pid": False, 29 | "database": None, 30 | "user": None, 31 | "client": None, 32 | "cpu": None, 33 | "mem": None, 34 | "read": None, 35 | "write": None, 36 | "time": None, 37 | "wait": None, 38 | "xmin": None, 39 | "appname": False, 40 | "header_show_instance": None, 41 | "header_show_system": None, 42 | "header_show_workers": None, 43 | "hide_queries_in_logs": False, 44 | "refresh": 2, 45 | } 46 | 47 | 48 | def test_parser_flag_on() -> None: 49 | parser = cli.get_parser() 50 | ns = parser.parse_args(["--pid", "--no-app-name"]) 51 | assert ns.pid is True 52 | assert ns.appname is False 53 | assert ns.wait is None 54 | -------------------------------------------------------------------------------- /pgactivity/queries/get_temporary_files_post_090500.sql: -------------------------------------------------------------------------------- 1 | -- Get temporary file information for pg >= 9.5 2 | -- Note : pg_stat_file's prototype has changed in 9.5 to add pg_stat_file(text, bool) 3 | -- overload. 4 | SELECT count(agg.tmpfile) AS temp_files, 5 | COALESCE(SUM(COALESCE((pg_stat_file(agg.dir||'/'||agg.tmpfile, true)).size, 0)),0) AS temp_bytes 6 | FROM ( 7 | SELECT ls.oid, ls.spcname AS spcname, 8 | ls.dir||'/'||ls.sub AS dir, 9 | pg_ls_dir(ls.dir||'/'||ls.sub) AS tmpfile 10 | FROM (SELECT sr.oid, sr.spcname, 11 | 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, 12 | pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub 13 | FROM 14 | (SELECT spc.oid, spc.spcname, 15 | pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, 16 | trim(TRAILING e'\n ' FROM pg_read_file('PG_VERSION')) AS v 17 | FROM (SELECT oid, spcname 18 | FROM pg_tablespace 19 | WHERE spcname NOT IN ('pg_default', 'pg_global') 20 | ) AS spc 21 | ) AS sr 22 | WHERE sr.spc_root ~ ('^PG_'||sr.v) 23 | UNION ALL 24 | SELECT 0, 'pg_default', 'base' AS dir, 'pgsql_tmp' AS sub 25 | FROM pg_ls_dir('base') AS l 26 | WHERE l='pgsql_tmp' 27 | ) AS ls 28 | WHERE ls.sub = 'pgsql_tmp' 29 | ) AS agg 30 | -------------------------------------------------------------------------------- /pgactivity/queries/get_waiting_oldest.sql: -------------------------------------------------------------------------------- 1 | -- Get waiting queries for versions before 9.2 2 | SELECT 3 | pg_locks.pid AS pid, 4 | '' AS application_name, 5 | a.datname AS database, 6 | a.usename AS user, 7 | a.client_addr AS client, 8 | pg_locks.mode AS mode, 9 | pg_locks.locktype AS type, 10 | pg_locks.relation::regclass AS relation, 11 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) AS duration, 12 | CASE 13 | WHEN a.current_query = '<IDLE> in transaction (aborted)' THEN 'idle in transaction (aborted)' 14 | WHEN a.current_query = '<IDLE> in transaction' THEN 'idle in transaction' 15 | WHEN a.current_query = '<IDLE>' THEN 'idle' 16 | ELSE 'active' 17 | END AS state, 18 | CASE WHEN a.current_query LIKE '<IDLE>%%' 19 | THEN NULL 20 | ELSE a.current_query 21 | END AS query, 22 | pg_catalog.pg_encoding_to_char(b.encoding) AS
encoding 23 | FROM 24 | pg_catalog.pg_locks 25 | JOIN pg_catalog.pg_stat_activity a ON (pg_catalog.pg_locks.pid = a.procpid) 26 | LEFT OUTER JOIN pg_database b ON a.datid = b.oid 27 | WHERE 28 | NOT pg_catalog.pg_locks.granted 29 | AND a.procpid <> pg_backend_pid() 30 | AND CASE WHEN {min_duration} = 0 31 | THEN true 32 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 33 | END 34 | AND CASE WHEN {dbname_filter} IS NULL THEN true 35 | ELSE a.datname ~* %(dbname_filter)s 36 | END 37 | ORDER BY 38 | EXTRACT(epoch FROM (NOW() - a.{duration_column})) DESC; 39 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release HOW TO 2 | 3 | ## Preparatory changes 4 | 5 | * Review the **Unreleased** section, if any, in `CHANGELOG.md` possibly adding 6 | any missing item from closed issues, merged pull requests, or directly the git 7 | history[^git-changes], 8 | * Rename the **Unreleased** section according to the version to be released, 9 | with a date, 10 | * Bump the version in `pgactivity/__init__.py`, 11 | * Rebuild the man page, and, 12 | * Commit these changes (either on a dedicated branch, before submitting a pull 13 | request or directly on the `master` branch). 14 | * Then, when changes landed in the `master` branch, create an annotated (and 15 | possibly signed) tag, as `git tag -a [-s] -m 'pg_activity 1.6.0' v1.6.0`, and, 16 | * Push with `--follow-tags`. 17 | 18 | [^git-changes]: Use `git log $(git describe --tags --abbrev=0).. --format=%s 19 | --reverse` to get commits from the previous tag. 20 | 21 | ## PyPI package 22 | 23 | This requires no special action as, upon push of a tag on GitHub, the "publish" 24 | workflow will build the Python package and upload to PyPI. 25 | 26 | ## GitHub release 27 | 28 | *Draft a new release* from [release page][], choosing the tag just pushed and 29 | copy respective change log section as a description. 30 | 31 | [release page]: https://github.com/dalibo/pg_activity/releases 32 | 33 | ## Create a news article on postgresql.org and submit it 34 | 35 | Example for release 1.6.0 : 36 | ``` 37 | pg_activity (https://github.com/dalibo/pg_activity) 1.6.0 has been released. 
38 | 39 | This release adds the following features : 40 | 41 | * the --min-duration flag to only show laggy queries (kmoppel) 42 | * the --duration-mode and the shortcut (T) to choose from the duration modes: 43 | query, transaction, backend (nilshamerlinck) 44 | * the D shortcut to refresh dbsize (Fabio Renato Geiss) 45 | * an expanded refresh interval from 1-3s to 0.5-5s (kmoppel) 46 | 47 | The full release notes can be read here : 48 | https://github.com/dalibo/pg_activity/releases/tag/v1.6.0 49 | ``` -------------------------------------------------------------------------------- /tests/test_views.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from blessed import Terminal 3 | 4 | from pgactivity import views 5 | from pgactivity.config import Flag 6 | from pgactivity.types import UI, QueryMode, SortKey 7 | 8 | 9 | @pytest.fixture 10 | def term() -> Terminal: 11 | return Terminal() 12 | 13 | 14 | @pytest.mark.parametrize( 15 | "ui, width, expected", 16 | [ 17 | ( 18 | UI.make( 19 | query_mode=QueryMode.activities, 20 | flag=Flag.PID | Flag.DATABASE, 21 | sort_key=SortKey.cpu, 22 | ), 23 | 83, 24 | "PID DATABASE state Query", 25 | ), 26 | ( 27 | UI.make( 28 | query_mode=QueryMode.activities, flag=Flag.CPU, sort_key=SortKey.cpu 29 | ), 30 | None, 31 | "CPU% state Query", 32 | ), 33 | ( 34 | UI.make( 35 | query_mode=QueryMode.activities, flag=Flag.MEM, sort_key=SortKey.cpu 36 | ), 37 | None, 38 | "MEM% state Query", 39 | ), 40 | ( 41 | UI.make( 42 | query_mode=QueryMode.blocking, 43 | flag=Flag.PID 44 | | Flag.DATABASE 45 | | Flag.APPNAME 46 | | Flag.RELATION 47 | | Flag.CLIENT 48 | | Flag.WAIT, 49 | sort_key=SortKey.duration, 50 | ), 51 | 150, 52 | "PID DATABASE APP CLIENT RELATION Waiting state Query", 53 | ), 54 | ], 55 | ids=[ 56 | "pid+database; sort by cpu; activities", 57 | "cpu; sort by cpu; activities", 58 | "mem; sort by cpu; activities", 59 | "many flags; sort by duration; blocking", 60 | ], 61 | ) 62 | def test_columns_header(capsys, term, ui, width, expected): 63 | views.columns_header(term, ui, width=width) 64 | out = capsys.readouterr()[0] 65 | assert out == expected + "\n" 66 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_oldest.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information prior to 9.0 2 | WITH dbinfo AS( 3 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 4 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 5 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 6 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 7 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 8 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 9 | COALESCE(CASE 10 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 11 | ELSE SUM(pg_database_size(d.datname)) 12 | END, 0) AS total_size, 13 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 14 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 15 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 16 | current_timestamp - pg_postmaster_start_time() AS uptime, 17 | EXTRACT(EPOCH FROM NOW()) AS epoch 18 | FROM pg_database d 19 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 20 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 21 | AND CASE WHEN {dbname_filter} IS NULL THEN true 22 | ELSE d.datname ~* %(dbname_filter)s 23 | END 24 | ), activity AS ( 25 | SELECT sum(CASE WHEN current_query NOT LIKE '<IDLE>%%' THEN 1 ELSE 0 END) AS active_connections, 26 | sum(CASE WHEN current_query LIKE '<IDLE>' THEN 1 ELSE 0 END) AS idle, 27 | sum(CASE WHEN current_query LIKE '<IDLE> in transaction' THEN 1 ELSE 0 END) AS idle_in_transaction, 28 | sum(CASE WHEN current_query LIKE '<IDLE> in transaction (aborted)' THEN 1 ELSE 0 END) AS idle_in_transaction_aborted, 29 | count(*) AS total, 30 | sum(CASE WHEN waiting THEN 1 ELSE 0 END) AS waiting, 31 | current_setting('max_connections')::int AS max_connections, 32 | NULL AS autovacuum_workers, 33 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 34 | NULL AS logical_replication_workers, 35 | NULL AS parallel_workers, 36 | NULL AS max_logical_replication_workers, 37 | NULL AS max_parallel_workers, 38 | NULL AS max_worker_processes, 39 | NULL AS max_wal_senders, 40 | NULL AS max_replication_slots 41 | FROM pg_stat_activity 42 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 43 | ELSE datname ~* %(dbname_filter)s 44 | END 45 | ) 46 | SELECT * FROM dbinfo, activity 47 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_090200.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 9.2 2 | -- NEW pg_stat_activity.state 3 | WITH dbinfo AS( 4 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 5 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 6 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 7 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 8 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 9 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 10 | COALESCE(CASE 11 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 12 | ELSE SUM(pg_database_size(d.datname)) 13 | END, 0) AS total_size, 14 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 15 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 16 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 17 | current_timestamp - pg_postmaster_start_time() AS uptime, 18 | EXTRACT(EPOCH FROM NOW()) AS epoch 19 | FROM pg_database d 20 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 21 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 22 | AND CASE WHEN {dbname_filter} IS NULL THEN true 23 | ELSE d.datname ~* %(dbname_filter)s 24 | END 25 | ), activity AS ( 26 | SELECT sum(CASE WHEN state = 'active' THEN 1 ELSE 0 END) AS active_connections, 27 | sum(CASE WHEN state = 'idle' THEN 1 ELSE 0 END) AS idle, 28 | sum(CASE WHEN state = 'idle in transaction' THEN 1 ELSE 0 END) AS idle_in_transaction, 29 | sum(CASE WHEN state = 'idle in transaction aborted' THEN 1 ELSE 0 END) AS idle_in_transaction_aborted, 30 | count(*) AS total, 31 | sum(CASE WHEN waiting THEN 1 ELSE 0 END) AS waiting, 32 | current_setting('max_connections')::int AS max_connections, 33 | NULL AS autovacuum_workers, 34 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 35 | NULL AS logical_replication_workers, 36 | NULL AS parallel_workers, 37 | NULL AS max_logical_replication_workers, 38 | NULL AS max_parallel_workers, 39 | NULL AS max_worker_processes, 40 | current_setting('max_wal_senders')::int AS max_wal_senders, 41 | NULL AS max_replication_slots 42 | FROM pg_stat_activity 43 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 44 | ELSE datname ~* %(dbname_filter)s 45 | END 46 | ) 47 | SELECT * FROM dbinfo, activity 48 | -------------------------------------------------------------------------------- /pyproject.toml:
-------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 77.0.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "pg-activity" 7 | dynamic = ["version"] 8 | description = "Command line tool for PostgreSQL server activity monitoring." 9 | readme = "README.md" 10 | license = "PostgreSQL" 11 | requires-python = ">=3.9" 12 | authors = [ 13 | { name = "Julien Tachoires", email = "julmon@gmail.com" }, 14 | { name = "Benoit Lobréau", email = "benoit.lobreau@dalibo.com" }, 15 | { name = "Denis Laxalde", email = "denis.laxalde@dalibo.com" }, 16 | { name = "Dalibo", email = "contact@dalibo.com" }, 17 | ] 18 | maintainers = [ 19 | { name = "Denis Laxalde", email = "denis.laxalde@dalibo.com" }, 20 | { name = "Benoit Lobréau", email = "benoit.lobreau@dalibo.com" }, 21 | ] 22 | keywords = [ 23 | "activity", 24 | "cli", 25 | "monitoring", 26 | "postgresql", 27 | "sql", 28 | "top", 29 | ] 30 | classifiers = [ 31 | "Development Status :: 5 - Production/Stable", 32 | "Environment :: Console", 33 | "Environment :: Console :: Curses", 34 | "Programming Language :: Python :: 3", 35 | "Topic :: Database", 36 | ] 37 | dependencies = [ 38 | "attrs >= 17.4, !=21.1", 39 | "blessed >= 1.15.0", 40 | "humanize >= 0.5.1, !=4.12.0", 41 | "psutil >= 2.0.0", 42 | ] 43 | 44 | [project.optional-dependencies] 45 | dev = [ 46 | "pg-activity[psycopg,psycopg2,testing,typing]", 47 | "black >= 24.10.0", 48 | "check-manifest", 49 | "codespell", 50 | "flake8", 51 | "isort", 52 | "pre-commit", 53 | "pyupgrade", 54 | ] 55 | typing = [ 56 | "mypy", 57 | "types-psutil >= 5.9.5.15", 58 | "types-psycopg2", 59 | "types-setuptools", 60 | ] 61 | psycopg = [ 62 | "psycopg[binary] >= 3.1", 63 | ] 64 | psycopg2 = [ 65 | "psycopg2-binary >= 2.8", 66 | ] 67 | testing = [ 68 | "psycopg[binary]", 69 | "pytest", 70 | "pytest-postgresql >= 4.0", 71 | ] 72 | 73 | [project.scripts] 74 | pg_activity = "pgactivity.cli:main" 75 | 76 | [project.urls] 77 | "Bug Tracker" = "https://github.com/dalibo/pg_activity/issues/" 78 | Changelog = "https://github.com/dalibo/pg_activity/blob/master/CHANGELOG.md" 79 | Homepage = "https://github.com/dalibo/pg_activity" 80 | "Source code" = "https://github.com/dalibo/pg_activity/" 81 | 82 | [tool.setuptools.dynamic] 83 | version = { attr = "pgactivity.__version__" } 84 | 85 | [tool.black] 86 | line-length = 88 87 | include = '\.pyi?$' 88 | exclude = ''' 89 | ( 90 | /( 91 | \.eggs 92 | | \.git 93 | | \.mypy_cache 94 | | \.tox 95 | | \.venv 96 | | dist 97 | )/ 98 | ) 99 | ''' 100 | 101 | [tool.isort] 102 | profile = "black" 103 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_090100.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 9.1 2 | -- NEW guc max_wal_senders 3 | WITH dbinfo AS( 4 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 5 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 6 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 7 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 8 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 9 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 10 | COALESCE(CASE 11 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 12 | ELSE SUM(pg_database_size(d.datname)) 13 | END, 0) AS total_size, 14 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 15 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 16 
| COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 17 | current_timestamp - pg_postmaster_start_time() AS uptime, 18 | EXTRACT(EPOCH FROM NOW()) AS epoch 19 | FROM pg_database d 20 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 21 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 22 | AND CASE WHEN {dbname_filter} IS NULL THEN true 23 | ELSE d.datname ~* %(dbname_filter)s 24 | END 25 | ), activity AS ( 26 | SELECT sum(CASE WHEN current_query NOT LIKE '<IDLE>%%' THEN 1 ELSE 0 END) AS active_connections, 27 | sum(CASE WHEN current_query LIKE '<IDLE>' THEN 1 ELSE 0 END) AS idle, 28 | sum(CASE WHEN current_query LIKE '<IDLE> in transaction' THEN 1 ELSE 0 END) AS idle_in_transaction, 29 | sum(CASE WHEN current_query LIKE '<IDLE> in transaction (aborted)' THEN 1 ELSE 0 END) AS idle_in_transaction_aborted, 30 | count(*) AS total, 31 | sum(CASE WHEN waiting THEN 1 ELSE 0 END) AS waiting, 32 | current_setting('max_connections')::int AS max_connections, 33 | NULL AS autovacuum_workers, 34 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 35 | NULL AS logical_replication_workers, 36 | NULL AS parallel_workers, 37 | NULL AS max_logical_replication_workers, 38 | NULL AS max_parallel_workers, 39 | NULL AS max_worker_processes, 40 | current_setting('max_wal_senders')::int AS max_wal_senders, 41 | NULL AS max_replication_slots 42 | FROM pg_stat_activity 43 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 44 | ELSE datname ~* %(dbname_filter)s 45 | END 46 | ) 47 | SELECT * FROM dbinfo, activity 48 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_090400.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 9.4 2 | -- NEW guc max_replication_slots 3 | -- NEW guc max_worker_processes 4 | -- count(*) FILTER( WHERE ...)
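-- The aggregate FILTER clause used in the activity CTE below was introduced
-- in PostgreSQL 9.4; it is equivalent to the sum(CASE ...) counting idiom of
-- the older variants of this query, e.g.
--   count(*) FILTER( WHERE state = 'active')
--   == sum(CASE WHEN state = 'active' THEN 1 ELSE 0 END)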
5 | 6 | WITH dbinfo AS( 7 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 8 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 9 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 10 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 11 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 12 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 13 | COALESCE(CASE 14 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 15 | ELSE SUM(pg_database_size(d.datname)) 16 | END, 0) AS total_size, 17 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 18 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 19 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 20 | current_timestamp - pg_postmaster_start_time() AS uptime, 21 | EXTRACT(EPOCH FROM NOW()) AS epoch 22 | FROM pg_database d 23 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 24 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 25 | AND CASE WHEN {dbname_filter} IS NULL THEN true 26 | ELSE d.datname ~* %(dbname_filter)s 27 | END 28 | ), activity AS ( 29 | SELECT count(*) FILTER( WHERE state = 'active') AS active_connections, 30 | count(*) FILTER( WHERE state = 'idle') AS idle, 31 | count(*) FILTER( WHERE state = 'idle in transaction') AS idle_in_transaction, 32 | count(*) FILTER( WHERE state = 'idle in transaction aborted') AS idle_in_transaction_aborted, 33 | count(*) AS total, 34 | count(*) FILTER( WHERE waiting) as waiting, 35 | current_setting('max_connections')::int AS max_connections, 36 | NULL AS autovacuum_workers, 37 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 38 | NULL AS logical_replication_workers, 39 | NULL AS parallel_workers, 40 | NULL AS max_logical_replication_workers, 41 | NULL AS max_parallel_workers, 42 | current_setting('max_worker_processes')::int AS max_worker_processes, 43 | current_setting('max_wal_senders')::int AS max_wal_senders, 44 | current_setting('max_replication_slots')::int AS max_replication_slots 45 | FROM pg_stat_activity 46 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 47 | ELSE datname ~* %(dbname_filter)s 48 | END 49 | ) 50 | SELECT * FROM dbinfo, activity 51 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_090600.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 9.6 2 | -- NEW pg_stat_activity.wait_event_type 3 | -- NEW parallelism 4 | WITH dbinfo AS( 5 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 6 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 7 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 8 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 9 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 10 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 11 | COALESCE(CASE 12 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 13 | ELSE SUM(pg_database_size(d.datname)) 14 | END, 0) AS total_size, 15 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 16 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 17 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 18 | current_timestamp - pg_postmaster_start_time() AS uptime, 19 | EXTRACT(EPOCH FROM NOW()) AS epoch 20 | FROM pg_database d 21 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 22 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 23 | AND CASE WHEN {dbname_filter} IS NULL THEN true 24 | ELSE d.datname ~* %(dbname_filter)s 25 | END 26 | ), activity AS ( 27 | SELECT 
count(*) FILTER( WHERE state = 'active') AS active_connections, 28 | count(*) FILTER( WHERE state = 'idle') AS idle, 29 | count(*) FILTER( WHERE state = 'idle in transaction') AS idle_in_transaction, 30 | count(*) FILTER( WHERE state = 'idle in transaction aborted') AS idle_in_transaction_aborted, 31 | count(*) AS total, -- works with parallelism ? 32 | count(*) FILTER( WHERE wait_event_type = 'Lock') as waiting, 33 | current_setting('max_connections')::int AS max_connections, 34 | NULL AS autovacuum_workers, 35 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 36 | NULL AS logical_replication_workers, 37 | NULL AS parallel_workers, 38 | NULL AS max_logical_replication_workers, 39 | NULL AS max_parallel_workers, 40 | current_setting('max_worker_processes')::int AS max_worker_processes, 41 | current_setting('max_wal_senders')::int AS max_wal_senders, 42 | current_setting('max_replication_slots')::int AS max_replication_slots 43 | FROM pg_stat_activity 44 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 45 | ELSE datname ~* %(dbname_filter)s 46 | END 47 | ) 48 | SELECT * FROM dbinfo, activity 49 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_100000.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 10 2 | -- NEW pg_activity.backend_type 3 | -- NEW guc max_logical_replication_workers 4 | -- NEW guc max_parallel_workers 5 | WITH dbinfo AS( 6 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 7 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 8 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 9 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 10 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 11 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 12 | COALESCE(CASE 13 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 14 | ELSE SUM(pg_database_size(d.datname)) 15 | END, 0) AS total_size, 16 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 17 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 18 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 19 | current_timestamp - pg_postmaster_start_time() AS uptime, 20 | EXTRACT(EPOCH FROM NOW()) AS epoch 21 | FROM pg_database d 22 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 23 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 24 | AND CASE WHEN {dbname_filter} IS NULL THEN true 25 | ELSE d.datname ~* %(dbname_filter)s 26 | END 27 | ), activity AS ( 28 | SELECT count(*) FILTER( WHERE backend_type = 'client backend' and state = 'active') AS active_connections, 29 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle') AS idle, 30 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle in transaction') AS idle_in_transaction, 31 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle in transaction aborted') AS idle_in_transaction_aborted, 32 | count(*) FILTER( WHERE backend_type = 'client backend') AS total, 33 | count(*) FILTER( WHERE wait_event_type = 'Lock') as waiting, 34 | current_setting('max_connections')::int AS max_connections, 35 | count(*) FILTER( WHERE backend_type = 'autovacuum worker') AS autovacuum_workers, 36 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 37 | NULL AS logical_replication_workers, 38 | NULL AS parallel_workers, 39 | current_setting('max_logical_replication_workers')::int AS 
max_logical_replication_workers, 40 | current_setting('max_parallel_workers')::int AS max_parallel_workers, 41 | current_setting('max_worker_processes')::int AS max_worker_processes, 42 | current_setting('max_wal_senders')::int AS max_wal_senders, 43 | current_setting('max_replication_slots')::int AS max_replication_slots 44 | FROM pg_stat_activity 45 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 46 | ELSE datname ~* %(dbname_filter)s 47 | END 48 | ) 49 | SELECT * FROM dbinfo, activity 50 | -------------------------------------------------------------------------------- /pgactivity/queries/get_server_info_post_110000.sql: -------------------------------------------------------------------------------- 1 | -- Get the server information for pg >= 11 2 | -- NEW: pg_activity.backend_type for background workers (logical replication worker, parallel worker) 3 | WITH dbinfo AS( 4 | SELECT COALESCE(SUM(sd.xact_commit + sd.xact_rollback)::BIGINT, 0) AS xact_count, 5 | COALESCE(SUM(sd.xact_commit), 0) AS xact_commit, 6 | COALESCE(SUM(sd.xact_rollback), 0) AS xact_rollback, 7 | COALESCE(SUM(tup_inserted)::BIGINT, 0) AS insert, 8 | COALESCE(SUM(tup_updated)::BIGINT, 0) AS update, 9 | COALESCE(SUM(tup_deleted)::BIGINT, 0) AS delete, 10 | COALESCE(CASE 11 | WHEN %(skip_db_size)s THEN %(prev_total_size)s 12 | ELSE SUM(pg_database_size(d.datname)) 13 | END, 0) AS total_size, 14 | COALESCE(SUM(sd.blks_read), 0) AS blks_read, 15 | COALESCE(SUM(sd.blks_hit), 0) AS blks_hit, 16 | COALESCE(MAX(LENGTH(d.datname)), 0) AS max_dbname_length, 17 | current_timestamp - pg_postmaster_start_time() AS uptime, 18 | EXTRACT(EPOCH FROM NOW()) AS epoch 19 | FROM pg_database d 20 | INNER JOIN pg_stat_database sd ON d.oid = sd.datid 21 | WHERE NOT (d.datname = 'rdsadmin' AND %(using_rds)s) 22 | AND CASE WHEN {dbname_filter} IS NULL THEN true 23 | ELSE d.datname ~* %(dbname_filter)s 24 | END 25 | ), activity AS ( 26 | SELECT count(*) FILTER( WHERE backend_type = 'client backend' and state = 'active') AS active_connections, 27 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle') AS idle, 28 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle in transaction') AS idle_in_transaction, 29 | count(*) FILTER( WHERE backend_type = 'client backend' and state = 'idle in transaction aborted') AS idle_in_transaction_aborted, 30 | count(*) FILTER( WHERE backend_type = 'client backend') AS total, 31 | count(*) FILTER( WHERE wait_event_type = 'Lock') as waiting, 32 | current_setting('max_connections')::int AS max_connections, 33 | count(*) FILTER( WHERE backend_type = 'autovacuum worker') AS autovacuum_workers, 34 | current_setting('autovacuum_max_workers')::int AS autovacuum_max_workers, 35 | count(*) FILTER( WHERE backend_type = 'logical replication worker') AS logical_replication_workers, 36 | count(*) FILTER( WHERE backend_type = 'parallel worker') AS parallel_workers, 37 | current_setting('max_logical_replication_workers')::int AS max_logical_replication_workers, 38 | current_setting('max_parallel_workers')::int AS max_parallel_workers, 39 | current_setting('max_worker_processes')::int AS max_worker_processes, 40 | current_setting('max_wal_senders')::int AS max_wal_senders, 41 | current_setting('max_replication_slots')::int AS max_replication_slots 42 | FROM pg_stat_activity 43 | WHERE CASE WHEN {dbname_filter} IS NULL THEN true 44 | ELSE datname ~* %(dbname_filter)s 45 | END 46 | ) 47 | SELECT * FROM dbinfo, activity 48 | 
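Note: the post_NNNNNN suffixes on these query files encode the minimum server version in libpq's integer form (090400 for 9.4, 110000 for 11, and so on). Below is a minimal sketch of the dispatch that naming convention implies; the helper name and lookup table are illustrative assumptions, not the project's actual loader (which lives in pgactivity/queries/__init__.py):

    from pathlib import Path

    # Illustrative only: thresholds taken from the file names above, newest first.
    _SERVER_INFO_VARIANTS = [
        (110000, "get_server_info_post_110000.sql"),
        (100000, "get_server_info_post_100000.sql"),
        (90600, "get_server_info_post_090600.sql"),
        (90400, "get_server_info_post_090400.sql"),
        (90200, "get_server_info_post_090200.sql"),
        (90100, "get_server_info_post_090100.sql"),
        (0, "get_server_info_oldest.sql"),
    ]

    def pick_server_info_query(queries_dir: Path, server_version: int) -> str:
        """Return the newest query variant the server supports.

        'server_version' uses libpq's integer form, e.g. 90600 for 9.6
        and 110000 for 11.
        """
        for min_version, filename in _SERVER_INFO_VARIANTS:
            if server_version >= min_version:
                return (queries_dir / filename).read_text()
        raise AssertionError("unreachable: the oldest variant accepts any version")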
-------------------------------------------------------------------------------- /tests/test_scroll.txt: -------------------------------------------------------------------------------- 1 | Tests scrolling in processes_rows() 2 | ----------------------------------- 3 | 4 | >>> from blessed import Terminal 5 | >>> from pgactivity.types import * 6 | >>> from pgactivity.views import * 7 | 8 | >>> term = Terminal(force_styling=None) 9 | >>> term.width 10 | 80 11 | 12 | >>> processes_scroll = [ 13 | ... RunningProcess( 14 | ... pid=x, 15 | ... application_name="pgbench", 16 | ... database="pgbench", 17 | ... user="postgres", 18 | ... client=None, 19 | ... state="active", 20 | ... query=f"SELECT {x}", 21 | ... encoding=None, 22 | ... duration=0.0, 23 | ... xmin=1234, 24 | ... wait=False, 25 | ... query_leader_pid=x, 26 | ... is_parallel_worker=False, 27 | ... ) for x in range(0, 10)] 28 | >>> ui = UI.make(flag=Flag.PID|Flag.DATABASE) 29 | >>> sproc = SelectableProcesses(processes_scroll) 30 | >>> _ = sproc.focus_next() 31 | >>> sproc.position() 32 | 0 33 | >>> lc = line_counter(4) 34 | >>> processes_rows(term, ui, sproc, lc.value, lines_counter=lc) 35 | 0 pgbench active SELECT 0 36 | 1 pgbench active SELECT 1 37 | 2 pgbench active SELECT 2 38 | 3 pgbench active SELECT 3 39 | >>> for x in range(9): 40 | ... _ = sproc.focus_next() 41 | >>> sproc.position() 42 | 9 43 | >>> lc = line_counter(5) 44 | >>> processes_rows(term, ui, sproc, lc.value, lines_counter=lc) 45 | 6 pgbench active SELECT 6 46 | 7 pgbench active SELECT 7 47 | 8 pgbench active SELECT 8 48 | 9 pgbench active SELECT 9 49 | 0 pgbench active SELECT 0 50 | >>> _ = sproc.focus_next() 51 | >>> sproc.position() 52 | 0 53 | >>> lc = line_counter(5) 54 | >>> processes_rows(term, ui, sproc, lc.value, lines_counter=lc) 55 | 0 pgbench active SELECT 0 56 | 1 pgbench active SELECT 1 57 | 2 pgbench active SELECT 2 58 | 3 pgbench active SELECT 3 59 | 4 pgbench active SELECT 4 60 | >>> for x in range(9): 61 | ... _ = sproc.focus_next() 62 | >>> sproc.position() 63 | 9 64 | 65 | We never put the focus on the lower 1/5th of the screen. 66 | For 5 display lines, the focus will be on the 4th . 67 | 68 | >>> lc = line_counter(5) 69 | >>> processes_rows(term, ui, sproc, lc.value, lines_counter=lc) 70 | 6 pgbench active SELECT 6 71 | 7 pgbench active SELECT 7 72 | 8 pgbench active SELECT 8 73 | 9 pgbench active SELECT 9 74 | 0 pgbench active SELECT 0 75 | 76 | For 10 display lines, it should be at 8th line. 
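(With 10 display lines, one fifth of the screen is 2 lines kept below the
focus, so it sits no lower than the 8th; with 5 lines, 1 line is kept below,
hence the 4th.)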
77 | 78 | >>> lc = line_counter(10) 79 | >>> processes_rows(term, ui, sproc, lc.value, lines_counter=lc) 80 | 2 pgbench active SELECT 2 81 | 3 pgbench active SELECT 3 82 | 4 pgbench active SELECT 4 83 | 5 pgbench active SELECT 5 84 | 6 pgbench active SELECT 6 85 | 7 pgbench active SELECT 7 86 | 8 pgbench active SELECT 8 87 | 9 pgbench active SELECT 9 88 | 0 pgbench active SELECT 0 89 | 1 pgbench active SELECT 1 90 | 91 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import pathlib 5 | import threading 6 | from typing import Any 7 | 8 | import psycopg 9 | import psycopg.errors 10 | import pytest 11 | from psycopg import sql 12 | from psycopg.conninfo import make_conninfo 13 | 14 | from pgactivity import pg 15 | 16 | LOGGER = logging.getLogger(__name__) 17 | LOGGER.setLevel(logging.DEBUG) 18 | 19 | 20 | def pytest_report_header(config: Any) -> list[str]: 21 | return [f"psycopg: {pg.__version__}"] 22 | 23 | 24 | @pytest.fixture(scope="session") 25 | def datadir() -> pathlib.Path: 26 | return pathlib.Path(__file__).parent / "data" 27 | 28 | 29 | @pytest.fixture 30 | def database_factory(postgresql): 31 | dbnames = set() 32 | 33 | def createdb(dbname: str, encoding: str, locale: str | None = None) -> None: 34 | with psycopg.connect(postgresql.info.dsn, autocommit=True) as conn: 35 | qs = sql.SQL( 36 | "CREATE DATABASE {dbname} ENCODING {encoding} TEMPLATE template0" 37 | ).format(dbname=sql.Identifier(dbname), encoding=sql.Identifier(encoding)) 38 | if locale: 39 | qs = sql.SQL(" ").join( 40 | [ 41 | qs, 42 | sql.SQL("LOCALE {locale}").format( 43 | locale=sql.Identifier(locale) 44 | ), 45 | ] 46 | ) 47 | conn.execute(qs) 48 | dbnames.add(dbname) 49 | 50 | yield createdb 51 | 52 | with psycopg.connect(postgresql.info.dsn, autocommit=True) as conn: 53 | for dbname in dbnames: 54 | conn.execute( 55 | sql.SQL("DROP DATABASE IF EXISTS {dbname} WITH (FORCE)").format( 56 | dbname=sql.Identifier(dbname) 57 | ) 58 | ) 59 | 60 | 61 | @pytest.fixture 62 | def execute(postgresql): 63 | """Create a thread and return an execute() function that will run SQL queries in that 64 | thread. 
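    Each call opens its own connection and runs the query in a daemon thread,
    so statements that block (e.g. waiting on a lock) do not block the test
    itself; the threads are joined and the connections closed at teardown.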
65 | """ 66 | threads_and_cnx = [] 67 | 68 | def execute( 69 | query: str, 70 | commit: bool = False, 71 | autocommit: bool = False, 72 | dbname: str | None = None, 73 | ) -> None: 74 | dsn, kwargs = postgresql.info.dsn, {} 75 | if dbname: 76 | kwargs["dbname"] = dbname 77 | conn = psycopg.connect(make_conninfo(dsn, **kwargs)) 78 | conn.autocommit = autocommit 79 | 80 | def _execute() -> None: 81 | LOGGER.info( 82 | "running query %s (commit=%s, autocommit=%s) using connection <%s>", 83 | query, 84 | commit, 85 | autocommit, 86 | id(conn), 87 | ) 88 | with conn.cursor() as c: 89 | try: 90 | c.execute(query) 91 | except ( 92 | psycopg.errors.AdminShutdown, 93 | psycopg.errors.QueryCanceled, 94 | ): 95 | return 96 | if not autocommit and commit: 97 | conn.commit() 98 | LOGGER.info("query %s finished", query) 99 | 100 | thread = threading.Thread(target=_execute, daemon=True) 101 | thread.start() 102 | threads_and_cnx.append((thread, conn)) 103 | 104 | yield execute 105 | 106 | for thread, conn in threads_and_cnx: 107 | thread.join(timeout=2) 108 | LOGGER.info("closing connection <%s>", id(conn)) 109 | conn.close() 110 | -------------------------------------------------------------------------------- /tests/test_cli_help.txt: -------------------------------------------------------------------------------- 1 | >>> import sys 2 | >>> import pytest 3 | >>> if sys.version_info < (3, 13): 4 | ... pytest.skip("only applies for Python version >= 3.13") 5 | ... 6 | 7 | >>> from pgactivity import cli 8 | >>> parser = cli.get_parser(prog="pg_activity") 9 | >>> parser.print_help() 10 | usage: pg_activity [options] [connection string] 11 | 12 | htop like application for PostgreSQL server activity monitoring. 13 | 14 | Configuration: 15 | -P, --profile PROFILE 16 | Configuration profile matching a PROFILE.conf file in 17 | ${XDG_CONFIG_HOME:~/.config}/pg_activity/ or 18 | /etc/pg_activity/, or a built-in profile. 19 | 20 | Options: 21 | --blocksize BLOCKSIZE 22 | Filesystem blocksize (default: 4096). 23 | --rds Enable support for AWS RDS (implies --no-tempfiles and 24 | filters out the rdsadmin database from space 25 | calculation). 26 | --output FILEPATH Store running queries as CSV. 27 | --db-size, --no-db-size 28 | Enable/disable total size of DB. 29 | --tempfiles, --no-tempfiles 30 | Enable/disable tempfile count and size. 31 | --walreceiver, --no-walreceiver 32 | Enable/disable walreceiver checks. 33 | -w, --wrap-query Wrap query column instead of truncating. 34 | --duration-mode DURATION_MODE 35 | Duration mode. Values: 1-QUERY(default), 36 | 2-TRANSACTION, 3-BACKEND. 37 | --min-duration SECONDS 38 | Don't display queries with smaller than specified 39 | duration (in seconds). 40 | --filter FIELD:REGEX Filter activities with a (case insensitive) regular 41 | expression applied on selected fields. Known fields 42 | are: dbname. 43 | --debug-file DEBUG_FILE 44 | Enable debug and write it to DEBUG_FILE. 45 | --version show program's version number and exit. 46 | --help Show this help message and exit. 47 | 48 | Connection Options: 49 | connection string A valid connection string to the database, e.g.: 50 | 'host=HOSTNAME port=PORT user=USER dbname=DBNAME'. 51 | -h, --host HOSTNAME Database server host or socket directory. 52 | -p, --port PORT Database server port. 53 | -U, --username USERNAME 54 | Database user name. 55 | -d, --dbname DBNAME Database name to connect to. 56 | 57 | Process table display options: 58 | These options may be used hide some columns from the processes table. 
59 | 60 | --pid, --no-pid Enable/disable PID. 61 | --xmin, --no-xmin Enable/disable XMIN. 62 | --database, --no-database 63 | Enable/disable DATABASE. 64 | --user, --no-user Enable/disable USER. 65 | --client, --no-client 66 | Enable/disable CLIENT. 67 | --cpu, --no-cpu Enable/disable CPU%. 68 | --mem, --no-mem Enable/disable MEM%. 69 | --read, --no-read Enable/disable READ/s. 70 | --write, --no-write Enable/disable WRITE/s. 71 | --time, --no-time Enable/disable TIME+. 72 | --wait, --no-wait Enable/disable W. 73 | --app-name, --no-app-name 74 | Enable/disable APP. 75 | 76 | Header display options: 77 | --no-inst-info Hide instance information. 78 | --no-sys-info Hide system information. 79 | --no-proc-info Hide workers process information. 80 | 81 | Other display options: 82 | --hide-queries-in-logs 83 | Disable log_min_duration_statements and 84 | log_min_duration_sample for pg_activity. 85 | --refresh REFRESH Refresh rate. Values: 0.5, 1, 2, 3, 4, 5 (default: 2). 86 | 87 | The connection string can be in the form of a list of Key/Value parameters or 88 | an URI as described in the PostgreSQL documentation. The parsing is delegated 89 | to the libpq: different versions of the client library may support different 90 | formats or parameters (for example, connection URIs are only supported from 91 | libpq 9.2). 92 | -------------------------------------------------------------------------------- /tests/test_cli_help_py312.txt: -------------------------------------------------------------------------------- 1 | >>> import sys 2 | >>> import pytest 3 | >>> if sys.version_info >= (3, 13): 4 | ... pytest.skip("only applies for Python version < 3.13") 5 | ... 6 | 7 | >>> from pgactivity import cli 8 | >>> parser = cli.get_parser(prog="pg_activity") 9 | >>> parser.print_help() 10 | usage: pg_activity [options] [connection string] 11 | 12 | htop like application for PostgreSQL server activity monitoring. 13 | 14 | Configuration: 15 | -P PROFILE, --profile PROFILE 16 | Configuration profile matching a PROFILE.conf file in 17 | ${XDG_CONFIG_HOME:~/.config}/pg_activity/ or 18 | /etc/pg_activity/, or a built-in profile. 19 | 20 | Options: 21 | --blocksize BLOCKSIZE 22 | Filesystem blocksize (default: 4096). 23 | --rds Enable support for AWS RDS (implies --no-tempfiles and 24 | filters out the rdsadmin database from space 25 | calculation). 26 | --output FILEPATH Store running queries as CSV. 27 | --db-size, --no-db-size 28 | Enable/disable total size of DB. 29 | --tempfiles, --no-tempfiles 30 | Enable/disable tempfile count and size. 31 | --walreceiver, --no-walreceiver 32 | Enable/disable walreceiver checks. 33 | -w, --wrap-query Wrap query column instead of truncating. 34 | --duration-mode DURATION_MODE 35 | Duration mode. Values: 1-QUERY(default), 36 | 2-TRANSACTION, 3-BACKEND. 37 | --min-duration SECONDS 38 | Don't display queries with smaller than specified 39 | duration (in seconds). 40 | --filter FIELD:REGEX Filter activities with a (case insensitive) regular 41 | expression applied on selected fields. Known fields 42 | are: dbname. 43 | --debug-file DEBUG_FILE 44 | Enable debug and write it to DEBUG_FILE. 45 | --version show program's version number and exit. 46 | --help Show this help message and exit. 47 | 48 | Connection Options: 49 | connection string A valid connection string to the database, e.g.: 50 | 'host=HOSTNAME port=PORT user=USER dbname=DBNAME'. 51 | -h HOSTNAME, --host HOSTNAME 52 | Database server host or socket directory. 53 | -p PORT, --port PORT Database server port. 
54 | -U USERNAME, --username USERNAME 55 | Database user name. 56 | -d DBNAME, --dbname DBNAME 57 | Database name to connect to. 58 | 59 | Process table display options: 60 | These options may be used hide some columns from the processes table. 61 | 62 | --pid, --no-pid Enable/disable PID. 63 | --xmin, --no-xmin Enable/disable XMIN. 64 | --database, --no-database 65 | Enable/disable DATABASE. 66 | --user, --no-user Enable/disable USER. 67 | --client, --no-client 68 | Enable/disable CLIENT. 69 | --cpu, --no-cpu Enable/disable CPU%. 70 | --mem, --no-mem Enable/disable MEM%. 71 | --read, --no-read Enable/disable READ/s. 72 | --write, --no-write Enable/disable WRITE/s. 73 | --time, --no-time Enable/disable TIME+. 74 | --wait, --no-wait Enable/disable W. 75 | --app-name, --no-app-name 76 | Enable/disable APP. 77 | 78 | Header display options: 79 | --no-inst-info Hide instance information. 80 | --no-sys-info Hide system information. 81 | --no-proc-info Hide workers process information. 82 | 83 | Other display options: 84 | --hide-queries-in-logs 85 | Disable log_min_duration_statements and 86 | log_min_duration_sample for pg_activity. 87 | --refresh REFRESH Refresh rate. Values: 0.5, 1, 2, 3, 4, 5 (default: 2). 88 | 89 | The connection string can be in the form of a list of Key/Value parameters or 90 | an URI as described in the PostgreSQL documentation. The parsing is delegated 91 | to the libpq: different versions of the client library may support different 92 | formats or parameters (for example, connection URIs are only supported from 93 | libpq 9.2). 94 | -------------------------------------------------------------------------------- /pgactivity/keys.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import curses 4 | from typing import Any 5 | 6 | import attr 7 | from blessed.keyboard import Keystroke 8 | 9 | from .types import QueryMode 10 | 11 | 12 | @attr.s(auto_attribs=True, frozen=True, slots=True) 13 | class Key: 14 | value: str 15 | description: str 16 | name: str | None = None 17 | local_only: bool = False 18 | 19 | def __eq__(self, other: Any) -> bool: 20 | if not isinstance(other, str): 21 | return False 22 | return self.value == other 23 | 24 | 25 | CANCEL_SELECTION = "KEY_ESCAPE" 26 | CHANGE_DURATION_MODE = "T" 27 | WRAP_QUERY = "v" 28 | EXIT = "q" 29 | HELP = "h" 30 | SPACE = " " 31 | COPY_TO_CLIPBOARD = "y" 32 | PROCESS_CANCEL = "C" 33 | PROCESS_KILL = "K" 34 | PROCESS_FIRST = "KEY_HOME" 35 | PROCESS_NEXT = "KEY_DOWN" 36 | PROCESS_NEXTPAGE = "KEY_PGDOWN" 37 | PROCESS_NEXT_VI = "j" 38 | PROCESS_PREV = "KEY_UP" 39 | PROCESS_PREV_VI = "k" 40 | PROCESS_PREVPAGE = "KEY_PGUP" 41 | PROCESS_LAST = "KEY_END" 42 | PROCESS_PIN = Key(SPACE, "tag/untag current query", "Space") 43 | REFRESH_DB_SIZE = "D" 44 | REFRESH_TIME_INCREASE = "+" 45 | REFRESH_TIME_DECREASE = "-" 46 | SORTBY_READ = "r" 47 | SORTBY_WRITE = "w" 48 | SORTBY_MEM = "m" 49 | SORTBY_TIME = "t" 50 | SORTBY_CPU = "c" 51 | HEADER_TOGGLE_SYSTEM = "s" 52 | HEADER_TOGGLE_INSTANCE = "i" 53 | HEADER_TOGGLE_WORKERS = "o" 54 | 55 | 56 | def is_process_next(key: Keystroke) -> bool: 57 | if key.name == PROCESS_NEXT: 58 | return True 59 | elif key == PROCESS_NEXT_VI: 60 | return True 61 | return False 62 | 63 | 64 | def is_process_prev(key: Keystroke) -> bool: 65 | if key.name == PROCESS_PREV: 66 | return True 67 | elif key == PROCESS_PREV_VI: 68 | return True 69 | return False 70 | 71 | 72 | def is_process_nextpage(key: Keystroke) -> bool: 73 | return 
key.name == PROCESS_NEXTPAGE 74 | 75 | 76 | def is_process_prevpage(key: Keystroke) -> bool: 77 | return key.name == PROCESS_PREVPAGE 78 | 79 | 80 | def is_process_first(key: Keystroke) -> bool: 81 | return key.name == PROCESS_FIRST 82 | 83 | 84 | def is_process_last(key: Keystroke) -> bool: 85 | return key.name == PROCESS_LAST 86 | 87 | 88 | def is_toggle_header_system(key: Keystroke) -> bool: 89 | return key == HEADER_TOGGLE_SYSTEM 90 | 91 | 92 | def is_toggle_header_instance(key: Keystroke) -> bool: 93 | return key == HEADER_TOGGLE_INSTANCE 94 | 95 | 96 | def is_toggle_header_workers(key: Keystroke) -> bool: 97 | return key == HEADER_TOGGLE_WORKERS 98 | 99 | 100 | EXIT_KEY = Key(EXIT, "quit") 101 | PAUSE_KEY = Key(SPACE, "pause/unpause", "Space") 102 | 103 | BINDINGS: list[Key] = [ 104 | Key("Up/Down", "scroll process list"), 105 | PAUSE_KEY, 106 | Key(SORTBY_CPU, "sort by CPU% desc. (activities)", local_only=True), 107 | Key(SORTBY_MEM, "sort by MEM% desc. (activities)", local_only=True), 108 | Key(SORTBY_READ, "sort by READ/s desc. (activities)", local_only=True), 109 | Key(SORTBY_WRITE, "sort by WRITE/s desc. (activities)", local_only=True), 110 | Key(SORTBY_TIME, "sort by TIME+ desc. (activities)", local_only=True), 111 | Key(REFRESH_TIME_INCREASE, "increase refresh time (max:5s)"), 112 | Key(REFRESH_TIME_DECREASE, "decrease refresh time (min:0.5s)"), 113 | Key(WRAP_QUERY, "toggle query wrap"), 114 | Key(CHANGE_DURATION_MODE, "change duration mode"), 115 | Key(REFRESH_DB_SIZE, "force refresh database size"), 116 | Key("R", "force refresh"), 117 | Key(HEADER_TOGGLE_SYSTEM, "Display system information in header", local_only=True), 118 | Key(HEADER_TOGGLE_INSTANCE, "Display general instance information in header"), 119 | Key(HEADER_TOGGLE_WORKERS, "Display worker information in header"), 120 | EXIT_KEY, 121 | ] 122 | 123 | 124 | def _sequence_by_int(v: int) -> tuple[str, str, int]: 125 | """ 126 | >>> _sequence_by_int(11) 127 | ('F11', '11', 275) 128 | """ 129 | assert 1 <= v <= 12, v 130 | return f"F{v}", str(v), getattr(curses, f"KEY_F{v}") 131 | 132 | 133 | KEYS_BY_QUERYMODE = { 134 | QueryMode.activities: _sequence_by_int(1), 135 | QueryMode.waiting: _sequence_by_int(2), 136 | QueryMode.blocking: _sequence_by_int(3), 137 | } 138 | QUERYMODE_FROM_KEYS = { 139 | k: qm for qm, keys in KEYS_BY_QUERYMODE.items() for k in keys[1:] 140 | } 141 | 142 | 143 | MODES: list[Key] = [ 144 | Key("/".join(KEYS_BY_QUERYMODE[qm][:-1]), qm.value) for qm in QueryMode 145 | ] 146 | -------------------------------------------------------------------------------- /pgactivity/handlers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from blessed.keyboard import Keystroke 4 | 5 | from . import keys 6 | from .config import Flag 7 | from .types import DurationMode, QueryMode, SortKey, enum_next 8 | 9 | 10 | def refresh_time( 11 | key: str | None, value: float, minimum: float = 0.5, maximum: float = 5 12 | ) -> float: 13 | """Return an updated refresh time interval from input key respecting bounds. 14 | 15 | >>> refresh_time("+", 1) 16 | 2 17 | >>> refresh_time("+", 5) 18 | 5 19 | >>> refresh_time("+", 5, maximum=10) 20 | 6 21 | >>> refresh_time("-", 2) 22 | 1 23 | >>> refresh_time("-", 1) 24 | 0.5 25 | >>> refresh_time("-", 0.5) 26 | 0.5 27 | >>> refresh_time("=", 42) 28 | Traceback (most recent call last): 29 | ... 
30 | ValueError: = 31 | """ 32 | if key == keys.REFRESH_TIME_DECREASE: 33 | return max(value - 1, minimum) 34 | elif key == keys.REFRESH_TIME_INCREASE: 35 | return min(int(value + 1), maximum) 36 | raise ValueError(key) 37 | 38 | 39 | def duration_mode(key: Keystroke, mode: DurationMode) -> DurationMode: 40 | """Return the updated duration mode matching input key. 41 | 42 | >>> from blessed.keyboard import Keystroke as k 43 | 44 | >>> duration_mode(k("42"), DurationMode.query).name 45 | 'query' 46 | >>> duration_mode(k("T"), DurationMode.transaction).name 47 | 'backend' 48 | """ 49 | if key == keys.CHANGE_DURATION_MODE: 50 | return enum_next(mode) 51 | return mode 52 | 53 | 54 | def wrap_query(key: Keystroke, wrap: bool) -> bool: 55 | """Return the updated 'wrap' value depending on input key. 56 | 57 | >>> from blessed.keyboard import Keystroke as k 58 | 59 | >>> wrap_query(k("42"), True) 60 | True 61 | >>> wrap_query(k("v"), False) 62 | True 63 | """ 64 | if key == keys.WRAP_QUERY: 65 | return not wrap 66 | return wrap 67 | 68 | 69 | def query_mode(key: Keystroke) -> QueryMode | None: 70 | """Return the query mode matching input key or None. 71 | 72 | >>> import curses 73 | >>> from blessed.keyboard import Keystroke as k 74 | 75 | >>> query_mode(k("42")) 76 | >>> query_mode(k("1")).name 77 | 'activities' 78 | >>> query_mode(k(code=curses.KEY_F3)).name 79 | 'blocking' 80 | """ 81 | if key.is_sequence and key.code: 82 | try: 83 | return keys.QUERYMODE_FROM_KEYS[key.code] 84 | except KeyError: 85 | pass 86 | return keys.QUERYMODE_FROM_KEYS.get(key) 87 | 88 | 89 | def sort_key_for(key: Keystroke, query_mode: QueryMode, flag: Flag) -> SortKey | None: 90 | """Return the sort key matching input key or None. 91 | 92 | >>> from blessed.keyboard import Keystroke as k 93 | >>> from pgactivity.types import QueryMode 94 | 95 | >>> flag = Flag.all() 96 | 97 | Unhandled key: 98 | >>> sort_key_for(k("1"), QueryMode.activities, flag) 99 | 100 | In activities mode, 'm', 'w', 't', ... 
keys are handled: 101 | >>> sort_key_for(k("m"), QueryMode.activities, flag).name 102 | 'mem' 103 | >>> sort_key_for(k("w"), QueryMode.activities, flag).name 104 | 'write' 105 | >>> sort_key_for(k("t"), QueryMode.activities, flag).name 106 | 'duration' 107 | >>> sort_key_for(k("c"), QueryMode.activities, flag).name 108 | 'cpu' 109 | 110 | In other modes, the default sort key is always returned: 111 | >>> sort_key_for(k("m"), QueryMode.waiting, flag).name 112 | 'duration' 113 | 114 | When flag does not match given sort key, return None: 115 | >>> flag ^= Flag.CPU 116 | >>> sort_key_for(k("c"), QueryMode.activities, flag) 117 | >>> sort_key_for(k("m"), QueryMode.activities, flag).name 118 | 'mem' 119 | >>> flag ^= Flag.MEM 120 | >>> sort_key_for(k("m"), QueryMode.activities, flag) 121 | """ 122 | if query_mode != QueryMode.activities: 123 | return SortKey.default() 124 | try: 125 | sort_key, required_flag = { 126 | keys.SORTBY_CPU: (SortKey.cpu, Flag.CPU), 127 | keys.SORTBY_MEM: (SortKey.mem, Flag.MEM), 128 | keys.SORTBY_READ: (SortKey.read, Flag.READ), 129 | keys.SORTBY_TIME: (SortKey.duration, Flag.TIME), 130 | keys.SORTBY_WRITE: (SortKey.write, Flag.WRITE), 131 | }[key] 132 | except KeyError: 133 | return None 134 | if flag & required_flag: 135 | return sort_key 136 | return None 137 | -------------------------------------------------------------------------------- /docs/imgs/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | image/svg+xml -------------------------------------------------------------------------------- /tests/test_activities.py: -------------------------------------------------------------------------------- 1 | import json 2 | from collections import namedtuple 3 | from unittest.mock import patch 4 | 5 | import attr 6 | import pytest 7 | 8 | from pgactivity import activities 9 | from pgactivity.types import ( 10 | IOCounter, 11 | LoadAverage, 12 | MemoryInfo, 13 | RunningProcess, 14 | SwapInfo, 15 | SystemProcess, 16 | ) 17 | 18 | 19 | @pytest.fixture 20 | def system_processes(datadir): 21 | with (datadir / "local-processes-input.json").open() as f: 22 | input_data = json.load(f) 23 | 24 | fs_blocksize = input_data["fs_blocksize"] 25 | 26 | pg_processes = [] 27 | new_system_procs = {} 28 | system_procs = {} 29 | 30 | running_process_fields = {a.name for a in attr.fields(RunningProcess)} 31 | 32 | def system_process(extras): 33 | for k in ("io_read", "io_write"): 34 | try: 35 | counter = extras.pop(k) 36 | except KeyError: 37 | pass 38 | else: 39 | extras[k] = IOCounter(counter["count"], counter["bytes"]) 40 | return SystemProcess(**extras) 41 | 42 | for new_proc in input_data["new_processes"].values(): 43 | new_system_procs[new_proc["pid"]] = system_process(new_proc["extras"]) 44 | pg_processes.append( 45 | RunningProcess( 46 | **{k: v for k, v in new_proc.items() if k in running_process_fields} 47 | ) 48 | ) 49 | 50 | system_procs = { 51 | proc["pid"]: system_process(proc["extras"]) 52 | for proc in input_data["processes"].values() 53 | } 54 | 55 | return pg_processes, system_procs, new_system_procs, fs_blocksize 56 | 57 | 58 | def test_ps_complete(system_processes): 59 | pg_processes, system_procs, new_system_procs, fs_blocksize = system_processes 60 | 61 | def sys_get_proc(pid): 62 | return new_system_procs.pop(pid, None) 63 | 64 | n_system_procs = len(system_procs) 65 | 66 | with patch("pgactivity.activities.sys_get_proc", new=sys_get_proc): 67 | procs, io_read, io_write = activities.ps_complete( 68 | pg_processes, 
system_procs, fs_blocksize 69 | ) 70 | 71 | assert not new_system_procs # all new system processes consumed 72 | 73 | assert io_read == IOCounter.default() 74 | assert io_write == IOCounter.default() 75 | assert len(procs) == len(pg_processes) 76 | assert len(system_procs) == n_system_procs 77 | assert {p.pid for p in procs} == { 78 | 6221, 79 | 6222, 80 | 6223, 81 | 6224, 82 | 6225, 83 | 6226, 84 | 6227, 85 | 6228, 86 | 6229, 87 | 6230, 88 | 6231, 89 | 6232, 90 | 6233, 91 | 6234, 92 | 6235, 93 | 6237, 94 | 6238, 95 | 6239, 96 | 6240, 97 | } 98 | 99 | 100 | def test_ps_complete_empty_procs(system_processes): 101 | # same as test_ps_complete() but starting with an empty "system_procs" dict 102 | pg_processes, __, new_system_procs, fs_blocksize = system_processes 103 | 104 | def sys_get_proc(pid): 105 | return new_system_procs.pop(pid, None) 106 | 107 | system_procs = {} 108 | 109 | with patch("pgactivity.activities.sys_get_proc", new=sys_get_proc): 110 | procs, io_read, io_write = activities.ps_complete( 111 | pg_processes, system_procs, fs_blocksize 112 | ) 113 | 114 | assert not new_system_procs # all new system processes consumed 115 | 116 | assert io_read == IOCounter.default() 117 | assert io_write == IOCounter.default() 118 | assert len(procs) == len(pg_processes) 119 | assert system_procs 120 | 121 | 122 | def test_mem_swap_load() -> None: 123 | pmem = namedtuple("pmem", ["total", "free", "buffers", "cached"]) 124 | vmem = namedtuple("vmem", ["total", "free", "used"]) 125 | with ( 126 | patch( 127 | "psutil.virtual_memory", return_value=pmem(45, 6, 6, 7) 128 | ) as virtual_memory, 129 | patch("psutil.swap_memory", return_value=vmem(8, 6, 2)) as swap_memory, 130 | patch("os.getloadavg", return_value=(0.14, 0.27, 0.44)) as getloadavg, 131 | ): 132 | memory, swap, load = activities.mem_swap_load() 133 | virtual_memory.assert_called_once_with() 134 | swap_memory.assert_called_once_with() 135 | getloadavg.assert_called_once_with() 136 | assert memory == MemoryInfo(total=45, used=26, free=6, buff_cached=13) 137 | assert swap == SwapInfo(total=8, used=2, free=6) 138 | assert load == LoadAverage(0.14, 0.27, 0.44) 139 | assert memory.pct_used == 57.77777777777778 140 | assert memory.pct_free == 13.333333333333334 141 | assert memory.pct_bc == 28.88888888888889 142 | assert swap.pct_used == 25.0 143 | assert swap.pct_free == 75.0 144 | -------------------------------------------------------------------------------- /pgactivity/queries/get_blocking_post_090600.sql: -------------------------------------------------------------------------------- 1 | -- Get blocking queries >= 9.6 2 | -- NEW pg_stat_activity.waiting => pg_stat_activity.wait_event 3 | SELECT 4 | pid, 5 | application_name, 6 | sq.datname AS database, 7 | usename AS user, 8 | client, 9 | relation, 10 | mode, 11 | locktype AS type, 12 | duration, 13 | state, 14 | sq.query AS query, 15 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 16 | wait_event as wait 17 | FROM 18 | ( 19 | -- Transaction id lock 20 | SELECT 21 | blocking.pid, 22 | pg_stat_activity.application_name, 23 | pg_stat_activity.query, 24 | blocking.mode, 25 | pg_stat_activity.datname, 26 | pg_stat_activity.datid, 27 | pg_stat_activity.usename, 28 | pg_stat_activity.client_addr AS client, 29 | blocking.locktype, 30 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 31 | pg_stat_activity.state as state, 32 | blocking.relation::regclass AS relation, 33 | pg_stat_activity.wait_event 34 | FROM 35 | pg_locks AS blocking 36 | JOIN pg_locks 
AS blocked ON (blocking.transactionid = blocked.transactionid AND blocking.locktype = blocked.locktype) 37 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 38 | WHERE 39 | blocking.granted 40 | AND NOT blocked.granted 41 | AND CASE WHEN {min_duration} = 0 42 | THEN true 43 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 44 | END 45 | AND CASE WHEN {dbname_filter} IS NULL THEN true 46 | ELSE datname ~* %(dbname_filter)s 47 | END 48 | UNION ALL 49 | -- VirtualXid Lock 50 | SELECT 51 | blocking.pid, 52 | pg_stat_activity.application_name, 53 | pg_stat_activity.query, 54 | blocking.mode, 55 | pg_stat_activity.datname, 56 | pg_stat_activity.datid, 57 | pg_stat_activity.usename, 58 | pg_stat_activity.client_addr AS client, 59 | blocking.locktype, 60 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 61 | pg_stat_activity.state as state, 62 | blocking.relation::regclass AS relation, 63 | pg_stat_activity.wait_event 64 | FROM 65 | pg_locks AS blocking 66 | JOIN pg_locks AS blocked ON (blocking.virtualxid = blocked.virtualxid AND blocking.locktype = blocked.locktype) 67 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 68 | WHERE 69 | blocking.granted 70 | AND NOT blocked.granted 71 | AND CASE WHEN {min_duration} = 0 72 | THEN true 73 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 74 | END 75 | AND CASE WHEN {dbname_filter} IS NULL THEN true 76 | ELSE datname ~* %(dbname_filter)s 77 | END 78 | UNION ALL 79 | -- Relation or tuple Lock 80 | SELECT 81 | blocking.pid, 82 | pg_stat_activity.application_name, 83 | pg_stat_activity.query, 84 | blocking.mode, 85 | pg_stat_activity.datname, 86 | pg_stat_activity.datid, 87 | pg_stat_activity.usename, 88 | pg_stat_activity.client_addr AS client, 89 | blocking.locktype, 90 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 91 | pg_stat_activity.state as state, 92 | blocking.relation::regclass AS relation, 93 | pg_stat_activity.wait_event 94 | FROM 95 | pg_locks AS blocking 96 | JOIN pg_locks AS blocked ON (blocking.database = blocked.database AND blocking.relation = blocked.relation AND blocking.locktype = blocked.locktype) 97 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 98 | WHERE 99 | blocking.granted 100 | AND NOT blocked.granted 101 | AND blocked.relation IS NOT NULL 102 | AND CASE WHEN {min_duration} = 0 103 | THEN true 104 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 105 | END 106 | AND CASE WHEN {dbname_filter} IS NULL THEN true 107 | ELSE datname ~* %(dbname_filter)s 108 | END 109 | ) AS sq 110 | LEFT OUTER JOIN pg_database b ON sq.datid = b.oid 111 | GROUP BY 112 | pid, 113 | application_name, 114 | database, 115 | usename, 116 | client, 117 | relation, 118 | mode, 119 | locktype, 120 | duration, 121 | state, 122 | query, 123 | encoding, 124 | wait_event 125 | ORDER BY 126 | duration DESC; 127 | -------------------------------------------------------------------------------- /pgactivity/queries/get_blocking_post_090200.sql: -------------------------------------------------------------------------------- 1 | -- Get blocking queries >= 9.2 2 | -- NEW pg_stat_activity.state 3 | -- NEW pg_stat_activity.current_query => pg_stat_activity.query 4 | -- NEW pg_stat_activity.procpid => pg_stat_activity.pid 5 | SELECT 6 | pid, 7 | application_name, 8 | sq.datname AS database, 9 | usename AS user, 10 | client, 11 | relation, 12 | mode, 13 | locktype AS type, 14 | duration, 15 | 
state, 16 | sq.query AS query, 17 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 18 | waiting as wait 19 | FROM 20 | ( 21 | -- Transaction id lock 22 | SELECT 23 | blocking.pid, 24 | pg_stat_activity.application_name, 25 | pg_stat_activity.query, 26 | blocking.mode, 27 | pg_stat_activity.datname, 28 | pg_stat_activity.datid, 29 | pg_stat_activity.usename, 30 | pg_stat_activity.client_addr AS client, 31 | blocking.locktype, 32 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 33 | pg_stat_activity.state as state, 34 | blocking.relation::regclass AS relation, 35 | pg_stat_activity.waiting 36 | FROM 37 | pg_locks AS blocking 38 | JOIN pg_locks AS blocked ON (blocking.transactionid = blocked.transactionid AND blocking.locktype = blocked.locktype) 39 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 40 | WHERE 41 | blocking.granted 42 | AND NOT blocked.granted 43 | AND CASE WHEN {min_duration} = 0 44 | THEN true 45 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 46 | END 47 | AND CASE WHEN {dbname_filter} IS NULL THEN true 48 | ELSE datname ~* %(dbname_filter)s 49 | END 50 | UNION ALL 51 | -- VirtualXid Lock 52 | SELECT 53 | blocking.pid, 54 | pg_stat_activity.application_name, 55 | pg_stat_activity.query, 56 | blocking.mode, 57 | pg_stat_activity.datname, 58 | pg_stat_activity.datid, 59 | pg_stat_activity.usename, 60 | pg_stat_activity.client_addr AS client, 61 | blocking.locktype, 62 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 63 | pg_stat_activity.state as state, 64 | blocking.relation::regclass AS relation, 65 | pg_stat_activity.waiting 66 | FROM 67 | pg_locks AS blocking 68 | JOIN pg_locks AS blocked ON (blocking.virtualxid = blocked.virtualxid AND blocking.locktype = blocked.locktype) 69 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 70 | WHERE 71 | blocking.granted 72 | AND NOT blocked.granted 73 | AND CASE WHEN {min_duration} = 0 74 | THEN true 75 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 76 | END 77 | AND CASE WHEN {dbname_filter} IS NULL THEN true 78 | ELSE datname ~* %(dbname_filter)s 79 | END 80 | UNION ALL 81 | -- Relation or tuple Lock 82 | SELECT 83 | blocking.pid, 84 | pg_stat_activity.application_name, 85 | pg_stat_activity.query, 86 | blocking.mode, 87 | pg_stat_activity.datname, 88 | pg_stat_activity.datid, 89 | pg_stat_activity.usename, 90 | pg_stat_activity.client_addr AS client, 91 | blocking.locktype, 92 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 93 | pg_stat_activity.state as state, 94 | blocking.relation::regclass AS relation, 95 | pg_stat_activity.waiting 96 | FROM 97 | pg_locks AS blocking 98 | JOIN pg_locks AS blocked ON (blocking.database = blocked.database AND blocking.relation = blocked.relation AND blocking.locktype = blocked.locktype) 99 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.pid) 100 | WHERE 101 | blocking.granted 102 | AND NOT blocked.granted 103 | AND blocked.relation IS NOT NULL 104 | AND CASE WHEN {min_duration} = 0 105 | THEN true 106 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 107 | END 108 | AND CASE WHEN {dbname_filter} IS NULL THEN true 109 | ELSE datname ~* %(dbname_filter)s 110 | END 111 | ) AS sq 112 | LEFT OUTER JOIN pg_database b ON sq.datid = b.oid 113 | GROUP BY 114 | pid, 115 | application_name, 116 | database, 117 | usename, 118 | client, 119 | relation, 120 | mode, 121 | locktype, 122 | duration, 
123 | state, 124 | query, 125 | encoding, 126 | waiting 127 | ORDER BY 128 | duration DESC; 129 | -------------------------------------------------------------------------------- /pgactivity/queries/get_blocking_oldest.sql: -------------------------------------------------------------------------------- 1 | -- Get blocking queries 2 | SELECT 3 | pid, 4 | application_name, 5 | sq.datname AS database, 6 | usename AS user, 7 | client, 8 | relation, 9 | mode, 10 | locktype AS type, 11 | duration, 12 | CASE 13 | WHEN sq.query = '<IDLE> in transaction (aborted)' THEN 'idle in transaction (aborted)' 14 | WHEN sq.query = '<IDLE> in transaction' THEN 'idle in transaction' 15 | WHEN sq.query = '<IDLE>' THEN 'idle' 16 | ELSE 'active' 17 | END AS state, 18 | CASE WHEN sq.query LIKE '<IDLE>%%' 19 | THEN NULL 20 | ELSE sq.query 21 | END AS query, 22 | pg_catalog.pg_encoding_to_char(b.encoding) AS encoding, 23 | waiting AS wait 24 | FROM 25 | ( 26 | -- Transaction id lock 27 | SELECT 28 | blocking.pid, 29 | '' AS application_name, 30 | pg_stat_activity.current_query AS query, 31 | blocking.mode, 32 | pg_stat_activity.datname, 33 | pg_stat_activity.datid, 34 | pg_stat_activity.usename, 35 | pg_stat_activity.client_addr AS client, 36 | blocking.locktype, 37 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 38 | NULL AS state, 39 | blocking.relation::regclass AS relation, 40 | pg_stat_activity.waiting 41 | FROM 42 | pg_locks AS blocking 43 | JOIN pg_locks AS blocked ON (blocking.transactionid = blocked.transactionid AND blocking.locktype = blocked.locktype) 44 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.procpid) 45 | WHERE 46 | blocking.granted 47 | AND NOT blocked.granted 48 | AND CASE WHEN {min_duration} = 0 49 | THEN true 50 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 51 | END 52 | AND CASE WHEN {dbname_filter} IS NULL THEN true 53 | ELSE datname ~* %(dbname_filter)s 54 | END 55 | UNION ALL 56 | -- VirtualXid Lock 57 | SELECT 58 | blocking.pid, 59 | '' AS application_name, 60 | pg_stat_activity.current_query AS query, 61 | blocking.mode, 62 | pg_stat_activity.datname, 63 | pg_stat_activity.datid, 64 | pg_stat_activity.usename, 65 | pg_stat_activity.client_addr AS client, 66 | blocking.locktype, 67 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 68 | NULL AS state, 69 | blocking.relation::regclass AS relation, 70 | pg_stat_activity.waiting 71 | FROM 72 | pg_locks AS blocking 73 | JOIN pg_locks AS blocked ON (blocking.virtualxid = blocked.virtualxid AND blocking.locktype = blocked.locktype) 74 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.procpid) 75 | WHERE 76 | blocking.granted 77 | AND NOT blocked.granted 78 | AND CASE WHEN {min_duration} = 0 79 | THEN true 80 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 81 | END 82 | AND CASE WHEN {dbname_filter} IS NULL THEN true 83 | ELSE datname ~* %(dbname_filter)s 84 | END 85 | UNION ALL 86 | -- Relation or tuple Lock 87 | SELECT 88 | blocking.pid, 89 | '' AS application_name, 90 | pg_stat_activity.current_query AS query, 91 | blocking.mode, 92 | pg_stat_activity.datname, 93 | pg_stat_activity.datid, 94 | pg_stat_activity.usename, 95 | pg_stat_activity.client_addr AS client, 96 | blocking.locktype, 97 | EXTRACT(epoch FROM (NOW() - pg_stat_activity.{duration_column})) AS duration, 98 | NULL AS state, 99 | blocking.relation::regclass AS relation, 100 | pg_stat_activity.waiting 101 | FROM 102 | pg_locks AS blocking 103 | JOIN pg_locks AS blocked
ON (blocking.database = blocked.database AND blocking.relation = blocked.relation AND blocking.locktype = blocked.locktype) 104 | JOIN pg_stat_activity ON (blocking.pid = pg_stat_activity.procpid) 105 | WHERE 106 | blocking.granted 107 | AND NOT blocked.granted 108 | AND blocked.relation IS NOT NULL 109 | AND CASE WHEN {min_duration} = 0 110 | THEN true 111 | ELSE extract(epoch from now() - {duration_column}) > %(min_duration)s 112 | END 113 | AND CASE WHEN {dbname_filter} IS NULL THEN true 114 | ELSE datname ~* %(dbname_filter)s 115 | END 116 | ) AS sq 117 | LEFT OUTER JOIN pg_database b ON sq.datid = b.oid 118 | GROUP BY 119 | pid, 120 | application_name, 121 | database, 122 | usename, 123 | client, 124 | relation, 125 | mode, 126 | locktype, 127 | duration, 128 | state, 129 | query, 130 | encoding, 131 | waiting 132 | ORDER BY 133 | duration DESC; 134 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pathlib import Path 4 | from typing import Any 5 | 6 | import attr 7 | import pytest 8 | 9 | from pgactivity.config import Configuration, ConfigurationError, Flag, UISection 10 | 11 | 12 | def test_flag() -> None: 13 | f = Flag(3) 14 | assert f == Flag.APPNAME | Flag.DATABASE 15 | assert f | Flag.CLIENT == Flag.CLIENT | Flag.APPNAME | Flag.DATABASE 16 | f ^= Flag.APPNAME 17 | assert f == Flag.DATABASE 18 | 19 | 20 | def test_flag_load() -> None: 21 | options = { 22 | "appname": True, 23 | "client": True, 24 | "cpu": True, 25 | "database": True, 26 | "mem": True, 27 | "pid": None, 28 | "read": True, 29 | "relation": True, 30 | "time": True, 31 | "user": True, 32 | "wait": None, 33 | "write": True, 34 | "xmin": True, 35 | } 36 | flag = Flag.load(None, is_local=True, **options) 37 | assert ( 38 | flag 39 | == Flag.PID 40 | | Flag.IOWAIT 41 | | Flag.MODE 42 | | Flag.TYPE 43 | | Flag.RELATION 44 | | Flag.WAIT 45 | | Flag.TIME 46 | | Flag.WRITE 47 | | Flag.READ 48 | | Flag.MEM 49 | | Flag.CPU 50 | | Flag.USER 51 | | Flag.CLIENT 52 | | Flag.APPNAME 53 | | Flag.DATABASE 54 | | Flag.XMIN 55 | ) 56 | cfg = Configuration( 57 | name="test", 58 | values=dict( 59 | pid=UISection(hidden=True), 60 | relation=UISection(hidden=False), 61 | wait=UISection(hidden=False), 62 | ), 63 | ) 64 | flag = Flag.load(cfg, is_local=False, **options) 65 | assert ( 66 | flag 67 | == Flag.MODE 68 | | Flag.TYPE 69 | | Flag.RELATION 70 | | Flag.WAIT 71 | | Flag.TIME 72 | | Flag.USER 73 | | Flag.CLIENT 74 | | Flag.APPNAME 75 | | Flag.DATABASE 76 | | Flag.XMIN 77 | ) 78 | options["database"] = False 79 | options["time"] = False 80 | options["pid"] = False 81 | cfg = Configuration( 82 | name="test", 83 | values=dict(database=UISection(hidden=False), relation=UISection(hidden=True)), 84 | ) 85 | flag = Flag.load(cfg, is_local=False, **options) 86 | assert ( 87 | flag 88 | == Flag.MODE 89 | | Flag.TYPE 90 | | Flag.WAIT 91 | | Flag.USER 92 | | Flag.CLIENT 93 | | Flag.APPNAME 94 | | Flag.XMIN 95 | ) 96 | 97 | 98 | def asdict(cfg: Configuration) -> dict[str, Any]: 99 | return {k: attr.asdict(v) for k, v in cfg.items()} 100 | 101 | 102 | def test_error() -> None: 103 | cfg = Configuration(name="test", values={}) 104 | with pytest.raises(ConfigurationError, match="test error") as cm: 105 | raise cfg.error("test error") 106 | assert cm.value.filename == "test" 107 | 108 | 109 | def test_lookup(tmp_path: Path) -> None: 110 | cfg = 
Configuration.lookup(None, user_config_home=tmp_path) 111 | assert cfg is None 112 | 113 | (tmp_path / "pg_activity.conf").write_text( 114 | "\n".join( 115 | [ 116 | "[client]", 117 | "width=5", 118 | "color=cyan", 119 | "[header]", 120 | "show_instance=no", 121 | ] 122 | ) 123 | ) 124 | cfg = Configuration.lookup(None, user_config_home=tmp_path) 125 | assert cfg is not None and asdict(cfg) == { 126 | "client": {"hidden": False, "width": 5, "color": "cyan"}, 127 | "header": {"show_instance": False, "show_system": True, "show_workers": True}, 128 | } 129 | 130 | (tmp_path / "pg_activity").mkdir() 131 | (tmp_path / "pg_activity" / "x.conf").write_text( 132 | "\n".join( 133 | ["[database]", "hidden= on", "width = 3 ", "[header]", "show_workers=no"] 134 | ) 135 | ) 136 | cfg = Configuration.lookup("x", user_config_home=tmp_path) 137 | assert cfg is not None and asdict(cfg) == { 138 | "database": {"hidden": True, "width": 3, "color": None}, 139 | "header": {"show_instance": True, "show_system": True, "show_workers": False}, 140 | } 141 | 142 | with pytest.raises(FileNotFoundError): 143 | Configuration.lookup("y", user_config_home=tmp_path) 144 | 145 | 146 | no_header = { 147 | "header": {k: False for k in ("show_instance", "show_system", "show_workers")} 148 | } 149 | columns = ( 150 | "database", 151 | "user", 152 | "client", 153 | "cpu", 154 | "mem", 155 | "read", 156 | "write", 157 | "appname", 158 | "xmin", 159 | ) 160 | narrow = {k: {"hidden": True, "width": None, "color": None} for k in columns} 161 | wide = {k: {"hidden": False, "width": None, "color": None} for k in columns} 162 | minimal = {**no_header, **narrow} 163 | 164 | 165 | @pytest.mark.parametrize( 166 | "profile, expected", 167 | [ 168 | ("minimal", minimal), 169 | ("narrow", narrow), 170 | ("wide", wide), 171 | ], 172 | ) 173 | def test_lookup_builtin_profiles( 174 | tmp_path: Path, profile: str, expected: dict[str, Any] 175 | ) -> None: 176 | cfg = Configuration.lookup(profile, user_config_home=tmp_path) 177 | assert cfg is not None and asdict(cfg) == expected 178 | -------------------------------------------------------------------------------- /docs/imgs/logo-horizontal.svg: -------------------------------------------------------------------------------- 1 | 2 | image/svg+xmlpg_activity 98 | -------------------------------------------------------------------------------- /tests/test_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import time 4 | 5 | import attr 6 | import psycopg 7 | import pytest 8 | from psycopg.errors import WrongObjectType 9 | 10 | from pgactivity import types 11 | from pgactivity.data import Data 12 | 13 | 14 | def retry(fct, msg: str, timeout: int = 2): 15 | count = int(timeout / 0.1) 16 | for _ in range(count): 17 | time.sleep(0.1) 18 | value = fct() 19 | if value: 20 | return value 21 | pytest.fail(msg) 22 | 23 | 24 | @pytest.fixture 25 | def data(postgresql): 26 | obj = Data.pg_connect( 27 | host=postgresql.info.host, 28 | port=postgresql.info.port, 29 | database=postgresql.info.dbname, 30 | user=postgresql.info.user, 31 | ) 32 | yield obj 33 | obj.pg_conn.close() 34 | 35 | 36 | def test_pg_is_local(postgresql, data): 37 | assert data.pg_is_local() 38 | 39 | 40 | def test_pg_is_local_access(postgresql, data): 41 | assert data.pg_is_local_access() 42 | 43 | 44 | def test_pg_get_server_information(data): 45 | data.pg_get_server_information(None) 46 | 47 | 48 | def test_activities(postgresql, data): 49 | 
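    # The fixture connection is transactional (no autocommit): pg_sleep(1)
    # returns but the transaction stays open, hence the 'idle in transaction'
    # state asserted below.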
postgresql.execute("SELECT pg_sleep(1)") 50 | (running,) = data.pg_get_activities() 51 | assert "pg_sleep" in running.query 52 | assert running.state == "idle in transaction" 53 | if postgresql.info.server_version >= 100000: 54 | assert running.wait == "ClientRead" 55 | else: 56 | assert running.wait is None 57 | assert not running.is_parallel_worker 58 | 59 | 60 | def test_blocking_waiting(postgresql, data, execute): 61 | postgresql.execute("CREATE TABLE t AS (SELECT 'init'::text s)") 62 | postgresql.commit() 63 | execute("UPDATE t SET s = 'blocking'") 64 | execute("UPDATE t SET s = 'waiting 1'", commit=True) 65 | execute("UPDATE t SET s = 'waiting 2'", commit=True) 66 | blocking = retry(data.pg_get_blocking, msg="could not fetch blocking queries") 67 | waiting = data.pg_get_waiting() 68 | assert len(blocking) == 2 69 | assert len(waiting) == 2 70 | assert "blocking" in blocking[0].query 71 | assert "waiting 1" in waiting[0].query and "waiting 2" in waiting[1].query 72 | if postgresql.info.server_version >= 100000: 73 | assert blocking[0].wait == "ClientRead" 74 | assert blocking[1].wait == "transactionid" 75 | assert str(blocking[0].type) == "transactionid" 76 | assert str(blocking[1].type) == "tuple" 77 | 78 | 79 | def test_pg_get_blocking_virtualxid(postgresql, data, execute): 80 | postgresql.execute("CREATE TABLE t AS (SELECT 'init'::text s)") 81 | postgresql.commit() 82 | execute("UPDATE t SET s = 'blocking'") 83 | execute("CREATE INDEX CONCURRENTLY ON t(s)", autocommit=True) 84 | (blocking,) = retry(data.pg_get_blocking, msg="could not fetch blocking queries") 85 | (waiting,) = data.pg_get_waiting() 86 | assert "blocking" in blocking.query 87 | assert "CREATE INDEX CONCURRENTLY ON t(s)" in waiting.query 88 | assert str(blocking.type) == "virtualxid" 89 | 90 | 91 | def test_cancel_backend(postgresql, data): 92 | postgresql.execute("SELECT pg_sleep(1)") 93 | (running,) = data.pg_get_activities() 94 | assert data.pg_cancel_backend(running.pid) 95 | 96 | 97 | def test_terminate_backend(postgresql, data): 98 | postgresql.execute("SELECT pg_sleep(1)") 99 | (running,) = data.pg_get_activities() 100 | assert data.pg_terminate_backend(running.pid) 101 | retry(lambda: not data.pg_get_activities(), msg="some processes are still active") 102 | 103 | 104 | def test_encoding(postgresql, data, execute): 105 | """Test for issue #149, #332.""" 106 | conninfo = postgresql.info.dsn 107 | # plateform specific locales (Centos, Ubuntu) 108 | encodings = ["fr_FR.latin1", "fr_FR.88591", "fr_FR.8859-1"] 109 | with psycopg.connect(conninfo, autocommit=True) as conn: 110 | for encoding in encodings: 111 | try: 112 | conn.execute( 113 | f"CREATE DATABASE latin1 ENCODING 'latin1' TEMPLATE template0 LC_COLLATE '{encoding}' LC_CTYPE '{encoding}'" 114 | ) 115 | except WrongObjectType: 116 | continue 117 | else: 118 | break 119 | else: 120 | pytest.skip( 121 | f"could not create a database with encoding amongst {', '.join(encodings)}" 122 | ) 123 | 124 | with psycopg.connect(conninfo, dbname="latin1") as conn: 125 | conn.execute("CREATE TABLE tbl AS (SELECT 'initilialized éléphant' s)") 126 | conn.commit() 127 | 128 | execute("UPDATE tbl SET s = 'blocking éléphant'", dbname="latin1") 129 | execute("UPDATE tbl SET s = 'waiting éléphant'", dbname="latin1", commit=True) 130 | running = retry(data.pg_get_activities, msg="could not fetch activities") 131 | assert "blocking éléphant" in running[0].query 132 | (waiting,) = retry(data.pg_get_waiting, "no waiting process") 133 | assert waiting.query and "waiting éléphant" 
in waiting.query 134 | (blocking,) = data.pg_get_blocking() 135 | assert blocking.query and "blocking éléphant" in blocking.query 136 | 137 | 138 | @pytest.mark.parametrize("encoding", ["utf-8", "latin1", "sql_ascii"]) 139 | def test_client_encoding(postgresql, encoding: str) -> None: 140 | data = Data.pg_connect( 141 | host=postgresql.info.host, 142 | port=postgresql.info.port, 143 | database=postgresql.info.dbname, 144 | user=postgresql.info.user, 145 | dsn=f"client_encoding={encoding}", 146 | ) 147 | try: 148 | assert data.pg_version.startswith( 149 | f"PostgreSQL {str(postgresql.info.server_version)[:2]}" 150 | ) 151 | finally: 152 | data.pg_conn.close() 153 | 154 | 155 | @pytest.mark.parametrize( 156 | "pyenc, pgenc, locale", 157 | [ 158 | ("utf8", "UTF8", None), 159 | ("ascii", "SQL_ASCII", None), 160 | ("unknown", "EUC_TW", "zh_TW.euctw"), 161 | ], 162 | ) 163 | def test_postgres_and_python_encoding( 164 | database_factory, pyenc: str, pgenc: str, locale: str | None, data, postgresql 165 | ) -> None: 166 | dbname = pyenc 167 | try: 168 | database_factory(dbname, encoding=pgenc, locale=locale) 169 | except WrongObjectType: 170 | pytest.skip(f"could not create a database with encoding '{pgenc}'") 171 | with psycopg.connect( 172 | postgresql.info.dsn, dbname=dbname, client_encoding="utf-8" 173 | ) as conn: 174 | conn.execute("SELECT 'encoding'") 175 | (running,) = data.pg_get_activities() 176 | assert "'encoding'" in running.query 177 | assert running.encoding == pgenc 178 | assert running.database == dbname 179 | 180 | 181 | def test_filters_dbname(data, execute): 182 | data_filtered = attr.evolve(data, filters=types.Filters(dbname="temp")) 183 | execute("SELECT pg_sleep(2)", dbname="template1", autocommit=True) 184 | nbconn = retry( 185 | data.pg_get_server_information, 186 | msg="could not get active connections for filtered DBNAME", 187 | ) 188 | nbconn_filtered = retry( 189 | data_filtered.pg_get_server_information, 190 | msg="could not get active connections for filtered DBNAME", 191 | ) 192 | assert nbconn.active_connections == 2 193 | assert nbconn_filtered.active_connections == 1 194 | -------------------------------------------------------------------------------- /pgactivity/pg.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import os 5 | from collections.abc import Callable, Sequence 6 | from typing import Any, TypeVar, overload 7 | 8 | Row = TypeVar("Row") 9 | 10 | try: 11 | if "_PGACTIVITY_USE_PSYCOPG2" in os.environ: 12 | raise ImportError("psycopg2 requested from environment") 13 | 14 | import psycopg 15 | from psycopg import sql as sql 16 | from psycopg._encodings import pg2pyenc 17 | from psycopg.adapt import Buffer, Loader 18 | from psycopg.conninfo import conninfo_to_dict, make_conninfo 19 | from psycopg.errors import FeatureNotSupported as FeatureNotSupported 20 | from psycopg.errors import InsufficientPrivilege as InsufficientPrivilege 21 | from psycopg.errors import InterfaceError as InterfaceError 22 | from psycopg.errors import InvalidPassword as InvalidPassword 23 | from psycopg.errors import NotSupportedError 24 | from psycopg.errors import OperationalError as OperationalError 25 | from psycopg.errors import ProgrammingError as ProgrammingError 26 | from psycopg.errors import QueryCanceled as QueryCanceled 27 | from psycopg.rows import dict_row 28 | 29 | __version__ = psycopg.__version__ 30 | 31 | Connection = psycopg.Connection[dict[str, Any]] 32 | 33 | 
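    # Note: this psycopg (v3) branch is the preferred one; exporting
    # _PGACTIVITY_USE_PSYCOPG2 triggers the ImportError at the top of the
    # try block so that the psycopg2 fallback further down is used instead,
    # e.g. `_PGACTIVITY_USE_PSYCOPG2=1 pg_activity`.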
class BytesLoader(Loader): 34 | def load(self, data: Buffer) -> bytes | bytearray: 35 | if isinstance(data, memoryview): 36 | return bytes(data) 37 | return data 38 | 39 | class AutoTextLoader(Loader): 40 | def load(self, data: Buffer) -> str: 41 | if not isinstance(data, bytes): 42 | data = bytes(data) 43 | return data.decode(errors="replace") 44 | 45 | def connect(dsn: str = "", **kwargs: Any) -> Connection: 46 | if "PGCLIENTENCODING" not in os.environ: 47 | # Set client_encoding to 'auto', if not set by the user. 48 | # This is (more or less) what's done by psql. 49 | conninfo = conninfo_to_dict(dsn) 50 | conninfo.setdefault("client_encoding", "auto") 51 | dsn = make_conninfo("", **conninfo) 52 | conn = psycopg.connect(dsn, autocommit=True, row_factory=dict_row, **kwargs) 53 | if conn.info.encoding == "ascii": 54 | # If client encoding is still 'ascii', fall back to a loader with a replace 55 | # policy. 56 | logging.getLogger("pgactivity").warning( 57 | "client encoding is 'ascii', using a fallback loader for character types" 58 | ) 59 | conn.adapters.register_loader("text", AutoTextLoader) 60 | conn.adapters.register_loader("varchar", AutoTextLoader) 61 | conn.adapters.register_loader('"char"', AutoTextLoader) 62 | return conn 63 | 64 | def server_version(conn: Connection) -> int: 65 | return conn.info.server_version 66 | 67 | def connection_parameters(conn: Connection) -> dict[str, Any]: 68 | return conn.info.get_parameters() 69 | 70 | def execute( 71 | conn: Connection, 72 | query: str | sql.Composed, 73 | args: None | Sequence[Any] | dict[str, Any] = None, 74 | ) -> None: 75 | conn.execute(query, args, prepare=True) 76 | 77 | @overload 78 | def cursor( 79 | conn: Connection, mkrow: Callable[..., Row], text_as_bytes: bool 80 | ) -> psycopg.Cursor[Row]: ... 81 | 82 | @overload 83 | def cursor( 84 | conn: Connection, mkrow: None, text_as_bytes: bool 85 | ) -> psycopg.Cursor[psycopg.rows.DictRow]: ... 86 | 87 | def cursor( 88 | conn: Connection, mkrow: Callable[..., Row] | None, text_as_bytes: bool 89 | ) -> psycopg.Cursor[psycopg.rows.DictRow] | psycopg.Cursor[Row]: 90 | if mkrow is not None: 91 | cur = conn.cursor(row_factory=psycopg.rows.kwargs_row(mkrow)) 92 | else: 93 | cur = conn.cursor() # type: ignore[assignment] 94 | if text_as_bytes: 95 | cur.adapters.register_loader("text", BytesLoader) 96 | return cur 97 | 98 | @overload 99 | def fetchone( 100 | conn: Connection, 101 | query: str | sql.Composed, 102 | args: None | Sequence[Any] | dict[str, Any] = None, 103 | *, 104 | mkrow: Callable[..., Row], 105 | text_as_bytes: bool = False, 106 | ) -> Row: ... 107 | 108 | @overload 109 | def fetchone( 110 | conn: Connection, 111 | query: str | sql.Composed, 112 | args: None | Sequence[Any] | dict[str, Any] = None, 113 | *, 114 | text_as_bytes: bool = False, 115 | ) -> dict[str, Any]: ... 116 | 117 | def fetchone( 118 | conn: Connection, 119 | query: str | sql.Composed, 120 | args: None | Sequence[Any] | dict[str, Any] = None, 121 | *, 122 | mkrow: Callable[..., Row] | None = None, 123 | text_as_bytes: bool = False, 124 | ) -> dict[str, Any] | Row: 125 | with cursor(conn, mkrow, text_as_bytes) as cur: 126 | row = cur.execute(query, args, prepare=True).fetchone() 127 | assert row is not None 128 | return row 129 | 130 | @overload 131 | def fetchall( 132 | conn: Connection, 133 | query: str | sql.Composed, 134 | args: None | Sequence[Any] | dict[str, Any] = None, 135 | *, 136 | mkrow: Callable[..., Row], 137 | text_as_bytes: bool = False, 138 | ) -> list[Row]: ... 
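    # Illustrative usage sketch (editor's note, not part of the original
    # module; 'conn' and the 'Backend' class below are assumptions): with
    # 'mkrow', fetchall() maps each row's columns onto keyword arguments of
    # the given callable, so an attrs class can produce typed rows:
    #
    #   import attr
    #
    #   @attr.s(auto_attribs=True)
    #   class Backend:
    #       pid: int
    #       query: str
    #
    #   backends = fetchall(
    #       conn, "SELECT pid, query FROM pg_stat_activity", mkrow=Backend
    #   )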
139 | 140 | @overload 141 | def fetchall( 142 | conn: Connection, 143 | query: str | sql.Composed, 144 | args: None | Sequence[Any] | dict[str, Any] = None, 145 | *, 146 | text_as_bytes: bool = False, 147 | ) -> list[dict[str, Any]]: ... 148 | 149 | def fetchall( 150 | conn: Connection, 151 | query: str | sql.Composed, 152 | args: None | Sequence[Any] | dict[str, Any] = None, 153 | *, 154 | text_as_bytes: bool = False, 155 | mkrow: Callable[..., Row] | None = None, 156 | ) -> list[dict[str, Any]] | list[Row]: 157 | with cursor(conn, mkrow, text_as_bytes) as cur: 158 | return cur.execute(query, args, prepare=True).fetchall() 159 | 160 | def decode(value: bytes, pgenc: bytes, *, errors: str) -> str: 161 | """Decode 'value' with PostgreSQL encoding 'pgenc' converted to Python encoding 162 | name if available. 163 | """ 164 | try: 165 | pyenc = pg2pyenc(pgenc) 166 | except NotSupportedError: 167 | pyenc = "utf-8" 168 | return value.decode(pyenc, errors=errors) 169 | 170 | def needs_password(exc: OperationalError) -> bool: 171 | assert exc.pgconn is not None 172 | return exc.pgconn.needs_password 173 | 174 | except ImportError: 175 | import codecs 176 | 177 | import psycopg2 178 | import psycopg2.extensions 179 | from psycopg2 import sql as sql # type: ignore[no-redef] 180 | 181 | # isort: off 182 | from psycopg2.errors import FeatureNotSupported as FeatureNotSupported # type: ignore[assignment] 183 | from psycopg2.errors import InsufficientPrivilege as InsufficientPrivilege # type: ignore[assignment] 184 | from psycopg2.errors import InterfaceError as InterfaceError # type: ignore[assignment] 185 | from psycopg2.errors import InvalidPassword as InvalidPassword # type: ignore[assignment] 186 | from psycopg2.errors import OperationalError as OperationalError # type: ignore[assignment] 187 | from psycopg2.errors import ProgrammingError as ProgrammingError # type: ignore[assignment] 188 | from psycopg2.errors import QueryCanceled as QueryCanceled # type: ignore[assignment] 189 | 190 | # isort: on 191 | from psycopg2.extensions import connection as Connection # type: ignore[assignment] 192 | from psycopg2.extras import DictCursor 193 | 194 | __version__ = psycopg2.__version__ # type: ignore[attr-defined] 195 | 196 | def connect(dsn: str = "", **kwargs: Any) -> Connection: 197 | try: 198 | kwargs.setdefault("database", kwargs.pop("dbname")) 199 | except KeyError: 200 | pass 201 | conn = psycopg2.connect(dsn, cursor_factory=DictCursor, **kwargs) 202 | conn.autocommit = True 203 | return conn # type: ignore[no-any-return] 204 | 205 | def server_version(conn: Connection) -> int: 206 | return conn.server_version # type: ignore[attr-defined, no-any-return] 207 | 208 | def connection_parameters(conn: Connection) -> dict[str, Any]: 209 | return conn.info.dsn_parameters # type: ignore[attr-defined, no-any-return] 210 | 211 | def execute( 212 | conn: Connection, 213 | query: str | sql.Composed, 214 | args: None | Sequence[Any] | dict[str, Any] = None, 215 | ) -> None: 216 | with conn.cursor() as cur: 217 | cur.execute(query, args) 218 | 219 | def fetchone( # type: ignore[no-redef] 220 | conn: Connection, 221 | query: str | sql.Composed, 222 | args: None | Sequence[Any] | dict[str, Any] = None, 223 | *, 224 | mkrow: Callable[..., Row] | None = None, 225 | text_as_bytes: bool = False, 226 | ) -> dict[str, Any] | Row: 227 | with conn.cursor() as cur: 228 | if text_as_bytes: 229 | psycopg2.extensions.register_type(psycopg2.extensions.BYTES, cur) # type: ignore[arg-type] 230 | cur.execute(query, args) 231 | row = 
cur.fetchone() 232 | assert row is not None 233 | if mkrow is not None: 234 | return mkrow(**row) 235 | return row 236 | 237 | def fetchall( # type: ignore[no-redef] 238 | conn: Connection, 239 | query: str | sql.Composed, 240 | args: None | Sequence[Any] | dict[str, Any] = None, 241 | *, 242 | mkrow: Callable[..., Row] | None = None, 243 | text_as_bytes: bool = False, 244 | ) -> list[dict[str, Any]] | list[Row]: 245 | with conn.cursor() as cur: 246 | if text_as_bytes: 247 | psycopg2.extensions.register_type(psycopg2.extensions.BYTES, cur) # type: ignore[arg-type] 248 | cur.execute(query, args) 249 | rows = cur.fetchall() 250 | if mkrow is not None: 251 | return [mkrow(**row) for row in rows] 252 | return rows 253 | 254 | def decode(value: bytes, pgenc: bytes, *, errors: str) -> str: 255 | """Decode 'value' with PostgreSQL encoding 'pgenc' converted to Python encoding 256 | name if available. 257 | """ 258 | try: 259 | pyenc = codecs.lookup(pgenc.decode()).name 260 | except LookupError: 261 | pyenc = "utf-8" 262 | return value.decode(pyenc, errors=errors) 263 | 264 | def needs_password(exc: OperationalError) -> bool: 265 | if isinstance(exc, InvalidPassword): 266 | return True 267 | msg = str(exc) 268 | return ( 269 | msg.startswith("FATAL: password authentication failed for user") 270 | or "fe_sendauth: no password supplied" in msg 271 | ) 272 | 273 | 274 | __all__ = [ 275 | "__version__", 276 | "Connection", 277 | "FeatureNotSupported", 278 | "InterfaceError", 279 | "InvalidPassword", 280 | "InsufficientPrivilege", 281 | "OperationalError", 282 | "ProgrammingError", 283 | "QueryCanceled", 284 | "connect", 285 | "connection_parameters", 286 | "decode", 287 | "execute", 288 | "fetchall", 289 | "fetchone", 290 | "server_version", 291 | "sql", 292 | ] 293 | -------------------------------------------------------------------------------- /pgactivity/cli.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import argparse 4 | import logging 5 | import os 6 | import socket 7 | import sys 8 | import time 9 | from io import StringIO 10 | from typing import Any 11 | 12 | from blessed import Terminal 13 | 14 | from . import __version__, data, types, ui 15 | from .config import Configuration, ConfigurationError 16 | from .pg import OperationalError 17 | 18 | 19 | def configure_logger(debug_file: str | None = None) -> StringIO: 20 | logger = logging.getLogger("pgactivity") 21 | logger.setLevel(logging.DEBUG) 22 | 23 | # The stream handler is used to print all messages at exit.
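    # (Messages accumulate in a StringIO buffer because the fullscreen UI
    # takes over the terminal; main() prints the buffer to stderr in its
    # 'finally' clause so nothing is lost on exit.)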
24 | memory_string = StringIO() 25 | c_handler = logging.StreamHandler(memory_string) 26 | c_handler.setLevel(logging.INFO) 27 | c_handler.name = "stream_handler" 28 | 29 | c_format = logging.Formatter("%(levelname)s - %(message)s") 30 | c_handler.setFormatter(c_format) 31 | 32 | logger.addHandler(c_handler) 33 | 34 | if debug_file is not None: 35 | f_handler = logging.FileHandler(debug_file) 36 | f_handler.setLevel(logging.DEBUG) 37 | 38 | f_format = logging.Formatter( 39 | "%(asctime)s - %(name)s - %(levelname)s - %(message)s" 40 | ) 41 | f_handler.setFormatter(f_format) 42 | 43 | logger.addHandler(f_handler) 44 | 45 | return memory_string 46 | 47 | 48 | def flag(p: Any, spec: str, *, dest: str, feature: str) -> None: 49 | assert not spec.startswith("--no-") and spec.startswith("--"), spec 50 | p.add_argument( 51 | spec, 52 | dest=dest, 53 | help=f"Enable/disable {feature}.", 54 | action=argparse.BooleanOptionalAction, 55 | default=None, 56 | ) 57 | 58 | 59 | def get_parser(prog: str | None = None) -> argparse.ArgumentParser: 60 | parser = argparse.ArgumentParser( 61 | prog=prog, 62 | usage="%(prog)s [options] [connection string]", 63 | description=( 64 | "htop-like application for PostgreSQL server activity monitoring." 65 | ), 66 | epilog=( 67 | "The connection string can be in the form of a list of " 68 | "Key/Value parameters or a URI as described in the PostgreSQL documentation. " 69 | "The parsing is delegated to libpq: different versions of the client library " 70 | "may support different formats or parameters (for example, connection URIs are " 71 | "only supported from libpq 9.2)." 72 | ), 73 | add_help=False, 74 | ) 75 | 76 | group = parser.add_argument_group( 77 | "Configuration", 78 | ) 79 | group.add_argument( 80 | "-P", 81 | "--profile", 82 | help=( 83 | "Configuration profile matching a PROFILE.conf file in " 84 | "${XDG_CONFIG_HOME:~/.config}/pg_activity/ or /etc/pg_activity/, " 85 | "or a built-in profile." 86 | ), 87 | ) 88 | 89 | group = parser.add_argument_group( 90 | "Options", 91 | ) 92 | group.add_argument( 93 | "--blocksize", 94 | dest="blocksize", 95 | help="Filesystem blocksize (default: %(default)s).", 96 | metavar="BLOCKSIZE", 97 | type=int, 98 | default=4096, 99 | ) 100 | group.add_argument( 101 | "--rds", 102 | dest="rds", 103 | action="store_true", 104 | help="Enable support for AWS RDS (implies --no-tempfiles and filters out the rdsadmin database from space calculation).", 105 | default=False, 106 | ) 107 | group.add_argument( 108 | "--output", 109 | dest="output", 110 | help="Store running queries as CSV.", 111 | metavar="FILEPATH", 112 | default=None, 113 | ) 114 | flag(group, "--db-size", dest="dbsize", feature="total size of DB") 115 | flag(group, "--tempfiles", dest="tempfiles", feature="tempfile count and size") 116 | flag(group, "--walreceiver", dest="walreceiver", feature="walreceiver checks") 117 | group.add_argument( 118 | "-w", 119 | "--wrap-query", 120 | dest="wrap_query", 121 | action="store_true", 122 | help="Wrap query column instead of truncating.", 123 | default=False, 124 | ) 125 | group.add_argument( 126 | "--duration-mode", 127 | dest="durationmode", 128 | help="Duration mode.
Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND.", 129 | metavar="DURATION_MODE", 130 | choices=["1", "2", "3"], 131 | default="1", 132 | ) 133 | group.add_argument( 134 | "--min-duration", 135 | dest="minduration", 136 | help="Don't display queries with a duration smaller than the specified value (in seconds).", 137 | metavar="SECONDS", 138 | type=float, 139 | default=0, 140 | ) 141 | group.add_argument( 142 | "--filter", 143 | dest="filters", 144 | help=( 145 | "Filter activities with a (case-insensitive) regular expression applied on selected fields. " 146 | "Known fields are: dbname." 147 | ), 148 | action="append", 149 | metavar="FIELD:REGEX", 150 | default=[], 151 | ) 152 | group.add_argument( 153 | "--debug-file", 154 | dest="debug_file", 155 | metavar="DEBUG_FILE", 156 | help="Enable debug logging and write it to DEBUG_FILE.", 157 | default=None, 158 | ) 159 | group.add_argument( 160 | "--version", 161 | help="Show program's version number and exit.", 162 | action="version", 163 | version=f"%(prog)s {__version__}", 164 | ) 165 | group.add_argument( 166 | "--help", 167 | dest="help", 168 | action="store_true", 169 | help="Show this help message and exit.", 170 | default=False, 171 | ) 172 | 173 | group = parser.add_argument_group( 174 | "Connection Options", 175 | ) 176 | # Connection string 177 | group.add_argument( 178 | "connection_string", 179 | help=( 180 | "A valid connection string to the database, e.g.: " 181 | "'host=HOSTNAME port=PORT user=USER dbname=DBNAME'." 182 | ), 183 | nargs="?", 184 | default="", 185 | metavar="connection string", 186 | ) 187 | # -h / --host 188 | group.add_argument( 189 | "-h", 190 | "--host", 191 | dest="host", 192 | help="Database server host or socket directory.", 193 | metavar="HOSTNAME", 194 | ) 195 | # -p / --port 196 | group.add_argument( 197 | "-p", 198 | "--port", 199 | dest="port", 200 | help="Database server port.", 201 | metavar="PORT", 202 | ) 203 | # -U / --username 204 | group.add_argument( 205 | "-U", 206 | "--username", 207 | dest="username", 208 | help="Database user name.", 209 | metavar="USERNAME", 210 | ) 211 | # -d / --dbname 212 | group.add_argument( 213 | "-d", 214 | "--dbname", 215 | dest="dbname", 216 | help="Database name to connect to.", 217 | metavar="DBNAME", 218 | ) 219 | 220 | group = parser.add_argument_group( 221 | "Process table display options", 222 | "These options may be used to hide some columns from the processes table.", 223 | ) 224 | flag(group, "--pid", dest="pid", feature="PID") 225 | flag(group, "--xmin", dest="xmin", feature="XMIN") 226 | flag(group, "--database", dest="database", feature="DATABASE") 227 | flag(group, "--user", dest="user", feature="USER") 228 | flag(group, "--client", dest="client", feature="CLIENT") 229 | flag(group, "--cpu", dest="cpu", feature="CPU%%") 230 | flag(group, "--mem", dest="mem", feature="MEM%%") 231 | flag(group, "--read", dest="read", feature="READ/s") 232 | flag(group, "--write", dest="write", feature="WRITE/s") 233 | flag(group, "--time", dest="time", feature="TIME+") 234 | flag(group, "--wait", dest="wait", feature="W") 235 | flag(group, "--app-name", dest="appname", feature="APP") 236 | 237 | group = parser.add_argument_group("Header display options") 238 | group.add_argument( 239 | "--no-inst-info", 240 | dest="header_show_instance", 241 | action="store_false", 242 | help="Hide instance information.", 243 | default=None, 244 | ) 245 | group.add_argument( 246 | "--no-sys-info", 247 | dest="header_show_system", 248 | action="store_false", 249 | help="Hide system information.", 250 |
default=None, 251 | ) 252 | group.add_argument( 253 | "--no-proc-info", 254 | dest="header_show_workers", 255 | action="store_false", 256 | help="Hide worker process information.", 257 | default=None, 258 | ) 259 | 260 | group = parser.add_argument_group("Other display options") 261 | group.add_argument( 262 | "--hide-queries-in-logs", 263 | dest="hide_queries_in_logs", 264 | action="store_true", 265 | help="Disable log_min_duration_statement and log_min_duration_sample for pg_activity.", 266 | default=False, 267 | ) 268 | group.add_argument( 269 | "--refresh", 270 | dest="refresh", 271 | help="Refresh rate. Values: %(choices)s (default: %(default)d).", 272 | metavar="REFRESH", 273 | choices=[0.5, 1, 2, 3, 4, 5], 274 | type=float, 275 | default=2, 276 | ) 277 | 278 | return parser 279 | 280 | 281 | def main() -> None: 282 | if os.name != "posix": 283 | sys.exit("FATAL: Platform not supported.") 284 | 285 | parser = get_parser() 286 | args = parser.parse_args() 287 | memory_stream = configure_logger(args.debug_file) 288 | 289 | if args.help: 290 | parser.print_help() 291 | sys.exit(1) 292 | 293 | try: 294 | filters = types.Filters.from_options(args.filters) 295 | except ValueError as e: 296 | parser.error(str(e)) 297 | if args.rds: 298 | args.tempfiles = False 299 | 300 | try: 301 | cfg = Configuration.lookup(args.profile) 302 | except (ConfigurationError, FileNotFoundError) as e: 303 | parser.error(str(e)) 304 | 305 | try: 306 | dataobj = data.pg_connect(args, min_duration=args.minduration, filters=filters) 307 | except OperationalError as e: 308 | parser.exit(status=1, message=f"could not connect to PostgreSQL: {e}") 309 | hostname = socket.gethostname() 310 | conninfo = dataobj.pg_conn.info 311 | host = types.Host( 312 | hostname, 313 | conninfo.user, 314 | conninfo.host, 315 | int(conninfo.port), 316 | conninfo.dbname, 317 | ) 318 | 319 | term = Terminal() 320 | while True: 321 | try: 322 | ui.main(term, cfg, dataobj, host, args) 323 | except ConfigurationError as e: 324 | parser.exit(1, f"error: {e}") 325 | except OperationalError: 326 | while True: 327 | print(term.clear + term.home, end="") 328 | print("Connection to PostgreSQL lost, trying to reconnect...") 329 | try: 330 | time.sleep(5) 331 | except KeyboardInterrupt: 332 | sys.exit(1) 333 | newdataobj = dataobj.try_reconnect() 334 | if newdataobj is not None: 335 | dataobj = newdataobj 336 | break 337 | except KeyboardInterrupt: 338 | sys.exit(0) 339 | else: 340 | break 341 | finally: 342 | print(memory_stream.getvalue(), file=sys.stderr) 343 | -------------------------------------------------------------------------------- /pgactivity/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import base64 4 | import functools 5 | import re 6 | import sys 7 | from collections.abc import Iterable, Mapping 8 | from datetime import datetime, timedelta, timezone 9 | from typing import IO, Any 10 | 11 | import attr 12 | import humanize 13 | 14 | naturalsize = functools.partial(humanize.naturalsize, gnu=True, format="%.2f") 15 | try: 16 | precisedelta = humanize.precisedelta 17 | except AttributeError: # humanize < 2.6 18 | 19 | def naturaltimedelta(d: timedelta) -> str: 20 | """Render a timedelta with seconds truncated.
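    This fallback is only used with humanize < 2.6, which lacks precisedelta.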
21 | 22 | >>> d = timedelta(days=5, seconds=15182, microseconds=129198) 23 | >>> naturaltimedelta(d) 24 | '5 days, 4:13:00' 25 | >>> str(d) 26 | '5 days, 4:13:02.129198' 27 | """ 28 | d = timedelta(days=d.days, seconds=d.seconds // 60 * 60) 29 | return str(d) 30 | 31 | else: 32 | naturaltimedelta = functools.partial( 33 | humanize.precisedelta, 34 | minimum_unit="minutes", 35 | format="%.0f", 36 | suppress=["months", "years"], 37 | ) 38 | 39 | 40 | @attr.s(auto_attribs=True, frozen=True, slots=True) 41 | class MessagePile: 42 | """A pile of messages. 43 | 44 | >>> p = MessagePile(2) 45 | >>> p.send("hello") 46 | >>> p.get() 47 | 'hello' 48 | >>> p.send("world") 49 | >>> p.get() 50 | 'world' 51 | >>> p.get() 52 | 'world' 53 | >>> p.get() 54 | """ 55 | 56 | n: int 57 | messages: list[str] = attr.ib(default=attr.Factory(list), init=False) 58 | 59 | def send(self, message: str) -> None: 60 | self.messages[:] = [message] * self.n 61 | 62 | def get(self) -> str | None: 63 | if self.messages: 64 | return self.messages.pop() 65 | return None 66 | 67 | 68 | def yn(value: bool) -> str: 69 | """Return 'Y' or 'N' for a boolean value. 70 | 71 | >>> yn(True) 72 | 'Y' 73 | >>> yn(False) 74 | 'N' 75 | """ 76 | return "Y" if value else "N" 77 | 78 | 79 | def clean_str(string: str) -> str: 80 | r""" 81 | Strip and replace some special characters. 82 | 83 | >>> clean_str("\n") 84 | '' 85 | >>> clean_str("\n a a b b c \n\t\n c\v\n") 86 | 'a a b b c c' 87 | """ 88 | msg = str(string) 89 | msg = msg.replace("\n", " ") 90 | msg = re.sub(r"\s+", r" ", msg) 91 | msg = re.sub(r"^\s", r"", msg) 92 | msg = re.sub(r"\s$", r"", msg) 93 | return msg 94 | 95 | 96 | def ellipsis(v: str, width: int) -> str: 97 | """Shorten a string to the specified width with '...' in the middle. 98 | 99 | >>> ellipsis("loooooooooog", 7) 100 | 'lo...og' 101 | >>> ellipsis("loooooooooog", 6) 102 | 'lo...g' 103 | >>> ellipsis("short", 10) 104 | 'short' 105 | """ 106 | lv = len(v) 107 | if lv <= width: 108 | return v 109 | assert width >= 5 110 | wl = (width - 3) // 2 111 | return v[: wl + 1] + "..." + v[-wl:] 112 | 113 | 114 | def get_duration(duration: float | None) -> float: 115 | """Return 0 if the given duration is None or negative; otherwise return the duration. 116 | 117 | >>> get_duration(None) 118 | 0 119 | >>> get_duration(-10) 120 | 0 121 | >>> get_duration(12) 122 | 12.0 123 | """ 124 | if duration is None or float(duration) < 0: 125 | return 0 126 | return float(duration) 127 | 128 | 129 | @functools.lru_cache(maxsize=2) 130 | def format_duration(duration: float | None) -> tuple[str, str]: 131 | """Return a string for the 'duration' value along with the color for rendering.
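    Durations under one second render green; from one second up, an MM:SS.cc
    form is used, colored yellow below three seconds and red above, while
    durations of 60000 seconds or more fall back to a whole-hour display.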
132 | 133 | >>> format_duration(None) 134 | ('N/A ', 'green') 135 | >>> format_duration(-0.000062) 136 | ('0.000000', 'green') 137 | >>> format_duration(0.1) 138 | ('0.100000', 'green') 139 | >>> format_duration(1.2) 140 | ('00:01.20', 'yellow') 141 | >>> format_duration(12345) 142 | ('205:45.00', 'red') 143 | >>> format_duration(60001) 144 | ('16 h', 'red') 145 | """ 146 | if duration is None: 147 | return "N/A".ljust(8), "green" 148 | 149 | if duration < 1: 150 | if duration < 0: 151 | duration = 0 152 | ctime = f"{duration:.6f}" 153 | color = "green" 154 | elif duration < 60000: 155 | if duration < 3: 156 | color = "yellow" 157 | else: 158 | color = "red" 159 | duration_d = timedelta(seconds=float(duration)) 160 | mic = "%.6d" % duration_d.microseconds 161 | ctime = "{}:{}.{}".format( 162 | str(duration_d.seconds // 60).zfill(2), 163 | str(duration_d.seconds % 60).zfill(2), 164 | mic[:2], 165 | ) 166 | else: 167 | ctime = "%s h" % str(int(duration / 3600)) 168 | color = "red" 169 | 170 | return ctime, color 171 | 172 | 173 | def wait_status(value: None | bool | str) -> str: 174 | """Display the waiting status of query. 175 | 176 | >>> wait_status(None) 177 | '' 178 | >>> wait_status(False) 179 | 'N' 180 | >>> wait_status(True) 181 | 'Y' 182 | >>> wait_status("MultiXactTruncationLock") 183 | 'MultiXactTruncationLock' 184 | """ 185 | if isinstance(value, str): 186 | return value 187 | if isinstance(value, bool): 188 | return yn(value) 189 | return "" 190 | 191 | 192 | def short_state(state: str) -> str: 193 | """Return a short version of query state. 194 | 195 | >>> short_state("active") 196 | 'active' 197 | >>> short_state("idle in transaction") 198 | 'idle in trans' 199 | >>> short_state("idle in transaction (aborted)") 200 | 'idle in trans (a)' 201 | """ 202 | return { 203 | "idle in transaction": "idle in trans", 204 | "idle in transaction (aborted)": "idle in trans (a)", 205 | }.get(state, state) 206 | 207 | 208 | def osc52_copy(text: str) -> None: 209 | assert sys.__stderr__ is not None 210 | buffer = sys.__stderr__.buffer 211 | buffer.write(b";".join([b"\033]52", b"c", base64.b64encode(text.encode())]) + b"\a") 212 | buffer.flush() 213 | 214 | 215 | def csv_write( 216 | fobj: IO[str], 217 | procs: Iterable[Mapping[str, Any]], 218 | *, 219 | delimiter: str = ";", 220 | ) -> None: 221 | """Store process list into CSV file. 222 | 223 | >>> processes = [ 224 | ... {'pid': 25199, 'xmin': 1234, 'application_name': '', 'database': 'pgbench', 'user': None, 225 | ... 'client': 'local', 'cpu': 0.0, 'mem': 0.6504979545924837, 226 | ... 'read': 0.0, 'write': 0.0, 'state': 'active', 227 | ... 'query': 'autovacuum: VACUUM ANALYZE public.pgbench_tellers', 228 | ... 'duration': 0.348789, 'wait': False, 229 | ... 'io_wait': False, 'is_parallel_worker': False}, 230 | ... {'pid': 25068, 'xmin': 2345, 'application_name': 'pgbench', 'database': None, 231 | ... 'user': 'postgres', 'client': 'local', 'cpu': 0.0, 'mem': 2.4694780629380646, 232 | ... 'read': 278536.76590087387, 'write': 835610.2977026217, 233 | ... 'state': 'idle in transaction', 234 | ... 'query': 'INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (625, 87, 4368910, -341, CURRENT_TIMESTAMP);', 235 | ... 'duration': 0.000105, 'wait': False, 'io_wait': False, 236 | ... 'is_parallel_worker': False}, 237 | ... {'pid': 25379, 'xmin': 3456, 'application_name': 'pgbench', 'database': 'pgbench', 238 | ... 'user': 'postgres', 'client': 'local', 'state': 'active', 239 | ... 
'query': 'UPDATE pgbench_branches SET bbalance = bbalance + -49 WHERE bid = 73;', 240 | ... 'duration': 0, 'wait': False}, 241 | ... {'pid': 25392, 'xmin': 4567, 'application_name': 'pgbench', 'database': 'pgbench', 242 | ... 'user': 'postgres', 'client': 'local', 'state': 'active', 243 | ... 'query': 'BEGIN;', 'duration': 0, 'wait': False} 244 | ... ] 245 | >>> import tempfile 246 | >>> with tempfile.NamedTemporaryFile(mode='w+') as f: 247 | ... csv_write(f, processes[:2]) 248 | ... csv_write(f, processes[2:]) 249 | ... _ = f.seek(0) 250 | ... content = f.read() 251 | >>> print(content, end="") # doctest: +ELLIPSIS 252 | datetimeutc;pid;xmin;database;appname;user;client;cpu;memory;read;write;duration;wait;io_wait;state;query 253 | "...-...-...T...Z";"25199";"1234";"pgbench";"";"None";"local";"0.0";"0.6504979545924837";"0.0";"0.0";"0.348789";"N";"N";"active";"autovacuum: VACUUM ANALYZE public.pgbench_tellers" 254 | "...-...-...T...Z";"25068";"2345";"";"pgbench";"postgres";"local";"0.0";"2.4694780629380646";"278536.76590087387";"835610.2977026217";"0.000105";"N";"N";"idle in transaction";"INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (625, 87, 4368910, -341, CURRENT_TIMESTAMP);" 255 | "...-...-...T...Z";"25379";"3456";"pgbench";"pgbench";"postgres";"local";"N/A";"N/A";"N/A";"N/A";"0";"N";"N/A";"active";"UPDATE pgbench_branches SET bbalance = bbalance + -49 WHERE bid = 73;" 256 | "...-...-...T...Z";"25392";"4567";"pgbench";"pgbench";"postgres";"local";"N/A";"N/A";"N/A";"N/A";"0";"N";"N/A";"active";"BEGIN;" 257 | """ 258 | 259 | def clean_str_csv(s: str) -> str: 260 | return clean_str(s).replace('"', '\\"') 261 | 262 | if fobj.tell() == 0: 263 | # First line then write CSV header 264 | fobj.write( 265 | delimiter.join( 266 | [ 267 | "datetimeutc", 268 | "pid", 269 | "xmin", 270 | "database", 271 | "appname", 272 | "user", 273 | "client", 274 | "cpu", 275 | "memory", 276 | "read", 277 | "write", 278 | "duration", 279 | "wait", 280 | "io_wait", 281 | "state", 282 | "query", 283 | ] 284 | ) 285 | + "\n" 286 | ) 287 | 288 | def yn_na(value: bool | None) -> str: 289 | if value is None: 290 | return "N/A" 291 | return yn(value) 292 | 293 | for p in procs: 294 | dt = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") 295 | pid = p.get("pid", "N/A") 296 | xmin = p.get("xmin", "N/A") 297 | database = p.get("database", "N/A") or "" 298 | appname = p.get("application_name", "N/A") 299 | user = p.get("user", "N/A") 300 | client = p.get("client", "N/A") 301 | cpu = p.get("cpu", "N/A") 302 | mem = p.get("mem", "N/A") 303 | read = p.get("read", "N/A") 304 | write = p.get("write", "N/A") 305 | duration = p.get("duration", "N/A") 306 | wait = yn_na(p.get("wait")) 307 | io_wait = yn_na(p.get("io_wait")) 308 | state = p.get("state", "N/A") 309 | query = clean_str_csv(p.get("query", "N/A")) 310 | fobj.write( 311 | delimiter.join( 312 | [ 313 | f'"{dt}"', 314 | f'"{pid}"', 315 | f'"{xmin}"', 316 | f'"{database}"', 317 | f'"{appname}"', 318 | f'"{user}"', 319 | f'"{client}"', 320 | f'"{cpu}"', 321 | f'"{mem}"', 322 | f'"{read}"', 323 | f'"{write}"', 324 | f'"{duration}"', 325 | f'"{wait}"', 326 | f'"{io_wait}"', 327 | f'"{state}"', 328 | f'"{query}"', 329 | ] 330 | ) 331 | + "\n" 332 | ) 333 | -------------------------------------------------------------------------------- /pgactivity/ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import time 4 | from argparse import Namespace 5 | from functools import 
partial 6 | from typing import Any, cast 7 | 8 | import attr 9 | from blessed import Terminal 10 | 11 | from . import __version__, activities, handlers, keys, types, utils, views, widgets 12 | from .config import Configuration, Flag 13 | from .data import Data 14 | 15 | 16 | def main( 17 | term: Terminal, 18 | config: Configuration | None, 19 | data: Data, 20 | host: types.Host, 21 | options: Namespace, 22 | *, 23 | render_header: bool = True, 24 | render_footer: bool = True, 25 | width: int | None = None, 26 | wait_on_actions: float | None = None, 27 | ) -> None: 28 | fs_blocksize = options.blocksize 29 | 30 | is_local = data.pg_is_local() and data.pg_is_local_access() 31 | 32 | skip_db_size = not options.dbsize 33 | server_information = data.pg_get_server_information( 34 | prev_server_info=None, 35 | using_rds=options.rds, 36 | skip_db_size=skip_db_size, 37 | skip_tempfile=not options.tempfiles, 38 | skip_walreceiver=not options.walreceiver, 39 | ) 40 | 41 | flag = Flag.load(config, is_local=is_local, **vars(options)) 42 | ui = types.UI.make( 43 | header=types.UIHeader.make( 44 | config.header() if config else None, 45 | show_instance=options.header_show_instance, 46 | show_system=options.header_show_system, 47 | show_workers=options.header_show_workers, 48 | ), 49 | config=config, 50 | flag=flag, 51 | refresh_time=options.refresh, 52 | min_duration=options.minduration, 53 | duration_mode=int(options.durationmode), 54 | wrap_query=options.wrap_query, 55 | max_db_length=min(max(server_information.max_dbname_length, 8), 16), 56 | filters=data.filters, 57 | ) 58 | 59 | key, in_help = None, False 60 | sys_procs: dict[int, types.SystemProcess] = {} 61 | pg_procs = types.SelectableProcesses([]) 62 | activity_stats: types.ActivityStats 63 | 64 | msg_pile = utils.MessagePile(2) 65 | 66 | with term.fullscreen(), term.cbreak(), term.hidden_cursor(): 67 | while True: 68 | if key == keys.HELP: 69 | in_help = True 70 | elif in_help and key is not None: 71 | in_help, key = False, None 72 | print(term.clear + term.home, end="") 73 | elif key == keys.EXIT: 74 | break 75 | elif not ui.interactive() and key == keys.SPACE: 76 | ui.toggle_pause() 77 | elif not options.dbsize and key == keys.REFRESH_DB_SIZE: 78 | skip_db_size = False 79 | elif key is not None: 80 | if keys.is_process_next(key): 81 | if pg_procs.focus_next(): 82 | ui.start_interactive() 83 | elif keys.is_process_prev(key): 84 | if pg_procs.focus_prev(): 85 | ui.start_interactive() 86 | elif keys.is_process_nextpage(key): 87 | if pg_procs.focus_next(term.height // 3): 88 | ui.start_interactive() 89 | elif keys.is_process_prevpage(key): 90 | if pg_procs.focus_prev(term.height // 3): 91 | ui.start_interactive() 92 | elif keys.is_process_first(key): 93 | if pg_procs.focus_first(): 94 | ui.start_interactive() 95 | elif keys.is_process_last(key): 96 | if pg_procs.focus_last(): 97 | ui.start_interactive() 98 | elif key == keys.SPACE: 99 | pg_procs.toggle_pin_focused() 100 | elif key == keys.COPY_TO_CLIPBOARD: 101 | msg = pg_procs.copy_focused_query_to_clipboard() 102 | msg_pile.send(msg) 103 | elif key.name == keys.CANCEL_SELECTION: 104 | pg_procs.reset() 105 | ui.end_interactive() 106 | elif keys.is_toggle_header_system(key): 107 | ui.header.toggle_system() 108 | elif keys.is_toggle_header_instance(key): 109 | ui.header.toggle_instance() 110 | elif keys.is_toggle_header_workers(key): 111 | ui.header.toggle_workers() 112 | elif pg_procs.selected and key in ( 113 | keys.PROCESS_CANCEL, 114 | keys.PROCESS_KILL, 115 | ): 116 | action, color = { 117 | 
keys.PROCESS_CANCEL: ("cancel", "yellow"), 118 | keys.PROCESS_KILL: ("terminate", "red"), 119 | }[key] 120 | action_formatter = getattr(term, color) 121 | pids = pg_procs.selected 122 | if len(pids) > 1: 123 | ptitle = f"processes {', '.join(str(p) for p in pids)}" 124 | else: 125 | ptitle = f"process {pids[0]}" 126 | with term.location(x=0, y=term.height // 3): 127 | print( 128 | widgets.boxed( 129 | term, 130 | f"Confirm {action_formatter(action)} action on {ptitle}? (y/n)", 131 | border_color=color, 132 | center=True, 133 | width=width, 134 | ), 135 | end="", 136 | ) 137 | confirm_key = term.inkey(timeout=None) 138 | if confirm_key.lower() == "y": 139 | if action == "cancel": 140 | for pid in pids: 141 | data.pg_cancel_backend(pid) 142 | msg_pile.send( 143 | action_formatter(f"{ptitle.capitalize()} cancelled") 144 | ) 145 | elif action == "terminate": 146 | for pid in pids: 147 | data.pg_terminate_backend(pid) 148 | msg_pile.send( 149 | action_formatter(f"{ptitle.capitalize()} terminated") 150 | ) 151 | pg_procs.reset() 152 | ui.end_interactive() 153 | if wait_on_actions: 154 | # Used in tests. 155 | time.sleep(wait_on_actions) 156 | else: 157 | pg_procs.reset() 158 | ui.end_interactive() 159 | changes: dict[str, Any] = { 160 | "duration_mode": handlers.duration_mode(key, ui.duration_mode), 161 | "wrap_query": handlers.wrap_query(key, ui.wrap_query), 162 | } 163 | if key in (keys.REFRESH_TIME_INCREASE, keys.REFRESH_TIME_DECREASE): 164 | changes["refresh_time"] = handlers.refresh_time( 165 | key, ui.refresh_time 166 | ) 167 | query_mode = handlers.query_mode(key) 168 | if query_mode is not None: 169 | changes["query_mode"] = query_mode 170 | else: 171 | query_mode = ui.query_mode 172 | sort_key = handlers.sort_key_for(key, query_mode, flag) 173 | if sort_key is not None: 174 | changes["sort_key"] = sort_key 175 | ui.evolve(**changes) 176 | 177 | if in_help: 178 | # Only draw help screen once. 179 | if key is not None: 180 | print(term.clear + term.home, end="") 181 | views.help( 182 | term, 183 | __version__, 184 | is_local, 185 | lines_counter=views.line_counter(term.height), 186 | ) 187 | 188 | else: 189 | if not ui.in_pause and not ui.interactive(): 190 | if not options.dbsize and not skip_db_size: 191 | skip_db_size = True 192 | 193 | server_information = data.pg_get_server_information( 194 | prev_server_info=server_information, 195 | using_rds=options.rds, 196 | skip_db_size=skip_db_size, 197 | skip_tempfile=not options.tempfiles, 198 | skip_walreceiver=not options.walreceiver, 199 | ) 200 | memory, swap, load = activities.mem_swap_load() 201 | system_info = types.SystemInfo.default( 202 | memory=memory, swap=swap, load=load 203 | ) 204 | 205 | if is_local: 206 | memory, swap, load = activities.mem_swap_load() 207 | system_info = attr.evolve( 208 | system_info, 209 | memory=memory, 210 | swap=swap, 211 | load=load, 212 | ) 213 | 214 | if ui.query_mode == types.QueryMode.activities: 215 | pg_procs.set_items(data.pg_get_activities(ui.duration_mode)) 216 | if is_local: 217 | # TODO: Use this logic in waiting and blocking cases. 
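                        # ps_complete() joins each backend with its psutil
                        # process to derive CPU%, MEM% and per-second I/O
                        # deltas against the previous snapshot in 'sys_procs'.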
218 | local_pg_procs, io_read, io_write = activities.ps_complete( 219 | cast(list[types.RunningProcess], pg_procs.items), 220 | sys_procs, 221 | fs_blocksize, 222 | ) 223 | system_info = attr.evolve( 224 | system_info, 225 | io_read=io_read, 226 | io_write=io_write, 227 | max_iops=activities.update_max_iops( 228 | system_info.max_iops, 229 | io_read.count, 230 | io_write.count, 231 | ), 232 | ) 233 | pg_procs.set_items(local_pg_procs) 234 | 235 | else: 236 | if ui.query_mode == types.QueryMode.blocking: 237 | pg_procs.set_items(data.pg_get_blocking(ui.duration_mode)) 238 | elif ui.query_mode == types.QueryMode.waiting: 239 | pg_procs.set_items(data.pg_get_waiting(ui.duration_mode)) 240 | else: 241 | assert False # help type checking 242 | 243 | activity_stats = (pg_procs, system_info) if is_local else pg_procs # type: ignore[assignment] 244 | 245 | if options.output is not None: 246 | custom_asdict = partial(attr.asdict, recurse=True) 247 | with open(options.output, "a") as f: 248 | utils.csv_write(f, map(custom_asdict, pg_procs.items)) 249 | 250 | views.screen( 251 | term, 252 | ui, 253 | host=host, 254 | pg_version=data.pg_version, 255 | server_information=server_information, 256 | activity_stats=activity_stats, 257 | message=msg_pile.get(), 258 | render_header=render_header, 259 | render_footer=render_footer, 260 | width=width, 261 | ) 262 | 263 | if ui.interactive(): 264 | if not pg_procs.pinned: 265 | ui.tick_interactive() 266 | elif pg_procs.selected: 267 | pg_procs.reset() 268 | 269 | key = term.inkey(timeout=ui.refresh_time) or None 270 | -------------------------------------------------------------------------------- /pgactivity/activities.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import builtins 4 | import os 5 | import time 6 | from collections.abc import Sequence 7 | from typing import TypeVar 8 | from warnings import catch_warnings, simplefilter 9 | 10 | import attr 11 | import psutil 12 | 13 | from .types import ( 14 | BlockingProcess, 15 | IOCounter, 16 | LoadAverage, 17 | LocalRunningProcess, 18 | MemoryInfo, 19 | RunningProcess, 20 | SortKey, 21 | SwapInfo, 22 | SystemProcess, 23 | WaitingProcess, 24 | ) 25 | 26 | 27 | def sys_get_proc(pid: int) -> SystemProcess | None: 28 | """Return a SystemProcess instance matching given pid or None if access with psutil 29 | is not possible. 
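    Both psutil.NoSuchProcess and psutil.AccessDenied are mapped to None, since
    a backend may exit or become inaccessible between two refreshes.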
30 | """ 31 | try: 32 | psproc = psutil.Process(pid) 33 | meminfo = psproc.memory_info() 34 | mem_percent = psproc.memory_percent() 35 | cpu_percent = psproc.cpu_percent(interval=0) 36 | cpu_times = psproc.cpu_times() 37 | io_counters = psproc.io_counters() 38 | status_iow = str(psproc.status()) 39 | except (psutil.NoSuchProcess, psutil.AccessDenied): 40 | return None 41 | 42 | return SystemProcess( 43 | meminfo=meminfo, 44 | io_read=IOCounter(io_counters.read_count, io_counters.read_bytes), 45 | io_write=IOCounter(io_counters.write_count, io_counters.write_bytes), 46 | io_time=time.time(), 47 | mem_percent=mem_percent, 48 | cpu_percent=cpu_percent, 49 | cpu_times=cpu_times, 50 | read_delta=0, 51 | write_delta=0, 52 | io_wait=(status_iow == psutil.STATUS_DISK_SLEEP), 53 | psutil_proc=psproc, 54 | ) 55 | 56 | 57 | def ps_complete( 58 | pg_processes: Sequence[RunningProcess], 59 | processes: dict[int, SystemProcess], 60 | fs_blocksize: int, 61 | ) -> tuple[list[LocalRunningProcess], IOCounter, IOCounter]: 62 | """Transform the sequence of 'pg_processes' (RunningProcess) as LocalRunningProcess 63 | with local system information from the 'processes' map. Return LocalRunningProcess 64 | list, as well as read and write IO counters. 65 | 66 | The 'processes' map is updated in place. 67 | """ 68 | local_procs = [] 69 | read_bytes_delta = 0.0 70 | write_bytes_delta = 0.0 71 | read_count_delta = 0 72 | write_count_delta = 0 73 | n_io_time = time.time() 74 | for pg_proc in pg_processes: 75 | pid = pg_proc.pid 76 | new_proc = sys_get_proc(pid) 77 | if new_proc is None: 78 | continue 79 | try: 80 | # Getting information from the previous loop 81 | proc = processes[pid] 82 | except KeyError: 83 | # No previous information about this process 84 | proc = new_proc 85 | else: 86 | # Update old process with new information 87 | mem_percent = proc.mem_percent 88 | cpu_percent = proc.cpu_percent 89 | if proc.psutil_proc is not None: 90 | try: 91 | mem_percent = proc.psutil_proc.memory_percent() 92 | cpu_percent = proc.psutil_proc.cpu_percent(interval=0) 93 | except (psutil.NoSuchProcess, psutil.AccessDenied): 94 | pass 95 | proc = attr.evolve( 96 | proc, 97 | io_wait=new_proc.io_wait, 98 | read_delta=( 99 | (new_proc.io_read.bytes - proc.io_read.bytes) 100 | / (n_io_time - proc.io_time) 101 | ), 102 | write_delta=( 103 | (new_proc.io_write.bytes - proc.io_write.bytes) 104 | / (n_io_time - proc.io_time) 105 | ), 106 | io_read=new_proc.io_read, 107 | io_write=new_proc.io_write, 108 | io_time=n_io_time, 109 | mem_percent=mem_percent, 110 | cpu_percent=cpu_percent, 111 | ) 112 | 113 | # Global io counters 114 | read_bytes_delta += proc.read_delta 115 | write_bytes_delta += proc.write_delta 116 | 117 | processes[pid] = proc 118 | 119 | local_procs.append( 120 | LocalRunningProcess.from_process( 121 | pg_proc, 122 | cpu=proc.cpu_percent, 123 | mem=proc.mem_percent, 124 | read=proc.read_delta, 125 | write=proc.write_delta, 126 | io_wait=proc.io_wait, 127 | ) 128 | ) 129 | 130 | # store io counters 131 | if read_bytes_delta > 0: 132 | read_count_delta += int(read_bytes_delta / fs_blocksize) 133 | if write_bytes_delta > 0: 134 | write_count_delta += int(write_bytes_delta / fs_blocksize) 135 | 136 | io_read = IOCounter(count=read_count_delta, bytes=int(read_bytes_delta)) 137 | io_write = IOCounter(count=write_count_delta, bytes=int(write_bytes_delta)) 138 | 139 | return local_procs, io_read, io_write 140 | 141 | 142 | T = TypeVar("T", RunningProcess, WaitingProcess, BlockingProcess, LocalRunningProcess) 143 | 144 | 145 
| def sorted(processes: list[T], *, key: SortKey, reverse: bool = False) -> list[T]: 146 | """Return processes sorted. 147 | 148 | >>> from ipaddress import IPv4Interface, ip_address 149 | 150 | PostgreSQL 13+ 151 | >>> processes = [ 152 | ... LocalRunningProcess( 153 | ... pid="6240", 154 | ... xmin="1234", 155 | ... application_name="pgbench", 156 | ... database="pgbench", 157 | ... user="postgres", 158 | ... client=ip_address("127.0.0.2"), 159 | ... cpu=0.1, 160 | ... mem=0.993_254_939_413_836, 161 | ... read=0.1, 162 | ... write=0.282_725_318_098_656_75, 163 | ... state="idle in transaction", 164 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 3062 WHERE aid = 1932841;", 165 | ... encoding="UTF-8", 166 | ... duration=0.1, 167 | ... wait="ClientRead", 168 | ... io_wait=False, 169 | ... query_leader_pid=6240, 170 | ... is_parallel_worker=False, 171 | ... ), 172 | ... LocalRunningProcess( 173 | ... pid="6239", 174 | ... xmin="2345", 175 | ... application_name="pgbench", 176 | ... database="pgbench", 177 | ... user="postgres", 178 | ... client=IPv4Interface("192.0.2.5/24"), 179 | ... cpu=0.1, 180 | ... mem=0.994_254_939_413_836, 181 | ... read=0.1, 182 | ... write=0.282_725_318_098_656_75, 183 | ... state="idle in transaction", 184 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 141 WHERE aid = 7289374;", 185 | ... encoding=None, 186 | ... duration=0.1, 187 | ... wait="ClientRead", 188 | ... io_wait=False, 189 | ... query_leader_pid=6239, 190 | ... is_parallel_worker=False, 191 | ... ), 192 | ... LocalRunningProcess( 193 | ... pid="6228", 194 | ... xmin="3456", 195 | ... application_name="pgbench", 196 | ... database="pgbench", 197 | ... user="postgres", 198 | ... client=ip_address("2001:db8::"), 199 | ... cpu=0.2, 200 | ... mem=1.024_758_418_061_11, 201 | ... read=0.2, 202 | ... write=0.113_090_128_201_154_74, 203 | ... state="active", 204 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 3062 WHERE aid = 1932841;", 205 | ... encoding="UTF-8", 206 | ... duration=0.1, 207 | ... wait=False, 208 | ... io_wait=False, 209 | ... query_leader_pid=6240, 210 | ... is_parallel_worker=True, 211 | ... ), 212 | ... ] 213 | 214 | >>> processes = sorted(processes, key=SortKey.cpu, reverse=True) 215 | >>> [p.pid for p in processes] 216 | ['6228', '6240', '6239'] 217 | >>> processes = sorted(processes, key=SortKey.mem) 218 | >>> [p.pid for p in processes] 219 | ['6240', '6239', '6228'] 220 | 221 | When using the 'duration' sort key, processes are also sorted by ascending 222 | (query_leader_pid, is_parallel_worker). 223 | >>> processes = sorted(processes, key=SortKey.duration, reverse=True) 224 | >>> [p.pid for p in processes] 225 | ['6239', '6240', '6228'] 226 | 227 | PostgreSQL 12- (query_leader_pid is None) 228 | >>> processes = [ 229 | ... LocalRunningProcess( 230 | ... pid="6240", 231 | ... xmin="1234", 232 | ... application_name="pgbench", 233 | ... database="pgbench", 234 | ... user="postgres", 235 | ... client=ip_address("192.168.1.2"), 236 | ... cpu=0.1, 237 | ... mem=0.993_254_939_413_836, 238 | ... read=0.1, 239 | ... write=0.282_725_318_098_656_75, 240 | ... state="idle in transaction", 241 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 3062 WHERE aid = 1932841;", 242 | ... encoding=None, 243 | ... duration=0.1, 244 | ... wait="ClientRead", 245 | ... io_wait=False, 246 | ... query_leader_pid=None, 247 | ... is_parallel_worker=False, 248 | ... ), 249 | ... LocalRunningProcess( 250 | ... pid="6239", 251 | ... xmin="2345", 252 | ... 
application_name="pgbench", 253 | ... database="pgbench", 254 | ... user="postgres", 255 | ... client=ip_address("0000:0000:0000:0000:0000:0abc:0007:0def"), 256 | ... cpu=0.1, 257 | ... mem=0.994_254_939_413_836, 258 | ... read=0.1, 259 | ... write=0.282_725_318_098_656_75, 260 | ... state="idle in transaction", 261 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 141 WHERE aid = 7289374;", 262 | ... encoding="UTF-8", 263 | ... duration=0.1, 264 | ... wait="ClientRead", 265 | ... io_wait=False, 266 | ... query_leader_pid=None, 267 | ... is_parallel_worker=False, 268 | ... ), 269 | ... LocalRunningProcess( 270 | ... pid="6228", 271 | ... xmin="3456", 272 | ... application_name="pgbench", 273 | ... database="pgbench", 274 | ... user="postgres", 275 | ... client=None, 276 | ... cpu=0.2, 277 | ... mem=1.024_758_418_061_11, 278 | ... read=0.2, 279 | ... write=0.113_090_128_201_154_74, 280 | ... state="active", 281 | ... query="UPDATE pgbench_accounts SET abalance = abalance + 3062 WHERE aid = 1932841;", 282 | ... encoding="latin1", 283 | ... duration=0.1, 284 | ... wait=False, 285 | ... io_wait=False, 286 | ... query_leader_pid=None, 287 | ... is_parallel_worker=True, 288 | ... ), 289 | ... ] 290 | 291 | >>> processes = sorted(processes, key=SortKey.duration, reverse=True) 292 | >>> [p.pid for p in processes] 293 | ['6240', '6239', '6228'] 294 | """ 295 | 296 | # If we filter by duration, we also need to filter by ascending 297 | # (query_leader_pid, is_parallel_worker): 298 | # * for pg13+: query_leader_pid = coalesce(leader_pid, pid) 299 | # * for pg12-: query_leader_pid = Null / None 300 | # Note: parallel_worker have the same "duration" as their leader. 301 | if key == SortKey.duration: 302 | processes = builtins.sorted( 303 | processes, 304 | key=lambda p: (p.query_leader_pid, p.is_parallel_worker), 305 | reverse=False, 306 | ) 307 | 308 | return builtins.sorted( 309 | processes, 310 | key=lambda p: getattr(p, key.name) or 0, # TODO: avoid getattr() 311 | reverse=reverse, 312 | ) 313 | 314 | 315 | def update_max_iops(max_iops: int, read_count: float, write_count: float) -> int: 316 | """Update 'max_iops' value from read_count/write_count. 317 | 318 | >>> update_max_iops(45657, 123, 888) 319 | 45657 320 | >>> update_max_iops(3, 123, 888) 321 | 1011 322 | """ 323 | return max(int(read_count + write_count), max_iops) 324 | 325 | 326 | def get_load_average() -> tuple[float, float, float]: 327 | """Get load average""" 328 | return os.getloadavg() 329 | 330 | 331 | def get_mem_swap() -> tuple[MemoryInfo, SwapInfo]: 332 | """Get memory and swap usage""" 333 | with catch_warnings(): 334 | simplefilter("ignore", RuntimeWarning) 335 | phymem = psutil.virtual_memory() 336 | vmem = psutil.swap_memory() 337 | # 'buffers' and 'cached' attributes are not available on some systems (e.g. 
OSX) 338 | buffers = getattr(phymem, "buffers", 0) 339 | cached = getattr(phymem, "cached", 0) 340 | mem_used = phymem.total - (phymem.free + buffers + cached) 341 | return ( 342 | MemoryInfo(mem_used, buffers + cached, phymem.free, phymem.total), 343 | SwapInfo(vmem.used, vmem.free, vmem.total), 344 | ) 345 | 346 | 347 | def mem_swap_load() -> tuple[MemoryInfo, SwapInfo, LoadAverage]: 348 | """Return memory, swap and load average information for the local system.""" 349 | memory, swap = get_mem_swap() 350 | load = LoadAverage(*get_load_average()) 351 | return memory, swap, load 352 | -------------------------------------------------------------------------------- /pgactivity/config.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import configparser 4 | import enum 5 | import importlib.resources 6 | import io 7 | import os 8 | from collections.abc import ItemsView 9 | from pathlib import Path 10 | from typing import IO, Any, TypeVar, Union 11 | 12 | import attr 13 | from attr import validators 14 | 15 | from .compat import gt 16 | 17 | 18 | def read_resource(pkgname: str, dirname: str, *args: str) -> str | None: 19 | resource = importlib.resources.files(pkgname).joinpath(dirname) 20 | for arg in args: 21 | resource = resource.joinpath(arg) 22 | if resource.is_file(): 23 | return resource.read_text() 24 | return None 25 | 26 | 27 | class ConfigurationError(Exception): 28 | def __init__(self, filename: str, *args: Any) -> None: 29 | super().__init__(*args) 30 | self.filename = filename 31 | 32 | @property 33 | def message(self) -> str: 34 | return super().__str__() 35 | 36 | def __str__(self) -> str: 37 | return f"invalid configuration '{self.filename}': {self.message}" 38 | 39 | 40 | class InvalidSection(ConfigurationError): 41 | def __init__(self, section: str, *args: Any) -> None: 42 | super().__init__(*args) 43 | self.section = section 44 | 45 | @property 46 | def message(self) -> str: 47 | return f"invalid section '{self.section}'" 48 | 49 | 50 | class InvalidOptions(ConfigurationError): 51 | def __init__(self, section: str, message: str, *args: Any) -> None: 52 | super().__init__(*args) 53 | self.section = section 54 | self._message = message 55 | 56 | @property 57 | def message(self) -> str: 58 | return f"invalid option(s) in '{self.section}': {self._message}" 59 | 60 | 61 | class Flag(enum.Flag): 62 | """Column flag.
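    Each member selects one column of the processes table; Flag.load() below
    combines the configuration file with command-line options to decide which
    columns are displayed.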
63 | 64 | >>> Flag.names() 65 | ['database', 'appname', 'client', 'user', 'cpu', 'mem', 'read', 'write', 'time', 'wait', 'relation', 'type', 'mode', 'iowait', 'pid', 'xmin'] 66 | >>> Flag.all() # doctest: +ELLIPSIS 67 | <Flag...: 65535> 68 | """ 69 | 70 | DATABASE = enum.auto() 71 | APPNAME = enum.auto() 72 | CLIENT = enum.auto() 73 | USER = enum.auto() 74 | CPU = enum.auto() 75 | MEM = enum.auto() 76 | READ = enum.auto() 77 | WRITE = enum.auto() 78 | TIME = enum.auto() 79 | WAIT = enum.auto() 80 | RELATION = enum.auto() 81 | TYPE = enum.auto() 82 | MODE = enum.auto() 83 | IOWAIT = enum.auto() 84 | PID = enum.auto() 85 | XMIN = enum.auto() 86 | 87 | @classmethod 88 | def names(cls) -> list[str]: 89 | rv = [] 90 | for f in cls: 91 | assert f.name 92 | rv.append(f.name.lower()) 93 | return rv 94 | 95 | @classmethod 96 | def all(cls) -> Flag: 97 | value = cls(0) 98 | for f in cls: 99 | value |= f 100 | return value 101 | 102 | @classmethod 103 | def from_config(cls, config: Configuration) -> Flag: 104 | value = cls(0) 105 | for f in cls: 106 | assert f.name is not None 107 | try: 108 | cfg = config[f.name.lower()] 109 | except KeyError: 110 | pass 111 | else: 112 | assert isinstance(cfg, UISection) 113 | if cfg.hidden: 114 | continue 115 | value |= f 116 | return value 117 | 118 | @classmethod 119 | def load( 120 | cls, 121 | config: Configuration | None, 122 | *, 123 | is_local: bool, 124 | appname: bool | None, 125 | client: bool | None, 126 | cpu: bool | None, 127 | database: bool | None, 128 | mem: bool | None, 129 | pid: bool | None, 130 | read: bool | None, 131 | time: bool | None, 132 | user: bool | None, 133 | wait: bool | None, 134 | write: bool | None, 135 | xmin: bool | None, 136 | **kwargs: Any, 137 | ) -> Flag: 138 | """Build a Flag value from command line options.""" 139 | if config: 140 | flag = cls.from_config(config) 141 | else: 142 | flag = cls.all() 143 | for opt, value in ( 144 | (appname, cls.APPNAME), 145 | (client, cls.CLIENT), 146 | (cpu, cls.CPU), 147 | (database, cls.DATABASE), 148 | (mem, cls.MEM), 149 | (pid, cls.PID), 150 | (read, cls.READ), 151 | (time, cls.TIME), 152 | (user, cls.USER), 153 | (wait, cls.WAIT), 154 | (write, cls.WRITE), 155 | (xmin, cls.XMIN), 156 | ): 157 | if opt is True: 158 | flag |= value 159 | elif opt is False: 160 | flag &= ~value 161 | # Remove some flags if not running against a local PG server. 162 | if not is_local and (flag & cls.CPU): 163 | flag ^= cls.CPU 164 | if not is_local and (flag & cls.MEM): 165 | flag ^= cls.MEM 166 | if not is_local and (flag & cls.READ): 167 | flag ^= cls.READ 168 | if not is_local and (flag & cls.WRITE): 169 | flag ^= cls.WRITE 170 | if not is_local and (flag & cls.IOWAIT): 171 | flag ^= cls.IOWAIT 172 | return flag 173 | 174 | 175 | class BaseSectionMixin: 176 | @classmethod 177 | def check_options( 178 | cls: type[attr.AttrsInstance], section: configparser.SectionProxy 179 | ) -> list[str]: 180 | """Check that items of 'section' conform to known attributes of this class and 181 | return the list of known options.
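        For example (illustrative):

        >>> import configparser
        >>> cp = configparser.ConfigParser()
        >>> cp.read_dict({"client": {"hidden": "on"}})
        >>> UISection.check_options(cp["client"])
        ['color', 'hidden', 'width']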
182 | """ 183 | known_options = {f.name for f in attr.fields(cls)} 184 | unknown_options = set(section) - set(known_options) 185 | if unknown_options: 186 | raise ValueError(f"invalid option(s): {', '.join(sorted(unknown_options))}") 187 | return list(sorted(known_options)) 188 | 189 | 190 | @attr.s(auto_attribs=True, frozen=True, slots=True) 191 | class HeaderSection(BaseSectionMixin): 192 | show_instance: bool = True 193 | show_system: bool = True 194 | show_workers: bool = True 195 | 196 | _T = TypeVar("_T", bound="HeaderSection") 197 | 198 | @classmethod 199 | def from_config_section(cls: type[_T], section: configparser.SectionProxy) -> _T: 200 | values: dict[str, bool] = {} 201 | for optname in cls.check_options(section): 202 | value = section.getboolean(optname) 203 | if value is not None: 204 | values[optname] = value 205 | return cls(**values) 206 | 207 | 208 | @attr.s(auto_attribs=True, frozen=True, slots=True) 209 | class UISection(BaseSectionMixin): 210 | hidden: bool = False 211 | width: int | None = attr.ib(default=None, validator=validators.optional(gt(0))) 212 | color: str | None = attr.ib(default=None) 213 | 214 | _T = TypeVar("_T", bound="UISection") 215 | 216 | @classmethod 217 | def from_config_section(cls: type[_T], section: configparser.SectionProxy) -> _T: 218 | cls.check_options(section) 219 | values: dict[str, Any] = {} 220 | hidden = section.getboolean("hidden") 221 | if hidden is not None: 222 | values["hidden"] = hidden 223 | values["width"] = section.getint("width") 224 | values["color"] = section.get("color") 225 | return cls(**values) 226 | 227 | 228 | @attr.s(auto_attribs=True, frozen=True, slots=True) 229 | class BuiltinProfile: 230 | name: str 231 | content: IO[str] 232 | 233 | @classmethod 234 | def get(cls, name: str) -> BuiltinProfile | None: 235 | content = read_resource("pgactivity", "profiles", f"{name}.conf") 236 | if content is not None: 237 | return cls(name, io.StringIO(content)) 238 | return None 239 | 240 | 241 | USER_CONFIG_HOME = Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config")) 242 | ETC = Path("/etc") 243 | 244 | 245 | Value = Union[HeaderSection, UISection] 246 | 247 | 248 | @attr.s(auto_attribs=True, frozen=True, slots=True) 249 | class Configuration: 250 | name: str 251 | values: dict[str, Value] 252 | 253 | def __getitem__(self, name: str) -> Value: 254 | return self.values.__getitem__(name) 255 | 256 | def get(self, name: str, default: Value | None = None) -> Value | None: 257 | return self.values.get(name, default) 258 | 259 | def items(self) -> ItemsView[str, Value]: 260 | return self.values.items() 261 | 262 | def header(self) -> HeaderSection | None: 263 | return self.get("header") # type: ignore[return-value] 264 | 265 | _T = TypeVar("_T", bound="Configuration") 266 | 267 | @classmethod 268 | def parse(cls: type[_T], f: IO[str], name: str) -> _T: 269 | r"""Parse configuration from 'f'. 270 | 271 | >>> from io import StringIO 272 | >>> from pprint import pprint 273 | 274 | >>> f = StringIO('[header]\nshow_workers=false\n[client]\nhidden=true\ncolor=green\n') 275 | >>> cfg = Configuration.parse(f, "f.ini") 276 | >>> cfg.name 277 | 'f.ini' 278 | >>> pprint(cfg.values) 279 | {'client': UISection(hidden=True, width=None, color='green'), 280 | 'header': HeaderSection(show_instance=True, show_system=True, show_workers=False)} 281 | 282 | >>> bad = StringIO("[global]\nx=1") 283 | >>> Configuration.parse(bad, "bad.ini") 284 | Traceback (most recent call last): 285 | ... 
241 | USER_CONFIG_HOME = Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config"))
242 | ETC = Path("/etc")
243 | 
244 | 
245 | Value = Union[HeaderSection, UISection]
246 | 
247 | 
248 | @attr.s(auto_attribs=True, frozen=True, slots=True)
249 | class Configuration:
250 |     name: str
251 |     values: dict[str, Value]
252 | 
253 |     def __getitem__(self, name: str) -> Value:
254 |         return self.values.__getitem__(name)
255 | 
256 |     def get(self, name: str, default: Value | None = None) -> Value | None:
257 |         return self.values.get(name, default)
258 | 
259 |     def items(self) -> ItemsView[str, Value]:
260 |         return self.values.items()
261 | 
262 |     def header(self) -> HeaderSection | None:
263 |         return self.get("header")  # type: ignore[return-value]
264 | 
265 |     _T = TypeVar("_T", bound="Configuration")
266 | 
267 |     @classmethod
268 |     def parse(cls: type[_T], f: IO[str], name: str) -> _T:
269 |         r"""Parse configuration from 'f'.
270 | 
271 |         >>> from io import StringIO
272 |         >>> from pprint import pprint
273 | 
274 |         >>> f = StringIO('[header]\nshow_workers=false\n[client]\nhidden=true\ncolor=green\n')
275 |         >>> cfg = Configuration.parse(f, "f.ini")
276 |         >>> cfg.name
277 |         'f.ini'
278 |         >>> pprint(cfg.values)
279 |         {'client': UISection(hidden=True, width=None, color='green'),
280 |          'header': HeaderSection(show_instance=True, show_system=True, show_workers=False)}
281 | 
282 |         >>> bad = StringIO("[global]\nx=1")
283 |         >>> Configuration.parse(bad, "bad.ini")
284 |         Traceback (most recent call last):
285 |         ...
286 |         pgactivity.config.InvalidSection: invalid configuration 'bad.ini': invalid section 'global'
287 |         >>> bad = StringIO("[xxx]\n")
288 |         >>> Configuration.parse(bad, "bad.ini")
289 |         Traceback (most recent call last):
290 |         ...
291 |         pgactivity.config.InvalidSection: invalid configuration 'bad.ini': invalid section 'xxx'
292 |         >>> bad = StringIO("[cpu]\nx=1")
293 |         >>> Configuration.parse(bad, "bad.ini")
294 |         Traceback (most recent call last):
295 |         ...
296 |         pgactivity.config.InvalidOptions: invalid configuration 'bad.ini': invalid option(s) in 'cpu': invalid option(s): x
297 |         >>> bad = StringIO("[mem]\nwidth=-2")
298 |         >>> Configuration.parse(bad, "bad.ini")
299 |         Traceback (most recent call last):
300 |         ...
301 |         pgactivity.config.InvalidOptions: invalid configuration 'bad.ini': invalid option(s) in 'mem': 'width' must be > 0: -2
302 |         >>> bad = StringIO("[mem]\nwidth=xyz")
303 |         >>> Configuration.parse(bad, "bad.ini")
304 |         Traceback (most recent call last):
305 |         ...
306 |         pgactivity.config.InvalidOptions: invalid configuration 'bad.ini': invalid option(s) in 'mem': invalid literal for int() with base 10: 'xyz'
307 |         >>> bad = StringIO("not some INI??")
308 |         >>> Configuration.parse(bad, "bad.txt")
309 |         Traceback (most recent call last):
310 |         ...
311 |         pgactivity.config.ConfigurationError: invalid configuration 'bad.txt': failed to parse INI: File contains no section headers.
312 |         file: '<???>', line: 1
313 |         'not some INI??'
314 |         """
315 |         p = configparser.ConfigParser(default_section="global", strict=True)
316 |         try:
317 |             p.read_file(f)
318 |         except configparser.Error as e:
319 |             raise ConfigurationError(name, f"failed to parse INI: {e}") from None
320 |         known_sections = set(Flag.names())
321 |         config: dict[str, HeaderSection | UISection] = {}
322 |         for sname, section in p.items():
323 |             if sname == p.default_section:
324 |                 if section:
325 |                     raise InvalidSection(p.default_section, name)
326 |                 continue
327 |             if sname == "header":
328 |                 config[sname] = HeaderSection.from_config_section(section)
329 |                 continue
330 |             if sname not in known_sections:
331 |                 raise InvalidSection(sname, name)
332 |             try:
333 |                 config[sname] = UISection.from_config_section(section)
334 |             except ValueError as e:
335 |                 raise InvalidOptions(sname, str(e), name) from None
336 |         return cls(name=name, values=config)
337 | 
338 |     @classmethod
339 |     def lookup(
340 |         cls: type[_T],
341 |         profile: str | None,
342 |         *,
343 |         user_config_home: Path = USER_CONFIG_HOME,
344 |         etc: Path = ETC,
345 |     ) -> _T | None:
346 |         if profile is None:
347 |             for base in (user_config_home, etc):
348 |                 fpath = base / "pg_activity.conf"
349 |                 if fpath.exists():
350 |                     with fpath.open() as f:
351 |                         return cls.parse(f, str(fpath))
352 |             return None
353 | 
354 |         assert profile  # per argument validation
355 |         fname = f"{profile}.conf"
356 |         bases = (user_config_home / "pg_activity", etc / "pg_activity")
357 |         for base in bases:
358 |             fpath = base / fname
359 |             if fpath.exists():
360 |                 with fpath.open() as f:
361 |                     return cls.parse(f, str(fpath))
362 | 
363 |         builtin_profile = BuiltinProfile.get(profile)
364 |         if builtin_profile is not None:
365 |             return cls.parse(builtin_profile.content, builtin_profile.name)
366 | 
367 |         raise FileNotFoundError(f"profile {profile!r} not found")
368 | 
369 |     def error(self, message: str) -> ConfigurationError:
370 |         return ConfigurationError(self.name, message)
371 | 
--------------------------------------------------------------------------------
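
For readers wondering where profiles come from, this sketch (with hypothetical
paths, not from the source tree) spells out the resolution order implemented by
Configuration.lookup():

    from pathlib import Path

    from pgactivity.config import Configuration

    # With --profile minimal, candidates are checked in this order:
    #   1. <user_config_home>/pg_activity/minimal.conf
    #   2. /etc/pg_activity/minimal.conf
    #   3. the built-in pgactivity/profiles/minimal.conf resource
    cfg = Configuration.lookup(
        "minimal",
        user_config_home=Path.home() / ".config",  # close to the XDG default
        etc=Path("/etc"),
    )
    assert cfg is not None  # "minimal" always resolves via the built-in profile
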
/docs/man/pg_activity.pod:
--------------------------------------------------------------------------------
1 | =head1 NAME
2 | 
3 | pg_activity - Realtime PostgreSQL database server monitoring tool
4 | 
5 | =head1 SYNOPSIS
6 | 
7 | B<pg_activity> [option...] [connection string]
8 | 
9 | =head1 DESCRIPTION
10 | 
11 | Command line tool for PostgreSQL server activity monitoring.
12 | 
13 | pg_activity must run on the same server as the instance and
14 | as the user running the instance (or root) to show
15 | CPU, MEM, READ or WRITE columns and other system information.
16 | 
17 | =head2 THE HEADER
18 | 
19 | The first line of the header displays PostgreSQL's version, the host name, the
20 | connection string, the refresh rate and the duration mode.
21 | 
22 | The header is then divided into three groups: B<instance>, B<worker processes>,
23 | B<system information>. The information is filtered according to the filter parameters when
24 | appropriate. This is shown in the following descriptions with the "(filtered)"
25 | mention. Depending on the PostgreSQL version you are running, some information might not be
26 | available. In that case, it is replaced by a dash.
27 | 
28 | The B<instance> group displays information about the PostgreSQL instance (or
29 | cluster). This group can be displayed or hidden with a dedicated key; a sample profile toggling the header groups follows the worker-process items below.
30 | 
31 | Global:
32 | 
33 | =over 2
34 | 
35 | =item - B<uptime>: how long the instance has been running;
36 | 
37 | =item - B<size>: total size of the databases (filtered);
38 | 
39 | =item - B<growth>: growth in B/s of the databases (filtered);
40 | 
41 | =item - B<cache hit ratio>: the percentage of pages read from PostgreSQL's cache since the last snapshot (filtered).
42 | 
43 | =back
44 | 
45 | Sessions:
46 | 
47 | =over 2
48 | 
49 | =item - B<total>: session count (filtered) / max_connections;
50 | 
51 | =item - B<active>: number of active sessions (filtered);
52 | 
53 | =item - B<idle>: number of idle sessions (filtered);
54 | 
55 | =item - B<idle in txn>: number of sessions in the "idle in transaction" state (filtered);
56 | 
57 | =item - B<idle in txn abrt>: number of sessions in the "idle in transaction (aborted)" state (filtered);
58 | 
59 | =item - B<waiting>: number of sessions waiting for a lock (filtered).
60 | 
61 | =back
62 | 
63 | Activity:
64 | 
65 | =over 2
66 | 
67 | =item - B<tps>: transactions per second (sum of commits & rollbacks for all databases / time elapsed since the last snapshot) (filtered);
68 | 
69 | =item - B<insert>: number of inserts per second (filtered);
70 | 
71 | =item - B<update>: number of updates per second (filtered);
72 | 
73 | =item - B<delete>: number of deletes per second (filtered);
74 | 
75 | =item - B<temp files>: number of temporary files created on the instance;
76 | 
77 | =item - B<temp size>: total temporary file size on the instance.
78 | 
79 | =back
80 | 
81 | The B<worker processes> group displays information about background workers,
82 | autovacuum processes, wal senders and wal receivers. It also gives information
83 | about replication slots. Except for the autovacuum workers count, most of this
84 | information is not related to a specific database, so these values will
85 | be zero when the data is filtered.
86 | 
87 | Worker processes:
88 | 
89 | =over 2
90 | 
91 | =item - B<total workers>: total worker count / maximum number of worker slots; parallel workers and logical replication workers are taken from this amount (filtered);
92 | 
93 | =item - B<logical workers>: logical replication worker count / maximum number of logical replication workers (filtered);
94 | 
95 | =item - B<parallel workers>: parallel worker count for maintenance & queries / maximum number of parallel workers (filtered).
96 | 
97 | =back
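
These three header groups map onto the [header] section parsed by
pgactivity/config.py earlier in this listing: show_instance, show_system and
show_workers plausibly toggle the instance, system information and worker
processes groups respectively. As a hypothetical illustration, a profile such
as the following would hide the worker processes group and the CLIENT column:

    ; hypothetical ~/.config/pg_activity/quiet.conf
    [header]
    show_workers = no

    [client]
    hidden = yes
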
98 | 
99 | Other processes & information:
100 | 
101 | =over 2
102 | 
103 | =item - B<autovacuum workers>: number of autovacuum workers in action / maximum number of autovacuum workers (filtered);
104 | 
105 | =item - B<wal senders>: number of wal senders / maximum number of wal sender processes (filtered);
106 | 
107 | =item - B<wal receivers>: number of wal receivers / maximum number of wal receiver processes (filtered);
108 | 
109 | =item - B<replication slots>: number of replication slots / maximum number of replication slots (filtered).
110 | 
111 | =back
112 | 
113 | The last group displays B<system information>:
114 | 
115 | =over 2
116 | 
117 | =item - B<memory>: total / free / used and buff+cached memory with the related percentages;
118 | 
119 | =item - B<swap>: total / free / used swap;
120 | 
121 | =item - B<IO>: the number of I/O operations per second, and the current read and write throughput (aggregated data gathered with the psutil library);
122 | 
123 | =item - B<load>: CPU load for the last 1, 5 and 15 minutes.
124 | 
125 | =back
126 | 
127 | =head2 THE RUNNING QUERIES PANEL
128 | 
129 | The running queries panel shows all running queries, transactions or backends
130 | (depending on the B<--duration-mode> setting) which have lasted for more than
131 | B<--min-duration> seconds. It displays the following information:
132 | 
133 | =over 2
134 | 
135 | =item - B<PID>: process id of the backend which executes the query;
136 | 
137 | =item - B<XMIN>: xmin horizon of the backend;
138 | 
139 | =item - B<DATABASE>: database specified in the connection string;
140 | 
141 | =item - B<APP NAME>: application name specified in the connection string;
142 | 
143 | =item - B<USER>: user name specified in the connection string;
144 | 
145 | =item - B<CLIENT>: client address, or "local" in case of a Unix socket connection;
146 | 
147 | =item - B<CPU%>: percentage of CPU used by the backend as reported by the psutil library;
148 | 
149 | =item - B<MEM%>: percentage of memory used by the backend as reported by the psutil library;
150 | 
151 | =item - B<READ/s>: read throughput as reported by the psutil library;
152 | 
153 | =item - B<WRITE/s>: write throughput as reported by the psutil library;
154 | 
155 | =item - B