├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── CONTRIBUTORS ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── docs ├── Makefile ├── conf.py ├── development.rst ├── generating_a_spec.rst ├── index.rst ├── making_configuration_changes.rst ├── notable_functionality_and_caveats.rst ├── project_goals.rst └── spec_overview.rst ├── pgbedrock ├── __init__.py ├── attributes.py ├── cli.py ├── common.py ├── context.py ├── core_configure.py ├── core_generate.py ├── memberships.py ├── ownerships.py ├── privileges.py └── spec_inspector.py ├── requirements-dev.txt ├── requirements-docs.txt ├── requirements-publish.txt ├── requirements.txt ├── setup.cfg ├── setup.py └── tests ├── Dockerfile ├── conftest.py ├── test_attributes.py ├── test_cli.py ├── test_common.py ├── test_context.py ├── test_core_configure.py ├── test_core_generate.py ├── test_memberships.py ├── test_ownerships.py ├── test_privileges.py ├── test_spec_inspector.py └── wait_for_postgres.sh /.gitignore: -------------------------------------------------------------------------------- 1 | tag_message 2 | 3 | # Created by https://www.gitignore.io/api/python 4 | 5 | ### Python ### 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | *.ipynb 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | env/ 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *,cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # pyenv 80 | .python-version 81 | 82 | # celery beat schedule file 83 | celerybeat-schedule 84 | 85 | # dotenv 86 | .env 87 | 88 | # virtualenv 89 | .venv/ 90 | venv/ 91 | ENV/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # End of https://www.gitignore.io/api/python 100 | *.sw[mnop] 101 | tmp/ 102 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # We only use Python for coveralls; both Python 2.7 and 3.6 are 2 | # tested but that is done within docker containers 3 | language: python 4 | python: 3.6 5 | 6 | services: 7 | - docker 8 | 9 | jobs: 10 | include: 11 | - stage: Testing 12 | env: SUPPORTED_PG_VERSIONS=9.5.13 13 | # sudo is required in order to run `make clean` 14 | script: sudo make test 15 | 16 | - stage: # Intentionally left blank to parallelize 17 | env: SUPPORTED_PG_VERSIONS=9.6.4 18 | script: sudo make test 19 | 20 | - stage: # Intentionally left blank to parallelize 21 | env: SUPPORTED_PG_VERSIONS=10.4 22 | script: sudo make test 23 | 24 | - stage: # Intentionally left blank to parallelize 25 | install: pip install -e . 
-r requirements-docs.txt 26 | script: make docs 27 | 28 | after_success: 29 | # Paths in .coverage file were based on the path in the docker container; 30 | # Put the code in the same place so coveralls can determine coverage 31 | - cp -r pgbedrock/ /opt/ 32 | - pip install coveralls 33 | - coveralls 34 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 5 | and this project tries to adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | _this space intentionally left blank_ 9 | 10 | ## [0.4.2] - 2019-12-13 11 | ### In Code 12 | - Fixes for new "except" feature introduced in 0.4.0 ( @jholbrook-sqsp ) 13 | - Upgrade PyYAML from 5.1 to 5.2 in requirements.txt ( @dependabot ) 14 | - Patch to remove YAML.load warnings ( @jholbrook-sqsp ) 15 | 16 | ## [0.4.1] - 2019-12-10 17 | ### In Code 18 | - Small fix to ensure docker container deploys with working code. ( @domoore1989 ) 19 | 20 | ## [0.4.0] - 2019-12-10 21 | ### In Code 22 | - Added the ability to except tables and sequences from privileges when the schema's entire tables 23 | or sequences are whitelisted ( @dmoore1989 ) 24 | 25 | ## [0.3.2] - 2018-08-30 26 | ### In Code 27 | - `pgbedrock` is more permissive in its declared dependencies so it plays well 28 | with other packages in the same python environment ( @emddudley ) 29 | ### In Docs 30 | - Added a changelog 31 | - Added a contributor credits file 32 | - Added some rules about keeping them up to date. 
33 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | Conrad Dean 2 | John Shiver 3 | Michael Dudley 4 | Zach Marine 5 | Douglas Moore 6 | Josh Holbrook 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.6 2 | 3 | VOLUME /opt 4 | WORKDIR /opt 5 | 6 | COPY setup.py /opt/ 7 | COPY requirements.txt /opt/ 8 | COPY pgbedrock /opt/pgbedrock 9 | RUN pip install -r requirements.txt 10 | RUN pip install . 11 | RUN pip install --force-reinstall psycopg2 12 | 13 | ENTRYPOINT ["pgbedrock"] 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2018 Squarespace, INC. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE requirements.txt 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: attach build build_tester clean coverage create_network docs psql release_pypi release_pypitest release_quay remove_network start_postgres stop_postgres test test_one_pg_version test27 test36 view_docs wait_for_postgres 2 | 3 | SUPPORTED_PG_VERSIONS ?= 9.5.13 9.6.4 10.4 4 | # The default Postgres that will be used in individual targets 5 | POSTGRES_VERSION ?= 10.4 6 | 7 | COMPOSED_NETWORK = pgbedrock_network 8 | POSTGRES_HOST = pgbedrock_postgres 9 | POSTGRES_DB = test_db 10 | POSTGRES_USER = test_user 11 | POSTGRES_PASSWORD = test_password 12 | 13 | FULL_NAME = quay.io/squarespace/pgbedrock 14 | VERSION = `grep "^__version__" pgbedrock/__init__.py | cut -d "'" -f 2` 15 | 16 | 17 | attach: 18 | @docker run -it --entrypoint "/bin/bash" pgbedrock 19 | 20 | build: clean 21 | @echo "Building the prod docker image" 22 | docker build \ 23 | -t $(FULL_NAME) \ 24 | -t $(FULL_NAME):$(VERSION) \ 25 | -t $(FULL_NAME):latest \ 26 | . 27 | 28 | build_tester: 29 | @echo "Building the tester27 and tester36 docker images" 30 | docker build . \ 31 | -f tests/Dockerfile \ 32 | --build-arg PYTHON_VERSION=2.7 \ 33 | -t tester27 34 | docker build . \ 35 | -f tests/Dockerfile \ 36 | --build-arg PYTHON_VERSION=3.6 \ 37 | -t tester36 38 | 39 | clean: 40 | @echo "Cleaning the repo" 41 | @find . -name '__pycache__' -type d -exec rm -rf {} + 42 | @find . -name '*.pyc' -delete 43 | @find . 
-name '*.retry' -delete 44 | 45 | coverage: start_postgres wait_for_postgres 46 | pytest --cov pgbedrock/ --cov-report=term-missing:skip-covered 47 | 48 | create_network: remove_network 49 | @echo "Creating the docker network" 50 | @docker network create $(COMPOSED_NETWORK) 51 | 52 | docs: 53 | $(MAKE) -C docs html O=-nW 54 | 55 | psql: 56 | @docker exec -it $(POSTGRES_HOST) psql -d $(POSTGRES_DB) -U $(POSTGRES_USER) 57 | 58 | release_pypi: test 59 | @echo "Releasing Python package to pypi" 60 | rm -rf dist/ 61 | python setup.py sdist bdist_wheel 62 | twine upload -r pypi ./dist/* 63 | rm -rf dist/ 64 | 65 | release_pypitest: test 66 | @echo "Releasing Python package to pypitest" 67 | rm -rf dist/ 68 | python setup.py sdist bdist_wheel 69 | twine upload -r pypitest ./dist/* 70 | rm -rf dist/ 71 | 72 | # Note: you may have to do a `docker login` and/or be added to the 73 | # admin users for the docker repo before quay will accept a push 74 | release_quay: test build 75 | @echo "Releasing docker image to quay" 76 | docker push $(FULL_NAME):$(VERSION) 77 | docker push $(FULL_NAME):latest 78 | 79 | remove_network: stop_postgres 80 | @echo "Removing the docker network (if it exists)" 81 | -docker network rm $(COMPOSED_NETWORK) || true 82 | 83 | start_postgres: create_network 84 | @echo "Starting postgres $(POSTGRES_VERSION)" 85 | @docker run --rm -d --name $(POSTGRES_HOST) \ 86 | -e POSTGRES_USER=$(POSTGRES_USER) \ 87 | -e POSTGRES_PASSWORD=$(POSTGRES_PASSWORD) \ 88 | -e POSTGRES_DB=$(POSTGRES_DB) \ 89 | -p 54321:5432 \ 90 | --net=$(COMPOSED_NETWORK) \ 91 | postgres:$(POSTGRES_VERSION) 92 | 93 | stop_postgres: 94 | @echo "Stopping postgres (if it is running)" 95 | @-docker stop $(POSTGRES_HOST) || true 96 | 97 | test_one_pg_version: start_postgres wait_for_postgres test27 test36 remove_network clean 98 | 99 | test: clean build_tester 100 | @for pg_version in ${SUPPORTED_PG_VERSIONS}; do \ 101 | echo "\n\n\n\n\n\n\nTesting Postgres $$pg_version"; \ 102 | $(MAKE) 
test_one_pg_version POSTGRES_VERSION="$$pg_version"; \ 103 | done 104 | 105 | test27: 106 | @echo "Running pytest with Python 2.7" 107 | @docker run \ 108 | --rm \ 109 | -e WITHIN_DOCKER_FLAG=true \ 110 | -e POSTGRES_PORT=5432 \ 111 | -v $(shell pwd):/opt \ 112 | --net=$(COMPOSED_NETWORK) \ 113 | tester27 114 | 115 | test36: 116 | @echo "Running pytest with Python 3.6" 117 | @docker run \ 118 | --rm \ 119 | -e WITHIN_DOCKER_FLAG=true \ 120 | -e POSTGRES_PORT=5432 \ 121 | -v $(shell pwd):/opt \ 122 | --net=$(COMPOSED_NETWORK) \ 123 | tester36 124 | 125 | wait_for_postgres: 126 | @echo 'Sleeping while postgres starts up'; 127 | @docker run --rm -it --name wait_for_postgres \ 128 | -e POSTGRES_HOST=$(POSTGRES_HOST) \ 129 | -e POSTGRES_USER=$(POSTGRES_USER) \ 130 | -e POSTGRES_PASSWORD=$(POSTGRES_PASSWORD) \ 131 | -e POSTGRES_DB=$(POSTGRES_DB) \ 132 | -e POSTGRES_VERSION=$(POSTGRES_VERSION) \ 133 | -v $(shell pwd)/tests/wait_for_postgres.sh:/wait_for_postgres.sh \ 134 | --net=$(COMPOSED_NETWORK) \ 135 | --entrypoint="/wait_for_postgres.sh" \ 136 | postgres:$(POSTGRES_VERSION) 137 | 138 | view_docs: docs 139 | open docs/_build/html/index.html 140 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | pgbedrock 2 | ========= 3 | |travis_ci| |coveralls| |postgres_versions| |pip_versions| 4 | 5 | .. |travis_ci| image:: https://travis-ci.org/Squarespace/pgbedrock.svg?branch=master 6 | :target: https://travis-ci.org/Squarespace/pgbedrock 7 | 8 | .. |coveralls| image:: https://coveralls.io/repos/github/Squarespace/pgbedrock/badge.svg?branch=master 9 | :target: https://coveralls.io/github/Squarespace/pgbedrock?branch=master 10 | 11 | .. |postgres_versions| image:: https://img.shields.io/badge/postgres-9.5,_9.6,_10-blue.svg 12 | 13 | .. 
|pip_versions| image:: https://img.shields.io/pypi/pyversions/pgbedrock.svg 14 | :target: https://pypi.python.org/pypi/pgbedrock 15 | 16 | 17 | pgbedrock is an application for managing the roles, memberships, ownerships, and most importantly 18 | the permissions for tables, sequences, and schemas in a Postgres database. 19 | 20 | Given the parameters to connect to a Postgres database (i.e. host, port, etc.) and a YAML file (a 21 | "spec") representing the desired database configuration, pgbedrock makes sure that the configuration 22 | of that database matches the spec. If there are differences, it will alter the database to make it 23 | match the spec. 24 | 25 | It can be run as a docker container (via ``docker run quay.io/squarespace/pgbedrock``) or 26 | as a local command-line utility (via ``pip install pgbedrock``). 27 | 28 | Detailed information can be found in the `documentation`_. 29 | 30 | .. _documentation: https://pgbedrock.readthedocs.io/en/latest/ 31 | 32 | Example 33 | ------- 34 | As an example, the definition for the ``jdoe`` role in the spec might look like this: 35 | 36 | .. 
code-block:: bash 37 | 38 | jdoe: 39 | can_login: yes 40 | is_superuser: no 41 | attributes: 42 | - PASSWORD "{{ env['JDOE_PASSWORD'] }}" 43 | member_of: 44 | - analyst 45 | owns: 46 | schemas: 47 | - finance_reports 48 | tables: 49 | - finance_reports.Q2_revenue 50 | - finance_reports.Q2_margin 51 | privileges: 52 | schemas: 53 | read: 54 | - finance 55 | - marketing 56 | write: 57 | - reports 58 | tables: 59 | read: 60 | - finance.* 61 | - marketing.ad_spend 62 | - marketing.impressions 63 | write: 64 | - reports.* 65 | except: 66 | - reports.Q2_fixed_assets 67 | sequences: 68 | write: 69 | - reports.* 70 | 71 | When pgbedrock is run, it would make sure that: 72 | 73 | * The role ``jdoe`` exists 74 | * ``jdoe`` can log in 75 | * ``jdoe`` is not a superuser 76 | * ``jdoe``'s password is the same as what is in the ``$JDOE_PASSWORD`` environment variable 77 | * All other role attributes for ``jdoe`` are the Postgres defaults (as defined by `pg_authid`_). 78 | * ``jdoe`` is a member of the ``analyst`` role 79 | * ``jdoe`` is a member of no other roles 80 | * ``jdoe`` owns the ``finance_reports`` schema 81 | * ``jdoe`` owns the ``finance_reports.Q2_revenue`` and ``finance_reports.Q2_margin`` tables 82 | * ``jdoe`` has read-level schema access (in Postgres terms: ``USAGE``) for the ``finance`` and 83 | ``marketing`` schemas 84 | * ``jdoe`` has write-level schema access (``CREATE``) for the ``reports`` schema 85 | * ``jdoe`` has read-level access (``SELECT``) to all tables in the ``finance`` schema and to the 86 | ``marketing.ad_spend`` and ``marketing.impressions`` tables 87 | * ``jdoe`` has default privileges to read from all future tables created in the ``finance`` schema 88 | * ``jdoe`` has write-level access (``SELECT``, ``INSERT``, ``UPDATE``, ``DELETE``, ``TRUNCATE``, 89 | ``REFERENCES``, and ``TRIGGER``) to all tables in the ``reports`` schema except for the ``Q2_fixed_assets`` table 90 | * ``jdoe`` has default privileges to write to all future tables created in 
the ``reports`` schema 91 | * ``jdoe`` has write-level access (``SELECT``, ``USAGE``, ``UPDATE``) to all sequences in the 92 | ``reports`` schema 93 | * ``jdoe`` has default privileges to write to all future sequences created in the ``reports`` schema 94 | * ``jdoe`` does not have any access other than that listed above (except whatever it inherits 95 | from the ``analyst`` role that ``jdoe`` is a member of) 96 | 97 | .. _pg_authid: https://www.postgresql.org/docs/9.6/static/catalog-pg-authid.html 98 | 99 | 100 | Quickstart 101 | ---------- 102 | Using pgbedrock requires three steps: generating a spec for a database, reviewing that spec, and 103 | configuring the database using that spec. Below we will do this using the pgbedrock docker image, 104 | but these steps can also be done with the pip-installed version of the tool. 105 | 106 | #. **Generate a spec for a database**. Specify the connection parameters below (host, port, 107 | database, username, and user password) as well as the place to output the tentative spec. Note 108 | that the user passed with ``-U`` must be a superuser. 109 | 110 | .. code-block:: bash 111 | 112 | docker run -it \ 113 | quay.io/squarespace/pgbedrock generate \ 114 | -h myhost.mynetwork.net \ 115 | -p 5432 \ 116 | -d mydatabase \ 117 | -U mysuperuser \ 118 | -w supersecret > path/to/spec.yml 119 | 120 | 121 | #. **Review the spec**. pgbedrock is not quite as flexible as Postgres's permissioning, and as a 122 | result the generated spec may differ slightly from the current state of your database. For more 123 | information on these potential simplifications, see the `Notable Functionality And Caveats`_ 124 | section in the docs. As a result, it is recommended to run ``pgbedrock configure`` in check mode 125 | the first time you use it to see what changes it would introduce to your current setup. 
This 126 | looks similar to the command above, but requires us to also pass in the passwords for any roles 127 | whose passwords are managed within Postgres itself. These can be identified in the spec file as 128 | roles with a line that looks like ``PASSWORD "{{ env['MYROLE_PASSWORD'] }}"`` (if you forget to 129 | pass in these passwords pgbedrock will just throw an error and refuse to run). Note that you must 130 | run ``pgbedrock configure`` against the Postgres primary. To run pgbedrock in check mode we do 131 | the following: 132 | 133 | .. _Notable Functionality And Caveats: https://pgbedrock.readthedocs.io/en/latest/notable_functionality_and_caveats.html 134 | 135 | .. code-block:: bash 136 | 137 | docker run -it \ 138 | -e "JDOE_PASSWORD=${JDOE_PASSWORD}" \ 139 | -e "JSMITH_PASSWORD=${JSMITH_PASSWORD}" \ 140 | -v /path/to/spec.yml:/opt/spec.yml \ 141 | quay.io/squarespace/pgbedrock configure spec.yml \ 142 | -h myhost.mynetwork.net \ 143 | -p 5432 \ 144 | -d mydatabase \ 145 | -U mysuperuser \ 146 | -w supersecret \ 147 | --check 148 | 149 | Note that ``--check`` is actually the default behavior, so we could also omit that. 150 | 151 | 152 | #. **Configure the database using the spec**. Once you feel comfortable with the changes pgbedrock 153 | would introduce, run the above command again using ``--live`` instead of ``--check``. Changes 154 | will now be made real. To make future changes, modify the spec file and run the above command. 155 | 156 | For further information, see the `documentation`_. 157 | 158 | 159 | License 160 | ------- 161 | Copyright 2018 Squarespace, Inc. 162 | 163 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 164 | in compliance with the License. 
You may obtain a copy of the License at: 165 | 166 | http://www.apache.org/licenses/LICENSE-2.0 167 | 168 | Unless required by applicable law or agreed to in writing, software distributed under the License 169 | is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 170 | or implied. See the License for the specific language governing permissions and limitations under 171 | the License. 172 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = pgbedrock 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime as dt 3 | 4 | from pgbedrock import __version__ 5 | 6 | 7 | project = 'pgbedrock' 8 | copyright = 'Squarespace Data Engineering, {}'.format(dt.datetime.utcnow().year) 9 | author = 'Squarespace Data Engineering' 10 | 11 | version = __version__ # The short X.Y version 12 | release = __version__ # The full version, including alpha/beta/rc tags 13 | 14 | extensions = [ 15 | 'sphinx.ext.autosectionlabel', 16 | ] 17 | 18 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 19 | html_theme = 'sphinx_rtd_theme' 20 | master_doc = 'index' 21 | source_suffix = '.rst' 22 | -------------------------------------------------------------------------------- /docs/development.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | Several functionalities for testing and debugging are described below. 5 | 6 | 7 | Debugging With Verbose Mode 8 | --------------------------- 9 | To see all queries executed by pgbedrock as it runs, run pgbedrock with the ``--verbose`` flag. 10 | Note that this will likely produce a lot of output, so you may want to tee it into a log file. 11 | 12 | 13 | Getting Set Up For Local Development 14 | ------------------------------------ 15 | First, get your Python environment set up: 16 | 17 | .. code-block:: bash 18 | 19 | mkvirtualenv pgbedrock3 --python python3 20 | pip3 install -e . 
-r requirements-dev.txt -r requirements-publish.txt 21 | 22 | Note that if the pip install step fails on psycopg2 you may have to do the following: 23 | 24 | * ``brew install postgresql openssl`` 25 | * ``xcode-select --install``, followed by a restart of your machine 26 | * If you still get an error about a library for -lssl not found, then you have two options: ``brew reinstall python`` to get Python to use brew's OpenSSL, or explicitly tell pip to use Brew's OpenSSL via ``LDFLAGS="-L$(brew --prefix openssl)/lib" pip3 install psycopg2``. 27 | 28 | Testing Functionality 29 | --------------------- 30 | Various testing functionality exists: 31 | 32 | * ``make test`` - Run tests for both Python 2 and 3 (via docker containers) against all 33 | supported Postgres versions 34 | * ``pytest`` - Run tests for whichever Python version is in your virtualenv. This requires 35 | running ``make start_postgres`` first to start up a local dockerized Postgres. Also, if 36 | you've previously run the test suite with docker you will need to run ``make clean`` 37 | first to clear out pytest's cache or else pytest will error out. 38 | * ``make coverage`` - Check package coverage and test coverage 39 | 40 | 41 | Releasing A New Version 42 | ----------------------- 43 | If you make a PR that gets merged into master, a new version of pgbedrock can be created as follows. 44 | 45 | 1. Increment the ``__version__`` in the ``pgbedrock/__init__.py`` file and commit that change. 46 | 2. Update the `CHANGELOG` file calling out changes to code regarding added features, new behaviors that could introduce breaking changes, and credits. 47 | 3. Update `CONTRIBUTORS`, adding new contributors alphabetically according to `git log --format=%an | sort | uniq`, excluding duplicates and correcting author names as requested by contributors. 48 | 4. Push a new git tag to the repo by doing: 49 | 50 | * Write the tag message in a dummy file called ``tag_message``. 
We do this to allow multi-line tag 51 | messages 52 | * ``git tag x.x.x -F tag_message`` 53 | * ``git push --tags origin master`` 54 | 55 | 5. Run ``make release_pypi``. 56 | 6. Run ``make release_quay``. This may require doing a docker login to quay first. 57 | -------------------------------------------------------------------------------- /docs/generating_a_spec.rst: -------------------------------------------------------------------------------- 1 | Generating A Spec 2 | ================= 3 | 4 | The ``pgbedrock generate`` command creates a spec given a database's current state, printing its 5 | results to ``STDOUT``. As a result, one can create a spec with: 6 | 7 | .. code-block:: bash 8 | 9 | docker run -it \ 10 | quay.io/squarespace/pgbedrock generate \ 11 | -h myhost.mynetwork.net \ 12 | -p 5432 \ 13 | -d mydatabase \ 14 | -U mysuperuser \ 15 | -w supersecret > path/to/spec.yml 16 | 17 | Alternatively, if you'd prefer to use the Python command-line interface instead, pip install 18 | pgbedrock and run the above command starting from ``pgbedrock generate``. The rest of the command 19 | is identical. 20 | 21 | Note that a generated spec may differ from reality due to simplifications that pgbedrock makes. For 22 | an example, see the "pgbedrock simplifies permissions down to read vs. write" bullet in the 23 | :ref:`Notable Functionality And Caveats`. As a result, after generating a spec it is recommended 24 | to run ``pgbedrock configure`` against it right away in check mode to see what differences exist. 25 | 26 | In addition to roles being granted various missing write privileges, another common change seen 27 | after running ``pgbedrock generate`` is various default privilege grants occurring. If within the 28 | database there is currently a default privilege granted to a role within a schema, pgbedrock assumes 29 | that the grantee is intended to have this default privilege regardless of who creates the future 30 | object. 
To do this in Postgres correctly, pgbedrock needs to grant those default privileges from all 31 | roles that could create new objects (see the "Default privileges are granted for permissions like 32 | ``myschema.*``" bullet in the :ref:`Notable Functionality And Caveats` section for more details). 33 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Overview 2 | ======== 3 | 4 | pgbedrock is an application for managing the roles, memberships, ownerships, and most importantly 5 | the permissions for tables, sequences, and schemas in a Postgres database. 6 | 7 | Given the parameters to connect to a Postgres database (i.e. host, port, etc.) and a YAML file (a 8 | "spec") representing the desired database configuration, pgbedrock makes sure that the configuration 9 | of that database matches the spec. If there are differences, it will alter the database to make it 10 | match the spec. 11 | 12 | It can be run as a docker container (via ``docker run quay.io/squarespace/pgbedrock``) or 13 | as a local command-line utility (via ``pip install pgbedrock``). 14 | 15 | 16 | Example 17 | ------- 18 | 19 | As an example, the definition for the ``jdoe`` role in the spec might look like this: 20 | 21 | .. 
code-block:: bash 22 | 23 | jdoe: 24 | can_login: yes 25 | is_superuser: no 26 | attributes: 27 | - PASSWORD "{{ env['JDOE_PASSWORD'] }}" 28 | member_of: 29 | - analyst 30 | owns: 31 | schemas: 32 | - finance_reports 33 | tables: 34 | - finance_reports.Q2_revenue 35 | - finance_reports.Q2_margin 36 | privileges: 37 | schemas: 38 | read: 39 | - finance 40 | - marketing 41 | write: 42 | - reports 43 | tables: 44 | read: 45 | - finance.* 46 | - marketing.ad_spend 47 | - marketing.impressions 48 | write: 49 | - reports.* 50 | sequences: 51 | write: 52 | - reports.* 53 | 54 | When pgbedrock is run, it would make sure that: 55 | 56 | * The role ``jdoe`` exists 57 | * ``jdoe`` can log in 58 | * ``jdoe`` is not a superuser 59 | * ``jdoe``'s password is the same as what is in the ``$JDOE_PASSWORD`` environment variable 60 | * All other role attributes for ``jdoe`` are the Postgres defaults (as defined by `pg_authid`_). 61 | * ``jdoe`` is a member of the ``analyst`` role 62 | * ``jdoe`` is a member of no other roles 63 | * ``jdoe`` owns the ``finance_reports`` schema 64 | * ``jdoe`` owns the ``finance_reports.Q2_revenue`` and ``finance_reports.Q2_margin`` tables 65 | * ``jdoe`` has read-level schema access (in Postgres terms: ``USAGE``) for the ``finance`` and 66 | ``marketing`` schemas 67 | * ``jdoe`` has write-level schema access (``CREATE``) for the ``reports`` schema 68 | * ``jdoe`` has read-level access (``SELECT``) to all tables in the ``finance`` schema and to the 69 | ``marketing.ad_spend`` and ``marketing.impressions`` tables 70 | * ``jdoe`` has default privileges to read from all future tables created in the ``finance`` schema 71 | * ``jdoe`` has write-level access (``SELECT``, ``INSERT``, ``UPDATE``, ``DELETE``, ``TRUNCATE``, 72 | ``REFERENCES``, and ``TRIGGER``) to all tables in the ``reports`` schema 73 | * ``jdoe`` has default privileges to write to all future tables created in the ``reports`` schema 74 | * ``jdoe`` has write-level access (``SELECT``, 
``USAGE``, ``UPDATE``) to all sequences in the 75 | ``reports`` schema 76 | * ``jdoe`` has default privileges to write to all future sequences created in the ``reports`` schema 77 | * ``jdoe`` does not have any access other than that listed above (except whatever it inherits 78 | from the ``analyst`` role that ``jdoe`` is a member of) 79 | 80 | .. _pg_authid: https://www.postgresql.org/docs/9.6/static/catalog-pg-authid.html 81 | 82 | 83 | Quickstart 84 | ---------- 85 | 86 | Using pgbedrock requires three steps: generating a spec for a database, reviewing that spec, and 87 | configuring the database using that spec. Below we will do this using the pgbedrock docker image, 88 | but these steps can also be done with the pip-installed version of the tool. 89 | 90 | #. **Generate a spec for a database**. Specify the connection parameters below (host, port, 91 | database, username, and user password) as well as the place to output the tentative spec. Note 92 | that the user passed with ``-U`` must be a superuser. 93 | 94 | .. code-block:: bash 95 | 96 | docker run -it \ 97 | quay.io/squarespace/pgbedrock generate \ 98 | -h myhost.mynetwork.net \ 99 | -p 5432 \ 100 | -d mydatabase \ 101 | -U mysuperuser \ 102 | -w supersecret > path/to/spec.yml 103 | 104 | 105 | #. **Review the spec**. pgbedrock is not quite as flexible as Postgres's permissioning, and as a 106 | result the generated spec may differ slightly from the current state of your database. For more 107 | information on these potential simplifications, see :ref:`Notable Functionality And Caveats`. 108 | As a result, it is recommended to run ``pgbedrock configure`` in check mode the first time you 109 | use it to see what changes it would introduce to your current setup. This looks similar to the 110 | command above, but requires us to also pass in the passwords for any roles whose passwords are 111 | managed within Postgres itself. 
These can be identified in the spec file as roles with a line 112 | that looks like ``PASSWORD "{{ env['MYROLE_PASSWORD'] }}"`` (if you forget to pass in these 113 | passwords pgbedrock will just throw an error and refuse to run). Note that you must run 114 | ``pgbedrock configure`` against the Postgres primary. To run pgbedrock in check mode we do the 115 | following: 116 | 117 | .. code-block:: bash 118 | 119 | docker run -it \ 120 | -e "JDOE_PASSWORD=${JDOE_PASSWORD}" \ 121 | -e "JSMITH_PASSWORD=${JSMITH_PASSWORD}" \ 122 | -v /path/to/spec.yml:/opt/spec.yml \ 123 | quay.io/squarespace/pgbedrock configure spec.yml \ 124 | -h myhost.mynetwork.net \ 125 | -p 5432 \ 126 | -d mydatabase \ 127 | -U mysuperuser \ 128 | -w supersecret \ 129 | --check 130 | 131 | Note that ``--check`` is actually the default behavior, so we could also omit that. 132 | 133 | 134 | #. **Configure the database using the spec**. Once you feel comfortable with the changes pgbedrock 135 | would introduce, run the above command again using ``--live`` instead of ``--check``. Changes 136 | will now be made real. To make future changes, modify the spec file and run the above command. 137 | 138 | 139 | Documentation Contents 140 | ---------------------- 141 | .. toctree:: 142 | :maxdepth: 3 143 | 144 | self 145 | project_goals 146 | generating_a_spec 147 | making_configuration_changes 148 | spec_overview 149 | notable_functionality_and_caveats 150 | development 151 | -------------------------------------------------------------------------------- /docs/making_configuration_changes.rst: -------------------------------------------------------------------------------- 1 | Making Configuration Changes 2 | ============================ 3 | 4 | The ``pgbedrock configure`` command takes a set of parameters for connecting to a database and a 5 | path to a YAML spec and modifies the database so that it matches the spec file. In check mode these 6 | changes will be reported but not committed. 
7 | 8 | One can configure a database with: 9 | 10 | .. code-block:: bash 11 | 12 | docker run -it \ 13 | -e "JDOE_PASSWORD=${JDOE_PASSWORD}" \ 14 | -e "JSMITH_PASSWORD=${JSMITH_PASSWORD}" \ 15 | -v /path/to/spec.yml:/opt/spec.yml \ 16 | quay.io/squarespace/pgbedrock configure spec.yml \ 17 | -h myhost.mynetwork.net \ 18 | -p 5432 \ 19 | -d mydatabase \ 20 | -U mysuperuser \ 21 | --prompt \ 22 | --check \ 23 | --attributes \ 24 | --memberships \ 25 | --no-ownerships \ 26 | --no-privileges 27 | 28 | A few notes on the above: 29 | 30 | * We use ``-it`` here because we are not providing a password as an input variable. Instead, we 31 | will bring up an interactive password prompt (via ``--prompt``). If we instead just passed in 32 | a password with ``-w`` then we would not need to use ``-it``. 33 | * We use ``-t`` so docker allocates a pseudo-tty for us, which allows us to see the progress 34 | bars as pgbedrock works. This isn't strictly necessary if you don't want to see the progress 35 | bars. 36 | * Because our spec.yml has templated passwords for the jdoe and jsmith roles, we pass in the 37 | environment variables for those passwords to our docker container (note that here we're 38 | passing them from environment variables in our own environment; obviously you could just 39 | hard-code them in if you wanted, i.e. ``-e "JDOE_PASSWORD=rumplestiltskin"``). 40 | * The role we provide with ``-U`` must be a superuser since they will need the ability to 41 | modify roles, memberships, schema ownership, and privileges. 42 | * We use ``--prompt`` to have an interactive prompt come up for us to put in our password. 43 | * We use ``--check`` to be sure that our changes will run in check mode, meaning that we will 44 | see what pgbedrock *would* change, but it will not actually commit those changes to our 45 | database cluster at the end of execution. 
Note that check mode is the default, so we would 46 | not have to provide this flag, but it is still a good idea to do so to be safe and explicit. 47 | If we wanted the changes pgbedrock makes to be committed we would instead use the ``--live`` 48 | flag. 49 | * We choose to run only the attributes and memberships submodules here. In general it is a good 50 | idea to run all of the submodules (which is the default), but it can be useful to only use a 51 | subset if you are just tweaking a spec and checking what would change. 52 | 53 | Further details on the meanings of parameters that pgbedrock accepts can be found by running 54 | ``docker run quay.io/squarespace/pgbedrock configure --help``. 55 | 56 | Also note that above we are running pgbedrock through a docker container, but if you'd prefer to 57 | use the Python command-line interface instead, pip install pgbedrock and run the above command 58 | starting from ``pgbedrock configure``. The rest of the command is identical. Note that any 59 | environment variables that you have templated into your spec.yml file must be set within your shell. 60 | -------------------------------------------------------------------------------- /docs/notable_functionality_and_caveats.rst: -------------------------------------------------------------------------------- 1 | Notable Functionality And Caveats 2 | ================================= 3 | 4 | * Only Postgres 9.5, 9.6, and 10 are currently supported 5 | Support for older Postgres versions is unlikely to be prioritized. 6 | 7 | * pgbedrock will not delete or alter any objects 8 | pgbedrock is explicitly written to not do anything destructive toward the objects in the 9 | database. A revoked permission can simply be re-granted, but a table/schema/sequence that has 10 | been deleted is gone for good (unless you have backups). As a result, pgbedrock will not delete 11 | any objects, including roles, schemas, tables, and sequences. 
pgbedrock will configure these
  objects, but if they need to be deleted you will have to do that manually. If one of these
  objects is not listed in the spec.yml file then pgbedrock will refuse to run, alerting the user
  of the discrepancy and asking them to manually take action (i.e. delete the role / schema /
  table / sequence or add it to the spec).

* Ownership and privilege management currently supports only schemas, tables, and sequences
  Support for managing ownership and privileges of other objects (for example: functions, foreign
  data wrappers, foreign servers, etc.) is not available but may be added in the future.

* Roles and memberships are cluster-wide in Postgres
  This means that if you have multiple databases within one Postgres instance, all of those
  databases share the same roles and role memberships. The consequence of this is that if you use
  pgbedrock to manage all of those databases, then you will need to list the roles and role
  memberships in each database's spec file.

* pgbedrock simplifies permissions down to read vs. write
  In our experience, this is easier to reason about, easier to remember, and is a sufficient level
  of granularity for most use cases. However, a consequence of this is that if you *do* use more
  fine-grained controls in your database then you will need to be more permissive or restrictive
  in your permissions in order to use pgbedrock (or, even better, put in a pull request to add
  support for finer-grained controls to pgbedrock!). As a concrete example, if roleA currently has
  ``INSERT`` permission to a table, then to use pgbedrock you will have to decide whether they
  will get read access (and thus lose that ``INSERT`` permission) or write access (and thus get
  ``UPDATE``, ``DELETE``, etc. permissions as well). If the spec is created with ``pgbedrock
  generate``, pgbedrock will take the latter approach (i.e.
granting additional write-level 37 | access), so make sure to check the initial spec after generating it to verify that any changes 38 | it introduces are acceptable. 39 | 40 | * Default privileges are granted for permissions like ``myschema.*`` 41 | When a permission grant looks like ``myschema.*``, pgbedrock interprets that to mean "grant this 42 | permission for all existing tables *and for all future tables too*" (i.e. a default privilege). 43 | However, default privileges in Postgres are only applied to new tables created by the role that 44 | granted the privilege, meaning that if roleA grants default ``SELECT`` privileges on tables to 45 | roleB, then those default privileges will apply if and only if roleA is the one who creates a 46 | subsequent table. If instead roleC creates a table then the default privileges won't happen. To 47 | deal with this, when pgbedrock sees ``myschema.*`` it will identify all roles that have the 48 | ability to create objects in that schema and grant default privileges from each of these roles 49 | to the role that should have the default privileges. 50 | 51 | * personal_schemas are supported 52 | It is common to give users a "sandbox" where they can create objects, modify them, delete them, 53 | etc. A typical way to do this is to create a schema with the same name as the role and let them 54 | own it, i.e. the role ``jdoe`` would own the schema ``jdoe``. Every object in the ``jdoe`` 55 | schema should thus be owned by ``jdoe``. pgbedrock supports this concept in a few ways. First, 56 | by specifying ``has_personal_schema: yes`` for a role, a personal schema will be created if it 57 | does not exist. If the schema already exists, pgbedrock will make sure that the schema and all 58 | objects in it that pgbedrock manages are owned by this role, making changes to ownership to make 59 | this true. Finally, ``personal_schemas`` can be used as a special term in privilege grants. 
For 60 | example, a role can be given read-level table privileges to ``personal_schemas.*``, which will 61 | let that role read all tables in all personal schemas in the database. To be a personal schema, 62 | the schema must be owned by a role with the same name as the schema and that role must be able 63 | to login. 64 | -------------------------------------------------------------------------------- /docs/project_goals.rst: -------------------------------------------------------------------------------- 1 | Project Goals 2 | ============= 3 | 4 | pgbedrock was created with several goals in mind: 5 | 6 | #. **Simplify permission complexity.** 7 | pgbedrock simplifies object access down to read vs. write. As a result, an administrator 8 | doesn't need to know that within Postgres 'read' access is really ``SELECT`` for tables but 9 | ``USAGE`` for schemas, or that write access for schemas means ``CREATE`` but for tables it is a 10 | combination of ``INSERT``, ``UPDATE``, ``DELETE``, ``TRUNCATE``, ``REFERENCES``, and 11 | ``TRIGGER``. 12 | 13 | #. **Co-locate all config.** 14 | Within Postgres itself, role, role membership, ownership, and permission information is 15 | distributed across a variety of locations: ``pg_authid``, ``pg_class``, ``pg_namespace``, 16 | ``pg_default_acl``, and so on. As a result, it is hard to get a high-level "lay of the land". 17 | pgbedrock puts all this config into one YAML file so it's easy to stay on top of how the 18 | database is configured. 19 | 20 | #. **Assert that config matches reality.** 21 | Because information is so distributed in a normal Postgres cluster, it is easy for things to 22 | get out of sync. pgbedrock checks the YAML spec against the provided database and asserts that 23 | the two match. If they do not, it makes changes to the database to make them match, 24 | transparently reporting all of the queries that it ran to make those changes. 25 | 26 | #. 
**Provide an auditable log of changes.** 27 | By using a YAML spec, our config can be put into source control, allowing us to see who had 28 | access at any given time. In addition, each time pgbedrock runs it will output the set of SQL 29 | queries that it ran to bring the cluster in line with the spec. By storing those outputs an 30 | administrator will have an audit trail of when each change occurred. 31 | 32 | As a knock-on benefit, by having pgbedrock run on a schedule one can enforce that config changes be 33 | put into code and through a PR process: changes made live to a cluster will be revoked the next 34 | time the tool runs, helping dissuade administrators from continually making live, unaudited changes. 35 | -------------------------------------------------------------------------------- /docs/spec_overview.rst: -------------------------------------------------------------------------------- 1 | Spec Overview 2 | ============= 3 | 4 | At A Glance 5 | ----------- 6 | The spec.yml file is a YAML document that holds all information about roles, role memberships, 7 | object ownerships, and privileges for a given database. It is best generated programmatically with 8 | ``pgbedrock generate``. 9 | 10 | The spec.yml is comprised of a number of role definitions. An example role definition within 11 | this file may look something like the below: 12 | 13 | .. 
code-block:: bash

    jdoe:
      can_login: yes
      is_superuser: no
      attributes:
        - PASSWORD "{{ env['JDOE_PASSWORD'] }}"
      member_of:
        - analyst
      owns:
        schemas:
          - finance_reports
        tables:
          - finance_reports.Q2_revenue
          - finance_reports.Q2_margin
      privileges:
        schemas:
          read:
            - finance
            - marketing
          write:
            - reports
        tables:
          read:
            - finance.*
            - marketing.ad_spend
            - marketing.impressions
          write:
            - reports.*
        sequences:
          write:
            - reports.*

All items other than the role name itself are optional. As a result, if you wanted to create a role
``foo`` with all defaults you could do so with just:

.. code-block:: bash

    foo:

A role definition can include any of the keywords listed below.


Keywords
--------

attributes
^^^^^^^^^^
==== =======
Type Default
==== =======
list Empty
==== =======

Items in the list may be any of the following attributes accepted by Postgres's `CREATE ROLE`_
statement. Most attributes can be preceded by 'NO' to negate them:

========================= =============
Keyword                   Default
========================= =============
BYPASSRLS                 NOBYPASSRLS
CONNECTION LIMIT          -1
CREATEDB                  NOCREATEDB
CREATEROLE                NOCREATEROLE
INHERIT                   INHERIT
PASSWORD                  None
REPLICATION               NOREPLICATION
VALID UNTIL               'infinity'
========================= =============

..
_CREATE ROLE: https://www.postgresql.org/docs/9.6/static/sql-createrole.html


can_login
^^^^^^^^^
==== =======
Type Default
==== =======
bool False
==== =======


has_personal_schema
^^^^^^^^^^^^^^^^^^^
==== =======
Type Default
==== =======
bool False
==== =======

Whether the role should have a personal schema as defined in the "personal_schemas are supported"
bullet in :ref:`Notable Functionality And Caveats`.


is_superuser
^^^^^^^^^^^^
==== =======
Type Default
==== =======
bool False
==== =======


member_of
^^^^^^^^^
==== =======
Type Default
==== =======
list Empty
==== =======

The roles that this role is a member of. Within Postgres, this means that if ``roleA`` is a member
of ``roleB``, then ``roleA`` will inherit all privileges that ``roleB`` has.


owns
^^^^
==== =======
Type Default
==== =======
dict Empty
==== =======

The objects that this role owns. At present pgbedrock manages schema, table, and sequence ownership.
Each of these objects is provided as a keyword followed by a list of the objects of that kind that
is owned by this role. For example:

.. code-block:: bash

    analyst:
      owns:
        schemas:
          - finance
        sequences:
          - finance.*
        tables:
          - finance.*
          - marketing.ad_spend


privileges
^^^^^^^^^^
==== =======
Type Default
==== =======
dict Empty
==== =======

The privileges section may be easiest to explain with an example:

..
code-block:: bash 164 | 165 | analyst: 166 | can_login: no 167 | privileges: 168 | schemas: 169 | read: 170 | - finance 171 | - marketing 172 | write: 173 | - reports 174 | tables: 175 | read: 176 | - finance.* 177 | - marketing.* 178 | write: 179 | - reports.* 180 | 181 | Here we have a role ``analyst`` that will be used as a group role (i.e. it has no login access, but 182 | we will grant it to each of our analyst employees so that they inherit its permissions). We have 183 | given this analyst role read access on the finance and marketing schemas and to all tables in them, 184 | as well as write access to the reports schema and to all tables in it. 185 | 186 | The above example shows the general structure of the privileges section: the first keys within it 187 | are the object types. pgbedrock currently supports schemas, sequences, and tables as object types, 188 | each of which is optional to include. Within each object type, we have keys for read and write, also 189 | both optional. Under each of these entries we have a list of the items to grant to. 190 | 191 | Note that the ``foo.*`` syntax is not a regex expression but rather a shorthand for listing 192 | everything in the schema. As a result, putting ``foo.bar*`` (to get tables ``foo.barn`` or 193 | ``foo.barbados``) won't work; only ``foo.*`` will work. 194 | 195 | Password Management 196 | ------------------- 197 | Password management deserves some additional clarification. Since passwords shouldn't be stored in 198 | plain text in version control, pgbedrock takes user-provided environment variables to fill in 199 | passwords. For example, one could have a role defined as: 200 | 201 | .. code-block:: bash 202 | 203 | myrole: 204 | attributes: 205 | - PASSWORD "{{ env['MYROLE_PASSWORD'] }}" 206 | 207 | Note that the environment variable can be named whatever you would like. As long as that variable 208 | exists in the environment, pgbedrock will use it. 
If a variable is declared in the spec template 209 | but does not exist in the environment, pgbedrock will refuse to run and will report the name of the 210 | missing environment variable in its error message. 211 | 212 | Note that if you are running pgbedrock through docker you will need to pass these environment 213 | variables into the docker container. This can be done using the ``-e`` flag for docker run as shown 214 | in the example for the :ref:`Making Configuration Changes` section above. 215 | -------------------------------------------------------------------------------- /pgbedrock/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.4.2' 2 | LOG_FORMAT = '%(levelname)s:%(filename)s:%(funcName)s:%(lineno)s - %(message)s' 3 | -------------------------------------------------------------------------------- /pgbedrock/attributes.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import datetime as dt 3 | import hashlib 4 | import logging 5 | 6 | import click 7 | import psycopg2 8 | 9 | from pgbedrock import common 10 | from pgbedrock.context import DatabaseContext 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | UNKNOWN_ATTRIBUTE_MSG = "Unknown attribute '{}' provided to ALTER ROLE" 16 | UNSUPPORTED_CHAR_MSG = 'Password for role "{}" contains an unsupported character: \' or "' 17 | 18 | Q_ALTER_CONN_LIMIT = 'ALTER ROLE "{}" WITH CONNECTION LIMIT {}; -- Previous value: {}' 19 | Q_ALTER_PASSWORD = "ALTER ROLE \"{}\" WITH ENCRYPTED PASSWORD '{}';" 20 | Q_REMOVE_PASSWORD = "ALTER ROLE \"{}\" WITH PASSWORD NULL;" 21 | Q_ALTER_ROLE = 'ALTER ROLE "{}" WITH {};' 22 | Q_ALTER_VALID_UNTIL = "ALTER ROLE \"{}\" WITH VALID UNTIL '{}'; -- Previous value: {}" 23 | Q_CREATE_ROLE = 'CREATE ROLE "{}";' 24 | 25 | 26 | DEFAULT_ATTRIBUTES = { 27 | 'rolbypassrls': False, 28 | 'rolcanlogin': False, 29 | 'rolconnlimit': -1, 30 | 'rolcreatedb': False, 31 
    'rolcreaterole': False,
    'rolinherit': True,
    'rolpassword': None,
    'rolreplication': False,
    'rolsuper': False,
    'rolvaliduntil': None,
}

# Map each spec keyword to how the attribute is referred to within pg_authid
PG_COLUMN_NAME = {
    'BYPASSRLS': 'rolbypassrls',
    'CONNECTION LIMIT': 'rolconnlimit',
    'CREATEDB': 'rolcreatedb',
    'CREATEROLE': 'rolcreaterole',
    'INHERIT': 'rolinherit',
    'LOGIN': 'rolcanlogin',
    'PASSWORD': 'rolpassword',
    'REPLICATION': 'rolreplication',
    'SUPERUSER': 'rolsuper',
    'VALID UNTIL': 'rolvaliduntil'
}

# We also need a reverse lookup of PG_COLUMN_NAME
COLUMN_NAME_TO_KEYWORD = {v: k for k, v in PG_COLUMN_NAME.items()}


def analyze_attributes(spec, cursor, verbose):
    """ Analyze every role in the spec and return the SQL statements necessary to make each
    role's attributes match the spec.

    Returns a tuple (all_sql_to_run, password_all_sql_to_run). Password-related SQL is kept
    in its own list so it never enters the normal (visible) SQL stream. """
    logger.debug('Starting analyze_attributes()')
    dbcontext = DatabaseContext(cursor, verbose)

    # We disable the progress bar when showing verbose output (using '' as our bar_template)
    # or else the bar will get lost in the verbose output
    bar_template = '' if verbose else common.PROGRESS_TEMPLATE
    with click.progressbar(spec.items(), label='Analyzing roles: ', bar_template=bar_template,
                           show_eta=False, item_show_func=common.item_show_func) as all_roles:
        all_sql_to_run = []
        password_all_sql_to_run = []
        for rolename, spec_config in all_roles:
            logger.debug('Starting to analyze role {}'.format(rolename))

            # A role defined with no body in the spec (e.g. just "foo:") parses to None
            spec_config = spec_config or {}
            spec_attributes = spec_config.get('attributes', [])

            # can_login / is_superuser are top-level spec keywords but map to the LOGIN /
            # SUPERUSER role attributes within Postgres, so fold them into the attribute list
            for keyword, attribute in (('can_login', 'LOGIN'), ('is_superuser', 'SUPERUSER')):
                is_desired = spec_config.get(keyword, False)
                spec_attributes.append(attribute if is_desired else 'NO' + attribute)

            roleconf = AttributeAnalyzer(rolename, spec_attributes, dbcontext)
            roleconf.analyze()
            all_sql_to_run += roleconf.sql_to_run
            password_all_sql_to_run += roleconf.password_sql_to_run

    return all_sql_to_run, password_all_sql_to_run


def create_md5_hash(rolename, value):
    """ Compute the Postgres md5-style password hash: 'md5' + md5(password + rolename) """
    salted_input = (value + rolename).encode('utf-8')
    return 'md5' + hashlib.md5(salted_input).hexdigest()


def is_valid_forever(val):
    """ Return True if a rolvaliduntil value means "never expires": None, the string
    'infinity', or datetime.max (with or without a UTC timezone attached) """
    if val is None or val == 'infinity':
        return True
    elif isinstance(val, dt.datetime) and val.tzinfo is not None:
        # Timezone-aware comparison; psycopg2 returns tz-aware datetimes
        return val == dt.datetime.max.replace(tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
    else:
        return val == dt.datetime.max


class AttributeAnalyzer(object):
    """ Analyze one role and determine (via .analyze()) any SQL statements that are necessary to
    make it match the provided spec attributes. Note that spec_attributes is a list whereas
    current_attributes is a dict. """

    def __init__(self, rolename, spec_attributes, dbcontext):
        self.sql_to_run = []
        self.rolename = common.check_name(rolename)
        logger.debug('self.rolename set to {}'.format(self.rolename))
        self.spec_attributes = spec_attributes

        self.current_attributes = dbcontext.get_role_attributes(rolename)

        # We keep track of password-related SQL separately as we don't want running this to
        # go into the main SQL stream since that could leak the password
        self.password_sql_to_run = []

    def analyze(self):
        """ Determine all SQL needed to make this role match its spec attributes, creating the
        role first if it does not yet exist. Results accumulate in self.sql_to_run (and
        self.password_sql_to_run for password changes). """
        if not self.role_exists():
            self.create_role()

        desired_attributes = self.coalesce_attributes()
        self.set_all_attributes(desired_attributes)
        return self.sql_to_run

    def create_role(self):
        # Queue a bare CREATE ROLE; attributes are applied afterwards via ALTER ROLE
        query = Q_CREATE_ROLE.format(self.rolename)
        self.sql_to_run.append(query)

    def coalesce_attributes(self):
        """ Override default attributes with user-provided ones and verify attributes are
        acceptable.
        Returns a dict with keys and structure similar to DEFAULT_ATTRIBUTES """
        attributes = copy.deepcopy(DEFAULT_ATTRIBUTES)
        spec_attributes = self.converted_attributes()
        attributes.update(spec_attributes)
        return attributes

    def converted_attributes(self):
        """ Convert the list of attributes provided in the spec to postgres-compatible
        keywords and values, i.e. a dict keyed on pg_authid column names.
        """
        converted_attributes = {}
        for spec_attribute in self.spec_attributes:

            # We do spec_attribute.upper() in each spot in order to leave the original
            # spec_attribute unchanged in case it is a password, in which case we don't want to
            # change the case
            if spec_attribute.upper().startswith('CONNECTION LIMIT'):
                val = spec_attribute[17:].strip()
                converted_attributes['rolconnlimit'] = int(val)

            elif spec_attribute.upper().startswith('VALID UNTIL'):
                val = spec_attribute[12:].strip()
                converted_attributes['rolvaliduntil'] = val

            elif 'PASSWORD' in spec_attribute.upper():
                # Regardless whether the spec specified ENCRYPTED or UNENCRYPTED for the password,
                # we throw this away as we will be storing the password in encrypted form
                val = spec_attribute.split('PASSWORD ', 1)[-1]

                # Trim leading and ending quotes, if there are any
                if val[0] == '"' or val[0] == "'":
                    val = val[1:]
                if val[-1] == '"' or val[-1] == "'":
                    val = val[:-1]

                # Quotes anywhere else in the password cannot be escaped safely, so refuse them
                if "'" in val or '"' in val:
                    common.fail(msg=UNSUPPORTED_CHAR_MSG.format(self.rolename))

                converted_attributes['rolpassword'] = val

            elif spec_attribute.upper().startswith('NO'):
                # A 'NO'-prefixed attribute (e.g. NOLOGIN) negates the corresponding keyword
                keyword = spec_attribute.upper()[2:]
                colname = PG_COLUMN_NAME.get(keyword)
                if not colname:
                    common.fail(UNKNOWN_ATTRIBUTE_MSG.format(spec_attribute))

                converted_attributes[colname] = False

            else:
                keyword = spec_attribute.upper()
                colname = PG_COLUMN_NAME.get(keyword)
                if not colname:
                    common.fail(UNKNOWN_ATTRIBUTE_MSG.format(spec_attribute))

                converted_attributes[colname] = True

        return converted_attributes

    def get_attribute_value(self, attribute):
        """ Take an attribute named like a postgres column (e.g. rolsuper) and look up that value
        in our dbcontext, falling back to the documented default when the role doesn't set it """
        value = self.current_attributes.get(attribute, DEFAULT_ATTRIBUTES[attribute])
        logger.debug('Returning attribute "{}": "{}"'.format(attribute, value))
        return value

    def is_same_password(self, value):
        """ Convert the input value into a postgres rolname-salted md5 hash and compare
        it with the currently stored hash """
        if value is None:
            return self.current_attributes.get('rolpassword') is None

        md5_hash = create_md5_hash(self.rolename, value)
        return self.current_attributes.get('rolpassword') == md5_hash

    def role_exists(self):
        # If current_attributes is empty then the rolname wasn't in pg_authid, i.e.
        # the role doesn't exist
        return self.current_attributes != {}

    def set_all_attributes(self, attributes):
        """ Verify that the role's attributes match the spec's, updating as necessary """
        for attribute, desired_value in attributes.items():
            current_value = self.get_attribute_value(attribute)
            # Passwords get dedicated handling: the stored value is a salted hash, so we compare
            # hashes rather than raw values, and the SQL goes into the password-only stream
            if attribute == 'rolpassword' and not self.is_same_password(desired_value):
                logger.debug('Altering password for role "{}"'.format(self.rolename))
                self.set_password(desired_value)

            # None, 'infinity', and datetime.max all mean "valid forever"; skip the update
            # when both sides agree on that even if the literal values differ
            if attribute == 'rolvaliduntil' \
               and is_valid_forever(desired_value) \
               and is_valid_forever(current_value):
                continue

            elif current_value != desired_value and attribute != 'rolpassword':
                self.set_attribute_value(attribute, desired_value, current_value)

    def set_attribute_value(self, attribute, desired_value, current_value):
        """ Queue the ALTER ROLE statement that sets one non-password attribute to its
        desired value (the previous value is embedded as a SQL comment where applicable) """
        if attribute == 'rolconnlimit':
            query = Q_ALTER_CONN_LIMIT.format(self.rolename, desired_value, current_value)
        elif attribute == 'rolvaliduntil':
            query = Q_ALTER_VALID_UNTIL.format(self.rolename, desired_value, current_value)
        else:
            base_keyword = COLUMN_NAME_TO_KEYWORD[attribute]
            # prepend 'NO' if desired_value is False
            keyword = base_keyword if desired_value else 'NO' + base_keyword
            query = Q_ALTER_ROLE.format(self.rolename, keyword)

        self.sql_to_run.append(query)

    def set_password(self, desired_value):
        """ Queue password-changing SQL in the (hidden) password stream and a sanitized,
        commented-out copy in the visible stream so the real password is never shown """
        if desired_value is None:
            actual_query = Q_REMOVE_PASSWORD.format(self.rolename)
        else:
            actual_query = Q_ALTER_PASSWORD.format(self.rolename, desired_value)
        self.password_sql_to_run.append(actual_query)

        sanitized_query = Q_ALTER_PASSWORD.format(self.rolename, '******')
        self.sql_to_run.append('--' + sanitized_query)
--------------------------------------------------------------------------------
/pgbedrock/cli.py:
--------------------------------------------------------------------------------
import getpass

import click

from pgbedrock import core_configure, core_generate


# Default for -U/--user and -d/--dbname: the OS-level user running the command
USER = getpass.getuser()


@click.group()
def entrypoint():
    """ Root click group; `configure` and `generate` are registered as its subcommands """
    pass


@entrypoint.command(short_help='Configure a database to match a YAML spec')
@click.argument('spec', required=True)
@click.option('-h', '--host', default='localhost', help='database server host (default: localhost)')
@click.option('-p', '--port', default=5432, type=int, help='database server port (default: 5432)')
@click.option('-U', '--user', default=USER, help='database user name (default: "{}")'.format(USER))
@click.option('-w', '--password', default="", help='database user password; (default: "")')
@click.option('-d', '--dbname', default=USER, help='database to connect to (default: "{}")'.format(USER))
@click.option('--prompt/--no-prompt', default=False, help='prompt the user to input a password (default: --no-prompt)')
@click.option('--attributes/--no-attributes', default=True, help='whether to configure role attributes (default: --attributes)')
# NOTE(review): the help text below says '--membership' but the flag is '--memberships'
@click.option('--memberships/--no-memberships', default=True, help='whether to configure memberships (default: --membership)')
@click.option('--ownerships/--no-ownerships', default=True, help='whether to configure object ownerships (default: --ownerships)')
@click.option('--privileges/--no-privileges', default=True, help='whether to configure privileges (default: --privileges)')
@click.option('--live/--check', default=False, help='whether to actually make changes ("live") or only show what would be changed ("check") (default: --check)')
@click.option('--verbose/--no-verbose', default=False, help='whether to show debug-level logging messages while running (default: --no-verbose)')
def configure(spec, host, port, user, password, dbname, prompt, attributes, memberships, ownerships,
              privileges, live, verbose):
    """
    Configure the role attributes, memberships, object ownerships, and/or privileges of a
    database cluster to match a desired spec.

    By default pgbedrock will not make the changes it proposes, i.e. it runs with --check by
    default (though you can explicitly pass --check as well if you want to be really safe). In this
    mode, when pgbedrock is finished it will abort the transaction that it is in. To make changes
    real, instead pass --live.

    In addition, using --verbose will print to STDOUT all debug statements and all SQL queries
    issued by pgbedrock.
    """
    # All real work is delegated to core_configure; this function only defines the CLI surface
    core_configure.configure(spec, host, port, user, password, dbname, prompt, attributes,
                             memberships, ownerships, privileges, live, verbose)


@entrypoint.command(short_help='Generate a YAML spec for a database')
@click.option('-h', '--host', default='localhost', help='database server host (default: localhost)')
@click.option('-p', '--port', default=5432, type=int, help='database server port (default: 5432)')
@click.option('-U', '--user', default=USER, help='database user name (default: "{}")'.format(USER))
@click.option('-w', '--password', default="", help='database user password; (default: "")')
@click.option('-d', '--dbname', default=USER, help='database to connect to (default: "{}")'.format(USER))
@click.option('--prompt/--no-prompt', default=False, help='prompt the user to input a password (default: --no-prompt)')
@click.option('--verbose/--no-verbose', default=False, help='whether to show debug-level logging messages while running (default: --no-verbose)')
def generate(host, port, user, password, dbname, prompt, verbose):
    """
    Generate a YAML spec that represents the roles, memberships, ownerships, and/or privileges of a
    database.
    """
    # All real work is delegated to core_generate; this function only defines the CLI surface
    core_generate.generate(host, port, user, password, dbname, prompt, verbose)


if __name__ == '__main__':
    entrypoint()
--------------------------------------------------------------------------------
/pgbedrock/common.py:
--------------------------------------------------------------------------------
try:
    # Python 2
    from distutils import strtobool
except:
    # Python 3
    from distutils.util import strtobool
    # NOTE(review): strtobool lives in distutils.util on both Python 2 and 3, so the first
    # import above likely always fails and this fallback is the one actually used; consider
    # collapsing to a single `from distutils.util import strtobool` — TODO confirm
import logging
import sys
import traceback

import click
import psycopg2


logger = logging.getLogger(__name__)

DATABASE_CONNECTION_ERROR_MSG = 'Unable to connect to database. Postgres traceback:\n{}'

FAILED_QUERY_MSG = 'Failed to execute query "{}": {}'
UNSUPPORTED_CHAR_MSG = 'Role "{}" contains an unsupported character: \' or "'
PROGRESS_TEMPLATE = '%(label)s [%(bar)s] %(info)s'


def check_name(name):
    """ Return name unchanged if it contains no quote characters; otherwise abort,
    since quotes in role names cannot be escaped safely in the generated SQL """
    if "'" in name or '"' in name:
        fail(msg=UNSUPPORTED_CHAR_MSG.format(name))
    else:
        return name


def fail(msg):
    """ Print msg in red and exit with a non-zero status """
    click.secho(msg, fg='red')
    sys.exit(1)


def get_db_connection(host, port, dbname, user, password):
    """ Open a psycopg2 connection with autocommit disabled (so check mode can roll all
    changes back at the end); abort with a readable message if the connection fails """
    try:
        db_conn = psycopg2.connect(host=host, port=port, dbname=dbname, user=user, password=password)
        db_conn.set_session(autocommit=False)
        return db_conn
    except Exception as e:
        fail(DATABASE_CONNECTION_ERROR_MSG.format(e))


def item_show_func(x):
    # Used by click.progressbar to render the current item; x is falsy between items
    return x[0] if x else ''


def parse_bool(value):
    """ Convert a truthy/falsy value ('yes', 'No', 1, True, ...) to a bool via strtobool """
    return bool(strtobool(str(value).lower()))


def run_query(cursor, verbose, query):
    """ Execute query on cursor, logging it first; on failure print the error (with full
    traceback when verbose) and exit """
    logger.debug('Executing query: {}'.format(query))
    try:
        cursor.execute(query)
    except Exception as e:
        if verbose:
            click.secho(FAILED_QUERY_MSG.format(query, ''), fg='red')
            # The following is needed to output the traceback as well
            exc_type, exc_value, exc_tb = sys.exc_info()
formatted_tb = '\n'.join(traceback.format_tb(exc_tb)) 63 | click.secho(formatted_tb) 64 | else: 65 | click.secho(FAILED_QUERY_MSG.format(query, e), fg='red') 66 | sys.exit(1) 67 | 68 | 69 | class ObjectName(object): 70 | """ Hold references to a specifc object, i.e. the schema and object name. 71 | 72 | We do this in order to: 73 | * Enable us to easily pick out the schema and object name for an object 74 | * Be sure that when we use a schema or object name we won't have to worry 75 | about existing double-quoting of these characteristics 76 | * Be sure that when we get the fully-qualified name it will be double quoted 77 | properly, i.e. "myschema"."mytable" 78 | """ 79 | def __init__(self, schema, unqualified_name=None): 80 | # Make sure schema and table are both stored without double quotes around 81 | # them; we add these when ObjectName.qualified_name is called 82 | self._schema = self._unquoted_item(schema) 83 | self._unqualified_name = self._unquoted_item(unqualified_name) 84 | 85 | if self._unqualified_name and self._unqualified_name == '*': 86 | self._qualified_name = '{}.{}'.format(self.schema, self.unqualified_name) 87 | elif self._unqualified_name and self._unqualified_name != '*': 88 | # Note that if we decide to support "schema"."table" within YAML that we'll need to 89 | # add a custom constructor since otherwise YAML gets confused unless you do 90 | # '"schema"."table"' 91 | self._qualified_name = '{}."{}"'.format(self.schema, self.unqualified_name) 92 | else: 93 | self._qualified_name = '{}'.format(self.schema) 94 | 95 | def __eq__(self, other): 96 | return (self.schema == other.schema) and (self.unqualified_name == other.unqualified_name) 97 | 98 | def __hash__(self): 99 | return hash(self.qualified_name) 100 | 101 | def __lt__(self, other): 102 | return self.qualified_name < other.qualified_name 103 | 104 | def __repr__(self): 105 | if self.unqualified_name: 106 | return "ObjectName('{}', '{}')".format(self.schema, self.unqualified_name) 107 | 
108 | return "ObjectName('{}')".format(self.schema) 109 | 110 | @classmethod 111 | def from_str(cls, text): 112 | """ Convert a text representation of a qualified object name into an ObjectName instance 113 | 114 | For example, 'foo.bar', '"foo".bar', '"foo"."bar"', etc. will be converted an object with 115 | schema 'foo' and object name 'bar'. Double quotes around the schema or object name are 116 | stripped, but note that we don't do anything with impossible input like 'foo."bar".baz' 117 | (which is impossible because the object name would include double quotes in it). Instead, 118 | we let processing proceed and the issue bubble up downstream. 119 | """ 120 | if '.' not in text: 121 | return cls(schema=text) 122 | 123 | # If there are multiple periods we assume that the first one delineates the schema from 124 | # the rest of the object, i.e. foo.bar.baz means schema foo and object "bar.baz" 125 | schema, unqualified_name = text.split('.', 1) 126 | # Don't worry about removing double quotes as that happens in __init__ 127 | return cls(schema=schema, unqualified_name=unqualified_name) 128 | 129 | def only_schema(self): 130 | """ Return an ObjectName instance for the schema associated with the current object """ 131 | return ObjectName(self.schema) 132 | 133 | @property 134 | def schema(self): 135 | return self._schema 136 | 137 | @property 138 | def unqualified_name(self): 139 | return self._unqualified_name 140 | 141 | @property 142 | def qualified_name(self): 143 | return self._qualified_name 144 | 145 | @staticmethod 146 | def _unquoted_item(item): 147 | if item and item.startswith('"') and item.endswith('"'): 148 | return item[1:-1] 149 | return item 150 | -------------------------------------------------------------------------------- /pgbedrock/core_configure.py: -------------------------------------------------------------------------------- 1 | import getpass 2 | import logging 3 | 4 | import click 5 | import psycopg2.extras 6 | 7 | from pgbedrock import 
from pgbedrock import LOG_FORMAT
from pgbedrock import common
from pgbedrock.attributes import analyze_attributes
from pgbedrock.memberships import analyze_memberships
from pgbedrock.ownerships import analyze_ownerships
from pgbedrock.privileges import analyze_privileges
from pgbedrock.spec_inspector import load_spec


logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger = logging.getLogger(__name__)

HEADER = '-- SQL EXECUTED ({} MODE)'
SUCCESS_MSG = "\nNo changes needed. Congratulations! :)"


def create_divider(section):
    """ Build the banner that prefixes each submodule's SQL (e.g. memberships,
    privileges, etc.) in the rendered output, naming the section """
    edge = '-' * 32
    title = '--- Configuring {} '.format(section)
    # Pad the title line with dashes so it lines up with the edges
    banner_title = title + '-' * (32 - len(title))
    return '\n'.join(('', '', edge, banner_title, edge, ''))


def has_changes(statements):
    """ Report whether any statement in the list is a real change, i.e. not just a comment """
    return any(not stmt.startswith('--') and not stmt.startswith('\n\n--') for stmt in statements)


def run_module_sql(module_sql, cursor, verbose):
    """ Execute a submodule's statements as one combined query; batching them into a
    single string reduces the network IO of sending many small calls to Postgres """
    if module_sql and has_changes(module_sql):
        common.run_query(cursor, verbose, '\n'.join(module_sql))


def run_password_sql(cursor, all_password_sql_to_run):
    """
    Run one or more SQL statements that contains a password. We do this outside of the
    common.run_query() framework for two reasons:
        1) If verbose mode is requested then common.run_query() will show the password in its
        reporting of the queries that are executed
        2) The input to common.run_query() is the module output. This output is faithfully
        rendered as-is to STDOUT upon pgbedrock's completion, so we would leak the password
        there as well.

    By running password-containing queries outside of the common.run_query() approach we can
    avoid these issues
    """
    query = '\n'.join(all_password_sql_to_run)

    try:
        cursor.execute(query)
    except Exception as e:
        common.fail(msg=common.FAILED_QUERY_MSG.format(query, e))


def configure(spec_path, host, port, user, password, dbname, prompt, attributes, memberships,
              ownerships, privileges, live, verbose):
    """
    Configure the role attributes, memberships, object ownerships, and/or privileges of a
    database cluster to match a desired spec.

    Note that attributes and memberships are database cluster-wide settings, i.e. they are the
    same across multiple databases within a given Postgres instance. Ownerships and privileges
    are specific to each individual database within a Postgres instance.

    Inputs:

        spec_path - str; the path for the configuration file

        host - str; the database server host

        port - str; the database server port

        user - str; the database user name

        password - str; the database user's password

        dbname - str; the database to connect to and configure

        prompt - bool; whether to prompt for a password

        attributes - bool; whether to configure the role attributes for the specified
            database cluster

        memberships - bool; whether to configure the role memberships for the specified
            database cluster

        ownerships - bool; whether to configure the ownerships for the specified database

        privileges - bool; whether to configure the privileges for the specified database

        live - bool; whether to apply the changes (True) or just show what changes
            would be made without actually applying them (False)

        verbose - bool; whether to show all queries that are executed and all debug log
            messages during execution
    """
    if verbose:
        # Bump the root logger so all module loggers emit debug output
        logging.getLogger('').setLevel(logging.DEBUG)

    if prompt:
        password = getpass.getpass()

    db_connection = common.get_db_connection(host, port, dbname, user, password)
    cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

    spec = load_spec(spec_path, cursor, verbose, attributes, memberships, ownerships, privileges)

    sql_to_run = []
    # Initialized up front in case the attributes module isn't run
    password_changed = False

    if attributes:
        sql_to_run.append(create_divider('attributes'))
        # Password changes happen within the attributes.py module itself so we don't leak
        # passwords; as a result we need to see if password changes occurred
        module_sql, all_password_sql_to_run = analyze_attributes(spec, cursor, verbose)
        run_module_sql(module_sql, cursor, verbose)
        if all_password_sql_to_run:
            password_changed = True
            run_password_sql(cursor, all_password_sql_to_run)
        sql_to_run.extend(module_sql)

    # The remaining three submodules all follow the same analyze-run-collect shape
    submodules = (
        (memberships, 'memberships', analyze_memberships),
        (ownerships, 'ownerships', analyze_ownerships),
        (privileges, 'privileges', analyze_privileges),
    )
    for enabled, section, analyze in submodules:
        if not enabled:
            continue
        sql_to_run.append(create_divider(section))
        module_sql = analyze(spec, cursor, verbose)
        run_module_sql(module_sql, cursor, verbose)
        sql_to_run.extend(module_sql)

    changed = password_changed or has_changes(sql_to_run)
    if changed and live:
        logger.debug('Committing changes')
        db_connection.commit()
    else:
        db_connection.rollback()

    # Only render the SQL if there is at least 1 real change (vs. all headers)
    if changed:
        click.secho(HEADER.format('LIVE' if live else 'CHECK'), fg='green')
        for statement in sql_to_run:
            click.secho(statement, fg='green')
    else:
        click.secho(SUCCESS_MSG, fg='green')

# -------------------------------------------------------------------------------- /pgbedrock/memberships.py --------------------------------------------------------------------------------

import logging

import click

from pgbedrock import common
from pgbedrock.context import DatabaseContext


logger = logging.getLogger(__name__)

SKIP_SUPERUSER_MEMBERSHIPS_MSG = '-- Skipping membership configuration for superuser "{}"'

Q_GRANT_MEMBERSHIP = 'GRANT "{}" TO "{}";'
Q_REVOKE_MEMBERSHIP = 'REVOKE "{}" FROM "{}";'


def analyze_memberships(spec, cursor, verbose):
    """ Compare each role's desired memberships against the database and return the
    list of GRANT / REVOKE statements needed to reconcile them """
    logger.debug('Starting analyze_memberships()')
    dbcontext = DatabaseContext(cursor, verbose)

    # Disable the progress bar (via an empty bar_template) when verbose output is
    # requested, otherwise the bar would get lost in the output
    bar_template = '' if verbose else common.PROGRESS_TEMPLATE
    with click.progressbar(spec.items(), label='Analyzing memberships:', bar_template=bar_template,
                           show_eta=False, item_show_func=common.item_show_func) as all_roles:
        statements = []
        for rolename, spec_config in all_roles:
            spec_config = spec_config or {}
            desired = set(spec_config.get('member_of', []))
            statements.extend(MembershipAnalyzer(rolename, desired, dbcontext).analyze())

    return statements


class MembershipAnalyzer(object):
    """ Analyze one role's memberships and determine (via .analyze()) any SQL statements
    that are necessary to make the memberships match the provided spec memberships.
    """

    def __init__(self, rolename, spec_memberships, dbcontext):
        self.sql_to_run = []
        self.rolename = common.check_name(rolename)
        logger.debug('self.rolename set to {}'.format(self.rolename))
        self.desired_memberships = spec_memberships

        self.current_memberships = dbcontext.get_role_memberships(rolename)
        self.is_superuser = dbcontext.is_superuser(rolename)

    def analyze(self):
        """ Return the SQL statements needed to true up this role's memberships """
        # Superusers bypass all permission checks, so configuring their memberships
        # is meaningless; emit a comment noting the skip instead
        if self.is_superuser:
            self.sql_to_run.append(SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(self.rolename))
            return self.sql_to_run

        # Revoke memberships currently held but not desired
        for group in self.current_memberships - self.desired_memberships:
            self.revoke_membership(group)

        # Grant memberships desired but not currently held
        for group in self.desired_memberships - self.current_memberships:
            self.grant_membership(group)

        return self.sql_to_run

    def grant_membership(self, group):
        self.sql_to_run.append(Q_GRANT_MEMBERSHIP.format(group, self.rolename))

    def revoke_membership(self, group):
        self.sql_to_run.append(Q_REVOKE_MEMBERSHIP.format(group, self.rolename))

# -------------------------------------------------------------------------------- /pgbedrock/ownerships.py --------------------------------------------------------------------------------

import logging

import click

from pgbedrock import common
from pgbedrock.context import DatabaseContext


logger = logging.getLogger(__name__)

Q_CREATE_SCHEMA = 'CREATE SCHEMA "{}" AUTHORIZATION "{}";'
Q_SET_SCHEMA_OWNER = 'ALTER SCHEMA "{}" OWNER TO "{}"; -- Previous owner: "{}"'
Q_SET_SCHEMA_OWNER = 'ALTER SCHEMA "{}" OWNER TO "{}"; -- Previous owner: "{}"'
Q_SET_OBJECT_OWNER = 'ALTER {} {} OWNER TO "{}"; -- Previous owner: "{}"'


def analyze_ownerships(spec, cursor, verbose):
    """ Compare desired schema / object ownerships in the spec against the database
    and return the list of SQL statements needed to reconcile them """
    logger.debug('Starting analyze_ownerships()')
    dbcontext = DatabaseContext(cursor, verbose)

    # Disable the progress bar (via an empty bar_template) when verbose output is
    # requested, otherwise the bar would get lost in the output
    bar_template = '' if verbose else common.PROGRESS_TEMPLATE
    with click.progressbar(spec.items(), label='Analyzing ownerships: ', bar_template=bar_template,
                           show_eta=False, item_show_func=common.item_show_func) as all_roles:
        statements = []
        for rolename, config in all_roles:
            if not config:
                continue

            if config.get('has_personal_schema'):
                # A personal schema is named after the role itself
                personal_objname = common.ObjectName.from_str(rolename)
                analyzer = SchemaAnalyzer(rolename=rolename, objname=personal_objname,
                                          dbcontext=dbcontext, is_personal_schema=True)
                statements.extend(analyzer.analyze())

            for objkind, objects_to_own in config.get('owns', {}).items():
                for objname in objects_to_own:
                    if objkind == 'schemas':
                        analyzer = SchemaAnalyzer(rolename=rolename, objname=objname,
                                                  dbcontext=dbcontext,
                                                  is_personal_schema=False)
                    else:
                        analyzer = NonschemaAnalyzer(rolename=rolename, objname=objname,
                                                     objkind=objkind, dbcontext=dbcontext)
                    statements.extend(analyzer.analyze())

    return statements


class NonschemaAnalyzer(object):
    """
    Analyze one object and determine (via .analyze()) any SQL statements that are
    necessary to make sure that the object has the correct owner.

    If the objname is schema.* then ownership for each of the objects (of kind objkind)
    in that schema will be verified and changed if necessary.
    """
    def __init__(self, rolename, objname, objkind, dbcontext):
        """
        Args:
            rolename (str): The name of the role that should own the object(s)

            objname (common.ObjectName): The object(s) to analyze

            objkind (str): The type of object. This must be one of the keys of
                context.PRIVILEGE_MAP, e.g. 'schemas', 'tables', etc.

            dbcontext (context.DatabaseContext): A context.DatabaseContext instance for getting
                information for the associated database
        """
        self.rolename = rolename
        self.objname = objname
        self.objkind = objkind
        self.dbcontext = dbcontext
        self.sql_to_run = []

    def expand_schema_objects(self, schema):
        """ Get all non-dependent objects of kind objkind within the specified schema """
        objects_by_schema = self.dbcontext.get_all_object_attributes().get(self.objkind, dict())
        schema_objects = objects_by_schema.get(schema, dict())
        return [name for name, attr in schema_objects.items() if not attr['is_dependent']]

    def analyze(self):
        """ Return ALTER ... OWNER TO statements for each managed object whose current
        owner differs from the desired role """
        if self.objname.unqualified_name == '*':
            objects_to_manage = self.expand_schema_objects(self.objname.schema)
        else:
            objects_to_manage = [self.objname]

        all_object_attributes = self.dbcontext.get_all_object_attributes()
        for objname in objects_to_manage:
            current_owner = all_object_attributes[self.objkind][self.objname.schema][objname]['owner']
            if current_owner != self.rolename:
                # e.g. 'tables' -> 'TABLE' for use in the ALTER statement
                obj_kind_singular = self.objkind.upper()[:-1]
                self.sql_to_run.append(Q_SET_OBJECT_OWNER.format(obj_kind_singular,
                                                                 objname.qualified_name,
                                                                 self.rolename, current_owner))

        return self.sql_to_run


class SchemaAnalyzer(object):
    """
    Analyze one schema and determine (via .analyze()) any SQL statements that are
    necessary to make sure that the schema exists, it has the correct owner, and if it is a
    personal schema that all objects in it (and that we track, i.e. the keys to the
    privileges.py module's PRIVILEGE_MAP) are owned by the correct schema owner
    """
    def __init__(self, rolename, objname, dbcontext, is_personal_schema=False):
        """
        Args:
            rolename (str): The name of the role that should own the schema

            objname (common.ObjectName): The schema to analyze

            dbcontext (context.DatabaseContext): A context.DatabaseContext instance for getting
                information for the associated database

            is_personal_schema (bool): Whether this is a personal schema
        """
        self.sql_to_run = []
        self.rolename = common.check_name(rolename)
        logger.debug('self.rolename set to {}'.format(self.rolename))
        self.objname = objname
        self.is_personal_schema = is_personal_schema

        self.current_owner = dbcontext.get_schema_owner(self.objname)
        self.schema_objects = dbcontext.get_schema_objects(self.objname)
        # If there is no owner then the schema must not exist yet
        self.exists = self.current_owner is not None

    def analyze(self):
        """ Return the SQL statements needed to create the schema and/or true up ownership """
        if not self.exists:
            self.create_schema()
        elif self.current_owner != self.rolename:
            self.set_owner()

        if self.is_personal_schema:
            # Make it true that all tables in the personal schema are owned by the schema owner
            for objkind, objname, prev_owner in self.get_improperly_owned_objects():
                self.alter_object_owner(objkind, objname, prev_owner)

        return self.sql_to_run

    def get_improperly_owned_objects(self):
        """ Return all objects that are not owned by this schema's owner and which are not
        auto-dependent (i.e. a sequence that is linked to a table, in which case its ownership
        derives from that linked table). Note that we only look at objects supported by
        pgbedrock (i.e. tables and sequences). Each entry returned is a tuple of the form
        (objkind, common.ObjectName, current_owner) """
        return [(item.kind, item.objname, item.owner)
                for item in self.schema_objects
                if item.owner != self.rolename and not item.is_dependent]

    def alter_object_owner(self, objkind, objname, prev_owner):
        # e.g. 'tables' -> 'TABLE' for use in the ALTER statement
        obj_kind_singular = objkind.upper()[:-1]
        self.sql_to_run.append(Q_SET_OBJECT_OWNER.format(obj_kind_singular,
                                                         objname.qualified_name,
                                                         self.rolename, prev_owner))

    def create_schema(self):
        self.sql_to_run.append(Q_CREATE_SCHEMA.format(self.objname.qualified_name, self.rolename))

    def set_owner(self):
        self.sql_to_run.append(Q_SET_SCHEMA_OWNER.format(self.objname.qualified_name,
                                                         self.rolename, self.current_owner))

# -------------------------------------------------------------------------------- /pgbedrock/privileges.py --------------------------------------------------------------------------------

import logging
import itertools

import click

from pgbedrock import common
from pgbedrock.context import DatabaseContext, PRIVILEGE_MAP


logger = logging.getLogger(__name__)


SKIP_SUPERUSER_PRIVILEGE_CONFIGURATION_MSG = '-- Skipping privilege configuration for superuser "{}"'
PERSONAL_SCHEMAS_ERROR_MSG = ("Unable to interpret reserved keyword 'personal_schemas' "
                              "for rolename '{}', object_kind '{}', access '{}'")
OBJECT_DOES_NOT_EXIST_ERROR_MSG = "{} '{}' requested for role \"{}\" does not exist"
OBJECTS_WITH_DEFAULTS = ('functions', 'tables', 'sequences', 'types')

Q_GRANT_NONDEFAULT = 'GRANT {} ON {} {} TO "{}";'
Q_REVOKE_NONDEFAULT = 'REVOKE {} ON {} {} FROM "{}";'
Q_GRANT_DEFAULT = """
SET ROLE "{}";
ALTER DEFAULT PRIVILEGES IN SCHEMA {} GRANT {} ON {} TO "{}";
RESET ROLE;
"""
Q_GRANT_DEFAULT = """
SET ROLE "{}";
ALTER DEFAULT PRIVILEGES IN SCHEMA {} GRANT {} ON {} TO "{}";
RESET ROLE;
"""
Q_REVOKE_DEFAULT = """
SET ROLE "{}";
ALTER DEFAULT PRIVILEGES IN SCHEMA {} REVOKE {} ON {} FROM "{}";
RESET ROLE;
"""


def analyze_privileges(spec, cursor, verbose):
    """ Compare each role's desired privileges against the database and return the
    list of GRANT / REVOKE statements needed to reconcile them.

    Each (role, object kind, access type) combination is analyzed independently by a
    PrivilegeAnalyzer; superuser roles are skipped since they bypass all checks. """
    logger.debug('Starting analyze_privileges()')
    dbcontext = DatabaseContext(cursor, verbose)

    # We disable the progress bar when showing verbose output (using '' as our bar_template)
    # otherwise the bar will get lost in the output
    bar_template = '' if verbose else common.PROGRESS_TEMPLATE
    with click.progressbar(spec.items(), label='Analyzing privileges: ', bar_template=bar_template,
                           show_eta=False, item_show_func=common.item_show_func) as all_roles:

        schema_writers = determine_schema_writers(spec)
        personal_schemas = determine_personal_schemas(spec)
        all_sql_to_run = []
        for rolename, config in all_roles:
            config = config or {}
            if dbcontext.is_superuser(rolename):
                all_sql_to_run.append(
                    SKIP_SUPERUSER_PRIVILEGE_CONFIGURATION_MSG.format(rolename)
                )
                continue
            all_desired_privs = config.get('privileges', {})

            for object_kind in PRIVILEGE_MAP.keys():
                desired_items_this_obj = all_desired_privs.get(object_kind, {})
                excepted_items_this_obj = desired_items_this_obj.get('except', [])

                for access in ('read', 'write'):
                    desired_items = desired_items_this_obj.get(access, [])
                    # If a write privilege is desired then read access is as well
                    if access == 'read':
                        desired_items += desired_items_this_obj.get('write', [])

                    privconf = PrivilegeAnalyzer(rolename=rolename,
                                                 access=access,
                                                 object_kind=object_kind,
                                                 desired_items=desired_items,
                                                 dbcontext=dbcontext,
                                                 schema_writers=schema_writers,
                                                 personal_schemas=personal_schemas,
                                                 excepted_items=excepted_items_this_obj)
                    role_sql_to_run = privconf.analyze()
                    all_sql_to_run += role_sql_to_run

    return all_sql_to_run


def determine_role_members(spec):
    """ Create a dict mapping from each role to all direct and indirect members of that role """
    return {role: get_members(role, spec) for role in spec.keys()}


def get_members(group, spec):
    """ Get all members of a group role, whether they are direct members or
    indirect members (i.e. members of members of this role, etc.) """
    members = set()
    for role, config in spec.items():
        if config and group in config.get('member_of', ()):
            members.add(role)
            # Recurse: members of this member are indirect members of the group
            sub_members = get_members(role, spec)
            members.update(sub_members)

    return members


def determine_personal_schemas(spec):
    """ Identify all roles with a personal schema (a schema named after the role).

    Returns:
        set: A set of ObjectName instances of personal schemas
    """
    personal_schemas = set()
    for role, config in spec.items():
        if config and common.parse_bool(config.get('has_personal_schema', False)):
            personal_schemas.add(common.ObjectName(role))

    return personal_schemas


def determine_schema_owners(spec):
    """ Create a dict of {ObjectName(schema): owner} from the spec's 'owns' sections
    and personal schemas """
    schema_owners = dict()
    for role, config in spec.items():
        if not config:
            continue

        if 'owns' in config:
            owned_schemas = config['owns'].get('schemas', ())
            for schema in owned_schemas:
                schema_owners[schema] = role

        # A role always owns its own personal schema
        if common.parse_bool(config.get('has_personal_schema', False)):
            schema_owners[common.ObjectName(role)] = role

    return schema_owners


def determine_superusers(spec):
    """ Return the set of role names declared as superusers in the spec """
    superusers = set()
    for role, config in spec.items():
        if not config:
            continue

        if common.parse_bool(config.get('is_superuser', False)):
            superusers.add(role)

    return superusers


def determine_schema_writers(spec):
    """
    Create a dict mapping from each schema to all roles that can create objects in that
    schema, i.e.:

    Returns:
        dict: A dict of the form {common.ObjectName(schema): {roleA, roleB, roleC}, ...}
    """
    members_of_role = determine_role_members(spec)
    personal_schemas = determine_personal_schemas(spec)
    schema_owners = determine_schema_owners(spec)

    # At a minimum, the schema owner could conceivably create objects
    writers = {schema: set([owner]) for schema, owner in schema_owners.items()}

    for role, config in spec.items():
        try:
            writable_schemas = set(config['privileges']['schemas']['write']) if config else set()
        except KeyError:
            writable_schemas = set()

        if common.ObjectName('personal_schemas') in writable_schemas:
            writable_schemas.remove(common.ObjectName('personal_schemas'))
            writable_schemas.update(personal_schemas)

        for schema in writable_schemas:
            # Bug fix: a role may have write access on a schema that no role in the
            # spec owns (e.g. a pre-existing schema); previously `writers[schema]`
            # raised a KeyError in that case. Create the entry on first sight instead.
            writer_set = writers.setdefault(schema, set())
            writer_set.add(role)
            # Members of a writer role can also write via that membership
            writer_set.update(members_of_role[role])

    # Superusers can write in any schema
    superusers = determine_superusers(spec)
    for vals in writers.values():
        vals.update(superusers)

    return writers


class PrivilegeAnalyzer(object):
    """ Analyze the privileges for one combination of role x access type x object kind (e.g.
    read-level table privileges for myrole1). Analysis is done via the .analyze() method
    and generates a set of SQL statements necessary to make the database match the desired
    set of items.
    """
183 | """ 184 | 185 | def __init__(self, rolename, access, object_kind, desired_items, schema_writers, 186 | personal_schemas, dbcontext, excepted_items): 187 | log_msg = 'Initializing PrivilegeAnalyzer for rolename "{}", access "{}", and object "{}"' 188 | logger.debug(log_msg.format(rolename, access, object_kind)) 189 | self.sql_to_run = [] 190 | self.rolename = common.check_name(rolename) 191 | 192 | self.access = access 193 | self.object_kind = object_kind 194 | self.desired_items = desired_items 195 | self.excepted_items = excepted_items 196 | self.schema_writers = schema_writers 197 | self.personal_schemas = personal_schemas 198 | self.default_acl_possible = self.object_kind in OBJECTS_WITH_DEFAULTS 199 | 200 | self.current_defaults = dbcontext.get_role_current_defaults(rolename, object_kind, access) 201 | self.current_nondefaults = dbcontext.get_role_current_nondefaults(rolename, object_kind, access) 202 | 203 | self.all_object_attrs = dbcontext.get_all_object_attributes() 204 | 205 | def analyze(self): 206 | self.identify_desired_objects() 207 | self.analyze_nondefaults() 208 | 209 | if self.default_acl_possible: 210 | self.analyze_defaults() 211 | 212 | return self.sql_to_run 213 | 214 | def analyze_defaults(self): 215 | """ Analyze default privileges. 
Note that we sort the grants / revokes before issuing 216 | them so the output will be more organized, making it easier for the end user to read """ 217 | defaults_to_grant = self.desired_defaults.difference(self.current_defaults) 218 | logger.debug('defaults_to_grant: {}'.format(defaults_to_grant)) 219 | for grantor, schema, pg_priv_kind in sorted(defaults_to_grant): 220 | self.grant_default(grantor, schema, pg_priv_kind) 221 | 222 | defaults_to_revoke = self.current_defaults.difference(self.desired_defaults) 223 | logger.debug('defaults_to_revoke: {}'.format(defaults_to_revoke)) 224 | for grantor, schema, pg_priv_kind in sorted(defaults_to_revoke): 225 | self.revoke_default(grantor, schema, pg_priv_kind) 226 | 227 | def analyze_nondefaults(self): 228 | """ Analyze non-default privileges. Note that we sort the grants / revokes before issuing 229 | them so the output will be more organized, making it easier for the end user to read """ 230 | nondefaults_to_grant = self.desired_nondefaults.difference(self.current_nondefaults) 231 | logger.debug('nondefaults_to_grant: {}'.format(nondefaults_to_grant)) 232 | if nondefaults_to_grant: 233 | for objname, pg_priv_kind in sorted(nondefaults_to_grant): 234 | self.grant_nondefault(objname, pg_priv_kind) 235 | 236 | nondefaults_to_revoke = self.current_nondefaults.difference(self.desired_nondefaults) 237 | logger.debug('nondefaults_to_revoke: {}'.format(nondefaults_to_revoke)) 238 | if nondefaults_to_revoke: 239 | for objname, pg_priv_kind in sorted(nondefaults_to_revoke): 240 | self.revoke_nondefault(objname, pg_priv_kind) 241 | 242 | def determine_desired_defaults(self, schemas): 243 | """ 244 | For any given schema, we want to grant default privileges to this role from each role 245 | that can write in that schema. We cross this against all privilege types. 
246 | 247 | Args: 248 | schemas (set): A set of common.ObjectNames instances representing schemas 249 | """ 250 | self.desired_defaults = set() 251 | for schema in schemas: 252 | writers = self.schema_writers[schema] 253 | for writer in writers: 254 | # We don't need to grant default privileges for things this role will create 255 | if writer == self.rolename: 256 | continue 257 | for pg_priv_kind in PRIVILEGE_MAP[self.object_kind][self.access]: 258 | self.desired_defaults.add(tuple([writer, schema, pg_priv_kind])) 259 | 260 | def get_object_owner(self, objname, objkind=None): 261 | objkind = objkind or self.object_kind 262 | object_owners = self.all_object_attrs.get(objkind, dict()).get(objname.schema, dict()) 263 | owner = object_owners.get(objname, dict()).get('owner', None) 264 | if owner: 265 | return owner 266 | else: 267 | obj_kind_singular = objkind[:-1] 268 | common.fail(OBJECT_DOES_NOT_EXIST_ERROR_MSG.format(obj_kind_singular, 269 | objname.qualified_name, 270 | self.rolename)) 271 | 272 | def get_schema_objects(self, schema): 273 | """ Get all objects of kind self.object_kind which are in the given schema and not owned by 274 | self.rolename """ 275 | object_owners = self.all_object_attrs.get(self.object_kind, dict()).get(schema, dict()) 276 | return {objname for objname, attr in object_owners.items() if attr['owner'] != self.rolename} 277 | 278 | def grant_default(self, grantor, schema, privilege): 279 | query = Q_GRANT_DEFAULT.format(grantor, schema.qualified_name, privilege, 280 | self.object_kind.upper(), self.rolename) 281 | self.sql_to_run.append(query) 282 | 283 | def grant_nondefault(self, objname, privilege): 284 | obj_kind_singular = self.object_kind.upper()[:-1] 285 | query = Q_GRANT_NONDEFAULT.format(privilege, obj_kind_singular, 286 | objname.qualified_name, self.rolename) 287 | self.sql_to_run.append(query) 288 | 289 | def identify_desired_objects(self): 290 | """ 291 | Create the sets of desired privileges. 
    def identify_desired_objects(self):
        """
        Create the sets of desired privileges. The sets will look like the following:

        self.desired_nondefaults:
            {(ObjectName(schema, unqualified_name), priv_name), ...}
            Example: {('myschema.mytable', 'SELECT'), ...}

        self.desired_defaults:
            {(grantor, schema, priv_name), ...}
            Example: {('svc-hr-etl', 'hr_schema', 'SELECT'), ...}
        """
        desired_nondefault_objs = set()
        schemas = []
        for objname in self.desired_items:
            if objname == common.ObjectName('personal_schemas') and self.object_kind == 'schemas':
                # 'personal_schemas' expands to each existing personal schema
                desired_nondefault_objs.update(self.personal_schemas)
            elif objname == common.ObjectName('personal_schemas') and self.object_kind != 'schemas':
                # The end-user is asking something impossible
                common.fail(PERSONAL_SCHEMAS_ERROR_MSG.format(self.rolename, self.object_kind, self.access))
            elif objname == common.ObjectName('personal_schemas', '*'):
                # 'personal_schemas.*' means all objects within every personal schema
                schemas.extend(self.personal_schemas)
            elif objname.unqualified_name != '*':
                # This is a single non-default privilege ask
                owner = self.get_object_owner(objname)
                if owner != self.rolename:
                    desired_nondefault_objs.add(objname)
            else:
                # We were given a schema.*; we'll process those below
                schemas.append(objname.only_schema())

        for schema in schemas:
            # For schemas, we wish to have privileges for all existing objects, so get all
            # existing objects not owned by this role and add them to self.desired_nondefaults
            schema_objects = self.get_schema_objects(schema.qualified_name)
            desired_nondefault_objs.update(schema_objects)

        # Remove excepted elements
        desired_nondefault_objs.difference_update(self.excepted_items)

        # Cross our desired objects with the desired privileges
        priv_types = PRIVILEGE_MAP[self.object_kind][self.access]
        self.desired_nondefaults = set(itertools.product(desired_nondefault_objs, priv_types))

        if self.default_acl_possible:
            self.determine_desired_defaults(schemas)

    def revoke_default(self, grantor, schema, privilege):
        # Queue a default-privilege REVOKE; executed later when sql_to_run is applied
        query = Q_REVOKE_DEFAULT.format(grantor, schema.qualified_name, privilege,
                                        self.object_kind.upper(), self.rolename)
        self.sql_to_run.append(query)

    def revoke_nondefault(self, objname, privilege):
        # e.g. 'TABLES' -> 'TABLE' for use inside the REVOKE statement
        obj_kind_singular = self.object_kind.upper()[:-1]
        query = Q_REVOKE_NONDEFAULT.format(privilege, obj_kind_singular,
                                           objname.qualified_name, self.rolename)
        self.sql_to_run.append(query)
23 | 'pgbedrock automatically grants read access when write access is requested.{}' 24 | ) 25 | UNKNOWN_OBJECTS_MSG = ('Spec error: Unknown {objkind} found: {unknown_objects}\n' 26 | 'Please manually add these {objkind} to the database or ' 27 | 'remove them from the spec file') 28 | UNOWNED_OBJECTS_MSG = ('Spec error: Unowned {objkind} found: {unowned_objects}\n' 29 | 'Please add these {objkind} to the spec file or manually remove ' 30 | 'them from the Postgres cluster') 31 | UNDOCUMENTED_ROLES_MSG = ('Spec error: Undocumented roles found: {}.\n' 32 | 'Please add these roles to the spec file or manually remove ' 33 | 'them from the Postgres cluster') 34 | UNOWNED_SCHEMAS_MSG = ('Spec error: Schemas found in database with no owner in spec: {}\n' 35 | 'Please add these schemas to the spec file or manually remove ' 36 | 'them from the Postgres cluster') 37 | EXCEPTED_SCHEMAS_MSG = ('Spec error: Schema found with except privilege for role: {}\n' 38 | 'Except may only be used for tables and sequences ') 39 | VALIDATION_ERR_MSG = 'Spec error: Role "{}", field "{}": {}' 40 | 41 | SPEC_SCHEMA_YAML = """ 42 | can_login: 43 | type: boolean 44 | has_personal_schema: 45 | type: boolean 46 | is_superuser: 47 | type: boolean 48 | attributes: 49 | type: list 50 | schema: 51 | type: string 52 | forbidden: 53 | - LOGIN 54 | - NOLOGIN 55 | - SUPERUSER 56 | - NOSUPERUSER 57 | member_of: 58 | type: list 59 | schema: 60 | type: string 61 | owns: 62 | type: dict 63 | allowed: 64 | - schemas 65 | - tables 66 | - sequences 67 | valueschema: 68 | type: list 69 | schema: 70 | type: string 71 | privileges: 72 | type: dict 73 | allowed: 74 | - schemas 75 | - sequences 76 | - tables 77 | valueschema: 78 | type: dict 79 | allowed: 80 | - read 81 | - write 82 | - except 83 | valueschema: 84 | type: list 85 | schema: 86 | type: string 87 | """ 88 | 89 | 90 | def convert_spec_to_objectnames(spec): 91 | """ Convert object names in a loaded spec from strings to ObjectName instances 92 | 93 | This 
def convert_spec_to_objectnames(spec):
    """ Convert object names in a loaded spec from strings to ObjectName instances

    This converts items in the following sublists, if those sublists exist:
        role -> owns -> objkind
        role -> privileges -> objkind -> read
        role -> privileges -> objkind -> write
    """
    # Deep copy so the caller's (string-based) spec is left untouched
    output_spec = copy.deepcopy(spec)
    for role, config in output_spec.items():
        if not config:
            continue

        for objkind, owned_items in config.get('owns', {}).items():
            if not owned_items:
                continue
            converted = [common.ObjectName.from_str(item) for item in owned_items]
            config['owns'][objkind] = converted

        for objkind, perm_dicts in config.get('privileges', {}).items():
            for priv_kind, granted_items in perm_dicts.items():
                if not granted_items:
                    continue
                converted = [common.ObjectName.from_str(item) for item in granted_items]
                config['privileges'][objkind][priv_kind] = converted

    return output_spec


def ensure_no_object_owned_twice(spec, dbcontext, objkind):
    """ Check spec for objects of objkind with multiple owners.

    Returns a list of error-message strings, one per object claimed by more than one role
    (empty list if there are no conflicts).
    """
    all_db_objects = dbcontext.get_all_object_attributes().get(objkind, dict())

    # Maps each object to every role that claims ownership of it
    object_ownerships = defaultdict(list)
    for rolename, config in spec.items():
        if not config:
            continue

        # A personal schema implicitly makes the role owner of everything in it
        if config.get('has_personal_schema'):
            schema_objects = all_db_objects.get(rolename, dict())
            nondependent_objects = [name for name, attr in schema_objects.items() if not attr['is_dependent']]
            for obj in nondependent_objects:
                object_ownerships[obj].append(rolename)

        if not config.get('owns') or not config['owns'].get(objkind):
            continue

        role_owned_objects = config['owns'][objkind]
        for objname in role_owned_objects:
            if objname.unqualified_name == '*':
                # schema.* expands to every non-dependent object currently in that schema
                schema_objects = all_db_objects.get(objname.schema, dict())
                nondependent_objects = [name for name, attr in schema_objects.items() if not attr['is_dependent']]
                for obj in nondependent_objects:
                    object_ownerships[obj].append(rolename)
            else:
                object_ownerships[objname].append(rolename)

    error_messages = []
    for objname, owners in object_ownerships.items():
        if len(owners) > 1:
            owners_formatted = ", ".join(sorted(owners))
            error_messages.append(MULTIPLE_OBJKIND_OWNER_ERR_MSG.format(objkind[:-1].capitalize(),
                                                                        objname.qualified_name,
                                                                        owners_formatted))

    return error_messages
""" 160 | schema_ownerships = defaultdict(list) 161 | for rolename, config in spec.items(): 162 | if not config: 163 | continue 164 | if config.get('has_personal_schema'): 165 | # Indicates a role has a personal schema with its same name 166 | schema_ownerships[common.ObjectName(rolename)].append(rolename) 167 | if config.get('owns') and config['owns'].get('schemas'): 168 | role_owned_schemas = config['owns']['schemas'] 169 | for schema in role_owned_schemas: 170 | schema_ownerships[schema].append(rolename) 171 | 172 | error_messages = [] 173 | for schema, owners in schema_ownerships.items(): 174 | if len(owners) > 1: 175 | owners_formatted = ", ".join(sorted(owners)) 176 | error_messages.append(MULTIPLE_SCHEMA_OWNER_ERR_MSG.format(schema.qualified_name, 177 | owners_formatted)) 178 | 179 | return error_messages 180 | 181 | 182 | def ensure_no_redundant_privileges(spec): 183 | """ 184 | Verify objects aren't defined in both read and write privilege sections for a given role. 185 | """ 186 | multi_refs = defaultdict(dict) 187 | for rolename, config in spec.items(): 188 | if config and config.get('privileges'): 189 | for obj in config['privileges']: 190 | try: 191 | reads = set(config['privileges'][obj]['read']) 192 | writes = set(config['privileges'][obj]['write']) 193 | duplicates = reads.intersection(writes) 194 | if duplicates: 195 | multi_refs[rolename][obj] = list(duplicates) 196 | except KeyError: 197 | continue 198 | 199 | if multi_refs: 200 | # Convert ObjectNames back to strings to print out in the error message 201 | for rolename, mapped_duplicates in multi_refs.items(): 202 | for objkind, duplicate_objects in mapped_duplicates.items(): 203 | multi_refs[rolename][objkind] = [dup.qualified_name for dup in duplicate_objects] 204 | 205 | multi_ref_strings = ["%s: %s" % (k, v) for k, v in multi_refs.items()] 206 | multi_ref_err_string = "\n\t".join(multi_ref_strings) 207 | return [OBJECT_REF_READ_WRITE_ERR.format(multi_ref_err_string)] 208 | 209 | return [] 
def ensure_no_duplicate_roles(rendered_spec_template):
    """
    Ensure that no roles are declared multiple times.

    In a spec template, if a role is declared more than once there exists a risk that the
    re-declaration will override the desired configuration. pgbedrock considers a config
    containing this risk to be invalid and will throw an error.

    To accomplish this, the yaml.loader.Loader object is used to convert the spec template
    into a document tree. Then, the root object's child nodes (which are the roles) are
    checked for duplicates.

    Outputs a list of strings. The decision to return a list of strings was deliberate,
    despite the fact that the length of the list can at most be one. The reason for this is
    that the other spec verification functions also return a list of strings. This return
    signature consistency makes the code in the verify_spec function cleaner.
    """
    loader = yaml.loader.Loader(rendered_spec_template)
    document_tree = loader.get_single_node()
    if document_tree is None:
        # An empty spec has no duplicates. Returning [] (not None) keeps the signature
        # consistent with the other checks; verify_spec does `error_messages += ...`,
        # which would raise a TypeError if None were returned here.
        return []

    role_definitions = defaultdict(int)
    for node in document_tree.value:
        # Each top-level mapping node is (key_node, value_node); key is the role name
        role_definitions[node[0].value] += 1
    multi_defined_roles = [k for k, v in role_definitions.items() if v > 1]
    if multi_defined_roles:
        # ", " (not " ,") so the message reads naturally: "a, b" rather than "a ,b"
        multi_roles_fmtd = ", ".join(multi_defined_roles)
        return [DUPLICATE_ROLE_DEFINITIONS_ERR_MSG.format(multi_roles_fmtd)]

    return []
def ensure_no_undocumented_roles(spec, dbcontext):
    """
    Ensure that all roles in the database are documented within the spec. This is done
    (vs. having pgbedrock assume it should delete these roles) because the roles may own
    schemas, tables, functions, etc. There's enough going on that if the user just made a
    mistake by forgetting to add a role to their spec then we've caused serious damage;
    better to throw an error and ask the user to manually resolve this.
    """
    db_roles = set(dbcontext.get_all_role_attributes().keys())
    undocumented = db_roles.difference(spec.keys())
    if not undocumented:
        return []

    formatted = '"' + '", "'.join(sorted(undocumented)) + '"'
    return [UNDOCUMENTED_ROLES_MSG.format(formatted)]


def ensure_no_missing_objects(spec, dbcontext, objkind):
    """
    Ensure that all objects of kind objkind in the database are documented within the spec,
    and vice versa. This is done for two reasons:

    Object defined in database but not in spec
        pgbedrock could delete the object, but that is hard to reverse; better to throw an
        error and ask the user to manually resolve this.

    Object defined in spec but not in database
        pgbedrock cannot create the object as it doesn't know the DDL the object should
        have; the only real option is to alert the user to the mismatch.
    """
    # Non-dependent objects of this kind that currently exist in the database
    db_objects = {obj.objname for obj in dbcontext.get_all_raw_object_attributes()
                  if obj.kind == objkind and not obj.is_dependent}

    db_objects_by_schema = dbcontext.get_all_object_attributes().get(objkind, dict())

    def _nondependent_in(schema_key):
        # All non-dependent objects the database reports for the given schema
        schema_objects = db_objects_by_schema.get(schema_key, dict())
        return [name for name, attr in schema_objects.items() if not attr['is_dependent']]

    spec_objects = set()
    for rolename, config in spec.items():
        if not config:
            continue

        # A personal schema implicitly covers everything in the role's own schema
        if config.get('has_personal_schema'):
            spec_objects.update(_nondependent_in(rolename))

        owned = config.get('owns') or {}
        for objname in owned.get(objkind) or []:
            if objname.unqualified_name == '*':
                # schema.* expands to every non-dependent object in that schema
                spec_objects.update(_nondependent_in(objname.schema))
            else:
                spec_objects.add(objname)

    error_messages = []

    not_in_db = spec_objects.difference(db_objects)
    if not_in_db:
        unknown_objects = ', '.join(sorted(o.qualified_name for o in not_in_db))
        error_messages.append(UNKNOWN_OBJECTS_MSG.format(objkind=objkind,
                                                         unknown_objects=unknown_objects))

    not_in_spec = db_objects.difference(spec_objects)
    if not_in_spec:
        unowned_objects = ', '.join(sorted(o.qualified_name for o in not_in_spec))
        error_messages.append(UNOWNED_OBJECTS_MSG.format(objkind=objkind,
                                                         unowned_objects=unowned_objects))

    return error_messages
def ensure_no_unowned_schemas(spec, dbcontext):
    """
    Ensure that all schemas in the database are documented within the spec. This is done
    (vs. having pgbedrock assume it should delete these schemas) because the schema likely
    contains tables, those tables may contain permissions, etc. There's enough going on that
    if the user just made a mistake by forgetting to add a schema to their spec we've caused
    serious damage; better to throw an error and ask the user to manually resolve this.
    """
    db_schemas = set(dbcontext.get_all_schemas_and_owners().keys())
    undocumented = db_schemas.difference(get_spec_schemas(spec))
    if not undocumented:
        return []

    names = sorted(objname.qualified_name for objname in undocumented)
    formatted = '"' + '", "'.join(names) + '"'
    return [UNOWNED_SCHEMAS_MSG.format(formatted)]


def ensure_no_dependent_object_is_owned(spec, dbcontext, objkind):
    """ Flag spec-declared ownership of dependent objects; their ownership derives from the
    object they depend on, so they must not appear in the spec's ownership sections. """
    all_db_objects = dbcontext.get_all_object_attributes().get(objkind, dict())
    dependent_owned = []
    for rolename, config in spec.items():
        owned = (config or {}).get('owns') or {}
        for objname in owned.get(objkind) or []:
            if objname.unqualified_name == '*':
                continue

            try:
                is_dependent = all_db_objects[objname.schema][objname]['is_dependent']
            except KeyError:
                # This object is missing in the db; that condition already being checked elsewhere
                continue

            if is_dependent:
                dependent_owned.append(objname)

    if not dependent_owned:
        return []

    dep_objs = ', '.join(sorted(o.qualified_name for o in dependent_owned))
    return [DEPENDENT_OBJECTS_MSG.format(objkind=objkind, dep_objs=dep_objs)]


def ensure_valid_schema(spec):
    """ Ensure spec has no schema errors """
    errors = []
    validator = cerberus.Validator(yaml.safe_load(SPEC_SCHEMA_YAML))
    for rolename, config in spec.items():
        if not config:
            continue
        validator.validate(config)
        for field, err_msgs in validator.errors.items():
            errors.append(VALIDATION_ERR_MSG.format(rolename, field, err_msgs[0]))

    return errors


def get_spec_schemas(spec):
    """ Get all personal and non-personal schemas defined in the spec file """
    schemas = []
    for rolename, config in spec.items():
        config = config or {}
        schemas.extend(config.get('owns', {}).get('schemas', []))

        if config.get('has_personal_schema'):
            # A personal schema carries the role's own name
            schemas.append(common.ObjectName(rolename))

    return set(schemas)


def print_spec(spec_path):
    """ Render and schema-validate the spec at spec_path, returning the loaded spec.

    Unlike load_spec, this does not verify the spec against a live database.
    """
    rendered_template = render_template(spec_path)
    raw_spec = yaml.safe_load(rendered_template)

    # Validate the schema before anything else. If the spec is invalid then other checks
    # may fail in erratic ways, so it is better to error out here
    schema_errors = ensure_valid_schema(raw_spec)
    if schema_errors:
        common.fail('\n'.join(schema_errors))

    return convert_spec_to_objectnames(raw_spec)
""" 425 | rendered_template = render_template(spec_path) 426 | unconverted_spec = yaml.safe_load(rendered_template) 427 | 428 | # Validate the schema before verifying anything else about the spec. If the spec is invalid 429 | # then other checks may fail in erratic ways, so it is better to error out here 430 | error_messages = ensure_valid_schema(unconverted_spec) 431 | if error_messages: 432 | common.fail('\n'.join(error_messages)) 433 | 434 | spec = convert_spec_to_objectnames(unconverted_spec) 435 | verify_spec(rendered_template, spec, cursor, verbose, attributes, memberships, 436 | ownerships, privileges) 437 | return spec 438 | 439 | 440 | def render_template(path): 441 | """ Load a spec. There may be templated password variables, which we render using Jinja. """ 442 | try: 443 | dir_path, filename = os.path.split(path) 444 | environment = jinja2.Environment(loader=jinja2.FileSystemLoader(dir_path), 445 | undefined=jinja2.StrictUndefined) 446 | loaded = environment.get_template(filename) 447 | rendered = loaded.render(env=os.environ) 448 | except jinja2.exceptions.TemplateNotFound as err: 449 | common.fail(FILE_OPEN_ERROR_MSG.format(path, err)) 450 | except jinja2.exceptions.UndefinedError as err: 451 | common.fail(MISSING_ENVVAR_MSG.format(err)) 452 | else: 453 | return rendered 454 | 455 | 456 | def verify_spec(rendered_template, spec, cursor, verbose, attributes, memberships, ownerships, 457 | privileges): 458 | assert isinstance(spec, dict) 459 | dbcontext = context.DatabaseContext(cursor, verbose) 460 | 461 | error_messages = [] 462 | 463 | # Having all roles represented exactly once is critical for all submodules 464 | # so we check this regardless of which submodules are being used 465 | error_messages += ensure_no_duplicate_roles(rendered_template) 466 | error_messages += ensure_no_undocumented_roles(spec, dbcontext) 467 | error_messages += ensure_no_except_on_schema(spec) 468 | 469 | if ownerships: 470 | for objkind in context.PRIVILEGE_MAP.keys(): 
471 | if objkind == 'schemas': 472 | error_messages += ensure_no_unowned_schemas(spec, dbcontext) 473 | error_messages += ensure_no_schema_owned_twice(spec) 474 | else: 475 | # We run each of these functions once per object kind as it is possible that 476 | # two objects of different kinds could have the same name in the same schema 477 | error_messages += ensure_no_missing_objects(spec, dbcontext, objkind) 478 | error_messages += ensure_no_object_owned_twice(spec, dbcontext, objkind) 479 | error_messages += ensure_no_dependent_object_is_owned(spec, dbcontext, objkind) 480 | 481 | if privileges: 482 | error_messages += ensure_no_redundant_privileges(spec) 483 | 484 | if error_messages: 485 | common.fail('\n'.join(error_messages)) 486 | 487 | def ensure_no_except_on_schema(spec): 488 | error_messages = [] 489 | for rolename, config in spec.items(): 490 | if config and config.get('privileges'): 491 | if config['privileges'].get('schemas') and config['privileges']['schemas'].get('except') and config['privileges']['schemas']['excepted']: 492 | error_messages.append(EXCEPTED_SCHEMAS_MSG.format(rolename)) 493 | return error_messages -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pytest==3.1.3 2 | pytest-cov==2.5.1 3 | -r requirements-docs.txt 4 | wheel==0.33.6 5 | -------------------------------------------------------------------------------- /requirements-docs.txt: -------------------------------------------------------------------------------- 1 | sphinx>=1.7.4 2 | sphinx_rtd_theme>=0.3.1 3 | -------------------------------------------------------------------------------- /requirements-publish.txt: -------------------------------------------------------------------------------- 1 | twine==3.1.1 2 | -------------------------------------------------------------------------------- /requirements.txt: 
def ensure_one_level_of_quotes(text):
    # Converts '"foo"' to 'foo'
    return str(ast.literal_eval(text))


def get_version():
    """ Based on the functionality in pallets/click's setup.py
    (https://github.com/pallets/click/blob/master/setup.py) """
    version_pattern = re.compile(r'__version__\s+=\s+(.*)')
    with open('pgbedrock/__init__.py', 'rb') as f:
        contents = f.read().decode('utf-8')
        # The matched group is a quoted literal, e.g. '"0.4.1"'; strip one quote level
        version = ensure_one_level_of_quotes(version_pattern.search(contents).group(1))
    return version
:: Python', 45 | 'Programming Language :: Python :: 2', 46 | 'Programming Language :: Python :: 3', 47 | 'Programming Language :: Python :: 2.7', 48 | 'Programming Language :: Python :: 3.6', 49 | ], 50 | ) 51 | -------------------------------------------------------------------------------- /tests/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTHON_VERSION 2 | FROM python:$PYTHON_VERSION 3 | 4 | VOLUME /opt 5 | WORKDIR /opt 6 | 7 | COPY . /tmp/ 8 | RUN pip install /tmp/ && \ 9 | pip install -r /tmp/requirements-dev.txt 10 | 11 | # We generate a coverage report in order to send this to coveralls from Travis CI. We also 12 | # specify `--cov-report=` do not show the report in all of our output 13 | CMD ["python", "-m", "pytest", "/opt/tests", "--cov", "/opt/pgbedrock", "--cov-report="] 14 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | import os 4 | import sys 5 | from textwrap import dedent 6 | 7 | import psycopg2 8 | from psycopg2 import extras # access via psycopg2.extras doesn't work so this import is needed 9 | import pytest 10 | 11 | # Add the package to the Python path so just running `pytest` from the top-level dir works 12 | # This is also necessary in order to import pgbedrock 13 | HERE = os.path.abspath(os.path.dirname(__file__)) 14 | sys.path.insert(0, os.path.dirname(HERE)) 15 | 16 | # Set log level to DEBUG because pgbedrock by default only logs at INFO level and above 17 | from pgbedrock import LOG_FORMAT 18 | logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) 19 | logger = logging.getLogger(__name__) 20 | 21 | 22 | Q_GET_ROLE_ATTRIBUTE = "SELECT {} FROM pg_authid WHERE rolname='{}';" 23 | NEW_USER = 'foobar' 24 | 25 | 26 | @pytest.fixture(scope='session') 27 | def db_config(): 28 | """ 29 | db config assumes you 
@pytest.fixture(scope='function')
def cursor(request, db_config):
    """ Yield a DictCursor on a fresh connection. Any SQL passed via indirect
    parametrization (see run_setup_sql) is executed before the test body runs;
    the transaction is rolled back and the connection closed on teardown. """
    db_connection = psycopg2.connect(**db_config)
    cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

    # Setup queries arrive via indirect parametrization, e.g. from run_setup_sql()
    if hasattr(request, 'param'):
        for query in request.param:
            logger.debug('Executing query: {}'.format(query))
            cursor.execute(query)

    yield cursor
    db_connection.rollback()
    db_connection.close()


@pytest.fixture(scope='function')
def drop_users_and_objects(cursor):
    """ Remove committed users and objects after a test run. To do just a teardown we have to
    yield the empty fixture first """
    yield
    # Everything except the baseline roles was created by the test and must be removed
    cursor.execute("""
        SELECT rolname
        FROM pg_authid
        WHERE rolname NOT IN (
            'test_user', 'postgres', 'pg_signal_backend',
            -- Roles introduced in Postgres 10:
            'pg_monitor', 'pg_read_all_settings', 'pg_read_all_stats', 'pg_stat_scan_tables'
        );
        """)
    users = [u[0] for u in cursor.fetchall()]
    for user in users:
        cursor.execute('DROP OWNED BY "{0}" CASCADE; DROP ROLE "{0}"'.format(user))
    cursor.connection.commit()


@pytest.fixture
def base_spec(cursor):
    """ A spec with the existing state of the test database before anything has been done """
    spec = dedent("""
        postgres:
            attributes:
                - BYPASSRLS
                - CREATEDB
                - CREATEROLE
                - REPLICATION
            can_login: true
            is_superuser: true
            owns:
                schemas:
                    - information_schema
                    - pg_catalog
                    - public
                tables:
                    - information_schema.*
                    - pg_catalog.*
            privileges:
                schemas:
                    write:
                        - information_schema
                        - pg_catalog
                        - public

        test_user:
            attributes:
                - PASSWORD "test_password"
            can_login: yes
            is_superuser: yes
        """)

    # Postgres 10 introduces several new roles that we have to account for
    cursor.execute("SELECT substring(version from 'PostgreSQL ([0-9.]*) ') FROM version()")
    pg_version = cursor.fetchone()[0]
    if pg_version.startswith('10.'):
        spec += dedent("""

            pg_read_all_settings:

            pg_stat_scan_tables:

            pg_read_all_stats:

            pg_monitor:
                member_of:
                    - pg_read_all_settings
                    - pg_stat_scan_tables
                    - pg_read_all_stats
            """)

    return spec
@pytest.fixture
def spec_with_new_user(tmpdir, base_spec):
    """ Write base_spec plus one new user to a temp spec file and return its path. """
    # NOTE: if the test_password isn't provided here we end up changing our
    # test_user's password for real in the test_configure_live_mode_works
    spec = copy.copy(base_spec)
    spec += dedent("""
        {new_user}:
            has_personal_schema: yes
            member_of:
                - postgres
            privileges:
                tables:
                    read:
                        - pg_catalog.pg_class
        """.format(new_user=NEW_USER))

    spec_path = tmpdir.join('spec.yml')
    spec_path.write(spec)
    return spec_path.strpath


@pytest.fixture()
def mockdbcontext():
    """ Create a mock DatabaseContext that returns None for any method call that
    has not been overwritten """
    class MockDatabaseContext(object):
        def __getattr__(self, val):
            # Every attribute access yields a no-op callable returning None
            def empty_func(*args, **kwargs):
                return None

            return empty_func

    return MockDatabaseContext()


def quoted_object(schema, rest):
    """ All objects that pgbedrock works with will be double-quoted after the schema. Anything we
    work with in our test suite needs to behave similarly. """
    return '{}."{}"'.format(schema, rest)


def run_setup_sql(statement):
    """
    Take a SQL statement and have cursor execute it before the test begins. This is intended to
    separate highly bespoke test setup more fully from test execution
    """
    # Indirect parametrization routes the statement into the cursor fixture above
    return pytest.mark.parametrize('cursor', [statement], indirect=True)
ROLE1 = 'charlie'
ROLE2 = 'barney'
ROLE3 = 'wacko'
# Non-default attributes as they would be written in a spec file...
NON_DEFAULTS_GIVEN = ['CREATEDB', 'CREATEROLE', 'REPLICATION', 'CONNECTION LIMIT 30',
                      'VALID UNTIL 2016-08-04']
# ...and the pg_authid column values they are expected to translate to
NON_DEFAULTS_EXPECTED = {'rolcreatedb': True,
                         'rolcreaterole': True,
                         'rolreplication': True,
                         'rolconnlimit': 30,
                         'rolvaliduntil': dt.date(2016, 8, 4)}
DUMMY = 'foo'


def nondefault_attributes(opts):
    # Indirectly parametrize the roleconf fixture below with non-default role attributes
    return pytest.mark.parametrize('roleconf', [opts], indirect=True)


@pytest.fixture(scope='function')
def roleconf(request, mockdbcontext):
    """ Build an AttributeAnalyzer for ROLE1 whose dbcontext reports DEFAULT_ATTRIBUTES,
    optionally overridden via indirect parametrization (see nondefault_attributes). """
    # We need this ternary expression in case we were indirectly passed non-default attributes
    nondefault_attributes = request.param if hasattr(request, 'param') else {}

    # Apply any non-default attributes to what mockdbcontext will return
    role_attributes = copy.deepcopy(attr.DEFAULT_ATTRIBUTES)
    role_attributes.update(nondefault_attributes)
    mockdbcontext.get_role_attributes = lambda x: role_attributes

    roleconf = attr.AttributeAnalyzer(rolename=ROLE1, spec_attributes={}, dbcontext=mockdbcontext)
    return roleconf


@run_setup_sql([
    attr.Q_CREATE_ROLE.format(ROLE1),
])
def test_analyze_attributes_modifying_objects(capsys, cursor):
    """
    End-to-end test.
    ROLE1 exists and has some non-defaults
    ROLE2 does not exist yet and is a superuser
    ROLE3 does not exist and has the defaults
    """
    attributes = ['BYPASSRLS', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION']
    spec = {
        # Add the existing users so we don't get an UNDOCUMENTED_ROLES failure
        'postgres': {'is_superuser': True, 'attributes': attributes},
        'test_user': {'is_superuser': True, 'attributes': attributes},

        ROLE1: {'can_login': False, 'attributes': NON_DEFAULTS_GIVEN},
        ROLE2: {'is_superuser': True, 'attributes': NON_DEFAULTS_GIVEN},
        ROLE3: {},
    }

    # Generate all of the ALTER ROLE statements for NON_DEFAULTS_GIVEN
    expected = set([])
    for role in (ROLE1, ROLE2):
        for k, v in NON_DEFAULTS_EXPECTED.items():

            if isinstance(v, bool):
                base_keyword = attr.COLUMN_NAME_TO_KEYWORD[k]
                # prepend 'NO' if desired_value is False
                keyword = base_keyword if v is True else ('NO' + base_keyword)
                stmt = attr.Q_ALTER_ROLE.format(role, keyword)
            elif k == 'rolconnlimit':
                stmt = attr.Q_ALTER_CONN_LIMIT.format(role, v, attr.DEFAULT_ATTRIBUTES[k])
            elif k == 'rolvaliduntil':
                stmt = attr.Q_ALTER_VALID_UNTIL.format(role, v, attr.DEFAULT_ATTRIBUTES[k])

            expected.add(stmt)

    # ROLE2 and ROLE3 must also be created; ROLE2 additionally becomes a superuser
    expected.add(attr.Q_CREATE_ROLE.format(ROLE2))
    expected.add(attr.Q_ALTER_ROLE.format(ROLE2, 'SUPERUSER'))
    expected.add(attr.Q_CREATE_ROLE.format(ROLE3))

    actual, password_changed = attr.analyze_attributes(spec, cursor, verbose=False)
    # Filter out changes for roles that existed before this test
    actual = set([s for s in actual if ('postgres' not in s and 'test_user' not in s)])

    assert actual == expected
def test_analyze_nonexistent_role_with_non_default_attributes(mockdbcontext):
    """The role is absent from the db, so analyze() should queue a CREATE ROLE
    plus one ALTER statement per non-default attribute in the spec."""
    mockdbcontext.get_role_attributes = lambda x: {}

    spec_attributes = copy.deepcopy(NON_DEFAULTS_GIVEN) + ['LOGIN', 'SUPERUSER']

    # Analyze the role with non-default attributes
    roleconf = attr.AttributeAnalyzer(ROLE1, spec_attributes=spec_attributes,
                                      dbcontext=mockdbcontext)
    roleconf.analyze()

    alter_keywords = ('LOGIN', 'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'REPLICATION')
    expected = {attr.Q_ALTER_ROLE.format(ROLE1, kw) for kw in alter_keywords}
    expected.add(attr.Q_CREATE_ROLE.format(ROLE1))
    expected.add(attr.Q_ALTER_CONN_LIMIT.format(ROLE1, '30', '-1'))
    expected.add(attr.Q_ALTER_VALID_UNTIL.format(ROLE1, '2016-08-04', 'None'))

    assert set(roleconf.sql_to_run) == expected
@pytest.mark.parametrize('bogus_attribute', ['INVALID', 'NOINVALID'])
def test_converted_attributes_invalid_attribute(capsys, mockdbcontext, bogus_attribute):
    """An unrecognized attribute (with or without a 'NO' prefix) should exit
    and print the unknown-attribute message."""
    analyzer = attr.AttributeAnalyzer(ROLE1, spec_attributes=[bogus_attribute],
                                      dbcontext=mockdbcontext)

    with pytest.raises(SystemExit):
        analyzer.converted_attributes()
    stdout = capsys.readouterr()[0]
    assert stdout == attr.UNKNOWN_ATTRIBUTE_MSG.format(bogus_attribute) + '\n'
def test_converted_attributes_password(mockdbcontext):
    """A PASSWORD attribute lands in the converted dict under 'rolpassword'
    with its surrounding single quotes stripped."""
    secret = 'supeRSecret'
    roleconf = attr.AttributeAnalyzer(ROLE1,
                                      spec_attributes=["PASSWORD '%s'" % secret],
                                      dbcontext=mockdbcontext)
    assert roleconf.converted_attributes()['rolpassword'] == secret
def test_coalesce_attributes(mockdbcontext):
    """Coalescing layers the spec's requested attributes on top of the current
    attributes reported by the database (here: all defaults)."""
    mockdbcontext.get_role_attributes = lambda x: copy.deepcopy(attr.DEFAULT_ATTRIBUTES)
    requested = ['BYPASSRLS', 'CREATEDB', 'NOINHERIT', 'REPLICATION']
    roleconf = attr.AttributeAnalyzer(ROLE1, spec_attributes=requested,
                                      dbcontext=mockdbcontext)

    expected = copy.deepcopy(attr.DEFAULT_ATTRIBUTES)
    expected['rolbypassrls'] = True
    expected['rolcreatedb'] = True
    expected['rolinherit'] = False  # NOINHERIT turns the flag off
    expected['rolreplication'] = True

    assert roleconf.coalesce_attributes() == expected
269 | """ 270 | mockdbcontext.get_role_attributes = lambda x: copy.deepcopy(attr.DEFAULT_ATTRIBUTES) 271 | set_attributes = ['BYPASSRLS', 'CREATEDB', 'CREATEROLE', 'REPLICATION'] 272 | roleconf = attr.AttributeAnalyzer(ROLE1, spec_attributes=set_attributes, 273 | dbcontext=mockdbcontext) 274 | attributes = roleconf.coalesce_attributes() 275 | roleconf.set_all_attributes(attributes) 276 | 277 | actual = set(roleconf.sql_to_run) 278 | expected = {attr.Q_ALTER_ROLE.format(ROLE1, opt) for opt in set_attributes} 279 | assert actual == expected 280 | 281 | 282 | @pytest.mark.parametrize("password", ["'supersecret'", '"supersecret"']) 283 | def test_set_all_attributes_change_skips_same_password(mockdbcontext, password): 284 | role_attributes = copy.deepcopy(attr.DEFAULT_ATTRIBUTES) 285 | role_attributes.update( 286 | dict( 287 | rolpassword=attr.create_md5_hash(ROLE1, 'supersecret') 288 | ) 289 | ) 290 | mockdbcontext.get_role_attributes = lambda x: role_attributes 291 | 292 | attributes = ['PASSWORD {}'.format(password)] 293 | roleconf = attr.AttributeAnalyzer(ROLE1, spec_attributes=attributes, dbcontext=mockdbcontext) 294 | attributes = roleconf.coalesce_attributes() 295 | roleconf.set_all_attributes(attributes) 296 | assert roleconf.sql_to_run == [] 297 | 298 | 299 | @pytest.mark.parametrize("optname, optval", [ 300 | ('rolbypassrls', False), 301 | ('rolcreatedb', False), 302 | ('rolcreaterole', False), 303 | ('rolinherit', False), 304 | ('rolcanlogin', True), 305 | ('rolreplication', False), 306 | ('rolsuper', True), 307 | ('rolconnlimit', 8)]) 308 | def test_get_attribute_value(mockdbcontext, optname, optval): 309 | role_attributes = copy.deepcopy(attr.DEFAULT_ATTRIBUTES) 310 | role_attributes.update( 311 | dict( 312 | rolcanlogin=True, 313 | rolsuper=True, 314 | rolinherit=False, 315 | rolconnlimit=8 316 | ) 317 | ) 318 | mockdbcontext.get_role_attributes = lambda x: role_attributes 319 | 320 | roleconf = attr.AttributeAnalyzer(ROLE1, spec_attributes=[], 
@pytest.mark.parametrize("optname, optval", [
    ('rolbypassrls', True),
    ('rolcreatedb', True),
    ('rolcreaterole', True),
    ('rolinherit', False),
    ('rolcanlogin', True),
    ('rolreplication', True),
    ('rolsuper', True),
    ('rolconnlimit', 8)])
def test_set_attribute_value(roleconf, optname, optval):
    """set_attribute_value queues exactly one ALTER statement, using the
    'NO'-prefixed keyword when a boolean attribute is being turned off."""
    # Sanity check: the fixture's current value differs from what we will set
    assert roleconf.get_attribute_value(optname) != optval

    prior = 'foo'
    roleconf.set_attribute_value(attribute=optname, desired_value=optval,
                                 current_value=prior)

    if optname == 'rolconnlimit':
        expected_sql = attr.Q_ALTER_CONN_LIMIT.format(ROLE1, str(optval), prior)
    else:
        keyword = attr.COLUMN_NAME_TO_KEYWORD[optname]
        if optval is not True:
            keyword = 'NO' + keyword
        expected_sql = attr.Q_ALTER_ROLE.format(ROLE1, keyword)

    assert roleconf.sql_to_run == [expected_sql]
@nondefault_attributes(dict(
    rolpassword=attr.create_md5_hash(ROLE1, 'supersecret'),
))
def test_set_password_statements_generated(roleconf):
    """Changing a password queues the real ALTER in password_sql_to_run and
    only a masked, commented-out copy in the regular change log."""
    new_password = 'evenmoresecret'
    roleconf.set_password(new_password)

    assert roleconf.password_sql_to_run == [attr.Q_ALTER_PASSWORD.format(ROLE1, new_password)]

    # The regular log must never contain the cleartext password
    masked = '--' + attr.Q_ALTER_PASSWORD.format(roleconf.rolename, '******')
    assert roleconf.sql_to_run == [masked]
@pytest.mark.usefixtures('drop_users_and_objects')
def test_configure_defaults_to_check_mode(cursor, spec_with_new_user, db_config):
    """When neither --live nor --check is passed, configure should behave as
    check mode and leave the database untouched."""
    # The role must not exist before pgbedrock runs
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == 0

    cli_args = [
        'configure', spec_with_new_user,
        '-h', db_config['host'],
        '-p', str(db_config['port']),
        '-U', db_config['user'],
        '-w', db_config['password'],
        '-d', db_config['dbname'],
    ]
    result = CliRunner().invoke(cli.entrypoint, cli_args)
    assert result.exit_code == 0

    # Still no role: nothing was committed
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == 0
@pytest.mark.parametrize("value,expected", (
    [(v, True) for v in ('yes', 'Yes', 'YES', 'true', 'True', 'on', '1', 1)] +
    [(v, False) for v in ('no', 'No', 'NO', 'false', 'False', 'off', '0', 0)]
))
def test_parse_bool(value, expected):
    """Every common truthy/falsy spelling (any case, string or int) should map
    to the corresponding boolean."""
    assert common.parse_bool(value) == expected
def test_objectname_repr():
    """repr() shows the constructor arguments that would recreate the object,
    omitting the unqualified name when only a schema was given."""
    schema_only = common.ObjectName(schema='myschema')
    assert repr(schema_only) == "ObjectName('myschema')"

    with_table = common.ObjectName(schema='myschema', unqualified_name='mytable')
    assert repr(with_table) == "ObjectName('myschema', 'mytable')"
@pytest.mark.parametrize('full_name, schema_name, unqualified_name, qualified_name', [
    ('foo.bar', 'foo', 'bar', 'foo."bar"'),
    ('foo."bar"', 'foo', 'bar', 'foo."bar"'),
    ('"foo".bar', 'foo', 'bar', 'foo."bar"'),
    ('"foo"."bar"', 'foo', 'bar', 'foo."bar"'),
    ('"foo".bar.baz', 'foo', 'bar.baz', 'foo."bar.baz"'),
    ('"foo"."bar.baz"', 'foo', 'bar.baz', 'foo."bar.baz"'),
    ('foo.*', 'foo', '*', 'foo.*'),
])
def test_objectname_from_str_schema_and_object(full_name, schema_name, unqualified_name, qualified_name):
    """from_str splits on the first dot: quotes are stripped from each part and
    the object part is re-quoted in qualified_name (a '*' stays unquoted)."""
    parsed = common.ObjectName.from_str(full_name)
    assert isinstance(parsed, common.ObjectName)

    actual = (parsed.schema, parsed.unqualified_name, parsed.qualified_name)
    assert actual == (schema_name, unqualified_name, qualified_name)
165 | assert only_schema.qualified_name == 'myschema' 166 | assert only_schema.unqualified_name is None 167 | -------------------------------------------------------------------------------- /tests/test_context.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from conftest import run_setup_sql 4 | from pgbedrock import attributes, common, context, privileges as privs, ownerships 5 | from pgbedrock import memberships 6 | 7 | 8 | Q_CREATE_TABLE = 'SET ROLE {}; CREATE TABLE {}.{} AS (SELECT 1+1); RESET ROLE;' 9 | Q_CREATE_SEQUENCE = 'SET ROLE {}; CREATE SEQUENCE {}.{}; RESET ROLE;' 10 | Q_HAS_PRIVILEGE = "SELECT has_table_privilege('{}', '{}', 'SELECT');" 11 | 12 | SCHEMAS = tuple('schema{}'.format(i) for i in range(4)) 13 | ROLES = tuple('role{}'.format(i) for i in range(4)) 14 | TABLES = tuple('table{}'.format(i) for i in range(6)) 15 | SEQUENCES = tuple('seq{}'.format(i) for i in range(6)) 16 | DUMMY = 'foo' 17 | 18 | 19 | @run_setup_sql( 20 | # Create the roles 21 | [attributes.Q_CREATE_ROLE.format(r) for r in ROLES] + 22 | 23 | [ 24 | # Create a schema owned by role1 25 | ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[1]), 26 | 27 | # Let role2 create tables in the schema and have it create a table there 28 | # so that default privileges from role2 should occur when we configure 29 | privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], ROLES[2]), 30 | Q_CREATE_TABLE.format(ROLES[2], SCHEMAS[0], TABLES[0]), 31 | 32 | # Grant default privileges to role0 from role3 for this schema; these should get 33 | # revoked in our test 34 | privs.Q_GRANT_DEFAULT.format(ROLES[3], SCHEMAS[0], 'SELECT', 'TABLES', ROLES[0]), 35 | ] 36 | ) 37 | def test_get_all_current_defaults(cursor): 38 | dbcontext = context.DatabaseContext(cursor, verbose=True) 39 | expected = { 40 | ROLES[0]: { 41 | 'tables': { 42 | 'read': set([ 43 | (ROLES[3], common.ObjectName(SCHEMAS[0]), 'SELECT'), 44 | ]), 45 | 'write': set(), 
@pytest.mark.parametrize('rolename, object_kind, access, expected', [
    ('role1', 'object_kind1', 'access1', set([1, 2, 3])),
    ('role1', 'object_kind1', 'missing_access', set()),
    ('role1', 'missing_object_kind1', 'access1', set()),
    ('missing_role1', 'object_kind1', 'access', set()),
])
def test_get_role_current_defaults(rolename, object_kind, access, expected):
    """A missing role, object kind, or access level yields an empty set rather
    than raising; only the fully-matching path returns the cached entries."""
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True)
    dbcontext._cache['get_all_current_defaults'] = lambda: {
        'role1': {
            'object_kind1': {
                'access1': set([1, 2, 3]),
            },
        },
    }
    actual = dbcontext.get_role_current_defaults(rolename, object_kind, access)
    assert actual == expected
| assert dbcontext.has_default_privilege(rolename, schema, object_kind, access) == expected 104 | 105 | 106 | 107 | 108 | @run_setup_sql( 109 | # Create the roles 110 | [attributes.Q_CREATE_ROLE.format(r) for r in ROLES] + 111 | 112 | # Create two schemas, both owned by Role1 (who will own nothing else) 113 | [ownerships.Q_CREATE_SCHEMA.format(s, ROLES[1]) for s in SCHEMAS[:2]] + 114 | 115 | [ 116 | # Let role2 and role3 create objects in the schemas 117 | privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], ROLES[2]), 118 | privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], ROLES[3]), 119 | privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[1], ROLES[2]), 120 | privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[1], ROLES[3]), 121 | 122 | # Create a couple tables 123 | Q_CREATE_TABLE.format(ROLES[3], SCHEMAS[0], TABLES[1]), 124 | Q_CREATE_TABLE.format(ROLES[3], SCHEMAS[1], TABLES[3]), 125 | 126 | # Grant SELECT to role0 for several tables 127 | privs.Q_GRANT_NONDEFAULT.format('SELECT', 'TABLE', '{}.{}'.format(SCHEMAS[0], TABLES[1]), ROLES[0]), 128 | privs.Q_GRANT_NONDEFAULT.format('SELECT', 'TABLE', '{}.{}'.format(SCHEMAS[1], TABLES[3]), ROLES[0]), 129 | ] 130 | ) 131 | def test_get_all_current_nondefaults(cursor): 132 | dbcontext = context.DatabaseContext(cursor, verbose=True) 133 | expected = { 134 | ROLES[0]: { 135 | 'tables': { 136 | 'read': set([ 137 | (common.ObjectName(SCHEMAS[0], TABLES[1]), 'SELECT'), 138 | (common.ObjectName(SCHEMAS[1], TABLES[3]), 'SELECT'), 139 | ]), 140 | 'write': set(), 141 | } 142 | }, 143 | ROLES[2]: { 144 | 'schemas': { 145 | 'read': set(), 146 | 'write': set([ 147 | (common.ObjectName(schema=SCHEMAS[0]), 'CREATE'), 148 | (common.ObjectName(schema=SCHEMAS[1]), 'CREATE'), 149 | ]), 150 | } 151 | }, 152 | ROLES[3]: { 153 | 'schemas': { 154 | 'read': set(), 155 | 'write': set([ 156 | (common.ObjectName(schema=SCHEMAS[0]), 'CREATE'), 157 | (common.ObjectName(schema=SCHEMAS[1]), 'CREATE'), 158 | 
]), 159 | } 160 | } 161 | } 162 | actual = dbcontext.get_all_current_nondefaults() 163 | assert actual == expected 164 | 165 | # Make sure that this data is cached for future use 166 | cursor.close() 167 | actual_again = dbcontext.get_all_current_nondefaults() 168 | assert actual_again == expected 169 | 170 | 171 | 172 | 173 | @pytest.mark.parametrize('rolename, object_kind, access, expected', [ 174 | ('role1', 'object_kind1', 'access1', set([ 175 | (common.ObjectName('foo', 'bar'), 'SELECT'), 176 | (common.ObjectName('foo', 'baz'), 'SELECT'), 177 | (common.ObjectName('foo', 'qux'), 'INSERT'), 178 | ])), 179 | ('role1', 'object_kind1', 'missing_access', set()), 180 | ('role1', 'missing_object_kind1', 'access1', set()), 181 | ('missing_role1', 'object_kind1', 'access', set()), 182 | ]) 183 | def test_get_role_current_nondefaults(rolename, object_kind, access, expected): 184 | dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True) 185 | dbcontext._cache['get_all_current_nondefaults'] = lambda: { 186 | 'role1': { 187 | 'object_kind1': { 188 | 'access1': set([ 189 | (common.ObjectName('foo', 'bar'), 'SELECT'), 190 | (common.ObjectName('foo', 'baz'), 'SELECT'), 191 | (common.ObjectName('foo', 'qux'), 'INSERT'), 192 | ]) 193 | } 194 | } 195 | } 196 | actual = dbcontext.get_role_current_nondefaults(rolename, object_kind, access) 197 | assert actual == expected 198 | 199 | 200 | 201 | 202 | @pytest.mark.parametrize('access, expected', [ 203 | ('write', set()), 204 | ('read', set([ 205 | common.ObjectName(SCHEMAS[0], TABLES[0]), 206 | common.ObjectName(SCHEMAS[0], TABLES[1]) 207 | ])), 208 | ]) 209 | def test_get_role_objects_with_access(access, expected): 210 | dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True) 211 | dbcontext._cache['get_all_current_nondefaults'] = lambda: { 212 | ROLES[0]: { 213 | 'tables': { 214 | 'read': set([ 215 | (common.ObjectName(SCHEMAS[0], TABLES[0]), 'SELECT'), 216 | (common.ObjectName(SCHEMAS[0], TABLES[1]), 'SELECT'), 
217 | ]) 218 | } 219 | } 220 | } 221 | actual = dbcontext.get_role_objects_with_access(ROLES[0], common.ObjectName(SCHEMAS[0]), 222 | 'tables', access) 223 | assert actual == expected 224 | 225 | 226 | 227 | 228 | @run_setup_sql( 229 | # Create all the roles 230 | [attributes.Q_CREATE_ROLE.format(r) for r in ROLES] + 231 | [ 232 | # Create schema; Role0 owns the schema but no objects 233 | ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]), 234 | ] + 235 | # Let all roles create objects in the schema 236 | [privs.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], r) for r in ROLES] + 237 | [ 238 | # Role1 owns 2 tables (0, 1) and 1 sequence (0) 239 | Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[0]), 240 | Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[1]), 241 | Q_CREATE_SEQUENCE.format(ROLES[1], SCHEMAS[0], SEQUENCES[0]), 242 | 243 | # Role2 owns 0 tables and 2 sequences (1, 2) 244 | Q_CREATE_SEQUENCE.format(ROLES[2], SCHEMAS[0], SEQUENCES[1]), 245 | Q_CREATE_SEQUENCE.format(ROLES[2], SCHEMAS[0], SEQUENCES[2]), 246 | 247 | # Role3 owns 1 table (2) and 0 sequences 248 | Q_CREATE_TABLE.format(ROLES[3], SCHEMAS[0], TABLES[2]), 249 | ]) 250 | def test_get_all_object_attributes(cursor): 251 | dbcontext = context.DatabaseContext(cursor, verbose=True) 252 | expected = { 253 | 'tables': { 254 | SCHEMAS[0]: { 255 | common.ObjectName(SCHEMAS[0], TABLES[0]): {'owner': ROLES[1], 'is_dependent': False}, 256 | common.ObjectName(SCHEMAS[0], TABLES[1]): {'owner': ROLES[1], 'is_dependent': False}, 257 | common.ObjectName(SCHEMAS[0], TABLES[2]): {'owner': ROLES[3], 'is_dependent': False}, 258 | } 259 | }, 260 | 'sequences': { 261 | SCHEMAS[0]: { 262 | common.ObjectName(SCHEMAS[0], SEQUENCES[0]): {'owner': ROLES[1], 'is_dependent': False}, 263 | common.ObjectName(SCHEMAS[0], SEQUENCES[1]): {'owner': ROLES[2], 'is_dependent': False}, 264 | common.ObjectName(SCHEMAS[0], SEQUENCES[2]): {'owner': ROLES[2], 'is_dependent': False}, 265 | } 266 | }, 267 | 'schemas': { 
@run_setup_sql(
    # Three roles, two of which can log in, each owning a schema named
    # after itself (i.e. a personal schema)
    [attributes.Q_CREATE_ROLE.format(role) for role in ROLES[:3]] +
    [attributes.Q_ALTER_ROLE.format(role, 'LOGIN') for role in ROLES[1:3]] +
    [ownerships.Q_CREATE_SCHEMA.format(role, role) for role in ROLES[:3]]
)
def test_get_all_personal_schemas(cursor):
    """Only schemas belonging to roles with LOGIN count as personal schemas."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)
    expected = {common.ObjectName(role) for role in ROLES[1:3]}
    actual = dbcontext.get_all_personal_schemas()
    assert actual == expected

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_all_personal_schemas() == actual
@run_setup_sql([
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),
])
def test_get_all_role_attributes(cursor):
    """Every role in the cluster shows up, including PG10's built-in roles."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)

    expected = {'test_user', 'postgres', ROLES[0], ROLES[1]}
    # Postgres 10 ships several default roles that we have to account for
    if dbcontext.get_version_info().postgres_version.startswith('10.'):
        expected |= {'pg_read_all_settings', 'pg_stat_scan_tables',
                     'pg_read_all_stats', 'pg_monitor'}

    actual = dbcontext.get_all_role_attributes()
    assert set(actual.keys()) == expected

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_all_role_attributes() == actual




def test_get_role_attributes():
    """Looking up one role delegates to the cached all-roles mapping."""
    role_attributes = {'foo': 'bar'}
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True)
    dbcontext._cache['get_all_role_attributes'] = lambda: {ROLES[0]: role_attributes}
    assert dbcontext.get_role_attributes(ROLES[0]) == role_attributes




def test_get_role_attributes_role_does_not_exist():
    """An unknown role yields an empty dict rather than raising."""
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True)
    dbcontext._cache['get_all_role_attributes'] = lambda: {}
    assert dbcontext.get_role_attributes(ROLES[0]) == dict()
@pytest.mark.parametrize('all_role_attributes, expected', [
    ({ROLES[0]: {'rolsuper': False}}, False),
    ({ROLES[0]: {'rolsuper': True}}, True),
    ({}, False),
])
def test_is_superuser(all_role_attributes, expected):
    """is_superuser reflects rolsuper; a missing role is not a superuser."""
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True)
    dbcontext._cache['get_all_role_attributes'] = lambda: all_role_attributes
    assert dbcontext.is_superuser(ROLES[0]) == expected




@run_setup_sql([
    # Two roles owning a few schemas between them, including one schema
    # named after its owning role
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),

    ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]),
    ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[1], ROLES[0]),
    ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[2], ROLES[1]),
    ownerships.Q_CREATE_SCHEMA.format(ROLES[1], ROLES[1]),
])
def test_get_all_schemas_and_owners(cursor):
    """Every schema, including the pre-existing ones, maps to its owner."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)
    owners_by_schema = {
        SCHEMAS[0]: ROLES[0],
        SCHEMAS[1]: ROLES[0],
        SCHEMAS[2]: ROLES[1],
        ROLES[1]: ROLES[1],
        # These schemas exist in any fresh database
        'public': 'postgres',
        'information_schema': 'postgres',
        'pg_catalog': 'postgres',
    }
    expected = {common.ObjectName(schema): owner
                for schema, owner in owners_by_schema.items()}

    actual = dbcontext.get_all_schemas_and_owners()
    assert actual == expected

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_all_schemas_and_owners() == actual




@run_setup_sql([
    # Three roles with memberships between them
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),
    attributes.Q_CREATE_ROLE.format(ROLES[2]),

    memberships.Q_GRANT_MEMBERSHIP.format(ROLES[0], ROLES[1]),
    memberships.Q_GRANT_MEMBERSHIP.format(ROLES[1], ROLES[2]),
])
def test_get_all_memberships(cursor):
    """All membership pairs are returned, including PG10's built-in ones."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)

    expected = {('role1', 'role0'), ('role2', 'role1')}
    # Postgres 10 ships default roles with built-in memberships
    if dbcontext.get_version_info().postgres_version.startswith('10.'):
        expected |= {
            ('pg_monitor', 'pg_stat_scan_tables'),
            ('pg_monitor', 'pg_read_all_stats'),
            ('pg_monitor', 'pg_read_all_settings'),
        }

    actual = dbcontext.get_all_memberships()
    assert isinstance(actual, list)
    assert len(actual) == len(expected)

    # Tuple-ify each row so the whole result can be compared as a set
    assert {tuple(row) for row in actual} == expected
def test_get_schema_owner():
    """Schema owner lookup delegates to the cached schema -> owner map."""
    schema = common.ObjectName('foo')
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=True)
    dbcontext._cache['get_all_schemas_and_owners'] = lambda: {schema: 'bar'}
    assert dbcontext.get_schema_owner(schema) == 'bar'




@run_setup_sql([
    # Two roles, each owning a schema containing one table and one sequence
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),

    ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]),
    ownerships.Q_CREATE_SCHEMA.format(SCHEMAS[1], ROLES[1]),

    Q_CREATE_TABLE.format(ROLES[0], SCHEMAS[0], TABLES[0]),
    Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[1], TABLES[0]),
    Q_CREATE_SEQUENCE.format(ROLES[0], SCHEMAS[0], SEQUENCES[1]),
    Q_CREATE_SEQUENCE.format(ROLES[1], SCHEMAS[1], SEQUENCES[2]),
])
def test_get_all_nonschema_objects_and_owners(cursor):
    """Tables and sequences are grouped under their schema with owner info."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)
    expected = {
        common.ObjectName(SCHEMAS[0]): [
            context.ObjectInfo('tables', common.ObjectName(SCHEMAS[0], TABLES[0]), ROLES[0], False),
            context.ObjectInfo('sequences', common.ObjectName(SCHEMAS[0], SEQUENCES[1]), ROLES[0], False),
        ],
        common.ObjectName(SCHEMAS[1]): [
            context.ObjectInfo('tables', common.ObjectName(SCHEMAS[1], TABLES[0]), ROLES[1], False),
            context.ObjectInfo('sequences', common.ObjectName(SCHEMAS[1], SEQUENCES[2]), ROLES[1], False),
        ],
    }
    actual = dbcontext.get_all_nonschema_objects_and_owners()

    # pg_catalog and information_schema are deliberately not checked here
    # since that's a lot of work and those should not be touched; also
    # compare per-schema as sets since ordering is not guaranteed
    for schema, objects in expected.items():
        assert set(objects) == set(actual[schema])

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_all_nonschema_objects_and_owners() == actual
def test_get_schema_objects():
    """Objects for a known schema come straight from the cached mapping."""
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=False)
    dbcontext._cache['get_all_nonschema_objects_and_owners'] = lambda: {
        common.ObjectName('foo'): 'bar'
    }
    assert dbcontext.get_schema_objects(common.ObjectName('foo')) == 'bar'


def test_get_schema_objects_no_entry():
    """A schema absent from the cached mapping yields an empty list."""
    dbcontext = context.DatabaseContext(cursor=DUMMY, verbose=False)
    dbcontext._cache['get_all_nonschema_objects_and_owners'] = lambda: {
        common.ObjectName('foo'): 'bar',
    }
    actual = dbcontext.get_schema_objects(common.ObjectName('key_not_in_response'))
    assert actual == []
def test_get_all_raw_object_attributes(cursor):
    """The raw attribute query yields a nonempty list of tuples, and caches."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)
    raw_results = dbcontext.get_all_raw_object_attributes()
    assert isinstance(raw_results, list)
    # Even a fresh database has catalog objects, so this is never empty
    assert len(raw_results) > 0
    assert isinstance(raw_results[0], tuple)

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_all_raw_object_attributes() == raw_results


def test_get_version_info(cursor):
    """Version info identifies a non-Redshift Postgres, and caches."""
    dbcontext = context.DatabaseContext(cursor, verbose=True)
    actual = dbcontext.get_version_info()

    assert isinstance(actual, context.VersionInfo)
    assert not actual.is_redshift
    assert not actual.redshift_version
    # The Postgres version itself is not asserted since the test matrix
    # runs against multiple Postgres versions

    # A closed cursor proves the repeat call is served from the cache
    cursor.close()
    assert dbcontext.get_version_info() == actual
@pytest.mark.parametrize('statements, expected', [
    (['--foo', '--bar'], False),
    (['--foo', 'bar'], True),
    ([], False),
])
def test_has_changes(statements, expected):
    """A statement list has changes iff some entry is not a '--' comment."""
    assert core_configure.has_changes(statements) is expected


@pytest.mark.usefixtures('drop_users_and_objects')
def test_configure_no_changes_needed(tmpdir, capsys, db_config, base_spec):
    """Configuring against an already-conforming spec reports success."""
    spec_path = tmpdir.join('spec.yml')
    spec_path.write(base_spec)

    params = copy.deepcopy(db_config)
    params.update(
        spec_path=spec_path.strpath,
        prompt=False,
        attributes=True,
        memberships=True,
        ownerships=True,
        privileges=True,
        live=False,
        verbose=False,
    )
    core_configure.configure(**params)
    out, err = capsys.readouterr()
    assert core_configure.SUCCESS_MSG in out


@pytest.mark.usefixtures('drop_users_and_objects')
@pytest.mark.parametrize('live_mode, expected', [(True, 1), (False, 0)])
def test_configure_live_mode_works(capsys, cursor, spec_with_new_user, db_config, live_mode, expected):
    """
    We add a new user (NEW_USER) through pgbedrock and make sure that 1) this change isn't
    committed if we pass --check and 2) this change _is_ committed if we pass --live
    """
    # The role must not exist before pgbedrock runs
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == 0

    params = copy.deepcopy(db_config)
    params.update(
        spec_path=spec_with_new_user,
        prompt=False,
        attributes=True,
        memberships=True,
        ownerships=True,
        privileges=True,
        live=live_mode,
        verbose=False,
    )
    core_configure.configure(**params)
    out, err = capsys.readouterr()

    # Each module's change should exist in live mode and be absent in
    # check mode
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == expected

    cursor.execute(Q_SCHEMA_EXISTS.format(NEW_USER))
    assert cursor.rowcount == expected

    if live_mode:
        cursor.execute(Q_HAS_ROLE.format(NEW_USER, 'postgres'))
        assert cursor.rowcount == expected

        cursor.execute(Q_HAS_PRIVILEGE.format(NEW_USER, 'pg_catalog.pg_class'))
        assert cursor.fetchone()[0] is True
@pytest.mark.usefixtures('drop_users_and_objects')
def test_configure_live_does_not_leak_passwords(tmpdir, capsys, cursor, db_config, base_spec):
    """
    Set a password for a new user (NEW_USER) through pgbedrock and verify that 1) the
    password actually gets changed in Postgres and 2) the cleartext password never
    appears in pgbedrock's verbose output -- only the sanitized '******' placeholder
    """
    # Assert that we start without the role we are trying to add
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == 0

    new_password = 'supersecret'
    spec = copy.copy(base_spec)
    spec += dedent("""
        {new_user}:
            attributes:
                - PASSWORD "{new_password}"
        """.format(new_user=NEW_USER, new_password=new_password))

    spec_path = tmpdir.join('spec.yml')
    spec_path.write(spec)
    params = copy.deepcopy(db_config)
    params.update(
        dict(spec_path=spec_path.strpath,
             prompt=False,
             attributes=True,
             memberships=True,
             ownerships=True,
             privileges=True,
             live=True,
             verbose=True,
             )
    )
    core_configure.configure(**params)

    # Verify that the password was changed
    new_md5_hash = attr.create_md5_hash(NEW_USER, new_password)
    cursor.execute("SELECT rolpassword FROM pg_authid WHERE rolname = '{}';".format(NEW_USER))
    assert cursor.fetchone()[0] == new_md5_hash

    # Verify that the password isn't exposed in our output; use the variable
    # rather than a duplicated literal so the check can't silently diverge
    # from the password actually set above
    out, err = capsys.readouterr()
    assert new_password not in out
    assert new_password not in err

    # Verify that the sanitized record of the password change is in our output
    assert 'ALTER ROLE "foobar" WITH ENCRYPTED PASSWORD \'******\';' in out
@run_setup_sql([
    attr.Q_CREATE_ROLE.format(NEW_USER),
    attr.Q_ALTER_PASSWORD.format(NEW_USER, 'some_password'),
])
@pytest.mark.usefixtures('drop_users_and_objects')
def test_no_password_attribute_makes_password_none(cursor, spec_with_new_user, db_config):
    """A spec with no PASSWORD attribute nulls out the role's password."""
    # Commit @run_setup_sql's work so the separate transaction opened inside
    # `pgbedrock configure` (which uses a fresh cursor) can see it. The
    # NEW_USER role is dropped afterwards by drop_users_and_objects
    cursor.connection.commit()

    # The role whose password we are modifying must already exist...
    cursor.execute(Q_GET_ROLE_ATTRIBUTE.format('rolname', NEW_USER))
    assert cursor.rowcount == 1

    # ...and must currently have a non-NULL password
    cursor.execute("SELECT rolpassword IS NOT NULL FROM pg_authid WHERE rolname = '{}'".format(NEW_USER))
    assert cursor.fetchone()[0] is True

    params = copy.deepcopy(db_config)
    params.update(
        spec_path=spec_with_new_user,
        prompt=False,
        attributes=True,
        memberships=True,
        ownerships=True,
        privileges=True,
        live=True,
        verbose=False,
    )
    core_configure.configure(**params)

    # The password must have been cleared
    cursor.execute("SELECT rolpassword IS NULL FROM pg_authid WHERE rolname = '{}'".format(NEW_USER))
    assert cursor.fetchone()[0] is True


def test_configure_schema_role_has_dash(tmpdir, capsys, db_config, cursor, base_spec):
    """
    We add a new user ('role-with-dash') through pgbedrock and make sure that that user can create
    a personal schema
    """
    role = 'role-with-dash'

    spec = copy.copy(base_spec)
    spec += dedent("""
        {}:
            has_personal_schema: yes
        """.format(role))

    spec_path = tmpdir.join('spec.yml')
    spec_path.write(spec)

    params = copy.deepcopy(db_config)
    params.update(
        spec_path=spec_path.strpath,
        prompt=False,
        attributes=True,
        memberships=True,
        ownerships=True,
        privileges=True,
        live=False,
        verbose=False,
    )
    core_configure.configure(**params)
    out, err = capsys.readouterr()
    assert own.Q_CREATE_SCHEMA.format(role, role) in out
ROLE1 = 'charlie'
ROLE2 = 'barney'
ROLE3 = 'wacko'
DESIRED_GROUP1 = 'desired_group1'
DESIRED_GROUP2 = 'desired_group2'
CURRENT_GROUP1 = 'current_group1'
CURRENT_GROUP2 = 'current_group2'

Q_HAS_ROLE = "SELECT pg_has_role('{}', '{}', 'member')"
DUMMY = 'foo'


@run_setup_sql(
    # Create every role and group, make ROLE1 a superuser, and put ROLE3
    # into a group it should not keep
    [attributes.Q_CREATE_ROLE.format(r) for r in (ROLE1, ROLE2, ROLE3, CURRENT_GROUP1,
                                                  DESIRED_GROUP1, DESIRED_GROUP2)] +
    [
        attributes.Q_ALTER_ROLE.format(ROLE1, 'SUPERUSER'),
        memb.Q_GRANT_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
    ])
def test_analyze_memberships(cursor):
    """
    Test:
        * one superuser (to make sure they don't get evaluated)
        * two users, both of which will be removed from a group and added to a group
    """
    spec = {
        ROLE1: {'member_of': [DESIRED_GROUP1]},
        ROLE2: {'member_of': [DESIRED_GROUP1, DESIRED_GROUP2]},
        ROLE3: {'member_of': [DESIRED_GROUP1]}
    }

    expected = {
        memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE1),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE3),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
    }

    actual = memb.analyze_memberships(spec, cursor, verbose=False)
    assert set(actual) == expected
def test_analyze_no_desired_memberships_none_current(mockdbcontext):
    """Nothing current and nothing desired means no SQL is generated."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set()

    analyzer = memb.MembershipAnalyzer(ROLE1, spec_memberships=set(),
                                       dbcontext=mockdbcontext)
    assert analyzer.analyze() == []


def test_analyze_none_current_some_desired(mockdbcontext):
    """Each desired group that isn't already held produces a GRANT."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set()
    wanted_groups = {DESIRED_GROUP1, DESIRED_GROUP2}
    expected = {memb.Q_GRANT_MEMBERSHIP.format(group, ROLE1) for group in wanted_groups}

    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=wanted_groups,
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected


def test_analyze_some_current_none_desired(mockdbcontext):
    """Each currently-held group that isn't desired produces a REVOKE."""
    mockdbcontext.is_superuser = lambda x: False
    current_groups = {CURRENT_GROUP1, CURRENT_GROUP2}
    mockdbcontext.get_role_memberships = lambda x: current_groups
    expected = {memb.Q_REVOKE_MEMBERSHIP.format(group, ROLE1) for group in current_groups}

    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=set(),
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected


def test_analyze_some_current_some_desired(mockdbcontext):
    """Overlap is kept as-is; missing groups are granted, extras revoked."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: {DESIRED_GROUP1, CURRENT_GROUP1,
                                                    CURRENT_GROUP2}
    expected = {
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE1),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE1),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP2, ROLE1),
    }

    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships={DESIRED_GROUP1, DESIRED_GROUP2},
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected
def test_analyze_skip_superuser(mockdbcontext):
    """Superusers are never analyzed; a skip message is emitted instead."""
    mockdbcontext.is_superuser = lambda x: True
    actual = memb.MembershipAnalyzer(ROLE2, spec_memberships=DUMMY,
                                     dbcontext=mockdbcontext).analyze()
    assert actual == [memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE2)]


def test_grant_membership(mockdbcontext):
    """grant_membership queues exactly one GRANT statement."""
    mockdbcontext.is_superuser = lambda x: False
    analyzer = memb.MembershipAnalyzer(ROLE1, spec_memberships=DUMMY, dbcontext=mockdbcontext)
    analyzer.grant_membership(DESIRED_GROUP1)
    assert analyzer.sql_to_run == [memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE1)]


def test_revoke_membership(mockdbcontext):
    """revoke_membership queues exactly one REVOKE statement."""
    mockdbcontext.is_superuser = lambda x: False
    analyzer = memb.MembershipAnalyzer(ROLE1, spec_memberships=DUMMY, dbcontext=mockdbcontext)
    analyzer.revoke_membership(CURRENT_GROUP1)
    assert analyzer.sql_to_run == [memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE1)]
# SQL templates and fixture names shared by the ownership tests
Q_CREATE_SEQUENCE = 'SET ROLE {}; CREATE SEQUENCE {}.{}; RESET ROLE;'
Q_CREATE_TABLE = 'SET ROLE {}; CREATE TABLE {}.{} AS (SELECT 1+1); RESET ROLE;'
Q_SCHEMA_EXISTS = "SELECT schema_name FROM information_schema.schemata WHERE schema_name='{}';"

ROLES = tuple('role{}'.format(i) for i in range(3))
SCHEMAS = tuple('schema{}'.format(i) for i in range(3))
TABLES = tuple('table{}'.format(i) for i in range(4))
SEQUENCES = tuple('seq{}'.format(i) for i in range(4))
DUMMY = 'foo'


@run_setup_sql([
    'DROP SCHEMA public',
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),
])
def test_analyze_ownerships_create_schemas(cursor):
    """Schemas listed under 'owns' (and personal schemas) get created."""
    spec = {
        ROLES[0]: {
            'has_personal_schema': True,
            'owns': {'schemas': [ObjectName(SCHEMAS[0])]},
        },
        ROLES[1]: {
            'owns': {'schemas': [ObjectName(SCHEMAS[1])]},
        },
    }
    actual = own.analyze_ownerships(spec, cursor, verbose=False)

    expected = {
        own.Q_CREATE_SCHEMA.format(ROLES[0], ROLES[0]),
        own.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]),
        own.Q_CREATE_SCHEMA.format(SCHEMAS[1], ROLES[1]),
    }
    assert set(actual) == expected


@run_setup_sql([
    'DROP SCHEMA public',
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),
    own.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]),
    own.Q_CREATE_SCHEMA.format(SCHEMAS[1], ROLES[0]),
    privileges.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], ROLES[1]),
    privileges.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[1], ROLES[1]),

    # SCHEMAS[0] holds four tables, two of them owned by the wrong role
    Q_CREATE_TABLE.format(ROLES[0], SCHEMAS[0], TABLES[0]),
    Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[1]),
    Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[2]),
    Q_CREATE_TABLE.format(ROLES[0], SCHEMAS[0], TABLES[3]),

    # SCHEMAS[1] holds two sequences, one owned by the wrong role
    Q_CREATE_SEQUENCE.format(ROLES[1], SCHEMAS[1], SEQUENCES[0]),
    Q_CREATE_SEQUENCE.format(ROLES[0], SCHEMAS[1], SEQUENCES[1]),
])
def test_analyze_ownerships_nonschemas(cursor):
    """Mis-owned tables and sequences each get an owner-change statement."""
    spec = {
        ROLES[0]: {
            'owns': {'tables': [ObjectName(SCHEMAS[0], '*')]},
        },
        ROLES[1]: {
            'owns': {
                'sequences': [ObjectName(SCHEMAS[1], SEQUENCES[0]),
                              ObjectName(SCHEMAS[1], SEQUENCES[1])]
            },
        },
    }
    actual = own.analyze_ownerships(spec, cursor, verbose=False)

    expected = {
        own.Q_SET_OBJECT_OWNER.format('TABLE', quoted_object(SCHEMAS[0], TABLES[1]),
                                      ROLES[0], ROLES[1]),
        own.Q_SET_OBJECT_OWNER.format('TABLE', quoted_object(SCHEMAS[0], TABLES[2]),
                                      ROLES[0], ROLES[1]),
        own.Q_SET_OBJECT_OWNER.format('SEQUENCE', quoted_object(SCHEMAS[1], SEQUENCES[1]),
                                      ROLES[1], ROLES[0]),
    }
    assert set(actual) == expected
@run_setup_sql([
    'DROP SCHEMA public',
    attributes.Q_CREATE_ROLE.format(ROLES[0]),
    attributes.Q_CREATE_ROLE.format(ROLES[1]),
    own.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0]),
    own.Q_CREATE_SCHEMA.format(SCHEMAS[1], ROLES[0]),
    privileges.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[0], ROLES[1]),
    privileges.Q_GRANT_NONDEFAULT.format('CREATE', 'SCHEMA', SCHEMAS[1], ROLES[1]),

    # SCHEMAS[0] holds four tables, two of them owned by the wrong role
    Q_CREATE_TABLE.format(ROLES[0], SCHEMAS[0], TABLES[0]),
    Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[1]),
    Q_CREATE_TABLE.format(ROLES[1], SCHEMAS[0], TABLES[2]),
    Q_CREATE_TABLE.format(ROLES[0], SCHEMAS[0], TABLES[3]),

    # SCHEMAS[1] holds two sequences, one owned by the wrong role
    Q_CREATE_SEQUENCE.format(ROLES[1], SCHEMAS[1], SEQUENCES[0]),
    Q_CREATE_SEQUENCE.format(ROLES[0], SCHEMAS[1], SEQUENCES[1]),
])
def test_analyze_ownerships_schemas_and_nonschemas(cursor):
    """
    This is just a combination of the related schema and nonschema tests to make sure the pieces
    fit together.
    """
    spec = {
        ROLES[0]: {
            'has_personal_schema': True,
            'owns': {
                'tables': [ObjectName(SCHEMAS[0], '*')],
                'schemas': [ObjectName(SCHEMAS[2])],
            },
        },
        ROLES[1]: {
            'owns': {
                'sequences': [ObjectName(SCHEMAS[1], SEQUENCES[0]),
                              ObjectName(SCHEMAS[1], SEQUENCES[1])]
            },
        },
    }
    actual = own.analyze_ownerships(spec, cursor, verbose=False)

    expected = {
        # Ownership changes for the mis-owned tables and sequence
        own.Q_SET_OBJECT_OWNER.format('TABLE', quoted_object(SCHEMAS[0], TABLES[1]),
                                      ROLES[0], ROLES[1]),
        own.Q_SET_OBJECT_OWNER.format('TABLE', quoted_object(SCHEMAS[0], TABLES[2]),
                                      ROLES[0], ROLES[1]),
        own.Q_SET_OBJECT_OWNER.format('SEQUENCE', quoted_object(SCHEMAS[1], SEQUENCES[1]),
                                      ROLES[1], ROLES[0]),
        # Creation of the personal schema and the missing SCHEMAS[2]
        own.Q_CREATE_SCHEMA.format(ROLES[0], ROLES[0]),
        own.Q_CREATE_SCHEMA.format(SCHEMAS[2], ROLES[0]),
    }
    assert set(actual) == expected
def test_schemaanalyzer_init(mockdbcontext):
    """The constructor snapshots owner, existence, and contained objects."""
    mockdbcontext.get_schema_owner = lambda x: 'foo'
    mockdbcontext.get_schema_objects = lambda x: 'bar'
    analyzer = own.SchemaAnalyzer(rolename=ROLES[0], objname=ObjectName(SCHEMAS[0]),
                                  dbcontext=mockdbcontext)

    assert analyzer.rolename == ROLES[0]
    assert isinstance(analyzer.objname, ObjectName)
    assert analyzer.objname.schema == SCHEMAS[0]
    assert analyzer.current_owner == 'foo'
    assert analyzer.exists is True
    assert analyzer.schema_objects == 'bar'


def test_schemaanalyzer_analyzer_creates_schema(mockdbcontext):
    """A schema with no current owner does not exist yet, so it is created."""
    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(SCHEMAS[0]),
                                  dbcontext=mockdbcontext)
    assert analyzer.analyze() == [own.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0])]
def test_schemaanalyzer_existing_schema_owner_change(mockdbcontext):
    """An existing schema owned by someone else gets an owner change."""
    mockdbcontext.get_schema_owner = lambda x: ROLES[1]
    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(SCHEMAS[0]),
                                  dbcontext=mockdbcontext)
    assert analyzer.analyze() == [own.Q_SET_SCHEMA_OWNER.format(SCHEMAS[0], ROLES[0], ROLES[1])]


def test_schemaanalyzer_existing_schema_same_owner(mockdbcontext):
    """No SQL is generated when the desired owner already owns the schema."""
    mockdbcontext.get_schema_owner = lambda x: ROLES[0]
    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(SCHEMAS[0]),
                                  dbcontext=mockdbcontext)
    assert analyzer.analyze() == []


def test_schemaanalyzer_existing_personal_schema_change_object_owners(mockdbcontext):
    """In a personal schema every contained object must belong to the role."""
    personal_schema = ROLES[0]
    mockdbcontext.get_schema_owner = lambda x: ROLES[0]
    mockdbcontext.get_schema_objects = lambda x: [
        ObjectInfo('tables', ObjectName(personal_schema, TABLES[0]), ROLES[0], False),
        ObjectInfo('sequences', ObjectName(personal_schema, SEQUENCES[0]), ROLES[0], False),
        ObjectInfo('tables', ObjectName(personal_schema, TABLES[1]), ROLES[1], False),
        ObjectInfo('sequences', ObjectName(personal_schema, SEQUENCES[1]), ROLES[1], False),
    ]
    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(personal_schema),
                                  dbcontext=mockdbcontext, is_personal_schema=True)
    # Only the two objects owned by ROLES[1] need their owner changed
    assert analyzer.analyze() == [
        own.Q_SET_OBJECT_OWNER.format('TABLE', quoted_object(ROLES[0], TABLES[1]), ROLES[0], ROLES[1]),
        own.Q_SET_OBJECT_OWNER.format('SEQUENCE', quoted_object(ROLES[0], SEQUENCES[1]), ROLES[0], ROLES[1]),
    ]


def test_schemaanalyzer_create_schema(mockdbcontext):
    """create_schema queues a CREATE SCHEMA statement."""
    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(SCHEMAS[0]), dbcontext=mockdbcontext)
    analyzer.create_schema()
    assert analyzer.sql_to_run == [own.Q_CREATE_SCHEMA.format(SCHEMAS[0], ROLES[0])]


def test_schemaanalyzer_set_owner(mockdbcontext):
    """set_owner queues a change away from the schema's current owner."""
    prior_owner = ROLES[1]
    mockdbcontext.get_schema_owner = lambda x: prior_owner

    analyzer = own.SchemaAnalyzer(ROLES[0], objname=ObjectName(SCHEMAS[0]), dbcontext=mockdbcontext)
    analyzer.set_owner()

    assert analyzer.sql_to_run == [own.Q_SET_SCHEMA_OWNER.format(SCHEMAS[0], ROLES[0], prior_owner)]


def test_schemaanalyzer_alter_object_owner(mockdbcontext):
    """alter_object_owner queues an ALTER <KIND> ... owner statement."""
    prior_owner = ROLES[1]
    desired_owner = ROLES[0]
    objname = ObjectName(SCHEMAS[0], TABLES[0])
    mockdbcontext.get_schema_owner = lambda x: desired_owner

    analyzer = own.SchemaAnalyzer(desired_owner, objname=ObjectName(SCHEMAS[0]),
                                  dbcontext=mockdbcontext)
    analyzer.alter_object_owner('tables', objname, prior_owner)
    assert analyzer.sql_to_run == [
        own.Q_SET_OBJECT_OWNER.format('TABLE', objname.qualified_name, desired_owner, prior_owner)
    ]


def test_schemaanalyzer_get_improperly_owned_objects(mockdbcontext):
    """Mis-owned non-dependent objects are reported; dependent ones skipped."""
    desired_owner = ROLES[0]
    wrong_owner = ROLES[1]
    mockdbcontext.get_schema_owner = lambda x: desired_owner
    mockdbcontext.get_schema_objects = lambda x: [
        # Correctly owned
        ObjectInfo('tables', ObjectName(desired_owner, TABLES[0]), desired_owner, False),
        ObjectInfo('sequences', ObjectName(desired_owner, SEQUENCES[0]), desired_owner, False),

        # Owned by the wrong role
        ObjectInfo('tables', ObjectName(desired_owner, TABLES[1]), wrong_owner, False),
        ObjectInfo('sequences', ObjectName(desired_owner, SEQUENCES[1]), wrong_owner, False),

        # Owned by the wrong role but dependent, so it must be ignored
        ObjectInfo('sequences', ObjectName(desired_owner, SEQUENCES[2]), wrong_owner, True),
    ]
    analyzer = own.SchemaAnalyzer(rolename=desired_owner, objname=ObjectName(desired_owner),
                                  dbcontext=mockdbcontext, is_personal_schema=True)

    actual = analyzer.get_improperly_owned_objects()
    expected = [('tables', ObjectName(desired_owner, TABLES[1]), wrong_owner),
                ('sequences', ObjectName(desired_owner, SEQUENCES[1]), wrong_owner)]
    assert set(actual) == set(expected)
should be skipped) 253 | ObjectInfo('sequences', ObjectName(owner, SEQUENCES[2]), wrong_owner, True), 254 | ] 255 | schemaconf = own.SchemaAnalyzer(rolename=owner, objname=ObjectName(owner), 256 | dbcontext=mockdbcontext, is_personal_schema=True) 257 | 258 | actual = schemaconf.get_improperly_owned_objects() 259 | expected = [('tables', ObjectName(owner, TABLES[1]), wrong_owner), 260 | ('sequences', ObjectName(owner, SEQUENCES[1]), wrong_owner)] 261 | assert set(actual) == set(expected) 262 | 263 | 264 | def test_nonschemaanalyzer_expand_schema_objects(mockdbcontext): 265 | mockdbcontext.get_all_object_attributes = lambda: { 266 | 'tables': { 267 | SCHEMAS[0]: { 268 | ObjectName(SCHEMAS[0], TABLES[0]): {'owner': DUMMY, 'is_dependent': False}, 269 | ObjectName(SCHEMAS[0], TABLES[1]): {'owner': DUMMY, 'is_dependent': False}, 270 | ObjectName(SCHEMAS[0], TABLES[2]): {'owner': DUMMY, 'is_dependent': True}, 271 | }, 272 | }, 273 | } 274 | nsa = own.NonschemaAnalyzer(rolename=ROLES[0], objname=DUMMY, 275 | objkind='tables', dbcontext=mockdbcontext) 276 | actual = nsa.expand_schema_objects(SCHEMAS[0]) 277 | expected = [ObjectName(SCHEMAS[0], TABLES[0]), ObjectName(SCHEMAS[0], TABLES[1])] 278 | assert set(actual) == set(expected) 279 | 280 | 281 | def test_nonschemaanalyzer_analyze_no_changed_needed(mockdbcontext): 282 | objname = ObjectName(SCHEMAS[0], TABLES[0]) 283 | mockdbcontext.get_all_object_attributes = lambda: { 284 | 'tables': { 285 | SCHEMAS[0]: { 286 | objname: {'owner': ROLES[0], 'is_dependent': False}, 287 | }, 288 | }, 289 | } 290 | nsa = own.NonschemaAnalyzer(rolename=ROLES[0], objname=objname, 291 | objkind='tables', dbcontext=mockdbcontext) 292 | actual = nsa.analyze() 293 | assert actual == [] 294 | 295 | 296 | def test_nonschemaanalyzer_analyze_without_schema_expansion(mockdbcontext): 297 | objname = ObjectName(SCHEMAS[0], TABLES[0]) 298 | mockdbcontext.get_all_object_attributes = lambda: { 299 | 'tables': { 300 | SCHEMAS[0]: { 301 | objname: {'owner': 
ROLES[1], 'is_dependent': False}, 302 | }, 303 | }, 304 | } 305 | nsa = own.NonschemaAnalyzer(rolename=ROLES[0], objname=objname, 306 | objkind='tables', dbcontext=mockdbcontext) 307 | actual = nsa.analyze() 308 | expected = [own.Q_SET_OBJECT_OWNER.format('TABLE', objname.qualified_name, ROLES[0], ROLES[1])] 309 | assert actual == expected 310 | 311 | 312 | def test_nonschemaanalyzer_analyze_with_schema_expansion(mockdbcontext): 313 | mockdbcontext.get_all_object_attributes = lambda: { 314 | 'sequences': { 315 | SCHEMAS[0]: { 316 | ObjectName(SCHEMAS[0], SEQUENCES[0]): {'owner': ROLES[1], 'is_dependent': False}, 317 | ObjectName(SCHEMAS[0], SEQUENCES[1]): {'owner': ROLES[2], 'is_dependent': False}, 318 | # This will be skipped as the owner is correct 319 | ObjectName(SCHEMAS[0], SEQUENCES[2]): {'owner': ROLES[0], 'is_dependent': False}, 320 | # This will be skipped as it is dependent 321 | ObjectName(SCHEMAS[0], SEQUENCES[3]): {'owner': ROLES[1], 'is_dependent': True}, 322 | }, 323 | }, 324 | } 325 | nsa = own.NonschemaAnalyzer(rolename=ROLES[0], objname=ObjectName(SCHEMAS[0], '*'), 326 | objkind='sequences', dbcontext=mockdbcontext) 327 | actual = nsa.analyze() 328 | expected = [ 329 | own.Q_SET_OBJECT_OWNER.format('SEQUENCE', ObjectName(SCHEMAS[0], SEQUENCES[0]).qualified_name, 330 | ROLES[0], ROLES[1]), 331 | own.Q_SET_OBJECT_OWNER.format('SEQUENCE', ObjectName(SCHEMAS[0], SEQUENCES[1]).qualified_name, 332 | ROLES[0], ROLES[2]), 333 | ] 334 | assert set(actual) == set(expected) 335 | -------------------------------------------------------------------------------- /tests/wait_for_postgres.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | if [[ $(echo "$POSTGRES_VERSION" | cut -d '.' -f 1) == 10 ]]; then 3 | # Postgres 10 versions are only x.x instead of x.x.x, so the short version is just "10" 4 | POSTGRES_SHORT_VERSION="10"; 5 | else 6 | POSTGRES_SHORT_VERSION=$(echo "$POSTGRES_VERSION" | cut -d '.' 
#!/usr/bin/env bash
# Block until the Postgres server at $POSTGRES_HOST accepts connections.
#
# Also derives POSTGRES_SHORT_VERSION from $POSTGRES_VERSION for use by the
# surrounding test harness: Postgres 10 and later use major-only version
# numbers ("10", "11", ...), while 9.x releases use major.minor ("9.6").

POSTGRES_MAJOR_VERSION=$(echo "$POSTGRES_VERSION" | cut -d '.' -f 1)
if [[ "$POSTGRES_MAJOR_VERSION" -ge 10 ]]; then
    # Postgres 10+ versions are x.x instead of x.x.x, so the short version
    # is just the major number (previously this only handled version 10,
    # producing a wrong short version for 11 and later).
    POSTGRES_SHORT_VERSION="$POSTGRES_MAJOR_VERSION";
else
    POSTGRES_SHORT_VERSION=$(echo "$POSTGRES_VERSION" | cut -d '.' -f 1,2);
fi

ISREADY="pg_isready --host=$POSTGRES_HOST"
# pg_isready exits non-zero until the server accepts connections;
# poll once per second until it succeeds.
until $ISREADY; do
    sleep 1
    echo "waiting on Postgres"
done