├── .circleci └── config.yml ├── .dockerignore ├── .flake8 ├── .github └── FUNDING.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTION_GUIDE.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── build-docker.sh ├── deploy └── vbump.py ├── docker-entrypoint.sh ├── docs ├── comparison.md ├── deploy_usage.md ├── dev_usage.md ├── index.md ├── installing_and_connecting.md ├── options.md ├── quickstart.md └── with_django.md ├── migra ├── __init__.py ├── changes.py ├── command.py ├── migra.py ├── statements.py └── util.py ├── poetry.lock ├── pyproject.toml └── tests ├── FIXTURES ├── collations │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── constraints │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── dependencies │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── dependencies2 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── dependencies3 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── dependencies4 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── enumdefaults │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── enumdeps │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── everything │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── excludeschema │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── extversions │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── generated │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── generated_added │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── identitycols │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── 
expected2.sql ├── inherit │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── inherit2 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── partitioning │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── privileges │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── rls │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── rls2 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── seq │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── singleschema │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── singleschema_ext │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── triggers │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── triggers2 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql └── triggers3 │ ├── a.sql │ ├── additions.sql │ ├── b.sql │ ├── expected.sql │ └── expected2.sql ├── __init__.py └── test_migra.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/circleci 5 | docker: 6 | - image: cimg/python:3.10 7 | - image: cimg/postgres:14.1 8 | environment: 9 | POSTGRES_USER: circleci 10 | POSTGRES_DB: circleci 11 | POSTGRES_HOST_AUTH_METHOD: trust 12 | 13 | steps: 14 | - checkout 15 | - restore_cache: 16 | key: deps1-{{ .Branch }}-{{ checksum "pyproject.toml" }} 17 | - run: 18 | name: Wait for db 19 | command: dockerize -wait tcp://localhost:5432 -timeout 1m 20 | - run: sudo apt-get update 21 | - run: sudo apt-get install -y postgresql-client 22 | - run: 23 | name: create postgres user 24 | command: psql postgresql://@localhost/circleci -c 'create role postgres' 25 | - run: 
26 | name: Install poetry 27 | command: | 28 | python --version 29 | python -m pip install -U pip setuptools poetry 30 | poetry config virtualenvs.create false 31 | - run: 32 | command: | 33 | python -m venv ~/.venv 34 | . ~/.venv/bin/activate 35 | poetry install 36 | - save_cache: 37 | key: deps1-{{ .Branch }}-{{ checksum "pyproject.toml" }} 38 | paths: 39 | - "~/.venv" 40 | - run: 41 | name: Check formatting 42 | command: | 43 | . ~/.venv/bin/activate 44 | make lint 45 | - run: 46 | command: | 47 | . ~/.venv/bin/activate 48 | make test 49 | - store_artifacts: 50 | path: test-reports/ 51 | destination: tr1 52 | 53 | publish: 54 | working_directory: ~/circleci 55 | docker: 56 | - image: cimg/python:3.10 57 | steps: 58 | - setup_remote_docker 59 | - checkout 60 | - restore_cache: 61 | key: deps1-{{ .Branch }}-{{ checksum "pyproject.toml" }} 62 | - run: 63 | name: Install poetry 64 | command: | 65 | python --version 66 | python -m pip install -U pip setuptools poetry 67 | poetry config virtualenvs.create false 68 | - run: 69 | command: | 70 | python -m venv ~/.venv 71 | . ~/.venv/bin/activate 72 | poetry install 73 | - run: 74 | name: Bump version, build, install 75 | command: | 76 | . 
~/.venv/bin/activate 77 | python deploy/vbump.py 78 | poetry config http-basic.pypi $PYPI_USERNAME $PYPI_PASSWORD 79 | poetry build 80 | poetry publish 81 | git tag `cat ~/version` 82 | git push origin `cat ~/version` 83 | 84 | 85 | workflows: 86 | version: 2 87 | build-then-publish: 88 | jobs: 89 | - build 90 | - publish: 91 | requires: 92 | - build 93 | filters: 94 | branches: 95 | only: master 96 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | __pycache__ 21 | wheelhouse 22 | 23 | # Installer logs 24 | pip-log.txt 25 | 26 | # Unit test / coverage reports 27 | .coverage 28 | .tox 29 | nosetests.xml 30 | 31 | # Translations 32 | *.mo 33 | 34 | # Mr Developer 35 | .mr.developer.cfg 36 | .project 37 | .pydevproject 38 | 39 | .cache 40 | 41 | scratch 42 | 43 | Dockerfile 44 | .* 45 | docs 46 | tests 47 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E501, W503 3 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: djrobstep 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | 
__pycache__ 21 | wheelhouse 22 | 23 | # Installer logs 24 | pip-log.txt 25 | 26 | # Unit test / coverage reports 27 | .coverage* 28 | .tox 29 | nosetests.xml 30 | .pytest_cache 31 | 32 | # Translations 33 | *.mo 34 | 35 | # Mr Developer 36 | .mr.developer.cfg 37 | .project 38 | .pydevproject 39 | 40 | .cache 41 | 42 | docs/site 43 | 44 | scratch 45 | # poetry.lock 46 | pip-wheel-metadata 47 | .DS_Store 48 | 49 | .vscode 50 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at robertlechte@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTION_GUIDE.md: -------------------------------------------------------------------------------- 1 | # Contribution guide 2 | 3 | ## Trust the process 4 | 5 | Up until recently, `migra` had a very laissez-faire approach to contributions, but that created a few issues that made it harder for maintainers to review and left PRs languishing for too long. 6 | 7 | This new contributions guidelines are aimed to fix that, while still making contributing easy and approachable. 8 | 9 | Make sure your contribution follows these steps: 10 | 11 | - Before getting into code, raise an issue - maintainers should have useful advice on how to approach the change. 
Build some basic consensus on the approach before coding. 12 | 13 | - Break the fix/feature/change into several smaller changes. Most changes will also involve changing `schemainspect` at the same time, which does most of the gruntwork under the hood of migra. 14 | 15 | - Keep the individual PRs extremely small. 100 lines or less ideally. This one is key for making them quick and easy to review. 16 | 17 | ## Meet the maintainers 18 | 19 | Your humble team of maintainers currently consists of the following folks: 20 | 21 | - @djrobstep (original author) 22 | - @maximsmol, @kennyworkman, @aidanabd (new volunteers from latch.bio) 23 | 24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | RUN apk add --update --no-cache --upgrade postgresql-libs && \ 4 | apk add --no-cache --virtual=build-dependencies build-base postgresql-dev && \ 5 | pip install --no-cache-dir packaging psycopg2-binary migra && \ 6 | apk del build-dependencies && \ 7 | rm -rf /tmp/* /var/tmp/* /var/cache/apk/* 8 | 9 | COPY docker-entrypoint.sh /docker-entrypoint.sh 10 | 11 | ENTRYPOINT ["/docker-entrypoint.sh"] 12 | 13 | CMD ["migra", "--help"] 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. 
We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: docs 2 | 3 | # test commands and arguments 4 | tcommand = py.test -x 5 | tmessy = -svv 6 | targs = --cov-report term-missing --cov migra 7 | 8 | test: 9 | $(tcommand) $(tmessy) $(targs) tests 10 | 11 | stest: 12 | $(tcommand) $(tmessy) $(targs) tests 13 | 14 | gitclean: 15 | git clean -fXd 16 | 17 | clean: 18 | find . -name \*.pyc -delete 19 | rm -rf .cache 20 | rm -rf build 21 | 22 | fmt: 23 | isort . 24 | black . 25 | 26 | lint: 27 | flake8 . 
28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # migra: Like diff but for Postgres schemas 2 | 3 | - ## compare schemas 4 | - ## autogenerate migration scripts 5 | - ## autosync your development database from your application models 6 | - ## make your schema changes testable, robust, and (mostly) automatic 7 | 8 | `migra` is a schema diff tool for PostgreSQL, written in Python. Use it in your python scripts, or from the command line like this: 9 | 10 | $ migra postgresql:///a postgresql:///b 11 | alter table "public"."products" add column newcolumn text; 12 | 13 | alter table "public"."products" add constraint "x" CHECK ((price > (0)::numeric)); 14 | 15 | `migra` magically figures out all the statements required to get from A to B. 16 | 17 | Most features of PostgreSQL are supported. 18 | 19 | **Migra supports PostgreSQL >= 9 only.** Known issues exist with earlier versions. More recent versions are more comprehensively tested. Development resources are limited, and feature support rather than backwards compatibility is prioritised. 20 | 21 | ## THE DOCS 22 | 23 | Documentation is at [databaseci.com/docs/migra](https://databaseci.com/docs/migra). 24 | 25 | ## Folks, schemas are good 26 | 27 | Schema migrations are without doubt the most cumbersome and annoying part of working with SQL databases. So much so that some people think that schemas themselves are bad! 28 | 29 | But schemas are actually good. Enforcing data consistency and structure is a good thing. It’s the migration tooling that is bad, because it’s harder to use than it should be. ``migra`` is an attempt to change that, and make migrations easy, safe, and reliable instead of something to dread. 30 | 31 | ## Contributing 32 | 33 | Contributing is easy. 
[Jump into the issues](https://github.com/djrobstep/migra/issues), find a feature or fix you'd like to work on, and get involved. Or create a new issue and suggest something completely different. If you're unsure about any aspect of the process, just ask. 34 | 35 | ## Credits 36 | 37 | - [djrobstep](https://github.com/djrobstep): initial development, maintenance 38 | - [alvarogzp](https://github.com/alvarogzp): privileges support 39 | - [seblucas](https://github.com/seblucas): docker improvements 40 | - [MOZGIII](https://github.com/MOZGIII): docker support 41 | - [mshahbazi](https://github.com/mshahbazi): misc fixes and enhancements 42 | -------------------------------------------------------------------------------- /build-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | printf "\n> \e[93m\033[1mBuilding Docker image\e[0m\n\n" 4 | 5 | set -e 6 | 7 | ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd) 8 | cd ${ABSOLUTE_PATH} 9 | 10 | DOCKER_IMAGE="djrobstep/migra:latest" 11 | 12 | printf "# Image: \e[1;37m${DOCKER_IMAGE}\e[0m\n\n" 13 | 14 | docker build -t djrobstep/migra:latest . 
15 | -------------------------------------------------------------------------------- /deploy/vbump.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from time import time 3 | 4 | from toml import TomlPreserveInlineDictEncoder as tpide 5 | from toml import dumps, loads 6 | 7 | PYPROJECT = "pyproject.toml" 8 | 9 | p = Path(PYPROJECT) 10 | pyproject = loads(p.read_text()) 11 | 12 | v = pyproject["tool"]["poetry"]["version"] 13 | 14 | parts = v.split(".")[:2] 15 | unix = str(int(time())) 16 | 17 | parts.append(unix) 18 | 19 | v_with_timestamp = ".".join(parts) 20 | pyproject["tool"]["poetry"]["version"] = v_with_timestamp 21 | p.write_text(dumps(pyproject, tpide())) 22 | 23 | Path("~/version").expanduser().write_text(v_with_timestamp) 24 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -e 4 | 5 | if [ "$1" = 'migra' ]; then 6 | if [ "${MIGRA_LOG_COMMAND}" = 'true' ]; then 7 | echo "$*" 8 | fi 9 | fi 10 | 11 | exec "$@" 12 | -------------------------------------------------------------------------------- /docs/comparison.md: -------------------------------------------------------------------------------- 1 |
2 | | Feature | migra | alembic | django | 3 | | - | - | - | - | 4 | | Migration files required for DB changes during dev? | No, autosyncs | Yes, required | Yes, required | 5 | | Requires schema version number tracking | No version numbers involved | Yes, relies on version numbers | Yes, relies on version numbers | 6 | | Must store entire chain of migration files? | No, only need one file with pending changes | Yes, required | Yes, required | 7 | | ORMs supported | Supports any ORM or no ORM | SQLAlchemy ORM only | Django ORM only | 8 | | Testability | Explicitly tests for matching schema | Doesn't test | Doesn't test | 9 | | Needs copy of access to (copy of) current schema to generate migration scripts | Yes | No | No | 10 | | Databases supported | PostgreSQL only | Postgres, mysql, various others | Postgres, mysql, various others | 11 |
-------------------------------------------------------------------------------- /docs/deploy_usage.md: -------------------------------------------------------------------------------- 1 | # Deploy Usage 2 | 3 | With migra you can dispense with schema version numbers and multiple migration files. 4 | 5 | ## Different deployment styles 6 | 7 | Migra doesn't force a precise workflow onto you, because every project is different. Instead, migra was designed to allow you to easily script up the workflow you want, help you automate it as fast as possible, and help you test your database changes to ensure correctness. 8 | 9 | Nevertheless, here's some guidelines about how set things up just right. 10 | 11 | ### App-driven vs database-driven 12 | 13 | Broadly speaking there are two ways to manage changes to an application database, which we might call app-centric and database-centric. You can use `migra` to enhance either style, however you'll use it quite differently depending on your preferred situation. 14 | 15 | With the app-centric approach, you add migration files to the same repository as your application code. When you deploy, any new migration files that haven't yet been applied to the production database get run before the app code is deployed. 16 | 17 | The database-centric approach is more common in an environment where you have separate people responsible for the database. Sysadmins or DBAs manage database changes as a separate task. App and DB deployments are more loosely coordinated and not run at the same time as part of the same deployment. 18 | 19 | The key feature of both is to directly use the production schema generate the changes needed. 20 | 21 | ### Migra with app-driven deployments 22 | 23 | Instead of manually crafting migration files and mucking about with version numbers, you can do the following. 
24 | 25 | - dump the schema of your production database 26 | - use migra to generate the changes required to move your production database to the new intended state. edit this script as necessary and add it to source control 27 | - write tests to ensure that your app works after the migration has been applied and that it results in the exact schema you want 28 | - add a step to your deployment that does a schema comparison to see if any of the scripts you've added to source control need applying. unlike traditional migration tools, no version numbers are needed here because the script checks the structure of the database directly. 29 | - then applies the scripts, tests the schema again to check the script has resulted in the correct structure, then deploys the rest of the app as usual. 30 | 31 | ### Migra with database-centric deployments 32 | 33 | The flow with a database-centric application might look like this: 34 | 35 | - dump the production database schema and generate the changes required. 36 | - write tests to ensure correct functioning of the application (both before and after the database changes) 37 | - deploy the application 38 | - subsequently, apply the generated migration script on the production database. 39 | -------------------------------------------------------------------------------- /docs/dev_usage.md: -------------------------------------------------------------------------------- 1 | # Development Usage 2 | 3 | Migra is handy for speeding up database-related development and testing tasks. 4 | 5 | ### Auto-syncing dev database to application code 6 | 7 | When developing applications that work with a database, you'll inevitably have some kind of "target" schema in mind that you want the database to have. 8 | 9 | This is often defined with database model classes (such as used by Django/Rails), or perhaps with a setup script written in raw SQL. 
10 | 11 | When developing your application, you'll generally have a local version of this database running for use while developing, tweaking and debugging your application. 12 | 13 | The challenge is to keep this database in sync with your "target" schema as you tinker and continually modify the target to suit your app. 14 | 15 | This sample script shows how you might use migra to sync up your dev database almost automatically: 16 | 17 | :::python 18 | def sync(): 19 | from sqlbag import S, temporary_database as temporary_db 20 | 21 | DB_URL = 'postgresql:///name_of_local_dev_database' 22 | 23 | with temporary_db() as TEMP_DB_URL: 24 | load_from_app(TEMP_DB_URL) 25 | 26 | with S(DB_URL) as s_current, S(TEMP_DB_URL) as s_target: 27 | m = Migration(s_current, s_target) 28 | m.set_safety(False) 29 | m.add_all_changes() 30 | 31 | if m.statements: 32 | print('THE FOLLOWING CHANGES ARE PENDING:', end='\n\n') 33 | print(m.sql) 34 | print() 35 | if input('Apply these changes?') == 'yes': 36 | print('Applying...') 37 | m.apply() 38 | else: 39 | print('Not applying.') 40 | else: 41 | print('Already synced.') 42 | 43 | 44 | ### Creating customized tasks and scripts 45 | 46 | Every project is slightly different, so Migra doesn't try and solve every migration workflow problem you could possibly have. Instead, migra tries to make it as easy as possible to write scripts to solve your own migration problems. 47 | 48 | Most of migra's functionality is available through the Migration object. Pass in two database sessions and migra will compare the two against each other. This basic unit of functionality should be fairly easily adaptable to your particular requirements. 49 | 50 | ### Setting up tests 51 | 52 | You can use migra to test the correctness of three things: 53 | 54 | - The correctness of your code pre-migration. 55 | - The correctness of your code post-migration. 56 | - The correctness of your migration scripts. 
57 | 58 | ### Testing *before* and *after* database versions 59 | 60 | With the right testing framework and fixture setup code, you can configure your tests to run twice, against both the pre and post migration versions of your database. 61 | 62 | Here's how to do that with python's pytest. Suppose you are using migra to generate a file called `pending.sql` that contains any pending migrations. You could then add something like this to your `conftest.py` file: 63 | 64 | import os.path 65 | from sqlbag import S, temporary_database as temporary_db 66 | 67 | def load_pre_migration(dburl): 68 | with S(dburl) as s: 69 | load_sql_from_file(s, 'MIGRATIONS/production.dump.sql') 70 | 71 | 72 | def load_post_migration(dburl): 73 | with S(dburl) as s: 74 | load_sql_from_file(s, 'MIGRATIONS/production.dump.sql') 75 | 76 | with S(dburl) as s: 77 | load_sql_from_file(s, 'MIGRATIONS/pending.sql') 78 | 79 | pending = os.path.exists('MIGRATIONS/pending.sql') 80 | 81 | if pending: 82 | DATABASE_SETUPS_TO_TEST = [ 83 | load_pre_migration, 84 | load_post_migration 85 | ] 86 | else: 87 | DATABASE_SETUPS_TO_TEST = [ 88 | load_post_migration 89 | ] 90 | 91 | @pytest.fixture(params=DATABASE_SETUPS_TO_TEST) 92 | def db(request): 93 | with temporary_db() as test_db_url: 94 | setup_method = request.param 95 | setup_method(test_db_url) 96 | yield test_db_url 97 | 98 | ### Testing against real data 99 | 100 | You can make your testing more comprehensive by testing against not just the empty schema structure of your production database, but a copy populated with real (preferably anonymised) application data. 101 | 102 | If your production database is large, you really should be testing your migration against a copy of similar size, in order to detect any performance problems that could result from slow-running migration scripts. 
103 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # `migra` is a schema comparison tool for PostgreSQL. 2 | 3 | It's a command line tool, and Python library. Find differences in database schemas as easily as running a diff on two text files. 4 | 5 | `migra` makes schema changes almost automatic. Management of database migration deployments becomes much easier, faster, and more reliable. 6 | 7 | Using `migra` is as simple as this: 8 | 9 | $ migra postgresql:///a postgresql:///b 10 | alter table "public"."book" add column "author" character varying not null; 11 | 12 | alter table "public"."book" alter column "name" set not null; 13 | 14 | To get started, hit the [Quickstart guide](/docs/migra/quickstart). 15 | 16 | Migra is 100% open source software. The code is available on [github](https://github.com/djrobstep/migra). 17 | 18 | ## Features and Limitations 19 | 20 | The following features of postgres are supported: 21 | 22 |
23 | 24 | Feature | Supported | Notes/limitations 25 | --- | --- | --- 26 | tables | ✔ | 27 | partitioned tables | ✔ | 28 | constraints | ✔ | 29 | views | ✔ | 30 | functions | ✔ | All languages except C/INTERNAL 31 | indexes | ✔ | 32 | sequences | ✔ | Does not track sequence numbers 33 | schemas | ✔ | 34 | extensions | ✔ | 35 | enums | ✔ | 36 | privileges | ✔ | Not exhaustive. Requires --with-privileges flag 37 | row-level security | ✔ | NEW! Doesn't include role management 38 | triggers | ✔ | 39 | identity columns | ✔ | 40 | generated columns | ✔ | 41 | custom types/domains | ✔ |Basic support (drop-and-create only, no alter) | 42 | 43 |
44 | 45 | `migra` plays nicely with extensions. Schema contents belonging to extensions will be ignored and left to the extension to manage. 46 | 47 | `migra` plays nicely with view/function dependencies, and will drop/create them in the correct order. 48 | 49 | ## Endorsements 50 | 51 | `migra` was [used to manage the schema that powers PyPI](https://twitter.com/dstufft/status/988410901459034113): 52 | 53 | > *Migra is cool as hell though, when I was trying to reconcile PyPI with the alembic initial schema I had to download some weird java app I found on some sketchy website to do that :P* 54 | 55 | >- [Donald Stufft](https://twitter.com/dstufft), PyPI maintainer 56 | 57 | > *I can definitely see Migra is more productive when switching around between schemas in development.* 58 | 59 | >- [Mike Bayer](https://twitter.com/zzzeek), SQLAlchemy author 60 | 61 | ## Development 62 | 63 | Migra is developed on [github](https://github.com/djrobstep/migra). Contributions are welcome, get involved! 64 | 65 | ## Philosophy 66 | 67 | There was [a good comment on Hacker News](https://news.ycombinator.com/item?id=16679665) discussing how `migra` differs from traditional migration tools. 68 | 69 | > *This is awesome!* 70 | 71 | > *A lot of people point to migrations as the best way to track changes to a database schema. But there are a lot of problems with them. For example, they involve adding version control on top of another version control system, which can cause a ton of problems. They also don't maintain themselves well. If you leave it alone, running migrations will just take longer and longer over time, even though you don't really get more utility.* 72 | 73 | > *I think we need more support from databases themselves to solve this problem.* 74 | 75 | > *In the meantime, this is a really good stopgap, because it can theoretically allow you to just have a file with your "ideal schema" for each commit. No need to maintain a separate database migration history too. 
This doesn't include the PostgreSQL driver, but you can install that separately, or at the same time with:
postgresql+pg8000:///example
58 | 59 | -------------------------------------------------------------------------------- /docs/options.md: -------------------------------------------------------------------------------- 1 | # Options 2 | 3 | The command line version of migra includes a number of options for getting the diff output you desire. If using the API, similarly named options are available on the Migration object. 4 | 5 | ## `--unsafe` 6 | 7 | Migra will throw an exception if `drop...` statements are generated, as a precaution. Adding this flag will disable the safety feature and happily generate the drop statements. Remember, always review generated statements before running them! 8 | 9 | ## `--schema [SCHEMA_NAME]` 10 | 11 | Specify a single schema to diff. 12 | 13 | ## `--exclude_schema [SCHEMA_NAME]` 14 | 15 | Specify a single schema to exclude, including all other schemas in the diff. 16 | 17 | ## `--create-extensions-only` 18 | 19 | Only output create extension statements, nothing else. This is useful when you have extensions as part of a setup script for a single schema: Those extensions need to be installed, but extensions are usually not installed in a custom schema. 20 | 21 | You'd generate a setup script in two steps: 22 | 23 | - Generate the necessary create extension if not exists... statements. 24 | - Generate the schema changes with --schema-only. 25 | 26 | Then combine the output of 1 and 2 into a single database sync script. 27 | 28 | ## `--with-privileges` 29 | 30 | This tells migra to spit out permission-related change statements (grant/revoke). This is False by default: Often one is comparing databases from different environments, where the users and permissions are completely different and not something one would want to sync. 31 | 32 | ## `--force-utf8` 33 | 34 | Some folks have reported unicode character output issues on windows command lines. This flag often fixes it! 
-------------------------------------------------------------------------------- /docs/quickstart.md: -------------------------------------------------------------------------------- 1 | # Quickstart 2 | 3 | ## Installation 4 | 5 | `migra` is written in Python so you need to install it with `pip`, the Python Package Manager (don't worry, you don't need to know or use any Python to use the `migra` command). 6 | 7 | 1. Make sure you have [pip](https://pip.pypa.io/en/stable/installing/) properly installed. 8 | 9 | 2. Run: 10 | 11 | pip install migra[pg] 12 | 13 | This will install the latest version of migra from PyPI (the global Python Package Index), along with psycopg2, the python PostgreSQL driver. 14 | 15 | Alternatively, if you don't want to install Python, you can run it from a self-contained Docker image by first running: 16 | 17 | :::shell 18 | docker pull djrobstep/migra 19 | 20 | then creating a short alias to it with: 21 | 22 | :::shell 23 | alias migra="docker run djrobstep/migra migra" 24 | 25 | 3. Confirm migra is installed by running `migra --help`. The output should begin like this: 26 | 27 | usage: migra [-h] [--unsafe] dburl_from dburl_target 28 | 29 | ## Comparing two database schemas 30 | 31 | To compare two database schemas: 32 | 33 | migra [url_of_database_A] [url_of_database_B] 34 | 35 | For example, we have two databases, named "alpha" and "beta". We can compare them using this command: 36 | 37 | :::shell 38 | migra postgresql:///alpha postgresql:///beta 39 | 40 | Migra will then generate whatever SQL is required to change the schema of database `alpha` to match database `beta`. 41 | 42 | If the two database schemas match exactly, you'll get empty output, because no changes are required. This functions like the well-known [diff command](https://en.wikipedia.org/wiki/Diff_utility), which also returns empty output when comparing two identical files. 43 | 44 | ### Warning 45 | 46 | Don't blindly copy-and-paste the output of the `migra` command. 
- Some migration operations can take a long time and cause interruptions and downtime, particularly when involving tables containing large amounts of data.
`django`'s built-in apps come with some migrations, so you don't want to disable the built-in migrations entirely.
35 | os.environ["DJANGO_SETTINGS_MODULE"] = "mysite.settings" 36 | 37 | # The "polls" app (as per the django official tutorial) 38 | # is set here to be managed with migra instead 39 | MANAGE_WITH_MIGRA = ["polls"] 40 | 41 | # To disable migrations on a django app, you have to 42 | # set the MIGRATION_MODULES config with None for each 43 | # disabled "app" 44 | class DisableMigrations(object): 45 | def __contains__(self, item): 46 | return item in MANAGE_WITH_MIGRA 47 | 48 | def __getitem__(self, item): 49 | return None 50 | 51 | # Compare two schemas, prompt to run a sync if necessary 52 | def _sync_with_prompt(db_url_current, db_url_target): 53 | with S(db_url_current) as s0, S(db_url_target) as s1: 54 | m = Migration(s0, s1) 55 | m.set_safety(False) 56 | m.add_all_changes() 57 | 58 | if m.statements: 59 | print("THE FOLLOWING CHANGES ARE PENDING:", end="\n\n") 60 | print(m.sql) 61 | print() 62 | if input('Type "yes" to apply these changes: ') == "yes": 63 | print("Applying...") 64 | m.apply() 65 | else: 66 | print("Not applying.") 67 | else: 68 | print("Already synced.") 69 | 70 | 71 | # Create a temporary database for loading our 72 | # "target" schema into 73 | @contextmanager 74 | def tmptarget(): 75 | with temporary_database() as tdb: 76 | settings.DATABASES["tmp_target"] = { 77 | "ENGINE": "django.db.backends.postgresql", 78 | "NAME": tdb.split("/")[-1], 79 | } 80 | django.setup() 81 | yield tdb 82 | 83 | 84 | def syncdb(): 85 | # Disable django migrations if we're using migra instead 86 | settings.MIGRATION_MODULES = DisableMigrations() 87 | 88 | 89 | with tmptarget() as tdb: 90 | management.call_command("migrate") 91 | 92 | management.call_command( 93 | "migrate", 94 | "--run-syncdb", 95 | "--database=tmp_target", 96 | verbosity=0, 97 | interactive=False, 98 | ) 99 | real_db_name = settings.DATABASES["default"]["NAME"] 100 | _sync_with_prompt(f"postgresql:///{real_db_name}", tdb) 101 | 102 | 103 | if __name__ == "__main__": 104 | syncdb() 105 | 106 | 107 
def statements_for_changes(
    things_from,
    things_target,
    creations_only=False,
    drops_only=False,
    modifications_only=False,
    modifications=True,
    dependency_ordering=False,
    add_dependents_for_modified=False,
    modifications_as_alters=False,
):
    """Diff two inventories of schema objects and return the SQL statements
    needed to turn ``things_from`` into ``things_target``.

    The keyword flags are forwarded to :func:`statements_from_differences`,
    which does the actual statement generation.  ``add_dependents_for_modified``
    is accepted for interface compatibility but not used by this wrapper.
    """
    # Classify objects into added / removed / modified buckets; the
    # unmodified bucket is irrelevant for statement generation.
    diff = differences(things_from, things_target)
    added, removed, modified, _unchanged = diff

    forwarded_flags = dict(
        creations_only=creations_only,
        drops_only=drops_only,
        modifications_only=modifications_only,
        modifications=modifications,
        dependency_ordering=dependency_ordering,
        modifications_as_alters=modifications_as_alters,
    )

    # ``old`` gives the generator access to the pre-change objects so it can
    # emit drop statements for the original definitions.
    return statements_from_differences(
        added=added,
        removed=removed,
        modified=modified,
        replaceable=None,
        old=things_from,
        **forwarded_flags
    )
def statements_from_differences(
    added,
    removed,
    modified,
    replaceable=None,
    creations_only=False,
    drops_only=False,
    modifications=True,
    dependency_ordering=False,
    old=None,
    modifications_only=False,
    modifications_as_alters=False,
):
    """Turn pre-classified added/removed/modified object maps into a
    :class:`Statements` list of SQL.

    Modified objects are either drop-and-recreated (the default) or, when
    ``modifications_as_alters`` is set, changed with ALTER statements.
    Objects in ``replaceable`` can be swapped in place (e.g. CREATE OR
    REPLACE) and therefore skip the drop step.

    With ``dependency_ordering`` set, drops and creates are emitted over
    repeated sweeps: an object is only dropped once nothing still pending
    depends on it, and only created once everything it depends on exists.

    ``old`` must map keys to the *previous* object versions so their drop
    statements can be emitted.

    Raises:
        ValueError: if a sweep makes no progress, which would indicate a
            circular dependency.
    """
    replaceable = replaceable or set()
    statements = Statements()

    # Work queues: keys still awaiting a create / drop statement.
    pending_creations = set()
    pending_drops = set()

    # The *_only flags are mutually restricting: e.g. drops_only suppresses
    # creations and modifications.
    creations = not (drops_only or modifications_only)
    drops = not (creations_only or modifications_only)
    modifications = (
        modifications or modifications_only and not (creations_only or drops_only)
    )

    # A modification is realized either as drop+recreate or as ALTERs.
    drop_and_recreate = modifications and not modifications_as_alters
    alters = modifications and modifications_as_alters

    if drops:
        pending_drops |= set(removed)

    if creations:
        pending_creations |= set(added)

    if drop_and_recreate:
        if drops:
            # Replaceable objects don't need an explicit drop first.
            pending_drops |= set(modified) - replaceable

        if creations:
            pending_creations |= set(modified)

    if alters:
        for k, v in modified.items():
            statements += v.alter_statements(old[k])

    def has_remaining_dependents(v, pending_drops):
        # Without dependency ordering every object is considered free.
        if not dependency_ordering:
            return False

        return bool(set(v.dependents) & pending_drops)

    def has_uncreated_dependencies(v, pending_creations):
        if not dependency_ordering:
            return False

        return bool(set(v.dependent_on) & pending_creations)

    # Fixed-point loop: each sweep emits every statement whose dependency
    # constraints are currently satisfied, until both queues drain.
    while True:
        before = pending_drops | pending_creations
        if drops:
            for k, v in removed.items():
                if not has_remaining_dependents(v, pending_drops):
                    if k in pending_drops:
                        statements.append(old[k].drop_statement)
                        pending_drops.remove(k)
        if creations:
            for k, v in added.items():
                if not has_uncreated_dependencies(v, pending_creations):
                    if k in pending_creations:
                        # Some objects provide a safer multi-statement
                        # creation sequence; prefer it when available.
                        if hasattr(v, "safer_create_statements"):
                            statements += v.safer_create_statements
                        else:
                            statements.append(v.create_statement)
                        pending_creations.remove(k)
        if modifications:
            for k, v in modified.items():
                if drops:
                    if not has_remaining_dependents(v, pending_drops):
                        if k in pending_drops:
                            statements.append(old[k].drop_statement)
                            pending_drops.remove(k)
                if creations:
                    if not has_uncreated_dependencies(v, pending_creations):
                        if k in pending_creations:
                            if hasattr(v, "safer_create_statements"):
                                statements += v.safer_create_statements
                            else:
                                statements.append(v.create_statement)
                            pending_creations.remove(k)
        after = pending_drops | pending_creations
        if not after:
            break

        elif (
            after == before
        ):  # this should never happen because there shouldn't be circular dependencies
            raise ValueError("cannot resolve dependencies")  # pragma: no cover

    return statements
creations: 122 | for k, v in added.items(): 123 | if not has_uncreated_dependencies(v, pending_creations): 124 | if k in pending_creations: 125 | if hasattr(v, "safer_create_statements"): 126 | statements += v.safer_create_statements 127 | else: 128 | statements.append(v.create_statement) 129 | pending_creations.remove(k) 130 | if modifications: 131 | for k, v in modified.items(): 132 | if drops: 133 | if not has_remaining_dependents(v, pending_drops): 134 | if k in pending_drops: 135 | statements.append(old[k].drop_statement) 136 | pending_drops.remove(k) 137 | if creations: 138 | if not has_uncreated_dependencies(v, pending_creations): 139 | if k in pending_creations: 140 | if hasattr(v, "safer_create_statements"): 141 | statements += v.safer_create_statements 142 | else: 143 | statements.append(v.create_statement) 144 | pending_creations.remove(k) 145 | after = pending_drops | pending_creations 146 | if not after: 147 | break 148 | 149 | elif ( 150 | after == before 151 | ): # this should never happen because there shouldn't be circular dependencies 152 | raise ValueError("cannot resolve dependencies") # pragma: no cover 153 | 154 | return statements 155 | 156 | 157 | def get_enum_modifications( 158 | tables_from, tables_target, enums_from, enums_target, return_tuple=False 159 | ): 160 | _, _, e_modified, _ = differences(enums_from, enums_target) 161 | _, _, t_modified, _ = differences(tables_from, tables_target) 162 | pre = Statements() 163 | recreate = Statements() 164 | post = Statements() 165 | enums_to_change = e_modified 166 | 167 | for t, v in t_modified.items(): 168 | t_before = tables_from[t] 169 | _, _, c_modified, _ = differences(t_before.columns, v.columns) 170 | for k, c in c_modified.items(): 171 | before = t_before.columns[k] 172 | 173 | if ( 174 | (c.is_enum and before.is_enum) 175 | and c.dbtypestr == before.dbtypestr 176 | and c.enum != before.enum 177 | ): 178 | has_default = c.default and not c.is_generated 179 | 180 | if has_default: 181 | 
def get_table_changes(
    tables_from,
    tables_target,
    enums_from,
    enums_target,
    sequences_from,
    sequences_target,
):
    """Generate statements to migrate tables (and the enum/sequence details
    tied to them) from the ``*_from`` state to the ``*_target`` state.

    Ordering: drops of removed tables, enum pre-work, creation of added
    tables, enum post-work, then per-table alterations, then sequence
    ownership fix-ups.
    """
    added, removed, modified, _ = differences(tables_from, tables_target)

    statements = Statements()
    for t, v in removed.items():
        statements.append(v.drop_statement)

    # Enum changes are split so new tables can be created between the
    # rename/create phase and the recast/drop phase.
    enums_pre, enums_post = get_enum_modifications(
        tables_from, tables_target, enums_from, enums_target, return_tuple=True
    )

    statements += enums_pre

    for t, v in added.items():
        statements.append(v.create_statement)
        if v.rowsecurity:
            rls_alter = v.alter_rls_statement
            statements.append(rls_alter)

    statements += enums_post

    for t, v in modified.items():
        before = tables_from[t]

        # drop/recreate tables which have changed from partitioned to
        # non-partitioned (or vice versa) — not expressible as ALTER
        if v.is_partitioned != before.is_partitioned:
            statements.append(v.drop_statement)
            statements.append(v.create_statement)
            continue

        if v.is_unlogged != before.is_unlogged:
            statements += [v.alter_unlogged_statement]

        # attach/detach tables with changed parent tables
        if v.parent_table != before.parent_table:
            statements += v.attach_detach_statements(before)

    # Alter parent tables before inheritance children (False sorts first).
    modified_order = list(modified.keys())
    modified_order.sort(key=lambda x: modified[x].is_inheritance_child_table)

    for t in modified_order:
        v = modified[t]
        before = tables_from[t]

        if not v.is_alterable:
            continue

        c_added, c_removed, c_modified, _ = differences(before.columns, v.columns)

        # list() copy: we delete from c_modified while iterating.
        for k in list(c_modified):
            c = v.columns[k]
            c_before = before.columns[k]

            # there's no way to alter a table into/out of generated state
            # so you gotta drop/recreate

            generated_status_changed = c.is_generated != c_before.is_generated

            inheritance_status_changed = c.is_inherited != c_before.is_inherited

            generated_status_removed = not c.is_generated and c_before.is_generated

            can_drop_generated = (
                generated_status_removed and c_before.can_drop_generated
            )

            drop_and_recreate_required = inheritance_status_changed or (
                generated_status_changed and not can_drop_generated
            )

            if drop_and_recreate_required:
                # Reclassify the column as a drop + add instead of an alter.
                # Inherited columns are owned by the parent table, so the
                # corresponding drop/add is skipped on the child.
                del c_modified[k]

                if not c_before.is_inherited:
                    c_removed[k] = c_before

                if not c.is_inherited:
                    c_added[k] = c

        for k, c in c_removed.items():
            alter = v.alter_table_statement(c.drop_column_clause)
            statements.append(alter)
        for k, c in c_added.items():
            alter = v.alter_table_statement(c.add_column_clause)
            statements.append(alter)
        for k, c in c_modified.items():
            c_before = before.columns[k]
            statements += c.alter_table_statements(c_before, t)

        if v.rowsecurity != before.rowsecurity:
            rls_alter = v.alter_rls_statement
            statements.append(rls_alter)

    # Sequence drops/creates are handled elsewhere; here we only fix up
    # ownership (OWNED BY table.column) for new or changed sequences.
    seq_created, _, seq_modified, _ = differences(
        sequences_from, sequences_target
    )

    for k in seq_created:
        seq_b = sequences_target[k]

        if seq_b.quoted_table_and_column_name:
            statements.append(seq_b.alter_ownership_statement)

    for k in seq_modified:
        seq_a = sequences_from[k]
        seq_b = sequences_target[k]

        if seq_a.quoted_table_and_column_name != seq_b.quoted_table_and_column_name:
            statements.append(seq_b.alter_ownership_statement)

    return statements
def get_selectable_differences(
    selectables_from,
    selectables_target,
    enums_from,
    enums_target,
    add_dependents_for_modified=True,
):
    """Diff two collections of selectables (tables, views, matviews,
    functions), splitting tables from non-tables.

    When ``add_dependents_for_modified`` is set, any object depending on a
    changed/removed selectable is itself promoted into the modified set so
    it gets dropped and recreated, and ``replaceable`` collects the keys of
    modified non-tables that can be swapped in place (no dependents pulled
    in, no changed enums among their dependencies).

    Returns a 9-tuple:
    (tables_from, tables_target, added_tables, removed_tables,
     modified_tables, added_other, removed_other, modified_other,
     replaceable).
    """
    tables_from = od((k, v) for k, v in selectables_from.items() if v.is_table)
    tables_target = od((k, v) for k, v in selectables_target.items() if v.is_table)

    other_from = od((k, v) for k, v in selectables_from.items() if not v.is_table)
    other_target = od((k, v) for k, v in selectables_target.items() if not v.is_table)

    added_tables, removed_tables, modified_tables, unmodified_tables = differences(
        tables_from, tables_target
    )
    added_other, removed_other, modified_other, unmodified_other = differences(
        other_from, other_target
    )

    _, _, modified_enums, _ = differences(enums_from, enums_target)

    # changed_all: everything modified or removed; modified_all: a snapshot
    # of just the modified ones, taken before removals are merged in.
    changed_all = {}
    changed_all.update(modified_tables)
    changed_all.update(modified_other)
    modified_all = dict(changed_all)
    changed_all.update(removed_tables)
    changed_all.update(removed_other)

    replaceable = set()
    not_replaceable = set()

    if add_dependents_for_modified:

        for k, m in changed_all.items():
            old = selectables_from[k]

            if k in modified_all and m.can_replace(old):
                # In-place replacement is only safe for non-tables whose
                # enum dependencies are themselves unchanged.
                if not m.is_table:
                    changed_enums = [_ for _ in m.dependent_on if _ in modified_enums]
                    if not changed_enums:
                        replaceable.add(k)

                continue

            # Promote unmodified dependents to "modified" so they get
            # dropped/recreated along with the object they depend on.
            for d in m.dependents_all:
                if d in unmodified_other:
                    dd = unmodified_other.pop(d)
                    modified_other[d] = dd
                    not_replaceable.add(d)
        modified_other = od(sorted(modified_other.items()))

        # Anything pulled in as a dependent must be fully recreated.
        replaceable -= not_replaceable

    return (
        tables_from,
        tables_target,
        added_tables,
        removed_tables,
        modified_tables,
        added_other,
        removed_other,
        modified_other,
        replaceable,
    )
def get_trigger_changes(
    triggers_from,
    triggers_target,
    selectables_from,
    selectables_target,
    enums_from,
    enums_target,
    add_dependents_for_modified=True,
    **kwargs
):
    """Generate statements for trigger differences.

    Beyond directly added/removed/modified triggers, any otherwise-unchanged
    trigger attached to a selectable that is being dropped and recreated must
    itself be recreated, so those are promoted into the modified set.

    ``**kwargs`` are forwarded to :func:`statements_from_differences`.
    """
    (
        _,
        _,
        _,
        _,
        modified_tables,
        _,
        _,
        modified_other,
        replaceable,
    ) = get_selectable_differences(
        selectables_from,
        selectables_target,
        enums_from,
        enums_target,
        add_dependents_for_modified,
    )

    added, removed, modified, unmodified = differences(triggers_from, triggers_target)

    # BUGFIX: previously only set(modified_other) — modified *tables* were
    # omitted despite the variable name, so triggers on altered tables were
    # never recreated. Include both, as the name promises.
    modified_tables_and_other = set(modified_tables) | set(modified_other)
    # Triggers whose parent selectable will be dropped/recreated (i.e. is
    # modified and not replaceable in place) must be recreated too.
    deps_modified = [
        k
        for k, v in unmodified.items()
        if v.quoted_full_selectable_name in modified_tables_and_other
        and v.quoted_full_selectable_name not in replaceable
    ]

    for k in deps_modified:
        modified[k] = unmodified.pop(k)

    return statements_from_differences(
        added, removed, modified, old=triggers_from, **kwargs
    )
def get_selectable_changes(
    selectables_from,
    selectables_target,
    enums_from,
    enums_target,
    sequences_from,
    sequences_target,
    add_dependents_for_modified=True,
    tables_only=False,
    non_tables_only=False,
    drops_only=False,
    creations_only=False,
):
    """Generate statements for all selectables (tables, views, matviews,
    functions).

    Emission order is deliberate: non-table drops first (dependency
    ordered), then table changes, then non-table creations (dependency
    ordered), so that views/functions never reference columns or tables
    that don't exist yet.

    The ``tables_only`` / ``non_tables_only`` / ``drops_only`` /
    ``creations_only`` flags restrict output to a subset of that sequence.
    """
    (
        tables_from,
        tables_target,
        _,
        _,
        _,
        added_other,
        removed_other,
        modified_other,
        replaceable,
    ) = get_selectable_differences(
        selectables_from,
        selectables_target,
        enums_from,
        enums_target,
        add_dependents_for_modified,
    )
    statements = Statements()

    def functions(d):
        # relationtype "f" marks functions within the selectables inventory.
        return {k: v for k, v in d.items() if v.relationtype == "f"}

    if not tables_only:
        if not creations_only:
            # Phase 1: drop non-tables that are going away or changing.
            statements += statements_from_differences(
                added_other,
                removed_other,
                modified_other,
                replaceable=replaceable,
                drops_only=True,
                dependency_ordering=True,
                old=selectables_from,
            )

    if not non_tables_only:
        # Phase 2: table-level changes (and their enum/sequence fix-ups).
        statements += get_table_changes(
            tables_from,
            tables_target,
            enums_from,
            enums_target,
            sequences_from,
            sequences_target,
        )

    if not tables_only:
        if not drops_only:
            # Function bodies may reference objects created later in the
            # script, so disable body validation before creating them.
            if any([functions(added_other), functions(modified_other)]):
                statements += ["set check_function_bodies = off;"]

            # Phase 3: (re)create non-tables against the updated tables.
            statements += statements_from_differences(
                added_other,
                removed_other,
                modified_other,
                replaceable=replaceable,
                creations_only=True,
                dependency_ordering=True,
                old=selectables_from,
            )
    return statements
| @property 544 | def selectables(self): 545 | return partial( 546 | get_selectable_changes, 547 | od(sorted(self.i_from.selectables.items())), 548 | od(sorted(self.i_target.selectables.items())), 549 | self.i_from.enums, 550 | self.i_target.enums, 551 | self.i_from.sequences, 552 | self.i_target.sequences, 553 | ) 554 | 555 | @property 556 | def tables_only_selectables(self): 557 | return partial( 558 | get_selectable_changes, 559 | od(sorted(self.i_from.selectables.items())), 560 | od(sorted(self.i_target.selectables.items())), 561 | self.i_from.enums, 562 | self.i_target.enums, 563 | self.i_from.sequences, 564 | self.i_target.sequences, 565 | tables_only=True, 566 | ) 567 | 568 | @property 569 | def non_table_selectable_drops(self): 570 | return partial( 571 | get_selectable_changes, 572 | od(sorted(self.i_from.selectables.items())), 573 | od(sorted(self.i_target.selectables.items())), 574 | self.i_from.enums, 575 | self.i_target.enums, 576 | self.i_from.sequences, 577 | self.i_target.sequences, 578 | drops_only=True, 579 | non_tables_only=True, 580 | ) 581 | 582 | @property 583 | def non_table_selectable_creations(self): 584 | return partial( 585 | get_selectable_changes, 586 | od(sorted(self.i_from.selectables.items())), 587 | od(sorted(self.i_target.selectables.items())), 588 | self.i_from.enums, 589 | self.i_target.enums, 590 | self.i_from.sequences, 591 | self.i_target.sequences, 592 | creations_only=True, 593 | non_tables_only=True, 594 | ) 595 | 596 | @property 597 | def non_pk_constraints(self): 598 | a = self.i_from.constraints.items() 599 | b = self.i_target.constraints.items() 600 | a_od = od((k, v) for k, v in a if v.constraint_type != PK) 601 | b_od = od((k, v) for k, v in b if v.constraint_type != PK) 602 | return partial(statements_for_changes, a_od, b_od) 603 | 604 | @property 605 | def pk_constraints(self): 606 | a = self.i_from.constraints.items() 607 | b = self.i_target.constraints.items() 608 | a_od = od((k, v) for k, v in a if 
v.constraint_type == PK) 609 | b_od = od((k, v) for k, v in b if v.constraint_type == PK) 610 | return partial(statements_for_changes, a_od, b_od) 611 | 612 | @property 613 | def triggers(self): 614 | return partial( 615 | get_trigger_changes, 616 | od(sorted(self.i_from.triggers.items())), 617 | od(sorted(self.i_target.triggers.items())), 618 | od(sorted(self.i_from.selectables.items())), 619 | od(sorted(self.i_target.selectables.items())), 620 | self.i_from.enums, 621 | self.i_target.enums, 622 | ) 623 | 624 | @property 625 | def mv_indexes(self): 626 | a = self.i_from.indexes.items() 627 | b = self.i_target.indexes.items() 628 | 629 | def is_mv_index(i, ii): 630 | sig = schemainspect.misc.quoted_identifier(i.table_name, i.schema) 631 | return sig in ii.materialized_views 632 | 633 | a_od = od((k, v) for k, v in a if is_mv_index(v, self.i_from)) 634 | b_od = od((k, v) for k, v in b if is_mv_index(v, self.i_target)) 635 | return partial(statements_for_changes, a_od, b_od) 636 | 637 | @property 638 | def non_mv_indexes(self): 639 | a = self.i_from.indexes.items() 640 | b = self.i_target.indexes.items() 641 | 642 | def is_mv_index(i, ii): 643 | sig = schemainspect.misc.quoted_identifier(i.table_name, i.schema) 644 | return sig in ii.materialized_views 645 | 646 | a_od = od((k, v) for k, v in a if not is_mv_index(v, self.i_from)) 647 | b_od = od((k, v) for k, v in b if not is_mv_index(v, self.i_target)) 648 | return partial(statements_for_changes, a_od, b_od) 649 | 650 | @property 651 | def sequences(self): 652 | return partial( 653 | statements_for_changes, 654 | self.i_from.sequences, 655 | self.i_target.sequences, 656 | modifications=False, 657 | ) 658 | 659 | def __getattr__(self, name): 660 | if name in THINGS: 661 | return partial( 662 | statements_for_changes, 663 | getattr(self.i_from, name), 664 | getattr(self.i_target, name), 665 | ) 666 | 667 | else: 668 | raise AttributeError(name) 669 | 
-------------------------------------------------------------------------------- /migra/command.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, unicode_literals 2 | 3 | import argparse 4 | import sys 5 | from contextlib import contextmanager 6 | 7 | from .migra import Migration 8 | from .statements import UnsafeMigrationException 9 | 10 | 11 | @contextmanager 12 | def arg_context(x): 13 | if x == "EMPTY": 14 | yield None 15 | 16 | else: 17 | from sqlbag import S 18 | 19 | with S(x) as s: 20 | yield s 21 | 22 | 23 | def parse_args(args): 24 | parser = argparse.ArgumentParser(description="Generate a database migration.") 25 | parser.add_argument( 26 | "--unsafe", 27 | dest="unsafe", 28 | action="store_true", 29 | help="Prevent migra from erroring upon generation of drop statements.", 30 | ) 31 | parser.add_argument( 32 | "--schema", 33 | dest="schema", 34 | default=None, 35 | help="Restrict output to statements for a particular schema", 36 | ) 37 | parser.add_argument( 38 | "--exclude_schema", 39 | dest="exclude_schema", 40 | default=None, 41 | help="Restrict output to statements for all schemas except the specified schema", 42 | ) 43 | parser.add_argument( 44 | "--create-extensions-only", 45 | dest="create_extensions_only", 46 | action="store_true", 47 | default=False, 48 | help='Only output "create extension..." statements, nothing else.', 49 | ) 50 | parser.add_argument( 51 | "--ignore-extension-versions", 52 | dest="ignore_extension_versions", 53 | action="store_true", 54 | default=False, 55 | help="Ignore the versions when comparing extensions.", 56 | ) 57 | parser.add_argument( 58 | "--with-privileges", 59 | dest="with_privileges", 60 | action="store_true", 61 | default=False, 62 | help="Also output privilege differences (ie. 
grant/revoke statements)", 63 | ) 64 | parser.add_argument( 65 | "--force-utf8", 66 | dest="force_utf8", 67 | action="store_true", 68 | default=False, 69 | help="Force UTF-8 encoding for output", 70 | ) 71 | parser.add_argument("dburl_from", help="The database you want to migrate.") 72 | parser.add_argument( 73 | "dburl_target", help="The database you want to use as the target." 74 | ) 75 | return parser.parse_args(args) 76 | 77 | 78 | def run(args, out=None, err=None): 79 | schema = args.schema 80 | exclude_schema = args.exclude_schema 81 | if not out: 82 | out = sys.stdout # pragma: no cover 83 | if not err: 84 | err = sys.stderr # pragma: no cover 85 | with arg_context(args.dburl_from) as ac0, arg_context(args.dburl_target) as ac1: 86 | m = Migration( 87 | ac0, 88 | ac1, 89 | schema=schema, 90 | exclude_schema=exclude_schema, 91 | ignore_extension_versions=args.ignore_extension_versions, 92 | ) 93 | if args.unsafe: 94 | m.set_safety(False) 95 | if args.create_extensions_only: 96 | m.add_extension_changes(drops=False) 97 | else: 98 | m.add_all_changes(privileges=args.with_privileges) 99 | try: 100 | if m.statements: 101 | if args.force_utf8: 102 | print(m.sql.encode("utf8"), file=out) 103 | else: 104 | print(m.sql, file=out) 105 | except UnsafeMigrationException: 106 | print( 107 | "-- ERROR: destructive statements generated. 
Use the --unsafe flag to suppress this error.", 108 | file=err, 109 | ) 110 | return 3 111 | 112 | if not m.statements: 113 | return 0 114 | 115 | else: 116 | return 2 117 | 118 | 119 | def do_command(): # pragma: no cover 120 | args = parse_args(sys.argv[1:]) 121 | status = run(args) 122 | sys.exit(status) 123 | -------------------------------------------------------------------------------- /migra/migra.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from schemainspect import DBInspector, get_inspector 4 | 5 | from .changes import Changes 6 | from .statements import Statements 7 | 8 | 9 | class Migration(object): 10 | """ 11 | The main class of migra 12 | """ 13 | 14 | def __init__( 15 | self, 16 | x_from, 17 | x_target, 18 | schema=None, 19 | exclude_schema=None, 20 | ignore_extension_versions=False, 21 | ): 22 | self.statements = Statements() 23 | self.changes = Changes(None, None) 24 | if schema and exclude_schema: 25 | raise ValueError("You cannot have both a schema and excluded schema") 26 | self.schema = schema 27 | self.exclude_schema = exclude_schema 28 | if isinstance(x_from, DBInspector): 29 | self.changes.i_from = x_from 30 | else: 31 | self.changes.i_from = get_inspector( 32 | x_from, schema=schema, exclude_schema=exclude_schema 33 | ) 34 | if x_from: 35 | self.s_from = x_from 36 | if isinstance(x_target, DBInspector): 37 | self.changes.i_target = x_target 38 | else: 39 | self.changes.i_target = get_inspector( 40 | x_target, schema=schema, exclude_schema=exclude_schema 41 | ) 42 | if x_target: 43 | self.s_target = x_target 44 | 45 | self.changes.ignore_extension_versions = ignore_extension_versions 46 | 47 | def inspect_from(self): 48 | self.changes.i_from = get_inspector( 49 | self.s_from, schema=self.schema, exclude_schema=self.exclude_schema 50 | ) 51 | 52 | def inspect_target(self): 53 | self.changes.i_target = get_inspector( 54 | self.s_target, 
schema=self.schema, exclude_schema=self.exclude_schema 55 | ) 56 | 57 | def clear(self): 58 | self.statements = Statements() 59 | 60 | def apply(self): 61 | from sqlbag import raw_execute 62 | 63 | for stmt in self.statements: 64 | raw_execute(self.s_from, stmt) 65 | self.changes.i_from = get_inspector( 66 | self.s_from, schema=self.schema, exclude_schema=self.exclude_schema 67 | ) 68 | safety_on = self.statements.safe 69 | self.clear() 70 | self.set_safety(safety_on) 71 | 72 | def add(self, statements): 73 | self.statements += statements 74 | 75 | def add_sql(self, sql): 76 | self.statements += Statements([sql]) 77 | 78 | def set_safety(self, safety_on): 79 | self.statements.safe = safety_on 80 | 81 | def add_extension_changes(self, creates=True, drops=True): 82 | if creates: 83 | self.add(self.changes.extensions(creations_only=True)) 84 | if drops: 85 | self.add(self.changes.extensions(drops_only=True)) 86 | 87 | def add_all_changes(self, privileges=False): 88 | self.add(self.changes.schemas(creations_only=True)) 89 | 90 | self.add(self.changes.extensions(creations_only=True, modifications=False)) 91 | self.add(self.changes.extensions(modifications_only=True, modifications=True)) 92 | self.add(self.changes.collations(creations_only=True)) 93 | self.add(self.changes.enums(creations_only=True, modifications=False)) 94 | self.add(self.changes.sequences(creations_only=True)) 95 | self.add(self.changes.triggers(drops_only=True)) 96 | self.add(self.changes.rlspolicies(drops_only=True)) 97 | if privileges: 98 | self.add(self.changes.privileges(drops_only=True)) 99 | self.add(self.changes.non_pk_constraints(drops_only=True)) 100 | 101 | self.add(self.changes.mv_indexes(drops_only=True)) 102 | self.add(self.changes.non_table_selectable_drops()) 103 | 104 | self.add(self.changes.pk_constraints(drops_only=True)) 105 | self.add(self.changes.non_mv_indexes(drops_only=True)) 106 | 107 | self.add(self.changes.tables_only_selectables()) 108 | 109 | 
self.add(self.changes.sequences(drops_only=True)) 110 | self.add(self.changes.enums(drops_only=True, modifications=False)) 111 | self.add(self.changes.extensions(drops_only=True, modifications=False)) 112 | self.add(self.changes.non_mv_indexes(creations_only=True)) 113 | self.add(self.changes.pk_constraints(creations_only=True)) 114 | self.add(self.changes.non_pk_constraints(creations_only=True)) 115 | 116 | self.add(self.changes.non_table_selectable_creations()) 117 | self.add(self.changes.mv_indexes(creations_only=True)) 118 | 119 | if privileges: 120 | self.add(self.changes.privileges(creations_only=True)) 121 | self.add(self.changes.rlspolicies(creations_only=True)) 122 | self.add(self.changes.triggers(creations_only=True)) 123 | self.add(self.changes.collations(drops_only=True)) 124 | self.add(self.changes.schemas(drops_only=True)) 125 | 126 | @property 127 | def sql(self): 128 | return self.statements.sql 129 | -------------------------------------------------------------------------------- /migra/statements.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import re 4 | 5 | 6 | def check_for_drop(s): 7 | return bool(re.search(r"(drop\s+)", s, re.IGNORECASE)) 8 | 9 | 10 | class Statements(list): 11 | def __init__(self, *args, **kwargs): 12 | self.safe = True 13 | super(Statements, self).__init__(*args, **kwargs) 14 | 15 | @property 16 | def sql(self): 17 | if self.safe: 18 | self.raise_if_unsafe() 19 | if not self: 20 | return "" 21 | 22 | return "\n\n".join(self) + "\n\n" 23 | 24 | def raise_if_unsafe(self): 25 | if any(check_for_drop(s) for s in self): 26 | raise UnsafeMigrationException( 27 | "unsafe/destructive change being autogenerated, refusing to carry on further" 28 | ) 29 | 30 | def __add__(self, other): 31 | self += list(other) 32 | return self 33 | 34 | 35 | class UnsafeMigrationException(Exception): 36 | pass 37 | 
-------------------------------------------------------------------------------- /migra/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from collections import OrderedDict as od 4 | 5 | 6 | def differences(a, b, add_dependencies_for_modifications=True): 7 | a_keys = set(a.keys()) 8 | b_keys = set(b.keys()) 9 | keys_added = set(b_keys) - set(a_keys) 10 | keys_removed = set(a_keys) - set(b_keys) 11 | keys_common = set(a_keys) & set(b_keys) 12 | added = od((k, b[k]) for k in sorted(keys_added)) 13 | removed = od((k, a[k]) for k in sorted(keys_removed)) 14 | modified = od((k, b[k]) for k in sorted(keys_common) if a[k] != b[k]) 15 | unmodified = od((k, b[k]) for k in sorted(keys_common) if a[k] == b[k]) 16 | return added, removed, modified, unmodified 17 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "attrs" 3 | version = "22.1.0" 4 | description = "Classes Without Boilerplate" 5 | category = "dev" 6 | optional = false 7 | python-versions = ">=3.5" 8 | 9 | [package.extras] 10 | dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] 11 | docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] 12 | tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] 13 | tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] 14 | 15 | [[package]] 16 | name = "black" 17 | version = "22.8.0" 18 | description = "The uncompromising code formatter." 
19 | category = "dev" 20 | optional = false 21 | python-versions = ">=3.6.2" 22 | 23 | [package.dependencies] 24 | click = ">=8.0.0" 25 | mypy-extensions = ">=0.4.3" 26 | pathspec = ">=0.9.0" 27 | platformdirs = ">=2" 28 | tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} 29 | typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} 30 | typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} 31 | 32 | [package.extras] 33 | colorama = ["colorama (>=0.4.3)"] 34 | d = ["aiohttp (>=3.7.4)"] 35 | jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] 36 | uvloop = ["uvloop (>=0.15.2)"] 37 | 38 | [[package]] 39 | name = "click" 40 | version = "8.1.3" 41 | description = "Composable command line interface toolkit" 42 | category = "dev" 43 | optional = false 44 | python-versions = ">=3.7" 45 | 46 | [package.dependencies] 47 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 48 | importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} 49 | 50 | [[package]] 51 | name = "colorama" 52 | version = "0.4.5" 53 | description = "Cross-platform colored terminal text." 
54 | category = "dev" 55 | optional = false 56 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 57 | 58 | [[package]] 59 | name = "commonmark" 60 | version = "0.9.1" 61 | description = "Python parser for the CommonMark Markdown spec" 62 | category = "dev" 63 | optional = false 64 | python-versions = "*" 65 | 66 | [package.extras] 67 | test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] 68 | 69 | [[package]] 70 | name = "coverage" 71 | version = "6.4.4" 72 | description = "Code coverage measurement for Python" 73 | category = "dev" 74 | optional = false 75 | python-versions = ">=3.7" 76 | 77 | [package.dependencies] 78 | tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} 79 | 80 | [package.extras] 81 | toml = ["tomli"] 82 | 83 | [[package]] 84 | name = "flake8" 85 | version = "5.0.4" 86 | description = "the modular source code checker: pep8 pyflakes and co" 87 | category = "dev" 88 | optional = false 89 | python-versions = ">=3.6.1" 90 | 91 | [package.dependencies] 92 | importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""} 93 | mccabe = ">=0.7.0,<0.8.0" 94 | pycodestyle = ">=2.9.0,<2.10.0" 95 | pyflakes = ">=2.5.0,<2.6.0" 96 | 97 | [[package]] 98 | name = "greenlet" 99 | version = "1.1.3" 100 | description = "Lightweight in-process concurrent programming" 101 | category = "main" 102 | optional = false 103 | python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" 104 | 105 | [package.extras] 106 | docs = ["Sphinx"] 107 | 108 | [[package]] 109 | name = "importlib-metadata" 110 | version = "4.2.0" 111 | description = "Read metadata from Python packages" 112 | category = "main" 113 | optional = false 114 | python-versions = ">=3.6" 115 | 116 | [package.dependencies] 117 | typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} 118 | zipp = ">=0.5" 119 | 120 | [package.extras] 121 | docs = ["jaraco.packaging (>=8.2)", 
"rst.linker (>=1.9)", "sphinx"] 122 | testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] 123 | 124 | [[package]] 125 | name = "iniconfig" 126 | version = "1.1.1" 127 | description = "iniconfig: brain-dead simple config-ini parsing" 128 | category = "dev" 129 | optional = false 130 | python-versions = "*" 131 | 132 | [[package]] 133 | name = "isort" 134 | version = "5.10.1" 135 | description = "A Python utility / library to sort Python imports." 136 | category = "dev" 137 | optional = false 138 | python-versions = ">=3.6.1,<4.0" 139 | 140 | [package.extras] 141 | colors = ["colorama (>=0.4.3,<0.5.0)"] 142 | pipfile_deprecated_finder = ["pipreqs", "requirementslib"] 143 | plugins = ["setuptools"] 144 | requirements_deprecated_finder = ["pip-api", "pipreqs"] 145 | 146 | [[package]] 147 | name = "mccabe" 148 | version = "0.7.0" 149 | description = "McCabe checker, plugin for flake8" 150 | category = "dev" 151 | optional = false 152 | python-versions = ">=3.6" 153 | 154 | [[package]] 155 | name = "mypy-extensions" 156 | version = "0.4.3" 157 | description = "Experimental type system extensions for programs checked with the mypy typechecker." 158 | category = "dev" 159 | optional = false 160 | python-versions = "*" 161 | 162 | [[package]] 163 | name = "packaging" 164 | version = "21.3" 165 | description = "Core utilities for Python packages" 166 | category = "main" 167 | optional = false 168 | python-versions = ">=3.6" 169 | 170 | [package.dependencies] 171 | pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" 172 | 173 | [[package]] 174 | name = "pathspec" 175 | version = "0.10.1" 176 | description = "Utility library for gitignore style pattern matching of file paths." 
177 | category = "dev" 178 | optional = false 179 | python-versions = ">=3.7" 180 | 181 | [[package]] 182 | name = "platformdirs" 183 | version = "2.5.2" 184 | description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 185 | category = "dev" 186 | optional = false 187 | python-versions = ">=3.7" 188 | 189 | [package.extras] 190 | docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx (>=4)", "sphinx-autodoc-typehints (>=1.12)"] 191 | test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] 192 | 193 | [[package]] 194 | name = "pluggy" 195 | version = "1.0.0" 196 | description = "plugin and hook calling mechanisms for python" 197 | category = "dev" 198 | optional = false 199 | python-versions = ">=3.6" 200 | 201 | [package.dependencies] 202 | importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} 203 | 204 | [package.extras] 205 | dev = ["pre-commit", "tox"] 206 | testing = ["pytest", "pytest-benchmark"] 207 | 208 | [[package]] 209 | name = "pprintpp" 210 | version = "0.4.0" 211 | description = "A drop-in replacement for pprint that's actually pretty" 212 | category = "dev" 213 | optional = false 214 | python-versions = "*" 215 | 216 | [[package]] 217 | name = "psycopg2-binary" 218 | version = "2.9.3" 219 | description = "psycopg2 - Python-PostgreSQL Database Adapter" 220 | category = "main" 221 | optional = false 222 | python-versions = ">=3.6" 223 | 224 | [[package]] 225 | name = "py" 226 | version = "1.11.0" 227 | description = "library with cross-python path, ini-parsing, io, code, log facilities" 228 | category = "dev" 229 | optional = false 230 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 231 | 232 | [[package]] 233 | name = "pycodestyle" 234 | version = "2.9.1" 235 | description = "Python style guide checker" 236 | category = "dev" 237 | optional = false 238 | python-versions = ">=3.6" 239 | 240 | [[package]] 
241 | name = "pyflakes" 242 | version = "2.5.0" 243 | description = "passive checker of Python programs" 244 | category = "dev" 245 | optional = false 246 | python-versions = ">=3.6" 247 | 248 | [[package]] 249 | name = "Pygments" 250 | version = "2.13.0" 251 | description = "Pygments is a syntax highlighting package written in Python." 252 | category = "dev" 253 | optional = false 254 | python-versions = ">=3.6" 255 | 256 | [package.extras] 257 | plugins = ["importlib-metadata"] 258 | 259 | [[package]] 260 | name = "pyparsing" 261 | version = "3.0.9" 262 | description = "pyparsing module - Classes and methods to define and execute parsing grammars" 263 | category = "main" 264 | optional = false 265 | python-versions = ">=3.6.8" 266 | 267 | [package.extras] 268 | diagrams = ["jinja2", "railroad-diagrams"] 269 | 270 | [[package]] 271 | name = "pytest" 272 | version = "7.1.3" 273 | description = "pytest: simple powerful testing with Python" 274 | category = "dev" 275 | optional = false 276 | python-versions = ">=3.7" 277 | 278 | [package.dependencies] 279 | attrs = ">=19.2.0" 280 | colorama = {version = "*", markers = "sys_platform == \"win32\""} 281 | importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} 282 | iniconfig = "*" 283 | packaging = "*" 284 | pluggy = ">=0.12,<2.0" 285 | py = ">=1.8.2" 286 | tomli = ">=1.0.0" 287 | 288 | [package.extras] 289 | testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] 290 | 291 | [[package]] 292 | name = "pytest-clarity" 293 | version = "1.0.1" 294 | description = "A plugin providing an alternative, colourful diff output for failing assertions." 
295 | category = "dev" 296 | optional = false 297 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 298 | 299 | [package.dependencies] 300 | pprintpp = ">=0.4.0" 301 | pytest = ">=3.5.0" 302 | rich = ">=8.0.0" 303 | 304 | [[package]] 305 | name = "pytest-cov" 306 | version = "3.0.0" 307 | description = "Pytest plugin for measuring coverage." 308 | category = "dev" 309 | optional = false 310 | python-versions = ">=3.6" 311 | 312 | [package.dependencies] 313 | coverage = {version = ">=5.2.1", extras = ["toml"]} 314 | pytest = ">=4.6" 315 | 316 | [package.extras] 317 | testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] 318 | 319 | [[package]] 320 | name = "rich" 321 | version = "12.5.1" 322 | description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" 323 | category = "dev" 324 | optional = false 325 | python-versions = ">=3.6.3,<4.0.0" 326 | 327 | [package.dependencies] 328 | commonmark = ">=0.9.0,<0.10.0" 329 | pygments = ">=2.6.0,<3.0.0" 330 | typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} 331 | 332 | [package.extras] 333 | jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] 334 | 335 | [[package]] 336 | name = "schemainspect" 337 | version = "3.1.1663480743" 338 | description = "Schema inspection for PostgreSQL (and possibly others)" 339 | category = "main" 340 | optional = false 341 | python-versions = ">=3.7,<4" 342 | 343 | [package.dependencies] 344 | sqlalchemy = "*" 345 | 346 | [[package]] 347 | name = "six" 348 | version = "1.16.0" 349 | description = "Python 2 and 3 compatibility utilities" 350 | category = "main" 351 | optional = false 352 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 353 | 354 | [[package]] 355 | name = "SQLAlchemy" 356 | version = "1.4.41" 357 | description = "Database Abstraction Library" 358 | category = "main" 359 | optional = false 360 | python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" 361 | 362 | [package.dependencies] 363 | greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} 364 | importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} 365 | 366 | [package.extras] 367 | aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] 368 | aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] 369 | asyncio = ["greenlet (!=0.4.17)"] 370 | asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] 371 | mariadb_connector = ["mariadb (>=1.0.1,!=1.1.2)"] 372 | mssql = ["pyodbc"] 373 | mssql_pymssql = ["pymssql"] 374 | mssql_pyodbc = ["pyodbc"] 375 | mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] 376 | mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] 377 | mysql_connector = ["mysql-connector-python"] 378 | oracle = ["cx_oracle (>=7)", "cx_oracle (>=7,<8)"] 379 | postgresql = ["psycopg2 (>=2.7)"] 380 | postgresql_asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] 381 | postgresql_pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] 382 | postgresql_psycopg2binary = ["psycopg2-binary"] 383 | postgresql_psycopg2cffi = ["psycopg2cffi"] 384 | pymysql = ["pymysql", "pymysql (<1)"] 385 | sqlcipher = ["sqlcipher3_binary"] 386 | 387 | [[package]] 388 | name = "sqlbag" 389 | version = "0.1.1617247075" 390 | description = "various snippets of SQL-related boilerplate" 391 | category = "main" 392 | optional = false 393 | python-versions = "*" 394 | 395 | [package.dependencies] 396 | packaging = "*" 397 | six = "*" 398 | sqlalchemy = "*" 399 | 400 | [package.extras] 401 | maria = ["pymysql"] 402 | pendulum = ["pendulum", "relativedelta"] 403 | pg = ["psycopg2"] 404 | 405 | [[package]] 406 | name = "toml" 407 | version = 
"0.10.2" 408 | description = "Python Library for Tom's Obvious, Minimal Language" 409 | category = "dev" 410 | optional = false 411 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 412 | 413 | [[package]] 414 | name = "tomli" 415 | version = "2.0.1" 416 | description = "A lil' TOML parser" 417 | category = "dev" 418 | optional = false 419 | python-versions = ">=3.7" 420 | 421 | [[package]] 422 | name = "typed-ast" 423 | version = "1.5.4" 424 | description = "a fork of Python 2 and 3 ast modules with type comment support" 425 | category = "dev" 426 | optional = false 427 | python-versions = ">=3.6" 428 | 429 | [[package]] 430 | name = "typing-extensions" 431 | version = "4.3.0" 432 | description = "Backported and Experimental Type Hints for Python 3.7+" 433 | category = "main" 434 | optional = false 435 | python-versions = ">=3.7" 436 | 437 | [[package]] 438 | name = "zipp" 439 | version = "3.8.1" 440 | description = "Backport of pathlib-compatible object wrapper for zip files" 441 | category = "main" 442 | optional = false 443 | python-versions = ">=3.7" 444 | 445 | [package.extras] 446 | docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] 447 | testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] 448 | 449 | [extras] 450 | pg = ["psycopg2-binary"] 451 | 452 | [metadata] 453 | lock-version = "1.1" 454 | python-versions = ">=3.7,<4" 455 | content-hash = "21ffd0af5c9902c2d943b8bf9414108c42f6a45a807f8130a4105fa341d86a85" 456 | 457 | [metadata.files] 458 | attrs = [ 459 | {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, 460 | {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, 461 | ] 462 | black = [ 463 | {file = 
"black-22.8.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ce957f1d6b78a8a231b18e0dd2d94a33d2ba738cd88a7fe64f53f659eea49fdd"}, 464 | {file = "black-22.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5107ea36b2b61917956d018bd25129baf9ad1125e39324a9b18248d362156a27"}, 465 | {file = "black-22.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8166b7bfe5dcb56d325385bd1d1e0f635f24aae14b3ae437102dedc0c186747"}, 466 | {file = "black-22.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd82842bb272297503cbec1a2600b6bfb338dae017186f8f215c8958f8acf869"}, 467 | {file = "black-22.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d839150f61d09e7217f52917259831fe2b689f5c8e5e32611736351b89bb2a90"}, 468 | {file = "black-22.8.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a05da0430bd5ced89176db098567973be52ce175a55677436a271102d7eaa3fe"}, 469 | {file = "black-22.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a098a69a02596e1f2a58a2a1c8d5a05d5a74461af552b371e82f9fa4ada8342"}, 470 | {file = "black-22.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5594efbdc35426e35a7defa1ea1a1cb97c7dbd34c0e49af7fb593a36bd45edab"}, 471 | {file = "black-22.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983526af1bea1e4cf6768e649990f28ee4f4137266921c2c3cee8116ae42ec3"}, 472 | {file = "black-22.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b2c25f8dea5e8444bdc6788a2f543e1fb01494e144480bc17f806178378005e"}, 473 | {file = "black-22.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:78dd85caaab7c3153054756b9fe8c611efa63d9e7aecfa33e533060cb14b6d16"}, 474 | {file = "black-22.8.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:cea1b2542d4e2c02c332e83150e41e3ca80dc0fb8de20df3c5e98e242156222c"}, 475 | {file = "black-22.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b879eb439094751185d1cfdca43023bc6786bd3c60372462b6f051efa6281a5"}, 476 | {file = 
"black-22.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a12e4e1353819af41df998b02c6742643cfef58282915f781d0e4dd7a200411"}, 477 | {file = "black-22.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3a73f66b6d5ba7288cd5d6dad9b4c9b43f4e8a4b789a94bf5abfb878c663eb3"}, 478 | {file = "black-22.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:e981e20ec152dfb3e77418fb616077937378b322d7b26aa1ff87717fb18b4875"}, 479 | {file = "black-22.8.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8ce13ffed7e66dda0da3e0b2eb1bdfc83f5812f66e09aca2b0978593ed636b6c"}, 480 | {file = "black-22.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:32a4b17f644fc288c6ee2bafdf5e3b045f4eff84693ac069d87b1a347d861497"}, 481 | {file = "black-22.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ad827325a3a634bae88ae7747db1a395d5ee02cf05d9aa7a9bd77dfb10e940c"}, 482 | {file = "black-22.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53198e28a1fb865e9fe97f88220da2e44df6da82b18833b588b1883b16bb5d41"}, 483 | {file = "black-22.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:bc4d4123830a2d190e9cc42a2e43570f82ace35c3aeb26a512a2102bce5af7ec"}, 484 | {file = "black-22.8.0-py3-none-any.whl", hash = "sha256:d2c21d439b2baf7aa80d6dd4e3659259be64c6f49dfd0f32091063db0e006db4"}, 485 | {file = "black-22.8.0.tar.gz", hash = "sha256:792f7eb540ba9a17e8656538701d3eb1afcb134e3b45b71f20b25c77a8db7e6e"}, 486 | ] 487 | click = [ 488 | {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, 489 | {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, 490 | ] 491 | colorama = [ 492 | {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, 493 | {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, 494 | 
] 495 | commonmark = [ 496 | {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, 497 | {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, 498 | ] 499 | coverage = [ 500 | {file = "coverage-6.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7b4da9bafad21ea45a714d3ea6f3e1679099e420c8741c74905b92ee9bfa7cc"}, 501 | {file = "coverage-6.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fde17bc42e0716c94bf19d92e4c9f5a00c5feb401f5bc01101fdf2a8b7cacf60"}, 502 | {file = "coverage-6.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdbb0d89923c80dbd435b9cf8bba0ff55585a3cdb28cbec65f376c041472c60d"}, 503 | {file = "coverage-6.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67f9346aeebea54e845d29b487eb38ec95f2ecf3558a3cffb26ee3f0dcc3e760"}, 504 | {file = "coverage-6.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42c499c14efd858b98c4e03595bf914089b98400d30789511577aa44607a1b74"}, 505 | {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c35cca192ba700979d20ac43024a82b9b32a60da2f983bec6c0f5b84aead635c"}, 506 | {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9cc4f107009bca5a81caef2fca843dbec4215c05e917a59dec0c8db5cff1d2aa"}, 507 | {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f444627b3664b80d078c05fe6a850dd711beeb90d26731f11d492dcbadb6973"}, 508 | {file = "coverage-6.4.4-cp310-cp310-win32.whl", hash = "sha256:66e6df3ac4659a435677d8cd40e8eb1ac7219345d27c41145991ee9bf4b806a0"}, 509 | {file = "coverage-6.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:35ef1f8d8a7a275aa7410d2f2c60fa6443f4a64fae9be671ec0696a68525b875"}, 510 | {file = 
"coverage-6.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c1328d0c2f194ffda30a45f11058c02410e679456276bfa0bbe0b0ee87225fac"}, 511 | {file = "coverage-6.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61b993f3998ee384935ee423c3d40894e93277f12482f6e777642a0141f55782"}, 512 | {file = "coverage-6.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5dd4b8e9cd0deb60e6fcc7b0647cbc1da6c33b9e786f9c79721fd303994832f"}, 513 | {file = "coverage-6.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7026f5afe0d1a933685d8f2169d7c2d2e624f6255fb584ca99ccca8c0e966fd7"}, 514 | {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9c7b9b498eb0c0d48b4c2abc0e10c2d78912203f972e0e63e3c9dc21f15abdaa"}, 515 | {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ee2b2fb6eb4ace35805f434e0f6409444e1466a47f620d1d5763a22600f0f892"}, 516 | {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ab066f5ab67059d1f1000b5e1aa8bbd75b6ed1fc0014559aea41a9eb66fc2ce0"}, 517 | {file = "coverage-6.4.4-cp311-cp311-win32.whl", hash = "sha256:9d6e1f3185cbfd3d91ac77ea065d85d5215d3dfa45b191d14ddfcd952fa53796"}, 518 | {file = "coverage-6.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e3d3c4cc38b2882f9a15bafd30aec079582b819bec1b8afdbde8f7797008108a"}, 519 | {file = "coverage-6.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a095aa0a996ea08b10580908e88fbaf81ecf798e923bbe64fb98d1807db3d68a"}, 520 | {file = "coverage-6.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef6f44409ab02e202b31a05dd6666797f9de2aa2b4b3534e9d450e42dea5e817"}, 521 | {file = "coverage-6.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b7101938584d67e6f45f0015b60e24a95bf8dea19836b1709a80342e01b472f"}, 522 | 
{file = "coverage-6.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a32ec68d721c3d714d9b105c7acf8e0f8a4f4734c811eda75ff3718570b5e3"}, 523 | {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6a864733b22d3081749450466ac80698fe39c91cb6849b2ef8752fd7482011f3"}, 524 | {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:08002f9251f51afdcc5e3adf5d5d66bb490ae893d9e21359b085f0e03390a820"}, 525 | {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a3b2752de32c455f2521a51bd3ffb53c5b3ae92736afde67ce83477f5c1dd928"}, 526 | {file = "coverage-6.4.4-cp37-cp37m-win32.whl", hash = "sha256:f855b39e4f75abd0dfbcf74a82e84ae3fc260d523fcb3532786bcbbcb158322c"}, 527 | {file = "coverage-6.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ee6ae6bbcac0786807295e9687169fba80cb0617852b2fa118a99667e8e6815d"}, 528 | {file = "coverage-6.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:564cd0f5b5470094df06fab676c6d77547abfdcb09b6c29c8a97c41ad03b103c"}, 529 | {file = "coverage-6.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cbbb0e4cd8ddcd5ef47641cfac97d8473ab6b132dd9a46bacb18872828031685"}, 530 | {file = "coverage-6.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6113e4df2fa73b80f77663445be6d567913fb3b82a86ceb64e44ae0e4b695de1"}, 531 | {file = "coverage-6.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d032bfc562a52318ae05047a6eb801ff31ccee172dc0d2504614e911d8fa83e"}, 532 | {file = "coverage-6.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e431e305a1f3126477abe9a184624a85308da8edf8486a863601d58419d26ffa"}, 533 | {file = "coverage-6.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cf2afe83a53f77aec067033199797832617890e15bed42f4a1a93ea24794ae3e"}, 534 | {file = 
"coverage-6.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:783bc7c4ee524039ca13b6d9b4186a67f8e63d91342c713e88c1865a38d0892a"}, 535 | {file = "coverage-6.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ff934ced84054b9018665ca3967fc48e1ac99e811f6cc99ea65978e1d384454b"}, 536 | {file = "coverage-6.4.4-cp38-cp38-win32.whl", hash = "sha256:e1fabd473566fce2cf18ea41171d92814e4ef1495e04471786cbc943b89a3781"}, 537 | {file = "coverage-6.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:4179502f210ebed3ccfe2f78bf8e2d59e50b297b598b100d6c6e3341053066a2"}, 538 | {file = "coverage-6.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:98c0b9e9b572893cdb0a00e66cf961a238f8d870d4e1dc8e679eb8bdc2eb1b86"}, 539 | {file = "coverage-6.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc600f6ec19b273da1d85817eda339fb46ce9eef3e89f220055d8696e0a06908"}, 540 | {file = "coverage-6.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a98d6bf6d4ca5c07a600c7b4e0c5350cd483c85c736c522b786be90ea5bac4f"}, 541 | {file = "coverage-6.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01778769097dbd705a24e221f42be885c544bb91251747a8a3efdec6eb4788f2"}, 542 | {file = "coverage-6.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfa0b97eb904255e2ab24166071b27408f1f69c8fbda58e9c0972804851e0558"}, 543 | {file = "coverage-6.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:fcbe3d9a53e013f8ab88734d7e517eb2cd06b7e689bedf22c0eb68db5e4a0a19"}, 544 | {file = "coverage-6.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:15e38d853ee224e92ccc9a851457fb1e1f12d7a5df5ae44544ce7863691c7a0d"}, 545 | {file = "coverage-6.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6913dddee2deff8ab2512639c5168c3e80b3ebb0f818fed22048ee46f735351a"}, 546 | {file = "coverage-6.4.4-cp39-cp39-win32.whl", hash = 
"sha256:354df19fefd03b9a13132fa6643527ef7905712109d9c1c1903f2133d3a4e145"}, 547 | {file = "coverage-6.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:1238b08f3576201ebf41f7c20bf59baa0d05da941b123c6656e42cdb668e9827"}, 548 | {file = "coverage-6.4.4-pp36.pp37.pp38-none-any.whl", hash = "sha256:f67cf9f406cf0d2f08a3515ce2db5b82625a7257f88aad87904674def6ddaec1"}, 549 | {file = "coverage-6.4.4.tar.gz", hash = "sha256:e16c45b726acb780e1e6f88b286d3c10b3914ab03438f32117c4aa52d7f30d58"}, 550 | ] 551 | flake8 = [ 552 | {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"}, 553 | {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"}, 554 | ] 555 | greenlet = [ 556 | {file = "greenlet-1.1.3-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:8c287ae7ac921dfde88b1c125bd9590b7ec3c900c2d3db5197f1286e144e712b"}, 557 | {file = "greenlet-1.1.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:870a48007872d12e95a996fca3c03a64290d3ea2e61076aa35d3b253cf34cd32"}, 558 | {file = "greenlet-1.1.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:7c5227963409551ae4a6938beb70d56bf1918c554a287d3da6853526212fbe0a"}, 559 | {file = "greenlet-1.1.3-cp27-cp27m-win32.whl", hash = "sha256:9fae214f6c43cd47f7bef98c56919b9222481e833be2915f6857a1e9e8a15318"}, 560 | {file = "greenlet-1.1.3-cp27-cp27m-win_amd64.whl", hash = "sha256:de431765bd5fe62119e0bc6bc6e7b17ac53017ae1782acf88fcf6b7eae475a49"}, 561 | {file = "greenlet-1.1.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:510c3b15587afce9800198b4b142202b323bf4b4b5f9d6c79cb9a35e5e3c30d2"}, 562 | {file = "greenlet-1.1.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9951dcbd37850da32b2cb6e391f621c1ee456191c6ae5528af4a34afe357c30e"}, 563 | {file = "greenlet-1.1.3-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:07c58e169bbe1e87b8bbf15a5c1b779a7616df9fd3e61cadc9d691740015b4f8"}, 564 | {file = 
"greenlet-1.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df02fdec0c533301497acb0bc0f27f479a3a63dcdc3a099ae33a902857f07477"}, 565 | {file = "greenlet-1.1.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c88e134d51d5e82315a7c32b914a58751b7353eb5268dbd02eabf020b4c4700"}, 566 | {file = "greenlet-1.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b41d19c0cfe5c259fe6c539fd75051cd39a5d33d05482f885faf43f7f5e7d26"}, 567 | {file = "greenlet-1.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:6f5d4b2280ceea76c55c893827961ed0a6eadd5a584a7c4e6e6dd7bc10dfdd96"}, 568 | {file = "greenlet-1.1.3-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:184416e481295832350a4bf731ba619a92f5689bf5d0fa4341e98b98b1265bd7"}, 569 | {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd0404d154084a371e6d2bafc787201612a1359c2dee688ae334f9118aa0bf47"}, 570 | {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a43bbfa9b6cfdfaeefbd91038dde65ea2c421dc387ed171613df340650874f2"}, 571 | {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce5b64dfe8d0cca407d88b0ee619d80d4215a2612c1af8c98a92180e7109f4b5"}, 572 | {file = "greenlet-1.1.3-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:903fa5716b8fbb21019268b44f73f3748c41d1a30d71b4a49c84b642c2fed5fa"}, 573 | {file = "greenlet-1.1.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0118817c9341ef2b0f75f5af79ac377e4da6ff637e5ee4ac91802c0e379dadb4"}, 574 | {file = "greenlet-1.1.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:466ce0928e33421ee84ae04c4ac6f253a3a3e6b8d600a79bd43fd4403e0a7a76"}, 575 | {file = "greenlet-1.1.3-cp35-cp35m-win32.whl", hash = "sha256:65ad1a7a463a2a6f863661329a944a5802c7129f7ad33583dcc11069c17e622c"}, 576 | {file = "greenlet-1.1.3-cp35-cp35m-win_amd64.whl", hash = 
"sha256:7532a46505470be30cbf1dbadb20379fb481244f1ca54207d7df3bf0bbab6a20"}, 577 | {file = "greenlet-1.1.3-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:caff52cb5cd7626872d9696aee5b794abe172804beb7db52eed1fd5824b63910"}, 578 | {file = "greenlet-1.1.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:db41f3845eb579b544c962864cce2c2a0257fe30f0f1e18e51b1e8cbb4e0ac6d"}, 579 | {file = "greenlet-1.1.3-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:e8533f5111704d75de3139bf0b8136d3a6c1642c55c067866fa0a51c2155ee33"}, 580 | {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537e4baf0db67f382eb29255a03154fcd4984638303ff9baaa738b10371fa57"}, 581 | {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8bfd36f368efe0ab2a6aa3db7f14598aac454b06849fb633b762ddbede1db90"}, 582 | {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0877a9a2129a2c56a2eae2da016743db7d9d6a05d5e1c198f1b7808c602a30e"}, 583 | {file = "greenlet-1.1.3-cp36-cp36m-win32.whl", hash = "sha256:88b04e12c9b041a1e0bcb886fec709c488192638a9a7a3677513ac6ba81d8e79"}, 584 | {file = "greenlet-1.1.3-cp36-cp36m-win_amd64.whl", hash = "sha256:4f166b4aca8d7d489e82d74627a7069ab34211ef5ebb57c300ec4b9337b60fc0"}, 585 | {file = "greenlet-1.1.3-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:cd16a89efe3a003029c87ff19e9fba635864e064da646bc749fc1908a4af18f3"}, 586 | {file = "greenlet-1.1.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5b756e6730ea59b2745072e28ad27f4c837084688e6a6b3633c8b1e509e6ae0e"}, 587 | {file = "greenlet-1.1.3-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:9b2f7d0408ddeb8ea1fd43d3db79a8cefaccadd2a812f021333b338ed6b10aba"}, 588 | {file = "greenlet-1.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b4817c34c9272c65550b788913620f1fdc80362b209bc9d7dd2f40d8793080"}, 589 | {file = 
"greenlet-1.1.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d58a5a71c4c37354f9e0c24c9c8321f0185f6945ef027460b809f4bb474bfe41"}, 590 | {file = "greenlet-1.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1dd51d2650e70c6c4af37f454737bf4a11e568945b27f74b471e8e2a9fd21268"}, 591 | {file = "greenlet-1.1.3-cp37-cp37m-win32.whl", hash = "sha256:048d2bed76c2aa6de7af500ae0ea51dd2267aec0e0f2a436981159053d0bc7cc"}, 592 | {file = "greenlet-1.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:77e41db75f9958f2083e03e9dd39da12247b3430c92267df3af77c83d8ff9eed"}, 593 | {file = "greenlet-1.1.3-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:1626185d938d7381631e48e6f7713e8d4b964be246073e1a1d15c2f061ac9f08"}, 594 | {file = "greenlet-1.1.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1ec2779774d8e42ed0440cf8bc55540175187e8e934f2be25199bf4ed948cd9e"}, 595 | {file = "greenlet-1.1.3-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f2f908239b7098799b8845e5936c2ccb91d8c2323be02e82f8dcb4a80dcf4a25"}, 596 | {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b181e9aa6cb2f5ec0cacc8cee6e5a3093416c841ba32c185c30c160487f0380"}, 597 | {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cf45e339cabea16c07586306a31cfcc5a3b5e1626d365714d283732afed6809"}, 598 | {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6200a11f003ec26815f7e3d2ded01b43a3810be3528dd760d2f1fa777490c3cd"}, 599 | {file = "greenlet-1.1.3-cp38-cp38-win32.whl", hash = "sha256:db5b25265010a1b3dca6a174a443a0ed4c4ab12d5e2883a11c97d6e6d59b12f9"}, 600 | {file = "greenlet-1.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:095a980288fe05adf3d002fbb180c99bdcf0f930e220aa66fcd56e7914a38202"}, 601 | {file = "greenlet-1.1.3-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:cbc1eb55342cbac8f7ec159088d54e2cfdd5ddf61c87b8bbe682d113789331b2"}, 
602 | {file = "greenlet-1.1.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:694ffa7144fa5cc526c8f4512665003a39fa09ef00d19bbca5c8d3406db72fbe"}, 603 | {file = "greenlet-1.1.3-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:aa741c1a8a8cc25eb3a3a01a62bdb5095a773d8c6a86470bde7f607a447e7905"}, 604 | {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3a669f11289a8995d24fbfc0e63f8289dd03c9aaa0cc8f1eab31d18ca61a382"}, 605 | {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76a53bfa10b367ee734b95988bd82a9a5f0038a25030f9f23bbbc005010ca600"}, 606 | {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb0aa7f6996879551fd67461d5d3ab0c3c0245da98be90c89fcb7a18d437403"}, 607 | {file = "greenlet-1.1.3-cp39-cp39-win32.whl", hash = "sha256:5fbe1ab72b998ca77ceabbae63a9b2e2dc2d963f4299b9b278252ddba142d3f1"}, 608 | {file = "greenlet-1.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:ffe73f9e7aea404722058405ff24041e59d31ca23d1da0895af48050a07b6932"}, 609 | {file = "greenlet-1.1.3.tar.gz", hash = "sha256:bcb6c6dd1d6be6d38d6db283747d07fda089ff8c559a835236560a4410340455"}, 610 | ] 611 | importlib-metadata = [ 612 | {file = "importlib_metadata-4.2.0-py3-none-any.whl", hash = "sha256:057e92c15bc8d9e8109738a48db0ccb31b4d9d5cfbee5a8670879a30be66304b"}, 613 | {file = "importlib_metadata-4.2.0.tar.gz", hash = "sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31"}, 614 | ] 615 | iniconfig = [ 616 | {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, 617 | {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, 618 | ] 619 | isort = [ 620 | {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, 621 | {file = 
"isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, 622 | ] 623 | mccabe = [ 624 | {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, 625 | {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, 626 | ] 627 | mypy-extensions = [ 628 | {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, 629 | {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, 630 | ] 631 | packaging = [ 632 | {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, 633 | {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, 634 | ] 635 | pathspec = [ 636 | {file = "pathspec-0.10.1-py3-none-any.whl", hash = "sha256:46846318467efc4556ccfd27816e004270a9eeeeb4d062ce5e6fc7a87c573f93"}, 637 | {file = "pathspec-0.10.1.tar.gz", hash = "sha256:7ace6161b621d31e7902eb6b5ae148d12cfd23f4a249b9ffb6b9fee12084323d"}, 638 | ] 639 | platformdirs = [ 640 | {file = "platformdirs-2.5.2-py3-none-any.whl", hash = "sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788"}, 641 | {file = "platformdirs-2.5.2.tar.gz", hash = "sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19"}, 642 | ] 643 | pluggy = [ 644 | {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, 645 | {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, 646 | ] 647 | pprintpp = [ 648 | {file = "pprintpp-0.4.0-py2.py3-none-any.whl", hash = "sha256:b6b4dcdd0c0c0d75e4d7b2f21a9e933e5b2ce62b26e1a54537f9651ae5a5c01d"}, 649 | 
{file = "pprintpp-0.4.0.tar.gz", hash = "sha256:ea826108e2c7f49dc6d66c752973c3fc9749142a798d6b254e1e301cfdbc6403"}, 650 | ] 651 | psycopg2-binary = [ 652 | {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, 653 | {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, 654 | {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e82d38390a03da28c7985b394ec3f56873174e2c88130e6966cb1c946508e65"}, 655 | {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57804fc02ca3ce0dbfbef35c4b3a4a774da66d66ea20f4bda601294ad2ea6092"}, 656 | {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:083a55275f09a62b8ca4902dd11f4b33075b743cf0d360419e2051a8a5d5ff76"}, 657 | {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:0a29729145aaaf1ad8bafe663131890e2111f13416b60e460dae0a96af5905c9"}, 658 | {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a79d622f5206d695d7824cbf609a4f5b88ea6d6dab5f7c147fc6d333a8787e4"}, 659 | {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:090f3348c0ab2cceb6dfbe6bf721ef61262ddf518cd6cc6ecc7d334996d64efa"}, 660 | {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a9e1f75f96ea388fbcef36c70640c4efbe4650658f3d6a2967b4cc70e907352e"}, 661 | {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c3ae8e75eb7160851e59adc77b3a19a976e50622e44fd4fd47b8b18208189d42"}, 662 | {file = "psycopg2_binary-2.9.3-cp310-cp310-win32.whl", hash = "sha256:7b1e9b80afca7b7a386ef087db614faebbf8839b7f4db5eb107d0f1a53225029"}, 663 | {file = 
"psycopg2_binary-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:8b344adbb9a862de0c635f4f0425b7958bf5a4b927c8594e6e8d261775796d53"}, 664 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:e847774f8ffd5b398a75bc1c18fbb56564cda3d629fe68fd81971fece2d3c67e"}, 665 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68641a34023d306be959101b345732360fc2ea4938982309b786f7be1b43a4a1"}, 666 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3303f8807f342641851578ee7ed1f3efc9802d00a6f83c101d21c608cb864460"}, 667 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:e3699852e22aa68c10de06524a3721ade969abf382da95884e6a10ff798f9281"}, 668 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:526ea0378246d9b080148f2d6681229f4b5964543c170dd10bf4faaab6e0d27f"}, 669 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:b1c8068513f5b158cf7e29c43a77eb34b407db29aca749d3eb9293ee0d3103ca"}, 670 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:15803fa813ea05bef089fa78835118b5434204f3a17cb9f1e5dbfd0b9deea5af"}, 671 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:152f09f57417b831418304c7f30d727dc83a12761627bb826951692cc6491e57"}, 672 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:404224e5fef3b193f892abdbf8961ce20e0b6642886cfe1fe1923f41aaa75c9d"}, 673 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:1f6b813106a3abdf7b03640d36e24669234120c72e91d5cbaeb87c5f7c36c65b"}, 674 | {file = "psycopg2_binary-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2d872e3c9d5d075a2e104540965a1cf898b52274a5923936e5bfddb58c59c7c2"}, 675 | {file = 
"psycopg2_binary-2.9.3-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:10bb90fb4d523a2aa67773d4ff2b833ec00857f5912bafcfd5f5414e45280fb1"}, 676 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a52ecab70af13e899f7847b3e074eeb16ebac5615665db33bce8a1009cf33"}, 677 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a29b3ca4ec9defec6d42bf5feb36bb5817ba3c0230dd83b4edf4bf02684cd0ae"}, 678 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:12b11322ea00ad8db8c46f18b7dfc47ae215e4df55b46c67a94b4effbaec7094"}, 679 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:53293533fcbb94c202b7c800a12c873cfe24599656b341f56e71dd2b557be063"}, 680 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c381bda330ddf2fccbafab789d83ebc6c53db126e4383e73794c74eedce855ef"}, 681 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d29409b625a143649d03d0fd7b57e4b92e0ecad9726ba682244b73be91d2fdb"}, 682 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:183a517a3a63503f70f808b58bfbf962f23d73b6dccddae5aa56152ef2bcb232"}, 683 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:15c4e4cfa45f5a60599d9cec5f46cd7b1b29d86a6390ec23e8eebaae84e64554"}, 684 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:adf20d9a67e0b6393eac162eb81fb10bc9130a80540f4df7e7355c2dd4af9fba"}, 685 | {file = "psycopg2_binary-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f9ffd643bc7349eeb664eba8864d9e01f057880f510e4681ba40a6532f93c71"}, 686 | {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = 
"sha256:def68d7c21984b0f8218e8a15d514f714d96904265164f75f8d3a70f9c295667"}, 687 | {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dffc08ca91c9ac09008870c9eb77b00a46b3378719584059c034b8945e26b272"}, 688 | {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:280b0bb5cbfe8039205c7981cceb006156a675362a00fe29b16fbc264e242834"}, 689 | {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:af9813db73395fb1fc211bac696faea4ca9ef53f32dc0cfa27e4e7cf766dcf24"}, 690 | {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:63638d875be8c2784cfc952c9ac34e2b50e43f9f0a0660b65e2a87d656b3116c"}, 691 | {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ffb7a888a047696e7f8240d649b43fb3644f14f0ee229077e7f6b9f9081635bd"}, 692 | {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c9d5450c566c80c396b7402895c4369a410cab5a82707b11aee1e624da7d004"}, 693 | {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:d1c1b569ecafe3a69380a94e6ae09a4789bbb23666f3d3a08d06bbd2451f5ef1"}, 694 | {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8fc53f9af09426a61db9ba357865c77f26076d48669f2e1bb24d85a22fb52307"}, 695 | {file = "psycopg2_binary-2.9.3-cp38-cp38-win32.whl", hash = "sha256:6472a178e291b59e7f16ab49ec8b4f3bdada0a879c68d3817ff0963e722a82ce"}, 696 | {file = "psycopg2_binary-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:35168209c9d51b145e459e05c31a9eaeffa9a6b0fd61689b48e07464ffd1a83e"}, 697 | {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:47133f3f872faf28c1e87d4357220e809dfd3fa7c64295a4a148bcd1e6e34ec9"}, 698 | {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:91920527dea30175cc02a1099f331aa8c1ba39bf8b7762b7b56cbf54bc5cce42"}, 699 | {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887dd9aac71765ac0d0bac1d0d4b4f2c99d5f5c1382d8b770404f0f3d0ce8a39"}, 700 | {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:1f14c8b0942714eb3c74e1e71700cbbcb415acbc311c730370e70c578a44a25c"}, 701 | {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7af0dd86ddb2f8af5da57a976d27cd2cd15510518d582b478fbb2292428710b4"}, 702 | {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93cd1967a18aa0edd4b95b1dfd554cf15af657cb606280996d393dadc88c3c35"}, 703 | {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bda845b664bb6c91446ca9609fc69f7db6c334ec5e4adc87571c34e4f47b7ddb"}, 704 | {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:01310cf4cf26db9aea5158c217caa92d291f0500051a6469ac52166e1a16f5b7"}, 705 | {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:99485cab9ba0fa9b84f1f9e1fef106f44a46ef6afdeec8885e0b88d0772b49e8"}, 706 | {file = "psycopg2_binary-2.9.3-cp39-cp39-win32.whl", hash = "sha256:46f0e0a6b5fa5851bbd9ab1bc805eef362d3a230fbdfbc209f4a236d0a7a990d"}, 707 | {file = "psycopg2_binary-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:accfe7e982411da3178ec690baaceaad3c278652998b2c45828aaac66cd8285f"}, 708 | ] 709 | py = [ 710 | {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, 711 | {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, 712 | ] 713 | pycodestyle = [ 714 | {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, 715 | {file = "pycodestyle-2.9.1.tar.gz", hash = 
"sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"}, 716 | ] 717 | pyflakes = [ 718 | {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"}, 719 | {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, 720 | ] 721 | Pygments = [ 722 | {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, 723 | {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, 724 | ] 725 | pyparsing = [ 726 | {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, 727 | {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, 728 | ] 729 | pytest = [ 730 | {file = "pytest-7.1.3-py3-none-any.whl", hash = "sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7"}, 731 | {file = "pytest-7.1.3.tar.gz", hash = "sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39"}, 732 | ] 733 | pytest-clarity = [ 734 | {file = "pytest-clarity-1.0.1.tar.gz", hash = "sha256:505fe345fad4fe11c6a4187fe683f2c7c52c077caa1e135f3e483fe112db7772"}, 735 | ] 736 | pytest-cov = [ 737 | {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, 738 | {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, 739 | ] 740 | rich = [ 741 | {file = "rich-12.5.1-py3-none-any.whl", hash = "sha256:2eb4e6894cde1e017976d2975ac210ef515d7548bc595ba20e195fb9628acdeb"}, 742 | {file = "rich-12.5.1.tar.gz", hash = "sha256:63a5c5ce3673d3d5fbbf23cd87e11ab84b6b451436f1b7f19ec54b6bc36ed7ca"}, 743 | ] 744 | schemainspect = [ 745 | {file = 
"schemainspect-3.1.1663480743-py3-none-any.whl", hash = "sha256:852813d0ee38985242373708b8441c5fbcff611d30dae013afb5cb7d7148c95a"}, 746 | {file = "schemainspect-3.1.1663480743.tar.gz", hash = "sha256:b042676ceffd189983f38a42042dfb19d86925f8c4f8bdd6d40334582fdb116b"}, 747 | ] 748 | six = [ 749 | {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, 750 | {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, 751 | ] 752 | SQLAlchemy = [ 753 | {file = "SQLAlchemy-1.4.41-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:13e397a9371ecd25573a7b90bd037db604331cf403f5318038c46ee44908c44d"}, 754 | {file = "SQLAlchemy-1.4.41-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2d6495f84c4fd11584f34e62f9feec81bf373787b3942270487074e35cbe5330"}, 755 | {file = "SQLAlchemy-1.4.41-cp27-cp27m-win32.whl", hash = "sha256:e570cfc40a29d6ad46c9aeaddbdcee687880940a3a327f2c668dd0e4ef0a441d"}, 756 | {file = "SQLAlchemy-1.4.41-cp27-cp27m-win_amd64.whl", hash = "sha256:5facb7fd6fa8a7353bbe88b95695e555338fb038ad19ceb29c82d94f62775a05"}, 757 | {file = "SQLAlchemy-1.4.41-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f37fa70d95658763254941ddd30ecb23fc4ec0c5a788a7c21034fc2305dab7cc"}, 758 | {file = "SQLAlchemy-1.4.41-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:361f6b5e3f659e3c56ea3518cf85fbdae1b9e788ade0219a67eeaaea8a4e4d2a"}, 759 | {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0990932f7cca97fece8017414f57fdd80db506a045869d7ddf2dda1d7cf69ecc"}, 760 | {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cd767cf5d7252b1c88fcfb58426a32d7bd14a7e4942497e15b68ff5d822b41ad"}, 761 | {file = 
"SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5102fb9ee2c258a2218281adcb3e1918b793c51d6c2b4666ce38c35101bb940e"}, 762 | {file = "SQLAlchemy-1.4.41-cp310-cp310-win32.whl", hash = "sha256:2082a2d2fca363a3ce21cfa3d068c5a1ce4bf720cf6497fb3a9fc643a8ee4ddd"}, 763 | {file = "SQLAlchemy-1.4.41-cp310-cp310-win_amd64.whl", hash = "sha256:e4b12e3d88a8fffd0b4ca559f6d4957ed91bd4c0613a4e13846ab8729dc5c251"}, 764 | {file = "SQLAlchemy-1.4.41-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:90484a2b00baedad361402c257895b13faa3f01780f18f4a104a2f5c413e4536"}, 765 | {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67fc780cfe2b306180e56daaa411dd3186bf979d50a6a7c2a5b5036575cbdbb"}, 766 | {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad2b727fc41c7f8757098903f85fafb4bf587ca6605f82d9bf5604bd9c7cded"}, 767 | {file = "SQLAlchemy-1.4.41-cp311-cp311-win32.whl", hash = "sha256:59bdc291165b6119fc6cdbc287c36f7f2859e6051dd923bdf47b4c55fd2f8bd0"}, 768 | {file = "SQLAlchemy-1.4.41-cp311-cp311-win_amd64.whl", hash = "sha256:d2e054aed4645f9b755db85bc69fc4ed2c9020c19c8027976f66576b906a74f1"}, 769 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:4ba7e122510bbc07258dc42be6ed45997efdf38129bde3e3f12649be70683546"}, 770 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0dcf127bb99458a9d211e6e1f0f3edb96c874dd12f2503d4d8e4f1fd103790b"}, 771 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e16c2be5cb19e2c08da7bd3a87fed2a0d4e90065ee553a940c4fc1a0fb1ab72b"}, 772 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:f5ebeeec5c14533221eb30bad716bc1fd32f509196318fb9caa7002c4a364e4c"}, 773 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-win32.whl", hash = "sha256:3e2ef592ac3693c65210f8b53d0edcf9f4405925adcfc031ff495e8d18169682"}, 774 | {file = "SQLAlchemy-1.4.41-cp36-cp36m-win_amd64.whl", hash = "sha256:eb30cf008850c0a26b72bd1b9be6730830165ce049d239cfdccd906f2685f892"}, 775 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:c23d64a0b28fc78c96289ffbd0d9d1abd48d267269b27f2d34e430ea73ce4b26"}, 776 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb8897367a21b578b26f5713833836f886817ee2ffba1177d446fa3f77e67c8"}, 777 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:14576238a5f89bcf504c5f0a388d0ca78df61fb42cb2af0efe239dc965d4f5c9"}, 778 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:639e1ae8d48b3c86ffe59c0daa9a02e2bfe17ca3d2b41611b30a0073937d4497"}, 779 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-win32.whl", hash = "sha256:0005bd73026cd239fc1e8ccdf54db58b6193be9a02b3f0c5983808f84862c767"}, 780 | {file = "SQLAlchemy-1.4.41-cp37-cp37m-win_amd64.whl", hash = "sha256:5323252be2bd261e0aa3f33cb3a64c45d76829989fa3ce90652838397d84197d"}, 781 | {file = "SQLAlchemy-1.4.41-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:05f0de3a1dc3810a776275763764bb0015a02ae0f698a794646ebc5fb06fad33"}, 782 | {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0002e829142b2af00b4eaa26c51728f3ea68235f232a2e72a9508a3116bd6ed0"}, 783 | {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22ff16cedab5b16a0db79f1bc99e46a6ddececb60c396562e50aab58ddb2871c"}, 784 | {file = 
"SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfd238f766a5bb5ee5545a62dd03f316ac67966a6a658efb63eeff8158a4bbf"}, 785 | {file = "SQLAlchemy-1.4.41-cp38-cp38-win32.whl", hash = "sha256:58bb65b3274b0c8a02cea9f91d6f44d0da79abc993b33bdedbfec98c8440175a"}, 786 | {file = "SQLAlchemy-1.4.41-cp38-cp38-win_amd64.whl", hash = "sha256:ce8feaa52c1640de9541eeaaa8b5fb632d9d66249c947bb0d89dd01f87c7c288"}, 787 | {file = "SQLAlchemy-1.4.41-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:199a73c31ac8ea59937cc0bf3dfc04392e81afe2ec8a74f26f489d268867846c"}, 788 | {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676d51c9f6f6226ae8f26dc83ec291c088fe7633269757d333978df78d931ab"}, 789 | {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:036d8472356e1d5f096c5e0e1a7e0f9182140ada3602f8fff6b7329e9e7cfbcd"}, 790 | {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2307495d9e0ea00d0c726be97a5b96615035854972cc538f6e7eaed23a35886c"}, 791 | {file = "SQLAlchemy-1.4.41-cp39-cp39-win32.whl", hash = "sha256:9c56e19780cd1344fcd362fd6265a15f48aa8d365996a37fab1495cae8fcd97d"}, 792 | {file = "SQLAlchemy-1.4.41-cp39-cp39-win_amd64.whl", hash = "sha256:f5fa526d027d804b1f85cdda1eb091f70bde6fb7d87892f6dd5a48925bc88898"}, 793 | {file = "SQLAlchemy-1.4.41.tar.gz", hash = "sha256:0292f70d1797e3c54e862e6f30ae474014648bc9c723e14a2fda730adb0a9791"}, 794 | ] 795 | sqlbag = [ 796 | {file = "sqlbag-0.1.1617247075-py2.py3-none-any.whl", hash = "sha256:ecdef26d661f8640711030ac6ee618deb92b91f9f0fc2efbf8a3b133af13092d"}, 797 | {file = "sqlbag-0.1.1617247075.tar.gz", hash = "sha256:b9d7862c3b2030356d796ca872907962fd54704066978d7ae89383f5123366ed"}, 798 | ] 799 | toml = [ 800 | {file = 
"toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, 801 | {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, 802 | ] 803 | tomli = [ 804 | {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, 805 | {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, 806 | ] 807 | typed-ast = [ 808 | {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, 809 | {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, 810 | {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, 811 | {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, 812 | {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, 813 | {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, 814 | {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, 815 | {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, 816 | {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = 
"sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, 817 | {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, 818 | {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, 819 | {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, 820 | {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, 821 | {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, 822 | {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, 823 | {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, 824 | {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, 825 | {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, 826 | {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, 827 | {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, 828 | {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, 829 | {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, 830 | {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, 831 | {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, 832 | ] 833 | typing-extensions = [ 834 | {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"}, 835 | {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"}, 836 | ] 837 | zipp = [ 838 | {file = "zipp-3.8.1-py3-none-any.whl", hash = "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"}, 839 | {file = "zipp-3.8.1.tar.gz", hash = "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2"}, 840 | ] 841 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "migra" 3 | version = "3.0" 4 | authors = [ "Robert Lechte ",] 5 | license = "Unlicense" 6 | readme = "README.md" 7 | description = "Like `diff` but for PostgreSQL schemas" 8 | 9 | repository = "https://github.com/djrobstep/migra" 10 | homepage = "https://databaseci.com/docs/migra" 11 | 12 | [tool.poetry.dependencies] 13 | python = ">=3.7,<4" 14 | sqlbag = "*" 15 | six = "*" 16 | # schemainspect = {path="../schemainspect", develop=true} 17 | schemainspect = ">=3.1.1663480743" 18 | psycopg2-binary = { version="*", optional = true } 19 | 20 | [tool.poetry.dev-dependencies] 21 | sqlbag = "*" 22 | pytest = "*" 23 | pytest-cov = "*" 24 | 
pytest-clarity = "*" 25 | psycopg2-binary = "*" 26 | flake8 = "*" 27 | isort = "*" 28 | black = "*" 29 | toml = "*" 30 | 31 | [tool.poetry.scripts] 32 | migra = 'migra:do_command' 33 | 34 | [tool.poetry.extras] 35 | pg = ["psycopg2-binary"] 36 | 37 | [tool.isort] 38 | multi_line_output = 3 39 | include_trailing_comma = true 40 | line_length = 88 41 | 42 | [build-system] 43 | requires = ["poetry-core>=1.0.0"] 44 | build-backend = "poetry.core.masonry.api" 45 | -------------------------------------------------------------------------------- /tests/FIXTURES/collations/a.sql: -------------------------------------------------------------------------------- 1 | CREATE COLLATION posix FROM "POSIX"; 2 | 3 | create table t( 4 | a text, 5 | b text collate posix 6 | ); 7 | 8 | CREATE COLLATION numeric (provider = icu, locale = 'en-u-kn-true'); 9 | -------------------------------------------------------------------------------- /tests/FIXTURES/collations/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/collations/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/collations/b.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | CREATE COLLATION numeric (provider = icu, locale = 'en-u-kn-true'); 5 | 6 | create table t( 7 | a text, 8 | b text collate numeric, 9 | c text collate numeric 10 | ); 11 | -------------------------------------------------------------------------------- /tests/FIXTURES/collations/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."t" add column "c" text collate "numeric"; 2 | 3 | alter table "public"."t" alter column "b" set data type text collate "numeric" using "b"::text; 4 | 5 | drop collation if exists "public"."posix"; 6 | 
-------------------------------------------------------------------------------- /tests/FIXTURES/collations/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/collations/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/constraints/a.sql: -------------------------------------------------------------------------------- 1 | create table t1(a int); 2 | 3 | create table b(bb int primary key); 4 | 5 | create table t2(a int primary key, bb int references b(bb), price numeric, constraint x check (price > 0)); 6 | 7 | create table c(cc int unique); 8 | 9 | create unique index on t1(a); 10 | 11 | CREATE TABLE circles_dropexclude ( 12 | c circle, 13 | EXCLUDE USING gist (c WITH &&) 14 | ); 15 | -------------------------------------------------------------------------------- /tests/FIXTURES/constraints/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/constraints/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/constraints/b.sql: -------------------------------------------------------------------------------- 1 | create table b(bb int primary key); 2 | 3 | create table t2(a int, bb int references b(bb) DEFERRABLE INITIALLY deferred); 4 | 5 | create table t1(a int primary key, price numeric, constraint x check (price > 0)); 6 | 7 | create table c(cc int unique); 8 | 9 | CREATE UNIQUE INDEX c_pkey ON public.c USING btree (cc); 10 | 11 | alter table "public"."c" add constraint "c_pkey" PRIMARY KEY using index "c_pkey" deferrable INITIALLY deferred; 12 | 13 | create unique index on t2(a); 14 | 15 | CREATE TABLE circles ( 16 | c circle, 17 | EXCLUDE USING gist (c 
WITH &&) 18 | ); 19 | 20 | CREATE TABLE circles_dropexclude ( 21 | c circle 22 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/constraints/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."circles_dropexclude" drop constraint "circles_dropexclude_c_excl"; 2 | 3 | alter table "public"."t2" drop constraint "x"; 4 | 5 | alter table "public"."t2" drop constraint "t2_bb_fkey"; 6 | 7 | alter table "public"."t2" drop constraint "t2_pkey"; 8 | 9 | select 1; -- drop index if exists "public"."circles_dropexclude_c_excl"; 10 | 11 | drop index if exists "public"."t1_a_idx"; 12 | 13 | drop index if exists "public"."t2_pkey"; 14 | 15 | create table "public"."circles" ( 16 | "c" circle 17 | ); 18 | 19 | 20 | alter table "public"."c" alter column "cc" set not null; 21 | 22 | alter table "public"."t1" add column "price" numeric; 23 | 24 | alter table "public"."t1" alter column "a" set not null; 25 | 26 | alter table "public"."t2" drop column "price"; 27 | 28 | alter table "public"."t2" alter column "a" drop not null; 29 | 30 | CREATE UNIQUE INDEX c_pkey ON public.c USING btree (cc); 31 | 32 | select 1; -- CREATE INDEX circles_c_excl ON public.circles USING gist (c); 33 | 34 | CREATE UNIQUE INDEX t1_pkey ON public.t1 USING btree (a); 35 | 36 | CREATE UNIQUE INDEX t2_a_idx ON public.t2 USING btree (a); 37 | 38 | alter table "public"."c" add constraint "c_pkey" PRIMARY KEY using index "c_pkey" DEFERRABLE INITIALLY DEFERRED; 39 | 40 | alter table "public"."t1" add constraint "t1_pkey" PRIMARY KEY using index "t1_pkey"; 41 | 42 | alter table "public"."circles" add constraint "circles_c_excl" EXCLUDE USING gist (c WITH &&); 43 | 44 | alter table "public"."t1" add constraint "x" CHECK ((price > (0)::numeric)) not valid; 45 | 46 | alter table "public"."t1" validate constraint "x"; 47 | 48 | alter table "public"."t2" add constraint "t2_bb_fkey" FOREIGN KEY (bb) 
REFERENCES b(bb) DEFERRABLE INITIALLY DEFERRED not valid; 49 | 50 | alter table "public"."t2" validate constraint "t2_bb_fkey"; -------------------------------------------------------------------------------- /tests/FIXTURES/constraints/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/constraints/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies/a.sql: -------------------------------------------------------------------------------- 1 | create table basetable(id serial primary key, name text); 2 | 3 | create view aaa_view1 as select name from basetable; 4 | 5 | create view bbb_view2 as select name from aaa_view1; 6 | 7 | create view ccc_view3 as select name from bbb_view2; 8 | 9 | create view ddd_changed as select name from basetable; 10 | 11 | create view ddd_unchanged as select name from ddd_changed; 12 | 13 | create or replace function "public"."depends_on_bbb_view2"(t text) 14 | returns TABLE(x text) as 15 | $$ select * from bbb_view2 $$ 16 | language SQL VOLATILE CALLED ON NULL INPUT SECURITY INVOKER; 17 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies/additions.sql: -------------------------------------------------------------------------------- 1 | create view ddd as select 'abc123' as a; 2 | 3 | create or replace function "public"."fff"(t text) 4 | returns TABLE(x text) as 5 | $$ select a::text from ddd $$ 6 | language SQL VOLATILE CALLED ON NULL INPUT SECURITY INVOKER; 7 | 8 | create view eee as select * from fff('abc123'); 9 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies/b.sql: -------------------------------------------------------------------------------- 1 | create table basetable(id serial primary key, 
name text); 2 | 3 | create view ddd_changed as select name, 'x' as x from basetable; 4 | 5 | create view ddd_unchanged as select name from ddd_changed; 6 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies/expected.sql: -------------------------------------------------------------------------------- 1 | drop view if exists "public"."ccc_view3"; 2 | 3 | drop function if exists "public"."depends_on_bbb_view2"(t text); 4 | 5 | drop view if exists "public"."bbb_view2"; 6 | 7 | drop view if exists "public"."aaa_view1"; 8 | 9 | create or replace view "public"."ddd_changed" as SELECT basetable.name, 10 | 'x'::text AS x 11 | FROM basetable; -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies/expected2.sql: -------------------------------------------------------------------------------- 1 | drop view if exists "public"."ccc_view3"; 2 | 3 | drop view if exists "public"."ddd"; 4 | 5 | drop function if exists "public"."depends_on_bbb_view2"(t text); 6 | 7 | drop view if exists "public"."eee"; 8 | 9 | drop function if exists "public"."fff"(t text); 10 | 11 | drop view if exists "public"."bbb_view2"; 12 | 13 | drop view if exists "public"."aaa_view1"; 14 | 15 | create or replace view "public"."ddd_changed" as SELECT basetable.name, 16 | 'x'::text AS x 17 | FROM basetable; 18 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies2/a.sql: -------------------------------------------------------------------------------- 1 | create schema x; 2 | 3 | create table x.data(id uuid, name text); 4 | 5 | create view x.q as select * from x.data; 6 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies2/additions.sql: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies2/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies2/b.sql: -------------------------------------------------------------------------------- 1 | create schema x; 2 | 3 | create table x.t_data(id uuid, name text); 4 | 5 | create view x.data as select * from x.t_data; 6 | 7 | create view x.q as select * from x.data; 8 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies2/expected.sql: -------------------------------------------------------------------------------- 1 | drop view if exists "x"."q"; 2 | 3 | drop table "x"."data"; 4 | 5 | create table "x"."t_data" ( 6 | "id" uuid, 7 | "name" text 8 | ); 9 | 10 | 11 | create or replace view "x"."data" as SELECT t_data.id, 12 | t_data.name 13 | FROM x.t_data; 14 | 15 | 16 | create or replace view "x"."q" as SELECT data.id, 17 | data.name 18 | FROM x.data; 19 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies2/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies2/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies3/a.sql: -------------------------------------------------------------------------------- 1 | create table t(a int); 2 | 3 | create view abc as select a from t; 4 | 5 | create view switcharoo as select 1 as a; 6 | 7 | create table "strange_name(((yo?)))"(id text); 8 | 9 | create view "strange_view(what)" as select id from "strange_name(((yo?)))"; -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies3/additions.sql: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies3/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies3/b.sql: -------------------------------------------------------------------------------- 1 | create table t(a int, b int); 2 | 3 | create view abc as select a from t; 4 | 5 | create materialized view switcharoo as select 1 as a; 6 | 7 | create table "strange_name(((yo?)))"(id text); 8 | 9 | create view "strange_view(what)" as select id::int * 2 as a from "strange_name(((yo?)))"; -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies3/expected.sql: -------------------------------------------------------------------------------- 1 | drop view if exists "public"."strange_view(what)"; 2 | 3 | drop view if exists "public"."switcharoo"; 4 | 5 | alter table "public"."t" add column "b" integer; 6 | 7 | create or replace view "public"."strange_view(what)" as SELECT (("strange_name(((yo?)))".id)::integer * 2) AS a 8 | FROM "strange_name(((yo?)))"; 9 | 10 | 11 | create materialized view "public"."switcharoo" as SELECT 1 AS a; 12 | -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies3/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies3/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies4/a.sql: -------------------------------------------------------------------------------- 1 | create table t2(a int); -------------------------------------------------------------------------------- 
/tests/FIXTURES/dependencies4/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies4/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies4/b.sql: -------------------------------------------------------------------------------- 1 | create table t ( 2 | id integer not null primary key, 3 | a text, 4 | b integer 5 | ); 6 | 7 | create view v as 8 | select id, a, max(b) 9 | from t 10 | group by id; -- "a" is implied because "id" is primary key 11 | 12 | 13 | create materialized view mv as select id from v; 14 | 15 | create unique index on mv (id); -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies4/expected.sql: -------------------------------------------------------------------------------- 1 | drop table "public"."t2"; 2 | 3 | create table "public"."t" ( 4 | "id" integer not null, 5 | "a" text, 6 | "b" integer 7 | ); 8 | 9 | 10 | CREATE UNIQUE INDEX t_pkey ON public.t USING btree (id); 11 | 12 | alter table "public"."t" add constraint "t_pkey" PRIMARY KEY using index "t_pkey"; 13 | 14 | create or replace view "public"."v" as SELECT t.id, 15 | t.a, 16 | max(t.b) AS max 17 | FROM t 18 | GROUP BY t.id; 19 | 20 | 21 | create materialized view "public"."mv" as SELECT v.id 22 | FROM v; 23 | 24 | 25 | CREATE UNIQUE INDEX mv_id_idx ON public.mv USING btree (id); -------------------------------------------------------------------------------- /tests/FIXTURES/dependencies4/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/dependencies4/expected2.sql -------------------------------------------------------------------------------- 
/tests/FIXTURES/enumdefaults/a.sql: -------------------------------------------------------------------------------- 1 | create type order_status as enum('pending', 'processing', 'complete'); 2 | 3 | create schema other; 4 | 5 | create type other.otherenum1 as enum('a', 'b', 'c'); 6 | 7 | create type other.otherenum2 as enum('a', 'b', 'c'); 8 | 9 | create table orders( 10 | id serial primary key, 11 | status order_status default 'pending'::order_status, 12 | othercolumn other.otherenum1 13 | ); 14 | 15 | -------------------------------------------------------------------------------- /tests/FIXTURES/enumdefaults/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/enumdefaults/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/enumdefaults/b.sql: -------------------------------------------------------------------------------- 1 | create type order_status as enum('pending', 'processing', 'complete', 'rejected'); 2 | 3 | create schema other; 4 | 5 | create type other.otherenum1 as enum('a', 'b', 'c'); 6 | 7 | create type other.otherenum2 as enum('a', 'b', 'c'); 8 | 9 | create table orders( 10 | id serial primary key, 11 | status order_status default 'pending'::order_status, 12 | othercolumn other.otherenum2 13 | ); 14 | -------------------------------------------------------------------------------- /tests/FIXTURES/enumdefaults/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."orders" alter column "status" drop default; 2 | 3 | alter type "public"."order_status" rename to "order_status__old_version_to_be_dropped"; 4 | 5 | create type "public"."order_status" as enum ('pending', 'processing', 'complete', 'rejected'); 6 | 7 | alter table "public"."orders" alter column status type 
"public"."order_status" using status::text::"public"."order_status"; 8 | 9 | alter table "public"."orders" alter column "status" set default 'pending'::order_status; 10 | 11 | drop type "public"."order_status__old_version_to_be_dropped"; 12 | 13 | alter table "public"."orders" alter column "othercolumn" set data type other.otherenum2 using "othercolumn"::text::other.otherenum2; 14 | -------------------------------------------------------------------------------- /tests/FIXTURES/enumdefaults/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/enumdefaults/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/enumdeps/a.sql: -------------------------------------------------------------------------------- 1 | create type e as enum('a', 'b', 'c'); 2 | 3 | create table t(id integer primary key, category e); 4 | 5 | create view v as select * from t; 6 | 7 | create view v2 as select *, 'b'::e from t; -------------------------------------------------------------------------------- /tests/FIXTURES/enumdeps/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/enumdeps/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/enumdeps/b.sql: -------------------------------------------------------------------------------- 1 | create type e as enum('a', 'b', 'c', 'd'); 2 | 3 | create table t(id integer primary key, category e); 4 | 5 | create view v as select * from t; 6 | 7 | create view v2 as select *, 'b'::e from t; 8 | 9 | create table created_with_e(id integer, category e); -------------------------------------------------------------------------------- 
/tests/FIXTURES/enumdeps/expected.sql: -------------------------------------------------------------------------------- 1 | drop view if exists "public"."v"; 2 | 3 | drop view if exists "public"."v2"; 4 | 5 | alter type "public"."e" rename to "e__old_version_to_be_dropped"; 6 | 7 | create type "public"."e" as enum ('a', 'b', 'c', 'd'); 8 | 9 | create table "public"."created_with_e" ( 10 | "id" integer, 11 | "category" e 12 | ); 13 | 14 | 15 | alter table "public"."t" alter column category type "public"."e" using category::text::"public"."e"; 16 | 17 | drop type "public"."e__old_version_to_be_dropped"; 18 | 19 | create or replace view "public"."v" as SELECT t.id, 20 | t.category 21 | FROM t; 22 | 23 | 24 | create or replace view "public"."v2" as SELECT t.id, 25 | t.category, 26 | 'b'::e AS e 27 | FROM t; 28 | 29 | 30 | -------------------------------------------------------------------------------- /tests/FIXTURES/enumdeps/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/enumdeps/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/everything/a.sql: -------------------------------------------------------------------------------- 1 | create extension pg_trgm; 2 | 3 | create schema goodschema; 4 | create schema badschema; 5 | 6 | CREATE TYPE shipping_status AS ENUM ('not shipped', 'shipped'); 7 | 8 | CREATE TYPE unwanted_enum AS ENUM ('unwanted', 'not wanted'); 9 | 10 | CREATE TYPE unused_enum AS ENUM ('a', 'b'); 11 | 12 | CREATE TYPE usage_dropped_enum AS ENUM ('x', 'y'); 13 | 14 | create table columnless_table(); 15 | 16 | create unlogged table change_to_logged(); 17 | 18 | create table change_to_unlogged(); 19 | 20 | CREATE TABLE aunwanted ( 21 | id serial primary key, 22 | name text not null 23 | ); 24 | 25 | CREATE TABLE orders ( 26 | order_id serial 
primary key, 27 | shipping_address text, 28 | status shipping_status, 29 | status2 usage_dropped_enum 30 | ); 31 | 32 | CREATE TABLE products ( 33 | product_no integer, 34 | name varchar(10) not null unique, 35 | price numeric, 36 | x integer not null default 7 unique, 37 | oldcolumn text, 38 | constraint x check (price > 0), 39 | z integer REFERENCES orders ON DELETE CASCADE, 40 | zz integer REFERENCES aunwanted ON DELETE CASCADE 41 | ); 42 | 43 | create unique index on products(x); 44 | 45 | create unique index on orders(order_id); 46 | 47 | create index on products(price); 48 | 49 | create view vvv as select * from products; 50 | 51 | create materialized view matvvv as select * from products; 52 | 53 | grant select, insert on table products to postgres; 54 | 55 | create or replace function public.changed(i integer, t text[]) 56 | returns TABLE(a text, c integer) as 57 | $$ 58 | declare 59 | BEGIN 60 | select 'no', 1; 61 | END; 62 | 63 | $$ 64 | LANGUAGE PLPGSQL STABLE returns null on null input security definer; 65 | -------------------------------------------------------------------------------- /tests/FIXTURES/everything/additions.sql: -------------------------------------------------------------------------------- 1 | alter table products rename column oldcolumn to newcolumn; 2 | -------------------------------------------------------------------------------- /tests/FIXTURES/everything/b.sql: -------------------------------------------------------------------------------- 1 | create extension hstore; 2 | create extension citext; 3 | 4 | create schema goodschema; 5 | create schema evenbetterschema; 6 | 7 | CREATE TYPE shipping_status AS ENUM ('not shipped', 'shipped', 'delivered'); 8 | 9 | CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed'); 10 | 11 | CREATE TYPE unused_enum AS ENUM ('a', 'b', 'c'); 12 | 13 | CREATE TYPE usage_dropped_enum AS ENUM ('x', 'y'); 14 | 15 | create table columnless_table2(); 16 | 17 | create table change_to_logged(); 18 | 19 | 
create unlogged table change_to_unlogged(); 20 | 21 | CREATE TABLE products ( 22 | product_no serial primary key, 23 | name text, 24 | price numeric not null default 100, 25 | x integer, 26 | newcolumn text, 27 | newcolumn2 interval, 28 | constraint x check (price > 10), 29 | constraint y check (price > 0) 30 | ); 31 | 32 | create index on products(name); 33 | 34 | grant update, insert on table products to postgres; 35 | 36 | CREATE TABLE orders ( 37 | order_id integer primary key unique, 38 | shipping_address text, 39 | status shipping_status, 40 | status2 text, 41 | h hstore 42 | ); 43 | 44 | CREATE TABLE order_items ( 45 | product_no integer REFERENCES products ON DELETE RESTRICT, 46 | order_id integer REFERENCES orders ON DELETE CASCADE, 47 | quantity integer, 48 | PRIMARY KEY (product_no, order_id) 49 | ); 50 | 51 | create or replace function public.changed(i integer, t text[]) 52 | returns TABLE(a text, c integer) as 53 | $$ 54 | declare 55 | BEGIN 56 | select 'no', 1; 57 | END; 58 | 59 | $$ 60 | LANGUAGE PLPGSQL volatile returns null on null input security definer; 61 | 62 | create or replace function public.newfunc(i integer, t text[]) 63 | returns TABLE(a text, c integer) as 64 | $$ 65 | declare 66 | BEGIN 67 | select 'no', 1; 68 | END; 69 | 70 | $$ 71 | LANGUAGE PLPGSQL STABLE returns null on null input security invoker; 72 | 73 | create view vvv as select 2 as a; 74 | 75 | create materialized view matvvv as select 2 as a; 76 | 77 | CREATE TABLE bug ( 78 | id serial, 79 | description text, 80 | status text-- bug_status 81 | ); 82 | -------------------------------------------------------------------------------- /tests/FIXTURES/everything/expected.sql: -------------------------------------------------------------------------------- 1 | create schema if not exists "evenbetterschema"; 2 | 3 | create extension if not exists "citext" with schema "public"; 4 | 5 | create extension if not exists "hstore" with schema "public"; 6 | 7 | create type 
"public"."bug_status" as enum ('new', 'open', 'closed'); 8 | 9 | create sequence "public"."bug_id_seq"; 10 | 11 | create sequence "public"."products_product_no_seq"; 12 | 13 | revoke select on table "public"."products" from "postgres"; 14 | 15 | alter table "public"."products" drop constraint "products_name_key"; 16 | 17 | alter table "public"."products" drop constraint "products_x_key"; 18 | 19 | alter table "public"."products" drop constraint "products_z_fkey"; 20 | 21 | alter table "public"."products" drop constraint "products_zz_fkey"; 22 | 23 | alter table "public"."products" drop constraint "x"; 24 | 25 | drop materialized view if exists "public"."matvvv"; 26 | 27 | drop view if exists "public"."vvv"; 28 | 29 | alter table "public"."aunwanted" drop constraint "aunwanted_pkey"; 30 | 31 | drop index if exists "public"."aunwanted_pkey"; 32 | 33 | drop index if exists "public"."orders_order_id_idx"; 34 | 35 | drop index if exists "public"."products_name_key"; 36 | 37 | drop index if exists "public"."products_price_idx"; 38 | 39 | drop index if exists "public"."products_x_idx"; 40 | 41 | drop index if exists "public"."products_x_key"; 42 | 43 | drop table "public"."aunwanted"; 44 | 45 | drop table "public"."columnless_table"; 46 | 47 | alter type "public"."shipping_status" rename to "shipping_status__old_version_to_be_dropped"; 48 | 49 | create type "public"."shipping_status" as enum ('not shipped', 'shipped', 'delivered'); 50 | 51 | alter type "public"."unused_enum" rename to "unused_enum__old_version_to_be_dropped"; 52 | 53 | create type "public"."unused_enum" as enum ('a', 'b', 'c'); 54 | 55 | create table "public"."bug" ( 56 | "id" integer not null default nextval('bug_id_seq'::regclass), 57 | "description" text, 58 | "status" text 59 | ); 60 | 61 | 62 | create table "public"."columnless_table2" ( 63 | ); 64 | 65 | 66 | create table "public"."order_items" ( 67 | "product_no" integer not null, 68 | "order_id" integer not null, 69 | "quantity" integer 70 | ); 71 
| 72 | 73 | alter table "public"."orders" alter column status type "public"."shipping_status" using status::text::"public"."shipping_status"; 74 | 75 | drop type "public"."shipping_status__old_version_to_be_dropped"; 76 | 77 | drop type "public"."unused_enum__old_version_to_be_dropped"; 78 | 79 | alter table "public"."change_to_logged" set logged; 80 | 81 | alter table "public"."change_to_unlogged" set unlogged; 82 | 83 | alter table "public"."orders" add column "h" hstore; 84 | 85 | alter table "public"."orders" alter column "order_id" drop default; 86 | 87 | alter table "public"."orders" alter column "status2" set data type text using "status2"::text; 88 | 89 | alter table "public"."products" drop column "oldcolumn"; 90 | 91 | alter table "public"."products" drop column "z"; 92 | 93 | alter table "public"."products" drop column "zz"; 94 | 95 | alter table "public"."products" add column "newcolumn" text; 96 | 97 | alter table "public"."products" add column "newcolumn2" interval; 98 | 99 | alter table "public"."products" alter column "name" drop not null; 100 | 101 | alter table "public"."products" alter column "name" set data type text using "name"::text; 102 | 103 | alter table "public"."products" alter column "price" set default 100; 104 | 105 | alter table "public"."products" alter column "price" set not null; 106 | 107 | alter table "public"."products" alter column "product_no" set default nextval('products_product_no_seq'::regclass); 108 | 109 | alter table "public"."products" alter column "product_no" set not null; 110 | 111 | alter table "public"."products" alter column "x" drop default; 112 | 113 | alter table "public"."products" alter column "x" drop not null; 114 | 115 | alter sequence "public"."bug_id_seq" owned by "public"."bug"."id"; 116 | 117 | alter sequence "public"."products_product_no_seq" owned by "public"."products"."product_no"; 118 | 119 | drop sequence if exists "public"."aunwanted_id_seq"; 120 | 121 | drop sequence if exists 
"public"."orders_order_id_seq"; 122 | 123 | drop type "public"."unwanted_enum"; 124 | 125 | drop extension if exists "pg_trgm"; 126 | 127 | CREATE UNIQUE INDEX order_items_pkey ON public.order_items USING btree (product_no, order_id); 128 | 129 | CREATE INDEX products_name_idx ON public.products USING btree (name); 130 | 131 | CREATE UNIQUE INDEX products_pkey ON public.products USING btree (product_no); 132 | 133 | alter table "public"."order_items" add constraint "order_items_pkey" PRIMARY KEY using index "order_items_pkey"; 134 | 135 | alter table "public"."products" add constraint "products_pkey" PRIMARY KEY using index "products_pkey"; 136 | 137 | alter table "public"."order_items" add constraint "order_items_order_id_fkey" FOREIGN KEY (order_id) REFERENCES orders(order_id) ON DELETE CASCADE not valid; 138 | 139 | alter table "public"."order_items" validate constraint "order_items_order_id_fkey"; 140 | 141 | alter table "public"."order_items" add constraint "order_items_product_no_fkey" FOREIGN KEY (product_no) REFERENCES products(product_no) ON DELETE RESTRICT not valid; 142 | 143 | alter table "public"."order_items" validate constraint "order_items_product_no_fkey"; 144 | 145 | alter table "public"."products" add constraint "y" CHECK ((price > (0)::numeric)) not valid; 146 | 147 | alter table "public"."products" validate constraint "y"; 148 | 149 | alter table "public"."products" add constraint "x" CHECK ((price > (10)::numeric)) not valid; 150 | 151 | alter table "public"."products" validate constraint "x"; 152 | 153 | set check_function_bodies = off; 154 | 155 | CREATE OR REPLACE FUNCTION public.newfunc(i integer, t text[]) 156 | RETURNS TABLE(a text, c integer) 157 | LANGUAGE plpgsql 158 | STABLE STRICT 159 | AS $function$ 160 | declare 161 | BEGIN 162 | select 'no', 1; 163 | END; 164 | 165 | $function$ 166 | ; 167 | 168 | CREATE OR REPLACE FUNCTION public.changed(i integer, t text[]) 169 | RETURNS TABLE(a text, c integer) 170 | LANGUAGE plpgsql 171 | 
STRICT SECURITY DEFINER 172 | AS $function$ 173 | declare 174 | BEGIN 175 | select 'no', 1; 176 | END; 177 | 178 | $function$ 179 | ; 180 | 181 | create materialized view "public"."matvvv" as SELECT 2 AS a; 182 | 183 | 184 | create or replace view "public"."vvv" as SELECT 2 AS a; 185 | 186 | 187 | grant update on table "public"."products" to "postgres"; 188 | 189 | drop schema if exists "badschema"; 190 | -------------------------------------------------------------------------------- /tests/FIXTURES/everything/expected2.sql: -------------------------------------------------------------------------------- 1 | create schema if not exists "evenbetterschema"; 2 | 3 | create extension if not exists "citext" with schema "public"; 4 | 5 | create extension if not exists "hstore" with schema "public"; 6 | 7 | create type "public"."bug_status" as enum ('new', 'open', 'closed'); 8 | 9 | create sequence "public"."bug_id_seq"; 10 | 11 | create sequence "public"."products_product_no_seq"; 12 | 13 | revoke select on table "public"."products" from "postgres"; 14 | 15 | alter table "public"."products" drop constraint "products_name_key"; 16 | 17 | alter table "public"."products" drop constraint "products_x_key"; 18 | 19 | alter table "public"."products" drop constraint "products_z_fkey"; 20 | 21 | alter table "public"."products" drop constraint "products_zz_fkey"; 22 | 23 | alter table "public"."products" drop constraint "x"; 24 | 25 | drop materialized view if exists "public"."matvvv"; 26 | 27 | drop view if exists "public"."vvv"; 28 | 29 | alter table "public"."aunwanted" drop constraint "aunwanted_pkey"; 30 | 31 | drop index if exists "public"."aunwanted_pkey"; 32 | 33 | drop index if exists "public"."orders_order_id_idx"; 34 | 35 | drop index if exists "public"."products_name_key"; 36 | 37 | drop index if exists "public"."products_price_idx"; 38 | 39 | drop index if exists "public"."products_x_idx"; 40 | 41 | drop index if exists "public"."products_x_key"; 42 | 43 | drop table 
"public"."aunwanted"; 44 | 45 | drop table "public"."columnless_table"; 46 | 47 | alter type "public"."shipping_status" rename to "shipping_status__old_version_to_be_dropped"; 48 | 49 | create type "public"."shipping_status" as enum ('not shipped', 'shipped', 'delivered'); 50 | 51 | alter type "public"."unused_enum" rename to "unused_enum__old_version_to_be_dropped"; 52 | 53 | create type "public"."unused_enum" as enum ('a', 'b', 'c'); 54 | 55 | create table "public"."bug" ( 56 | "id" integer not null default nextval('bug_id_seq'::regclass), 57 | "description" text, 58 | "status" text 59 | ); 60 | 61 | 62 | create table "public"."columnless_table2" ( 63 | ); 64 | 65 | 66 | create table "public"."order_items" ( 67 | "product_no" integer not null, 68 | "order_id" integer not null, 69 | "quantity" integer 70 | ); 71 | 72 | 73 | alter table "public"."orders" alter column status type "public"."shipping_status" using status::text::"public"."shipping_status"; 74 | 75 | drop type "public"."shipping_status__old_version_to_be_dropped"; 76 | 77 | drop type "public"."unused_enum__old_version_to_be_dropped"; 78 | 79 | alter table "public"."change_to_logged" set logged; 80 | 81 | alter table "public"."change_to_unlogged" set unlogged; 82 | 83 | alter table "public"."orders" add column "h" hstore; 84 | 85 | alter table "public"."orders" alter column "order_id" drop default; 86 | 87 | alter table "public"."orders" alter column "status2" set data type text using "status2"::text; 88 | 89 | alter table "public"."products" drop column "z"; 90 | 91 | alter table "public"."products" drop column "zz"; 92 | 93 | alter table "public"."products" add column "newcolumn2" interval; 94 | 95 | alter table "public"."products" alter column "name" drop not null; 96 | 97 | alter table "public"."products" alter column "name" set data type text using "name"::text; 98 | 99 | alter table "public"."products" alter column "price" set default 100; 100 | 101 | alter table "public"."products" alter column 
"price" set not null; 102 | 103 | alter table "public"."products" alter column "product_no" set default nextval('products_product_no_seq'::regclass); 104 | 105 | alter table "public"."products" alter column "product_no" set not null; 106 | 107 | alter table "public"."products" alter column "x" drop default; 108 | 109 | alter table "public"."products" alter column "x" drop not null; 110 | 111 | alter sequence "public"."bug_id_seq" owned by "public"."bug"."id"; 112 | 113 | alter sequence "public"."products_product_no_seq" owned by "public"."products"."product_no"; 114 | 115 | drop sequence if exists "public"."aunwanted_id_seq"; 116 | 117 | drop sequence if exists "public"."orders_order_id_seq"; 118 | 119 | drop type "public"."unwanted_enum"; 120 | 121 | drop extension if exists "pg_trgm"; 122 | 123 | CREATE UNIQUE INDEX order_items_pkey ON public.order_items USING btree (product_no, order_id); 124 | 125 | CREATE INDEX products_name_idx ON public.products USING btree (name); 126 | 127 | CREATE UNIQUE INDEX products_pkey ON public.products USING btree (product_no); 128 | 129 | alter table "public"."order_items" add constraint "order_items_pkey" PRIMARY KEY using index "order_items_pkey"; 130 | 131 | alter table "public"."products" add constraint "products_pkey" PRIMARY KEY using index "products_pkey"; 132 | 133 | alter table "public"."order_items" add constraint "order_items_order_id_fkey" FOREIGN KEY (order_id) REFERENCES orders(order_id) ON DELETE CASCADE not valid; 134 | 135 | alter table "public"."order_items" validate constraint "order_items_order_id_fkey"; 136 | 137 | alter table "public"."order_items" add constraint "order_items_product_no_fkey" FOREIGN KEY (product_no) REFERENCES products(product_no) ON DELETE RESTRICT not valid; 138 | 139 | alter table "public"."order_items" validate constraint "order_items_product_no_fkey"; 140 | 141 | alter table "public"."products" add constraint "y" CHECK ((price > (0)::numeric)) not valid; 142 | 143 | alter table 
"public"."products" validate constraint "y"; 144 | 145 | alter table "public"."products" add constraint "x" CHECK ((price > (10)::numeric)) not valid; 146 | 147 | alter table "public"."products" validate constraint "x"; 148 | 149 | set check_function_bodies = off; 150 | 151 | CREATE OR REPLACE FUNCTION public.newfunc(i integer, t text[]) 152 | RETURNS TABLE(a text, c integer) 153 | LANGUAGE plpgsql 154 | STABLE STRICT 155 | AS $function$ 156 | declare 157 | BEGIN 158 | select 'no', 1; 159 | END; 160 | 161 | $function$ 162 | ; 163 | 164 | CREATE OR REPLACE FUNCTION public.changed(i integer, t text[]) 165 | RETURNS TABLE(a text, c integer) 166 | LANGUAGE plpgsql 167 | STRICT SECURITY DEFINER 168 | AS $function$ 169 | declare 170 | BEGIN 171 | select 'no', 1; 172 | END; 173 | 174 | $function$ 175 | ; 176 | 177 | create materialized view "public"."matvvv" as SELECT 2 AS a; 178 | 179 | 180 | create or replace view "public"."vvv" as SELECT 2 AS a; 181 | 182 | 183 | grant update on table "public"."products" to "postgres"; 184 | 185 | drop schema if exists "badschema"; 186 | -------------------------------------------------------------------------------- /tests/FIXTURES/excludeschema/a.sql: -------------------------------------------------------------------------------- 1 | create schema excludedschema; 2 | 3 | create table excludedschema.t(id uuid, value text); 4 | 5 | create schema schema1; 6 | 7 | create table schema1.t(id uuid, value text); 8 | 9 | create schema schema2; 10 | 11 | create table schema2.t(id uuid, value text); 12 | -------------------------------------------------------------------------------- /tests/FIXTURES/excludeschema/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/excludeschema/additions.sql -------------------------------------------------------------------------------- 
/tests/FIXTURES/excludeschema/b.sql: -------------------------------------------------------------------------------- 1 | create schema schema1; 2 | 3 | create table schema1.t(id uuid, value text); 4 | 5 | create schema schema2; 6 | 7 | create table schema2.t(id uuid, value text); 8 | 9 | create table schema2.z(id uuid, value text); 10 | 11 | -------------------------------------------------------------------------------- /tests/FIXTURES/excludeschema/expected.sql: -------------------------------------------------------------------------------- 1 | create table "schema2"."z" ( 2 | "id" uuid, 3 | "value" text 4 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/excludeschema/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/excludeschema/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/extversions/a.sql: -------------------------------------------------------------------------------- 1 | create extension pg_trgm version '1.3'; 2 | 3 | create extension hstore; -------------------------------------------------------------------------------- /tests/FIXTURES/extversions/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/extversions/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/extversions/b.sql: -------------------------------------------------------------------------------- 1 | create extension citext version '1.5'; 2 | 3 | create extension pg_trgm version '1.4'; 4 | -------------------------------------------------------------------------------- /tests/FIXTURES/extversions/expected.sql: 
-------------------------------------------------------------------------------- 1 | create extension if not exists "citext" with schema "public" version '1.5'; 2 | 3 | alter extension "pg_trgm" update to '1.4'; 4 | 5 | drop extension if exists "hstore"; -------------------------------------------------------------------------------- /tests/FIXTURES/extversions/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/extversions/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/generated/a.sql: -------------------------------------------------------------------------------- 1 | -- create table t( 2 | -- a int, 3 | -- adding int, 4 | -- modifying int generated always as identity, 5 | -- removing int generated by default as identity 6 | -- ); 7 | 8 | create table t(); 9 | 10 | CREATE TABLE "demo_gencol" ( 11 | "id" serial PRIMARY KEY, -- PRIMARY KEY 12 | "the_column" TEXT NULL GENERATED ALWAYS AS ('the original generated value') STORED, -- The column that is originally GENERATED, then changed not to be 13 | "the_column2" text 14 | ); 15 | 16 | -------------------------------------------------------------------------------- /tests/FIXTURES/generated/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/generated/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/generated/b.sql: -------------------------------------------------------------------------------- 1 | -- create table t( 2 | -- a int, 3 | -- adding int generated always as (1) stored, 4 | -- modifying int generated always as (1) stored, 5 | -- removing int 6 | -- ); 7 | 8 | 9 | 10 | 11 | 12 | CREATE 
TABLE "demo_gencol" ( 13 | "id" serial PRIMARY KEY, -- PRIMARY KEY 14 | "the_column" text, 15 | "the_column2" TEXT NULL GENERATED ALWAYS AS ('the original generated value') STORED -- The column that is originally GENERATED, then changed not to be 16 | ); 17 | 18 | -------------------------------------------------------------------------------- /tests/FIXTURES/generated/expected.sql: -------------------------------------------------------------------------------- 1 | drop table "public"."t"; 2 | 3 | alter table "public"."demo_gencol" drop column "the_column2"; 4 | 5 | alter table "public"."demo_gencol" add column "the_column2" text generated always as ('the original generated value'::text) stored; 6 | 7 | alter table "public"."demo_gencol" alter column "the_column" drop expression; -------------------------------------------------------------------------------- /tests/FIXTURES/generated/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/generated/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/generated_added/a.sql: -------------------------------------------------------------------------------- 1 | -- create table t( 2 | -- a int, 3 | -- adding int, 4 | -- modifying int generated always as identity, 5 | -- removing int generated by default as identity 6 | -- ); 7 | 8 | create table t(); 9 | 10 | CREATE TABLE "demo_gencol" ( 11 | "id" serial PRIMARY KEY , -- PRIMARY KEY 12 | "the_column" TEXT NULL -- The column that is originally GENERATED, then changed not to be 13 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/generated_added/additions.sql: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/generated_added/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/generated_added/b.sql: -------------------------------------------------------------------------------- 1 | -- create table t( 2 | -- a int, 3 | -- adding int generated always as (1) stored, 4 | -- modifying int generated always as (1) stored, 5 | -- removing int 6 | -- ); 7 | 8 | 9 | 10 | CREATE TABLE "demo_gencol" ( 11 | "id" serial PRIMARY KEY , -- PRIMARY KEY 12 | "the_column" TEXT NULL -- The column that is originally GENERATED, then changed not to be 13 | ); 14 | -------------------------------------------------------------------------------- /tests/FIXTURES/generated_added/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."demo_gencol" alter column "the_column" drop expression; -------------------------------------------------------------------------------- /tests/FIXTURES/generated_added/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/generated_added/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/identitycols/a.sql: -------------------------------------------------------------------------------- 1 | create table t( 2 | a int, 3 | b int default 1, 4 | c int generated always as (1) stored, 5 | d int generated always as identity, 6 | e int generated by default as identity 7 | ); 8 | 9 | 10 | create table "public"."gen" ( 11 | "adding" integer, 12 | "removing" integer generated always as (1) stored 13 | ); 14 | 15 | 16 | create table identchanges ( 17 | c int default 77, 18 | d int generated always as identity, 19 | e int generated by default as 
identity, 20 | f int generated always as identity 21 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/identitycols/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/identitycols/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/identitycols/b.sql: -------------------------------------------------------------------------------- 1 | create table t2( 2 | a int, 3 | b int default 1, 4 | c int generated always as (1) stored, 5 | d int generated always as identity, 6 | e int generated by default as identity 7 | ); 8 | 9 | create table "public"."gen" ( 10 | "adding" integer generated always as (1) stored, 11 | "removing" integer 12 | ); 13 | 14 | 15 | create table identchanges ( 16 | c int generated by default as identity, 17 | e int generated always as identity, 18 | d int generated by default as identity, 19 | f int 20 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/identitycols/expected.sql: -------------------------------------------------------------------------------- 1 | drop table "public"."t"; 2 | 3 | create table "public"."t2" ( 4 | "a" integer, 5 | "b" integer default 1, 6 | "c" integer generated always as (1) stored, 7 | "d" integer generated always as identity not null, 8 | "e" integer generated by default as identity not null 9 | ); 10 | 11 | 12 | alter table "public"."gen" drop column "adding"; 13 | 14 | alter table "public"."gen" add column "adding" integer generated always as (1) stored; 15 | 16 | alter table "public"."gen" alter column "removing" drop expression; 17 | 18 | alter table "public"."identchanges" alter column "c" drop default; 19 | 20 | alter table "public"."identchanges" alter column "c" set not null; 21 | 22 | alter table 
"public"."identchanges" alter column "c" add generated by default as identity; 23 | 24 | alter table "public"."identchanges" alter column "d" set generated by default; 25 | 26 | alter table "public"."identchanges" alter column "e" set generated always; 27 | 28 | alter table "public"."identchanges" alter column "f" drop identity; 29 | 30 | alter table "public"."identchanges" alter column "f" drop not null; 31 | -------------------------------------------------------------------------------- /tests/FIXTURES/identitycols/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/identitycols/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/inherit/a.sql: -------------------------------------------------------------------------------- 1 | create table t(id int); -------------------------------------------------------------------------------- /tests/FIXTURES/inherit/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/inherit/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/inherit/b.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE entity_bindings ( 2 | id BIGSERIAL, 3 | entity_type TEXT NOT NULL, 4 | entity_id BIGINT NOT NULL 5 | ); 6 | CREATE TABLE entity_bindings_A ( 7 | CONSTRAINT "entity_type must be A" CHECK("entity_type" = 'A'), 8 | UNIQUE("entity_id", "entity_type") 9 | ) INHERITS (entity_bindings) 10 | ; 11 | CREATE TABLE entity_bindings_B ( 12 | CONSTRAINT "entity_type must be B" CHECK("entity_type" = 'B'), 13 | UNIQUE("entity_id", "entity_type") 14 | ) INHERITS (entity_bindings) 15 | ; 16 | CREATE 
TABLE entity_bindings_C ( 17 | CONSTRAINT "entity_type must be C" CHECK("entity_type" = 'C'), 18 | UNIQUE("entity_id", "entity_type") 19 | ) INHERITS (entity_bindings) 20 | ; -------------------------------------------------------------------------------- /tests/FIXTURES/inherit/expected.sql: -------------------------------------------------------------------------------- 1 | create sequence "public"."entity_bindings_id_seq"; 2 | 3 | drop table "public"."t"; 4 | 5 | create table "public"."entity_bindings" ( 6 | "id" bigint not null default nextval('entity_bindings_id_seq'::regclass), 7 | "entity_type" text not null, 8 | "entity_id" bigint not null 9 | ); 10 | 11 | 12 | create table "public"."entity_bindings_a" ( 13 | "id" bigint not null default nextval('entity_bindings_id_seq'::regclass), 14 | "entity_type" text not null, 15 | "entity_id" bigint not null 16 | ) inherits ("public"."entity_bindings"); 17 | 18 | 19 | create table "public"."entity_bindings_b" ( 20 | "id" bigint not null default nextval('entity_bindings_id_seq'::regclass), 21 | "entity_type" text not null, 22 | "entity_id" bigint not null 23 | ) inherits ("public"."entity_bindings"); 24 | 25 | 26 | create table "public"."entity_bindings_c" ( 27 | "id" bigint not null default nextval('entity_bindings_id_seq'::regclass), 28 | "entity_type" text not null, 29 | "entity_id" bigint not null 30 | ) inherits ("public"."entity_bindings"); 31 | 32 | 33 | alter sequence "public"."entity_bindings_id_seq" owned by "public"."entity_bindings"."id"; 34 | 35 | CREATE UNIQUE INDEX entity_bindings_a_entity_id_entity_type_key ON public.entity_bindings_a USING btree (entity_id, entity_type); 36 | 37 | CREATE UNIQUE INDEX entity_bindings_b_entity_id_entity_type_key ON public.entity_bindings_b USING btree (entity_id, entity_type); 38 | 39 | CREATE UNIQUE INDEX entity_bindings_c_entity_id_entity_type_key ON public.entity_bindings_c USING btree (entity_id, entity_type); 40 | 41 | alter table "public"."entity_bindings_a" add 
constraint "entity_bindings_a_entity_id_entity_type_key" UNIQUE using index "entity_bindings_a_entity_id_entity_type_key"; 42 | 43 | alter table "public"."entity_bindings_a" add constraint "entity_type must be A" CHECK ((entity_type = 'A'::text)) not valid; 44 | 45 | alter table "public"."entity_bindings_a" validate constraint "entity_type must be A"; 46 | 47 | alter table "public"."entity_bindings_b" add constraint "entity_bindings_b_entity_id_entity_type_key" UNIQUE using index "entity_bindings_b_entity_id_entity_type_key"; 48 | 49 | alter table "public"."entity_bindings_b" add constraint "entity_type must be B" CHECK ((entity_type = 'B'::text)) not valid; 50 | 51 | alter table "public"."entity_bindings_b" validate constraint "entity_type must be B"; 52 | 53 | alter table "public"."entity_bindings_c" add constraint "entity_bindings_c_entity_id_entity_type_key" UNIQUE using index "entity_bindings_c_entity_id_entity_type_key"; 54 | 55 | alter table "public"."entity_bindings_c" add constraint "entity_type must be C" CHECK ((entity_type = 'C'::text)) not valid; 56 | 57 | alter table "public"."entity_bindings_c" validate constraint "entity_type must be C"; -------------------------------------------------------------------------------- /tests/FIXTURES/inherit/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/inherit/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/inherit2/a.sql: -------------------------------------------------------------------------------- 1 | 2 | create table timestamp_base (created_at timestamp default now(), e integer); 3 | 4 | create table a (a1 integer, a2 integer) inherits (timestamp_base); -------------------------------------------------------------------------------- /tests/FIXTURES/inherit2/additions.sql: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/inherit2/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/inherit2/b.sql: -------------------------------------------------------------------------------- 1 | 2 | create table timestamp_base (created_at timestamp default now()); 3 | 4 | create table a (a1 integer, a2 integer) inherits (timestamp_base); 5 | 6 | alter table a drop column a2; 7 | 8 | alter table a add column e integer; -------------------------------------------------------------------------------- /tests/FIXTURES/inherit2/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."timestamp_base" drop column "e"; 2 | 3 | alter table "public"."a" drop column "a2"; 4 | 5 | alter table "public"."a" add column "e" integer; 6 | 7 | 8 | -------------------------------------------------------------------------------- /tests/FIXTURES/inherit2/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/inherit2/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/partitioning/a.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE measurement ( 2 | city_id int not null, 3 | logdate date not null, 4 | peaktemp int, 5 | unitsales int 6 | ) PARTITION BY RANGE (logdate); 7 | 8 | CREATE TABLE measurement_y2006m02 PARTITION OF measurement 9 | FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); 10 | 11 | CREATE TABLE measurement_y2006m03 PARTITION OF measurement 12 | FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); 13 | 14 | CREATE INDEX ON measurement_y2006m02 
(logdate); 15 | 16 | CREATE TABLE reg2partitioned( city_id int not null, logdate date not null, peaktemp int, unitsales int); 17 | 18 | CREATE TABLE partitioned2reg( city_id int not null, logdate date not null, peaktemp int, unitsales int ) PARTITION BY RANGE (logdate); 19 | -------------------------------------------------------------------------------- /tests/FIXTURES/partitioning/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/partitioning/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/partitioning/b.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE measurement ( 2 | city_id int not null, 3 | logdate date not null, 4 | peaktemp int, 5 | unitsales int, 6 | extra text 7 | ) PARTITION BY RANGE (logdate); 8 | 9 | CREATE TABLE measurement_y2005m02 PARTITION OF measurement 10 | FOR VALUES FROM ('2005-02-01') TO ('2005-03-01'); 11 | 12 | CREATE TABLE measurement_y2006m02 PARTITION OF measurement 13 | FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); 14 | 15 | CREATE TABLE measurement_y2006m03 ( 16 | city_id int not null, 17 | logdate date not null, 18 | peaktemp int, 19 | unitsales int 20 | ); 21 | 22 | CREATE TABLE reg2partitioned( city_id int not null, logdate date not null, peaktemp int, unitsales int) PARTITION BY RANGE (logdate); 23 | 24 | CREATE TABLE partitioned2reg( city_id int not null, logdate date not null, peaktemp int, unitsales int); 25 | -------------------------------------------------------------------------------- /tests/FIXTURES/partitioning/expected.sql: -------------------------------------------------------------------------------- 1 | drop index if exists "public"."measurement_y2006m02_logdate_idx"; 2 | 3 | create table "public"."measurement_y2005m02" partition of 
"public"."measurement" FOR VALUES FROM ('2005-02-01') TO ('2005-03-01'); 4 | 5 | 6 | alter table "public"."measurement" detach partition "public"."measurement_y2006m03"; 7 | 8 | drop table "public"."partitioned2reg"; 9 | 10 | create table "public"."partitioned2reg" ( 11 | "city_id" integer not null, 12 | "logdate" date not null, 13 | "peaktemp" integer, 14 | "unitsales" integer 15 | ); 16 | 17 | 18 | drop table "public"."reg2partitioned"; 19 | 20 | create table "public"."reg2partitioned" ( 21 | "city_id" integer not null, 22 | "logdate" date not null, 23 | "peaktemp" integer, 24 | "unitsales" integer 25 | ) partition by RANGE (logdate); 26 | 27 | 28 | alter table "public"."measurement" add column "extra" text; 29 | -------------------------------------------------------------------------------- /tests/FIXTURES/partitioning/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/partitioning/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/privileges/a.sql: -------------------------------------------------------------------------------- 1 | create extension pg_trgm; 2 | 3 | create schema any_schema; 4 | 5 | CREATE TYPE any_enum AS ENUM ('value1', 'value2'); 6 | 7 | CREATE TABLE any_table ( 8 | id serial primary key, 9 | name text not null 10 | ); 11 | 12 | create unique index on any_table(name); 13 | 14 | create view any_view as select * from any_table; 15 | 16 | create view any_other_view as select * from any_table; 17 | 18 | create or replace function any_function(i integer, t text[]) 19 | returns TABLE(a text, c integer) as 20 | $$ 21 | declare 22 | BEGIN 23 | select 'no', 1; 24 | END; 25 | 26 | $$ 27 | LANGUAGE PLPGSQL STABLE returns null on null input security definer; 28 | 29 | 30 | grant select, insert on table any_table to postgres; 31 | 
-------------------------------------------------------------------------------- /tests/FIXTURES/privileges/additions.sql: -------------------------------------------------------------------------------- 1 | grant delete on table any_table to postgres; 2 | 3 | revoke select on table any_table from postgres; 4 | -------------------------------------------------------------------------------- /tests/FIXTURES/privileges/b.sql: -------------------------------------------------------------------------------- 1 | create extension pg_trgm; 2 | 3 | create schema any_schema; 4 | 5 | CREATE TYPE any_enum AS ENUM ('value1', 'value2'); 6 | 7 | CREATE TABLE any_table ( 8 | id serial primary key, 9 | name text not null 10 | ); 11 | 12 | create unique index on any_table(name); 13 | 14 | create view any_view as select * from any_table; 15 | 16 | create or replace function any_function(i integer, t text[]) 17 | returns TABLE(a text, c integer) as 18 | $$ 19 | declare 20 | BEGIN 21 | select 'no', 1; 22 | END; 23 | 24 | $$ 25 | LANGUAGE PLPGSQL STABLE returns null on null input security definer; 26 | 27 | 28 | grant update, insert on table any_table to postgres; 29 | -------------------------------------------------------------------------------- /tests/FIXTURES/privileges/expected.sql: -------------------------------------------------------------------------------- 1 | revoke select on table "public"."any_table" from "postgres"; 2 | 3 | drop view if exists "public"."any_other_view"; 4 | 5 | grant update on table "public"."any_table" to "postgres"; 6 | -------------------------------------------------------------------------------- /tests/FIXTURES/privileges/expected2.sql: -------------------------------------------------------------------------------- 1 | revoke delete on table "public"."any_table" from "postgres"; 2 | 3 | drop view if exists "public"."any_other_view"; 4 | 5 | grant update on table "public"."any_table" to "postgres"; 6 | 
-------------------------------------------------------------------------------- /tests/FIXTURES/rls/a.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE accounts (manager text, company text, contact_email text); 2 | 3 | ALTER TABLE accounts ENABLE ROW LEVEL SECURITY; 4 | 5 | CREATE POLICY account_managers ON accounts TO schemainspect_test_role 6 | USING (manager = current_user); 7 | 8 | CREATE TABLE accounts2 (manager text, company text, contact_email text); 9 | -------------------------------------------------------------------------------- /tests/FIXTURES/rls/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/rls/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/rls/b.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE accounts (manager text, company text, contact_email text); 2 | 3 | ALTER TABLE accounts ENABLE ROW LEVEL SECURITY; 4 | 5 | CREATE POLICY account_managers ON accounts as restrictive TO schemainspect_test_role 6 | USING (manager = current_user); 7 | 8 | CREATE TABLE accounts2 (manager text, company text, contact_email text); 9 | 10 | ALTER TABLE accounts2 ENABLE ROW LEVEL SECURITY; 11 | -------------------------------------------------------------------------------- /tests/FIXTURES/rls/expected.sql: -------------------------------------------------------------------------------- 1 | drop policy "account_managers" on "public"."accounts"; 2 | 3 | alter table "public"."accounts2" enable row level security; 4 | 5 | create policy "account_managers" 6 | on "public"."accounts" 7 | as restrictive 8 | for all 9 | to schemainspect_test_role 10 | using ((manager = CURRENT_USER)); 11 | 
-------------------------------------------------------------------------------- /tests/FIXTURES/rls/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/rls/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/rls2/a.sql: -------------------------------------------------------------------------------- 1 | create table t(); -------------------------------------------------------------------------------- /tests/FIXTURES/rls2/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/rls2/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/rls2/b.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE example ( 2 | id SERIAL PRIMARY KEY, 3 | name text NOT NULL 4 | ); 5 | ALTER TABLE example ENABLE ROW LEVEL SECURITY; 6 | CREATE POLICY example_all ON example FOR ALL 7 | USING (true); -------------------------------------------------------------------------------- /tests/FIXTURES/rls2/expected.sql: -------------------------------------------------------------------------------- 1 | create sequence "public"."example_id_seq"; 2 | 3 | drop table "public"."t"; 4 | 5 | create table "public"."example" ( 6 | "id" integer not null default nextval('example_id_seq'::regclass), 7 | "name" text not null 8 | ); 9 | 10 | 11 | alter table "public"."example" enable row level security; 12 | 13 | alter sequence "public"."example_id_seq" owned by "public"."example"."id"; 14 | 15 | CREATE UNIQUE INDEX example_pkey ON public.example USING btree (id); 16 | 17 | alter table "public"."example" add constraint "example_pkey" PRIMARY KEY using 
index "example_pkey"; 18 | 19 | create policy "example_all" 20 | on "public"."example" 21 | as permissive 22 | for all 23 | to public 24 | using (true); -------------------------------------------------------------------------------- /tests/FIXTURES/rls2/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/rls2/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/seq/a.sql: -------------------------------------------------------------------------------- 1 | create table test ( 2 | id serial primary key 3 | ); 4 | 5 | create table unwanted(); 6 | 7 | create schema other; 8 | 9 | create sequence "public"."test2_id_seq"; 10 | 11 | create table "public"."test2" ( 12 | "id" integer not null default nextval('test2_id_seq'::regclass) 13 | ); 14 | 15 | 16 | CREATE UNIQUE INDEX test2_pkey ON public.test2 USING btree (id); 17 | 18 | 19 | alter table "public"."test2" add constraint "test2_pkey" PRIMARY KEY using index "test2_pkey"; -------------------------------------------------------------------------------- /tests/FIXTURES/seq/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/seq/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/seq/b.sql: -------------------------------------------------------------------------------- 1 | create schema other; 2 | 3 | create sequence "public"."test_id_seq"; 4 | 5 | create table "public"."test" ( 6 | "id" integer not null default nextval('test_id_seq'::regclass) 7 | ); 8 | 9 | 10 | CREATE UNIQUE INDEX test_pkey ON public.test USING btree (id); 11 | 12 | 13 | alter table "public"."test" add constraint "test_pkey" PRIMARY KEY 
using index "test_pkey"; 14 | 15 | 16 | create table test2 ( 17 | id serial primary key 18 | ); -------------------------------------------------------------------------------- /tests/FIXTURES/seq/expected.sql: -------------------------------------------------------------------------------- 1 | drop table "public"."unwanted"; 2 | 3 | alter sequence "public"."test2_id_seq" owned by "public"."test2"."id"; 4 | 5 | alter sequence "public"."test_id_seq" owned by none; 6 | 7 | -------------------------------------------------------------------------------- /tests/FIXTURES/seq/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/seq/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema/a.sql: -------------------------------------------------------------------------------- 1 | create extension hstore; 2 | 3 | create schema goodschema; 4 | 5 | create table goodschema.t(id uuid, value text); 6 | 7 | create table t(id uuid, value text); 8 | 9 | CREATE TYPE goodschema.sdfasdfasdf AS ENUM ('not shipped', 'shipped', 'delivered'); 10 | 11 | create index on goodschema.t(id); 12 | 13 | create view goodschema.v as select 1 AS a; 14 | 15 | grant select on table t to postgres; 16 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/singleschema/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema/b.sql: -------------------------------------------------------------------------------- 1 | create extension citext; 2 | 3 | create schema goodschema; 
4 | 5 | CREATE TYPE goodschema.sdfasdfasdf AS ENUM ('not shipped', 'shipped', 'delivered', 'not delivered'); 6 | 7 | create table goodschema.t(id uuid, name text, value text); 8 | 9 | create view goodschema.v as select 2 as a; 10 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema/expected.sql: -------------------------------------------------------------------------------- 1 | drop index if exists "goodschema"."t_id_idx"; 2 | 3 | alter type "goodschema"."sdfasdfasdf" rename to "sdfasdfasdf__old_version_to_be_dropped"; 4 | 5 | create type "goodschema"."sdfasdfasdf" as enum ('not shipped', 'shipped', 'delivered', 'not delivered'); 6 | 7 | drop type "goodschema"."sdfasdfasdf__old_version_to_be_dropped"; 8 | 9 | alter table "goodschema"."t" add column "name" text; 10 | 11 | create or replace view "goodschema"."v" as SELECT 2 AS a; 12 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/singleschema/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema_ext/a.sql: -------------------------------------------------------------------------------- 1 | create extension hstore; 2 | 3 | create schema goodschema; 4 | 5 | create table goodschema.t(id uuid, value text); 6 | 7 | create table t(id uuid, value text); 8 | 9 | CREATE TYPE goodschema.sdfasdfasdf AS ENUM ('not shipped', 'shipped', 'delivered'); 10 | 11 | create index on goodschema.t(id); 12 | 13 | create view goodschema.v as select 1; 14 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema_ext/additions.sql: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/singleschema_ext/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema_ext/b.sql: -------------------------------------------------------------------------------- 1 | create extension citext; 2 | 3 | create schema goodschema; 4 | 5 | CREATE TYPE goodschema.sdfasdfasdf AS ENUM ('not shipped', 'shipped', 'delivered', 'not delivered'); 6 | 7 | create table goodschema.t(id uuid, name text, value text); 8 | 9 | create view goodschema.v as select 2; 10 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema_ext/expected.sql: -------------------------------------------------------------------------------- 1 | create extension if not exists "citext" with schema "public"; 2 | -------------------------------------------------------------------------------- /tests/FIXTURES/singleschema_ext/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/singleschema_ext/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers/a.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE emp ( 2 | empname text, 3 | salary integer, 4 | last_date timestamp, 5 | last_user text 6 | ); 7 | 8 | CREATE FUNCTION emp_stamp() RETURNS trigger AS $emp_stamp$ 9 | BEGIN 10 | -- Check that empname and salary are given 11 | IF NEW.empname IS NULL THEN 12 | RAISE EXCEPTION 'empname cannot be null'; 13 | END IF; 14 | IF NEW.salary IS NULL THEN 15 | RAISE EXCEPTION '% cannot have null salary', NEW.empname; 16 | END IF; 17 | 18 | -- Who works for us 
when they must pay for it? 19 | IF NEW.salary < 0 THEN 20 | RAISE EXCEPTION '% cannot have a negative salary', NEW.empname; 21 | END IF; 22 | 23 | -- Remember who changed the payroll when 24 | NEW.last_date := current_timestamp; 25 | NEW.last_user := current_user; 26 | RETURN NEW; 27 | END; 28 | $emp_stamp$ LANGUAGE plpgsql; 29 | 30 | CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp 31 | FOR EACH ROW EXECUTE PROCEDURE emp_stamp(); 32 | 33 | CREATE TRIGGER emp_stamp_drop BEFORE INSERT OR UPDATE ON emp 34 | FOR EACH ROW EXECUTE PROCEDURE emp_stamp(); 35 | -------------------------------------------------------------------------------- /tests/FIXTURES/triggers/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers/b.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE emp ( 2 | empname text, 3 | salary integer, 4 | last_date timestamp, 5 | last_user text 6 | ); 7 | 8 | CREATE FUNCTION emp_stamp() RETURNS trigger AS $emp_stamp$ 9 | BEGIN 10 | -- Check that empname and salary are given 11 | IF NEW.empname IS NULL THEN 12 | RAISE EXCEPTION 'empname cannot be null'; 13 | END IF; 14 | IF NEW.salary IS NULL THEN 15 | RAISE EXCEPTION '% cannot have null salary', NEW.empname; 16 | END IF; 17 | 18 | -- Who works for us when they must pay for it? 
19 | IF NEW.salary < 0 THEN 20 | RAISE EXCEPTION '% cannot have a negative salary', NEW.empname; 21 | END IF; 22 | 23 | -- Remember who changed the payroll when 24 | NEW.last_date := current_timestamp; 25 | NEW.last_user := current_user; 26 | RETURN NEW; 27 | END; 28 | $emp_stamp$ LANGUAGE plpgsql; 29 | 30 | CREATE TRIGGER emp_stamp BEFORE UPDATE ON emp 31 | FOR EACH ROW EXECUTE PROCEDURE emp_stamp(); 32 | 33 | CREATE TRIGGER emp_stamp_create BEFORE INSERT OR UPDATE ON emp 34 | FOR EACH ROW EXECUTE PROCEDURE emp_stamp(); 35 | -------------------------------------------------------------------------------- /tests/FIXTURES/triggers/expected.sql: -------------------------------------------------------------------------------- 1 | drop trigger if exists "emp_stamp_drop" on "public"."emp"; 2 | 3 | drop trigger if exists "emp_stamp" on "public"."emp"; 4 | 5 | CREATE TRIGGER emp_stamp_create BEFORE INSERT OR UPDATE ON public.emp FOR EACH ROW EXECUTE FUNCTION emp_stamp(); 6 | 7 | CREATE TRIGGER emp_stamp BEFORE UPDATE ON public.emp FOR EACH ROW EXECUTE FUNCTION emp_stamp(); 8 | -------------------------------------------------------------------------------- /tests/FIXTURES/triggers/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers2/a.sql: -------------------------------------------------------------------------------- 1 | create table table1 ( 2 | id serial primary key 3 | ); 4 | create table table2 ( 5 | id serial primary key, 6 | t text 7 | ); 8 | 9 | create function trigger_func() returns trigger language plpgsql volatile as $$ 10 | begin 11 | RAISE NOTICE 'Hello'; 12 | end; 13 | $$; 14 | 15 | create trigger trigger_name after insert on table1 for each row 16 | execute procedure trigger_func(); 
17 | 18 | create trigger trigger_name after insert on table2 for each row 19 | execute procedure trigger_func(); -------------------------------------------------------------------------------- /tests/FIXTURES/triggers2/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers2/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers2/b.sql: -------------------------------------------------------------------------------- 1 | create table table1 ( 2 | id serial primary key 3 | ); 4 | create table table2 ( 5 | id serial primary key 6 | ); 7 | 8 | create function trigger_func() returns trigger language plpgsql volatile as $$ 9 | begin 10 | RAISE NOTICE 'Hello'; 11 | end; 12 | $$; 13 | 14 | -- note switched trigger order 15 | create trigger trigger_name after insert on table2 for each row 16 | execute procedure trigger_func(); 17 | 18 | create trigger trigger_name after insert on table1 for each row 19 | execute procedure trigger_func(); -------------------------------------------------------------------------------- /tests/FIXTURES/triggers2/expected.sql: -------------------------------------------------------------------------------- 1 | alter table "public"."table2" drop column "t"; -------------------------------------------------------------------------------- /tests/FIXTURES/triggers2/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers2/expected2.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers3/a.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE "my_table" ( 2 | "some_text" text, 3 
| "some_count" int 4 | ); 5 | 6 | CREATE VIEW "view_on_table" AS 7 | SELECT some_text, some_count FROM my_table; 8 | 9 | CREATE OR REPLACE FUNCTION my_function() 10 | RETURNS trigger 11 | LANGUAGE plpgsql 12 | AS $function$ 13 | BEGIN 14 | INSERT INTO my_table (some_text) 15 | VALUES (NEW.some_text); 16 | RETURN NEW; 17 | END; 18 | $function$ 19 | ; 20 | 21 | CREATE TRIGGER trigger_on_view INSTEAD OF 22 | INSERT ON view_on_table 23 | FOR EACH ROW EXECUTE PROCEDURE my_function(); 24 | ; 25 | 26 | INSERT INTO view_on_table VALUES ('this is a test'); -------------------------------------------------------------------------------- /tests/FIXTURES/triggers3/additions.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers3/additions.sql -------------------------------------------------------------------------------- /tests/FIXTURES/triggers3/b.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | CREATE TABLE "my_table" ( 4 | "some_text" text, 5 | "some_date" timestamp, 6 | "some_count" int 7 | ); 8 | 9 | CREATE VIEW "view_on_table" AS 10 | SELECT some_text, some_date, some_count FROM my_table; 11 | 12 | CREATE OR REPLACE FUNCTION my_function() 13 | RETURNS trigger 14 | LANGUAGE plpgsql 15 | AS $function$ 16 | BEGIN 17 | INSERT INTO my_table (some_text) 18 | VALUES (NEW.some_text); 19 | RETURN NEW; 20 | END; 21 | $function$ 22 | ; 23 | 24 | CREATE TRIGGER trigger_on_view INSTEAD OF 25 | INSERT ON view_on_table 26 | FOR EACH ROW EXECUTE PROCEDURE my_function(); 27 | ; -------------------------------------------------------------------------------- /tests/FIXTURES/triggers3/expected.sql: -------------------------------------------------------------------------------- 1 | drop trigger if exists "trigger_on_view" on "public"."view_on_table"; 2 | 3 | drop view if exists 
"public"."view_on_table"; 4 | 5 | alter table "public"."my_table" add column "some_date" timestamp without time zone; 6 | 7 | create or replace view "public"."view_on_table" as SELECT my_table.some_text, 8 | my_table.some_date, 9 | my_table.some_count 10 | FROM my_table; 11 | 12 | 13 | CREATE TRIGGER trigger_on_view INSTEAD OF INSERT ON public.view_on_table FOR EACH ROW EXECUTE FUNCTION my_function(); 14 | -------------------------------------------------------------------------------- /tests/FIXTURES/triggers3/expected2.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/FIXTURES/triggers3/expected2.sql -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djrobstep/migra/da6671acae0b4acebdec804ae25a0c65cf8561c2/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_migra.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import io 4 | from difflib import ndiff as difflib_diff 5 | 6 | import pytest 7 | 8 | # import yaml 9 | from pytest import raises 10 | from schemainspect import get_inspector 11 | from sqlbag import S, load_sql_from_file, temporary_database 12 | 13 | from migra import Migration, Statements, UnsafeMigrationException 14 | from migra.command import parse_args, run 15 | 16 | 17 | def textdiff(a, b): 18 | cd = difflib_diff(a.splitlines(), b.splitlines()) 19 | return "\n" + "\n".join(cd) + "\n" 20 | 21 | 22 | SQL = """select 1; 23 | 24 | select 2; 25 | 26 | """ 27 | DROP = "drop table x;" 28 | 29 | 30 | def test_statements(): 31 | s1 = Statements(["select 1;"]) 32 | s2 = Statements(["select 2;"]) 33 | s3 = s1 + s2 34 | assert 
type(s1) == type(s2) == type(s3) 35 | s3 = s3 + Statements([DROP]) 36 | with raises(UnsafeMigrationException): 37 | assert s3.sql == SQL 38 | s3.safe = False 39 | SQL_WITH_DROP = SQL + DROP + "\n\n" 40 | assert s3.sql == SQL_WITH_DROP 41 | 42 | 43 | def outs(): 44 | return io.StringIO(), io.StringIO() 45 | 46 | 47 | def test_singleschema(): 48 | for FIXTURE_NAME in ["singleschema"]: 49 | do_fixture_test(FIXTURE_NAME, schema="goodschema") 50 | 51 | 52 | def test_excludeschema(): 53 | for FIXTURE_NAME in ["excludeschema"]: 54 | do_fixture_test(FIXTURE_NAME, exclude_schema="excludedschema") 55 | 56 | 57 | def test_singleschema_ext(): 58 | for FIXTURE_NAME in ["singleschema_ext"]: 59 | do_fixture_test(FIXTURE_NAME, create_extensions_only=True) 60 | 61 | 62 | def test_extversions(): 63 | for FIXTURE_NAME in ["extversions"]: 64 | do_fixture_test(FIXTURE_NAME, ignore_extension_versions=False) 65 | 66 | 67 | fixtures = """\ 68 | everything 69 | collations 70 | identitycols 71 | partitioning 72 | privileges 73 | enumdefaults 74 | enumdeps 75 | seq 76 | inherit 77 | inherit2 78 | triggers 79 | triggers2 80 | triggers3 81 | dependencies 82 | dependencies2 83 | dependencies3 84 | dependencies4 85 | constraints 86 | generated 87 | """.split() 88 | 89 | 90 | @pytest.mark.parametrize("fixture_name", fixtures) 91 | def test_fixtures(fixture_name): 92 | do_fixture_test(fixture_name, with_privileges=True) 93 | 94 | 95 | schemainspect_test_role = "schemainspect_test_role" 96 | 97 | 98 | def create_role(s, rolename): 99 | role = s.execute( 100 | """ 101 | SELECT 1 FROM pg_roles WHERE rolname=:rolename 102 | """, 103 | dict(rolename=rolename), 104 | ) 105 | 106 | role_exists = bool(list(role)) 107 | 108 | if not role_exists: 109 | s.execute( 110 | f""" 111 | create role {rolename}; 112 | """ 113 | ) 114 | 115 | 116 | def test_rls(): 117 | for FIXTURE_NAME in ["rls", "rls2"]: 118 | do_fixture_test(FIXTURE_NAME, with_privileges=True) 119 | 120 | 121 | check_expected = True 122 | 123 | 
def do_fixture_test(
    fixture_name,
    schema=None,
    create_extensions_only=False,
    ignore_extension_versions=True,
    with_privileges=False,
    exclude_schema=None,
):
    """End-to-end check of one fixture directory under tests/FIXTURES/<name>/.

    Loads a.sql and b.sql into two temporary databases, then verifies:
      1. the CLI refuses destructive statements without --unsafe (exit 3,
         error on stderr only);
      2. the CLI diff, run with the flags matching this function's keyword
         arguments, matches expected.sql (exit 2 = differences found);
      3. the library API generates the same migration (expected2.sql when
         additions.sql is non-empty) and applying it leaves no pending diff
         (CLI exit 0);
      4. alternative and empty Migration constructions behave sanely.

    Fixture files are read inside `with` blocks so handles are closed
    promptly (the previous `io.open(...).read()` calls leaked descriptors
    until GC and triggered ResourceWarning; `io.open` is also just a
    legacy alias of the builtin `open`).
    """
    # Build the CLI flag list mirroring the keyword arguments.
    flags = ["--unsafe"]
    if schema:
        flags += ["--schema", schema]
    if exclude_schema:
        flags += ["--exclude_schema", exclude_schema]
    if create_extensions_only:
        flags += ["--create-extensions-only"]
    if ignore_extension_versions:
        flags += ["--ignore-extension-versions"]
    if with_privileges:
        flags += ["--with-privileges"]
    fixture_path = "tests/FIXTURES/{}/".format(fixture_name)
    with open(fixture_path + "expected.sql") as f:
        EXPECTED = f.read().strip()
    with temporary_database(host="localhost") as d0, temporary_database(
        host="localhost"
    ) as d1:
        # The rls fixtures reference this role, so ensure it exists first.
        with S(d0) as s0:
            create_role(s0, schemainspect_test_role)
        with S(d0) as s0, S(d1) as s1:
            load_sql_from_file(s0, fixture_path + "a.sql")
            load_sql_from_file(s1, fixture_path + "b.sql")

        # Without --unsafe: exit code 3, nothing on stdout, error on stderr.
        args = parse_args([d0, d1])
        assert not args.unsafe
        assert args.schema is None

        out, err = outs()
        assert run(args, out=out, err=err) == 3
        assert out.getvalue() == ""

        DESTRUCTIVE = "-- ERROR: destructive statements generated. Use the --unsafe flag to suppress this error.\n"

        assert err.getvalue() == DESTRUCTIVE

        # With the full flag set: exit code 2 (changes found), diff on stdout.
        args = parse_args(flags + [d0, d1])
        assert args.unsafe
        assert args.schema == schema
        out, err = outs()
        assert run(args, out=out, err=err) == 2
        assert err.getvalue() == ""

        output = out.getvalue().strip()
        if check_expected:
            assert output == EXPECTED

        with open(fixture_path + "additions.sql") as f:
            ADDITIONS = f.read().strip()
        with open(fixture_path + "expected2.sql") as f:
            EXPECTED2 = f.read().strip()

        # Same diff via the library API instead of the CLI.
        with S(d0) as s0, S(d1) as s1:
            m = Migration(
                s0,
                s1,
                schema=schema,
                exclude_schema=exclude_schema,
                ignore_extension_versions=ignore_extension_versions,
            )
            m.inspect_from()
            m.inspect_target()
            with raises(AttributeError):
                m.changes.nonexist
            m.set_safety(False)
            if ADDITIONS:
                m.add_sql(ADDITIONS)
                m.apply()

            if create_extensions_only:
                m.add_extension_changes(drops=False)
            else:
                m.add_all_changes(privileges=with_privileges)

            expected = EXPECTED2 if ADDITIONS else EXPECTED

            if check_expected:
                assert m.sql.strip() == expected  # sql generated OK

            m.apply()
            # check for changes again and make sure none are pending
            if create_extensions_only:
                m.add_extension_changes(drops=False)
                # drops=False: the target's extensions must now all exist
                # in the source, i.e. be a subset of the source's.
                assert (
                    m.changes.i_from.extensions.items()
                    >= m.changes.i_target.extensions.items()
                )
            else:
                m.add_all_changes(privileges=with_privileges)

                # y0 = yaml.safe_dump(m.changes.i_from._as_dicts())
                # y1 = yaml.safe_dump(m.changes.i_target._as_dicts())

                # print(textdiff(y0, y1))
                # print(m.statements)

                assert m.changes.i_from == m.changes.i_target
            assert not m.statements  # no further statements to apply
            assert m.sql == ""
        out, err = outs()

        # Migration applied above, so the CLI now reports no differences.
        assert run(args, out=out, err=err) == 0
        # test alternative parameters
        with S(d0) as s0, S(d1) as s1:
            m = Migration(
                get_inspector(s0), get_inspector(s1), ignore_extension_versions=True
            )
        # test empty
        m = Migration(None, None)
        m.add_all_changes(privileges=with_privileges)
        with raises(AttributeError):
            m.s_from
        with raises(AttributeError):
            m.s_target
        args = parse_args(flags + ["EMPTY", "EMPTY"])
        out, err = outs()
        assert run(args, out=out, err=err) == 0