├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .ruby-version
├── CHANGELOG.md
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── README.md
├── Rakefile
├── activerecord-clean-db-structure.gemspec
├── lib
│   ├── activerecord-clean-db-structure.rb
│   └── activerecord-clean-db-structure
│       ├── clean_dump.rb
│       ├── railtie.rb
│       ├── tasks
│       │   └── clean_db_structure.rake
│       └── version.rb
└── test
    ├── clean_dump_test.rb
    ├── data
    │   ├── ignored_schemas.sql
    │   ├── input.sql
    │   └── partitions.sql
    ├── expectations
    │   ├── default_props.sql
    │   ├── ignore_ids.sql
    │   ├── ignored_schemas_myschema.sql
    │   ├── ignored_schemas_pganalyze.sql
    │   ├── indexes_after_tables.sql
    │   ├── keep_extensions_all.sql
    │   ├── order_column_definitions.sql
    │   ├── order_schema_migrations_values.sql
    │   └── partitions.sql
    └── test_helper.rb
/.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | jobs: 5 | test: 6 | runs-on: ubuntu-latest 7 | strategy: 8 | matrix: 9 | ruby: ["3.1", "3.2"] 10 | steps: 11 | - uses: actions/checkout@v3 12 | - uses: ruby/setup-ruby@v1 13 | with: 14 | ruby-version: ${{ matrix.ruby }} 15 | bundler-cache: true 16 | - run: bundle exec rake test 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | pkg 2 | -------------------------------------------------------------------------------- /.ruby-version: -------------------------------------------------------------------------------- 1 | 3.2.3 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Unreleased 4 | 5 | * ... 6 | 7 | ## 0.4.3 2024-09-22 8 | 9 | * Fix additional regexp warnings from "rake test".
[#50](https://github.com/lfittl/activerecord-clean-db-structure/pull/50) 10 | 11 | ## 0.4.2 2024-09-22 12 | 13 | * Add an option to not remove extensions [#43](https://github.com/lfittl/activerecord-clean-db-structure/pull/43) 14 | * Fix incorrect primary structure.sql filename in the case of multi-database setup [#44](https://github.com/lfittl/activerecord-clean-db-structure/pull/44) 15 | * Fix regexp warnings [#49](https://github.com/lfittl/activerecord-clean-db-structure/pull/49) 16 | 17 | ## 0.4.1 2024-08-28 18 | 19 | * Fix Rake task name for 6.1+ [#32](https://github.com/lfittl/activerecord-clean-db-structure/pull/32) 20 | * Bump activesupport [#36](https://github.com/lfittl/activerecord-clean-db-structure/pull/36), [#35](https://github.com/lfittl/activerecord-clean-db-structure/pull/35) 21 | * Bump tzinfo [#29](https://github.com/lfittl/activerecord-clean-db-structure/pull/29) 22 | * Update rake [#30](https://github.com/lfittl/activerecord-clean-db-structure/pull/30) 23 | * Bump activerecord [#34](https://github.com/lfittl/activerecord-clean-db-structure/pull/34) 24 | 25 | ## 0.4.0 2019-08-27 26 | 27 | * Add "indexes_after_tables" option to allow indexes to be placed following the respective tables [#13](https://github.com/lfittl/activerecord-clean-db-structure/pull/13) [Giovanni Kock Bonetti](https://github.com/giovannibonetti) 28 | * Add "order_schema_migrations_values" option to prevent schema_migrations values causing merge conflicts [#15](https://github.com/lfittl/activerecord-clean-db-structure/pull/15) [Nicke van Oorschot](https://github.com/nvanoorschot) 29 | * Add "order_column_definitions" option to sort table columns alphabetically [#11](https://github.com/lfittl/activerecord-clean-db-structure/pull/11) [RKushnir](https://github.com/RKushnir) 30 | * Generalize handling of schema names to not assume public 31 | * Rails 6 support 32 | * Fix Rails 6 compatibility [#16](https://github.com/lfittl/activerecord-clean-db-structure/pull/16) [Giovanni Kock Bonetti](https://github.com/giovannibonetti) 33 | * Fix handling of multiple structure.sql files 34 | * Remove Postgres 12 specific GUCs 35 | * Generalize handling of schema names to not assume public 36 | * Fix whitespace issue for config settings, remove default_with_oids 37 | 38 | 39 | ## 0.3.0 2019-05-07 40 | 41 | * Add "ignore_ids" option to allow disabling of primary key substitution logic [#12](https://github.com/lfittl/activerecord-clean-db-structure/pull/12) [Vladimir Dementyev](https://github.com/palkan) 42 | * Compatibility with Rails 6 multi-database configuration 43 | 44 | 45 | ## 0.2.6 2018-03-11 46 | 47 | * Fix regular expressions to support schema qualification changes in 10.3 48 | 49 | 50 | ## 0.2.5 2017-11-15 51 | 52 | * Filter out indices belonging partitioned tables 53 | 54 | 55 | ## 0.2.4 2017-11-02 56 | 57 | * Remove pg_buffercache extension if present (its only used for statistics purposes) 58 | * Remove extension comments if present - they can prevent non-superusers from 59 | restoring the tables, and are never used together with Rails anyway 60 | 61 | 62 | ## 0.2.3 2017-10-21 63 | 64 | * pg 10.x adds AS Integer to structure.sql format [Nathan Woodhull](https://github.com/woodhull) 65 | 66 | 67 | ## 0.2.2 2017-08-05 68 | 69 | * Support Rails 5.1 primary key UUIDs that rely on gen_random_uuid() 70 | 71 | 72 | ## 0.2.1 2017-06-30 73 | 74 | * Allow primary keys to be the last column of a table [Clemens Kofler](https://github.com/clemens) 75 | - Special thanks to [Jon Mohrbacher](https://github.com/johnnymo87) 
who submitted a similar earlier change 76 | 77 | 78 | ## 0.2.0 2017-03-20 79 | 80 | * Reduce dependencies to only require ActiveRecord [Mario Uher](https://github.com/ream88) 81 | * Support Rails Engines [Mario Uher](https://github.com/ream88) 82 | * Clean up more comment lines [Clemens Kofler](https://github.com/clemens) 83 | 84 | 85 | ## 0.1.0 2017-02-12 86 | 87 | * Initial release. 88 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gemspec 4 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | PATH 2 | remote: . 3 | specs: 4 | activerecord-clean-db-structure (0.4.3) 5 | activerecord (>= 4.2) 6 | 7 | GEM 8 | remote: https://rubygems.org/ 9 | specs: 10 | activemodel (7.2.1) 11 | activesupport (= 7.2.1) 12 | activerecord (7.2.1) 13 | activemodel (= 7.2.1) 14 | activesupport (= 7.2.1) 15 | timeout (>= 0.4.0) 16 | activesupport (7.2.1) 17 | base64 18 | bigdecimal 19 | concurrent-ruby (~> 1.0, >= 1.3.1) 20 | connection_pool (>= 2.2.5) 21 | drb 22 | i18n (>= 1.6, < 2) 23 | logger (>= 1.4.2) 24 | minitest (>= 5.1) 25 | securerandom (>= 0.3) 26 | tzinfo (~> 2.0, >= 2.0.5) 27 | base64 (0.2.0) 28 | bigdecimal (3.1.8) 29 | concurrent-ruby (1.3.4) 30 | connection_pool (2.4.1) 31 | drb (2.2.1) 32 | i18n (1.14.5) 33 | concurrent-ruby (~> 1.0) 34 | logger (1.6.1) 35 | minitest (5.25.1) 36 | rake (13.2.1) 37 | securerandom (0.3.1) 38 | timeout (0.4.1) 39 | tzinfo (2.0.6) 40 | concurrent-ruby (~> 1.0) 41 | 42 | PLATFORMS 43 | arm64-darwin-22 44 | ruby 45 | 46 | DEPENDENCIES 47 | activerecord-clean-db-structure! 48 | rake (~> 13) 49 | 50 | BUNDLED WITH 51 | 2.5.18 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, Lukas Fittl 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of Lukas Fittl nor the names of its contributors may be used 15 | to endorse or promote products derived from this software without specific 16 | prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 22 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 | POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## activerecord-clean-db-structure [ ![](https://img.shields.io/gem/v/activerecord-clean-db-structure.svg)](https://rubygems.org/gems/activerecord-clean-db-structure) [ ![](https://img.shields.io/gem/dt/activerecord-clean-db-structure.svg)](https://rubygems.org/gems/activerecord-clean-db-structure) 2 | 3 | Ever been annoyed at a constantly changing `db/structure.sql` file when using ActiveRecord and Postgres? 4 | 5 | Spent hours trying to decipher why that one team member keeps changing the file? 6 | 7 | This library is here to help! 8 | 9 | It automatically cleans away all the unnecessary output in the file every time it's updated. This helps avoid merge conflicts and increases readability. 10 | 11 | ## Installation 12 | 13 | Add the following to your Gemfile: 14 | 15 | ```ruby 16 | gem 'activerecord-clean-db-structure' 17 | ``` 18 | 19 | This will automatically hook the library into your `rake db:migrate` task. 20 | 21 | ## Supported Rails versions 22 | 23 | Whilst there is no reason this shouldn't work on earlier versions, this has only been tested on Rails 4.2 and newer. 24 | 25 | It also assumes you use ActiveRecord with PostgreSQL - other ORMs or databases are not supported. 26 | 27 | ## Caveats 28 | 29 | Currently the library assumes all your `id` columns are either SERIAL, BIGSERIAL or uuid. It also assumes that `id` is the primary key. 30 | 31 | Multi-column primary keys, as well as tables that don't have `id` as the primary key, are not supported right now and might lead to wrong output.
32 | 33 | You can disable this part of the _cleaning_ process in your `config/environments/*.rb` (or `config/application.rb`): 34 | 35 | ```ruby 36 | Rails.application.configure do 37 | config.activerecord_clean_db_structure.ignore_ids = true 38 | end 39 | ``` 40 | 41 | ## Other options 42 | 43 | ### indexes_after_tables 44 | 45 | You can optionally have indexes placed after their respective tables by setting `indexes_after_tables`: 46 | 47 | ```ruby 48 | Rails.application.configure do 49 | config.activerecord_clean_db_structure.indexes_after_tables = true 50 | end 51 | ``` 52 | 53 | When it is enabled, the structure looks like this: 54 | 55 | ```sql 56 | CREATE TABLE public.users ( 57 | id SERIAL PRIMARY KEY, 58 | tenant_id integer, 59 | email text NOT NULL 60 | ); 61 | 62 | CREATE INDEX index_users_on_tenant_id ON public.users USING btree (tenant_id); 63 | CREATE UNIQUE INDEX index_users_on_email ON public.users USING btree (email); 64 | ``` 65 | 66 | ### order_column_definitions 67 | 68 | To sort the table column definitions alphabetically, discarding the actual order provided by `pg_dump`, set `order_column_definitions`: 69 | 70 | ```ruby 71 | Rails.application.configure do 72 | config.activerecord_clean_db_structure.order_column_definitions = true 73 | end 74 | ``` 75 | 76 | ### order_schema_migrations_values 77 | 78 | You can have the schema_migrations values reorganized to prevent merge conflicts by setting `order_schema_migrations_values`: 79 | 80 | ```ruby 81 | Rails.application.configure do 82 | config.activerecord_clean_db_structure.order_schema_migrations_values = true 83 | end 84 | ``` 85 | 86 | When it is enabled, the values are ordered chronologically and the semicolon is placed on a separate line: 87 | 88 | ```sql 89 | INSERT INTO "schema_migrations" (version) VALUES 90 | ('20190503120501') 91 | ,('20190508123941') 92 | ,('20190508132644') 93 | ; 94 | ``` 95 | 96 | ### keep_extensions 97 | 98 | By default the gem will remove [some extensions](https://github.com/ghiculescu/activerecord-clean-db-structure/blob/c9551391476a5e7a08ff314501af89baddcf669a/lib/activerecord-clean-db-structure/clean_dump.rb#L24) that typically aren't needed in structure dumps. You can choose to keep all, or just some, of those extensions: 99 | 100 | ```ruby 101 | Rails.application.configure do 102 | config.activerecord_clean_db_structure.keep_extensions = :all 103 | 104 | # This does the same thing as :all. You can choose which optional extensions to keep. 105 | config.activerecord_clean_db_structure.keep_extensions = ['pg_stat_statements', 'pg_buffercache'] 106 | end 107 | ``` 108 | 109 | ### ignore_schemas 110 | 111 | You can ignore specific schemas, for example when dumping from a database copy that is integrated with pganalyze and has helper functions: 112 | 113 | ```ruby 114 | Rails.application.configure do 115 | config.activerecord_clean_db_structure.ignore_schemas = ['pganalyze'] 116 | end 117 | ``` 118 | 119 | ## Authors 120 | 121 | * [Lukas Fittl](https://github.com/lfittl) 122 | 123 | ## License 124 | 125 | Copyright (c) 2017, Lukas Fittl
126 | activerecord-clean-db-structure is licensed under the 3-clause BSD license, see LICENSE file for details. 127 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'bundler/setup' 3 | require 'bundler/gem_tasks' 4 | require "rake/testtask" 5 | 6 | Rake::TestTask.new(:test) do |t| 7 | t.libs << "test" 8 | t.libs << "lib" 9 | t.test_files = FileList["test/**/*_test.rb"] 10 | end 11 | 12 | task default: %i[test] 13 | -------------------------------------------------------------------------------- /activerecord-clean-db-structure.gemspec: -------------------------------------------------------------------------------- 1 | $:.push File.expand_path('../lib', __FILE__) 2 | require 'activerecord-clean-db-structure/version' 3 | 4 | Gem::Specification.new do |s| 5 | s.name = 'activerecord-clean-db-structure' 6 | s.version = ActiveRecordCleanDbStructure::VERSION 7 | s.summary = 'Automatic cleanup for the Rails db/structure.sql file (ActiveRecord/PostgreSQL)' 8 | s.description = 'Never worry about weird diffs and merge conflicts again' 9 | s.authors = ['Lukas Fittl'] 10 | s.email = 'lukas@fittl.com' 11 | 12 | s.files = `git ls-files`.split("\n") 13 | s.test_files = `git ls-files -- {spec}/*`.split("\n") 14 | s.require_paths = ['lib'] 15 | s.homepage = 'https://github.com/lfittl/activerecord-clean-db-structure' 16 | s.license = 'MIT' 17 | 18 | s.add_dependency('activerecord', '>= 4.2') 19 | 20 | s.add_development_dependency 'rake', '~> 13' 21 | end 22 | -------------------------------------------------------------------------------- /lib/activerecord-clean-db-structure.rb: -------------------------------------------------------------------------------- 1 | require 'activerecord-clean-db-structure/railtie' if defined?(Rails) 2 | -------------------------------------------------------------------------------- /lib/activerecord-clean-db-structure/clean_dump.rb: -------------------------------------------------------------------------------- 1 | module ActiveRecordCleanDbStructure 2 | class CleanDump 3 | attr_reader :dump, :options 4 | 5 | def initialize(dump, options = {}) 6 | @dump = dump 7 | @options = options 8 | end 9 | 10 | def run 11 | clean_partition_tables # Must be first because it makes assumptions about string format 12 | clean 13 | clean_ignored_schemas 14 | clean_inherited_tables 15 | clean_options 16 | clean_schema_comments 17 | end 18 | 19 | def clean 20 | # Remove trailing whitespace 21 | dump.gsub!(/[ \t]+$/, '') 22 | dump.gsub!(/\A\n/, '') 23 | dump.gsub!(/\n\n\z/, "\n") 24 | 25 | # Remove version-specific output 26 | dump.gsub!(/^-- Dumped.*/, '') 27 | dump.gsub!(/^SET row_security = off;\n/m, '') # 9.5 28 | dump.gsub!(/^SET idle_in_transaction_session_timeout = 0;\n/m, '') # 9.6 29 | dump.gsub!(/^SET transaction_timeout = 0;\n/m, '') # 17 30 | dump.gsub!(/^SET default_with_oids = false;\n/m, '') # all older than 12 31 | dump.gsub!(/^SET xmloption = content;\n/m, '') # 12 32 | dump.gsub!(/^SET default_table_access_method = heap;\n/m, '') # 12 33 | 34 | extensions_to_remove = ["pg_stat_statements", "pg_buffercache"] 35 | if options[:keep_extensions] == :all 36 | extensions_to_remove = [] 37 | elsif options[:keep_extensions] 38 | extensions_to_remove -= Array(options[:keep_extensions]) 39 | end 40 | extensions_to_remove.each do |ext| 41 | dump.gsub!(/^CREATE EXTENSION IF NOT EXISTS #{ext}.*/, '') 42 | dump.gsub!(/^-- Name: 
(EXTENSION )?#{ext};.*/, '') 43 | end 44 | 45 | # Remove comments on extensions, they create problems if the extension is owned by another user 46 | dump.gsub!(/^COMMENT ON EXTENSION .*/, '') 47 | 48 | # Remove useless, version-specific parts of comments 49 | dump.gsub!(/^-- (.*); Owner: -.*/, '-- \1') 50 | 51 | # Remove useless comment lines 52 | dump.gsub!(/^--$/, '') 53 | 54 | unless options[:ignore_ids] == true 55 | # Reduce noise for id fields by making them SERIAL instead of integer+sequence stuff 56 | # 57 | # This is a bit optimistic, but works as long as you don't have an id field thats not a sequence/uuid 58 | dump.gsub!(/^ id integer NOT NULL(,)?$/, ' id SERIAL PRIMARY KEY\1') 59 | dump.gsub!(/^ id bigint NOT NULL(,)?$/, ' id BIGSERIAL PRIMARY KEY\1') 60 | dump.gsub!(/^ id uuid DEFAULT ([\w]+\.)?uuid_generate_v4\(\) NOT NULL(,)?$/, ' id uuid DEFAULT \1uuid_generate_v4() PRIMARY KEY\2') 61 | dump.gsub!(/^ id uuid DEFAULT ([\w]+\.)?gen_random_uuid\(\) NOT NULL(,)?$/, ' id uuid DEFAULT \1gen_random_uuid() PRIMARY KEY\2') 62 | dump.gsub!(/^CREATE SEQUENCE [\w\.]+_id_seq\s+(AS integer\s+)?START WITH 1\s+INCREMENT BY 1\s+NO MINVALUE\s+NO MAXVALUE\s+CACHE 1;$/, '') 63 | dump.gsub!(/^ALTER SEQUENCE [\w\.]+_id_seq OWNED BY .*;$/, '') 64 | dump.gsub!(/^ALTER TABLE ONLY [\w\.]+ ALTER COLUMN id SET DEFAULT nextval\('[\w\.]+_id_seq'::regclass\);$/, '') 65 | dump.gsub!(/^ALTER TABLE ONLY [\w\.]+\s+ADD CONSTRAINT [\w\.]+_pkey PRIMARY KEY \(id\);$/, '') 66 | dump.gsub!(/^-- Name: (\w+\s+)?id; Type: DEFAULT; Schema: \w+$/, '') 67 | dump.gsub!(/^-- .*_id_seq; Type: SEQUENCE.*/, '') 68 | dump.gsub!(/^-- Name: (\w+\s+)?\w+_pkey; Type: CONSTRAINT; Schema: \w+$/, '') 69 | end 70 | end 71 | 72 | def clean_inherited_tables 73 | inherited_tables_regexp = /-- Name: ([\w\.]+); Type: TABLE; Schema: \w+\n\n[^;]+?INHERITS \([\w\.]+\);/m 74 | inherited_tables = dump.scan(inherited_tables_regexp).map(&:first) 75 | dump.gsub!(inherited_tables_regexp, '') 76 | inherited_tables.each do |inherited_table| 77 | dump.gsub!(/ALTER TABLE ONLY ([\w_]+\.)?#{inherited_table}[^;]+;/, '') 78 | 79 | index_regexp = /CREATE INDEX ([\w_]+) ON ([\w_]+\.)?#{inherited_table}[^;]+;/m 80 | dump.scan(index_regexp).map(&:first).each do |inherited_table_index| 81 | dump.gsub!(/-- Name: #{inherited_table_index}; Type: INDEX; Schema: \w+/, '') 82 | end 83 | dump.gsub!(index_regexp, '') 84 | end 85 | end 86 | 87 | def clean_ignored_schemas 88 | return if options[:ignore_schemas].nil? 
89 | options[:ignore_schemas].each do |schema| 90 | dump.gsub!(/-- Name: ([^;]+); Type: \w+; Schema: #{schema}\n\n(?:(?!(-- Name:|SET default_tablespace)).)*/m, '') 91 | dump.gsub!(/-- Name: #{schema}; Type: SCHEMA; Schema: -\n\n(?:(?!(-- Name:|SET default_tablespace)).)*/m, '') 92 | dump.gsub!(/-- Name: \w+; Type: EXTENSION; Schema: -\n\n\nCREATE EXTENSION IF NOT EXISTS \w+ WITH SCHEMA #{schema};(?:(?!(-- Name:|SET default_tablespace)).)*/m, '') 93 | end 94 | end 95 | 96 | def clean_partition_tables 97 | partitioned_tables = [] 98 | 99 | # Postgres 12 pg_dump will output separate ATTACH PARTITION statements (even when run against an 11 or older server) 100 | partitioned_tables_regexp1 = /ALTER TABLE ONLY [\w\.]+ ATTACH PARTITION ([\w\.]+)/ 101 | partitioned_tables += dump.scan(partitioned_tables_regexp1).map(&:last) 102 | 103 | # Earlier versions use an inline PARTITION OF 104 | partitioned_tables_regexp2 = /-- Name: ([\w\.]+); Type: TABLE\n\n[^;]+?PARTITION OF [\w\.]+\n[^;]+?;/m 105 | partitioned_tables += dump.scan(partitioned_tables_regexp2).map(&:first) 106 | 107 | # We assume that a comment + schema statement pair has 3 trailing newlines. 108 | # This makes it easier to drop both the comment and statement at once. 109 | statements = dump.split("\n\n\n") 110 | names = [] 111 | partitioned_tables.each { |table| names << table.split('.', 2)[1] } 112 | if names.any? 113 | dump.scan(/CREATE (UNIQUE )?INDEX (\w+) ON (\w+\.)?(#{names.join('|')})[^;]+;/m).each { |m| names << m[1] } 114 | end 115 | statements.reject! { |stmt| names.any? { |name| stmt.include?(name) } } 116 | @dump = statements.join("\n\n") 117 | @dump << "\n" if @dump[-1] != "\n" 118 | 119 | # This is mostly done to allow restoring Postgres 11 output on Postgres 10 120 | dump.gsub!(/CREATE INDEX ([\w]+) ON ONLY/, 'CREATE INDEX \\1 ON') 121 | end 122 | 123 | def clean_options 124 | if options[:order_schema_migrations_values] == true 125 | schema_migrations_cleanup 126 | else 127 | # Remove whitespace between schema migration INSERTS to make editing easier 128 | dump.gsub!(/^(INSERT INTO schema_migrations .*)\n\n/, "\\1\n") 129 | end 130 | 131 | if options[:indexes_after_tables] == true 132 | # Extract indexes, remove comments and place them just after the respective tables 133 | indexes = 134 | dump 135 | .scan(/^CREATE.+INDEX.+ON.+\n/) 136 | .group_by { |line| line.scan(/\b\w+\.\w+\b/).first } 137 | .transform_values(&:join) 138 | 139 | dump.gsub!(/^CREATE( UNIQUE)? 
INDEX \w+ ON .+\n+/, '') 140 | dump.gsub!(/^-- Name: \w+; Type: INDEX; Schema: \w+\n+/, '') 141 | indexes.each do |table, indexes_for_table| 142 | dump.gsub!(/^(CREATE TABLE #{table}\b(:?[^;\n]*\n)+\);*\n(?:.*);*)/) { $1 + "\n\n" + indexes_for_table } 143 | end 144 | end 145 | 146 | # Reduce 2+ lines of whitespace to one line of whitespace 147 | dump.gsub!(/\n{2,}/m, "\n\n") 148 | 149 | if options[:order_column_definitions] == true 150 | dump.replace(order_column_definitions(dump)) 151 | end 152 | end 153 | 154 | def clean_schema_comments 155 | # Remove schema in comments for backwards compatibility 156 | dump.gsub!(/^-- (.*); Schema: [\w-]+/, '-- \1') 157 | end 158 | 159 | def order_column_definitions(source) 160 | result = [] 161 | 162 | parse_column_name = ->(line) { line.match(/^ "?([^" ]+)/)[1] } 163 | with_column_separator = ->(line) { line.sub(/,?\n$/, ",\n") } 164 | without_column_separator = ->(line) { line.sub(/,\n$/, "\n") } 165 | 166 | inside_table = false 167 | columns = [] 168 | 169 | source.each_line do |source_line| 170 | if source_line.start_with?("CREATE TABLE") 171 | inside_table = true 172 | columns = [] 173 | result << source_line 174 | elsif source_line.start_with?(")") 175 | if inside_table 176 | inside_table = false 177 | columns.sort_by!(&:first) 178 | 179 | columns[0..-2].each do |_, line| 180 | result << with_column_separator[line] 181 | end 182 | 183 | result << without_column_separator[columns.last[1]] 184 | end 185 | 186 | result << source_line 187 | elsif inside_table 188 | columns << [parse_column_name[source_line], source_line] 189 | else 190 | result << source_line 191 | end 192 | end 193 | 194 | result.join 195 | end 196 | 197 | private 198 | 199 | # Cleanup of schema_migrations values to prevent merge conflicts: 200 | # - sorts all values chronological 201 | # - places the comma's in front of each value (except for the first) 202 | # - places the semicolon on a separate last line 203 | def schema_migrations_cleanup 204 | # Read all schema_migrations values from the dump. 205 | values = dump.scan(/^(\(\'\d{14}\'\))[,;]\n/).flatten.sort 206 | 207 | # Replace the schema_migrations values. 208 | dump.sub!( 209 | /(?<=INSERT INTO "schema_migrations" \(version\) VALUES).+;\n*/m, 210 | "\n #{values.join("\n,")}\n;\n\n" 211 | ) 212 | end 213 | end 214 | end 215 | -------------------------------------------------------------------------------- /lib/activerecord-clean-db-structure/railtie.rb: -------------------------------------------------------------------------------- 1 | module ActiveRecordCleanDbStructure 2 | class Railtie < Rails::Railtie 3 | config.activerecord_clean_db_structure = ActiveSupport::OrderedOptions.new 4 | 5 | rake_tasks do 6 | load 'activerecord-clean-db-structure/tasks/clean_db_structure.rake' 7 | end 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /lib/activerecord-clean-db-structure/tasks/clean_db_structure.rake: -------------------------------------------------------------------------------- 1 | require 'activerecord-clean-db-structure/clean_dump' 2 | 3 | Rake::Task[ActiveRecord.version >= Gem::Version.new('6.1') ? 
'db:schema:dump' : 'db:structure:dump'].enhance do 4 | filenames = [] 5 | filenames << ENV['DB_STRUCTURE'] if ENV.key?('DB_STRUCTURE') 6 | 7 | if ActiveRecord::VERSION::MAJOR >= 6 8 | # Based on https://github.com/rails/rails/pull/36560/files 9 | databases = ActiveRecord::Tasks::DatabaseTasks.setup_initial_database_yaml 10 | ActiveRecord::Tasks::DatabaseTasks.for_each(databases) do |spec_name| 11 | Rails.application.config.paths['db'].each do |path| 12 | filename = spec_name == 'primary' ? 'structure.sql' : spec_name + '_structure.sql' 13 | filenames << File.join(path, filename) 14 | end 15 | end 16 | end 17 | 18 | unless filenames.present? 19 | Rails.application.config.paths['db'].each do |path| 20 | filenames << File.join(path, 'structure.sql') 21 | end 22 | end 23 | 24 | filenames.each do |filename| 25 | cleaner = ActiveRecordCleanDbStructure::CleanDump.new( 26 | File.read(filename), 27 | **Rails.application.config.activerecord_clean_db_structure 28 | ) 29 | cleaner.run 30 | File.write(filename, cleaner.dump) 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/activerecord-clean-db-structure/version.rb: -------------------------------------------------------------------------------- 1 | module ActiveRecordCleanDbStructure 2 | VERSION = '0.4.3' 3 | end 4 | -------------------------------------------------------------------------------- /test/clean_dump_test.rb: -------------------------------------------------------------------------------- 1 | require "test_helper" 2 | 3 | class CleanDumpTest < Minitest::Test 4 | def test_basic_case 5 | assert_cleans_dump "data/input.sql", "expectations/default_props.sql" 6 | end 7 | 8 | def test_ignore_ids 9 | assert_cleans_dump "data/input.sql", "expectations/ignore_ids.sql", ignore_ids: true 10 | end 11 | 12 | def test_order_column_definitions 13 | assert_cleans_dump "data/input.sql", "expectations/order_column_definitions.sql", order_column_definitions: true 14 | end 15 | 16 | def test_order_schema_migrations_values 17 | assert_cleans_dump "data/input.sql", "expectations/order_schema_migrations_values.sql", order_schema_migrations_values: true 18 | end 19 | 20 | def test_indexes_after_tables 21 | assert_cleans_dump "data/input.sql", "expectations/indexes_after_tables.sql", indexes_after_tables: true 22 | end 23 | 24 | def test_keep_extensions_all 25 | assert_cleans_dump "data/input.sql", "expectations/keep_extensions_all.sql", keep_extensions: :all 26 | end 27 | 28 | def test_partitions 29 | assert_cleans_dump "data/partitions.sql", "expectations/partitions.sql" 30 | end 31 | 32 | def test_ignored_schemas 33 | assert_cleans_dump "data/ignored_schemas.sql", "expectations/ignored_schemas_pganalyze.sql", ignore_schemas: ['pganalyze'] 34 | assert_cleans_dump "data/ignored_schemas.sql", "expectations/ignored_schemas_myschema.sql", ignore_schemas: ['myschema'] 35 | end 36 | 37 | private 38 | 39 | def assert_cleans_dump(input, output, props = {}) 40 | cleaner = ActiveRecordCleanDbStructure::CleanDump.new(File.read(File.join(__dir__, input)), props) 41 | cleaner.run 42 | assert_equal File.read(File.join(__dir__, output)), cleaner.dump 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /test/data/ignored_schemas.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET idle_in_transaction_session_timeout = 0; 4 | SET transaction_timeout = 0; 5 | SET 
client_encoding = 'UTF8'; 6 | SET standard_conforming_strings = on; 7 | SELECT pg_catalog.set_config('search_path', '', false); 8 | SET check_function_bodies = false; 9 | SET xmloption = content; 10 | SET client_min_messages = warning; 11 | SET row_security = off; 12 | 13 | -- 14 | -- Name: myschema; Type: SCHEMA; Schema: -; Owner: - 15 | -- 16 | 17 | CREATE SCHEMA myschema; 18 | 19 | 20 | -- 21 | -- Name: pganalyze; Type: SCHEMA; Schema: -; Owner: - 22 | -- 23 | 24 | CREATE SCHEMA pganalyze; 25 | 26 | 27 | -- 28 | -- Name: btree_gin; Type: EXTENSION; Schema: -; Owner: - 29 | -- 30 | 31 | CREATE EXTENSION IF NOT EXISTS btree_gin WITH SCHEMA myschema; 32 | 33 | 34 | -- 35 | -- Name: EXTENSION btree_gin; Type: COMMENT; Schema: -; Owner: - 36 | -- 37 | 38 | COMMENT ON EXTENSION btree_gin IS 'support for indexing common datatypes in GIN'; 39 | 40 | 41 | -- 42 | -- Name: btree_gist; Type: EXTENSION; Schema: -; Owner: - 43 | -- 44 | 45 | CREATE EXTENSION IF NOT EXISTS btree_gist WITH SCHEMA public; 46 | 47 | 48 | -- 49 | -- Name: EXTENSION btree_gist; Type: COMMENT; Schema: -; Owner: - 50 | -- 51 | 52 | COMMENT ON EXTENSION btree_gist IS 'support for indexing common datatypes in GiST'; 53 | 54 | 55 | -- 56 | -- Name: dblink; Type: EXTENSION; Schema: -; Owner: - 57 | -- 58 | 59 | CREATE EXTENSION IF NOT EXISTS dblink WITH SCHEMA public; 60 | 61 | 62 | -- 63 | -- Name: EXTENSION dblink; Type: COMMENT; Schema: -; Owner: - 64 | -- 65 | 66 | COMMENT ON EXTENSION dblink IS 'connect to other PostgreSQL databases from within a database'; 67 | 68 | 69 | -- 70 | -- Name: pg_buffercache; Type: EXTENSION; Schema: -; Owner: - 71 | -- 72 | 73 | CREATE EXTENSION IF NOT EXISTS pg_buffercache WITH SCHEMA public; 74 | 75 | 76 | -- 77 | -- Name: EXTENSION pg_buffercache; Type: COMMENT; Schema: -; Owner: - 78 | -- 79 | 80 | COMMENT ON EXTENSION pg_buffercache IS 'examine the shared buffer cache'; 81 | 82 | 83 | -- 84 | -- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - 85 | -- 86 | 87 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public; 88 | 89 | 90 | -- 91 | -- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - 92 | -- 93 | 94 | COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; 95 | 96 | 97 | -- 98 | -- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - 99 | -- 100 | 101 | CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public; 102 | 103 | 104 | -- 105 | -- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - 106 | -- 107 | 108 | COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; 109 | 110 | 111 | -- 112 | -- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - 113 | -- 114 | 115 | CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public; 116 | 117 | 118 | -- 119 | -- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - 120 | -- 121 | 122 | COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; 123 | 124 | 125 | -- 126 | -- Name: myschema; Type: TYPE; Schema: myschema; Owner: - 127 | -- 128 | 129 | CREATE TYPE myschema.myenum AS ENUM ( 130 | 'a', 131 | 'b', 132 | 'c' 133 | ); 134 | 135 | 136 | -- 137 | -- Name: explain_analyze(text, text[], text[], text[]); Type: FUNCTION; Schema: pganalyze; Owner: - 138 | -- 139 | 140 | CREATE FUNCTION pganalyze.explain_analyze(query text, params text[], param_types text[], analyze_flags text[]) RETURNS text 141 | LANGUAGE plpgsql SECURITY DEFINER 142 | AS $$ 143 | DECLARE 144 | 
prepared_query text; 145 | params_str text; 146 | param_types_str text; 147 | explain_prefix text; 148 | explain_flag text; 149 | result text; 150 | BEGIN 151 | SET TRANSACTION READ ONLY; 152 | 153 | PERFORM 1 FROM pg_roles WHERE (rolname = current_user AND rolsuper) OR (pg_has_role(oid, 'MEMBER') AND rolname IN ('rds_superuser', 'azure_pg_admin', 'cloudsqlsuperuser')); 154 | IF FOUND THEN 155 | RAISE EXCEPTION 'cannot run: pganalyze.explain_analyze helper is owned by superuser - recreate function with lesser privileged user'; 156 | END IF; 157 | 158 | SELECT pg_catalog.regexp_replace(query, ';+\s*\Z', '') INTO prepared_query; 159 | IF prepared_query LIKE '%;%' THEN 160 | RAISE EXCEPTION 'cannot run pganalyze.explain_analyze helper with a multi-statement query'; 161 | END IF; 162 | 163 | explain_prefix := 'EXPLAIN (VERBOSE, FORMAT JSON'; 164 | FOR explain_flag IN SELECT * FROM unnest(analyze_flags) 165 | LOOP 166 | IF explain_flag NOT SIMILAR TO '[A-z_ ]+' THEN 167 | RAISE EXCEPTION 'cannot run pganalyze.explain_analyze helper with invalid flag'; 168 | END IF; 169 | explain_prefix := explain_prefix || ', ' || explain_flag; 170 | END LOOP; 171 | explain_prefix := explain_prefix || ') '; 172 | 173 | SELECT COALESCE('(' || pg_catalog.string_agg(pg_catalog.quote_literal(p), ',') || ')', '') FROM pg_catalog.unnest(params) _(p) INTO params_str; 174 | SELECT COALESCE('(' || pg_catalog.string_agg(pg_catalog.quote_ident(p), ',') || ')', '') FROM pg_catalog.unnest(param_types) _(p) INTO param_types_str; 175 | 176 | EXECUTE 'PREPARE pganalyze_explain_analyze ' || param_types_str || ' AS ' || prepared_query; 177 | BEGIN 178 | EXECUTE explain_prefix || 'EXECUTE pganalyze_explain_analyze' || params_str INTO STRICT result; 179 | EXCEPTION WHEN QUERY_CANCELED OR OTHERS THEN 180 | DEALLOCATE pganalyze_explain_analyze; 181 | RAISE; 182 | END; 183 | DEALLOCATE pganalyze_explain_analyze; 184 | 185 | RETURN result; 186 | END 187 | $$; 188 | 189 | 190 | -- 191 | -- Name: get_stat_replication(); Type: FUNCTION; Schema: pganalyze; Owner: - 192 | -- 193 | 194 | CREATE FUNCTION pganalyze.get_stat_replication() RETURNS SETOF pg_stat_replication 195 | LANGUAGE sql SECURITY DEFINER 196 | AS $$ 197 | /* pganalyze-collector */ SELECT * FROM pg_catalog.pg_stat_replication; 198 | $$; 199 | 200 | 201 | SET default_tablespace = ''; 202 | 203 | SET default_table_access_method = heap; 204 | 205 | -- 206 | -- Name: table2; Type: TABLE; Schema: public; Owner: - 207 | -- 208 | 209 | CREATE TABLE public.table2 ( 210 | id bigint NOT NULL, 211 | ); 212 | 213 | 214 | -- 215 | -- Name: test_func2(public.table2[]); Type: FUNCTION; Schema: public; Owner: - 216 | -- 217 | 218 | CREATE FUNCTION public.test_func2(ids public.table2[] DEFAULT NULL::public.table2[]) RETURNS TABLE(id bigint) 219 | LANGUAGE sql ROWS 10000 PARALLEL SAFE 220 | AS $$ 221 | SELECT * FROM 222 | $$; 223 | 224 | 225 | -- 226 | -- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: - 227 | -- 228 | 229 | CREATE TABLE public.ar_internal_metadata ( 230 | key character varying NOT NULL, 231 | value character varying, 232 | created_at timestamp without time zone NOT NULL, 233 | updated_at timestamp without time zone NOT NULL 234 | ); 235 | 236 | 237 | -- 238 | -- Name: myschema; Type: TABLE; Schema: myschema; Owner: - 239 | -- 240 | 241 | CREATE TABLE myschema.mytable ( 242 | id uuid DEFAULT public.gen_random_uuid() NOT NULL, 243 | organization_id uuid NOT NULL 244 | ); 245 | 246 | 247 | -- 248 | -- Name: schema_migrations; Type: TABLE; Schema: public; Owner: 
- 249 | -- 250 | 251 | CREATE TABLE public.schema_migrations ( 252 | version character varying(255) NOT NULL 253 | ); 254 | 255 | 256 | -- 257 | -- Name: index_mytable_on_organization_id; Type: INDEX; Schema: myschema; Owner: - 258 | -- 259 | 260 | CREATE INDEX index_mytable_on_organization_id ON myschema.mytable USING btree (organization_id); 261 | 262 | 263 | -- 264 | -- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: - 265 | -- 266 | 267 | CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version); 268 | 269 | 270 | -- 271 | -- PostgreSQL database dump complete 272 | -- 273 | 274 | SET search_path TO "$user", public; 275 | 276 | INSERT INTO "schema_migrations" (version) VALUES 277 | ('20250101011234'), 278 | ('20250101015678'); 279 | 280 | -------------------------------------------------------------------------------- /test/data/input.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET idle_in_transaction_session_timeout = 0; 4 | SET transaction_timeout = 0; 5 | SET client_encoding = 'UTF8'; 6 | SET standard_conforming_strings = on; 7 | SELECT pg_catalog.set_config('search_path', '', false); 8 | SET check_function_bodies = false; 9 | SET xmloption = content; 10 | SET client_min_messages = warning; 11 | SET row_security = off; 12 | 13 | -- 14 | -- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - 15 | -- 16 | 17 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public; 18 | 19 | 20 | -- 21 | -- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - 22 | -- 23 | 24 | COMMENT ON EXTENSION pg_stat_statements IS 'track execution statistics of all SQL statements executed'; 25 | 26 | 27 | SET default_tablespace = ''; 28 | 29 | SET default_table_access_method = heap; 30 | 31 | -- 32 | -- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: - 33 | -- 34 | 35 | CREATE TABLE public.ar_internal_metadata ( 36 | key character varying NOT NULL, 37 | value character varying, 38 | created_at timestamp(6) without time zone NOT NULL, 39 | updated_at timestamp(6) without time zone NOT NULL 40 | ); 41 | 42 | 43 | -- 44 | -- Name: delayed_jobs; Type: TABLE; Schema: public; Owner: - 45 | -- 46 | 47 | CREATE TABLE public.delayed_jobs ( 48 | id bigint NOT NULL, 49 | priority integer DEFAULT 0, 50 | attempts integer DEFAULT 0, 51 | handler text, 52 | last_error text, 53 | run_at timestamp without time zone, 54 | locked_at timestamp without time zone, 55 | failed_at timestamp without time zone, 56 | locked_by character varying(255), 57 | queue character varying(255), 58 | created_at timestamp without time zone NOT NULL, 59 | updated_at timestamp without time zone NOT NULL, 60 | metadata jsonb DEFAULT '{}'::jsonb, 61 | whodunnit text 62 | ) 63 | WITH (fillfactor='85'); 64 | 65 | 66 | -- 67 | -- Name: delayed_jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: - 68 | -- 69 | 70 | CREATE SEQUENCE public.delayed_jobs_id_seq 71 | START WITH 1 72 | INCREMENT BY 1 73 | NO MINVALUE 74 | NO MAXVALUE 75 | CACHE 1; 76 | 77 | 78 | -- 79 | -- Name: delayed_jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - 80 | -- 81 | 82 | ALTER SEQUENCE public.delayed_jobs_id_seq OWNED BY public.delayed_jobs.id; 83 | 84 | 85 | -- 86 | -- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - 87 | -- 88 | 89 | CREATE TABLE public.schema_migrations ( 90 | version character varying NOT NULL 91 | ); 92 | 93 | 94 | -- 
95 | -- Name: delayed_jobs id; Type: DEFAULT; Schema: public; Owner: - 96 | -- 97 | 98 | ALTER TABLE ONLY public.delayed_jobs ALTER COLUMN id SET DEFAULT nextval('public.delayed_jobs_id_seq'::regclass); 99 | 100 | 101 | -- 102 | -- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT; Schema: public; Owner: - 103 | -- 104 | 105 | ALTER TABLE ONLY public.ar_internal_metadata 106 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 107 | 108 | 109 | -- 110 | -- Name: delayed_jobs delayed_jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: - 111 | -- 112 | 113 | ALTER TABLE ONLY public.delayed_jobs 114 | ADD CONSTRAINT delayed_jobs_pkey PRIMARY KEY (id); 115 | 116 | 117 | -- 118 | -- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - 119 | -- 120 | 121 | ALTER TABLE ONLY public.schema_migrations 122 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 123 | 124 | 125 | -- 126 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX; Schema: public; Owner: - 127 | -- 128 | 129 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 130 | 131 | 132 | -- 133 | -- Name: index_delayed_jobs_on_queue; Type: INDEX; Schema: public; Owner: - 134 | -- 135 | 136 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 137 | 138 | 139 | -- 140 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX; Schema: public; Owner: - 141 | -- 142 | 143 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 144 | 145 | 146 | -- 147 | -- PostgreSQL database dump complete 148 | -- 149 | 150 | SET search_path TO "$user", public; 151 | 152 | INSERT INTO "schema_migrations" (version) VALUES 153 | ('20240822225012'), 154 | ('20240822224954'), 155 | ('20240725043656'), 156 | ('20240621041110'), 157 | ('20240621020038'), 158 | ('20220802204003'), 159 | ('20211125055031'), 160 | ('20211012054749'), 161 | ('20210923052631'), 162 | ('20210903003251'); 163 | 164 | -------------------------------------------------------------------------------- /test/data/partitions.sql: -------------------------------------------------------------------------------- 1 | -- 2 | -- Name: autovacuum_run_stats_35d; Type: TABLE; Schema: public; Owner: - 3 | -- 4 | 5 | CREATE TABLE public.autovacuum_run_stats_35d ( 6 | autovacuum_run_stats_id uuid DEFAULT public.gen_random_uuid() NOT NULL, 7 | server_id uuid, 8 | schema_table_id bigint, 9 | occurred_at timestamp with time zone NOT NULL 10 | ) 11 | PARTITION BY RANGE (occurred_at); 12 | 13 | 14 | -- 15 | -- Name: index_autovacuum_run_stats_35d_on_schema_table_id_occurred_at; Type: INDEX 16 | -- 17 | 18 | CREATE INDEX index_autovacuum_run_stats_35d_on_schema_table_id_occurred_at ON public.autovacuum_run_stats_35d USING btree (schema_table_id, occurred_at); 19 | 20 | 21 | -- 22 | -- Name: index_autovacuum_run_stats_35d_on_server_id_and_occurred_at; Type: INDEX 23 | -- 24 | 25 | CREATE INDEX index_autovacuum_run_stats_35d_on_server_id_and_occurred_at ON public.autovacuum_run_stats_35d USING btree (server_id, occurred_at); 26 | 27 | 28 | -- 29 | -- Name: autovacuum_run_stats_35d_20241026; Type: TABLE; Schema: public; Owner: - 30 | -- 31 | 32 | CREATE TABLE public.autovacuum_run_stats_35d_20241026 ( 33 | autovacuum_run_stats_id uuid DEFAULT public.gen_random_uuid() NOT NULL, 34 | server_id uuid, 35 | schema_table_id bigint, 36 | occurred_at timestamp with time zone NOT NULL 37 | ); 38 | 39 | 40 | -- 41 | -- 
Name: autovacuum_run_stats_35d_20241026; Type: TABLE ATTACH; Schema: public; Owner: - 42 | -- 43 | 44 | ALTER TABLE ONLY public.autovacuum_run_stats_35d ATTACH PARTITION public.autovacuum_run_stats_35d_20241026 FOR VALUES FROM ('2024-10-25 19:00:00-05') TO ('2024-10-26 19:00:00-05'); 45 | 46 | 47 | -- 48 | -- Name: autovacuum_run_stats_35d_20241026 autovacuum_run_stats_35d_20241026_pkey; Type: CONSTRAINT; Schema: public; Owner: - 49 | -- 50 | 51 | ALTER TABLE ONLY public.autovacuum_run_stats_35d_20241026 52 | ADD CONSTRAINT autovacuum_run_stats_35d_20241026_pkey PRIMARY KEY (autovacuum_run_stats_id); 53 | 54 | 55 | -- 56 | -- Name: autovacuum_run_stats_35d_20241026_server_id_occurred_at_idx; Type: INDEX; Schema: public; Owner: - 57 | -- 58 | 59 | CREATE INDEX autovacuum_run_stats_35d_20241026_server_id_occurred_at_idx ON public.autovacuum_run_stats_35d_20241026 USING btree (server_id, occurred_at); 60 | 61 | 62 | -- 63 | -- Name: autovacuum_run_stats_35d_2024_schema_table_id_occurred_at_idx25; Type: INDEX; Schema: public; Owner: - 64 | -- 65 | 66 | CREATE INDEX autovacuum_run_stats_35d_2024_schema_table_id_occurred_at_idx25 ON public.autovacuum_run_stats_35d_20241026 USING btree (schema_table_id, occurred_at); 67 | 68 | 69 | -- 70 | -- Name: autovacuum_run_stats_35d_20241026_server_id_occurred_at_idx; Type: INDEX ATTACH; Schema: public; Owner: - 71 | -- 72 | 73 | ALTER INDEX public.index_autovacuum_run_stats_35d_on_server_id_and_occurred_at ATTACH PARTITION public.autovacuum_run_stats_35d_20241026_server_id_occurred_at_idx; 74 | 75 | 76 | -- 77 | -- Name: schema_table_infos_35d; Type: TABLE; Schema: public; Owner: - 78 | -- 79 | 80 | CREATE TABLE public.schema_table_infos_35d ( 81 | schema_table_id bigint NOT NULL, 82 | collected_at timestamp with time zone NOT NULL, 83 | server_id uuid NOT NULL 84 | ) 85 | PARTITION BY RANGE (collected_at); 86 | 87 | 88 | -- 89 | -- Name: schema_table_infos_35d schema_table_infos_35d_pkey; Type: CONSTRAINT; Schema: public; Owner: - 90 | -- 91 | 92 | ALTER TABLE ONLY public.schema_table_infos_35d 93 | ADD CONSTRAINT schema_table_infos_35d_pkey PRIMARY KEY (schema_table_id, collected_at); 94 | 95 | 96 | -- 97 | -- Name: schema_table_infos_35d_20240920; Type: TABLE; Schema: public; Owner: - 98 | -- 99 | 100 | CREATE TABLE public.schema_table_infos_35d_20240920 ( 101 | schema_table_id bigint NOT NULL, 102 | collected_at timestamp with time zone NOT NULL, 103 | server_id uuid NOT NULL 104 | ); 105 | 106 | 107 | -- 108 | -- Name: schema_table_infos_35d_20240920; Type: TABLE ATTACH; Schema: public; Owner: - 109 | -- 110 | 111 | ALTER TABLE ONLY public.schema_table_infos_35d ATTACH PARTITION public.schema_table_infos_35d_20240920 FOR VALUES FROM ('2024-09-19 19:00:00-05') TO ('2024-09-20 19:00:00-05'); 112 | 113 | 114 | -- 115 | -- Name: schema_table_infos_35d_20240920 schema_table_infos_35d_20240920_pkey; Type: CONSTRAINT; Schema: public; Owner: - 116 | -- 117 | 118 | ALTER TABLE ONLY public.schema_table_infos_35d_20240920 119 | ADD CONSTRAINT schema_table_infos_35d_20240920_pkey PRIMARY KEY (schema_table_id, collected_at); 120 | 121 | 122 | -- 123 | -- Name: schema_table_infos_35d_20240920_server_id_idx; Type: INDEX; Schema: public; Owner: - 124 | -- 125 | 126 | CREATE INDEX schema_table_infos_35d_20240920_server_id_idx ON public.schema_table_infos_35d_20240920 USING btree (server_id); 127 | 128 | 129 | -- 130 | -- Name: schema_table_infos_35d_2024092_schema_table_id_collected_at_idx; Type: INDEX; Schema: public; Owner: - 131 | -- 132 | 133 | CREATE INDEX 
schema_table_infos_35d_2024092_schema_table_id_collected_at_idx ON public.schema_table_infos_35d_20240920 USING btree (schema_table_id, collected_at DESC); 134 | 135 | 136 | -- 137 | -- Name: schema_table_infos_35d_20240920_pkey; Type: INDEX ATTACH; Schema: public; Owner: - 138 | -- 139 | 140 | ALTER INDEX public.schema_table_infos_35d_pkey ATTACH PARTITION public.schema_table_infos_35d_20240920_pkey; 141 | 142 | 143 | -- 144 | -- Name: schema_table_infos_35d_20240920_server_id_idx; Type: INDEX ATTACH; Schema: public; Owner: - 145 | -- 146 | 147 | ALTER INDEX public.index_schema_table_infos_35d_on_server_id ATTACH PARTITION public.schema_table_infos_35d_20240920_server_id_idx; 148 | -------------------------------------------------------------------------------- /test/expectations/default_props.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | SET default_tablespace = ''; 10 | 11 | -- Name: ar_internal_metadata; Type: TABLE 12 | 13 | CREATE TABLE public.ar_internal_metadata ( 14 | key character varying NOT NULL, 15 | value character varying, 16 | created_at timestamp(6) without time zone NOT NULL, 17 | updated_at timestamp(6) without time zone NOT NULL 18 | ); 19 | 20 | -- Name: delayed_jobs; Type: TABLE 21 | 22 | CREATE TABLE public.delayed_jobs ( 23 | id BIGSERIAL PRIMARY KEY, 24 | priority integer DEFAULT 0, 25 | attempts integer DEFAULT 0, 26 | handler text, 27 | last_error text, 28 | run_at timestamp without time zone, 29 | locked_at timestamp without time zone, 30 | failed_at timestamp without time zone, 31 | locked_by character varying(255), 32 | queue character varying(255), 33 | created_at timestamp without time zone NOT NULL, 34 | updated_at timestamp without time zone NOT NULL, 35 | metadata jsonb DEFAULT '{}'::jsonb, 36 | whodunnit text 37 | ) 38 | WITH (fillfactor='85'); 39 | 40 | -- Name: schema_migrations; Type: TABLE 41 | 42 | CREATE TABLE public.schema_migrations ( 43 | version character varying NOT NULL 44 | ); 45 | 46 | ALTER TABLE ONLY public.ar_internal_metadata 47 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 48 | 49 | ALTER TABLE ONLY public.schema_migrations 50 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 51 | 52 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX 53 | 54 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 55 | 56 | -- Name: index_delayed_jobs_on_queue; Type: INDEX 57 | 58 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 59 | 60 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX 61 | 62 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 63 | 64 | -- PostgreSQL database dump complete 65 | 66 | SET search_path TO "$user", public; 67 | 68 | INSERT INTO "schema_migrations" (version) VALUES 69 | ('20240822225012'), 70 | ('20240822224954'), 71 | ('20240725043656'), 72 | ('20240621041110'), 73 | ('20240621020038'), 74 | ('20220802204003'), 75 | ('20211125055031'), 76 | ('20211012054749'), 77 | ('20210923052631'), 78 | ('20210903003251'); 79 | -------------------------------------------------------------------------------- /test/expectations/ignore_ids.sql: 
-------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | SET default_tablespace = ''; 10 | 11 | -- Name: ar_internal_metadata; Type: TABLE 12 | 13 | CREATE TABLE public.ar_internal_metadata ( 14 | key character varying NOT NULL, 15 | value character varying, 16 | created_at timestamp(6) without time zone NOT NULL, 17 | updated_at timestamp(6) without time zone NOT NULL 18 | ); 19 | 20 | -- Name: delayed_jobs; Type: TABLE 21 | 22 | CREATE TABLE public.delayed_jobs ( 23 | id bigint NOT NULL, 24 | priority integer DEFAULT 0, 25 | attempts integer DEFAULT 0, 26 | handler text, 27 | last_error text, 28 | run_at timestamp without time zone, 29 | locked_at timestamp without time zone, 30 | failed_at timestamp without time zone, 31 | locked_by character varying(255), 32 | queue character varying(255), 33 | created_at timestamp without time zone NOT NULL, 34 | updated_at timestamp without time zone NOT NULL, 35 | metadata jsonb DEFAULT '{}'::jsonb, 36 | whodunnit text 37 | ) 38 | WITH (fillfactor='85'); 39 | 40 | -- Name: delayed_jobs_id_seq; Type: SEQUENCE 41 | 42 | CREATE SEQUENCE public.delayed_jobs_id_seq 43 | START WITH 1 44 | INCREMENT BY 1 45 | NO MINVALUE 46 | NO MAXVALUE 47 | CACHE 1; 48 | 49 | -- Name: delayed_jobs_id_seq; Type: SEQUENCE OWNED BY 50 | 51 | ALTER SEQUENCE public.delayed_jobs_id_seq OWNED BY public.delayed_jobs.id; 52 | 53 | -- Name: schema_migrations; Type: TABLE 54 | 55 | CREATE TABLE public.schema_migrations ( 56 | version character varying NOT NULL 57 | ); 58 | 59 | -- Name: delayed_jobs id; Type: DEFAULT 60 | 61 | ALTER TABLE ONLY public.delayed_jobs ALTER COLUMN id SET DEFAULT nextval('public.delayed_jobs_id_seq'::regclass); 62 | 63 | -- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT 64 | 65 | ALTER TABLE ONLY public.ar_internal_metadata 66 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 67 | 68 | -- Name: delayed_jobs delayed_jobs_pkey; Type: CONSTRAINT 69 | 70 | ALTER TABLE ONLY public.delayed_jobs 71 | ADD CONSTRAINT delayed_jobs_pkey PRIMARY KEY (id); 72 | 73 | -- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT 74 | 75 | ALTER TABLE ONLY public.schema_migrations 76 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 77 | 78 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX 79 | 80 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 81 | 82 | -- Name: index_delayed_jobs_on_queue; Type: INDEX 83 | 84 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 85 | 86 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX 87 | 88 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 89 | 90 | -- PostgreSQL database dump complete 91 | 92 | SET search_path TO "$user", public; 93 | 94 | INSERT INTO "schema_migrations" (version) VALUES 95 | ('20240822225012'), 96 | ('20240822224954'), 97 | ('20240725043656'), 98 | ('20240621041110'), 99 | ('20240621020038'), 100 | ('20220802204003'), 101 | ('20211125055031'), 102 | ('20211012054749'), 103 | ('20210923052631'), 104 | ('20210903003251'); 105 | -------------------------------------------------------------------------------- 
/test/expectations/ignored_schemas_myschema.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | -- Name: pganalyze; Type: SCHEMA 10 | 11 | CREATE SCHEMA pganalyze; 12 | 13 | -- Name: EXTENSION btree_gin; Type: COMMENT 14 | 15 | -- Name: btree_gist; Type: EXTENSION 16 | 17 | CREATE EXTENSION IF NOT EXISTS btree_gist WITH SCHEMA public; 18 | 19 | -- Name: EXTENSION btree_gist; Type: COMMENT 20 | 21 | -- Name: dblink; Type: EXTENSION 22 | 23 | CREATE EXTENSION IF NOT EXISTS dblink WITH SCHEMA public; 24 | 25 | -- Name: EXTENSION dblink; Type: COMMENT 26 | 27 | -- Name: pgcrypto; Type: EXTENSION 28 | 29 | CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public; 30 | 31 | -- Name: EXTENSION pgcrypto; Type: COMMENT 32 | 33 | -- Name: uuid-ossp; Type: EXTENSION 34 | 35 | CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public; 36 | 37 | -- Name: EXTENSION "uuid-ossp"; Type: COMMENT 38 | 39 | -- Name: explain_analyze(text, text[], text[], text[]); Type: FUNCTION 40 | 41 | CREATE FUNCTION pganalyze.explain_analyze(query text, params text[], param_types text[], analyze_flags text[]) RETURNS text 42 | LANGUAGE plpgsql SECURITY DEFINER 43 | AS $$ 44 | DECLARE 45 | prepared_query text; 46 | params_str text; 47 | param_types_str text; 48 | explain_prefix text; 49 | explain_flag text; 50 | result text; 51 | BEGIN 52 | SET TRANSACTION READ ONLY; 53 | 54 | PERFORM 1 FROM pg_roles WHERE (rolname = current_user AND rolsuper) OR (pg_has_role(oid, 'MEMBER') AND rolname IN ('rds_superuser', 'azure_pg_admin', 'cloudsqlsuperuser')); 55 | IF FOUND THEN 56 | RAISE EXCEPTION 'cannot run: pganalyze.explain_analyze helper is owned by superuser - recreate function with lesser privileged user'; 57 | END IF; 58 | 59 | SELECT pg_catalog.regexp_replace(query, ';+\s*\Z', '') INTO prepared_query; 60 | IF prepared_query LIKE '%;%' THEN 61 | RAISE EXCEPTION 'cannot run pganalyze.explain_analyze helper with a multi-statement query'; 62 | END IF; 63 | 64 | explain_prefix := 'EXPLAIN (VERBOSE, FORMAT JSON'; 65 | FOR explain_flag IN SELECT * FROM unnest(analyze_flags) 66 | LOOP 67 | IF explain_flag NOT SIMILAR TO '[A-z_ ]+' THEN 68 | RAISE EXCEPTION 'cannot run pganalyze.explain_analyze helper with invalid flag'; 69 | END IF; 70 | explain_prefix := explain_prefix || ', ' || explain_flag; 71 | END LOOP; 72 | explain_prefix := explain_prefix || ') '; 73 | 74 | SELECT COALESCE('(' || pg_catalog.string_agg(pg_catalog.quote_literal(p), ',') || ')', '') FROM pg_catalog.unnest(params) _(p) INTO params_str; 75 | SELECT COALESCE('(' || pg_catalog.string_agg(pg_catalog.quote_ident(p), ',') || ')', '') FROM pg_catalog.unnest(param_types) _(p) INTO param_types_str; 76 | 77 | EXECUTE 'PREPARE pganalyze_explain_analyze ' || param_types_str || ' AS ' || prepared_query; 78 | BEGIN 79 | EXECUTE explain_prefix || 'EXECUTE pganalyze_explain_analyze' || params_str INTO STRICT result; 80 | EXCEPTION WHEN QUERY_CANCELED OR OTHERS THEN 81 | DEALLOCATE pganalyze_explain_analyze; 82 | RAISE; 83 | END; 84 | DEALLOCATE pganalyze_explain_analyze; 85 | 86 | RETURN result; 87 | END 88 | $$; 89 | 90 | -- Name: get_stat_replication(); Type: FUNCTION 91 | 92 | CREATE FUNCTION pganalyze.get_stat_replication() RETURNS SETOF pg_stat_replication 93 | 
LANGUAGE sql SECURITY DEFINER 94 | AS $$ 95 | /* pganalyze-collector */ SELECT * FROM pg_catalog.pg_stat_replication; 96 | $$; 97 | 98 | SET default_tablespace = ''; 99 | 100 | -- Name: table2; Type: TABLE 101 | 102 | CREATE TABLE public.table2 ( 103 | id BIGSERIAL PRIMARY KEY, 104 | ); 105 | 106 | -- Name: test_func2(public.table2[]); Type: FUNCTION 107 | 108 | CREATE FUNCTION public.test_func2(ids public.table2[] DEFAULT NULL::public.table2[]) RETURNS TABLE(id bigint) 109 | LANGUAGE sql ROWS 10000 PARALLEL SAFE 110 | AS $$ 111 | SELECT * FROM 112 | $$; 113 | 114 | -- Name: ar_internal_metadata; Type: TABLE 115 | 116 | CREATE TABLE public.ar_internal_metadata ( 117 | key character varying NOT NULL, 118 | value character varying, 119 | created_at timestamp without time zone NOT NULL, 120 | updated_at timestamp without time zone NOT NULL 121 | ); 122 | 123 | -- Name: schema_migrations; Type: TABLE 124 | 125 | CREATE TABLE public.schema_migrations ( 126 | version character varying(255) NOT NULL 127 | ); 128 | 129 | -- Name: unique_schema_migrations; Type: INDEX 130 | 131 | CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version); 132 | 133 | -- PostgreSQL database dump complete 134 | 135 | SET search_path TO "$user", public; 136 | 137 | INSERT INTO "schema_migrations" (version) VALUES 138 | ('20250101011234'), 139 | ('20250101015678'); 140 | -------------------------------------------------------------------------------- /test/expectations/ignored_schemas_pganalyze.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | -- Name: myschema; Type: SCHEMA 10 | 11 | CREATE SCHEMA myschema; 12 | 13 | -- Name: btree_gin; Type: EXTENSION 14 | 15 | CREATE EXTENSION IF NOT EXISTS btree_gin WITH SCHEMA myschema; 16 | 17 | -- Name: EXTENSION btree_gin; Type: COMMENT 18 | 19 | -- Name: btree_gist; Type: EXTENSION 20 | 21 | CREATE EXTENSION IF NOT EXISTS btree_gist WITH SCHEMA public; 22 | 23 | -- Name: EXTENSION btree_gist; Type: COMMENT 24 | 25 | -- Name: dblink; Type: EXTENSION 26 | 27 | CREATE EXTENSION IF NOT EXISTS dblink WITH SCHEMA public; 28 | 29 | -- Name: EXTENSION dblink; Type: COMMENT 30 | 31 | -- Name: pgcrypto; Type: EXTENSION 32 | 33 | CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public; 34 | 35 | -- Name: EXTENSION pgcrypto; Type: COMMENT 36 | 37 | -- Name: uuid-ossp; Type: EXTENSION 38 | 39 | CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public; 40 | 41 | -- Name: EXTENSION "uuid-ossp"; Type: COMMENT 42 | 43 | -- Name: myschema; Type: TYPE 44 | 45 | CREATE TYPE myschema.myenum AS ENUM ( 46 | 'a', 47 | 'b', 48 | 'c' 49 | ); 50 | 51 | SET default_tablespace = ''; 52 | 53 | -- Name: table2; Type: TABLE 54 | 55 | CREATE TABLE public.table2 ( 56 | id BIGSERIAL PRIMARY KEY, 57 | ); 58 | 59 | -- Name: test_func2(public.table2[]); Type: FUNCTION 60 | 61 | CREATE FUNCTION public.test_func2(ids public.table2[] DEFAULT NULL::public.table2[]) RETURNS TABLE(id bigint) 62 | LANGUAGE sql ROWS 10000 PARALLEL SAFE 63 | AS $$ 64 | SELECT * FROM 65 | $$; 66 | 67 | -- Name: ar_internal_metadata; Type: TABLE 68 | 69 | CREATE TABLE public.ar_internal_metadata ( 70 | key character varying NOT NULL, 71 | value character varying, 72 | created_at timestamp 
without time zone NOT NULL, 73 | updated_at timestamp without time zone NOT NULL 74 | ); 75 | 76 | -- Name: myschema; Type: TABLE 77 | 78 | CREATE TABLE myschema.mytable ( 79 | id uuid DEFAULT public.gen_random_uuid() PRIMARY KEY, 80 | organization_id uuid NOT NULL 81 | ); 82 | 83 | -- Name: schema_migrations; Type: TABLE 84 | 85 | CREATE TABLE public.schema_migrations ( 86 | version character varying(255) NOT NULL 87 | ); 88 | 89 | -- Name: index_mytable_on_organization_id; Type: INDEX 90 | 91 | CREATE INDEX index_mytable_on_organization_id ON myschema.mytable USING btree (organization_id); 92 | 93 | -- Name: unique_schema_migrations; Type: INDEX 94 | 95 | CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version); 96 | 97 | -- PostgreSQL database dump complete 98 | 99 | SET search_path TO "$user", public; 100 | 101 | INSERT INTO "schema_migrations" (version) VALUES 102 | ('20250101011234'), 103 | ('20250101015678'); 104 | -------------------------------------------------------------------------------- /test/expectations/indexes_after_tables.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | SET default_tablespace = ''; 10 | 11 | -- Name: ar_internal_metadata; Type: TABLE 12 | 13 | CREATE TABLE public.ar_internal_metadata ( 14 | key character varying NOT NULL, 15 | value character varying, 16 | created_at timestamp(6) without time zone NOT NULL, 17 | updated_at timestamp(6) without time zone NOT NULL 18 | ); 19 | 20 | -- Name: delayed_jobs; Type: TABLE 21 | 22 | CREATE TABLE public.delayed_jobs ( 23 | id BIGSERIAL PRIMARY KEY, 24 | priority integer DEFAULT 0, 25 | attempts integer DEFAULT 0, 26 | handler text, 27 | last_error text, 28 | run_at timestamp without time zone, 29 | locked_at timestamp without time zone, 30 | failed_at timestamp without time zone, 31 | locked_by character varying(255), 32 | queue character varying(255), 33 | created_at timestamp without time zone NOT NULL, 34 | updated_at timestamp without time zone NOT NULL, 35 | metadata jsonb DEFAULT '{}'::jsonb, 36 | whodunnit text 37 | ) 38 | WITH (fillfactor='85'); 39 | 40 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 41 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 42 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 43 | 44 | -- Name: schema_migrations; Type: TABLE 45 | 46 | CREATE TABLE public.schema_migrations ( 47 | version character varying NOT NULL 48 | ); 49 | 50 | ALTER TABLE ONLY public.ar_internal_metadata 51 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 52 | 53 | ALTER TABLE ONLY public.schema_migrations 54 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 55 | 56 | -- PostgreSQL database dump complete 57 | 58 | SET search_path TO "$user", public; 59 | 60 | INSERT INTO "schema_migrations" (version) VALUES 61 | ('20240822225012'), 62 | ('20240822224954'), 63 | ('20240725043656'), 64 | ('20240621041110'), 65 | ('20240621020038'), 66 | ('20220802204003'), 67 | ('20211125055031'), 68 | ('20211012054749'), 69 | ('20210923052631'), 70 | ('20210903003251'); 71 | 
-------------------------------------------------------------------------------- /test/expectations/keep_extensions_all.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | -- Name: pg_stat_statements; Type: EXTENSION 10 | 11 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public; 12 | 13 | -- Name: EXTENSION pg_stat_statements; Type: COMMENT 14 | 15 | SET default_tablespace = ''; 16 | 17 | -- Name: ar_internal_metadata; Type: TABLE 18 | 19 | CREATE TABLE public.ar_internal_metadata ( 20 | key character varying NOT NULL, 21 | value character varying, 22 | created_at timestamp(6) without time zone NOT NULL, 23 | updated_at timestamp(6) without time zone NOT NULL 24 | ); 25 | 26 | -- Name: delayed_jobs; Type: TABLE 27 | 28 | CREATE TABLE public.delayed_jobs ( 29 | id BIGSERIAL PRIMARY KEY, 30 | priority integer DEFAULT 0, 31 | attempts integer DEFAULT 0, 32 | handler text, 33 | last_error text, 34 | run_at timestamp without time zone, 35 | locked_at timestamp without time zone, 36 | failed_at timestamp without time zone, 37 | locked_by character varying(255), 38 | queue character varying(255), 39 | created_at timestamp without time zone NOT NULL, 40 | updated_at timestamp without time zone NOT NULL, 41 | metadata jsonb DEFAULT '{}'::jsonb, 42 | whodunnit text 43 | ) 44 | WITH (fillfactor='85'); 45 | 46 | -- Name: schema_migrations; Type: TABLE 47 | 48 | CREATE TABLE public.schema_migrations ( 49 | version character varying NOT NULL 50 | ); 51 | 52 | ALTER TABLE ONLY public.ar_internal_metadata 53 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 54 | 55 | ALTER TABLE ONLY public.schema_migrations 56 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 57 | 58 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX 59 | 60 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 61 | 62 | -- Name: index_delayed_jobs_on_queue; Type: INDEX 63 | 64 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 65 | 66 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX 67 | 68 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 69 | 70 | -- PostgreSQL database dump complete 71 | 72 | SET search_path TO "$user", public; 73 | 74 | INSERT INTO "schema_migrations" (version) VALUES 75 | ('20240822225012'), 76 | ('20240822224954'), 77 | ('20240725043656'), 78 | ('20240621041110'), 79 | ('20240621020038'), 80 | ('20220802204003'), 81 | ('20211125055031'), 82 | ('20211012054749'), 83 | ('20210923052631'), 84 | ('20210903003251'); 85 | -------------------------------------------------------------------------------- /test/expectations/order_column_definitions.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | SET default_tablespace = ''; 10 | 11 | -- Name: ar_internal_metadata; Type: TABLE 12 | 13 | CREATE TABLE 
public.ar_internal_metadata ( 14 | created_at timestamp(6) without time zone NOT NULL, 15 | key character varying NOT NULL, 16 | updated_at timestamp(6) without time zone NOT NULL, 17 | value character varying 18 | ); 19 | 20 | -- Name: delayed_jobs; Type: TABLE 21 | 22 | CREATE TABLE public.delayed_jobs ( 23 | attempts integer DEFAULT 0, 24 | created_at timestamp without time zone NOT NULL, 25 | failed_at timestamp without time zone, 26 | handler text, 27 | id BIGSERIAL PRIMARY KEY, 28 | last_error text, 29 | locked_at timestamp without time zone, 30 | locked_by character varying(255), 31 | metadata jsonb DEFAULT '{}'::jsonb, 32 | priority integer DEFAULT 0, 33 | queue character varying(255), 34 | run_at timestamp without time zone, 35 | updated_at timestamp without time zone NOT NULL, 36 | whodunnit text 37 | ) 38 | WITH (fillfactor='85'); 39 | 40 | -- Name: schema_migrations; Type: TABLE 41 | 42 | CREATE TABLE public.schema_migrations ( 43 | version character varying NOT NULL 44 | ); 45 | 46 | ALTER TABLE ONLY public.ar_internal_metadata 47 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 48 | 49 | ALTER TABLE ONLY public.schema_migrations 50 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 51 | 52 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX 53 | 54 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 55 | 56 | -- Name: index_delayed_jobs_on_queue; Type: INDEX 57 | 58 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 59 | 60 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX 61 | 62 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 63 | 64 | -- PostgreSQL database dump complete 65 | 66 | SET search_path TO "$user", public; 67 | 68 | INSERT INTO "schema_migrations" (version) VALUES 69 | ('20240822225012'), 70 | ('20240822224954'), 71 | ('20240725043656'), 72 | ('20240621041110'), 73 | ('20240621020038'), 74 | ('20220802204003'), 75 | ('20211125055031'), 76 | ('20211012054749'), 77 | ('20210923052631'), 78 | ('20210903003251'); 79 | -------------------------------------------------------------------------------- /test/expectations/order_schema_migrations_values.sql: -------------------------------------------------------------------------------- 1 | SET statement_timeout = 0; 2 | SET lock_timeout = 0; 3 | SET client_encoding = 'UTF8'; 4 | SET standard_conforming_strings = on; 5 | SELECT pg_catalog.set_config('search_path', '', false); 6 | SET check_function_bodies = false; 7 | SET client_min_messages = warning; 8 | 9 | SET default_tablespace = ''; 10 | 11 | -- Name: ar_internal_metadata; Type: TABLE 12 | 13 | CREATE TABLE public.ar_internal_metadata ( 14 | key character varying NOT NULL, 15 | value character varying, 16 | created_at timestamp(6) without time zone NOT NULL, 17 | updated_at timestamp(6) without time zone NOT NULL 18 | ); 19 | 20 | -- Name: delayed_jobs; Type: TABLE 21 | 22 | CREATE TABLE public.delayed_jobs ( 23 | id BIGSERIAL PRIMARY KEY, 24 | priority integer DEFAULT 0, 25 | attempts integer DEFAULT 0, 26 | handler text, 27 | last_error text, 28 | run_at timestamp without time zone, 29 | locked_at timestamp without time zone, 30 | failed_at timestamp without time zone, 31 | locked_by character varying(255), 32 | queue character varying(255), 33 | created_at timestamp without time zone NOT NULL, 34 | updated_at timestamp without time zone NOT NULL, 35 | metadata jsonb DEFAULT '{}'::jsonb, 36 
| whodunnit text 37 | ) 38 | WITH (fillfactor='85'); 39 | 40 | -- Name: schema_migrations; Type: TABLE 41 | 42 | CREATE TABLE public.schema_migrations ( 43 | version character varying NOT NULL 44 | ); 45 | 46 | ALTER TABLE ONLY public.ar_internal_metadata 47 | ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); 48 | 49 | ALTER TABLE ONLY public.schema_migrations 50 | ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); 51 | 52 | -- Name: index_delayed_jobs_on_locked_by; Type: INDEX 53 | 54 | CREATE INDEX index_delayed_jobs_on_locked_by ON public.delayed_jobs USING btree (locked_by); 55 | 56 | -- Name: index_delayed_jobs_on_queue; Type: INDEX 57 | 58 | CREATE INDEX index_delayed_jobs_on_queue ON public.delayed_jobs USING btree (queue); 59 | 60 | -- Name: index_delayed_jobs_on_run_at; Type: INDEX 61 | 62 | CREATE INDEX index_delayed_jobs_on_run_at ON public.delayed_jobs USING btree (run_at) WHERE (locked_at IS NULL); 63 | 64 | -- PostgreSQL database dump complete 65 | 66 | SET search_path TO "$user", public; 67 | 68 | INSERT INTO "schema_migrations" (version) VALUES 69 | ('20210903003251') 70 | ,('20210923052631') 71 | ,('20211012054749') 72 | ,('20211125055031') 73 | ,('20220802204003') 74 | ,('20240621020038') 75 | ,('20240621041110') 76 | ,('20240725043656') 77 | ,('20240822224954') 78 | ,('20240822225012') 79 | ; 80 | 81 | -------------------------------------------------------------------------------- /test/expectations/partitions.sql: -------------------------------------------------------------------------------- 1 | 2 | -- Name: autovacuum_run_stats_35d; Type: TABLE 3 | 4 | CREATE TABLE public.autovacuum_run_stats_35d ( 5 | autovacuum_run_stats_id uuid DEFAULT public.gen_random_uuid() NOT NULL, 6 | server_id uuid, 7 | schema_table_id bigint, 8 | occurred_at timestamp with time zone NOT NULL 9 | ) 10 | PARTITION BY RANGE (occurred_at); 11 | 12 | -- Name: index_autovacuum_run_stats_35d_on_schema_table_id_occurred_at; Type: INDEX 13 | 14 | CREATE INDEX index_autovacuum_run_stats_35d_on_schema_table_id_occurred_at ON public.autovacuum_run_stats_35d USING btree (schema_table_id, occurred_at); 15 | 16 | -- Name: index_autovacuum_run_stats_35d_on_server_id_and_occurred_at; Type: INDEX 17 | 18 | CREATE INDEX index_autovacuum_run_stats_35d_on_server_id_and_occurred_at ON public.autovacuum_run_stats_35d USING btree (server_id, occurred_at); 19 | 20 | -- Name: schema_table_infos_35d; Type: TABLE 21 | 22 | CREATE TABLE public.schema_table_infos_35d ( 23 | schema_table_id bigint NOT NULL, 24 | collected_at timestamp with time zone NOT NULL, 25 | server_id uuid NOT NULL 26 | ) 27 | PARTITION BY RANGE (collected_at); 28 | 29 | ALTER TABLE ONLY public.schema_table_infos_35d 30 | ADD CONSTRAINT schema_table_infos_35d_pkey PRIMARY KEY (schema_table_id, collected_at); 31 | -------------------------------------------------------------------------------- /test/test_helper.rb: -------------------------------------------------------------------------------- 1 | $LOAD_PATH.unshift File.expand_path("../lib", __dir__) 2 | require "activerecord-clean-db-structure" 3 | require 'activerecord-clean-db-structure/clean_dump' 4 | 5 | require "minitest/autorun" 6 | Dir[File.expand_path("support/**/*.rb", __dir__)].sort.each { |rb| require(rb) } 7 | --------------------------------------------------------------------------------